Examples for remote sensing applications#
[9]:
import os, sys
sys.path.insert(0, os.path.abspath(".."))
import geoclide as gc
import math
import numpy as np
Find the x and y components of the satellite position knowing its altitude and its viewing zenith and azimuth angles#
[10]:
vza = 45. # viewing zenith angle in degrees
vaa = 45. # viewing azimuth angle in degrees
sat_altitude = 700. # satellite altitude in kilometers
origin = gc.Point(0., 0., 0.) # origin is the viewer seeing the satellite
# The vaa is measured from north, going clockwise.
# Let's assume that in our coordinate system the x axis points in the north direction.
# Then the theta (zenith) angle = vza and the phi (azimuth) angle = -vaa
theta = vza
phi = -vaa
# Get the vector from the ground to the satellite
dir_to_sat = gc.ang2vec(theta=theta, phi=phi)
ray = gc.Ray(o=origin, d=dir_to_sat) # create the ray starting from origin and going in the dir_to_sat direction
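As a quick cross-check, the direction returned by gc.ang2vec can be recomputed by hand. This is a minimal sketch, assuming the usual spherical convention d = (sin(theta)cos(phi), sin(theta)sin(phi), cos(theta)), which the intersection results below are consistent with:
# Sanity check (sketch): recompute the direction with the explicit spherical-coordinate formula
theta_rad, phi_rad = np.radians(theta), np.radians(phi)
d_check = np.array([np.sin(theta_rad) * np.cos(phi_rad),
                    np.sin(theta_rad) * np.sin(phi_rad),
                    np.cos(theta_rad)])
print(d_check)  # expected to match the components of dir_to_sat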
Here without considering the sphericity of the earth#
[11]:
b1 = gc.BBox(p1=gc.Point(-math.inf, -math.inf, 0.), p2=gc.Point(math.inf, math.inf, sat_altitude))
ds_pp = gc.calc_intersection(b1, ray) # returns an xarray dataset
ds_pp['phit'].values
[11]:
array([ 494.97474683, -494.97474683, 700. ])
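This plane-parallel result can be verified with basic trigonometry: the horizontal distance from the viewer to the point below the satellite is sat_altitude * tan(vza), and its x and y projections follow from the azimuth. A small sketch using only quantities already defined:
# Analytic check of the plane-parallel intersection point
h_dist = sat_altitude * np.tan(np.radians(vza))   # 700 km for vza = 45 deg
x_sat = h_dist * np.cos(np.radians(phi))          # ~494.97 km
y_sat = h_dist * np.sin(np.radians(phi))          # ~-494.97 km
print(x_sat, y_sat, sat_altitude)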
Here considering the sphericity of the earth#
[12]:
earth_radius = 6378. # the equatorial earth radius in kilometers
oTw = gc.get_translate_tf(gc.Vector(0., 0., -earth_radius))
sphere_sat_alti = gc.Sphere(radius=earth_radius+sat_altitude, oTw=oTw) # apply oTw to move the sphere center to the earth center
ds_sp = gc.calc_intersection(sphere_sat_alti, ray) # returns an xarray dataset
ds_sp['phit'].values
[12]:
array([ 472.61058011, -472.61058011, 668.37229212])
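The same point can be obtained by solving the ray-sphere quadratic |o + t*d - c|^2 = R^2 by hand, with c the earth center in the viewer frame and R the sphere radius. A sketch with numpy only, independent of geoclide:
# Independent check (sketch): ray-sphere intersection solved with the quadratic formula
R = earth_radius + sat_altitude
c = np.array([0., 0., -earth_radius])          # earth center in the viewer frame
d = np.array([np.sin(np.radians(theta)) * np.cos(np.radians(phi)),
              np.sin(np.radians(theta)) * np.sin(np.radians(phi)),
              np.cos(np.radians(theta))])      # same direction as dir_to_sat
oc = -c                                        # ray origin minus sphere center, origin is (0, 0, 0)
b = 2. * np.dot(d, oc)
cq = np.dot(oc, oc) - R * R                    # negative: the viewer is inside the sphere
t = (-b + np.sqrt(b * b - 4. * cq)) / 2.       # positive root, the intersection above the viewer
print(t * d)                                   # ~[472.61, -472.61, 668.37]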
Satellite camera directions (3MI example)#
Compute all the pixel directions#
[13]:
# Number of camera pixels in x and y
nx = 509
ny = 255
# Satellite altitude (in km)
z_sat = 830.
# Nadir resolution (in km)
nad_res = 4.
# Swath (in km)
swath = 2200
# Field of view of 1 pixel and of the camera
pixel_fov = np.rad2deg(np.arctan(nad_res/z_sat))
full_fov = np.rad2deg(np.arctan(0.5*swath/z_sat)) - pixel_fov
focal_length = (nx - 1) * 0.5 / np.tan(np.radians(full_fov))
focal_pos = gc.Point(x=0., y=0., z=focal_length)
x_ = -(nx-1)*0.5 + np.arange(nx)
y_ = -(ny-1)*0.5 + np.arange(ny)
x, y = np.meshgrid(x_, y_)
x = x.flatten()
y = y.flatten()
z = np.zeros_like(x)
id_pixels = gc.Point(x,y,z) # id = (0, 0, 0) corresponds to the pixel at the center of the camera
dir_pixels = gc.normalize(id_pixels - focal_pos)
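The camera geometry can be checked by comparing the viewing angle of the outermost pixel along x with the half-swath angle arctan(0.5 * swath / z_sat); by construction they differ by one pixel_fov. A short sketch:
# Check: angle of the outermost pixel along x vs. the half-swath angle
edge_angle = np.degrees(np.arctan((nx - 1) * 0.5 / focal_length))
print(edge_angle, np.degrees(np.arctan(0.5 * swath / z_sat)))   # they differ by pixel_fov by construction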
[14]:
# directions of the pixels in the first row (y index 0); columns are the x, y, z components
dir_pixels.to_numpy().reshape(ny,nx,3)[0,:]
[14]:
array([[-0.73904935, -0.36952468, -0.56325622],
[-0.73772467, -0.37032029, -0.56446895],
[-0.73639141, -0.37111789, -0.56568472],
...,
[ 0.73639141, -0.37111789, -0.56568472],
[ 0.73772467, -0.37032029, -0.56446895],
[ 0.73904935, -0.36952468, -0.56325622]], shape=(509, 3))
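Since the pixel directions point downward (negative z component), the viewing zenith angle of each pixel can be recovered directly from the z component. A small sketch using only numpy and the arrays above:
# Per-pixel viewing zenith angle in degrees, on the (ny, nx) camera grid
dirs = dir_pixels.to_numpy()
vza_pixels = np.degrees(np.arccos(-dirs[:, 2])).reshape(ny, nx)
print(vza_pixels.min(), vza_pixels.max())   # ~0 deg at the camera center, maximum at the corners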
Select only pixels that view a specific box zone#
[15]:
# box of size 50 km in x and y, and 10 km in z
box = gc.BBox(p1=gc.Point(-25., -25., 0.), p2=gc.Point(25.,25.,10.))
# satellite position duplicated to match the number of pixel directions
sat_pos = gc.Point(np.zeros_like(dir_pixels.x), np.zeros_like(dir_pixels.x),
np.full_like(dir_pixels.x, z_sat))
# create the rays, one per camera pixel
r_sat = gc.Ray(sat_pos, dir_pixels)
is_intersection = box.is_intersection(r_sat)
[16]:
# pixel directions and (x, y) ids of the first 10 intersecting pixels
dir_pixels.to_numpy()[is_intersection][0:10], id_pixels.to_numpy()[is_intersection][0:10]
[16]:
(array([[-0.02581156, -0.02581156, -0.99933354],
[-0.02065172, -0.02581465, -0.99945341],
[-0.01549024, -0.02581706, -0.99954666],
[-0.01032751, -0.02581878, -0.99961329],
[-0.00516396, -0.02581982, -0.99965328],
[ 0. , -0.02582016, -0.9996666 ],
[ 0.00516396, -0.02581982, -0.99965328],
[ 0.01032751, -0.02581878, -0.99961329],
[ 0.01549024, -0.02581706, -0.99954666],
[ 0.02065172, -0.02581465, -0.99945341]]),
array([[-5., -5., 0.],
[-4., -5., 0.],
[-3., -5., 0.],
[-2., -5., 0.],
[-1., -5., 0.],
[ 0., -5., 0.],
[ 1., -5., 0.],
[ 2., -5., 0.],
[ 3., -5., 0.],
[ 4., -5., 0.]]))
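Finally, the boolean mask returned by is_intersection (assumed here to be a flat array of booleans, as its use for indexing above suggests) can be reshaped to the camera grid to count or map the pixels whose line of sight crosses the box:
# Map of the pixels viewing the box, on the (ny, nx) camera grid
mask = np.asarray(is_intersection).reshape(ny, nx)
print(mask.sum(), "pixels view the box")
print(np.argwhere(mask).min(axis=0), np.argwhere(mask).max(axis=0))   # (y, x) index range of the viewing pixels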