Skip to content
Snippets Groups Projects
Commit b9f47429 authored by s192327's avatar s192327
Browse files

added retrieval of cameras looking at specified point

parent e6e7584f
No related branches found
No related tags found
No related merge requests found
...@@ -4,7 +4,7 @@ import pix4DUtils as p4d ...@@ -4,7 +4,7 @@ import pix4DUtils as p4d
# %% LOAD CAMERAS AND SCENE # %% LOAD CAMERAS AND SCENE
project_dir = "../datasets/2020-07-01-DTU-Risoe-80m-15d" project_dir = "../datasets/OL-test"
scene = p4d.loadScene(project_dir) scene = p4d.loadScene(project_dir)
cameras = p4d.loadCameras(project_dir) cameras = p4d.loadCameras(project_dir)
...@@ -16,7 +16,7 @@ bounds_scene, bounds_world, Hs = p4d.getBoundsHomography(scene,cameras,n_points= ...@@ -16,7 +16,7 @@ bounds_scene, bounds_world, Hs = p4d.getBoundsHomography(scene,cameras,n_points=
# %% SAVE BOUNDARIES AS JSON FILE # %% SAVE BOUNDARIES AS JSON FILE
p4d.saveBoundariesJSON(bounds_scene,scene.path,"bounds_mosaic") p4d.saveBoundariesJSON(bounds_scene,scene.path,"bounds_mosaic")
p4d.saveBoundariesgeoJSON(bounds_world[:,:,:2],scene.crs,scene.path,"bounds_world") p4d.saveBoundariesgeoJSON(bounds_world[:,:,:2],scene.crs_name,scene.path,"bounds_world")
# %% SAVE IMAGES OF MOSAICS WITH EACH CAMERA'S VIEWING AREA # %% SAVE IMAGES OF MOSAICS WITH EACH CAMERA'S VIEWING AREA
...@@ -29,3 +29,7 @@ p4d.saveHomographyPictures(scene.img,cameras,Hs,scene.path) ...@@ -29,3 +29,7 @@ p4d.saveHomographyPictures(scene.img,cameras,Hs,scene.path)
# %% COMPARE ORIGINAL PICTURES WITH HOMOGRAPHY IMAGES # %% COMPARE ORIGINAL PICTURES WITH HOMOGRAPHY IMAGES
p4d.compareHomographyPicture(scene,cameras,Hs,threshold=None) p4d.compareHomographyPicture(scene,cameras,Hs,threshold=None)
# %% GET ALL CAMERAS LOOKING AT A PARTICULAR POINT
result = p4d.getLookingCameras(scene,cameras,Hs,("mosaic",(0,0)))
\ No newline at end of file
...@@ -14,7 +14,7 @@ import open3d as o3d ...@@ -14,7 +14,7 @@ import open3d as o3d
# SCENE # SCENE
class Scene: class Scene:
def __init__(self, path, width, height, roi, img, dsm, crs, transform, offset): def __init__(self, path, width, height, roi, img, dsm, crs, crs_name, transform, offset):
self.path = path self.path = path
self.width = width self.width = width
self.height = height self.height = height
...@@ -22,6 +22,7 @@ class Scene: ...@@ -22,6 +22,7 @@ class Scene:
self.img = img self.img = img
self.dsm = dsm self.dsm = dsm
self.crs = crs self.crs = crs
self.crs_name = crs_name
self.transform = transform self.transform = transform
self.offset = offset self.offset = offset
self.pcd = None self.pcd = None
...@@ -72,7 +73,7 @@ class Scene: ...@@ -72,7 +73,7 @@ class Scene:
def computeMesh(self): def computeMesh(self):
if self.mesh is None: if self.mesh is None:
self.mesh = 1 # (TO BE IMPLEMENTED !!) self.mesh = 1 # (! TO BE IMPLEMENTED !)
# CAMERA # CAMERA
class Camera: class Camera:
...@@ -151,6 +152,7 @@ def loadScene(project_dir): ...@@ -151,6 +152,7 @@ def loadScene(project_dir):
# crs # crs
crs_string = rgb.GetProjection() crs_string = rgb.GetProjection()
crs_name = osr.SpatialReference(crs_string).GetAttrValue('AUTHORITY',0)+":"+osr.SpatialReference(crs_string).GetAttrValue('AUTHORITY',1)
# transformation matrix to get crs coordinates from image pixels # transformation matrix to get crs coordinates from image pixels
transform_np = np.asarray(rgb.GetGeoTransform()).reshape(2,3)[:,[1,2,0]] transform_np = np.asarray(rgb.GetGeoTransform()).reshape(2,3)[:,[1,2,0]]
...@@ -167,6 +169,7 @@ def loadScene(project_dir): ...@@ -167,6 +169,7 @@ def loadScene(project_dir):
img_np, # BGR (0 to 255) array of float64 (h x w x 3) img_np, # BGR (0 to 255) array of float64 (h x w x 3)
dsm_np, # DSM (meters) array of float32 (h x w) dsm_np, # DSM (meters) array of float32 (h x w)
crs_string, # CRS string (N/A) crs_string, # CRS string (N/A)
crs_name, # CRS name string (N/A)
transform_np, # Transf. px->m array of float64 (2,3) transform_np, # Transf. px->m array of float64 (2,3)
offset, # Local ref. frame array of float64 (1,3) offset, # Local ref. frame array of float64 (1,3)
) )
...@@ -557,7 +560,8 @@ def saveBoundariesJSON(bounds,path,name): ...@@ -557,7 +560,8 @@ def saveBoundariesJSON(bounds,path,name):
# %% saveBoundariesgeoJSON function # %% saveBoundariesgeoJSON function
def saveBoundariesgeoJSON(bounds,crs,path,name): def saveBoundariesgeoJSON(bounds,crs,path,name):
"""Saves the boundaries as geoJSON MultiPolygon in the specified coordinate system """Saves the boundaries as geoJSON MultiPolygon in the specified
coordinate system
Parameters: Parameters:
bounds: array of size N_boundaries x N_points x 2 bounds: array of size N_boundaries x N_points x 2
...@@ -582,7 +586,7 @@ def saveBoundariesgeoJSON(bounds,crs,path,name): ...@@ -582,7 +586,7 @@ def saveBoundariesgeoJSON(bounds,crs,path,name):
crs_obj = { crs_obj = {
"type": "name", "type": "name",
"properties": { "properties": {
"name": osr.SpatialReference(crs).GetAttrValue('AUTHORITY',0)+":"+osr.SpatialReference(crs).GetAttrValue('AUTHORITY',1) "name": crs
} }
} }
...@@ -685,7 +689,7 @@ def compareHomographyPicture(scene,cameras,Hs,threshold=None): ...@@ -685,7 +689,7 @@ def compareHomographyPicture(scene,cameras,Hs,threshold=None):
"""Compare the original pictures and the homography views by plotting them """Compare the original pictures and the homography views by plotting them
side by side. The original pictures are undistorted, since the homography side by side. The original pictures are undistorted, since the homography
views don't take distortion into account. If threshold is set, points views don't take distortion into account. If threshold is set, points
that are too distant from the fitted plane will be blacked out that are too distant from the fitted plane will be blacked out.
Parameters: Parameters:
scene: scene object of pix4DUtils module scene: scene object of pix4DUtils module
...@@ -816,3 +820,79 @@ def compareHomographyPicture(scene,cameras,Hs,threshold=None): ...@@ -816,3 +820,79 @@ def compareHomographyPicture(scene,cameras,Hs,threshold=None):
break break
cv2.destroyAllWindows() cv2.destroyAllWindows()
# %% getLookingCameras function
def getLookingCameras(scene, cameras, Hs, point):
    """Return the cameras whose view contains a given point of interest.

    The point may be supplied as a pixel coordinate in the mosaic, as a
    UTM coordinate, or as a pixel coordinate in one specific camera. It
    is first normalized to homogeneous mosaic pixel coordinates, then
    projected into every camera through that camera's homography.

    Parameters:
        scene: scene object of pix4DUtils module
            Scene of a pix4D project
        cameras: list of cameras
            Camera objects of pix4DUtils module
        Hs: array of size n_cameras x 3 x 3
            The homography used for each camera, computed by the
            "getBoundsHomography" function
        point: tuple, the point of interest
            One of ("utm",(coord_x,coord_y)), ("mosaic",(px_x,px_y)) or
            ("camera",n_camera,(px_x,px_y))

    Returns:
        result: dict
            Metadata about the point (ROI membership, UTM and mosaic
            coordinates) plus the list of cameras that see it, each with
            its index, camera object and pixel coordinates
    """
    kind = point[0]
    # --- normalize the input to homogeneous mosaic pixels + UTM coords ---
    if kind == "utm":
        point_utm = np.array(point[1])
        # UTM -> mosaic pixels, then append the homogeneous 1
        point_mosaic = UTMtoPixel(np.array([point[1]]), scene.transform)
        point_mosaic = np.vstack((point_mosaic.T, 1))
    elif kind == "mosaic":
        # already mosaic pixels: just make the vector homogeneous ...
        point_mosaic = np.vstack((np.array(point[1]).reshape(-1, 1), 1))
        # ... and derive the UTM coordinates via the scene transform
        point_utm = np.dot(scene.transform, point_mosaic).reshape(-1,)
    elif kind == "camera":
        # back-project from the chosen camera with the inverse homography
        cam_px = np.vstack((np.array(point[2]).reshape(-1, 1), 1))
        point_mosaic = np.dot(np.linalg.inv(Hs[point[1], :, :]), cam_px)
        point_mosaic = point_mosaic / point_mosaic[-1, 0]  # dehomogenize
        point_utm = np.dot(scene.transform, point_mosaic).reshape(-1,)
    # --- project the mosaic point into every camera ---
    point_cameras = np.array(
        [np.dot(Hs[i, :, :], point_mosaic).T for i, _cam in enumerate(cameras)]
    )
    # dehomogenize per camera and round to whole pixels
    point_cameras = np.rint(
        (point_cameras / np.expand_dims(point_cameras[:, :, 2], 2))[:, :, :2]
    )
    # !TODO! DISTORT PROJECTED POINTS
    # flag the cameras whose sensor resolution contains the projected pixel
    in_camera = [
        (0 <= point_cameras[i, 0, 0] <= cam.imgwidth)
        and (0 <= point_cameras[i, 0, 1] <= cam.imgheight)
        for i, cam in enumerate(cameras)
    ]
    # --- assemble the result ---
    # NOTE(review): roi is indexed here as [x, y]; image arrays are
    # conventionally [row, col] = [y, x] — confirm against roi's layout.
    result = {
        "point": {
            "inside_roi": scene.roi[int(point_mosaic[0, 0]), int(point_mosaic[1, 0])],
            scene.crs_name: {"x": point_utm[0], "y": point_utm[1]},
            "mosaic_px": {"u": point_mosaic[0, 0], "v": point_mosaic[1, 0]},
        },
        "cameras": [],
    }
    # append every camera that sees the point, with its pixel coordinates
    for idx, cam in enumerate(cameras):
        if in_camera[idx]:
            result["cameras"].append({
                "camera_n": idx,
                "camera": cam,
                "coordinates": {"u": point_cameras[idx, 0, 0], "v": point_cameras[idx, 0, 1]},
            })
    return result
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment