import numpy as np


def get_intrinsic_matrix(field_of_view_deg, image_width, image_height):
    """
    Returns the intrinsic matrix K.
    """
    # For our Carla camera alpha_u = alpha_v = alpha.
    # alpha can be computed from the camera's field of view via
    field_of_view_rad = field_of_view_deg * np.pi / 180
    alpha = (image_width / 2.0) / np.tan(field_of_view_rad / 2.0)
    # TODO step 1: Complete this function
    raise NotImplementedError
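

# A minimal sketch of what the completed step 1 could look like (not part of the
# exercise template). It assumes the principal point sits at the image center;
# the name `example_intrinsic_matrix` is ours, purely for illustration.
def example_intrinsic_matrix(field_of_view_deg, image_width, image_height):
    field_of_view_rad = field_of_view_deg * np.pi / 180
    alpha = (image_width / 2.0) / np.tan(field_of_view_rad / 2.0)
    Cu = image_width / 2.0   # assumed principal point (image center)
    Cv = image_height / 2.0
    return np.array([[alpha, 0.0, Cu],
                     [0.0, alpha, Cv],
                     [0.0, 0.0, 1.0]])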


def project_polyline(polyline_world, trafo_world_to_cam, K):
    """
    Returns an array uv containing the pixel coordinates of the polyline.

    Parameters
    ----------
    polyline_world : array_like, shape (M,3)
        Each row of this array is a vertex (x,y,z) of the polyline.
    trafo_world_to_cam : array_like, shape (4,4)
        Transformation matrix that maps vectors (x_world, y_world, z_world, 1)
        to vectors (x_cam, y_cam, z_cam, 1).
    K : array_like, shape (3,3)
        Intrinsic matrix of the camera.

    Returns
    -------
    uv : ndarray, shape (M,2)
        Pixel coordinates of the projected polyline.
        The first column is u, the second column is v.
    """
    # TODO step 1: Write this function
    raise NotImplementedError
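

# A possible sketch for step 1 (illustrative only, not the reference solution):
# append a homogeneous 1 to every vertex, map the vertices into the camera frame,
# project with K, and divide by the depth. `example_project_polyline` is a
# hypothetical name.
def example_project_polyline(polyline_world, trafo_world_to_cam, K):
    points_world = np.asarray(polyline_world, dtype=float)       # shape (M,3)
    ones = np.ones((points_world.shape[0], 1))
    points_hom = np.hstack((points_world, ones))                 # shape (M,4)
    points_cam = (trafo_world_to_cam @ points_hom.T)[:3, :]      # shape (3,M)
    uvw = K @ points_cam                                         # homogeneous pixel coordinates
    return (uvw[:2, :] / uvw[2, :]).T                            # shape (M,2), columns u and v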


class CameraGeometry(object):
    def __init__(self, height=1.3, yaw_deg=0, pitch_deg=-5, roll_deg=0,
                 image_width=1024, image_height=512, field_of_view_deg=45):
        # scalar constants
        self.height = height
        self.pitch_deg = pitch_deg
        self.roll_deg = roll_deg
        self.yaw_deg = yaw_deg
        self.image_width = image_width
        self.image_height = image_height
        self.field_of_view_deg = field_of_view_deg
        # camera intrinsics and extrinsics
        self.intrinsic_matrix = get_intrinsic_matrix(field_of_view_deg, image_width, image_height)
        self.inverse_intrinsic_matrix = np.linalg.inv(self.intrinsic_matrix)
        # Note that "rotation_cam_to_road" has the math symbol R_{rc} in the book
        yaw = np.deg2rad(yaw_deg)
        pitch = np.deg2rad(pitch_deg)
        roll = np.deg2rad(roll_deg)
        cy, sy = np.cos(yaw), np.sin(yaw)
        cp, sp = np.cos(pitch), np.sin(pitch)
        cr, sr = np.cos(roll), np.sin(roll)
        rotation_road_to_cam = np.array(
            [[cr * cy + sp * sr * sy, cr * sp * sy - cy * sr, -cp * sy],
             [cp * sr, cp * cr, sp],
             [cr * sy - cy * sp * sr, -cr * cy * sp - sr * sy, cp * cy]])
        # For rotation matrices, taking the transpose is the same as inversion.
        self.rotation_cam_to_road = rotation_road_to_cam.T
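        # Sanity check (added for illustration): with yaw = roll = 0 the matrix above
        # reduces to a pure rotation about the camera x-axis,
        # [[1, 0, 0], [0, cp, sp], [0, -sp, cp]], i.e. a pitch rotation only.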

        # TODO step 2: replace the 'None' values in the following code with correct expressions

        self.translation_cam_to_road = None
        self.trafo_cam_to_road = None
        # compute vector nc. Note that R_{rc}^T = R_{cr}
        self.road_normal_camframe = None
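
        # A possible sketch for step 2 (commented out so the exercise template stays
        # unchanged). It assumes the road frame has x right, y down, z forward with its
        # origin on the road directly below the camera, and that the road normal is taken
        # to point from the camera towards the road; verify these conventions against the book.
        #   self.translation_cam_to_road = np.array([0, -self.height, 0])
        #   self.trafo_cam_to_road = np.eye(4)
        #   self.trafo_cam_to_road[:3, :3] = self.rotation_cam_to_road
        #   self.trafo_cam_to_road[:3, 3] = self.translation_cam_to_road
        #   self.road_normal_camframe = self.rotation_cam_to_road.T @ np.array([0, 1, 0])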

    def camframe_to_roadframe(self, vec_in_cam_frame):
        return self.rotation_cam_to_road @ vec_in_cam_frame + self.translation_cam_to_road

    def uv_to_roadXYZ_camframe(self, u, v):
        """
        Inverse perspective mapping from pixel coordinates to 3D coordinates.

        Parameters
        ----------
        u, v : float
            Pixel coordinates of some part of the road.

        Returns
        -------
        XYZ : array_like, shape (3,)
            Three-dimensional point in the camera reference frame that lies on the road
            and was mapped by the camera to pixel coordinates u, v.
        """
        # TODO step 2: Write this function
        raise NotImplementedError
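
    # --- Illustrative sketch, not part of the exercise template ---
    # One way uv_to_roadXYZ_camframe could be written, assuming step 2 sets
    # `road_normal_camframe` to the unit road normal pointing from the camera
    # towards the road (see the commented sketch in __init__). The name
    # `_uv_to_roadXYZ_camframe_sketch` is ours.
    def _uv_to_roadXYZ_camframe_sketch(self, u, v):
        uv_hom = np.array([u, v, 1.0])                 # homogeneous pixel coordinates
        ray = self.inverse_intrinsic_matrix @ uv_hom   # viewing ray direction K^{-1} (u, v, 1)
        # Scale the ray so that it hits the road plane n_c . X = height:
        return self.height * ray / self.road_normal_camframe.dot(ray)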

    def uv_to_roadXYZ_roadframe(self, u, v):
        r_camframe = self.uv_to_roadXYZ_camframe(u, v)
        return self.camframe_to_roadframe(r_camframe)

    def uv_to_roadXYZ_roadframe_iso8855(self, u, v):
        X, Y, Z = self.uv_to_roadXYZ_roadframe(u, v)
        # ISO 8855 uses x forward, y left, z up; read the book section on
        # coordinate systems to understand this reordering.
        return np.array([Z, -X, -Y])

    def precompute_grid(self, dist=60):
        """
        Precomputes a grid that will be used for polynomial fitting at a later stage.

        Parameters
        ----------
        dist : float
            Distance threshold in meters. For the grid, only pixel coordinates [u,v]
            are considered that depict parts of the road plane that are no more than
            a distance `dist` away along the road.

        Returns
        -------
        cut_v : int
            Threshold for the pixel coordinate v that corresponds to the `dist` input.
        grid : array_like, shape (M,2)
            A list of x,y coordinates. Each element corresponds to the x-y coordinates
            of one pixel [u,v] (v > cut_v).
        """
        cut_v = int(self.compute_minimum_v(dist=dist) + 1)
        # TODO step 3: compute `grid`
        grid = None
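        # A possible sketch for step 3 (commented out so the template stays unchanged):
        # run the inverse perspective mapping for every pixel below cut_v and keep the
        # road-frame x-y coordinates, e.g.
        #   xy = [self.uv_to_roadXYZ_roadframe_iso8855(u, v)[:2]
        #         for v in range(cut_v, self.image_height)
        #         for u in range(self.image_width)]
        #   grid = np.array(xy)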
        return cut_v, grid

    def compute_minimum_v(self, dist):
        """
        Find cut_v such that pixels with v < cut_v are irrelevant for polynomial fitting.
        Everything that is further than `dist` along the road is considered irrelevant.
        """
        trafo_road_to_cam = np.linalg.inv(self.trafo_cam_to_road)
        # Take a point `dist` meters ahead on the road and project it into the image.
        point_far_away_on_road = trafo_road_to_cam @ np.array([0, 0, dist, 1])
        uv_vec = self.intrinsic_matrix @ point_far_away_on_road[:3]
        uv_vec /= uv_vec[2]
        cut_v = uv_vec[1]
        return cut_v
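

# Example usage once the TODO steps are completed (illustrative only; the parameter
# values simply repeat the defaults above, the pixel coordinates are arbitrary):
#   cg = CameraGeometry(height=1.3, pitch_deg=-5, image_width=1024,
#                       image_height=512, field_of_view_deg=45)
#   cut_v, grid = cg.precompute_grid(dist=60)
#   xyz = cg.uv_to_roadXYZ_roadframe_iso8855(u=600, v=400)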