
Commit 83fe2f6

f-dy and brentyi authored
fix core tests (#3687)
* fix core tests
* Try alternative fix for pyright errors
* Run ruff, bump core code checks to 3.11
* Run correct ruff version
* Use the previous pyright version 👀
* cv2 ignore, pin dev dependency to newer pytorch version
* ruff
* Remove pyright error suppressions

---------

Co-authored-by: Brent Yi <[email protected]>
1 parent 2adcc38 commit 83fe2f6

File tree: 24 files changed (+49, -40 lines)


.github/workflows/core_code_checks.yml

Lines changed: 2 additions & 2 deletions
@@ -15,10 +15,10 @@ jobs:

     steps:
       - uses: actions/checkout@v3
-      - name: Set up Python 3.8.13
+      - name: Set up Python 3.11.13
         uses: actions/setup-python@v4
         with:
-          python-version: '3.8.13'
+          python-version: '3.11.13'
       - uses: actions/cache@v3
         with:
           path: ${{ env.pythonLocation }}
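
This hunk only bumps the interpreter that CI uses. If you want a local environment to mirror it, a minimal sanity check (assuming you manage your own interpreter, e.g. with pyenv or conda) might be:

    import sys

    # CI now pins CPython 3.11.13; fail fast if the local interpreter is on a different minor version.
    assert sys.version_info[:2] == (3, 11), f"expected Python 3.11, got {sys.version}"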

nerfstudio/cameras/camera_utils.py

Lines changed: 3 additions & 3 deletions
@@ -172,7 +172,7 @@ def get_interpolated_poses(pose_a: NDArray, pose_b: NDArray, steps: int = 10) ->
     quat_b = quaternion_from_matrix(pose_b[:3, :3])

     ts = np.linspace(0, 1, steps)
-    quats = [quaternion_slerp(quat_a, quat_b, t) for t in ts]
+    quats = [quaternion_slerp(quat_a, quat_b, float(t)) for t in ts]
     trans = [(1 - t) * pose_a[:3, 3] + t * pose_b[:3, 3] for t in ts]

     poses_ab = []
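
Some context on the float(t) change: np.linspace yields NumPy scalars (numpy.float64), which a strict type checker can reject for a parameter annotated as a plain float, so the value is converted before calling quaternion_slerp. A standalone sketch of the mismatch, with illustrative variable names rather than anything from the repo:

    import numpy as np

    ts = np.linspace(0, 1, 10)
    t = ts[0]        # numpy.float64, not a builtin float
    t_py = float(t)  # builtin float, satisfies a parameter annotated as float
    print(type(t), type(t_py))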
@@ -199,7 +199,7 @@ def get_interpolated_k(
         List of interpolated camera poses
     """
     Ks: List[Float[Tensor, "3 3"]] = []
-    ts = np.linspace(0, 1, steps)
+    ts = torch.linspace(0, 1, steps, dtype=k_a.dtype, device=k_a.device)
     for t in ts:
         new_k = k_a * (1.0 - t) + k_b * t
         Ks.append(new_k)
@@ -218,7 +218,7 @@ def get_interpolated_time(
         steps: number of steps the interpolated pose path should contain
     """
     times: List[Float[Tensor, "1"]] = []
-    ts = np.linspace(0, 1, steps)
+    ts = torch.linspace(0, 1, steps, dtype=time_a.dtype, device=time_a.device)
     for t in ts:
         new_t = time_a * (1.0 - t) + time_b * t
         times.append(new_t)
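
The two hunks above swap np.linspace for torch.linspace so the interpolation weights share the dtype and device of the tensors they scale. A rough sketch of the same pattern, using made-up 3x3 matrices in place of real camera intrinsics:

    import torch

    k_a = torch.eye(3, dtype=torch.float32)        # stand-in for one camera's intrinsics
    k_b = 2.0 * torch.eye(3, dtype=torch.float32)  # stand-in for the other camera's intrinsics
    ts = torch.linspace(0, 1, 5, dtype=k_a.dtype, device=k_a.device)
    Ks = [k_a * (1.0 - t) + k_b * t for t in ts]
    print(len(Ks), Ks[0].dtype)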

nerfstudio/cameras/rays.py

Lines changed: 1 addition & 0 deletions
@@ -136,6 +136,7 @@ def get_weights(self, densities: Float[Tensor, "*batch num_samples 1"]) -> Float
             Weights for each sample
         """

+        assert self.deltas is not None, "Deltas must be set to compute weights"
         delta_density = self.deltas * densities
         alphas = 1 - torch.exp(-delta_density)
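
The added assert is aimed at the type checker as much as at runtime: deltas is declared Optional, and asserting it is not None narrows the type before the multiplication. A self-contained sketch of the same narrowing, with hypothetical tensors in place of RaySamples:

    from typing import Optional

    import torch

    deltas: Optional[torch.Tensor] = torch.ones(4, 1)
    densities = torch.rand(4, 1)

    assert deltas is not None, "Deltas must be set to compute weights"
    delta_density = deltas * densities  # the checker now sees a Tensor, not Optional[Tensor]
    alphas = 1 - torch.exp(-delta_density)
    print(alphas.shape)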

nerfstudio/data/dataparsers/colmap_dataparser.py

Lines changed: 1 addition & 0 deletions
@@ -455,6 +455,7 @@ def _load_3D_points(self, colmap_path: Path, transform_matrix: torch.Tensor, sca
             points3D_image_ids.append(
                 torch.cat((nids, torch.full((max_num_points - len(nids),), -1, dtype=torch.int64)))
             )
+            assert downscale_factor is not None
             points3D_image_xy.append(
                 torch.cat((nxy, torch.full((max_num_points - len(nxy), nxy.shape[-1]), 0, dtype=torch.float32)))
                 / downscale_factor

nerfstudio/data/dataparsers/dycheck_dataparser.py

Lines changed: 2 additions & 2 deletions
@@ -289,8 +289,8 @@ def process_frames(self, frame_names: List[str], time_ids: np.ndarray) -> Tuple[
             cam_json = load_from_json(self.data / f"camera/{frame}.json")
             c2w = torch.as_tensor(cam_json["orientation"]).T
             position = torch.as_tensor(cam_json["position"])
-            position -= self._center  # some scenes look weird (wheel)
-            position *= self._scale * self.config.scale_factor
+            position -= torch.as_tensor(self._center)  # some scenes look weird (wheel)
+            position *= torch.as_tensor(self._scale) * self.config.scale_factor
             pose = torch.zeros([3, 4])
             pose[:3, :3] = c2w
             pose[:3, 3] = position
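
This hunk wraps _center and _scale in torch.as_tensor before the in-place ops, keeping every operand a tensor; presumably those attributes are NumPy values. A rough sketch with hypothetical stand-ins for the parser's attributes and scale factor:

    import numpy as np
    import torch

    position = torch.as_tensor([1.0, 2.0, 3.0])
    center = np.array([0.5, 0.5, 0.5], dtype=np.float32)  # hypothetical stand-in for self._center
    scale = np.float32(0.25)                              # hypothetical stand-in for self._scale

    position -= torch.as_tensor(center)       # convert explicitly before the in-place op
    position *= torch.as_tensor(scale) * 2.0  # 2.0 plays the role of config.scale_factor
    print(position)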

nerfstudio/data/dataparsers/nerfstudio_dataparser.py

Lines changed: 1 addition & 0 deletions
@@ -484,6 +484,7 @@ def _get_fname(self, filepath: Path, data_dir: Path, downsample_folder_prefix="i
                 CONSOLE.log(f"Auto image downscale factor of {self.downscale_factor}")
             else:
                 self.downscale_factor = self.config.downscale_factor
+        assert self.downscale_factor is not None

         if self.downscale_factor > 1:
             return data_dir / f"{downsample_folder_prefix}{self.downscale_factor}" / filepath.name

nerfstudio/data/pixel_samplers.py

Lines changed: 1 addition & 0 deletions
@@ -227,6 +227,7 @@ def sample_method_fisheye(

         rand_samples = torch.rand((samples_needed, 2), device=device)
         # Convert random samples to radius and theta.
+        assert self.config.fisheye_crop_radius is not None
         radii = self.config.fisheye_crop_radius * torch.sqrt(rand_samples[:, 0])
         theta = 2.0 * torch.pi * rand_samples[:, 1]

nerfstudio/data/utils/data_utils.py

Lines changed: 1 addition & 1 deletion
@@ -116,7 +116,7 @@ def get_depth_image_from_path(
     else:
         image = cv2.imread(str(filepath.absolute()), cv2.IMREAD_ANYDEPTH)
         image = image.astype(np.float32) * scale_factor
-        image = cv2.resize(image, (width, height), interpolation=interpolation)
+        image = cv2.resize(image, (width, height), interpolation=interpolation)  # type: ignore
     return torch.from_numpy(image[:, :, np.newaxis])
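
The trailing comment is a scoped pyright suppression for this single cv2.resize call (the "cv2 ignore" mentioned in the commit message), since OpenCV's type stubs and the array being passed do not always agree. A small sketch of the same call shape, assuming opencv-python and its bundled stubs are installed:

    import cv2
    import numpy as np

    depth = np.zeros((480, 640), dtype=np.float32)  # hypothetical depth image
    # The suppression is scoped to this one call rather than the whole file.
    resized = cv2.resize(depth, (320, 240), interpolation=cv2.INTER_NEAREST)  # type: ignore
    print(resized.shape)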

nerfstudio/exporter/exporter_utils.py

Lines changed: 1 addition & 0 deletions
@@ -345,6 +345,7 @@ def collect_camera_poses(pipeline: VanillaPipeline) -> Tuple[List[Dict[str, Any]
     camera_optimizer = None
     if hasattr(pipeline.model, "camera_optimizer"):
         camera_optimizer = pipeline.model.camera_optimizer
+        assert isinstance(camera_optimizer, CameraOptimizer)

     train_frames = collect_camera_poses_for_dataset(train_dataset, camera_optimizer)
     # Note: returning original poses, even if --eval-mode=all
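
Because camera_optimizer is fetched behind a hasattr check, the checker sees it with a loose type; the isinstance assert documents the expectation and gives later code a concrete type. A toy version of the pattern, with a hypothetical Optimizer class standing in for nerfstudio's CameraOptimizer:

    from typing import Any, Optional

    class Optimizer:  # hypothetical stand-in for CameraOptimizer
        pass

    class Model:
        def __init__(self) -> None:
            self.camera_optimizer = Optimizer()

    def get_camera_optimizer(model: Any) -> Optional[Optimizer]:
        camera_optimizer = None
        if hasattr(model, "camera_optimizer"):
            camera_optimizer = model.camera_optimizer
            assert isinstance(camera_optimizer, Optimizer)  # narrows to Optimizer for the return type
        return camera_optimizer

    print(get_camera_optimizer(Model()))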

nerfstudio/field_components/encodings.py

Lines changed: 1 addition & 5 deletions
@@ -45,11 +45,6 @@ def __init__(self, in_dim: int) -> None:
             raise ValueError("Input dimension should be greater than zero")
         super().__init__(in_dim=in_dim)

-    @classmethod
-    def get_tcnn_encoding_config(cls) -> dict:
-        """Get the encoding configuration for tcnn if implemented"""
-        raise NotImplementedError("Encoding does not have a TCNN implementation")
-
     @abstractmethod
     def forward(self, in_tensor: Shaped[Tensor, "*bs input_dim"]) -> Shaped[Tensor, "*bs output_dim"]:
         """Call forward and returns and processed tensor

@@ -217,6 +212,7 @@ def __init__(
         self.min_freq = min_freq_exp
         self.max_freq = max_freq_exp
         self.register_buffer(name="b_matrix", tensor=basis)
+        self.b_matrix: Tensor
         self.include_input = include_input

     def get_out_dim(self) -> int:
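
register_buffer stores b_matrix through nn.Module's machinery rather than as a plain attribute, so a static checker cannot infer its type; the bare annotation self.b_matrix: Tensor is a no-op at runtime but declares it. A minimal sketch of the idiom in a throwaway module (not the actual nerfstudio encoding class):

    import torch
    from torch import Tensor, nn

    class RandomProjection(nn.Module):  # hypothetical module, not nerfstudio's encoding
        def __init__(self, basis: Tensor) -> None:
            super().__init__()
            self.register_buffer(name="b_matrix", tensor=basis)
            self.b_matrix: Tensor  # annotation only; tells the checker what the buffer holds

        def forward(self, x: Tensor) -> Tensor:
            return x @ self.b_matrix

    proj = RandomProjection(torch.randn(3, 8))
    print(proj(torch.randn(5, 3)).shape)  # torch.Size([5, 8])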
