diff --git a/docs/source/changelog.rst b/docs/source/changelog.rst
index 2bcde39cd..f07f1b42e 100644
--- a/docs/source/changelog.rst
+++ b/docs/source/changelog.rst
@@ -8,6 +8,8 @@ Upcoming version (not yet released)
Added
^^^^^
+- Added ``Mjlab-Velocity-Flat-Run-Unitree-G1`` task with velocity command
+ curriculum for training the G1 to run on flat terrain.
- Added ``STAIRS_TERRAINS_CFG`` terrain preset for progressive stair
curriculum training and ``@terrain_preset`` decorator for composing
terrain configurations from reusable presets.
diff --git a/src/mjlab/sensor/terrain_height_sensor.py b/src/mjlab/sensor/terrain_height_sensor.py
index a123c04db..c5305b909 100644
--- a/src/mjlab/sensor/terrain_height_sensor.py
+++ b/src/mjlab/sensor/terrain_height_sensor.py
@@ -64,9 +64,19 @@ def _compute_data(self) -> TerrainHeightData:
heights = frame_z.unsqueeze(-1) - hit_z # [B, F, N]
miss = raw.distances.view(B, F, N) < 0
- heights = torch.where(
- miss, torch.full_like(heights, self.cfg.max_distance), heights
- )
+ # When all rays for a frame miss there are two cases:
+ # 1. Frame is below or at the terrain surface (rays start below and
+ # point down, never hitting anything). True clearance is ~0.
+ # 2. Frame is genuinely above max_distance. True clearance >=
+ # max_distance.
+ # We distinguish them using frame_z clamped to [0, max_distance].
+ # For partial misses (some rays hit, some don't), max_distance is
+ # the right fallback since the frame is above terrain.
+ all_miss = miss.all(dim=-1, keepdim=True).expand_as(miss) # [B, F, N]
+ fallback = frame_z.unsqueeze(-1).clamp(0, self.cfg.max_distance)
+ fallback = fallback.expand_as(heights) # [B, F, N]
+ miss_value = torch.where(all_miss, fallback, self.cfg.max_distance)
+ heights = torch.where(miss, miss_value, heights)
reduction = self.cfg.reduction
if reduction == "min":
diff --git a/src/mjlab/tasks/velocity/config/g1/__init__.py b/src/mjlab/tasks/velocity/config/g1/__init__.py
index debab01bd..799a3fe83 100644
--- a/src/mjlab/tasks/velocity/config/g1/__init__.py
+++ b/src/mjlab/tasks/velocity/config/g1/__init__.py
@@ -3,6 +3,7 @@
from .env_cfgs import (
unitree_g1_flat_env_cfg,
+ unitree_g1_flat_run_env_cfg,
unitree_g1_rough_env_cfg,
)
from .rl_cfg import unitree_g1_ppo_runner_cfg
@@ -22,3 +23,11 @@
rl_cfg=unitree_g1_ppo_runner_cfg(),
runner_cls=VelocityOnPolicyRunner,
)
+
+register_mjlab_task(
+ task_id="Mjlab-Velocity-Flat-Run-Unitree-G1",
+ env_cfg=unitree_g1_flat_run_env_cfg(),
+ play_env_cfg=unitree_g1_flat_run_env_cfg(play=True),
+ rl_cfg=unitree_g1_ppo_runner_cfg(),
+ runner_cls=VelocityOnPolicyRunner,
+)
diff --git a/src/mjlab/tasks/velocity/config/g1/env_cfgs.py b/src/mjlab/tasks/velocity/config/g1/env_cfgs.py
index 28bb3f468..6b4ec78f7 100644
--- a/src/mjlab/tasks/velocity/config/g1/env_cfgs.py
+++ b/src/mjlab/tasks/velocity/config/g1/env_cfgs.py
@@ -7,6 +7,7 @@
from mjlab.envs import ManagerBasedRlEnvCfg
from mjlab.envs import mdp as envs_mdp
from mjlab.envs.mdp.actions import JointPositionActionCfg
+from mjlab.managers.curriculum_manager import CurriculumTermCfg
from mjlab.managers.event_manager import EventTermCfg
from mjlab.managers.reward_manager import RewardTermCfg
from mjlab.sensor import (
@@ -26,9 +27,8 @@ def unitree_g1_rough_env_cfg(play: bool = False) -> ManagerBasedRlEnvCfg:
"""Create Unitree G1 rough terrain velocity configuration."""
cfg = make_velocity_env_cfg()
- cfg.sim.mujoco.ccd_iterations = 500
- cfg.sim.contact_sensor_maxmatch = 500
- cfg.sim.nconmax = 70
+ cfg.sim.njmax = 200
+ cfg.sim.nconmax = 30
cfg.scene.entities = {"robot": get_g1_robot_cfg()}
@@ -189,10 +189,7 @@ def unitree_g1_flat_env_cfg(play: bool = False) -> ManagerBasedRlEnvCfg:
"""Create Unitree G1 flat terrain velocity configuration."""
cfg = unitree_g1_rough_env_cfg(play=play)
- cfg.sim.njmax = 300
- cfg.sim.mujoco.ccd_iterations = 50
- cfg.sim.contact_sensor_maxmatch = 64
- cfg.sim.nconmax = None
+ cfg.sim.njmax = 170
# Switch to flat terrain.
assert cfg.scene.terrain is not None
@@ -211,10 +208,29 @@ def unitree_g1_flat_env_cfg(play: bool = False) -> ManagerBasedRlEnvCfg:
# Disable terrain curriculum (not present in play mode since rough clears all).
cfg.curriculum.pop("terrain_levels", None)
+ return cfg
+
+
+def unitree_g1_flat_run_env_cfg(play: bool = False) -> ManagerBasedRlEnvCfg:
+ """G1 flat terrain with velocity curriculum for learning to run."""
+ cfg = unitree_g1_flat_env_cfg(play=play)
+
+ cfg.curriculum["command_vel"] = CurriculumTermCfg(
+ func=mdp.commands_vel,
+ params={
+ "command_name": "twist",
+ "velocity_stages": [
+ {"step": 0, "lin_vel_x": (-1.0, 1.0)},
+ {"step": 5000 * 24, "lin_vel_x": (-1.5, 2.0), "ang_vel_z": (-1.5, 1.5)},
+ {"step": 10000 * 24, "lin_vel_x": (-2.0, 3.0), "ang_vel_z": (-2.0, 2.0)},
+ ],
+ },
+ )
+
if play:
twist_cmd = cfg.commands["twist"]
assert isinstance(twist_cmd, UniformVelocityCommandCfg)
- twist_cmd.ranges.lin_vel_x = (-1.5, 2.0)
- twist_cmd.ranges.ang_vel_z = (-0.7, 0.7)
+ twist_cmd.ranges.lin_vel_x = (-2.0, 3.0)
+ twist_cmd.ranges.ang_vel_z = (-2.0, 2.0)
return cfg
diff --git a/src/mjlab/tasks/velocity/config/go1/__init__.py b/src/mjlab/tasks/velocity/config/go1/__init__.py
index 259c0baa8..8bea2efdf 100644
--- a/src/mjlab/tasks/velocity/config/go1/__init__.py
+++ b/src/mjlab/tasks/velocity/config/go1/__init__.py
@@ -5,7 +5,7 @@
unitree_go1_flat_env_cfg,
unitree_go1_rough_env_cfg,
)
-from .rl_cfg import unitree_go1_ppo_runner_cfg
+from .rl_cfg import unitree_go1_flat_ppo_runner_cfg, unitree_go1_ppo_runner_cfg
register_mjlab_task(
task_id="Mjlab-Velocity-Rough-Unitree-Go1",
@@ -19,6 +19,6 @@
task_id="Mjlab-Velocity-Flat-Unitree-Go1",
env_cfg=unitree_go1_flat_env_cfg(),
play_env_cfg=unitree_go1_flat_env_cfg(play=True),
- rl_cfg=unitree_go1_ppo_runner_cfg(),
+ rl_cfg=unitree_go1_flat_ppo_runner_cfg(),
runner_cls=VelocityOnPolicyRunner,
)
diff --git a/src/mjlab/tasks/velocity/config/go1/env_cfgs.py b/src/mjlab/tasks/velocity/config/go1/env_cfgs.py
index 918c0a1a7..1dc1f4892 100644
--- a/src/mjlab/tasks/velocity/config/go1/env_cfgs.py
+++ b/src/mjlab/tasks/velocity/config/go1/env_cfgs.py
@@ -23,7 +23,6 @@
TerrainHeightSensorCfg,
)
from mjlab.tasks.velocity import mdp
-from mjlab.tasks.velocity.mdp import UniformVelocityCommandCfg
from mjlab.tasks.velocity.velocity_env_cfg import make_velocity_env_cfg
TerrainType = Literal["rough", "obstacles"]
@@ -35,10 +34,11 @@ def unitree_go1_rough_env_cfg(
"""Create Unitree Go1 rough terrain velocity configuration."""
cfg = make_velocity_env_cfg()
- cfg.sim.mujoco.ccd_iterations = 500
+ cfg.sim.njmax = 120
+ cfg.sim.nconmax = 20
+
cfg.sim.mujoco.impratio = 10
cfg.sim.mujoco.cone = "elliptic"
- cfg.sim.contact_sensor_maxmatch = 500
cfg.scene.entities = {"robot": get_go1_robot_cfg()}
@@ -203,9 +203,11 @@ def unitree_go1_rough_env_cfg(
for reward_name in ["foot_clearance", "foot_slip"]:
cfg.rewards[reward_name].params["asset_cfg"].site_names = site_names
- cfg.rewards["body_ang_vel"].weight = 0.0
- cfg.rewards["angular_momentum"].weight = 0.0
+ cfg.rewards["body_ang_vel"].weight = -1e-4
+ cfg.rewards["angular_momentum"].weight = -1e-4
cfg.rewards["air_time"].weight = 0.0
+ cfg.rewards["joint_vel_l2"] = RewardTermCfg(func=mdp.joint_vel_l2, weight=-1e-5)
+ cfg.rewards["joint_acc_l2"] = RewardTermCfg(func=mdp.joint_acc_l2, weight=-1e-7)
# Per-body-group collision penalties.
cfg.rewards["self_collisions"] = RewardTermCfg(
@@ -218,6 +220,11 @@ def unitree_go1_rough_env_cfg(
weight=-0.1,
params={"sensor_name": shank_ground_cfg.name},
)
+ cfg.rewards["thigh_collision"] = RewardTermCfg(
+ func=mdp.self_collision_cost,
+ weight=-0.5,
+ params={"sensor_name": thigh_ground_cfg.name},
+ )
cfg.rewards["trunk_head_collision"] = RewardTermCfg(
func=mdp.self_collision_cost,
weight=-0.1,
@@ -262,10 +269,7 @@ def unitree_go1_flat_env_cfg(play: bool = False) -> ManagerBasedRlEnvCfg:
"""Create Unitree Go1 flat terrain velocity configuration."""
cfg = unitree_go1_rough_env_cfg(play=play)
- cfg.sim.njmax = 300
- cfg.sim.mujoco.ccd_iterations = 50
- cfg.sim.contact_sensor_maxmatch = 64
- cfg.sim.nconmax = None
+ cfg.sim.njmax = 50
# Switch to flat terrain.
assert cfg.scene.terrain is not None
@@ -288,7 +292,12 @@ def unitree_go1_flat_env_cfg(play: bool = False) -> ManagerBasedRlEnvCfg:
cfg.rewards["upright"].params.pop("terrain_sensor_names", None)
# Remove granular collision rewards (not useful on flat ground).
- for key in ("self_collisions", "shank_collision", "trunk_head_collision"):
+ for key in (
+ "self_collisions",
+ "shank_collision",
+ "thigh_collision",
+ "trunk_head_collision",
+ ):
cfg.rewards.pop(key, None)
# On flat terrain fell_over is sufficient; thigh contact implies fallen.
@@ -302,10 +311,4 @@ def unitree_go1_flat_env_cfg(play: bool = False) -> ManagerBasedRlEnvCfg:
# Disable terrain curriculum (not present in play mode since rough clears all).
cfg.curriculum.pop("terrain_levels", None)
- if play:
- twist_cmd = cfg.commands["twist"]
- assert isinstance(twist_cmd, UniformVelocityCommandCfg)
- twist_cmd.ranges.lin_vel_x = (-1.5, 2.0)
- twist_cmd.ranges.ang_vel_z = (-0.7, 0.7)
-
return cfg
diff --git a/src/mjlab/tasks/velocity/config/go1/rl_cfg.py b/src/mjlab/tasks/velocity/config/go1/rl_cfg.py
index f52a8032b..ecfb4ca44 100644
--- a/src/mjlab/tasks/velocity/config/go1/rl_cfg.py
+++ b/src/mjlab/tasks/velocity/config/go1/rl_cfg.py
@@ -1,5 +1,7 @@
"""RL configuration for Unitree Go1 velocity task."""
+from functools import partial
+
from mjlab.rl import (
RslRlModelCfg,
RslRlOnPolicyRunnerCfg,
@@ -7,7 +9,9 @@
)
-def unitree_go1_ppo_runner_cfg() -> RslRlOnPolicyRunnerCfg:
+def unitree_go1_ppo_runner_cfg(
+ max_iterations: int = 10_000,
+) -> RslRlOnPolicyRunnerCfg:
"""Create RL runner configuration for Unitree Go1 velocity task."""
return RslRlOnPolicyRunnerCfg(
actor=RslRlModelCfg(
@@ -42,5 +46,10 @@ def unitree_go1_ppo_runner_cfg() -> RslRlOnPolicyRunnerCfg:
experiment_name="go1_velocity",
save_interval=50,
num_steps_per_env=24,
- max_iterations=10_000,
+ max_iterations=max_iterations,
)
+
+
+unitree_go1_flat_ppo_runner_cfg = partial(
+ unitree_go1_ppo_runner_cfg, max_iterations=1_500
+)
diff --git a/src/mjlab/tasks/velocity/mdp/curriculums.py b/src/mjlab/tasks/velocity/mdp/curriculums.py
index b2487ebfa..b826fab06 100644
--- a/src/mjlab/tasks/velocity/mdp/curriculums.py
+++ b/src/mjlab/tasks/velocity/mdp/curriculums.py
@@ -47,8 +47,7 @@ def terrain_levels_vel(
# Robots that walked far enough progress to harder terrains.
move_up = distance > terrain_generator.size[0] / 2
- # Robots that walked less than half of their required distance go to
- # simpler terrains.
+ # Robots that walked less than half of their required distance go to simpler terrains.
move_down = (
distance < torch.norm(command[env_ids, :2], dim=1) * env.max_episode_length_s * 0.5
)
@@ -64,8 +63,8 @@ def terrain_levels_vel(
"max": torch.max(levels),
}
- # In curriculum mode num_cols == num_terrains (one column per type),
- # so the column index directly maps to the sub-terrain name.
+ # In curriculum mode num_cols == num_terrains (one column per type), so the column
+ # index directly maps to the sub-terrain name.
sub_terrain_names = list(terrain_generator.sub_terrains.keys())
terrain_origins = terrain.terrain_origins
assert terrain_origins is not None
diff --git a/src/mjlab/tasks/velocity/mdp/rewards.py b/src/mjlab/tasks/velocity/mdp/rewards.py
index ceb78e3fb..7eac9b306 100644
--- a/src/mjlab/tasks/velocity/mdp/rewards.py
+++ b/src/mjlab/tasks/velocity/mdp/rewards.py
@@ -279,6 +279,11 @@ def __init__(self, cfg: RewardTermCfg, env: ManagerBasedRlEnv):
)
self.step_dt = env.step_dt
+ def reset(self, env_ids: torch.Tensor | slice | None = None) -> None:
+ if env_ids is None:
+ env_ids = slice(None)
+ self.peak_heights[env_ids] = 0.0
+
def __call__(
self,
env: ManagerBasedRlEnv,
diff --git a/src/mjlab/tasks/velocity/velocity_env_cfg.py b/src/mjlab/tasks/velocity/velocity_env_cfg.py
index 27d66340a..2e7e78f22 100644
--- a/src/mjlab/tasks/velocity/velocity_env_cfg.py
+++ b/src/mjlab/tasks/velocity/velocity_env_cfg.py
@@ -91,6 +91,7 @@ def make_velocity_env_cfg() -> ManagerBasedRlEnvCfg:
"joint_pos": ObservationTermCfg(
func=mdp.joint_pos_rel,
noise=Unoise(n_min=-0.01, n_max=0.01),
+ params={"biased": True},
),
"joint_vel": ObservationTermCfg(
func=mdp.joint_vel_rel,
@@ -111,6 +112,7 @@ def make_velocity_env_cfg() -> ManagerBasedRlEnvCfg:
critic_terms = {
**actor_terms,
+ "joint_pos": ObservationTermCfg(func=mdp.joint_pos_rel),
"height_scan": ObservationTermCfg(
func=envs_mdp.height_scan,
params={"sensor_name": "terrain_scan"},
@@ -187,7 +189,7 @@ def make_velocity_env_cfg() -> ManagerBasedRlEnvCfg:
ranges=UniformVelocityCommandCfg.Ranges(
lin_vel_x=(-1.0, 1.0),
lin_vel_y=(-1.0, 1.0),
- ang_vel_z=(-0.5, 0.5),
+ ang_vel_z=(-1.0, 1.0),
heading=(-math.pi, math.pi),
),
)
@@ -395,17 +397,6 @@ def make_velocity_env_cfg() -> ManagerBasedRlEnvCfg:
func=mdp.terrain_levels_vel,
params={"command_name": "twist"},
),
- "command_vel": CurriculumTermCfg(
- func=mdp.commands_vel,
- params={
- "command_name": "twist",
- "velocity_stages": [
- {"step": 0, "lin_vel_x": (-1.0, 1.0), "ang_vel_z": (-0.5, 0.5)},
- {"step": 5000 * 24, "lin_vel_x": (-1.5, 2.0), "ang_vel_z": (-0.7, 0.7)},
- {"step": 10000 * 24, "lin_vel_x": (-2.0, 3.0)},
- ],
- },
- ),
}
##
@@ -440,8 +431,6 @@ def make_velocity_env_cfg() -> ManagerBasedRlEnvCfg:
azimuth=90.0,
),
sim=SimulationCfg(
- nconmax=35,
- njmax=1500,
mujoco=MujocoCfg(
timestep=0.005,
iterations=10,
diff --git a/tests/test_height_sensor_accuracy.py b/tests/test_height_sensor_accuracy.py
new file mode 100644
index 000000000..1f56e47b0
--- /dev/null
+++ b/tests/test_height_sensor_accuracy.py
@@ -0,0 +1,199 @@
+"""Verify TerrainHeightSensor.data.heights matches analytic expectations.
+
+Two key properties:
+1. On flat terrain, heights must equal site_pos_w Z (terrain at z=0).
+2. On stepped terrain, heights must equal site_z - step_z.
+"""
+
+from __future__ import annotations
+
+import pytest
+import torch
+from conftest import get_test_device, make_scene_and_sim
+
+from mjlab.sensor import ObjRef, RingPatternCfg, TerrainHeightSensorCfg
+from mjlab.sensor.terrain_height_sensor import TerrainHeightSensor
+
+# Single foot floating above a flat ground plane at z=0.
+FLAT_TERRAIN_XML = """
+<mujoco>
+  <worldbody>
+    <geom name="ground" type="plane" size="10 10 0.1" group="0"/>
+    <body name="trunk" pos="0 0 1.0">
+      <freejoint/>
+      <geom name="trunk_geom" type="box" size="0.1 0.1 0.05" group="1"/>
+      <site name="left_foot" pos="-0.1 0 0"/>
+      <site name="right_foot" pos="0.1 0 0"/>
+    </body>
+  </worldbody>
+</mujoco>
+"""
+
+# Step at z=0.3 for x<0, flat ground at z=0 for x>=0.
+# Body at x=0, z=1.0.
+# left_foot at x=-0.5 (over step), right_foot at x=0.5 (over ground).
+STEPPED_TERRAIN_XML = """
+<mujoco>
+  <worldbody>
+    <geom name="ground" type="plane" size="10 10 0.1" group="0"/>
+    <geom name="step" type="box" pos="-5 0 0.15" size="5 10 0.15" group="0"/>
+    <body name="trunk" pos="0 0 1.0">
+      <freejoint/>
+      <geom name="trunk_geom" type="box" size="0.1 0.1 0.05" group="1"/>
+      <site name="left_foot" pos="-0.5 0 0"/>
+      <site name="right_foot" pos="0.5 0 0"/>
+    </body>
+  </worldbody>
+</mujoco>
+"""
+
+
+def _sensor_cfg(reduction: str = "mean") -> TerrainHeightSensorCfg:
+ return TerrainHeightSensorCfg(
+ name="foot_height_scan",
+ frame=(
+ ObjRef(type="site", name="left_foot", entity="robot"),
+ ObjRef(type="site", name="right_foot", entity="robot"),
+ ),
+ ray_alignment="yaw",
+ pattern=RingPatternCfg.single_ring(radius=0.03, num_samples=6),
+ max_distance=2.0,
+ exclude_parent_body=True,
+ include_geom_groups=(0,),
+ reduction=reduction,
+ )
+
+
+@pytest.fixture(scope="module")
+def device():
+ return get_test_device()
+
+
+def test_flat_terrain_heights_match_site_z(device):
+ """On flat terrain (z=0), sensor heights must equal site_pos_w Z."""
+ cfg = _sensor_cfg()
+ scene, sim = make_scene_and_sim(device, FLAT_TERRAIN_XML, (cfg,))
+ entity = scene["robot"]
+ sensor: TerrainHeightSensor = scene["foot_height_scan"]
+
+ sim.step()
+ sim.forward()
+ sim.sense()
+
+ site_z = entity.data.site_pos_w[0, :, 2] # [num_sites]
+ sensor_heights = sensor.data.heights[0] # [num_frames]
+
+ assert site_z.shape == sensor_heights.shape, (
+ f"Shape mismatch: site_z {site_z.shape} vs sensor {sensor_heights.shape}"
+ )
+ torch.testing.assert_close(
+ sensor_heights,
+ site_z,
+ atol=1e-3,
+ rtol=0,
+ msg="Sensor heights diverge from site_pos_w Z on flat terrain",
+ )
+
+
+def test_flat_terrain_heights_match_site_z_after_motion(device):
+ """After several steps of free-fall, sensor heights still match site Z."""
+ cfg = _sensor_cfg()
+ scene, sim = make_scene_and_sim(device, FLAT_TERRAIN_XML, (cfg,))
+ entity = scene["robot"]
+ sensor: TerrainHeightSensor = scene["foot_height_scan"]
+
+ # Let the body fall for a few steps.
+ for _ in range(10):
+ sim.step()
+ sim.forward()
+ sim.sense()
+
+ site_z = entity.data.site_pos_w[0, :, 2]
+ sensor_heights = sensor.data.heights[0]
+
+ torch.testing.assert_close(
+ sensor_heights,
+ site_z,
+ atol=1e-3,
+ rtol=0,
+ msg="Sensor heights diverge from site Z after motion on flat terrain",
+ )
+
+
+def test_stepped_terrain_analytic_heights(device):
+ """On stepped terrain, heights must equal site_z - terrain_z."""
+ cfg = _sensor_cfg()
+ scene, sim = make_scene_and_sim(device, STEPPED_TERRAIN_XML, (cfg,))
+ sensor: TerrainHeightSensor = scene["foot_height_scan"]
+
+ sim.step()
+ sim.forward()
+ sim.sense()
+
+ heights = sensor.data.heights[0]
+
+ # left_foot at (-0.5, 0, 1.0), over step top at z=0.3 -> height = 0.7.
+ assert heights[0].item() == pytest.approx(0.7, abs=0.05)
+ # right_foot at (0.5, 0, 1.0), over ground at z=0 -> height = 1.0.
+ assert heights[1].item() == pytest.approx(1.0, abs=0.05)
+
+
+def test_reduction_min_vs_mean_on_flat(device):
+ """On flat terrain, min and mean reduction should give identical results."""
+ cfg_min = _sensor_cfg(reduction="min")
+ cfg_min.name = "foot_min"
+ cfg_mean = _sensor_cfg(reduction="mean")
+ cfg_mean.name = "foot_mean"
+
+ scene, sim = make_scene_and_sim(device, FLAT_TERRAIN_XML, (cfg_min, cfg_mean))
+ sim.step()
+ sim.forward()
+ sim.sense()
+
+ sensor_min: TerrainHeightSensor = scene["foot_min"]
+ sensor_mean: TerrainHeightSensor = scene["foot_mean"]
+
+ torch.testing.assert_close(
+ sensor_min.data.heights,
+ sensor_mean.data.heights,
+ atol=1e-3,
+ rtol=0,
+ msg="Min and mean reduction diverge on flat terrain",
+ )
+
+
+def test_foot_below_ground_plane(device):
+ """When foot penetrates ground (site Z < 0), sensor should not report max_distance."""
+ # Foot site at z=0.01 (just barely above ground) and z=-0.01 (slightly below).
+  below_ground_xml = """
+  <mujoco>
+    <worldbody>
+      <geom name="ground" type="plane" size="10 10 0.1" group="0"/>
+      <body name="trunk" pos="0 0 0">
+        <freejoint/>
+        <geom name="trunk_geom" type="box" size="0.1 0.1 0.005" group="1"/>
+        <site name="left_foot" pos="-0.1 0 0.01"/>
+        <site name="right_foot" pos="0.1 0 -0.01"/>
+      </body>
+    </worldbody>
+  </mujoco>
+  """
+ # left_foot at z=0.01 (above ground), right_foot at z=-0.01 (below ground).
+ cfg = _sensor_cfg()
+ scene, sim = make_scene_and_sim(device, below_ground_xml, (cfg,))
+ sim.step()
+ sim.forward()
+ sim.sense()
+
+ sensor: TerrainHeightSensor = scene["foot_height_scan"]
+ heights = sensor.data.heights[0]
+
+ # Left foot (above ground) should report ~0.01.
+ assert heights[0].item() < 0.1, (
+ f"Left foot above ground should report small height, got {heights[0].item()}"
+ )
+ # Right foot (below ground) should NOT report max_distance.
+ assert heights[1].item() < 0.5, (
+ f"Right foot below ground reports {heights[1].item()}, "
+ f"likely max_distance={cfg.max_distance} due to ray miss"
+ )