 import omni.isaac.lab.utils.math as math_utils
 from omni.isaac.lab.assets import Articulation, RigidObject
 from omni.isaac.lab.managers import SceneEntityCfg
+from omni.isaac.lab.managers.manager_base import ManagerTermBase
+from omni.isaac.lab.managers.manager_term_cfg import ObservationTermCfg
 from omni.isaac.lab.sensors import Camera, Imu, RayCaster, RayCasterCamera, TiledCamera

 if TYPE_CHECKING:
     from omni.isaac.lab.envs import ManagerBasedEnv, ManagerBasedRLEnv

+
 """
 Root state.
 """
@@ -273,6 +276,134 @@ def image(
     return images.clone()


+class image_features(ManagerTermBase):
+    """Extracted image features from a pre-trained frozen encoder.
+
+    This term calls the :func:`image` function to retrieve images and then performs
+    inference on those images with the selected pre-trained model.
+    """
+
+    def __init__(self, cfg: ObservationTermCfg, env: ManagerBasedEnv):
+        super().__init__(cfg, env)
+        from torchvision import models
+        from transformers import AutoModel
+
+        def create_theia_model(model_name):
+            return {
+                "model": (
+                    lambda: AutoModel.from_pretrained(f"theaiinstitute/{model_name}", trust_remote_code=True)
+                    .eval()
+                    .to("cuda:0")
+                ),
+                # min-max normalize each image over its spatial dimensions to [0, 1]
+                "preprocess": lambda img: (img - torch.amin(img, dim=(1, 2), keepdim=True)) / (
+                    torch.amax(img, dim=(1, 2), keepdim=True) - torch.amin(img, dim=(1, 2), keepdim=True)
+                ),
+                "inference": lambda model, images: model.forward_feature(
+                    images, do_rescale=False, interpolate_pos_encoding=True
+                ),
+            }
+
+        def create_resnet_model(resnet_name):
+            return {
+                # the "pretrained" flag is deprecated in recent torchvision; use "weights" instead
+                "model": lambda: getattr(models, resnet_name)(weights="DEFAULT").eval().to("cuda:0"),
+                # convert [batch, height, width, 3] -> [batch, 3, height, width] and
+                # normalize with the standard ImageNet mean/std statistics
+                "preprocess": lambda img: (
+                    img.permute(0, 3, 1, 2)
+                    - torch.tensor([0.485, 0.456, 0.406], device=img.device).view(1, 3, 1, 1)
+                ) / torch.tensor([0.229, 0.224, 0.225], device=img.device).view(1, 3, 1, 1),
+                "inference": lambda model, images: model(images),
+            }
+
+        # List of Theia models
+        theia_models = [
+            "theia-tiny-patch16-224-cddsv",
+            "theia-tiny-patch16-224-cdiv",
+            "theia-small-patch16-224-cdiv",
+            "theia-base-patch16-224-cdiv",
+            "theia-small-patch16-224-cddsv",
+            "theia-base-patch16-224-cddsv",
+        ]
+
+        # List of ResNet models
+        resnet_models = ["resnet18", "resnet34", "resnet50", "resnet101"]
+
+        self.default_model_zoo_cfg = {}
+
+        # Add Theia models to the zoo
+        for model_name in theia_models:
+            self.default_model_zoo_cfg[model_name] = create_theia_model(model_name)
+
+        # Add ResNet models to the zoo
+        for resnet_name in resnet_models:
+            self.default_model_zoo_cfg[resnet_name] = create_resnet_model(resnet_name)
+
+        self.model_zoo_cfg = self.default_model_zoo_cfg
+        self.model_zoo = {}
+
+    def __call__(
+        self,
+        env: ManagerBasedEnv,
+        sensor_cfg: SceneEntityCfg = SceneEntityCfg("tiled_camera"),
+        data_type: str = "rgb",
+        convert_perspective_to_orthogonal: bool = False,
+        model_zoo_cfg: dict | None = None,
+        model_name: str = "resnet18",
+        model_device: str | None = "cuda:0",
+        reset_model: bool = False,
+    ) -> torch.Tensor:
+        """Extracted image features from a pre-trained frozen encoder.
+
+        Args:
+            env: The environment.
+            sensor_cfg: The sensor configuration to poll. Defaults to SceneEntityCfg("tiled_camera").
+            data_type: The sensor data type to pull. Defaults to "rgb".
+            convert_perspective_to_orthogonal: Whether to orthogonalize perspective depth images.
+                This is used only when the data type is "distance_to_camera". Defaults to False.
+            model_zoo_cfg: Map from model name to model configuration dictionary. Each model
+                configuration dictionary should include the following entries:
+
+                - "model": A callable that returns the model when invoked without arguments.
+                - "preprocess": A callable that processes the images and returns the preprocessed results.
+                - "inference": A callable that, when given the model and preprocessed images,
+                  returns the extracted features.
+
+            model_name: The name of the model to use for inference. Defaults to "resnet18".
+            model_device: The device to store and infer the model on. This can be used to help offload
+                computation from the main environment GPU. Defaults to "cuda:0".
+            reset_model: Re-initialize the model even if it already exists. Defaults to False.
+
+        Returns:
+            The image features, on the same device as the input images.
+        """
+        if model_zoo_cfg is not None:
+            # merge user-provided model configurations over the defaults
+            self.model_zoo_cfg.update(model_zoo_cfg)
+
+        if model_name not in self.model_zoo or reset_model:
+            # The following allows loading only a desired subset of the model zoo into
+            # GPU memory as it becomes needed, in a "lazy" evaluation.
+            print(f"[INFO]: Adding {model_name} to the model zoo")
+            self.model_zoo[model_name] = self.model_zoo_cfg[model_name]["model"]()
+
+        if model_device is not None:
+            # offload vision-model inference to the requested device if it is not there already
+            # (querying a parameter's device works for both transformers and torchvision models)
+            if next(self.model_zoo[model_name].parameters()).device != torch.device(model_device):
+                self.model_zoo[model_name] = self.model_zoo[model_name].to(model_device)
+
+        images = image(
+            env=env,
+            sensor_cfg=sensor_cfg,
+            data_type=data_type,
+            convert_perspective_to_orthogonal=convert_perspective_to_orthogonal,
+            normalize=True,  # normalized inputs help training stability
+        )
+
+        image_device = images.device
+
+        if model_device is not None:
+            images = images.to(model_device)
+
+        proc_images = self.model_zoo_cfg[model_name]["preprocess"](images)
+        features = self.model_zoo_cfg[model_name]["inference"](self.model_zoo[model_name], proc_images)
+
+        # return the features on the same device as the original images
+        return features.to(image_device).clone()
+
+
 """
 Actions.
 """
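
For reference, a class-based term like this plugs into a manager-based environment's observation group the same way a function term does. The sketch below shows one way that wiring could look; it assumes a scene that already contains a TiledCamera named "tiled_camera" and that this module is importable as `mdp`, and the group/attribute names are illustrative only, not part of this change.

from omni.isaac.lab.managers import ObservationGroupCfg as ObsGroup
from omni.isaac.lab.managers import ObservationTermCfg as ObsTerm
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.utils import configclass

import omni.isaac.lab.envs.mdp as mdp


@configclass
class PolicyCfg(ObsGroup):
    """Observation group feeding frozen image features to the policy (illustrative)."""

    # frozen ResNet-18 features extracted from the tiled camera's RGB images
    image_feats = ObsTerm(
        func=mdp.image_features,
        params={
            "sensor_cfg": SceneEntityCfg("tiled_camera"),
            "data_type": "rgb",
            "model_name": "resnet18",
            "model_device": "cuda:0",
        },
    )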
|
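Because `model_zoo_cfg` is merged over the defaults at call time, other frozen backbones can be swapped in without touching this file. A minimal sketch of a custom entry, assuming a DINOv2 ViT-S/14 backbone fetched from torch.hub (the model choice is illustrative, and real use would need camera resolutions compatible with the backbone's patch size):

import torch

custom_zoo = {
    "dinov2_vits14": {
        # callable returning the frozen model; invoked lazily on first use
        "model": lambda: torch.hub.load("facebookresearch/dinov2", "dinov2_vits14").eval().to("cuda:0"),
        # images arrive as [batch, height, width, 3] in [0, 1]; torch models expect channels first
        "preprocess": lambda img: img.permute(0, 3, 1, 2),
        # callable mapping (model, preprocessed images) -> feature tensor
        "inference": lambda model, images: model(images),
    }
}

# the custom entry is then selected by name, e.g. via the term's parameters:
#   params={"model_zoo_cfg": custom_zoo, "model_name": "dinov2_vits14"}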