Skip to content

Commit

Permalink
Merge branch 'og-develop' into mesh-scaling-v2
Browse files Browse the repository at this point in the history
  • Loading branch information
ChengshuLi committed Mar 2, 2024
2 parents d268c32 + 0e1b96f commit 772b27b
Show file tree
Hide file tree
Showing 22 changed files with 415 additions and 252 deletions.
41 changes: 27 additions & 14 deletions omnigibson/envs/env_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -419,29 +419,34 @@ def get_obs(self):
Get the current environment observation.
Returns:
dict: Keyword-mapped observations, which are possibly nested
2-tuple:
dict: Keyword-mapped observations, which are possibly nested
dict: Additional information about the observations
"""
obs = dict()
info = dict()

# Grab all observations from each robot
for robot in self.robots:
obs[robot.name] = robot.get_obs()
obs[robot.name], info[robot.name] = robot.get_obs()

# Add task observations
obs["task"] = self._task.get_obs(env=self)

# Add external sensor observations if they exist
if self._external_sensors is not None:
external_obs = dict()
external_info = dict()
for sensor_name, sensor in self._external_sensors.items():
external_obs[sensor_name] = sensor.get_obs()
external_obs[sensor_name], external_info[sensor_name] = sensor.get_obs()
obs["external"] = external_obs
info["external"] = external_info

# Possibly flatten obs if requested
if self._flatten_obs_space:
obs = recursively_generate_flat_dict(dic=obs)

return obs
return obs, info

def get_scene_graph(self):
"""
Expand Down Expand Up @@ -506,7 +511,7 @@ def step(self, action):
og.sim.step()

# Grab observations
obs = self.get_obs()
obs, obs_info = self.get_obs()

# Step the scene graph builder if necessary
if self._scene_graph_builder is not None:
Expand All @@ -515,6 +520,7 @@ def step(self, action):
# Grab reward, done, and info, and populate with internal info
reward, done, info = self.task.step(self, action)
self._populate_info(info)
info["obs_info"] = obs_info

if done and self._automatic_reset:
# Add lost observation to our information dict, and reset
Expand Down Expand Up @@ -550,7 +556,7 @@ def reset(self):
og.sim.step()

# Grab and return observations
obs = self.get_obs()
obs, obs_info = self.get_obs()

if self._loaded:
# Sanity check to make sure received observations match expected observation space
Expand All @@ -572,18 +578,25 @@ def reset(self):
missing_keys = exp_keys - real_keys
extra_keys = real_keys - exp_keys

log.error("MISSING OBSERVATION KEYS:")
log.error(missing_keys)
log.error("EXTRA OBSERVATION KEYS:")
log.error(extra_keys)
log.error("SHARED OBSERVATION KEY DTYPES AND SHAPES:")
if missing_keys:
log.error("MISSING OBSERVATION KEYS:")
log.error(missing_keys)
if extra_keys:
log.error("EXTRA OBSERVATION KEYS:")
log.error(extra_keys)

mismatched_keys = []
for k in shared_keys:
log.error(exp_obs[k])
log.error(real_obs[k])
if exp_obs[k][2:] != real_obs[k][2:]: # Compare dtypes and shapes
mismatched_keys.append(k)
log.error(f"MISMATCHED OBSERVATION FOR KEY '{k}':")
log.error(f"Expected: {exp_obs[k]}")
log.error(f"Received: {real_obs[k]}")

raise ValueError("Observation space does not match returned observations!")

return obs

return obs, obs_info

@property
def episode_steps(self):
Expand Down
7 changes: 3 additions & 4 deletions omnigibson/examples/objects/draw_bounding_box.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@

import numpy as np
import omnigibson as og
import omnigibson.lazy as lazy


def main(random_selection=False, headless=False, short_exec=False):
Expand Down Expand Up @@ -67,22 +66,22 @@ def main(random_selection=False, headless=False, short_exec=False):
env.step(np.array([]))

# Grab observations from viewer camera and write them to disk
obs = cam.get_obs()
obs, _ = cam.get_obs()

for bbox_modality in bbox_modalities:
# Print out each of the modalities
og.log.info(f"Observation modality {bbox_modality}:\n{obs[bbox_modality]}")

# Also write the 2d loose bounding box to disk
if "3d" not in bbox_modality:
colorized_img = lazy.omni.isaac.synthetic_utils.visualization.colorize_bboxes(bboxes_2d_data=obs[bbox_modality], bboxes_2d_rgb=obs["rgb"], num_channels=4)
from omnigibson.utils.deprecated_utils import colorize_bboxes
colorized_img = colorize_bboxes(bboxes_2d_data=obs[bbox_modality], bboxes_2d_rgb=obs["rgb"], num_channels=4)
fpath = f"{bbox_modality}_img.png"
plt.imsave(fpath, colorized_img)
og.log.info(f"Saving modality [{bbox_modality}] image to: {fpath}")

# Always close environment down at end
env.close()


if __name__ == "__main__":
main()
2 changes: 1 addition & 1 deletion omnigibson/object_states/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
from omnigibson.object_states.particle_modifier import ParticleRemover, ParticleApplier
from omnigibson.object_states.particle_source_or_sink import ParticleSource, ParticleSink
from omnigibson.object_states.pose import Pose
from omnigibson.object_states.robot_related_states import IsGrasping
from omnigibson.object_states.robot_related_states import IsGrasping, ObjectsInFOVOfRobot
from omnigibson.object_states.saturated import Saturated
from omnigibson.object_states.slicer_active import SlicerActive
from omnigibson.object_states.temperature import Temperature
Expand Down
2 changes: 1 addition & 1 deletion omnigibson/object_states/factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
from omnigibson.object_states import *

_ABILITY_TO_STATE_MAPPING = {
"robot": [IsGrasping],
"robot": [IsGrasping, ObjectsInFOVOfRobot],
"attachable": [AttachedTo],
"particleApplier": [ParticleApplier],
"particleRemover": [ParticleRemover],
Expand Down
28 changes: 18 additions & 10 deletions omnigibson/object_states/robot_related_states.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import numpy as np
import omnigibson as og
from omnigibson.object_states.object_state_base import AbsoluteObjectState, BooleanStateMixin, RelativeObjectState
from omnigibson.sensors import VisionSensor


_IN_REACH_DISTANCE_THRESHOLD = 2.0
Expand Down Expand Up @@ -50,13 +51,20 @@ def _get_value(self, obj):
# return not body_ids.isdisjoint(robot.states[ObjectsInFOVOfRobot].get_value())


# class ObjectsInFOVOfRobot(AbsoluteObjectState):
# def _get_value(self):
# # Pass the FOV through the instance-to-body ID mapping.
# seg = self.simulator.renderer.render_single_robot_camera(self.obj, modes="ins_seg")[0][:, :, 0]
# seg = np.round(seg * MAX_INSTANCE_COUNT).astype(int)
# body_ids = self.simulator.renderer.get_pb_ids_for_instance_ids(seg)

# # Pixels that don't contain an object are marked -1 but we don't want to include that
# # as a body ID.
# return set(np.unique(body_ids)) - {-1}
class ObjectsInFOVOfRobot(AbsoluteObjectState, RobotStateMixin):
    def _get_value(self):
        """
        Gets all objects in the robot's field of view.

        Aggregates instance-segmentation labels from every VisionSensor mounted
        on the robot and maps each label back to a scene object through the
        scene's object registry.

        Returns:
            list: Objects currently visible to at least one of the robot's
                vision sensors. May contain duplicates if an object is seen by
                multiple sensors.

        Raises:
            ValueError: If the robot has no vision sensors.
        """
        # Collect the vision sensors once instead of isinstance-scanning twice
        vision_sensors = [sensor for sensor in self.robot.sensors.values() if isinstance(sensor, VisionSensor)]
        if not vision_sensors:
            raise ValueError("No vision sensors found on robot.")

        # Segmentation labels that do not correspond to scene objects
        names_to_exclude = {'background', 'unlabelled'}

        obj_names = []
        for sensor in vision_sensors:
            # Instance segmentation labels come back in the sensor's info dict
            _, info = sensor.get_obs()
            obj_names.extend(name for name in info['seg_instance'].values() if name not in names_to_exclude)

        # Map names to registered objects, dropping names with no registry
        # entry (e.g. prims that are not tracked as scene objects)
        candidates = (og.sim.scene.object_registry("name", name) for name in obj_names)
        return [obj for obj in candidates if obj is not None]
6 changes: 2 additions & 4 deletions omnigibson/objects/object_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,10 @@
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import create_module_macros, gm
from omnigibson.utils.constants import SemanticClass
from omnigibson.utils.usd_utils import create_joint, CollisionAPI
from omnigibson.prims.entity_prim import EntityPrim
from omnigibson.utils.python_utils import Registerable, classproperty, get_uuid
from omnigibson.utils.constants import PrimType, CLASS_NAME_TO_CLASS_ID
from omnigibson.utils.constants import PrimType, semantic_class_name_to_id
from omnigibson.utils.ui_utils import create_module_logger, suppress_omni_log


Expand Down Expand Up @@ -86,7 +85,7 @@ def __init__(

# Infer class ID if not specified
if class_id is None:
class_id = CLASS_NAME_TO_CLASS_ID.get(category, SemanticClass.USER_ADDED_OBJS)
class_id = semantic_class_name_to_id()[category]
self.class_id = class_id

# Values to be created at runtime
Expand Down Expand Up @@ -191,7 +190,6 @@ def _post_load(self):
lazy.pxr.PhysxSchema.PhysxArticulationAPI.Apply(root_prim)
self.self_collisions = self._load_config["self_collisions"]

# Update semantics
lazy.omni.isaac.core.utils.semantics.add_update_semantics(
prim=self._prim,
semantic_label=self.category,
Expand Down
21 changes: 13 additions & 8 deletions omnigibson/robots/robot_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -293,28 +293,33 @@ def get_obs(self):
(e.g.: proprio, rgb, etc.)
Returns:
dict: Keyword-mapped dictionary mapping observation modality names to
observations (usually np arrays)
2-tuple:
dict: Keyword-mapped dictionary mapping observation modality names to
observations (usually np arrays)
dict: Keyword-mapped dictionary mapping observation modality names to
additional info
"""
# Our sensors already know what observation modalities it has, so we simply iterate over all of them
# and grab their observations, processing them into a flat dict
obs_dict = dict()
info_dict = dict()
for sensor_name, sensor in self._sensors.items():
obs_dict[sensor_name] = sensor.get_obs()
obs_dict[sensor_name], info_dict[sensor_name] = sensor.get_obs()

# Have to handle proprio separately since it's not an actual sensor
if "proprio" in self._obs_modalities:
obs_dict["proprio"] = self.get_proprioception()
obs_dict["proprio"], info_dict["proprio"] = self.get_proprioception()

return obs_dict
return obs_dict, info_dict

def get_proprioception(self):
"""
Returns:
n-array: numpy array of all robot-specific proprioceptive observations.
dict: empty dictionary, a placeholder for additional info
"""
proprio_dict = self._get_proprioception_dict()
return np.concatenate([proprio_dict[obs] for obs in self._proprio_obs])
return np.concatenate([proprio_dict[obs] for obs in self._proprio_obs]), {}

def _get_proprioception_dict(self):
"""
Expand Down Expand Up @@ -391,7 +396,7 @@ def visualize_sensors(self):
frames = dict()
remaining_obs_modalities = deepcopy(self.obs_modalities)
for sensor in self.sensors.values():
obs = sensor.get_obs()
obs, _ = sensor.get_obs()
sensor_frames = []
if isinstance(sensor, VisionSensor):
# We check for rgb, depth, normal, seg_instance
Expand Down Expand Up @@ -515,7 +520,7 @@ def proprioception_dim(self):
Returns:
int: Size of self.get_proprioception() vector
"""
return len(self.get_proprioception())
return len(self.get_proprioception()[0])

@property
def _default_sensor_config(self):
Expand Down
2 changes: 1 addition & 1 deletion omnigibson/scene_graphs/graph_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -264,7 +264,7 @@ def _draw_graph():
# Prepare pyplot figure that's sized to match the robot video.
robot = scene.robots[0]
robot_camera_sensor, = [s for s in robot.sensors.values() if isinstance(s, VisionSensor) and "rgb" in s.modalities]
robot_view = (robot_camera_sensor.get_obs()["rgb"][..., :3]).astype(np.uint8)
robot_view = (robot_camera_sensor.get_obs()[0]["rgb"][..., :3]).astype(np.uint8)
imgheight, imgwidth, _ = robot_view.shape

figheight = 4.8
Expand Down
1 change: 1 addition & 0 deletions omnigibson/scenes/scene_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -192,6 +192,7 @@ def _load(self):
self._skybox = LightObject(
prim_path="/World/skybox",
name="skybox",
category="background",
light_type="Dome",
intensity=1500,
fixed_base=True,
Expand Down
4 changes: 2 additions & 2 deletions omnigibson/sensors/scan_sensor.py
Original file line number Diff line number Diff line change
Expand Up @@ -225,7 +225,7 @@ def get_local_occupancy_grid(self, scan):

def _get_obs(self):
# Run super first to grab any upstream obs
obs = super()._get_obs()
obs, info = super()._get_obs()

# Add scan info (normalized to [0.0, 1.0])
if "scan" in self._modalities:
Expand All @@ -238,7 +238,7 @@ def _get_obs(self):
if "occupancy_grid" in self._modalities:
obs["occupancy_grid"] = self.get_local_occupancy_grid(scan=obs["scan"])

return obs
return obs, info

@property
def n_horizontal_rays(self):
Expand Down
10 changes: 6 additions & 4 deletions omnigibson/sensors/sensor_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,24 +73,26 @@ def get_obs(self):
if not self._enabled:
return dict()

obs = self._get_obs()
obs, info = self._get_obs()

if self._noise is not None:
for k, v in obs.items():
if k not in self.no_noise_modalities:
obs[k] = self._noise(v)

return obs
return obs, info

def _get_obs(self):
"""
Get sensor reading. Should generally be extended by subclass.
Returns:
dict: Keyword-mapped observations mapping modality names to numpy arrays of arbitrary dimension
2-tuple:
dict: Keyword-mapped observations mapping modality names to numpy arrays of arbitrary dimension
dict: Additional information about the observations.
"""
# Default is returning an empty dict
return dict()
return dict(), dict()

def _load_observation_space(self):
# Fill in observation space based on mapping and active modalities
Expand Down
Loading

0 comments on commit 772b27b

Please sign in to comment.