Skip to content

Commit

Permalink
🎮 Simulator supports seg and depth real-time camera rendering now
Browse files Browse the repository at this point in the history
Check `examples/simulator/example_object_show.py`
  • Loading branch information
Skylark0924 committed Dec 2, 2023
1 parent ad6df8b commit 21e5990
Show file tree
Hide file tree
Showing 5 changed files with 140 additions and 12 deletions.
12 changes: 4 additions & 8 deletions examples/simulator/example_object_show.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,18 +11,14 @@
# YCB object URDFs to load; each entry is spawned as one actor with its own
# segmentation id, so every object gets a distinct color in the "seg" view.
asset_files = ["urdf/ycb/002_master_chef_can/002_master_chef_can.urdf",
               "urdf/ycb/003_cracker_box/003_cracker_box.urdf",
               "urdf/ycb/004_sugar_box/004_sugar_box.urdf",
               "urdf/ycb/005_tomato_soup_can/005_tomato_soup_can.urdf",
               "urdf/ycb/006_mustard_bottle/006_mustard_bottle.urdf",
               "urdf/ycb/007_tuna_fish_can/007_tuna_fish_can.urdf",
               "urdf/ycb/008_pudding_box/008_pudding_box.urdf",
               "urdf/ycb/009_gelatin_box/009_gelatin_box.urdf",
               "urdf/ycb/010_potted_meat_can/010_potted_meat_can.urdf",
               "urdf/ycb/011_banana/011_banana.urdf",
               ]
# NOTE: `asset_file` also accepts a single URDF path string instead of a list.
object_sim = rf.sim.ObjectSim(args, asset_file=asset_files)
# Cameras must be created BEFORE show(): show() only streams camera images
# when create_track_cameras() has set the visual-observation flag.
object_sim.create_track_cameras()
object_sim.show(mode="seg")
2 changes: 1 addition & 1 deletion rofunc/simulator/base_sim.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import os
from typing import List

import math
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image as Im
Expand Down
80 changes: 78 additions & 2 deletions rofunc/simulator/object_sim.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,10 +14,13 @@

import math
import os.path

import matplotlib.pyplot as plt
import numpy as np

from rofunc.simulator.base_sim import PlaygroundSim
from rofunc.utils.logger.beauty_logger import beauty_print
from rofunc.utils.visualab.image import overlay_seg_w_img


class ObjectSim:
Expand All @@ -42,6 +45,7 @@ def __init__(self, args, asset_file, asset_root=None):
self.viewer = self.PlaygroundSim.viewer

self.num_envs = 1
self.visual_obs_flag = False
self.create_env()

def create_env(self):
Expand Down Expand Up @@ -72,7 +76,7 @@ def create_env(self):

if isinstance(self.asset_file, list):
for i, asset_file in enumerate(self.asset_file):
self.create_object_single(asset_file, i)
self.create_object_single(asset_file, i + 1)
elif isinstance(self.asset_file, str):
self.create_object_single(self.asset_file)

Expand Down Expand Up @@ -105,9 +109,16 @@ def create_object_single(self, asset_file, index=1):
env = self.envs[i]
# add robot
object_handle = self.gym.create_actor(env, object_asset, pose, object_name, i, 0)
self.gym.set_rigid_body_segmentation_id(env, object_handle, 0, index)
object_handles.append(object_handle)

def show(self):
def show(self, mode="rgb"):
"""
Show the simulation
:param mode: visual mode, can be "rgb", "depth", "seg"
:return:
"""
from isaacgym import gymapi

# create a local copy of initial state, which we can send back for reset
Expand All @@ -116,6 +127,9 @@ def show(self):
# subscribe to R event for reset
self.gym.subscribe_viewer_keyboard_event(self.viewer, gymapi.KEY_R, "reset")

if self.visual_obs_flag:
fig = plt.figure("RGB", figsize=(16, 8))

while not self.gym.query_viewer_has_closed(self.viewer):
# Get input actions from the viewer and handle them appropriately
for evt in self.gym.query_viewer_action_events(self.viewer):
Expand All @@ -126,6 +140,45 @@ def show(self):
self.gym.simulate(self.sim)
self.gym.fetch_results(self.sim, True)

if self.visual_obs_flag:
# digest image
self.gym.render_all_camera_sensors(self.sim)
self.gym.start_access_image_tensors(self.sim)

cam_img0 = self.gym.get_camera_image(self.sim, self.envs[0], self.camera_handles[0],
gymapi.IMAGE_COLOR).reshape(1280, 1280, 4)
cam_img0_depth = self.gym.get_camera_image(self.sim, self.envs[0], self.camera_handles[0],
gymapi.IMAGE_DEPTH).reshape(1280, 1280)
cam_img0_seg = self.gym.get_camera_image(self.sim, self.envs[0], self.camera_handles[0],
gymapi.IMAGE_SEGMENTATION).reshape(1280, 1280)
cam_img1 = self.gym.get_camera_image(self.sim, self.envs[0], self.camera_handles[1],
gymapi.IMAGE_COLOR).reshape(1280, 1280, 4)
cam_img1_depth = self.gym.get_camera_image(self.sim, self.envs[0], self.camera_handles[1],
gymapi.IMAGE_DEPTH).reshape(1280, 1280)
cam_img1_seg = self.gym.get_camera_image(self.sim, self.envs[0], self.camera_handles[1],
gymapi.IMAGE_SEGMENTATION).reshape(1280, 1280)

if mode == "rgb":
cam_img = np.concatenate((cam_img0[:, :, :3], cam_img1[:, :, :3]), axis=1)
plt.imshow(cam_img)
elif mode == "depth":
cam_img_depth = np.concatenate((cam_img0_depth, cam_img1_depth), axis=1)
cam_img_depth = cam_img_depth / np.abs(cam_img_depth).max() * 255
plt.imshow(cam_img_depth, 'gray')
elif mode == "seg":
cam_img = np.concatenate((cam_img0[:, :, :3], cam_img1[:, :, :3]), axis=1)
cam_img_seg = np.concatenate((cam_img0_seg, cam_img1_seg), axis=1)
image_with_masks = overlay_seg_w_img(cam_img, cam_img_seg, alpha=0.5)
plt.imshow(image_with_masks)
else:
raise NotImplementedError

plt.axis('off')
plt.pause(1e-9)
fig.clf()

self.gym.end_access_image_tensors(self.sim)

# Step rendering
self.gym.step_graphics(self.sim)
self.gym.draw_viewer(self.viewer, self.sim, False)
Expand All @@ -137,3 +190,26 @@ def show(self):
beauty_print("Done")
self.gym.destroy_viewer(self.viewer)
self.gym.destroy_sim(self.sim)

def create_track_cameras(self):
    """Attach two fixed 1280x1280 camera sensors to every env.

    Side effects: renders all camera sensors once, flips
    ``self.visual_obs_flag`` on so that ``show()`` starts streaming camera
    images, and stores ``self.camera_handles = [handle0, handle1]``.

    NOTE(review): with more than one env only the handles created for the
    last env are kept; harmless here because ``self.num_envs`` is 1.
    """
    from isaacgym import gymapi

    # Both cameras share the same properties: tensor access enabled,
    # square 1280x1280 resolution.
    props = gymapi.CameraProperties()
    props.enable_tensors = True
    props.width = 1280
    props.height = 1280

    # Camera 0: looks at the origin from y = -0.8.
    for i in range(self.num_envs):
        cam0 = self.gym.create_camera_sensor(self.envs[i], props)
        self.gym.set_camera_location(cam0, self.envs[i],
                                     gymapi.Vec3(0.5, -0.8, 1.5),
                                     gymapi.Vec3(0, 0, 0))

    # Camera 1: mirrored viewpoint at y = +0.8.
    for i in range(self.num_envs):
        cam1 = self.gym.create_camera_sensor(self.envs[i], props)
        self.gym.set_camera_location(cam1, self.envs[i],
                                     gymapi.Vec3(0.5, 0.8, 1.5),
                                     gymapi.Vec3(0, 0, 0))

    self.gym.render_all_camera_sensors(self.sim)
    self.visual_obs_flag = True
    self.camera_handles = [cam0, cam1]
2 changes: 1 addition & 1 deletion rofunc/simulator/utils/ycb2urdf.py
Original file line number Diff line number Diff line change
Expand Up @@ -227,7 +227,7 @@ def export_urdf(mesh,
mass = np.maximum(YCB_MASS[piece_name], minimum_mass)
et.SubElement(inertial, 'inertia', ixx='0.0001', ixy='0.0', ixz='0.0',
iyy='0.0001', iyz='0.0', izz='0.0001')
et.SubElement(inertial, 'mass', value='{:.2E}'.format(mass))
et.SubElement(inertial, 'mass', value='{}'.format(mass))

# Visual Information
visual = et.SubElement(link, 'visual')
Expand Down
56 changes: 56 additions & 0 deletions rofunc/utils/visualab/image.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
import cv2
import numpy as np

# Fixed 16-entry color palette used by overlay_seg_w_img: each non-zero
# segmentation id is rendered with color_lst[id].
# NOTE(review): overlay() reverses each tuple before use, so the on-screen
# channel order depends on the display pipeline — confirm RGB vs BGR intent.
color_lst = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (72, 209, 204), (238, 130, 238), (221, 160, 221), (139, 69, 19),
             (218, 112, 214), (210, 105, 30), (188, 143, 143), (119, 136, 153), (153, 50, 204), (106, 90, 205),
             (135, 206, 250), (70, 130, 180), (138, 43, 226)]


def overlay_seg_w_img(image_rgb, image_seg, alpha=0.5):
    """Blend colored segmentation masks on top of an RGB image.

    Every non-zero id in `image_seg` is painted with a palette color from
    `color_lst` and alpha-blended onto the image; id 0 is background and is
    left untouched.

    :param image_rgb: HxWx3 image, np.ndarray
    :param image_seg: HxW integer segmentation map, np.ndarray
    :param alpha: segmentation mask's transparency. float = 0.5
    :return: HxWx3 blended image, np.ndarray
    """

    def overlay(image, mask, color, alpha, resize=None):
        """Combine `image` with one binary segmentation mask.

        Adapted from
        https://www.kaggle.com/code/purplejester/showing-samples-with-segmentation-mask-overlay
        Params:
            image: HxWx3 image. np.ndarray,
            mask: HxW binary mask (non-zero = masked). np.ndarray,
            color: color for the mask. tuple[int, int, int] = (255, 0, 0)
            alpha: segmentation mask's transparency. float = 0.5,
            resize: if provided, both image and its overlay are resized
                before blending. tuple[int, int] = (1024, 1024)
        Returns:
            image_combined: the combined image. np.ndarray
        """
        color = color[::-1]  # kept from the original: palette entry is reversed before painting
        # Paint masked pixels with `color`, leave the rest unchanged.
        # (Replaces the np.ma.MaskedArray/filled() trick: recent NumPy
        # rejects a non-scalar fill_value on plain ndarrays.)
        fill = np.asarray(color, dtype=image.dtype)
        image_overlay = np.where(mask[..., None] != 0, fill, image)

        if resize is not None:
            # Bugfix: `image` is already HxWxC, so it must NOT be transposed
            # before cv2.resize (the Kaggle original worked on CHW tensors).
            image = cv2.resize(image, resize)
            image_overlay = cv2.resize(image_overlay, resize)

        image_combined = cv2.addWeighted(image, 1 - alpha, image_overlay, alpha, 0)

        return image_combined

    image_with_masks = np.copy(image_rgb)
    seg_ids = np.unique(image_seg)

    for seg_id in seg_ids:
        if seg_id == 0:  # background: never colored
            continue
        mask = np.where(image_seg == seg_id, 1, 0)
        # Bugfix: wrap around the palette so ids >= len(color_lst) no longer
        # raise IndexError (segmentation ids are assigned per object and can
        # exceed the 16 palette entries).
        color = color_lst[int(seg_id) % len(color_lst)]
        image_with_masks = overlay(image_with_masks, mask, color=color, alpha=alpha)
    return image_with_masks

0 comments on commit 21e5990

Please sign in to comment.