file_path (string, 22-162 chars) | content (string, 19-501k chars) | size (int64, 19-501k) | lang (string, 1 class) | avg_line_length (float64, 6.33-100) | max_line_length (int64, 18-935) | alphanum_fraction (float64, 0.34-0.93) |
---|---|---|---|---|---|---|
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/allegro_hand.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.allegro_hand import AllegroHand
from omniisaacgymenvs.robots.articulations.views.allegro_hand_view import AllegroHandView
from omniisaacgymenvs.tasks.shared.in_hand_manipulation import InHandManipulationTask
class AllegroHandTask(InHandManipulationTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
InHandManipulationTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self.object_type = self._task_cfg["env"]["objectType"]
assert self.object_type in ["block"]
self.obs_type = self._task_cfg["env"]["observationType"]
if self.obs_type not in ["full_no_vel", "full"]:
raise Exception("Unknown type of observations!\nobservationType should be one of: [full_no_vel, full]")
print("Obs type:", self.obs_type)
self.num_obs_dict = {
"full_no_vel": 50,
"full": 72,
}
self.object_scale = torch.tensor([1.0, 1.0, 1.0])
self._num_observations = self.num_obs_dict[self.obs_type]
self._num_actions = 16
self._num_states = 0
InHandManipulationTask.update_config(self)
def get_starting_positions(self):
self.hand_start_translation = torch.tensor([0.0, 0.0, 0.5], device=self.device)
self.hand_start_orientation = torch.tensor([0.257551, 0.283045, 0.683330, -0.621782], device=self.device)
self.pose_dy, self.pose_dz = -0.2, 0.06
def get_hand(self):
allegro_hand = AllegroHand(
prim_path=self.default_zero_env_path + "/allegro_hand",
name="allegro_hand",
translation=self.hand_start_translation,
orientation=self.hand_start_orientation,
)
self._sim_config.apply_articulation_settings(
"allegro_hand",
get_prim_at_path(allegro_hand.prim_path),
self._sim_config.parse_actor_config("allegro_hand"),
)
allegro_hand_prim = self._stage.GetPrimAtPath(allegro_hand.prim_path)
allegro_hand.set_allegro_hand_properties(stage=self._stage, allegro_hand_prim=allegro_hand_prim)
allegro_hand.set_motor_control_mode(
stage=self._stage, allegro_hand_path=self.default_zero_env_path + "/allegro_hand"
)
def get_hand_view(self, scene):
return AllegroHandView(prim_paths_expr="/World/envs/.*/allegro_hand", name="allegro_hand_view")
def get_observations(self):
self.get_object_goal_observations()
self.hand_dof_pos = self._hands.get_joint_positions(clone=False)
self.hand_dof_vel = self._hands.get_joint_velocities(clone=False)
if self.obs_type == "full_no_vel":
self.compute_full_observations(True)
elif self.obs_type == "full":
self.compute_full_observations()
else:
print("Unkown observations type!")
observations = {self._hands.name: {"obs_buf": self.obs_buf}}
return observations
def compute_full_observations(self, no_vel=False):
if no_vel:
self.obs_buf[:, 0 : self.num_hand_dofs] = unscale(
self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
)
self.obs_buf[:, 16:19] = self.object_pos
self.obs_buf[:, 19:23] = self.object_rot
self.obs_buf[:, 23:26] = self.goal_pos
self.obs_buf[:, 26:30] = self.goal_rot
self.obs_buf[:, 30:34] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, 34:50] = self.actions
else:
self.obs_buf[:, 0 : self.num_hand_dofs] = unscale(
self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
)
self.obs_buf[:, self.num_hand_dofs : 2 * self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel
self.obs_buf[:, 32:35] = self.object_pos
self.obs_buf[:, 35:39] = self.object_rot
self.obs_buf[:, 39:42] = self.object_linvel
self.obs_buf[:, 42:45] = self.vel_obs_scale * self.object_angvel
self.obs_buf[:, 45:48] = self.goal_pos
self.obs_buf[:, 48:52] = self.goal_rot
self.obs_buf[:, 52:56] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, 56:72] = self.actions
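# The "full" observation written above occupies a contiguous 72-dim buffer. A minimal
# standalone sketch of that bookkeeping (illustrative helper, not used by the task;
# widths follow the slices above, with 16-wide joint blocks matching self._num_actions):
def _allegro_full_obs_layout():
    layout = [
        ("hand_dof_pos", 16),
        ("hand_dof_vel", 16),
        ("object_pos", 3),
        ("object_rot", 4),
        ("object_linvel", 3),
        ("object_angvel", 3),
        ("goal_pos", 3),
        ("goal_rot", 4),
        ("object_to_goal_rot", 4),
        ("actions", 16),
    ]
    offsets, start = {}, 0
    for name, width in layout:
        offsets[name] = (start, start + width)
        start += width
    assert start == 72  # matches num_obs_dict["full"]
    return offsets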
| 6,329 | Python | 42.655172 | 115 | 0.658872 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/ball_balance.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import RigidPrim, RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.torch.maths import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.balance_bot import BalanceBot
from pxr import PhysxSchema
class BallBalanceTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._num_observations = 12 + 12
self._num_actions = 3
self.anchored = False
RLTask.__init__(self, name, env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._dt = self._task_cfg["sim"]["dt"]
self._table_position = torch.tensor([0, 0, 0.56])
self._ball_position = torch.tensor([0.0, 0.0, 1.0])
self._ball_radius = 0.1
self._action_speed_scale = self._task_cfg["env"]["actionSpeedScale"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
def set_up_scene(self, scene) -> None:
self.get_balance_table()
self.add_ball()
super().set_up_scene(scene, replicate_physics=False)
self.set_up_table_anchors()
self._balance_bots = ArticulationView(
prim_paths_expr="/World/envs/.*/BalanceBot/tray", name="balance_bot_view", reset_xform_properties=False
)
scene.add(self._balance_bots)
self._balls = RigidPrimView(
prim_paths_expr="/World/envs/.*/Ball/ball", name="ball_view", reset_xform_properties=False
)
scene.add(self._balls)
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("balance_bot_view"):
scene.remove_object("balance_bot_view", registry_only=True)
if scene.object_exists("ball_view"):
scene.remove_object("ball_view", registry_only=True)
self._balance_bots = ArticulationView(
prim_paths_expr="/World/envs/.*/BalanceBot/tray", name="balance_bot_view", reset_xform_properties=False
)
scene.add(self._balance_bots)
self._balls = RigidPrimView(
prim_paths_expr="/World/envs/.*/Ball/ball", name="ball_view", reset_xform_properties=False
)
scene.add(self._balls)
def get_balance_table(self):
balance_table = BalanceBot(
prim_path=self.default_zero_env_path + "/BalanceBot", name="BalanceBot", translation=self._table_position
)
self._sim_config.apply_articulation_settings(
"table", get_prim_at_path(balance_table.prim_path), self._sim_config.parse_actor_config("table")
)
def add_ball(self):
ball = DynamicSphere(
prim_path=self.default_zero_env_path + "/Ball/ball",
translation=self._ball_position,
name="ball_0",
radius=self._ball_radius,
color=torch.tensor([0.9, 0.6, 0.2]),
)
self._sim_config.apply_articulation_settings(
"ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball")
)
def set_up_table_anchors(self):
from pxr import Gf
height = 0.08
stage = get_current_stage()
for i in range(self._num_envs):
base_path = f"{self.default_base_env_path}/env_{i}/BalanceBot"
for j, leg_offset in enumerate([(0.4, 0, height), (-0.2, 0.34641, 0), (-0.2, -0.34641, 0)]):
# fix the legs to ground
leg_path = f"{base_path}/lower_leg{j}"
ground_joint_path = leg_path + "_ground"
env_pos = stage.GetPrimAtPath(f"{self.default_base_env_path}/env_{i}").GetAttribute("xformOp:translate").Get()
anchor_pos = env_pos + Gf.Vec3d(*leg_offset)
self.fix_to_ground(stage, ground_joint_path, leg_path, anchor_pos)
def fix_to_ground(self, stage, joint_path, prim_path, anchor_pos):
from pxr import UsdPhysics, Gf
# D6 fixed joint
d6FixedJoint = UsdPhysics.Joint.Define(stage, joint_path)
d6FixedJoint.CreateBody0Rel().SetTargets(["/World/defaultGroundPlane"])
d6FixedJoint.CreateBody1Rel().SetTargets([prim_path])
d6FixedJoint.CreateLocalPos0Attr().Set(anchor_pos)
d6FixedJoint.CreateLocalRot0Attr().Set(Gf.Quatf(1.0, Gf.Vec3f(0, 0, 0)))
d6FixedJoint.CreateLocalPos1Attr().Set(Gf.Vec3f(0, 0, 0.18))
d6FixedJoint.CreateLocalRot1Attr().Set(Gf.Quatf(1.0, Gf.Vec3f(0, 0, 0)))
# lock all DOF (lock - low is greater than high)
d6Prim = stage.GetPrimAtPath(joint_path)
limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, "transX")
limitAPI.CreateLowAttr(1.0)
limitAPI.CreateHighAttr(-1.0)
limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, "transY")
limitAPI.CreateLowAttr(1.0)
limitAPI.CreateHighAttr(-1.0)
limitAPI = UsdPhysics.LimitAPI.Apply(d6Prim, "transZ")
limitAPI.CreateLowAttr(1.0)
limitAPI.CreateHighAttr(-1.0)
def get_observations(self) -> dict:
ball_positions, ball_orientations = self._balls.get_world_poses(clone=False)
ball_positions = ball_positions[:, 0:3] - self._env_pos
ball_velocities = self._balls.get_velocities(clone=False)
ball_linvels = ball_velocities[:, 0:3]
ball_angvels = ball_velocities[:, 3:6]
dof_pos = self._balance_bots.get_joint_positions(clone=False)
dof_vel = self._balance_bots.get_joint_velocities(clone=False)
sensor_force_torques = self._balance_bots.get_measured_joint_forces(joint_indices=self._sensor_indices) # (num_envs, num_sensors, 6)
self.obs_buf[..., 0:3] = dof_pos[..., self.actuated_dof_indices]
self.obs_buf[..., 3:6] = dof_vel[..., self.actuated_dof_indices]
self.obs_buf[..., 6:9] = ball_positions
self.obs_buf[..., 9:12] = ball_linvels
self.obs_buf[..., 12:15] = sensor_force_torques[..., 0] / 20.0
self.obs_buf[..., 15:18] = sensor_force_torques[..., 3] / 20.0
self.obs_buf[..., 18:21] = sensor_force_torques[..., 4] / 20.0
self.obs_buf[..., 21:24] = sensor_force_torques[..., 5] / 20.0
self.ball_positions = ball_positions
self.ball_linvels = ball_linvels
observations = {"ball_balance": {"obs_buf": self.obs_buf}}
return observations
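# Observation layout (24 dims, matching the 12 + 12 split in __init__):
#   0:3   actuated leg joint positions
#   3:6   actuated leg joint velocities
#   6:9   ball position (env-local)
#   9:12  ball linear velocity
#   12:24 components 0, 3, 4 and 5 of each upper-leg force/torque sensor wrench,
#         grouped by component and scaled by 1/20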
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
# update position targets from actions
self.dof_position_targets[..., self.actuated_dof_indices] += (
self._dt * self._action_speed_scale * actions.to(self.device)
)
self.dof_position_targets[:] = tensor_clamp(
self.dof_position_targets, self.bbot_dof_lower_limits, self.bbot_dof_upper_limits
)
# reset position targets for reset envs
self.dof_position_targets[reset_env_ids] = 0
self._balance_bots.set_joint_position_targets(self.dof_position_targets) # .clone())
def reset_idx(self, env_ids):
num_resets = len(env_ids)
env_ids_32 = env_ids.type(torch.int32)
env_ids_64 = env_ids.type(torch.int64)
min_d = 0.001 # min horizontal dist from origin
max_d = 0.4 # max horizontal dist from origin
min_height = 1.0
max_height = 2.0
min_horizontal_speed = 0
max_horizontal_speed = 2
dists = torch_rand_float(min_d, max_d, (num_resets, 1), self._device)
dirs = torch_random_dir_2((num_resets, 1), self._device)
hpos = dists * dirs
speedscales = (dists - min_d) / (max_d - min_d)
hspeeds = torch_rand_float(min_horizontal_speed, max_horizontal_speed, (num_resets, 1), self._device)
hvels = -speedscales * hspeeds * dirs
vspeeds = -torch_rand_float(5.0, 5.0, (num_resets, 1), self._device).squeeze()
ball_pos = self.initial_ball_pos.clone()
ball_rot = self.initial_ball_rot.clone()
# position
ball_pos[env_ids_64, 0:2] += hpos[..., 0:2]
ball_pos[env_ids_64, 2] += torch_rand_float(min_height, max_height, (num_resets, 1), self._device).squeeze()
# rotation
ball_rot[env_ids_64, 0] = 1
ball_rot[env_ids_64, 1:] = 0
ball_velocities = self.initial_ball_velocities.clone()
# linear
ball_velocities[env_ids_64, 0:2] = hvels[..., 0:2]
ball_velocities[env_ids_64, 2] = vspeeds
# angular
ball_velocities[env_ids_64, 3:6] = 0
# reset root state for bbots and balls in selected envs
self._balls.set_world_poses(ball_pos[env_ids_64], ball_rot[env_ids_64], indices=env_ids_32)
self._balls.set_velocities(ball_velocities[env_ids_64], indices=env_ids_32)
# reset root pose and velocity
self._balance_bots.set_world_poses(
self.initial_bot_pos[env_ids_64].clone(), self.initial_bot_rot[env_ids_64].clone(), indices=env_ids_32
)
self._balance_bots.set_velocities(self.initial_bot_velocities[env_ids_64].clone(), indices=env_ids_32)
# reset DOF states for bbots in selected envs
self._balance_bots.set_joint_positions(self.initial_dof_positions[env_ids_64].clone(), indices=env_ids_32)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self):
dof_limits = self._balance_bots.get_dof_limits()
self.bbot_dof_lower_limits, self.bbot_dof_upper_limits = torch.t(dof_limits[0].to(device=self._device))
self.initial_dof_positions = self._balance_bots.get_joint_positions()
self.initial_bot_pos, self.initial_bot_rot = self._balance_bots.get_world_poses()
# self.initial_bot_pos[..., 2] = 0.559 # tray_height
self.initial_bot_velocities = self._balance_bots.get_velocities()
self.initial_ball_pos, self.initial_ball_rot = self._balls.get_world_poses()
self.initial_ball_velocities = self._balls.get_velocities()
self.dof_position_targets = torch.zeros(
(self.num_envs, self._balance_bots.num_dof), dtype=torch.float32, device=self._device, requires_grad=False
)
actuated_joints = ["lower_leg0", "lower_leg1", "lower_leg2"]
self.actuated_dof_indices = torch.tensor(
[self._balance_bots._dof_indices[j] for j in actuated_joints], device=self._device, dtype=torch.long
)
force_links = ["upper_leg0", "upper_leg1", "upper_leg2"]
self._sensor_indices = torch.tensor(
[self._balance_bots._body_indices[j] for j in force_links], device=self._device, dtype=torch.long
)
def calculate_metrics(self) -> None:
ball_dist = torch.sqrt(
self.ball_positions[..., 0] * self.ball_positions[..., 0]
+ (self.ball_positions[..., 2] - 0.7) * (self.ball_positions[..., 2] - 0.7)
+ (self.ball_positions[..., 1]) * self.ball_positions[..., 1]
)
ball_speed = torch.sqrt(
self.ball_linvels[..., 0] * self.ball_linvels[..., 0]
+ self.ball_linvels[..., 1] * self.ball_linvels[..., 1]
+ self.ball_linvels[..., 2] * self.ball_linvels[..., 2]
)
pos_reward = 1.0 / (1.0 + ball_dist)
speed_reward = 1.0 / (1.0 + ball_speed)
self.rew_buf[:] = pos_reward * speed_reward
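# The reward is a product of two inverse-distance terms: 1 / (1 + distance of the ball
# from the env-local point (0, 0, 0.7)) and 1 / (1 + ball speed). For example, a ball
# 0.1 m from that point moving at 0.5 m/s scores (1 / 1.1) * (1 / 1.5) ~= 0.61, while a
# stationary ball at the point scores the maximum of 1.0.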
def is_done(self) -> None:
reset = torch.where(
self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf
)
reset = torch.where(
self.ball_positions[..., 2] < self._ball_radius * 1.5, torch.ones_like(self.reset_buf), reset
)
self.reset_buf[:] = reset
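# A minimal sketch of the limit-inversion trick used in fix_to_ground above, factored
# into an illustrative helper (not used by the task): applying a limit whose low bound
# exceeds its high bound locks that axis of the D6 joint, so locking transX/Y/Z pins
# the body translationally while leaving its rotational axes free.
def _lock_translation(stage, d6_joint_path):
    from pxr import UsdPhysics
    d6_prim = stage.GetPrimAtPath(d6_joint_path)
    for axis in ("transX", "transY", "transZ"):
        limit_api = UsdPhysics.LimitAPI.Apply(d6_prim, axis)
        limit_api.CreateLowAttr(1.0)  # low > high locks the DOF
        limit_api.CreateHighAttr(-1.0)
    return d6_prim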
| 13,958 | Python | 44.174757 | 140 | 0.630391 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/cartpole_camera.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from gym import spaces
import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.tasks.cartpole import CartpoleTask
from omniisaacgymenvs.robots.articulations.cartpole import Cartpole
class CartpoleCameraTask(CartpoleTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._max_episode_length = 500
self._num_observations = 4
self._num_actions = 1
# use multi-dimensional observation for camera RGB
self.observation_space = spaces.Box(
np.ones((self.camera_width, self.camera_height, 3), dtype=np.float32) * -np.Inf,
np.ones((self.camera_width, self.camera_height, 3), dtype=np.float32) * np.Inf)
RLTask.__init__(self, name, env)
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._cartpole_positions = torch.tensor([0.0, 0.0, 2.0])
self._reset_dist = self._task_cfg["env"]["resetDist"]
self._max_push_effort = self._task_cfg["env"]["maxEffort"]
self.camera_type = self._task_cfg["env"].get("cameraType", 'rgb')
self.camera_width = self._task_cfg["env"]["cameraWidth"]
self.camera_height = self._task_cfg["env"]["cameraHeight"]
self.camera_channels = 3
self._export_images = self._task_cfg["env"]["exportImages"]
def cleanup(self) -> None:
# initialize remaining buffers
RLTask.cleanup(self)
# override observation buffer for camera data
self.obs_buf = torch.zeros(
(self.num_envs, self.camera_width, self.camera_height, 3), device=self.device, dtype=torch.float)
def set_up_scene(self, scene) -> None:
self.get_cartpole()
RLTask.set_up_scene(self, scene)
# start replicator to capture image data
self.rep.orchestrator._orchestrator._is_started = True
# set up cameras
self.render_products = []
env_pos = self._env_pos.cpu()
for i in range(self._num_envs):
camera = self.rep.create.camera(
position=(-4.2 + env_pos[i][0], env_pos[i][1], 3.0), look_at=(env_pos[i][0], env_pos[i][1], 2.55))
render_product = self.rep.create.render_product(camera, resolution=(self.camera_width, self.camera_height))
self.render_products.append(render_product)
# initialize pytorch writer for vectorized collection
self.pytorch_listener = self.PytorchListener()
self.pytorch_writer = self.rep.WriterRegistry.get("PytorchWriter")
self.pytorch_writer.initialize(listener=self.pytorch_listener, device="cuda")
self.pytorch_writer.attach(self.render_products)
self._cartpoles = ArticulationView(
prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False
)
scene.add(self._cartpoles)
return
def get_observations(self) -> dict:
dof_pos = self._cartpoles.get_joint_positions(clone=False)
dof_vel = self._cartpoles.get_joint_velocities(clone=False)
self.cart_pos = dof_pos[:, self._cart_dof_idx]
self.cart_vel = dof_vel[:, self._cart_dof_idx]
self.pole_pos = dof_pos[:, self._pole_dof_idx]
self.pole_vel = dof_vel[:, self._pole_dof_idx]
# retrieve RGB data from all render products
images = self.pytorch_listener.get_rgb_data()
if images is not None:
if self._export_images:
from torchvision.utils import save_image, make_grid
img = images/255
save_image(make_grid(img, nrow=2), 'cartpole_export.png')
self.obs_buf = torch.swapaxes(images, 1, 3).clone().float()/255.0
else:
print("Image tensor is NONE!")
return self.obs_buf
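# A minimal standalone sketch of the image post-processing in get_observations above
# (illustrative helper, not used by the task): the listener's batched RGB tensor is
# assumed to be (num_envs, 3, H, W) uint8, given the swap into the
# (num_envs, camera_width, camera_height, 3) float buffer allocated in cleanup.
def _images_to_obs(images: torch.Tensor) -> torch.Tensor:
    # (N, 3, H, W) uint8 in [0, 255] -> (N, W, H, 3) float32 in [0, 1]
    return torch.swapaxes(images, 1, 3).clone().float() / 255.0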
| 5,824 | Python | 41.518248 | 119 | 0.67342 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/anymal_terrain.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.torch.rotations import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.anymal import Anymal
from omniisaacgymenvs.robots.articulations.views.anymal_view import AnymalView
from omniisaacgymenvs.tasks.utils.anymal_terrain_generator import *
from omniisaacgymenvs.utils.terrain_utils.terrain_utils import *
from pxr import UsdLux, UsdPhysics
class AnymalTerrainTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.height_samples = None
self.custom_origins = False
self.init_done = False
self._env_spacing = 0.0
self._num_observations = 188
self._num_actions = 12
self.update_config(sim_config)
RLTask.__init__(self, name, env)
self.height_points = self.init_height_points()
self.measured_heights = None
# joint positions offsets
self.default_dof_pos = torch.zeros(
(self.num_envs, 12), dtype=torch.float, device=self.device, requires_grad=False
)
# reward episode sums
torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False)
self.episode_sums = {
"lin_vel_xy": torch_zeros(),
"lin_vel_z": torch_zeros(),
"ang_vel_z": torch_zeros(),
"ang_vel_xy": torch_zeros(),
"orient": torch_zeros(),
"torques": torch_zeros(),
"joint_acc": torch_zeros(),
"base_height": torch_zeros(),
"air_time": torch_zeros(),
"collision": torch_zeros(),
"stumble": torch_zeros(),
"action_rate": torch_zeros(),
"hip": torch_zeros(),
}
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
# normalization
self.lin_vel_scale = self._task_cfg["env"]["learn"]["linearVelocityScale"]
self.ang_vel_scale = self._task_cfg["env"]["learn"]["angularVelocityScale"]
self.dof_pos_scale = self._task_cfg["env"]["learn"]["dofPositionScale"]
self.dof_vel_scale = self._task_cfg["env"]["learn"]["dofVelocityScale"]
self.height_meas_scale = self._task_cfg["env"]["learn"]["heightMeasurementScale"]
self.action_scale = self._task_cfg["env"]["control"]["actionScale"]
# reward scales
self.rew_scales = {}
self.rew_scales["termination"] = self._task_cfg["env"]["learn"]["terminalReward"]
self.rew_scales["lin_vel_xy"] = self._task_cfg["env"]["learn"]["linearVelocityXYRewardScale"]
self.rew_scales["lin_vel_z"] = self._task_cfg["env"]["learn"]["linearVelocityZRewardScale"]
self.rew_scales["ang_vel_z"] = self._task_cfg["env"]["learn"]["angularVelocityZRewardScale"]
self.rew_scales["ang_vel_xy"] = self._task_cfg["env"]["learn"]["angularVelocityXYRewardScale"]
self.rew_scales["orient"] = self._task_cfg["env"]["learn"]["orientationRewardScale"]
self.rew_scales["torque"] = self._task_cfg["env"]["learn"]["torqueRewardScale"]
self.rew_scales["joint_acc"] = self._task_cfg["env"]["learn"]["jointAccRewardScale"]
self.rew_scales["base_height"] = self._task_cfg["env"]["learn"]["baseHeightRewardScale"]
self.rew_scales["action_rate"] = self._task_cfg["env"]["learn"]["actionRateRewardScale"]
self.rew_scales["hip"] = self._task_cfg["env"]["learn"]["hipRewardScale"]
self.rew_scales["fallen_over"] = self._task_cfg["env"]["learn"]["fallenOverRewardScale"]
# command ranges
self.command_x_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["linear_x"]
self.command_y_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["linear_y"]
self.command_yaw_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["yaw"]
# base init state
pos = self._task_cfg["env"]["baseInitState"]["pos"]
rot = self._task_cfg["env"]["baseInitState"]["rot"]
v_lin = self._task_cfg["env"]["baseInitState"]["vLinear"]
v_ang = self._task_cfg["env"]["baseInitState"]["vAngular"]
self.base_init_state = pos + rot + v_lin + v_ang
# default joint positions
self.named_default_joint_angles = self._task_cfg["env"]["defaultJointAngles"]
# other
self.decimation = self._task_cfg["env"]["control"]["decimation"]
self.dt = self.decimation * self._task_cfg["sim"]["dt"]
self.max_episode_length_s = self._task_cfg["env"]["learn"]["episodeLength_s"]
self.max_episode_length = int(self.max_episode_length_s / self.dt + 0.5)
self.push_interval = int(self._task_cfg["env"]["learn"]["pushInterval_s"] / self.dt + 0.5)
self.Kp = self._task_cfg["env"]["control"]["stiffness"]
self.Kd = self._task_cfg["env"]["control"]["damping"]
self.curriculum = self._task_cfg["env"]["terrain"]["curriculum"]
self.base_threshold = 0.2
self.knee_threshold = 0.1
for key in self.rew_scales.keys():
self.rew_scales[key] *= self.dt
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._task_cfg["sim"]["default_physics_material"]["static_friction"] = self._task_cfg["env"]["terrain"][
"staticFriction"
]
self._task_cfg["sim"]["default_physics_material"]["dynamic_friction"] = self._task_cfg["env"]["terrain"][
"dynamicFriction"
]
self._task_cfg["sim"]["default_physics_material"]["restitution"] = self._task_cfg["env"]["terrain"][
"restitution"
]
self._task_cfg["sim"]["add_ground_plane"] = False
def _get_noise_scale_vec(self, cfg):
noise_vec = torch.zeros_like(self.obs_buf[0])
self.add_noise = self._task_cfg["env"]["learn"]["addNoise"]
noise_level = self._task_cfg["env"]["learn"]["noiseLevel"]
noise_vec[:3] = self._task_cfg["env"]["learn"]["linearVelocityNoise"] * noise_level * self.lin_vel_scale
noise_vec[3:6] = self._task_cfg["env"]["learn"]["angularVelocityNoise"] * noise_level * self.ang_vel_scale
noise_vec[6:9] = self._task_cfg["env"]["learn"]["gravityNoise"] * noise_level
noise_vec[9:12] = 0.0 # commands
noise_vec[12:24] = self._task_cfg["env"]["learn"]["dofPositionNoise"] * noise_level * self.dof_pos_scale
noise_vec[24:36] = self._task_cfg["env"]["learn"]["dofVelocityNoise"] * noise_level * self.dof_vel_scale
noise_vec[36:176] = (
self._task_cfg["env"]["learn"]["heightMeasurementNoise"] * noise_level * self.height_meas_scale
)
noise_vec[176:188] = 0.0 # previous actions
return noise_vec
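# Layout of the 188-dim observation mirrored by this noise vector (see get_observations
# below): 3 base lin vel + 3 base ang vel + 3 projected gravity + 3 commands
# + 12 DOF positions + 12 DOF velocities + 140 height samples + 12 previous actions = 188.
# The 140 height samples come from the 14 x 10 grid built in init_height_points.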
def init_height_points(self):
# 1mx1.6m rectangle (without center line)
y = 0.1 * torch.tensor(
[-5, -4, -3, -2, -1, 1, 2, 3, 4, 5], device=self.device, requires_grad=False
) # 10-50cm on each side
x = 0.1 * torch.tensor(
[-8, -7, -6, -5, -4, -3, -2, 2, 3, 4, 5, 6, 7, 8], device=self.device, requires_grad=False
) # 20-80cm on each side
grid_x, grid_y = torch.meshgrid(x, y, indexing='ij')
self.num_height_points = grid_x.numel()
points = torch.zeros(self.num_envs, self.num_height_points, 3, device=self.device, requires_grad=False)
points[:, :, 0] = grid_x.flatten()
points[:, :, 1] = grid_y.flatten()
return points
def _create_trimesh(self, create_mesh=True):
self.terrain = Terrain(self._task_cfg["env"]["terrain"], num_robots=self.num_envs)
vertices = self.terrain.vertices
triangles = self.terrain.triangles
position = torch.tensor([-self.terrain.border_size, -self.terrain.border_size, 0.0])
if create_mesh:
add_terrain_to_stage(stage=self._stage, vertices=vertices, triangles=triangles, position=position)
self.height_samples = (
torch.tensor(self.terrain.heightsamples).view(self.terrain.tot_rows, self.terrain.tot_cols).to(self.device)
)
def set_up_scene(self, scene) -> None:
self._stage = get_current_stage()
self.get_terrain()
self.get_anymal()
super().set_up_scene(scene, collision_filter_global_paths=["/World/terrain"])
self._anymals = AnymalView(
prim_paths_expr="/World/envs/.*/anymal", name="anymal_view", track_contact_forces=True
)
scene.add(self._anymals)
scene.add(self._anymals._knees)
scene.add(self._anymals._base)
def initialize_views(self, scene):
# initialize terrain variables even if we do not need to re-create the terrain mesh
self.get_terrain(create_mesh=False)
super().initialize_views(scene)
if scene.object_exists("anymal_view"):
scene.remove_object("anymal_view", registry_only=True)
if scene.object_exists("knees_view"):
scene.remove_object("knees_view", registry_only=True)
if scene.object_exists("base_view"):
scene.remove_object("base_view", registry_only=True)
self._anymals = AnymalView(
prim_paths_expr="/World/envs/.*/anymal", name="anymal_view", track_contact_forces=True
)
scene.add(self._anymals)
scene.add(self._anymals._knees)
scene.add(self._anymals._base)
def get_terrain(self, create_mesh=True):
self.env_origins = torch.zeros((self.num_envs, 3), device=self.device, requires_grad=False)
if not self.curriculum:
self._task_cfg["env"]["terrain"]["maxInitMapLevel"] = self._task_cfg["env"]["terrain"]["numLevels"] - 1
self.terrain_levels = torch.randint(
0, self._task_cfg["env"]["terrain"]["maxInitMapLevel"] + 1, (self.num_envs,), device=self.device
)
self.terrain_types = torch.randint(
0, self._task_cfg["env"]["terrain"]["numTerrains"], (self.num_envs,), device=self.device
)
self._create_trimesh(create_mesh=create_mesh)
self.terrain_origins = torch.from_numpy(self.terrain.env_origins).to(self.device).to(torch.float)
def get_anymal(self):
anymal_translation = torch.tensor([0.0, 0.0, 0.66])
anymal_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0])
anymal = Anymal(
prim_path=self.default_zero_env_path + "/anymal",
name="anymal",
translation=anymal_translation,
orientation=anymal_orientation,
)
self._sim_config.apply_articulation_settings(
"anymal", get_prim_at_path(anymal.prim_path), self._sim_config.parse_actor_config("anymal")
)
anymal.set_anymal_properties(self._stage, anymal.prim)
anymal.prepare_contacts(self._stage, anymal.prim)
self.dof_names = anymal.dof_names
for i in range(self.num_actions):
name = self.dof_names[i]
angle = self.named_default_joint_angles[name]
self.default_dof_pos[:, i] = angle
def post_reset(self):
self.base_init_state = torch.tensor(
self.base_init_state, dtype=torch.float, device=self.device, requires_grad=False
)
self.timeout_buf = torch.zeros(self.num_envs, device=self.device, dtype=torch.long)
# initialize some data used later on
self.up_axis_idx = 2
self.common_step_counter = 0
self.extras = {}
self.noise_scale_vec = self._get_noise_scale_vec(self._task_cfg)
self.commands = torch.zeros(
self.num_envs, 4, dtype=torch.float, device=self.device, requires_grad=False
) # x vel, y vel, yaw vel, heading
self.commands_scale = torch.tensor(
[self.lin_vel_scale, self.lin_vel_scale, self.ang_vel_scale],
device=self.device,
requires_grad=False,
)
self.gravity_vec = torch.tensor(
get_axis_params(-1.0, self.up_axis_idx), dtype=torch.float, device=self.device
).repeat((self.num_envs, 1))
self.forward_vec = torch.tensor([1.0, 0.0, 0.0], dtype=torch.float, device=self.device).repeat(
(self.num_envs, 1)
)
self.torques = torch.zeros(
self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False
)
self.actions = torch.zeros(
self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False
)
self.last_actions = torch.zeros(
self.num_envs, self.num_actions, dtype=torch.float, device=self.device, requires_grad=False
)
self.feet_air_time = torch.zeros(self.num_envs, 4, dtype=torch.float, device=self.device, requires_grad=False)
self.last_dof_vel = torch.zeros((self.num_envs, 12), dtype=torch.float, device=self.device, requires_grad=False)
for i in range(self.num_envs):
self.env_origins[i] = self.terrain_origins[self.terrain_levels[i], self.terrain_types[i]]
self.num_dof = self._anymals.num_dof
self.dof_pos = torch.zeros((self.num_envs, self.num_dof), dtype=torch.float, device=self.device)
self.dof_vel = torch.zeros((self.num_envs, self.num_dof), dtype=torch.float, device=self.device)
self.base_pos = torch.zeros((self.num_envs, 3), dtype=torch.float, device=self.device)
self.base_quat = torch.zeros((self.num_envs, 4), dtype=torch.float, device=self.device)
self.base_velocities = torch.zeros((self.num_envs, 6), dtype=torch.float, device=self.device)
self.knee_pos = torch.zeros((self.num_envs * 4, 3), dtype=torch.float, device=self.device)
self.knee_quat = torch.zeros((self.num_envs * 4, 4), dtype=torch.float, device=self.device)
indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
self.init_done = True
def reset_idx(self, env_ids):
indices = env_ids.to(dtype=torch.int32)
positions_offset = torch_rand_float(0.5, 1.5, (len(env_ids), self.num_dof), device=self.device)
velocities = torch_rand_float(-0.1, 0.1, (len(env_ids), self.num_dof), device=self.device)
self.dof_pos[env_ids] = self.default_dof_pos[env_ids] * positions_offset
self.dof_vel[env_ids] = velocities
self.update_terrain_level(env_ids)
self.base_pos[env_ids] = self.base_init_state[0:3]
self.base_pos[env_ids, 0:3] += self.env_origins[env_ids]
self.base_pos[env_ids, 0:2] += torch_rand_float(-0.5, 0.5, (len(env_ids), 2), device=self.device)
self.base_quat[env_ids] = self.base_init_state[3:7]
self.base_velocities[env_ids] = self.base_init_state[7:]
self._anymals.set_world_poses(
positions=self.base_pos[env_ids].clone(), orientations=self.base_quat[env_ids].clone(), indices=indices
)
self._anymals.set_velocities(velocities=self.base_velocities[env_ids].clone(), indices=indices)
self._anymals.set_joint_positions(positions=self.dof_pos[env_ids].clone(), indices=indices)
self._anymals.set_joint_velocities(velocities=self.dof_vel[env_ids].clone(), indices=indices)
self.commands[env_ids, 0] = torch_rand_float(
self.command_x_range[0], self.command_x_range[1], (len(env_ids), 1), device=self.device
).squeeze()
self.commands[env_ids, 1] = torch_rand_float(
self.command_y_range[0], self.command_y_range[1], (len(env_ids), 1), device=self.device
).squeeze()
self.commands[env_ids, 3] = torch_rand_float(
self.command_yaw_range[0], self.command_yaw_range[1], (len(env_ids), 1), device=self.device
).squeeze()
self.commands[env_ids] *= (torch.norm(self.commands[env_ids, :2], dim=1) > 0.25).unsqueeze(
1
) # set small commands to zero
self.last_actions[env_ids] = 0.0
self.last_dof_vel[env_ids] = 0.0
self.feet_air_time[env_ids] = 0.0
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 1
# fill extras
self.extras["episode"] = {}
for key in self.episode_sums.keys():
self.extras["episode"]["rew_" + key] = (
torch.mean(self.episode_sums[key][env_ids]) / self.max_episode_length_s
)
self.episode_sums[key][env_ids] = 0.0
self.extras["episode"]["terrain_level"] = torch.mean(self.terrain_levels.float())
def update_terrain_level(self, env_ids):
if not self.init_done or not self.curriculum:
# do not change on initial reset
return
root_pos, _ = self._anymals.get_world_poses(clone=False)
distance = torch.norm(root_pos[env_ids, :2] - self.env_origins[env_ids, :2], dim=1)
self.terrain_levels[env_ids] -= 1 * (
distance < torch.norm(self.commands[env_ids, :2]) * self.max_episode_length_s * 0.25
)
self.terrain_levels[env_ids] += 1 * (distance > self.terrain.env_length / 2)
self.terrain_levels[env_ids] = torch.clip(self.terrain_levels[env_ids], 0) % self.terrain.env_rows
self.env_origins[env_ids] = self.terrain_origins[self.terrain_levels[env_ids], self.terrain_types[env_ids]]
def refresh_dof_state_tensors(self):
self.dof_pos = self._anymals.get_joint_positions(clone=False)
self.dof_vel = self._anymals.get_joint_velocities(clone=False)
def refresh_body_state_tensors(self):
self.base_pos, self.base_quat = self._anymals.get_world_poses(clone=False)
self.base_velocities = self._anymals.get_velocities(clone=False)
self.knee_pos, self.knee_quat = self._anymals._knees.get_world_poses(clone=False)
def pre_physics_step(self, actions):
if not self._env._world.is_playing():
return
self.actions = actions.clone().to(self.device)
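# Decimated PD position control: each policy step runs `decimation` physics steps,
# each applying torques Kp * (action_scale * a + q_default - q) - Kd * q_dot,
# clipped to [-80, 80].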
for i in range(self.decimation):
if self._env._world.is_playing():
torques = torch.clip(
self.Kp * (self.action_scale * self.actions + self.default_dof_pos - self.dof_pos)
- self.Kd * self.dof_vel,
-80.0,
80.0,
)
self._anymals.set_joint_efforts(torques)
self.torques = torques
SimulationContext.step(self._env._world, render=False)
self.refresh_dof_state_tensors()
def post_physics_step(self):
self.progress_buf[:] += 1
if self._env._world.is_playing():
self.refresh_dof_state_tensors()
self.refresh_body_state_tensors()
self.common_step_counter += 1
if self.common_step_counter % self.push_interval == 0:
self.push_robots()
# prepare quantities
self.base_lin_vel = quat_rotate_inverse(self.base_quat, self.base_velocities[:, 0:3])
self.base_ang_vel = quat_rotate_inverse(self.base_quat, self.base_velocities[:, 3:6])
self.projected_gravity = quat_rotate_inverse(self.base_quat, self.gravity_vec)
forward = quat_apply(self.base_quat, self.forward_vec)
heading = torch.atan2(forward[:, 1], forward[:, 0])
self.commands[:, 2] = torch.clip(0.5 * wrap_to_pi(self.commands[:, 3] - heading), -1.0, 1.0)
self.check_termination()
self.get_states()
self.calculate_metrics()
env_ids = self.reset_buf.nonzero(as_tuple=False).flatten()
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.get_observations()
if self.add_noise:
self.obs_buf += (2 * torch.rand_like(self.obs_buf) - 1) * self.noise_scale_vec
self.last_actions[:] = self.actions[:]
self.last_dof_vel[:] = self.dof_vel[:]
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def push_robots(self):
self.base_velocities[:, 0:2] = torch_rand_float(
-1.0, 1.0, (self.num_envs, 2), device=self.device
) # lin vel x/y
self._anymals.set_velocities(self.base_velocities)
def check_termination(self):
self.timeout_buf = torch.where(
self.progress_buf >= self.max_episode_length - 1,
torch.ones_like(self.timeout_buf),
torch.zeros_like(self.timeout_buf),
)
knee_contact = (
torch.norm(self._anymals._knees.get_net_contact_forces(clone=False).view(self._num_envs, 4, 3), dim=-1)
> 1.0
)
self.has_fallen = (torch.norm(self._anymals._base.get_net_contact_forces(clone=False), dim=1) > 1.0) | (
torch.sum(knee_contact, dim=-1) > 1.0
)
self.reset_buf = self.has_fallen.clone()
self.reset_buf = torch.where(self.timeout_buf.bool(), torch.ones_like(self.reset_buf), self.reset_buf)
def calculate_metrics(self):
# velocity tracking reward
lin_vel_error = torch.sum(torch.square(self.commands[:, :2] - self.base_lin_vel[:, :2]), dim=1)
ang_vel_error = torch.square(self.commands[:, 2] - self.base_ang_vel[:, 2])
rew_lin_vel_xy = torch.exp(-lin_vel_error / 0.25) * self.rew_scales["lin_vel_xy"]
rew_ang_vel_z = torch.exp(-ang_vel_error / 0.25) * self.rew_scales["ang_vel_z"]
# other base velocity penalties
rew_lin_vel_z = torch.square(self.base_lin_vel[:, 2]) * self.rew_scales["lin_vel_z"]
rew_ang_vel_xy = torch.sum(torch.square(self.base_ang_vel[:, :2]), dim=1) * self.rew_scales["ang_vel_xy"]
# orientation penalty
rew_orient = torch.sum(torch.square(self.projected_gravity[:, :2]), dim=1) * self.rew_scales["orient"]
# base height penalty
rew_base_height = torch.square(self.base_pos[:, 2] - 0.52) * self.rew_scales["base_height"]
# torque penalty
rew_torque = torch.sum(torch.square(self.torques), dim=1) * self.rew_scales["torque"]
# joint acc penalty
rew_joint_acc = torch.sum(torch.square(self.last_dof_vel - self.dof_vel), dim=1) * self.rew_scales["joint_acc"]
# fallen over penalty
rew_fallen_over = self.has_fallen * self.rew_scales["fallen_over"]
# action rate penalty
rew_action_rate = (
torch.sum(torch.square(self.last_actions - self.actions), dim=1) * self.rew_scales["action_rate"]
)
# cosmetic penalty for hip motion
rew_hip = (
torch.sum(torch.abs(self.dof_pos[:, 0:4] - self.default_dof_pos[:, 0:4]), dim=1) * self.rew_scales["hip"]
)
# total reward
self.rew_buf = (
rew_lin_vel_xy
+ rew_ang_vel_z
+ rew_lin_vel_z
+ rew_ang_vel_xy
+ rew_orient
+ rew_base_height
+ rew_torque
+ rew_joint_acc
+ rew_action_rate
+ rew_hip
+ rew_fallen_over
)
self.rew_buf = torch.clip(self.rew_buf, min=0.0, max=None)
# add termination reward
self.rew_buf += self.rew_scales["termination"] * self.reset_buf * ~self.timeout_buf
# log episode reward sums
self.episode_sums["lin_vel_xy"] += rew_lin_vel_xy
self.episode_sums["ang_vel_z"] += rew_ang_vel_z
self.episode_sums["lin_vel_z"] += rew_lin_vel_z
self.episode_sums["ang_vel_xy"] += rew_ang_vel_xy
self.episode_sums["orient"] += rew_orient
self.episode_sums["torques"] += rew_torque
self.episode_sums["joint_acc"] += rew_joint_acc
self.episode_sums["action_rate"] += rew_action_rate
self.episode_sums["base_height"] += rew_base_height
self.episode_sums["hip"] += rew_hip
def get_observations(self):
self.measured_heights = self.get_heights()
heights = (
torch.clip(self.base_pos[:, 2].unsqueeze(1) - 0.5 - self.measured_heights, -1, 1.0) * self.height_meas_scale
)
self.obs_buf = torch.cat(
(
self.base_lin_vel * self.lin_vel_scale,
self.base_ang_vel * self.ang_vel_scale,
self.projected_gravity,
self.commands[:, :3] * self.commands_scale,
self.dof_pos * self.dof_pos_scale,
self.dof_vel * self.dof_vel_scale,
heights,
self.actions,
),
dim=-1,
)
def get_ground_heights_below_knees(self):
points = self.knee_pos.reshape(self.num_envs, 4, 3)
points += self.terrain.border_size
points = (points / self.terrain.horizontal_scale).long()
px = points[:, :, 0].view(-1)
py = points[:, :, 1].view(-1)
px = torch.clip(px, 0, self.height_samples.shape[0] - 2)
py = torch.clip(py, 0, self.height_samples.shape[1] - 2)
heights1 = self.height_samples[px, py]
heights2 = self.height_samples[px + 1, py + 1]
heights = torch.min(heights1, heights2)
return heights.view(self.num_envs, -1) * self.terrain.vertical_scale
def get_ground_heights_below_base(self):
points = self.base_pos.reshape(self.num_envs, 1, 3)
points += self.terrain.border_size
points = (points / self.terrain.horizontal_scale).long()
px = points[:, :, 0].view(-1)
py = points[:, :, 1].view(-1)
px = torch.clip(px, 0, self.height_samples.shape[0] - 2)
py = torch.clip(py, 0, self.height_samples.shape[1] - 2)
heights1 = self.height_samples[px, py]
heights2 = self.height_samples[px + 1, py + 1]
heights = torch.min(heights1, heights2)
return heights.view(self.num_envs, -1) * self.terrain.vertical_scale
def get_heights(self, env_ids=None):
if env_ids:
points = quat_apply_yaw(
self.base_quat[env_ids].repeat(1, self.num_height_points), self.height_points[env_ids]
) + (self.base_pos[env_ids, 0:3]).unsqueeze(1)
else:
points = quat_apply_yaw(self.base_quat.repeat(1, self.num_height_points), self.height_points) + (
self.base_pos[:, 0:3]
).unsqueeze(1)
points += self.terrain.border_size
points = (points / self.terrain.horizontal_scale).long()
px = points[:, :, 0].view(-1)
py = points[:, :, 1].view(-1)
px = torch.clip(px, 0, self.height_samples.shape[0] - 2)
py = torch.clip(py, 0, self.height_samples.shape[1] - 2)
heights1 = self.height_samples[px, py]
heights2 = self.height_samples[px + 1, py + 1]
heights = torch.min(heights1, heights2)
return heights.view(self.num_envs, -1) * self.terrain.vertical_scale
@torch.jit.script
def quat_apply_yaw(quat, vec):
quat_yaw = quat.clone().view(-1, 4)
quat_yaw[:, 1:3] = 0.0
quat_yaw = normalize(quat_yaw)
return quat_apply(quat_yaw, vec)
@torch.jit.script
def wrap_to_pi(angles):
angles %= 2 * np.pi
angles -= 2 * np.pi * (angles > np.pi)
return angles
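# wrap_to_pi maps arbitrary angles into (-pi, pi]; e.g. wrap_to_pi(torch.tensor([3.5]))
# gives roughly -2.78, since 3.5 rad exceeds pi and is shifted down by 2 * pi.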
def get_axis_params(value, axis_idx, x_value=0.0, dtype=float, n_dims=3):
"""construct arguments to `Vec` according to axis index."""
zs = np.zeros((n_dims,))
assert axis_idx < n_dims, "the axis dim should be within the vector dimensions"
zs[axis_idx] = 1.0
params = np.where(zs == 1.0, value, zs)
params[0] = x_value
return list(params.astype(dtype))
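# A minimal standalone sketch of the heightfield lookup shared by get_heights and the
# get_ground_heights_below_* methods above (illustrative helper, not used by the task):
# points are shifted by the terrain border, discretized by the horizontal scale, clamped
# to the heightfield bounds, and the smaller of two neighboring samples is taken as a
# conservative estimate before converting back to meters with the vertical scale.
def _lookup_heights(points, height_samples, border_size, horizontal_scale, vertical_scale):
    # points: (..., 3) positions; height_samples: (rows, cols) tensor of raw height units
    p = ((points + border_size) / horizontal_scale).long()
    px = torch.clip(p[..., 0].reshape(-1), 0, height_samples.shape[0] - 2)
    py = torch.clip(p[..., 1].reshape(-1), 0, height_samples.shape[1] - 2)
    heights = torch.min(height_samples[px, py], height_samples[px + 1, py + 1])
    return heights * vertical_scale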
| 29,337 | Python | 45.568254 | 120 | 0.609128 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/shadow_hand.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.shadow_hand import ShadowHand
from omniisaacgymenvs.robots.articulations.views.shadow_hand_view import ShadowHandView
from omniisaacgymenvs.tasks.shared.in_hand_manipulation import InHandManipulationTask
class ShadowHandTask(InHandManipulationTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
InHandManipulationTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self.object_type = self._task_cfg["env"]["objectType"]
assert self.object_type in ["block"]
self.obs_type = self._task_cfg["env"]["observationType"]
if self.obs_type not in ["openai", "full_no_vel", "full", "full_state"]:
raise Exception(
"Unknown type of observations!\nobservationType should be one of: [openai, full_no_vel, full, full_state]"
)
print("Obs type:", self.obs_type)
self.num_obs_dict = {
"openai": 42,
"full_no_vel": 77,
"full": 157,
"full_state": 187,
}
self.asymmetric_obs = self._task_cfg["env"]["asymmetric_observations"]
self.use_vel_obs = False
self.fingertip_obs = True
self.fingertips = [
"robot0:ffdistal",
"robot0:mfdistal",
"robot0:rfdistal",
"robot0:lfdistal",
"robot0:thdistal",
]
self.num_fingertips = len(self.fingertips)
self.object_scale = torch.tensor([1.0, 1.0, 1.0])
self.force_torque_obs_scale = 10.0
num_states = 0
if self.asymmetric_obs:
num_states = 187
self._num_observations = self.num_obs_dict[self.obs_type]
self._num_actions = 20
self._num_states = num_states
InHandManipulationTask.update_config(self)
def get_starting_positions(self):
self.hand_start_translation = torch.tensor([0.0, 0.0, 0.5], device=self.device)
self.hand_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
self.pose_dy, self.pose_dz = -0.39, 0.10
def get_hand(self):
shadow_hand = ShadowHand(
prim_path=self.default_zero_env_path + "/shadow_hand",
name="shadow_hand",
translation=self.hand_start_translation,
orientation=self.hand_start_orientation,
)
self._sim_config.apply_articulation_settings(
"shadow_hand",
get_prim_at_path(shadow_hand.prim_path),
self._sim_config.parse_actor_config("shadow_hand"),
)
shadow_hand.set_shadow_hand_properties(stage=self._stage, shadow_hand_prim=shadow_hand.prim)
shadow_hand.set_motor_control_mode(stage=self._stage, shadow_hand_path=shadow_hand.prim_path)
def get_hand_view(self, scene):
hand_view = ShadowHandView(prim_paths_expr="/World/envs/.*/shadow_hand", name="shadow_hand_view")
scene.add(hand_view._fingers)
return hand_view
def get_observations(self):
self.get_object_goal_observations()
self.fingertip_pos, self.fingertip_rot = self._hands._fingers.get_world_poses(clone=False)
self.fingertip_pos -= self._env_pos.repeat((1, self.num_fingertips)).reshape(
self.num_envs * self.num_fingertips, 3
)
self.fingertip_velocities = self._hands._fingers.get_velocities(clone=False)
self.hand_dof_pos = self._hands.get_joint_positions(clone=False)
self.hand_dof_vel = self._hands.get_joint_velocities(clone=False)
if self.obs_type == "full_state" or self.asymmetric_obs:
self.vec_sensor_tensor = self._hands.get_measured_joint_forces(
joint_indices=self._hands._sensor_indices
).view(self._num_envs, -1)
if self.obs_type == "openai":
self.compute_fingertip_observations(True)
elif self.obs_type == "full_no_vel":
self.compute_full_observations(True)
elif self.obs_type == "full":
self.compute_full_observations()
elif self.obs_type == "full_state":
self.compute_full_state(False)
else:
print("Unkown observations type!")
if self.asymmetric_obs:
self.compute_full_state(True)
observations = {self._hands.name: {"obs_buf": self.obs_buf}}
return observations
def compute_fingertip_observations(self, no_vel=False):
if no_vel:
# Per https://arxiv.org/pdf/1808.00177.pdf Table 2
# Fingertip positions
# Object Position, but not orientation
# Relative target orientation
# 3*self.num_fingertips = 15
self.obs_buf[:, 0:15] = self.fingertip_pos.reshape(self.num_envs, 15)
self.obs_buf[:, 15:18] = self.object_pos
self.obs_buf[:, 18:22] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, 22:42] = self.actions
else:
# 13*self.num_fingertips = 65
self.obs_buf[:, 0:65] = self.fingertip_state.reshape(self.num_envs, 65)
self.obs_buf[:, 0:15] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips)
self.obs_buf[:, 15:35] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips)
self.obs_buf[:, 35:65] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips)
self.obs_buf[:, 65:68] = self.object_pos
self.obs_buf[:, 68:72] = self.object_rot
self.obs_buf[:, 72:75] = self.object_linvel
self.obs_buf[:, 75:78] = self.vel_obs_scale * self.object_angvel
self.obs_buf[:, 78:81] = self.goal_pos
self.obs_buf[:, 81:85] = self.goal_rot
self.obs_buf[:, 85:89] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, 89:109] = self.actions
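# "openai" observation layout (42 dims, matching num_obs_dict["openai"]): 15 fingertip
# positions + 3 object position + 4 object-to-goal rotation quaternion + 20 previous
# actions = 42.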
def compute_full_observations(self, no_vel=False):
if no_vel:
self.obs_buf[:, 0 : self.num_hand_dofs] = unscale(
self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
)
self.obs_buf[:, 24:27] = self.object_pos
self.obs_buf[:, 27:31] = self.object_rot
self.obs_buf[:, 31:34] = self.goal_pos
self.obs_buf[:, 34:38] = self.goal_rot
self.obs_buf[:, 38:42] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, 42:57] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips)
self.obs_buf[:, 57:77] = self.actions
else:
self.obs_buf[:, 0 : self.num_hand_dofs] = unscale(
self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
)
self.obs_buf[:, self.num_hand_dofs : 2 * self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel
self.obs_buf[:, 48:51] = self.object_pos
self.obs_buf[:, 51:55] = self.object_rot
self.obs_buf[:, 55:58] = self.object_linvel
self.obs_buf[:, 58:61] = self.vel_obs_scale * self.object_angvel
self.obs_buf[:, 61:64] = self.goal_pos
self.obs_buf[:, 64:68] = self.goal_rot
self.obs_buf[:, 68:72] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
# (7+6)*self.num_fingertips = 65
self.obs_buf[:, 72:87] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips)
self.obs_buf[:, 87:107] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips)
self.obs_buf[:, 107:137] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips)
self.obs_buf[:, 137:157] = self.actions
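# Layout checks against num_obs_dict: "full_no_vel" packs 24 DOF positions + 3 + 4 object
# pose + 3 + 4 goal pose + 4 rotation difference + 15 fingertip positions + 20 actions = 77;
# "full" adds 24 DOF velocities, object linear/angular velocity (3 + 3), fingertip
# rotations (20) and fingertip velocities (30), giving 157.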
def compute_full_state(self, asymm_obs=False):
if asymm_obs:
self.states_buf[:, 0 : self.num_hand_dofs] = unscale(
self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
)
self.states_buf[:, self.num_hand_dofs : 2 * self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel
# self.states_buf[:, 2*self.num_hand_dofs:3*self.num_hand_dofs] = self.force_torque_obs_scale * self.dof_force_tensor
obj_obs_start = 2 * self.num_hand_dofs # 48
self.states_buf[:, obj_obs_start : obj_obs_start + 3] = self.object_pos
self.states_buf[:, obj_obs_start + 3 : obj_obs_start + 7] = self.object_rot
self.states_buf[:, obj_obs_start + 7 : obj_obs_start + 10] = self.object_linvel
self.states_buf[:, obj_obs_start + 10 : obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel
goal_obs_start = obj_obs_start + 13 # 61
self.states_buf[:, goal_obs_start : goal_obs_start + 3] = self.goal_pos
self.states_buf[:, goal_obs_start + 3 : goal_obs_start + 7] = self.goal_rot
self.states_buf[:, goal_obs_start + 7 : goal_obs_start + 11] = quat_mul(
self.object_rot, quat_conjugate(self.goal_rot)
)
# fingertip observations, state(pose and vel) + force-torque sensors
num_ft_states = 13 * self.num_fingertips # 65
num_ft_force_torques = 6 * self.num_fingertips # 30
fingertip_obs_start = goal_obs_start + 11 # 72
self.states_buf[
:, fingertip_obs_start : fingertip_obs_start + 3 * self.num_fingertips
] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips)
self.states_buf[
:, fingertip_obs_start + 3 * self.num_fingertips : fingertip_obs_start + 7 * self.num_fingertips
] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips)
self.states_buf[
:, fingertip_obs_start + 7 * self.num_fingertips : fingertip_obs_start + 13 * self.num_fingertips
] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips)
self.states_buf[
:, fingertip_obs_start + num_ft_states : fingertip_obs_start + num_ft_states + num_ft_force_torques
] = (self.force_torque_obs_scale * self.vec_sensor_tensor)
# obs_end = 72 + 65 + 30 = 167
# obs_total = obs_end + num_actions = 187
obs_end = fingertip_obs_start + num_ft_states + num_ft_force_torques
self.states_buf[:, obs_end : obs_end + self.num_actions] = self.actions
else:
self.obs_buf[:, 0 : self.num_hand_dofs] = unscale(
self.hand_dof_pos, self.hand_dof_lower_limits, self.hand_dof_upper_limits
)
self.obs_buf[:, self.num_hand_dofs : 2 * self.num_hand_dofs] = self.vel_obs_scale * self.hand_dof_vel
self.obs_buf[:, 2 * self.num_hand_dofs : 3 * self.num_hand_dofs] = (
self.force_torque_obs_scale * self.dof_force_tensor
)
            obj_obs_start = 3 * self.num_hand_dofs  # 72
self.obs_buf[:, obj_obs_start : obj_obs_start + 3] = self.object_pos
self.obs_buf[:, obj_obs_start + 3 : obj_obs_start + 7] = self.object_rot
self.obs_buf[:, obj_obs_start + 7 : obj_obs_start + 10] = self.object_linvel
self.obs_buf[:, obj_obs_start + 10 : obj_obs_start + 13] = self.vel_obs_scale * self.object_angvel
            goal_obs_start = obj_obs_start + 13  # 85
self.obs_buf[:, goal_obs_start : goal_obs_start + 3] = self.goal_pos
self.obs_buf[:, goal_obs_start + 3 : goal_obs_start + 7] = self.goal_rot
self.obs_buf[:, goal_obs_start + 7 : goal_obs_start + 11] = quat_mul(
self.object_rot, quat_conjugate(self.goal_rot)
)
# fingertip observations, state(pose and vel) + force-torque sensors
num_ft_states = 13 * self.num_fingertips # 65
num_ft_force_torques = 6 * self.num_fingertips # 30
            fingertip_obs_start = goal_obs_start + 11  # 96
self.obs_buf[
:, fingertip_obs_start : fingertip_obs_start + 3 * self.num_fingertips
] = self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips)
self.obs_buf[
:, fingertip_obs_start + 3 * self.num_fingertips : fingertip_obs_start + 7 * self.num_fingertips
] = self.fingertip_rot.reshape(self.num_envs, 4 * self.num_fingertips)
self.obs_buf[
:, fingertip_obs_start + 7 * self.num_fingertips : fingertip_obs_start + 13 * self.num_fingertips
] = self.fingertip_velocities.reshape(self.num_envs, 6 * self.num_fingertips)
self.obs_buf[
:, fingertip_obs_start + num_ft_states : fingertip_obs_start + num_ft_states + num_ft_force_torques
] = (self.force_torque_obs_scale * self.vec_sensor_tensor)
            # obs_end = 96 + 65 + 30 = 191
            # obs_total = obs_end + num_actions = 211
obs_end = fingertip_obs_start + num_ft_states + num_ft_force_torques
self.obs_buf[:, obs_end : obs_end + self.num_actions] = self.actions
| 15,107 | Python | 48.211726 | 129 | 0.609188 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/franka_cabinet.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import math
import numpy as np
import torch
from omni.isaac.cloner import Cloner
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.prims import RigidPrim, RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.core.utils.torch.transformations import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.cabinet import Cabinet
from omniisaacgymenvs.robots.articulations.franka import Franka
from omniisaacgymenvs.robots.articulations.views.cabinet_view import CabinetView
from omniisaacgymenvs.robots.articulations.views.franka_view import FrankaView
from pxr import Usd, UsdGeom
class FrankaCabinetTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self.distX_offset = 0.04
self.dt = 1 / 60.0
self._num_observations = 23
self._num_actions = 9
RLTask.__init__(self, name, env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["episodeLength"]
self.action_scale = self._task_cfg["env"]["actionScale"]
self.start_position_noise = self._task_cfg["env"]["startPositionNoise"]
self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"]
self.num_props = self._task_cfg["env"]["numProps"]
self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"]
self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"]
self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"]
self.open_reward_scale = self._task_cfg["env"]["openRewardScale"]
self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"]
self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"]
self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"]
def set_up_scene(self, scene) -> None:
self.get_franka()
self.get_cabinet()
if self.num_props > 0:
self.get_props()
super().set_up_scene(scene, filter_collisions=False)
self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view")
self._cabinets = CabinetView(prim_paths_expr="/World/envs/.*/cabinet", name="cabinet_view")
scene.add(self._frankas)
scene.add(self._frankas._hands)
scene.add(self._frankas._lfingers)
scene.add(self._frankas._rfingers)
scene.add(self._cabinets)
scene.add(self._cabinets._drawers)
if self.num_props > 0:
self._props = RigidPrimView(
prim_paths_expr="/World/envs/.*/prop/.*", name="prop_view", reset_xform_properties=False
)
scene.add(self._props)
self.init_data()
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("franka_view"):
scene.remove_object("franka_view", registry_only=True)
if scene.object_exists("hands_view"):
scene.remove_object("hands_view", registry_only=True)
if scene.object_exists("lfingers_view"):
scene.remove_object("lfingers_view", registry_only=True)
if scene.object_exists("rfingers_view"):
scene.remove_object("rfingers_view", registry_only=True)
if scene.object_exists("cabinet_view"):
scene.remove_object("cabinet_view", registry_only=True)
if scene.object_exists("drawers_view"):
scene.remove_object("drawers_view", registry_only=True)
if scene.object_exists("prop_view"):
scene.remove_object("prop_view", registry_only=True)
self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view")
self._cabinets = CabinetView(prim_paths_expr="/World/envs/.*/cabinet", name="cabinet_view")
scene.add(self._frankas)
scene.add(self._frankas._hands)
scene.add(self._frankas._lfingers)
scene.add(self._frankas._rfingers)
scene.add(self._cabinets)
scene.add(self._cabinets._drawers)
if self.num_props > 0:
self._props = RigidPrimView(
prim_paths_expr="/World/envs/.*/prop/.*", name="prop_view", reset_xform_properties=False
)
scene.add(self._props)
self.init_data()
def get_franka(self):
franka = Franka(prim_path=self.default_zero_env_path + "/franka", name="franka")
self._sim_config.apply_articulation_settings(
"franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka")
)
def get_cabinet(self):
cabinet = Cabinet(self.default_zero_env_path + "/cabinet", name="cabinet")
self._sim_config.apply_articulation_settings(
"cabinet", get_prim_at_path(cabinet.prim_path), self._sim_config.parse_actor_config("cabinet")
)
def get_props(self):
prop_cloner = Cloner()
drawer_pos = torch.tensor([0.0515, 0.0, 0.7172])
prop_color = torch.tensor([0.2, 0.4, 0.6])
props_per_row = int(math.ceil(math.sqrt(self.num_props)))
prop_size = 0.08
prop_spacing = 0.09
xmin = -0.5 * prop_spacing * (props_per_row - 1)
zmin = -0.5 * prop_spacing * (props_per_row - 1)
prop_count = 0
prop_pos = []
for j in range(props_per_row):
prop_up = zmin + j * prop_spacing
for k in range(props_per_row):
if prop_count >= self.num_props:
break
propx = xmin + k * prop_spacing
prop_pos.append([propx, prop_up, 0.0])
prop_count += 1
prop = DynamicCuboid(
prim_path=self.default_zero_env_path + "/prop/prop_0",
name="prop",
color=prop_color,
size=prop_size,
density=100.0,
)
self._sim_config.apply_articulation_settings(
"prop", get_prim_at_path(prop.prim_path), self._sim_config.parse_actor_config("prop")
)
prop_paths = [f"{self.default_zero_env_path}/prop/prop_{j}" for j in range(self.num_props)]
prop_cloner.clone(
source_prim_path=self.default_zero_env_path + "/prop/prop_0",
prim_paths=prop_paths,
positions=np.array(prop_pos) + drawer_pos.numpy(),
replicate_physics=False,
)
def init_data(self) -> None:
def get_env_local_pose(env_pos, xformable, device):
"""Compute pose in env-local coordinates"""
world_transform = xformable.ComputeLocalToWorldTransform(0)
world_pos = world_transform.ExtractTranslation()
world_quat = world_transform.ExtractRotationQuat()
px = world_pos[0] - env_pos[0]
py = world_pos[1] - env_pos[1]
pz = world_pos[2] - env_pos[2]
qx = world_quat.imaginary[0]
qy = world_quat.imaginary[1]
qz = world_quat.imaginary[2]
qw = world_quat.real
return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float)
stage = get_current_stage()
hand_pose = get_env_local_pose(
self._env_pos[0],
UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")),
self._device,
)
lfinger_pose = get_env_local_pose(
self._env_pos[0],
UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")),
self._device,
)
rfinger_pose = get_env_local_pose(
self._env_pos[0],
UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")),
self._device,
)
finger_pose = torch.zeros(7, device=self._device)
finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0
finger_pose[3:7] = lfinger_pose[3:7]
hand_pose_inv_rot, hand_pose_inv_pos = tf_inverse(hand_pose[3:7], hand_pose[0:3])
grasp_pose_axis = 1
franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine(
hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3]
)
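        # the grasp pose is the midpoint of the two fingers expressed in the hand frame,
        # offset 4 cm along the hand's local y-axis below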
franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device)
self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1))
self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1))
drawer_local_grasp_pose = torch.tensor([0.3, 0.01, 0.0, 1.0, 0.0, 0.0, 0.0], device=self._device)
self.drawer_local_grasp_pos = drawer_local_grasp_pose[0:3].repeat((self._num_envs, 1))
self.drawer_local_grasp_rot = drawer_local_grasp_pose[3:7].repeat((self._num_envs, 1))
self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat(
(self._num_envs, 1)
)
self.drawer_inward_axis = torch.tensor([-1, 0, 0], device=self._device, dtype=torch.float).repeat(
(self._num_envs, 1)
)
self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat(
(self._num_envs, 1)
)
self.drawer_up_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat(
(self._num_envs, 1)
)
self.franka_default_dof_pos = torch.tensor(
[1.157, -1.066, -0.155, -2.239, -1.841, 1.003, 0.469, 0.035, 0.035], device=self._device
)
self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device)
def get_observations(self) -> dict:
hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False)
drawer_pos, drawer_rot = self._cabinets._drawers.get_world_poses(clone=False)
franka_dof_pos = self._frankas.get_joint_positions(clone=False)
franka_dof_vel = self._frankas.get_joint_velocities(clone=False)
self.cabinet_dof_pos = self._cabinets.get_joint_positions(clone=False)
self.cabinet_dof_vel = self._cabinets.get_joint_velocities(clone=False)
self.franka_dof_pos = franka_dof_pos
(
self.franka_grasp_rot,
self.franka_grasp_pos,
self.drawer_grasp_rot,
self.drawer_grasp_pos,
) = self.compute_grasp_transforms(
hand_rot,
hand_pos,
self.franka_local_grasp_rot,
self.franka_local_grasp_pos,
drawer_rot,
drawer_pos,
self.drawer_local_grasp_rot,
self.drawer_local_grasp_pos,
)
self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False)
        self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._rfingers.get_world_poses(clone=False)
dof_pos_scaled = (
2.0
* (franka_dof_pos - self.franka_dof_lower_limits)
/ (self.franka_dof_upper_limits - self.franka_dof_lower_limits)
- 1.0
)
to_target = self.drawer_grasp_pos - self.franka_grasp_pos
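        # 23-dim observation: 9 scaled joint positions, 9 scaled joint velocities,
        # 3-dim vector from the grasp point to the drawer handle, drawer joint position and velocity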
self.obs_buf = torch.cat(
(
dof_pos_scaled,
franka_dof_vel * self.dof_vel_scale,
to_target,
self.cabinet_dof_pos[:, 3].unsqueeze(-1),
self.cabinet_dof_vel[:, 3].unsqueeze(-1),
),
dim=-1,
)
observations = {self._frankas.name: {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
self.actions = actions.clone().to(self._device)
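        # actions are joint-space velocity commands, integrated into position targets below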
targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale
self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits)
env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device)
self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32)
def reset_idx(self, env_ids):
indices = env_ids.to(dtype=torch.int32)
num_indices = len(indices)
# reset franka
pos = tensor_clamp(
self.franka_default_dof_pos.unsqueeze(0)
+ 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5),
self.franka_dof_lower_limits,
self.franka_dof_upper_limits,
)
dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
dof_pos[:, :] = pos
self.franka_dof_targets[env_ids, :] = pos
self.franka_dof_pos[env_ids, :] = pos
# reset cabinet
self._cabinets.set_joint_positions(
torch.zeros_like(self._cabinets.get_joint_positions(clone=False)[env_ids]), indices=indices
)
self._cabinets.set_joint_velocities(
torch.zeros_like(self._cabinets.get_joint_velocities(clone=False)[env_ids]), indices=indices
)
# reset props
if self.num_props > 0:
self._props.set_world_poses(
self.default_prop_pos[self.prop_indices[env_ids].flatten()],
self.default_prop_rot[self.prop_indices[env_ids].flatten()],
self.prop_indices[env_ids].flatten().to(torch.int32),
)
self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices)
self._frankas.set_joint_positions(dof_pos, indices=indices)
self._frankas.set_joint_velocities(dof_vel, indices=indices)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self):
self.num_franka_dofs = self._frankas.num_dof
self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device)
dof_limits = self._frankas.get_dof_limits()
self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device)
self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device)
self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits)
self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1
self.franka_dof_targets = torch.zeros(
(self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device
)
if self.num_props > 0:
self.default_prop_pos, self.default_prop_rot = self._props.get_world_poses()
self.prop_indices = torch.arange(self._num_envs * self.num_props, device=self._device).view(
self._num_envs, self.num_props
)
# randomize all envs
indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None:
self.rew_buf[:] = self.compute_franka_reward(
self.reset_buf,
self.progress_buf,
self.actions,
self.cabinet_dof_pos,
self.franka_grasp_pos,
self.drawer_grasp_pos,
self.franka_grasp_rot,
self.drawer_grasp_rot,
self.franka_lfinger_pos,
self.franka_rfinger_pos,
self.gripper_forward_axis,
self.drawer_inward_axis,
self.gripper_up_axis,
self.drawer_up_axis,
self._num_envs,
self.dist_reward_scale,
self.rot_reward_scale,
self.around_handle_reward_scale,
self.open_reward_scale,
self.finger_dist_reward_scale,
self.action_penalty_scale,
self.distX_offset,
self._max_episode_length,
self.franka_dof_pos,
self.finger_close_reward_scale,
)
def is_done(self) -> None:
# reset if drawer is open or max length reached
self.reset_buf = torch.where(self.cabinet_dof_pos[:, 3] > 0.39, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(
self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf
)
def compute_grasp_transforms(
self,
hand_rot,
hand_pos,
franka_local_grasp_rot,
franka_local_grasp_pos,
drawer_rot,
drawer_pos,
drawer_local_grasp_rot,
drawer_local_grasp_pos,
):
global_franka_rot, global_franka_pos = tf_combine(
hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos
)
global_drawer_rot, global_drawer_pos = tf_combine(
drawer_rot, drawer_pos, drawer_local_grasp_rot, drawer_local_grasp_pos
)
return global_franka_rot, global_franka_pos, global_drawer_rot, global_drawer_pos
def compute_franka_reward(
self,
reset_buf,
progress_buf,
actions,
cabinet_dof_pos,
franka_grasp_pos,
drawer_grasp_pos,
franka_grasp_rot,
drawer_grasp_rot,
franka_lfinger_pos,
franka_rfinger_pos,
gripper_forward_axis,
drawer_inward_axis,
gripper_up_axis,
drawer_up_axis,
num_envs,
dist_reward_scale,
rot_reward_scale,
around_handle_reward_scale,
open_reward_scale,
finger_dist_reward_scale,
action_penalty_scale,
distX_offset,
max_episode_length,
joint_positions,
finger_close_reward_scale,
):
        # type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, int, float, float, float, float, float, float, float, float, Tensor, float) -> Tensor
# distance from hand to the drawer
d = torch.norm(franka_grasp_pos - drawer_grasp_pos, p=2, dim=-1)
dist_reward = 1.0 / (1.0 + d**2)
dist_reward *= dist_reward
dist_reward = torch.where(d <= 0.02, dist_reward * 2, dist_reward)
axis1 = tf_vector(franka_grasp_rot, gripper_forward_axis)
axis2 = tf_vector(drawer_grasp_rot, drawer_inward_axis)
axis3 = tf_vector(franka_grasp_rot, gripper_up_axis)
axis4 = tf_vector(drawer_grasp_rot, drawer_up_axis)
dot1 = (
torch.bmm(axis1.view(num_envs, 1, 3), axis2.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1)
) # alignment of forward axis for gripper
dot2 = (
torch.bmm(axis3.view(num_envs, 1, 3), axis4.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1)
) # alignment of up axis for gripper
# reward for matching the orientation of the hand to the drawer (fingers wrapped)
rot_reward = 0.5 * (torch.sign(dot1) * dot1**2 + torch.sign(dot2) * dot2**2)
# bonus if left finger is above the drawer handle and right below
around_handle_reward = torch.zeros_like(rot_reward)
around_handle_reward = torch.where(
franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2],
torch.where(
franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2], around_handle_reward + 0.5, around_handle_reward
),
around_handle_reward,
)
# reward for distance of each finger from the drawer
finger_dist_reward = torch.zeros_like(rot_reward)
lfinger_dist = torch.abs(franka_lfinger_pos[:, 2] - drawer_grasp_pos[:, 2])
rfinger_dist = torch.abs(franka_rfinger_pos[:, 2] - drawer_grasp_pos[:, 2])
finger_dist_reward = torch.where(
franka_lfinger_pos[:, 2] > drawer_grasp_pos[:, 2],
torch.where(
franka_rfinger_pos[:, 2] < drawer_grasp_pos[:, 2],
(0.04 - lfinger_dist) + (0.04 - rfinger_dist),
finger_dist_reward,
),
finger_dist_reward,
)
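        # reward closing the last two (gripper finger) joints once the hand is within 3 cm of the handle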
finger_close_reward = torch.zeros_like(rot_reward)
finger_close_reward = torch.where(
d <= 0.03, (0.04 - joint_positions[:, 7]) + (0.04 - joint_positions[:, 8]), finger_close_reward
)
# regularization on the actions (summed for each environment)
action_penalty = torch.sum(actions**2, dim=-1)
# how far the cabinet has been opened out
open_reward = cabinet_dof_pos[:, 3] * around_handle_reward + cabinet_dof_pos[:, 3] # drawer_top_joint
rewards = (
dist_reward_scale * dist_reward
+ rot_reward_scale * rot_reward
+ around_handle_reward_scale * around_handle_reward
+ open_reward_scale * open_reward
+ finger_dist_reward_scale * finger_dist_reward
- action_penalty_scale * action_penalty
+ finger_close_reward * finger_close_reward_scale
)
# bonus for opening drawer properly
rewards = torch.where(cabinet_dof_pos[:, 3] > 0.01, rewards + 0.5, rewards)
rewards = torch.where(cabinet_dof_pos[:, 3] > 0.2, rewards + around_handle_reward, rewards)
rewards = torch.where(cabinet_dof_pos[:, 3] > 0.39, rewards + (2.0 * around_handle_reward), rewards)
# # prevent bad style in opening drawer
# rewards = torch.where(franka_lfinger_pos[:, 0] < drawer_grasp_pos[:, 0] - distX_offset,
# torch.ones_like(rewards) * -1, rewards)
# rewards = torch.where(franka_rfinger_pos[:, 0] < drawer_grasp_pos[:, 0] - distX_offset,
# torch.ones_like(rewards) * -1, rewards)
return rewards
| 22,939 | Python | 41.324723 | 222 | 0.599895 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/crazyflie.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import torch
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.rotations import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.crazyflie import Crazyflie
from omniisaacgymenvs.robots.articulations.views.crazyflie_view import CrazyflieView
EPS = 1e-6 # small constant to avoid divisions by 0 and log(0)
class CrazyflieTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._num_observations = 18
self._num_actions = 4
self._crazyflie_position = torch.tensor([0, 0, 1.0])
self._ball_position = torch.tensor([0, 0, 1.0])
RLTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
self.dt = self._task_cfg["sim"]["dt"]
# parameters for the crazyflie
self.arm_length = 0.05
# parameters for the controller
self.motor_damp_time_up = 0.15
self.motor_damp_time_down = 0.15
# I use the multiplier 4, since 4*T ~ time for a step response to finish, where
# T is a time constant of the first-order filter
self.motor_tau_up = 4 * self.dt / (self.motor_damp_time_up + EPS)
self.motor_tau_down = 4 * self.dt / (self.motor_damp_time_down + EPS)
# thrust max
self.mass = 0.028
self.thrust_to_weight = 1.9
self.motor_assymetry = np.array([1.0, 1.0, 1.0, 1.0])
# re-normalizing to sum-up to 4
self.motor_assymetry = self.motor_assymetry * 4.0 / np.sum(self.motor_assymetry)
self.grav_z = -1.0 * self._task_cfg["sim"]["gravity"][2]
def set_up_scene(self, scene) -> None:
self.get_crazyflie()
self.get_target()
RLTask.set_up_scene(self, scene)
self._copters = CrazyflieView(prim_paths_expr="/World/envs/.*/Crazyflie", name="crazyflie_view")
self._balls = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="ball_view")
scene.add(self._copters)
scene.add(self._balls)
for i in range(4):
scene.add(self._copters.physics_rotors[i])
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("crazyflie_view"):
scene.remove_object("crazyflie_view", registry_only=True)
if scene.object_exists("ball_view"):
scene.remove_object("ball_view", registry_only=True)
for i in range(1, 5):
scene.remove_object(f"m{i}_prop_view", registry_only=True)
self._copters = CrazyflieView(prim_paths_expr="/World/envs/.*/Crazyflie", name="crazyflie_view")
self._balls = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="ball_view")
scene.add(self._copters)
scene.add(self._balls)
for i in range(4):
scene.add(self._copters.physics_rotors[i])
def get_crazyflie(self):
copter = Crazyflie(
prim_path=self.default_zero_env_path + "/Crazyflie", name="crazyflie", translation=self._crazyflie_position
)
self._sim_config.apply_articulation_settings(
"crazyflie", get_prim_at_path(copter.prim_path), self._sim_config.parse_actor_config("crazyflie")
)
def get_target(self):
radius = 0.2
color = torch.tensor([1, 0, 0])
ball = DynamicSphere(
prim_path=self.default_zero_env_path + "/ball",
translation=self._ball_position,
name="target_0",
radius=radius,
color=color,
)
self._sim_config.apply_articulation_settings(
"ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball")
)
ball.set_collision_enabled(False)
def get_observations(self) -> dict:
self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False)
self.root_velocities = self._copters.get_velocities(clone=False)
root_positions = self.root_pos - self._env_pos
root_quats = self.root_rot
rot_x = quat_axis(root_quats, 0)
rot_y = quat_axis(root_quats, 1)
rot_z = quat_axis(root_quats, 2)
root_linvels = self.root_velocities[:, :3]
root_angvels = self.root_velocities[:, 3:]
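        # 18-dim observation: target offset (3), body axes expressed in world frame (9), linear and angular velocity (6)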
self.obs_buf[..., 0:3] = self.target_positions - root_positions
self.obs_buf[..., 3:6] = rot_x
self.obs_buf[..., 6:9] = rot_y
self.obs_buf[..., 9:12] = rot_z
self.obs_buf[..., 12:15] = root_linvels
self.obs_buf[..., 15:18] = root_angvels
observations = {self._copters.name: {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
set_target_ids = (self.progress_buf % 500 == 0).nonzero(as_tuple=False).squeeze(-1)
if len(set_target_ids) > 0:
self.set_targets(set_target_ids)
actions = actions.clone().to(self._device)
self.actions = actions
# clamp to [-1.0, 1.0]
thrust_cmds = torch.clamp(actions, min=-1.0, max=1.0)
# scale to [0.0, 1.0]
thrust_cmds = (thrust_cmds + 1.0) / 2.0
# filtering the thruster and adding noise
motor_tau = self.motor_tau_up * torch.ones((self._num_envs, 4), dtype=torch.float32, device=self._device)
motor_tau[thrust_cmds < self.thrust_cmds_damp] = self.motor_tau_down
motor_tau[motor_tau > 1.0] = 1.0
# Since NN commands thrusts we need to convert to rot vel and back
thrust_rot = thrust_cmds**0.5
self.thrust_rot_damp = motor_tau * (thrust_rot - self.thrust_rot_damp) + self.thrust_rot_damp
self.thrust_cmds_damp = self.thrust_rot_damp**2
## Adding noise
thrust_noise = 0.01 * torch.randn(4, dtype=torch.float32, device=self._device)
thrust_noise = thrust_cmds * thrust_noise
self.thrust_cmds_damp = torch.clamp(self.thrust_cmds_damp + thrust_noise, min=0.0, max=1.0)
thrusts = self.thrust_max * self.thrust_cmds_damp
# thrusts given rotation
root_quats = self.root_rot
rot_x = quat_axis(root_quats, 0)
rot_y = quat_axis(root_quats, 1)
rot_z = quat_axis(root_quats, 2)
rot_matrix = torch.cat((rot_x, rot_y, rot_z), 1).reshape(-1, 3, 3)
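        # stack the body axes into a per-env 3x3 matrix; each rotor's thrust vector below is
        # transformed with it before being applied as a force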
force_x = torch.zeros(self._num_envs, 4, dtype=torch.float32, device=self._device)
force_y = torch.zeros(self._num_envs, 4, dtype=torch.float32, device=self._device)
force_xy = torch.cat((force_x, force_y), 1).reshape(-1, 4, 2)
thrusts = thrusts.reshape(-1, 4, 1)
thrusts = torch.cat((force_xy, thrusts), 2)
thrusts_0 = thrusts[:, 0]
thrusts_0 = thrusts_0[:, :, None]
thrusts_1 = thrusts[:, 1]
thrusts_1 = thrusts_1[:, :, None]
thrusts_2 = thrusts[:, 2]
thrusts_2 = thrusts_2[:, :, None]
thrusts_3 = thrusts[:, 3]
thrusts_3 = thrusts_3[:, :, None]
mod_thrusts_0 = torch.matmul(rot_matrix, thrusts_0)
mod_thrusts_1 = torch.matmul(rot_matrix, thrusts_1)
mod_thrusts_2 = torch.matmul(rot_matrix, thrusts_2)
mod_thrusts_3 = torch.matmul(rot_matrix, thrusts_3)
self.thrusts[:, 0] = torch.squeeze(mod_thrusts_0)
self.thrusts[:, 1] = torch.squeeze(mod_thrusts_1)
self.thrusts[:, 2] = torch.squeeze(mod_thrusts_2)
self.thrusts[:, 3] = torch.squeeze(mod_thrusts_3)
# clear actions for reset envs
self.thrusts[reset_env_ids] = 0
# spin spinning rotors
prop_rot = self.thrust_cmds_damp * self.prop_max_rot
self.dof_vel[:, 0] = prop_rot[:, 0]
self.dof_vel[:, 1] = -1.0 * prop_rot[:, 1]
self.dof_vel[:, 2] = prop_rot[:, 2]
self.dof_vel[:, 3] = -1.0 * prop_rot[:, 3]
self._copters.set_joint_velocities(self.dof_vel)
# apply actions
for i in range(4):
self._copters.physics_rotors[i].apply_forces(self.thrusts[:, i], indices=self.all_indices)
def post_reset(self):
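        # per-rotor maximum thrust: hover weight times thrust-to-weight ratio, split across the
        # four rotors with the asymmetry factors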
thrust_max = self.grav_z * self.mass * self.thrust_to_weight * self.motor_assymetry / 4.0
self.thrusts = torch.zeros((self._num_envs, 4, 3), dtype=torch.float32, device=self._device)
self.thrust_cmds_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device)
self.thrust_rot_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device)
self.thrust_max = torch.tensor(thrust_max, device=self._device, dtype=torch.float32)
self.motor_linearity = 1.0
self.prop_max_rot = 433.3
self.target_positions = torch.zeros((self._num_envs, 3), device=self._device, dtype=torch.float32)
self.target_positions[:, 2] = 1
self.actions = torch.zeros((self._num_envs, 4), device=self._device, dtype=torch.float32)
self.all_indices = torch.arange(self._num_envs, dtype=torch.int32, device=self._device)
# Extra info
self.extras = {}
torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False)
self.episode_sums = {
"rew_pos": torch_zeros(),
"rew_orient": torch_zeros(),
"rew_effort": torch_zeros(),
"rew_spin": torch_zeros(),
"raw_dist": torch_zeros(),
"raw_orient": torch_zeros(),
"raw_effort": torch_zeros(),
"raw_spin": torch_zeros(),
}
self.root_pos, self.root_rot = self._copters.get_world_poses()
self.root_velocities = self._copters.get_velocities()
self.dof_pos = self._copters.get_joint_positions()
self.dof_vel = self._copters.get_joint_velocities()
self.initial_ball_pos, self.initial_ball_rot = self._balls.get_world_poses(clone=False)
self.initial_root_pos, self.initial_root_rot = self.root_pos.clone(), self.root_rot.clone()
# control parameters
self.thrusts = torch.zeros((self._num_envs, 4, 3), dtype=torch.float32, device=self._device)
self.thrust_cmds_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device)
self.thrust_rot_damp = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device)
self.set_targets(self.all_indices)
def set_targets(self, env_ids):
num_sets = len(env_ids)
envs_long = env_ids.long()
        # set target position to x, y = (0, 0) and z = 2.0
self.target_positions[envs_long, 0:2] = torch.zeros((num_sets, 2), device=self._device)
self.target_positions[envs_long, 2] = torch.ones(num_sets, device=self._device) * 2.0
        # place the ball marker at the target position (vertical offset currently zero)
ball_pos = self.target_positions[envs_long] + self._env_pos[envs_long]
ball_pos[:, 2] += 0.0
self._balls.set_world_poses(ball_pos[:, 0:3], self.initial_ball_rot[envs_long].clone(), indices=env_ids)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
self.dof_pos[env_ids, :] = torch_rand_float(-0.0, 0.0, (num_resets, self._copters.num_dof), device=self._device)
self.dof_vel[env_ids, :] = 0
root_pos = self.initial_root_pos.clone()
root_pos[env_ids, 0] += torch_rand_float(-0.0, 0.0, (num_resets, 1), device=self._device).view(-1)
root_pos[env_ids, 1] += torch_rand_float(-0.0, 0.0, (num_resets, 1), device=self._device).view(-1)
root_pos[env_ids, 2] += torch_rand_float(-0.0, 0.0, (num_resets, 1), device=self._device).view(-1)
root_velocities = self.root_velocities.clone()
root_velocities[env_ids] = 0
# apply resets
self._copters.set_joint_positions(self.dof_pos[env_ids], indices=env_ids)
self._copters.set_joint_velocities(self.dof_vel[env_ids], indices=env_ids)
self._copters.set_world_poses(root_pos[env_ids], self.initial_root_rot[env_ids].clone(), indices=env_ids)
self._copters.set_velocities(root_velocities[env_ids], indices=env_ids)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
self.thrust_cmds_damp[env_ids] = 0
self.thrust_rot_damp[env_ids] = 0
# fill extras
self.extras["episode"] = {}
for key in self.episode_sums.keys():
self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids]) / self._max_episode_length
self.episode_sums[key][env_ids] = 0.0
def calculate_metrics(self) -> None:
root_positions = self.root_pos - self._env_pos
root_quats = self.root_rot
root_angvels = self.root_velocities[:, 3:]
# pos reward
target_dist = torch.sqrt(torch.square(self.target_positions - root_positions).sum(-1))
pos_reward = 1.0 / (1.0 + target_dist)
self.target_dist = target_dist
self.root_positions = root_positions
# orient reward
ups = quat_axis(root_quats, 2)
self.orient_z = ups[..., 2]
up_reward = torch.clamp(ups[..., 2], min=0.0, max=1.0)
# effort reward
effort = torch.square(self.actions).sum(-1)
effort_reward = 0.05 * torch.exp(-0.5 * effort)
# spin reward
spin = torch.square(root_angvels).sum(-1)
spin_reward = 0.01 * torch.exp(-1.0 * spin)
# combined reward
self.rew_buf[:] = pos_reward + pos_reward * (up_reward + spin_reward) - effort_reward
# log episode reward sums
self.episode_sums["rew_pos"] += pos_reward
self.episode_sums["rew_orient"] += up_reward
self.episode_sums["rew_effort"] += effort_reward
self.episode_sums["rew_spin"] += spin_reward
# log raw info
self.episode_sums["raw_dist"] += target_dist
self.episode_sums["raw_orient"] += ups[..., 2]
self.episode_sums["raw_effort"] += effort
self.episode_sums["raw_spin"] += spin
def is_done(self) -> None:
# resets due to misbehavior
ones = torch.ones_like(self.reset_buf)
die = torch.zeros_like(self.reset_buf)
die = torch.where(self.target_dist > 5.0, ones, die)
# z >= 0.5 & z <= 5.0 & up > 0
die = torch.where(self.root_positions[..., 2] < 0.5, ones, die)
die = torch.where(self.root_positions[..., 2] > 5.0, ones, die)
die = torch.where(self.orient_z < 0.0, ones, die)
# resets due to episode length
self.reset_buf[:] = torch.where(self.progress_buf >= self._max_episode_length - 1, ones, die)
| 16,830 | Python | 41.502525 | 120 | 0.61937 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/humanoid.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.maths import tensor_clamp, torch_rand_float, unscale
from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.humanoid import Humanoid
from omniisaacgymenvs.tasks.shared.locomotion import LocomotionTask
from pxr import PhysxSchema
class HumanoidLocomotionTask(LocomotionTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._num_observations = 87
self._num_actions = 21
self._humanoid_positions = torch.tensor([0, 0, 1.34])
LocomotionTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
LocomotionTask.update_config(self)
def set_up_scene(self, scene) -> None:
self.get_humanoid()
RLTask.set_up_scene(self, scene)
self._humanoids = ArticulationView(
prim_paths_expr="/World/envs/.*/Humanoid/torso", name="humanoid_view", reset_xform_properties=False
)
scene.add(self._humanoids)
return
def initialize_views(self, scene):
RLTask.initialize_views(self, scene)
if scene.object_exists("humanoid_view"):
scene.remove_object("humanoid_view", registry_only=True)
self._humanoids = ArticulationView(
prim_paths_expr="/World/envs/.*/Humanoid/torso", name="humanoid_view", reset_xform_properties=False
)
scene.add(self._humanoids)
def get_humanoid(self):
humanoid = Humanoid(
prim_path=self.default_zero_env_path + "/Humanoid", name="Humanoid", translation=self._humanoid_positions
)
self._sim_config.apply_articulation_settings(
"Humanoid", get_prim_at_path(humanoid.prim_path), self._sim_config.parse_actor_config("Humanoid")
)
def get_robot(self):
return self._humanoids
def post_reset(self):
self.joint_gears = torch.tensor(
[
67.5000, # lower_waist
67.5000, # lower_waist
67.5000, # right_upper_arm
67.5000, # right_upper_arm
67.5000, # left_upper_arm
67.5000, # left_upper_arm
67.5000, # pelvis
45.0000, # right_lower_arm
45.0000, # left_lower_arm
45.0000, # right_thigh: x
135.0000, # right_thigh: y
45.0000, # right_thigh: z
45.0000, # left_thigh: x
135.0000, # left_thigh: y
45.0000, # left_thigh: z
90.0000, # right_knee
90.0000, # left_knee
22.5, # right_foot
22.5, # right_foot
22.5, # left_foot
22.5, # left_foot
],
device=self._device,
)
self.max_motor_effort = torch.max(self.joint_gears)
self.motor_effort_ratio = self.joint_gears / self.max_motor_effort
dof_limits = self._humanoids.get_dof_limits()
self.dof_limits_lower = dof_limits[0, :, 0].to(self._device)
self.dof_limits_upper = dof_limits[0, :, 1].to(self._device)
force_links = ["left_foot", "right_foot"]
self._sensor_indices = torch.tensor(
[self._humanoids._body_indices[j] for j in force_links], device=self._device, dtype=torch.long
)
LocomotionTask.post_reset(self)
def get_dof_at_limit_cost(self):
return get_dof_at_limit_cost(self.obs_buf, self.motor_effort_ratio, self.joints_at_limit_cost_scale)
@torch.jit.script
def get_dof_at_limit_cost(obs_buf, motor_effort_ratio, joints_at_limit_cost_scale):
# type: (Tensor, Tensor, float) -> Tensor
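    # penalize joints that are within 2% of their limits, weighted by each motor's relative effort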
scaled_cost = joints_at_limit_cost_scale * (torch.abs(obs_buf[:, 12:33]) - 0.98) / 0.02
dof_at_limit_cost = torch.sum(
(torch.abs(obs_buf[:, 12:33]) > 0.98) * scaled_cost * motor_effort_ratio.unsqueeze(0), dim=-1
)
return dof_at_limit_cost
| 5,980 | Python | 41.119718 | 117 | 0.651003 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/franka_deformable.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.franka import Franka
from omniisaacgymenvs.robots.articulations.views.franka_view import FrankaView
from omni.isaac.core.prims import RigidPrim, RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage, add_reference_to_stage
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.torch.transformations import *
from omni.isaac.core.utils.torch.rotations import *
import omni.isaac.core.utils.deformable_mesh_utils as deformableMeshUtils
from omni.isaac.core.materials.deformable_material import DeformableMaterial
from omni.isaac.core.prims.soft.deformable_prim import DeformablePrim
from omni.isaac.core.prims.soft.deformable_prim_view import DeformablePrimView
from omni.physx.scripts import deformableUtils, physicsUtils
import carb
import numpy as np
import torch
import math
from pxr import Usd, UsdGeom, Gf, UsdPhysics, PhysxSchema
class FrankaDeformableTask(RLTask):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self.update_config(sim_config)
        self.dt = 1 / 60.0
self._num_observations = 39
self._num_actions = 9
RLTask.__init__(self, name, env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["episodeLength"]
self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
self.action_scale = self._task_cfg["env"]["actionScale"]
def set_up_scene(self, scene) -> None:
self.stage = get_current_stage()
self.assets_root_path = get_assets_root_path()
if self.assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
self.get_franka()
self.get_beaker()
self.get_deformable_tube()
super().set_up_scene(scene=scene, replicate_physics=False)
self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view")
self.deformableView = DeformablePrimView(
prim_paths_expr="/World/envs/.*/deformableTube/tube/mesh", name="deformabletube_view"
)
scene.add(self.deformableView)
scene.add(self._frankas)
scene.add(self._frankas._hands)
scene.add(self._frankas._lfingers)
scene.add(self._frankas._rfingers)
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("franka_view"):
scene.remove_object("franka_view", registry_only=True)
if scene.object_exists("hands_view"):
scene.remove_object("hands_view", registry_only=True)
if scene.object_exists("lfingers_view"):
scene.remove_object("lfingers_view", registry_only=True)
if scene.object_exists("rfingers_view"):
scene.remove_object("rfingers_view", registry_only=True)
if scene.object_exists("deformabletube_view"):
scene.remove_object("deformabletube_view", registry_only=True)
self._frankas = FrankaView(
prim_paths_expr="/World/envs/.*/franka", name="franka_view"
)
self.deformableView = DeformablePrimView(
prim_paths_expr="/World/envs/.*/deformableTube/tube/mesh", name="deformabletube_view"
)
scene.add(self._frankas)
scene.add(self._frankas._hands)
scene.add(self._frankas._lfingers)
scene.add(self._frankas._rfingers)
scene.add(self.deformableView)
def get_franka(self):
franka = Franka(
prim_path=self.default_zero_env_path + "/franka",
name="franka",
orientation=torch.tensor([1.0, 0.0, 0.0, 0.0]),
translation=torch.tensor([0.0, 0.0, 0.0]),
)
self._sim_config.apply_articulation_settings(
"franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka")
)
franka.set_franka_properties(stage=self.stage, prim=franka.prim)
def get_beaker(self):
_usd_path = self.assets_root_path + "/Isaac/Props/Beaker/beaker_500ml.usd"
mesh_path = self.default_zero_env_path + "/beaker"
add_reference_to_stage(_usd_path, mesh_path)
beaker = RigidPrim(
prim_path=mesh_path+"/beaker",
name="beaker",
position=torch.tensor([0.5, 0.2, 0.095]),
)
self._sim_config.apply_articulation_settings("beaker", beaker.prim, self._sim_config.parse_actor_config("beaker"))
def get_deformable_tube(self):
_usd_path = self.assets_root_path + "/Isaac/Props/DeformableTube/tube.usd"
mesh_path = self.default_zero_env_path + "/deformableTube/tube"
add_reference_to_stage(_usd_path, mesh_path)
skin_mesh = get_prim_at_path(mesh_path)
physicsUtils.setup_transform_as_scale_orient_translate(skin_mesh)
physicsUtils.set_or_add_translate_op(skin_mesh, (0.6, 0.0, 0.005))
physicsUtils.set_or_add_orient_op(skin_mesh, Gf.Rotation(Gf.Vec3d([0, 0, 1]), 90).GetQuat())
def get_observations(self) -> dict:
franka_dof_pos = self._frankas.get_joint_positions(clone=False)
franka_dof_vel = self._frankas.get_joint_velocities(clone=False)
self.franka_dof_pos = franka_dof_pos
dof_pos_scaled = (
2.0 * (franka_dof_pos - self.franka_dof_lower_limits)
/ (self.franka_dof_upper_limits - self.franka_dof_lower_limits)
- 1.0
)
self.lfinger_pos, _ = self._frankas._lfingers.get_world_poses(clone=False)
self.rfinger_pos, _ = self._frankas._rfingers.get_world_poses(clone=False)
self.gripper_site_pos = (self.lfinger_pos + self.rfinger_pos)/2 - self._env_pos
tube_positions = self.deformableView.get_simulation_mesh_nodal_positions(clone=False)
tube_velocities = self.deformableView.get_simulation_mesh_nodal_velocities(clone=False)
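        # node 200 of the simulation mesh is treated as the tube's front end and the last node as its back end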
self.tube_front_positions = tube_positions[:, 200, :] - self._env_pos
self.tube_front_velocities = tube_velocities[:, 200, :]
self.tube_back_positions = tube_positions[:, -1, :] - self._env_pos
self.tube_back_velocities = tube_velocities[:, -1, :]
front_to_gripper = self.tube_front_positions - self.gripper_site_pos
to_front_goal = self.front_goal_pos - self.tube_front_positions
to_back_goal = self.back_goal_pos - self.tube_back_positions
self.obs_buf = torch.cat(
(
dof_pos_scaled,
franka_dof_vel * self.dof_vel_scale,
front_to_gripper,
to_front_goal,
to_back_goal,
self.tube_front_positions,
self.tube_front_velocities,
self.tube_back_positions,
self.tube_back_velocities,
),
dim=-1,
)
observations = {
self._frankas.name: {
"obs_buf": self.obs_buf
}
}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
self.actions = actions.clone().to(self._device)
targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale
self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits)
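        # mirror the second-to-last target onto the last DOF so both gripper fingers get the same command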
self.franka_dof_targets[:, -1] = self.franka_dof_targets[:, -2]
env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device)
self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32)
def reset_idx(self, env_ids):
indices = env_ids.to(dtype=torch.int32)
num_indices = len(indices)
pos = self.franka_default_dof_pos
dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
dof_pos[:, :] = pos
self.franka_dof_targets[env_ids, :] = pos
self.franka_dof_pos[env_ids, :] = pos
self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices)
self._frankas.set_joint_positions(dof_pos, indices=indices)
self._frankas.set_joint_velocities(dof_vel, indices=indices)
self.deformableView.set_simulation_mesh_nodal_positions(self.initial_tube_positions[env_ids], indices)
self.deformableView.set_simulation_mesh_nodal_velocities(self.initial_tube_velocities[env_ids], indices)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self):
self.franka_default_dof_pos = torch.tensor(
[0.00, 0.63, 0.00, -2.15, 0.00, 2.76, 0.75, 0.02, 0.02], device=self._device
)
self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device)
self.front_goal_pos = torch.tensor([0.36, 0.0, 0.23], device=self._device).repeat((self._num_envs, 1))
self.back_goal_pos = torch.tensor([0.5, 0.2, 0.0], device=self._device).repeat((self._num_envs, 1))
self.goal_hand_rot = torch.tensor([0.0, 1.0, 0.0, 0.0], device=self._device).repeat((self.num_envs, 1))
self.lfinger_pos, _ = self._frankas._lfingers.get_world_poses(clone=False)
self.rfinger_pos, _ = self._frankas._rfingers.get_world_poses(clone=False)
self.gripper_site_pos = (self.lfinger_pos + self.rfinger_pos)/2 - self._env_pos
self.initial_tube_positions = self.deformableView.get_simulation_mesh_nodal_positions()
self.initial_tube_velocities = self.deformableView.get_simulation_mesh_nodal_velocities()
self.tube_front_positions = self.initial_tube_positions[:, 0, :] - self._env_pos
self.tube_front_velocities = self.initial_tube_velocities[:, 0, :]
self.tube_back_positions = self.initial_tube_positions[:, -1, :] - self._env_pos
self.tube_back_velocities = self.initial_tube_velocities[:, -1, :]
self.num_franka_dofs = self._frankas.num_dof
self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device)
dof_limits = self._frankas.get_dof_limits()
self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device)
self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device)
self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits)
self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1
self.franka_dof_targets = torch.zeros(
(self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device
)
# randomize all envs
indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None:
        goal_distance_error = torch.norm(self.tube_back_positions[:, 0:2] - self.back_goal_pos[:, 0:2], p=2, dim=-1)
        goal_dist_reward = 1.0 / (5 * goal_distance_error + 0.025)
current_z_level = self.tube_back_positions[:, 2:3]
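        # lift target: keep the tube's back end raised (0.18 m) until it is within 7 cm of the goal in the xy-plane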
        z_lift_level = torch.where(
            goal_distance_error < 0.07, torch.zeros_like(current_z_level), torch.ones_like(current_z_level) * 0.18
        )
        front_lift_error = torch.norm(current_z_level - z_lift_level, p=2, dim=-1)
        front_lift_reward = 1.0 / (5 * front_lift_error + 0.025)
        rewards = goal_dist_reward + 4 * front_lift_reward
self.rew_buf[:] = rewards
def is_done(self) -> None:
self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(self.tube_front_positions[:, 0] < 0, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(self.tube_front_positions[:, 0] > 1.0, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(self.tube_front_positions[:, 1] < -1.0, torch.ones_like(self.reset_buf), self.reset_buf)
self.reset_buf = torch.where(self.tube_front_positions[:, 1] > 1.0, torch.ones_like(self.reset_buf), self.reset_buf)
| 13,322 | Python | 42.825658 | 136 | 0.641045 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/ant.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.maths import tensor_clamp, torch_rand_float, unscale
from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.ant import Ant
from omniisaacgymenvs.tasks.shared.locomotion import LocomotionTask
from pxr import PhysxSchema
class AntLocomotionTask(LocomotionTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
LocomotionTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_observations = 60
self._num_actions = 8
self._ant_positions = torch.tensor([0, 0, 0.5])
LocomotionTask.update_config(self)
def set_up_scene(self, scene) -> None:
self.get_ant()
RLTask.set_up_scene(self, scene)
self._ants = ArticulationView(
prim_paths_expr="/World/envs/.*/Ant/torso", name="ant_view", reset_xform_properties=False
)
scene.add(self._ants)
return
def initialize_views(self, scene):
RLTask.initialize_views(self, scene)
if scene.object_exists("ant_view"):
scene.remove_object("ant_view", registry_only=True)
self._ants = ArticulationView(
prim_paths_expr="/World/envs/.*/Ant/torso", name="ant_view", reset_xform_properties=False
)
scene.add(self._ants)
def get_ant(self):
ant = Ant(prim_path=self.default_zero_env_path + "/Ant", name="Ant", translation=self._ant_positions)
self._sim_config.apply_articulation_settings(
"Ant", get_prim_at_path(ant.prim_path), self._sim_config.parse_actor_config("Ant")
)
def get_robot(self):
return self._ants
def post_reset(self):
self.joint_gears = torch.tensor([15, 15, 15, 15, 15, 15, 15, 15], dtype=torch.float32, device=self._device)
dof_limits = self._ants.get_dof_limits()
self.dof_limits_lower = dof_limits[0, :, 0].to(self._device)
self.dof_limits_upper = dof_limits[0, :, 1].to(self._device)
self.motor_effort_ratio = torch.ones_like(self.joint_gears, device=self._device)
force_links = ["front_left_foot", "front_right_foot", "left_back_foot", "right_back_foot"]
self._sensor_indices = torch.tensor(
[self._ants._body_indices[j] for j in force_links], device=self._device, dtype=torch.long
)
LocomotionTask.post_reset(self)
def get_dof_at_limit_cost(self):
return get_dof_at_limit_cost(self.obs_buf, self._ants.num_dof)
@torch.jit.script
def get_dof_at_limit_cost(obs_buf, num_dof):
# type: (Tensor, int) -> Tensor
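    # count how many DOFs have a normalized position magnitude above 0.99 (i.e., near a joint limit);
    # obs_buf[:, 12:12 + num_dof] holds the joint positions scaled to [-1, 1] in the locomotion observation layout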
return torch.sum(obs_buf[:, 12 : 12 + num_dof] > 0.99, dim=-1)
| 4,691 | Python | 41.654545 | 115 | 0.69708 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/cartpole.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.cartpole import Cartpole
class CartpoleTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._max_episode_length = 500
self._num_observations = 4
self._num_actions = 1
RLTask.__init__(self, name, env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._cartpole_positions = torch.tensor([0.0, 0.0, 2.0])
self._reset_dist = self._task_cfg["env"]["resetDist"]
self._max_push_effort = self._task_cfg["env"]["maxEffort"]
def set_up_scene(self, scene) -> None:
self.get_cartpole()
super().set_up_scene(scene)
self._cartpoles = ArticulationView(
prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False
)
scene.add(self._cartpoles)
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("cartpole_view"):
scene.remove_object("cartpole_view", registry_only=True)
self._cartpoles = ArticulationView(
prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False
)
scene.add(self._cartpoles)
def get_cartpole(self):
cartpole = Cartpole(
prim_path=self.default_zero_env_path + "/Cartpole", name="Cartpole", translation=self._cartpole_positions
)
# applies articulation settings from the task configuration yaml file
self._sim_config.apply_articulation_settings(
"Cartpole", get_prim_at_path(cartpole.prim_path), self._sim_config.parse_actor_config("Cartpole")
)
def get_observations(self) -> dict:
dof_pos = self._cartpoles.get_joint_positions(clone=False)
dof_vel = self._cartpoles.get_joint_velocities(clone=False)
self.cart_pos = dof_pos[:, self._cart_dof_idx]
self.cart_vel = dof_vel[:, self._cart_dof_idx]
self.pole_pos = dof_pos[:, self._pole_dof_idx]
self.pole_vel = dof_vel[:, self._pole_dof_idx]
self.obs_buf[:, 0] = self.cart_pos
self.obs_buf[:, 1] = self.cart_vel
self.obs_buf[:, 2] = self.pole_pos
self.obs_buf[:, 3] = self.pole_vel
observations = {self._cartpoles.name: {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
actions = actions.to(self._device)
forces = torch.zeros((self._cartpoles.count, self._cartpoles.num_dof), dtype=torch.float32, device=self._device)
forces[:, self._cart_dof_idx] = self._max_push_effort * actions[:, 0]
indices = torch.arange(self._cartpoles.count, dtype=torch.int32, device=self._device)
self._cartpoles.set_joint_efforts(forces, indices=indices)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
# randomize DOF positions
dof_pos = torch.zeros((num_resets, self._cartpoles.num_dof), device=self._device)
dof_pos[:, self._cart_dof_idx] = 1.0 * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))
dof_pos[:, self._pole_dof_idx] = 0.125 * math.pi * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))
# randomize DOF velocities
dof_vel = torch.zeros((num_resets, self._cartpoles.num_dof), device=self._device)
dof_vel[:, self._cart_dof_idx] = 0.5 * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))
dof_vel[:, self._pole_dof_idx] = 0.25 * math.pi * (1.0 - 2.0 * torch.rand(num_resets, device=self._device))
# apply resets
indices = env_ids.to(dtype=torch.int32)
self._cartpoles.set_joint_positions(dof_pos, indices=indices)
self._cartpoles.set_joint_velocities(dof_vel, indices=indices)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self):
self._cart_dof_idx = self._cartpoles.get_dof_index("cartJoint")
self._pole_dof_idx = self._cartpoles.get_dof_index("poleJoint")
# randomize all envs
indices = torch.arange(self._cartpoles.count, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None:
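        # alive reward shaped by pole angle and velocities; overridden with a large negative reward out of bounds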
reward = 1.0 - self.pole_pos * self.pole_pos - 0.01 * torch.abs(self.cart_vel) - 0.005 * torch.abs(self.pole_vel)
reward = torch.where(torch.abs(self.cart_pos) > self._reset_dist, torch.ones_like(reward) * -2.0, reward)
reward = torch.where(torch.abs(self.pole_pos) > np.pi / 2, torch.ones_like(reward) * -2.0, reward)
self.rew_buf[:] = reward
def is_done(self) -> None:
resets = torch.where(torch.abs(self.cart_pos) > self._reset_dist, 1, 0)
resets = torch.where(torch.abs(self.pole_pos) > math.pi / 2, 1, resets)
resets = torch.where(self.progress_buf >= self._max_episode_length, 1, resets)
self.reset_buf[:] = resets
| 7,256 | Python | 42.981818 | 121 | 0.659179 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/dofbot_reacher.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Ref: /omniisaacgymenvs/tasks/shadow_hand.py
import math
import numpy as np
import torch
from omniisaacgymenvs.sim2real.dofbot import RealWorldDofbot
from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig
from omniisaacgymenvs.robots.articulations.views.dofbot_view import DofbotView
from omniisaacgymenvs.robots.articulations.dofbot import Dofbot
from omniisaacgymenvs.tasks.shared.reacher import ReacherTask
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch import *
from omni.isaac.gym.vec_env import VecEnvBase
class DofbotReacherTask(ReacherTask):
def __init__(
self,
name: str,
sim_config: SimConfig,
env: VecEnvBase,
offset=None
) -> None:
self.update_config(sim_config)
ReacherTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self.obs_type = self._task_cfg["env"]["observationType"]
if not (self.obs_type in ["full"]):
raise Exception(
"Unknown type of observations!\nobservationType should be one of: [full]")
print("Obs type:", self.obs_type)
self.num_obs_dict = {
"full": 29,
# 6: dofbot joints position (action space)
# 6: dofbot joints velocity
# 3: goal position
# 4: goal rotation
# 4: goal relative rotation
# 6: previous action
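            # total: 6 + 6 + 3 + 4 + 4 + 6 = 29 observations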
}
self.object_scale = torch.tensor([0.1] * 3)
self.goal_scale = torch.tensor([0.5] * 3)
self._num_observations = self.num_obs_dict[self.obs_type]
self._num_actions = 6
self._num_states = 0
pi = math.pi
# For actions
self._dof_limits = torch.tensor([[
[-pi/2, pi/2],
[-pi/4, pi/4],
[-pi/4, pi/4],
[-pi/4, pi/4],
[-pi/2, pi/2],
[-0.1, 0.1], # The gripper joint will be ignored, since it is not used in the Reacher task
]], dtype=torch.float32, device=self._cfg["sim_device"])
# The last action space cannot be [0, 0]
# It will introduce the following error:
# ValueError: Expected parameter loc (Tensor of shape (2048, 6)) of distribution Normal(loc: torch.Size([2048, 6]), scale: torch.Size([2048, 6])) to satisfy the constraint Real(), but found invalid values
self.useURDF = self._task_cfg["env"]["useURDF"]
# Setup Sim2Real
sim2real_config = self._task_cfg['sim2real']
if sim2real_config['enabled'] and self.test and self.num_envs == 1:
self.real_world_dofbot = RealWorldDofbot(
sim2real_config['ip'],
sim2real_config['port'],
sim2real_config['fail_quietely'],
sim2real_config['verbose']
)
ReacherTask.update_config(self)
def get_num_dof(self):
# assert self._arms.num_dof == 11
return min(self._arms.num_dof, 6)
def get_arm(self):
if not self.useURDF:
usd_path = "omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Robots/Dofbot/dofbot_instanceable.usd"
else:
usd_path = "omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Robots/Dofbot/dofbot_urdf_instanceable.usd"
dofbot = Dofbot(
prim_path=self.default_zero_env_path + "/Dofbot",
name="Dofbot",
usd_path=usd_path
)
self._sim_config.apply_articulation_settings(
"dofbot",
get_prim_at_path(dofbot.prim_path),
self._sim_config.parse_actor_config("dofbot"),
)
def get_arm_view(self, scene):
if not self.useURDF:
end_effector_prim_paths_expr = "/World/envs/.*/Dofbot/link5/Wrist_Twist"
else:
end_effector_prim_paths_expr = "/World/envs/.*/Dofbot/link5"
arm_view = DofbotView(
prim_paths_expr="/World/envs/.*/Dofbot",
end_effector_prim_paths_expr=end_effector_prim_paths_expr,
name="dofbot_view"
)
scene.add(arm_view._end_effectors)
return arm_view
def get_object_displacement_tensor(self):
return torch.tensor([0.0, 0.015, 0.1], device=self.device).repeat((self.num_envs, 1))
def get_observations(self):
self.arm_dof_pos = self._arms.get_joint_positions()
self.arm_dof_vel = self._arms.get_joint_velocities()
if self.obs_type == "full_no_vel":
self.compute_full_observations(True)
elif self.obs_type == "full":
self.compute_full_observations()
else:
print("Unkown observations type!")
observations = {self._arms.name: {"obs_buf": self.obs_buf}}
return observations
def get_reset_target_new_pos(self, n_reset_envs):
# Randomly generate goal positions, although the resulting goal may still not be reachable.
new_pos = torch_rand_float(-1, 1, (n_reset_envs, 3), device=self.device)
new_pos[:, 0] = new_pos[:, 0] * 0.05 + 0.15 * torch.sign(new_pos[:, 0])
new_pos[:, 1] = new_pos[:, 1] * 0.05 + 0.15 * torch.sign(new_pos[:, 1])
new_pos[:, 2] = torch.abs(new_pos[:, 2] * 0.2) + 0.15
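        # resulting samples: roughly |x|, |y| in [0.15, 0.2] with a random sign, and z in [0.15, 0.35]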
return new_pos
def compute_full_observations(self, no_vel=False):
if no_vel:
raise NotImplementedError()
else:
            # There is a lot of redundant information for the simple Reacher task, but we'll keep it for now.
self.obs_buf[:, 0:self.num_arm_dofs] = unscale(self.arm_dof_pos[:, :self.num_arm_dofs],
self.arm_dof_lower_limits, self.arm_dof_upper_limits)
self.obs_buf[:, self.num_arm_dofs:2*self.num_arm_dofs] = self.vel_obs_scale * self.arm_dof_vel[:, :self.num_arm_dofs]
base = 2 * self.num_arm_dofs
self.obs_buf[:, base+0:base+3] = self.goal_pos
self.obs_buf[:, base+3:base+7] = self.goal_rot
self.obs_buf[:, base+7:base+11] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, base+11:base+17] = self.actions
def send_joint_pos(self, joint_pos):
self.real_world_dofbot.send_joint_pos(joint_pos)
| 7,961 | Python | 41.57754 | 212 | 0.633589 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/quadcopter.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.rotations import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.quadcopter import Quadcopter
from omniisaacgymenvs.robots.articulations.views.quadcopter_view import QuadcopterView
class QuadcopterTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._num_observations = 21
self._num_actions = 12
self._copter_position = torch.tensor([0, 0, 1.0])
RLTask.__init__(self, name=name, env=env)
max_thrust = 2.0
self.thrust_lower_limits = -max_thrust * torch.ones(4, device=self._device, dtype=torch.float32)
self.thrust_upper_limits = max_thrust * torch.ones(4, device=self._device, dtype=torch.float32)
self.all_indices = torch.arange(self._num_envs, dtype=torch.int32, device=self._device)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
self.dt = self._task_cfg["sim"]["dt"]
def set_up_scene(self, scene) -> None:
self.get_copter()
self.get_target()
RLTask.set_up_scene(self, scene)
self._copters = QuadcopterView(prim_paths_expr="/World/envs/.*/Quadcopter", name="quadcopter_view")
self._balls = RigidPrimView(
prim_paths_expr="/World/envs/.*/ball", name="targets_view", reset_xform_properties=False
)
self._balls._non_root_link = True # do not set states for kinematics
scene.add(self._copters)
scene.add(self._copters.rotors)
scene.add(self._balls)
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("quadcopter_view"):
scene.remove_object("quadcopter_view", registry_only=True)
if scene.object_exists("rotors_view"):
scene.remove_object("rotors_view", registry_only=True)
if scene.object_exists("targets_view"):
scene.remove_object("targets_view", registry_only=True)
self._copters = QuadcopterView(prim_paths_expr="/World/envs/.*/Quadcopter", name="quadcopter_view")
self._balls = RigidPrimView(
prim_paths_expr="/World/envs/.*/ball", name="targets_view", reset_xform_properties=False
)
scene.add(self._copters)
scene.add(self._copters.rotors)
scene.add(self._balls)
def get_copter(self):
copter = Quadcopter(
prim_path=self.default_zero_env_path + "/Quadcopter", name="quadcopter", translation=self._copter_position
)
self._sim_config.apply_articulation_settings(
"copter", get_prim_at_path(copter.prim_path), self._sim_config.parse_actor_config("copter")
)
def get_target(self):
radius = 0.05
color = torch.tensor([1, 0, 0])
ball = DynamicSphere(
prim_path=self.default_zero_env_path + "/ball",
name="target_0",
radius=radius,
color=color,
)
self._sim_config.apply_articulation_settings(
"ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball")
)
ball.set_collision_enabled(False)
def get_observations(self) -> dict:
self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False)
self.root_velocities = self._copters.get_velocities(clone=False)
self.dof_pos = self._copters.get_joint_positions(clone=False)
root_positions = self.root_pos - self._env_pos
root_quats = self.root_rot
root_linvels = self.root_velocities[:, :3]
root_angvels = self.root_velocities[:, 3:]
self.obs_buf[..., 0:3] = (self.target_positions - root_positions) / 3
self.obs_buf[..., 3:7] = root_quats
self.obs_buf[..., 7:10] = root_linvels / 2
self.obs_buf[..., 10:13] = root_angvels / math.pi
self.obs_buf[..., 13:21] = self.dof_pos
observations = {self._copters.name: {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
actions = actions.clone().to(self._device)
dof_action_speed_scale = 8 * math.pi
self.dof_position_targets += self.dt * dof_action_speed_scale * actions[:, 0:8]
self.dof_position_targets[:] = tensor_clamp(
self.dof_position_targets, self.dof_lower_limits, self.dof_upper_limits
)
thrust_action_speed_scale = 100
self.thrusts += self.dt * thrust_action_speed_scale * actions[:, 8:12]
self.thrusts[:] = tensor_clamp(self.thrusts, self.thrust_lower_limits, self.thrust_upper_limits)
self.forces[:, 0, 2] = self.thrusts[:, 0]
self.forces[:, 1, 2] = self.thrusts[:, 1]
self.forces[:, 2, 2] = self.thrusts[:, 2]
self.forces[:, 3, 2] = self.thrusts[:, 3]
# clear actions for reset envs
self.thrusts[reset_env_ids] = 0.0
self.forces[reset_env_ids] = 0.0
self.dof_position_targets[reset_env_ids] = self.dof_pos[reset_env_ids]
# apply actions
self._copters.set_joint_position_targets(self.dof_position_targets)
self._copters.rotors.apply_forces(self.forces, is_global=False)
def post_reset(self):
# control tensors
self.dof_position_targets = torch.zeros(
(self._num_envs, self._copters.num_dof), dtype=torch.float32, device=self._device, requires_grad=False
)
self.thrusts = torch.zeros((self._num_envs, 4), dtype=torch.float32, device=self._device, requires_grad=False)
self.forces = torch.zeros(
(self._num_envs, self._copters.rotors.count // self._num_envs, 3),
dtype=torch.float32,
device=self._device,
requires_grad=False,
)
self.target_positions = torch.zeros((self._num_envs, 3), device=self._device)
self.target_positions[:, 2] = 1.0
self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False)
self.root_velocities = self._copters.get_velocities(clone=False)
self.dof_pos = self._copters.get_joint_positions(clone=False)
self.dof_vel = self._copters.get_joint_velocities(clone=False)
self.initial_root_pos, self.initial_root_rot = self.root_pos.clone(), self.root_rot.clone()
dof_limits = self._copters.get_dof_limits()
self.dof_lower_limits = dof_limits[0][:, 0].to(device=self._device)
self.dof_upper_limits = dof_limits[0][:, 1].to(device=self._device)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
self.dof_pos[env_ids, :] = torch_rand_float(-0.2, 0.2, (num_resets, self._copters.num_dof), device=self._device)
self.dof_vel[env_ids, :] = 0
root_pos = self.initial_root_pos.clone()
root_pos[env_ids, 0] += torch_rand_float(-1.5, 1.5, (num_resets, 1), device=self._device).view(-1)
root_pos[env_ids, 1] += torch_rand_float(-1.5, 1.5, (num_resets, 1), device=self._device).view(-1)
root_pos[env_ids, 2] += torch_rand_float(-0.2, 1.5, (num_resets, 1), device=self._device).view(-1)
root_velocities = self.root_velocities.clone()
root_velocities[env_ids] = 0
# apply resets
self._copters.set_joint_positions(self.dof_pos[env_ids], indices=env_ids)
self._copters.set_joint_velocities(self.dof_vel[env_ids], indices=env_ids)
self._copters.set_world_poses(root_pos[env_ids], self.initial_root_rot[env_ids].clone(), indices=env_ids)
self._copters.set_velocities(root_velocities[env_ids], indices=env_ids)
self._balls.set_world_poses(positions=self.target_positions[:, 0:3] + self._env_pos)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def calculate_metrics(self) -> None:
root_positions = self.root_pos - self._env_pos
root_quats = self.root_rot
root_angvels = self.root_velocities[:, 3:]
# distance to target
target_dist = torch.sqrt(torch.square(self.target_positions - root_positions).sum(-1))
        pos_reward = 1.0 / (1.0 + 3 * target_dist * target_dist)
self.target_dist = target_dist
self.root_positions = root_positions
# uprightness
ups = quat_axis(root_quats, 2)
tiltage = torch.abs(1 - ups[..., 2])
up_reward = 1.0 / (1.0 + 10 * tiltage * tiltage)
# spinning
spinnage = torch.abs(root_angvels[..., 2])
spinnage_reward = 1.0 / (1.0 + 0.001 * spinnage * spinnage)
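        # combined reward: the uprightness and spin terms only contribute when the copter is close to the target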
rew = pos_reward + pos_reward * (up_reward + spinnage_reward + spinnage * spinnage * (-1 / 400))
rew = torch.clip(rew, 0.0, None)
self.rew_buf[:] = rew
def is_done(self) -> None:
# resets due to misbehavior
ones = torch.ones_like(self.reset_buf)
die = torch.zeros_like(self.reset_buf)
die = torch.where(self.target_dist > 3.0, ones, die)
die = torch.where(self.root_positions[..., 2] < 0.3, ones, die)
# resets due to episode length
self.reset_buf[:] = torch.where(self.progress_buf >= self._max_episode_length - 1, ones, die)
| 11,498 | Python | 42.889313 | 120 | 0.640633 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/ingenuity.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.robots.articulations.ingenuity import Ingenuity
from omniisaacgymenvs.robots.articulations.views.ingenuity_view import IngenuityView
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omniisaacgymenvs.tasks.base.rl_task import RLTask
import numpy as np
import torch
import math
class IngenuityTask(RLTask):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self.update_config(sim_config)
self.thrust_limit = 2000
self.thrust_lateral_component = 0.2
self._num_observations = 13
self._num_actions = 6
self._ingenuity_position = torch.tensor([0, 0, 1.0])
self._ball_position = torch.tensor([0, 0, 1.0])
RLTask.__init__(self, name=name, env=env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["maxEpisodeLength"]
self.dt = self._task_cfg["sim"]["dt"]
def set_up_scene(self, scene) -> None:
self.get_ingenuity()
self.get_target()
RLTask.set_up_scene(self, scene)
self._copters = IngenuityView(prim_paths_expr="/World/envs/.*/Ingenuity", name="ingenuity_view")
self._balls = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="targets_view", reset_xform_properties=False)
self._balls._non_root_link = True # do not set states for kinematics
scene.add(self._copters)
scene.add(self._balls)
for i in range(2):
scene.add(self._copters.physics_rotors[i])
scene.add(self._copters.visual_rotors[i])
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("ingenuity_view"):
scene.remove_object("ingenuity_view", registry_only=True)
for i in range(2):
if scene.object_exists(f"physics_rotor_{i}_view"):
scene.remove_object(f"physics_rotor_{i}_view", registry_only=True)
if scene.object_exists(f"visual_rotor_{i}_view"):
scene.remove_object(f"visual_rotor_{i}_view", registry_only=True)
if scene.object_exists("targets_view"):
scene.remove_object("targets_view", registry_only=True)
self._copters = IngenuityView(prim_paths_expr="/World/envs/.*/Ingenuity", name="ingenuity_view")
self._balls = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="targets_view", reset_xform_properties=False)
scene.add(self._copters)
scene.add(self._balls)
for i in range(2):
scene.add(self._copters.physics_rotors[i])
scene.add(self._copters.visual_rotors[i])
def get_ingenuity(self):
copter = Ingenuity(prim_path=self.default_zero_env_path + "/Ingenuity", name="ingenuity", translation=self._ingenuity_position)
self._sim_config.apply_articulation_settings("ingenuity", get_prim_at_path(copter.prim_path), self._sim_config.parse_actor_config("ingenuity"))
def get_target(self):
radius = 0.1
color = torch.tensor([1, 0, 0])
ball = DynamicSphere(
prim_path=self.default_zero_env_path + "/ball",
translation=self._ball_position,
name="target_0",
radius=radius,
color=color,
)
self._sim_config.apply_articulation_settings("ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball"))
ball.set_collision_enabled(False)
def get_observations(self) -> dict:
self.root_pos, self.root_rot = self._copters.get_world_poses(clone=False)
self.root_velocities = self._copters.get_velocities(clone=False)
root_positions = self.root_pos - self._env_pos
root_quats = self.root_rot
root_linvels = self.root_velocities[:, :3]
root_angvels = self.root_velocities[:, 3:]
self.obs_buf[..., 0:3] = (self.target_positions - root_positions) / 3
self.obs_buf[..., 3:7] = root_quats
self.obs_buf[..., 7:10] = root_linvels / 2
self.obs_buf[..., 10:13] = root_angvels / math.pi
observations = {
self._copters.name: {
"obs_buf": self.obs_buf
}
}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
set_target_ids = (self.progress_buf % 500 == 0).nonzero(as_tuple=False).squeeze(-1)
if len(set_target_ids) > 0:
self.set_targets(set_target_ids)
actions = actions.clone().to(self._device)
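        # actions 0-2 drive rotor 0 and actions 3-5 drive rotor 1: indices 2/5 are vertical thrust, 0-1/3-4 are lateral force fractions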
vertical_thrust_prop_0 = torch.clamp(actions[:, 2] * self.thrust_limit, -self.thrust_limit, self.thrust_limit)
vertical_thrust_prop_1 = torch.clamp(actions[:, 5] * self.thrust_limit, -self.thrust_limit, self.thrust_limit)
lateral_fraction_prop_0 = torch.clamp(
actions[:, 0:2] * self.thrust_lateral_component,
-self.thrust_lateral_component,
self.thrust_lateral_component,
)
lateral_fraction_prop_1 = torch.clamp(
actions[:, 3:5] * self.thrust_lateral_component,
-self.thrust_lateral_component,
self.thrust_lateral_component,
)
self.thrusts[:, 0, 2] = self.dt * vertical_thrust_prop_0
self.thrusts[:, 0, 0:2] = self.thrusts[:, 0, 2, None] * lateral_fraction_prop_0
self.thrusts[:, 1, 2] = self.dt * vertical_thrust_prop_1
self.thrusts[:, 1, 0:2] = self.thrusts[:, 1, 2, None] * lateral_fraction_prop_1
# clear actions for reset envs
self.thrusts[reset_env_ids] = 0
# spin spinning rotors
self.dof_vel[:, self.spinning_indices[0]] = 50
self.dof_vel[:, self.spinning_indices[1]] = -50
self._copters.set_joint_velocities(self.dof_vel)
# apply actions
for i in range(2):
self._copters.physics_rotors[i].apply_forces(self.thrusts[:, i], indices=self.all_indices)
def post_reset(self):
self.spinning_indices = torch.tensor([1, 3], device=self._device)
self.all_indices = torch.arange(self._num_envs, dtype=torch.int32, device=self._device)
self.target_positions = torch.zeros((self._num_envs, 3), device=self._device, dtype=torch.float32)
self.target_positions[:, 2] = 1
self.root_pos, self.root_rot = self._copters.get_world_poses()
self.root_velocities = self._copters.get_velocities()
self.dof_pos = self._copters.get_joint_positions()
self.dof_vel = self._copters.get_joint_velocities()
self.initial_ball_pos, self.initial_ball_rot = self._balls.get_world_poses()
self.initial_root_pos, self.initial_root_rot = self.root_pos.clone(), self.root_rot.clone()
# control tensors
self.thrusts = torch.zeros((self._num_envs, 2, 3), dtype=torch.float32, device=self._device)
def set_targets(self, env_ids):
num_sets = len(env_ids)
envs_long = env_ids.long()
# set target position randomly with x, y in (-1, 1) and z in (1, 2)
self.target_positions[envs_long, 0:2] = torch.rand((num_sets, 2), device=self._device) * 2 - 1
self.target_positions[envs_long, 2] = torch.rand(num_sets, device=self._device) + 1
# shift the target up so it visually aligns better
ball_pos = self.target_positions[envs_long] + self._env_pos[envs_long]
ball_pos[:, 2] += 0.4
self._balls.set_world_poses(ball_pos[:, 0:3], self.initial_ball_rot[envs_long].clone(), indices=env_ids)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
self.dof_pos[env_ids, 1] = torch_rand_float(-0.2, 0.2, (num_resets, 1), device=self._device).squeeze()
self.dof_pos[env_ids, 3] = torch_rand_float(-0.2, 0.2, (num_resets, 1), device=self._device).squeeze()
self.dof_vel[env_ids, :] = 0
root_pos = self.initial_root_pos.clone()
root_pos[env_ids, 0] += torch_rand_float(-0.5, 0.5, (num_resets, 1), device=self._device).view(-1)
root_pos[env_ids, 1] += torch_rand_float(-0.5, 0.5, (num_resets, 1), device=self._device).view(-1)
root_pos[env_ids, 2] += torch_rand_float(-0.5, 0.5, (num_resets, 1), device=self._device).view(-1)
root_velocities = self.root_velocities.clone()
root_velocities[env_ids] = 0
# apply resets
self._copters.set_joint_positions(self.dof_pos[env_ids], indices=env_ids)
self._copters.set_joint_velocities(self.dof_vel[env_ids], indices=env_ids)
self._copters.set_world_poses(root_pos[env_ids], self.initial_root_rot[env_ids].clone(), indices=env_ids)
self._copters.set_velocities(root_velocities[env_ids], indices=env_ids)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def calculate_metrics(self) -> None:
root_positions = self.root_pos - self._env_pos
root_quats = self.root_rot
root_angvels = self.root_velocities[:, 3:]
# distance to target
target_dist = torch.sqrt(torch.square(self.target_positions - root_positions).sum(-1))
pos_reward = 1.0 / (1.0 + 2.5 * target_dist * target_dist)
self.target_dist = target_dist
self.root_positions = root_positions
# uprightness
ups = quat_axis(root_quats, 2)
tiltage = torch.abs(1 - ups[..., 2])
up_reward = 1.0 / (1.0 + 30 * tiltage * tiltage)
# spinning
spinnage = torch.abs(root_angvels[..., 2])
spinnage_reward = 1.0 / (1.0 + 10 * spinnage * spinnage)
# combined reward
# uprightness and spinning only matter when close to the target
self.rew_buf[:] = pos_reward + pos_reward * (up_reward + spinnage_reward)
def is_done(self) -> None:
# resets due to misbehavior
ones = torch.ones_like(self.reset_buf)
die = torch.zeros_like(self.reset_buf)
die = torch.where(self.target_dist > 20.0, ones, die)
die = torch.where(self.root_positions[..., 2] < 0.5, ones, die)
# resets due to episode length
self.reset_buf[:] = torch.where(self.progress_buf >= self._max_episode_length - 1, ones, die)
| 12,391 | Python | 42.943262 | 151 | 0.635138 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/anymal.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.rotations import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.anymal import Anymal
from omniisaacgymenvs.robots.articulations.views.anymal_view import AnymalView
from omniisaacgymenvs.tasks.utils.usd_utils import set_drive
class AnymalTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self.update_config(sim_config)
self._num_observations = 48
self._num_actions = 12
RLTask.__init__(self, name, env)
return
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
# normalization
self.lin_vel_scale = self._task_cfg["env"]["learn"]["linearVelocityScale"]
self.ang_vel_scale = self._task_cfg["env"]["learn"]["angularVelocityScale"]
self.dof_pos_scale = self._task_cfg["env"]["learn"]["dofPositionScale"]
self.dof_vel_scale = self._task_cfg["env"]["learn"]["dofVelocityScale"]
self.action_scale = self._task_cfg["env"]["control"]["actionScale"]
# reward scales
self.rew_scales = {}
self.rew_scales["lin_vel_xy"] = self._task_cfg["env"]["learn"]["linearVelocityXYRewardScale"]
self.rew_scales["ang_vel_z"] = self._task_cfg["env"]["learn"]["angularVelocityZRewardScale"]
self.rew_scales["lin_vel_z"] = self._task_cfg["env"]["learn"]["linearVelocityZRewardScale"]
self.rew_scales["joint_acc"] = self._task_cfg["env"]["learn"]["jointAccRewardScale"]
self.rew_scales["action_rate"] = self._task_cfg["env"]["learn"]["actionRateRewardScale"]
self.rew_scales["cosmetic"] = self._task_cfg["env"]["learn"]["cosmeticRewardScale"]
# command ranges
self.command_x_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["linear_x"]
self.command_y_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["linear_y"]
self.command_yaw_range = self._task_cfg["env"]["randomCommandVelocityRanges"]["yaw"]
# base init state
pos = self._task_cfg["env"]["baseInitState"]["pos"]
rot = self._task_cfg["env"]["baseInitState"]["rot"]
v_lin = self._task_cfg["env"]["baseInitState"]["vLinear"]
v_ang = self._task_cfg["env"]["baseInitState"]["vAngular"]
state = pos + rot + v_lin + v_ang
self.base_init_state = state
# default joint positions
self.named_default_joint_angles = self._task_cfg["env"]["defaultJointAngles"]
# other
self.dt = 1 / 60
self.max_episode_length_s = self._task_cfg["env"]["learn"]["episodeLength_s"]
self.max_episode_length = int(self.max_episode_length_s / self.dt + 0.5)
self.Kp = self._task_cfg["env"]["control"]["stiffness"]
self.Kd = self._task_cfg["env"]["control"]["damping"]
for key in self.rew_scales.keys():
self.rew_scales[key] *= self.dt
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._anymal_translation = torch.tensor([0.0, 0.0, 0.62])
self._env_spacing = self._task_cfg["env"]["envSpacing"]
def set_up_scene(self, scene) -> None:
self.get_anymal()
super().set_up_scene(scene)
self._anymals = AnymalView(prim_paths_expr="/World/envs/.*/anymal", name="anymalview")
scene.add(self._anymals)
scene.add(self._anymals._knees)
scene.add(self._anymals._base)
return
def initialize_views(self, scene):
super().initialize_views(scene)
if scene.object_exists("anymalview"):
scene.remove_object("anymalview", registry_only=True)
if scene.object_exists("knees_view"):
scene.remove_object("knees_view", registry_only=True)
if scene.object_exists("base_view"):
scene.remove_object("base_view", registry_only=True)
self._anymals = AnymalView(prim_paths_expr="/World/envs/.*/anymal", name="anymalview")
scene.add(self._anymals)
scene.add(self._anymals._knees)
scene.add(self._anymals._base)
def get_anymal(self):
anymal = Anymal(
prim_path=self.default_zero_env_path + "/anymal", name="Anymal", translation=self._anymal_translation
)
self._sim_config.apply_articulation_settings(
"Anymal", get_prim_at_path(anymal.prim_path), self._sim_config.parse_actor_config("Anymal")
)
# Configure joint properties
joint_paths = []
for quadrant in ["LF", "LH", "RF", "RH"]:
for component, abbrev in [("HIP", "H"), ("THIGH", "K")]:
joint_paths.append(f"{quadrant}_{component}/{quadrant}_{abbrev}FE")
joint_paths.append(f"base/{quadrant}_HAA")
for joint_path in joint_paths:
set_drive(f"{anymal.prim_path}/{joint_path}", "angular", "position", 0, 400, 40, 1000)
def get_observations(self) -> dict:
torso_position, torso_rotation = self._anymals.get_world_poses(clone=False)
root_velocities = self._anymals.get_velocities(clone=False)
dof_pos = self._anymals.get_joint_positions(clone=False)
dof_vel = self._anymals.get_joint_velocities(clone=False)
velocity = root_velocities[:, 0:3]
ang_velocity = root_velocities[:, 3:6]
base_lin_vel = quat_rotate_inverse(torso_rotation, velocity) * self.lin_vel_scale
base_ang_vel = quat_rotate_inverse(torso_rotation, ang_velocity) * self.ang_vel_scale
projected_gravity = quat_rotate(torso_rotation, self.gravity_vec)
dof_pos_scaled = (dof_pos - self.default_dof_pos) * self.dof_pos_scale
commands_scaled = self.commands * torch.tensor(
[self.lin_vel_scale, self.lin_vel_scale, self.ang_vel_scale],
requires_grad=False,
device=self.commands.device,
)
obs = torch.cat(
(
base_lin_vel,
base_ang_vel,
projected_gravity,
commands_scaled,
dof_pos_scaled,
dof_vel * self.dof_vel_scale,
self.actions,
),
dim=-1,
)
self.obs_buf[:] = obs
observations = {self._anymals.name: {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
indices = torch.arange(self._anymals.count, dtype=torch.int32, device=self._device)
self.actions[:] = actions.clone().to(self._device)
current_targets = self.current_targets + self.action_scale * self.actions * self.dt
self.current_targets[:] = tensor_clamp(
current_targets, self.anymal_dof_lower_limits, self.anymal_dof_upper_limits
)
self._anymals.set_joint_position_targets(self.current_targets, indices)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
# randomize DOF velocities
velocities = torch_rand_float(-0.1, 0.1, (num_resets, self._anymals.num_dof), device=self._device)
dof_pos = self.default_dof_pos[env_ids]
dof_vel = velocities
self.current_targets[env_ids] = dof_pos[:]
root_vel = torch.zeros((num_resets, 6), device=self._device)
# apply resets
indices = env_ids.to(dtype=torch.int32)
self._anymals.set_joint_positions(dof_pos, indices)
self._anymals.set_joint_velocities(dof_vel, indices)
self._anymals.set_world_poses(
self.initial_root_pos[env_ids].clone(), self.initial_root_rot[env_ids].clone(), indices
)
self._anymals.set_velocities(root_vel, indices)
self.commands_x[env_ids] = torch_rand_float(
self.command_x_range[0], self.command_x_range[1], (num_resets, 1), device=self._device
).squeeze()
self.commands_y[env_ids] = torch_rand_float(
self.command_y_range[0], self.command_y_range[1], (num_resets, 1), device=self._device
).squeeze()
self.commands_yaw[env_ids] = torch_rand_float(
self.command_yaw_range[0], self.command_yaw_range[1], (num_resets, 1), device=self._device
).squeeze()
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
self.last_actions[env_ids] = 0.0
self.last_dof_vel[env_ids] = 0.0
def post_reset(self):
self.default_dof_pos = torch.zeros(
(self.num_envs, 12), dtype=torch.float, device=self.device, requires_grad=False
)
dof_names = self._anymals.dof_names
for i in range(self.num_actions):
name = dof_names[i]
angle = self.named_default_joint_angles[name]
self.default_dof_pos[:, i] = angle
self.initial_root_pos, self.initial_root_rot = self._anymals.get_world_poses()
self.current_targets = self.default_dof_pos.clone()
dof_limits = self._anymals.get_dof_limits()
self.anymal_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device)
self.anymal_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device)
self.commands = torch.zeros(self._num_envs, 3, dtype=torch.float, device=self._device, requires_grad=False)
self.commands_y = self.commands.view(self._num_envs, 3)[..., 1]
self.commands_x = self.commands.view(self._num_envs, 3)[..., 0]
self.commands_yaw = self.commands.view(self._num_envs, 3)[..., 2]
# initialize some data used later on
self.extras = {}
self.gravity_vec = torch.tensor([0.0, 0.0, -1.0], device=self._device).repeat((self._num_envs, 1))
self.actions = torch.zeros(
self._num_envs, self.num_actions, dtype=torch.float, device=self._device, requires_grad=False
)
self.last_dof_vel = torch.zeros(
(self._num_envs, 12), dtype=torch.float, device=self._device, requires_grad=False
)
self.last_actions = torch.zeros(
self._num_envs, self.num_actions, dtype=torch.float, device=self._device, requires_grad=False
)
self.time_out_buf = torch.zeros_like(self.reset_buf)
# randomize all envs
indices = torch.arange(self._anymals.count, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None:
torso_position, torso_rotation = self._anymals.get_world_poses(clone=False)
root_velocities = self._anymals.get_velocities(clone=False)
dof_pos = self._anymals.get_joint_positions(clone=False)
dof_vel = self._anymals.get_joint_velocities(clone=False)
velocity = root_velocities[:, 0:3]
ang_velocity = root_velocities[:, 3:6]
base_lin_vel = quat_rotate_inverse(torso_rotation, velocity)
base_ang_vel = quat_rotate_inverse(torso_rotation, ang_velocity)
# velocity tracking reward
lin_vel_error = torch.sum(torch.square(self.commands[:, :2] - base_lin_vel[:, :2]), dim=1)
ang_vel_error = torch.square(self.commands[:, 2] - base_ang_vel[:, 2])
rew_lin_vel_xy = torch.exp(-lin_vel_error / 0.25) * self.rew_scales["lin_vel_xy"]
rew_ang_vel_z = torch.exp(-ang_vel_error / 0.25) * self.rew_scales["ang_vel_z"]
rew_lin_vel_z = torch.square(base_lin_vel[:, 2]) * self.rew_scales["lin_vel_z"]
rew_joint_acc = torch.sum(torch.square(self.last_dof_vel - dof_vel), dim=1) * self.rew_scales["joint_acc"]
rew_action_rate = (
torch.sum(torch.square(self.last_actions - self.actions), dim=1) * self.rew_scales["action_rate"]
)
rew_cosmetic = (
torch.sum(torch.abs(dof_pos[:, 0:4] - self.default_dof_pos[:, 0:4]), dim=1) * self.rew_scales["cosmetic"]
)
total_reward = rew_lin_vel_xy + rew_ang_vel_z + rew_joint_acc + rew_action_rate + rew_cosmetic + rew_lin_vel_z
total_reward = torch.clip(total_reward, 0.0, None)
self.last_actions[:] = self.actions[:]
self.last_dof_vel[:] = dof_vel[:]
self.fallen_over = self._anymals.is_base_below_threshold(threshold=0.51, ground_heights=0.0)
total_reward[torch.nonzero(self.fallen_over)] = -1
self.rew_buf[:] = total_reward.detach()
def is_done(self) -> None:
# reset agents
time_out = self.progress_buf >= self.max_episode_length - 1
self.reset_buf[:] = time_out | self.fallen_over
| 14,350 | Python | 44.55873 | 118 | 0.630941 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/warp/humanoid.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.tasks.warp.shared.locomotion import LocomotionTask
from omniisaacgymenvs.robots.articulations.humanoid import Humanoid
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omniisaacgymenvs.tasks.base.rl_task import RLTaskWarp
import numpy as np
import torch
import warp as wp
import math
class HumanoidLocomotionTask(LocomotionTask):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_observations = 87
self._num_actions = 21
self._humanoid_positions = torch.tensor([0, 0, 1.34])
LocomotionTask.__init__(self, name=name, env=env)
return
def set_up_scene(self, scene) -> None:
self.get_humanoid()
RLTaskWarp.set_up_scene(self, scene)
self._humanoids = ArticulationView(prim_paths_expr="/World/envs/.*/Humanoid/torso", name="humanoid_view", reset_xform_properties=False)
scene.add(self._humanoids)
return
def get_humanoid(self):
humanoid = Humanoid(prim_path=self.default_zero_env_path + "/Humanoid", name="Humanoid", translation=self._humanoid_positions)
self._sim_config.apply_articulation_settings("Humanoid", get_prim_at_path(humanoid.prim_path),
self._sim_config.parse_actor_config("Humanoid"))
def get_robot(self):
return self._humanoids
def post_reset(self):
self.joint_gears = wp.array(
[
67.5000, # lower_waist
67.5000, # lower_waist
67.5000, # right_upper_arm
67.5000, # right_upper_arm
67.5000, # left_upper_arm
67.5000, # left_upper_arm
67.5000, # pelvis
45.0000, # right_lower_arm
45.0000, # left_lower_arm
45.0000, # right_thigh: x
135.0000, # right_thigh: y
45.0000, # right_thigh: z
45.0000, # left_thigh: x
135.0000, # left_thigh: y
45.0000, # left_thigh: z
90.0000, # right_knee
90.0000, # left_knee
22.5, # right_foot
22.5, # right_foot
22.5, # left_foot
22.5, # left_foot
],
device=self._device,
dtype=wp.float32
)
self.max_motor_effort = 135.0
self.motor_effort_ratio = wp.zeros(self._humanoids._num_dof, dtype=wp.float32, device=self._device)
wp.launch(compute_effort_ratio, dim=self._humanoids._num_dof,
inputs=[self.motor_effort_ratio, self.joint_gears, self.max_motor_effort], device=self._device)
dof_limits = self._humanoids.get_dof_limits().to(self._device)
self.dof_limits_lower = wp.zeros(self._humanoids._num_dof, dtype=wp.float32, device=self._device)
self.dof_limits_upper = wp.zeros(self._humanoids._num_dof, dtype=wp.float32, device=self._device)
wp.launch(parse_dof_limits, dim=self._humanoids._num_dof,
inputs=[self.dof_limits_lower, self.dof_limits_upper, dof_limits], device=self._device)
self.dof_at_limit_cost = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device)
force_links = ["left_foot", "right_foot"]
self._sensor_indices = wp.array([self._humanoids._body_indices[j] for j in force_links], device=self._device, dtype=wp.int32)
LocomotionTask.post_reset(self)
def get_dof_at_limit_cost(self):
wp.launch(get_dof_at_limit_cost, dim=(self._num_envs, self._humanoids._num_dof),
inputs=[self.dof_at_limit_cost, self.obs_buf, self.motor_effort_ratio, self.joints_at_limit_cost_scale])
return self.dof_at_limit_cost
@wp.kernel
def compute_effort_ratio(motor_effort_ratio: wp.array(dtype=wp.float32),
joint_gears: wp.array(dtype=wp.float32),
max_motor_effort: float):
tid = wp.tid()
motor_effort_ratio[tid] = joint_gears[tid] / max_motor_effort
@wp.kernel
def parse_dof_limits(dof_limits_lower: wp.array(dtype=wp.float32),
dof_limits_upper: wp.array(dtype=wp.float32),
dof_limits: wp.array(dtype=wp.float32, ndim=3)):
tid = wp.tid()
dof_limits_lower[tid] = dof_limits[0, tid, 0]
dof_limits_upper[tid] = dof_limits[0, tid, 1]
@wp.kernel
def get_dof_at_limit_cost(dof_at_limit_cost: wp.array(dtype=wp.float32),
obs_buf: wp.array(dtype=wp.float32, ndim=2),
motor_effort_ratio: wp.array(dtype=wp.float32),
joints_at_limit_cost_scale: float):
i, j = wp.tid()
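    # normalized DOF positions start at index 12 of the locomotion observation buffer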
dof_i = j + 12
scaled_cost = joints_at_limit_cost_scale * (wp.abs(obs_buf[i, dof_i]) - 0.98) / 0.02
cost = 0.0
if wp.abs(obs_buf[i, dof_i]) > 0.98:
cost = scaled_cost * motor_effort_ratio[j]
dof_at_limit_cost[i] = cost
| 6,686 | Python | 42.422078 | 143 | 0.639994 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/warp/ant.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.robots.articulations.ant import Ant
from omniisaacgymenvs.tasks.warp.shared.locomotion import LocomotionTask
from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate
from omni.isaac.core.utils.torch.maths import torch_rand_float, tensor_clamp, unscale
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omniisaacgymenvs.tasks.base.rl_task import RLTaskWarp
import numpy as np
import torch
import warp as wp
class AntLocomotionTask(LocomotionTask):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_observations = 60
self._num_actions = 8
self._ant_positions = wp.array([0, 0, 0.5], dtype=wp.float32, device="cpu")
LocomotionTask.__init__(self, name=name, env=env)
return
def set_up_scene(self, scene) -> None:
self.get_ant()
RLTaskWarp.set_up_scene(self, scene)
self._ants = ArticulationView(prim_paths_expr="/World/envs/.*/Ant/torso", name="ant_view", reset_xform_properties=False)
scene.add(self._ants)
return
def get_ant(self):
ant = Ant(prim_path=self.default_zero_env_path + "/Ant", name="Ant", translation=self._ant_positions)
self._sim_config.apply_articulation_settings("Ant", get_prim_at_path(ant.prim_path), self._sim_config.parse_actor_config("Ant"))
def get_robot(self):
return self._ants
def post_reset(self):
self.joint_gears = wp.array([15, 15, 15, 15, 15, 15, 15, 15], dtype=wp.float32, device=self._device)
dof_limits = self._ants.get_dof_limits().to(self._device)
self.dof_limits_lower = wp.zeros(self._ants._num_dof, dtype=wp.float32, device=self._device)
self.dof_limits_upper = wp.zeros(self._ants._num_dof, dtype=wp.float32, device=self._device)
wp.launch(parse_dof_limits, dim=self._ants._num_dof,
inputs=[self.dof_limits_lower, self.dof_limits_upper, dof_limits], device=self._device)
self.motor_effort_ratio = wp.array([1, 1, 1, 1, 1, 1, 1, 1], dtype=wp.float32, device=self._device)
self.dof_at_limit_cost = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device)
force_links = ["front_left_foot", "front_right_foot", "left_back_foot", "right_back_foot"]
self._sensor_indices = wp.array([self._ants._body_indices[j] for j in force_links], device=self._device, dtype=wp.int32)
LocomotionTask.post_reset(self)
def get_dof_at_limit_cost(self):
        wp.launch(get_dof_at_limit_cost, dim=(self._num_envs, self._ants._num_dof),
            inputs=[self.dof_at_limit_cost, self.obs_buf, self.motor_effort_ratio], device=self._device)
return self.dof_at_limit_cost
@wp.kernel
def get_dof_at_limit_cost(dof_at_limit_cost: wp.array(dtype=wp.float32),
obs_buf: wp.array(dtype=wp.float32, ndim=2),
motor_effort_ratio: wp.array(dtype=wp.float32)):
i, j = wp.tid()
dof_i = j + 12
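    # flat penalty of 1 when this joint's normalized position exceeds 99% of its range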
cost = 0.0
if wp.abs(obs_buf[i, dof_i]) > 0.99:
cost = 1.0
dof_at_limit_cost[i] = cost
@wp.kernel
def parse_dof_limits(dof_limits_lower: wp.array(dtype=wp.float32),
dof_limits_upper: wp.array(dtype=wp.float32),
dof_limits: wp.array(dtype=wp.float32, ndim=3)):
tid = wp.tid()
dof_limits_lower[tid] = dof_limits[0, tid, 0]
dof_limits_upper[tid] = dof_limits[0, tid, 1] | 5,221 | Python | 44.807017 | 136 | 0.685309 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/warp/cartpole.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.robots.articulations.cartpole import Cartpole
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
import omni.isaac.core.utils.warp as warp_utils
from omniisaacgymenvs.tasks.base.rl_task import RLTaskWarp
import numpy as np
import torch
import warp as wp
import math
class CartpoleTask(RLTaskWarp):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._cartpole_positions = wp.array([0.0, 0.0, 2.0], dtype=wp.float32)
self._reset_dist = self._task_cfg["env"]["resetDist"]
self._max_push_effort = self._task_cfg["env"]["maxEffort"]
self._max_episode_length = 500
self._num_observations = 4
self._num_actions = 1
RLTaskWarp.__init__(self, name, env)
return
def set_up_scene(self, scene) -> None:
self.get_cartpole()
super().set_up_scene(scene)
self._cartpoles = ArticulationView(prim_paths_expr="/World/envs/.*/Cartpole", name="cartpole_view", reset_xform_properties=False)
scene.add(self._cartpoles)
return
def get_cartpole(self):
cartpole = Cartpole(prim_path=self.default_zero_env_path + "/Cartpole", name="Cartpole", translation=self._cartpole_positions)
# applies articulation settings from the task configuration yaml file
self._sim_config.apply_articulation_settings("Cartpole", get_prim_at_path(cartpole.prim_path), self._sim_config.parse_actor_config("Cartpole"))
def get_observations(self) -> dict:
dof_pos = self._cartpoles.get_joint_positions(clone=False)
dof_vel = self._cartpoles.get_joint_velocities(clone=False)
wp.launch(get_observations, dim=self._num_envs,
inputs=[self.obs_buf, dof_pos, dof_vel, self._cart_dof_idx, self._pole_dof_idx], device=self._device)
observations = {
self._cartpoles.name: {
"obs_buf": self.obs_buf
}
}
return observations
def pre_physics_step(self, actions) -> None:
self.reset_idx()
actions_wp = wp.from_torch(actions)
forces = wp.zeros((self._cartpoles.count, self._cartpoles.num_dof), dtype=wp.float32, device=self._device)
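        # only the cart DOF receives an effort; the pole joint is left unactuated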
wp.launch(compute_forces, dim=self._num_envs,
inputs=[forces, actions_wp, self._cart_dof_idx, self._max_push_effort], device=self._device)
self._cartpoles.set_joint_efforts(forces)
def reset_idx(self):
reset_env_ids = wp.to_torch(self.reset_buf).nonzero(as_tuple=False).squeeze(-1)
num_resets = len(reset_env_ids)
indices = wp.from_torch(reset_env_ids.to(dtype=torch.int32), dtype=wp.int32)
if num_resets > 0:
wp.launch(reset_idx, num_resets,
inputs=[self.dof_pos, self.dof_vel, indices, self.reset_buf, self.progress_buf, self._cart_dof_idx, self._pole_dof_idx, self._rand_seed],
device=self._device)
# apply resets
self._cartpoles.set_joint_positions(self.dof_pos[indices], indices=indices)
self._cartpoles.set_joint_velocities(self.dof_vel[indices], indices=indices)
def post_reset(self):
self._cart_dof_idx = self._cartpoles.get_dof_index("cartJoint")
self._pole_dof_idx = self._cartpoles.get_dof_index("poleJoint")
self.dof_pos = wp.zeros((self._num_envs, self._cartpoles.num_dof), device=self._device, dtype=wp.float32)
self.dof_vel = wp.zeros((self._num_envs, self._cartpoles.num_dof), device=self._device, dtype=wp.float32)
# randomize all envs
self.reset_idx()
def calculate_metrics(self) -> None:
wp.launch(calculate_metrics, dim=self._num_envs,
inputs=[self.obs_buf, self.rew_buf, self._reset_dist], device=self._device)
def is_done(self) -> None:
wp.launch(is_done, dim=self._num_envs,
inputs=[self.obs_buf, self.reset_buf, self.progress_buf, self._reset_dist, self._max_episode_length],
device=self._device)
@wp.kernel
def reset_idx(dof_pos: wp.array(dtype=wp.float32, ndim=2),
dof_vel: wp.array(dtype=wp.float32, ndim=2),
indices: wp.array(dtype=wp.int32),
reset_buf: wp.array(dtype=wp.int32),
progress_buf: wp.array(dtype=wp.int32),
cart_dof_idx: int,
pole_dof_idx: int,
rand_seed: int):
i = wp.tid()
idx = indices[i]
rand_state = wp.rand_init(rand_seed, i)
# randomize DOF positions
dof_pos[idx, cart_dof_idx] = 1.0 * (1.0 - 2.0 * wp.randf(rand_state))
dof_pos[idx, pole_dof_idx] = 0.125 * warp_utils.PI * (1.0 - 2.0 * wp.randf(rand_state))
# randomize DOF velocities
dof_vel[idx, cart_dof_idx] = 0.5 * (1.0 - 2.0 * wp.randf(rand_state))
dof_vel[idx, pole_dof_idx] = 0.25 * warp_utils.PI * (1.0 - 2.0 * wp.randf(rand_state))
# bookkeeping
progress_buf[idx] = 0
reset_buf[idx] = 0
@wp.kernel
def compute_forces(forces: wp.array(dtype=wp.float32, ndim=2),
actions: wp.array(dtype=wp.float32, ndim=2),
cart_dof_idx: int,
max_push_effort: float):
i = wp.tid()
forces[i, cart_dof_idx] = max_push_effort * actions[i, 0]
@wp.kernel
def get_observations(obs_buf: wp.array(dtype=wp.float32, ndim=2),
dof_pos: wp.indexedarray(dtype=wp.float32, ndim=2),
dof_vel: wp.indexedarray(dtype=wp.float32, ndim=2),
cart_dof_idx: int,
pole_dof_idx: int):
i = wp.tid()
obs_buf[i, 0] = dof_pos[i, cart_dof_idx]
obs_buf[i, 1] = dof_vel[i, cart_dof_idx]
obs_buf[i, 2] = dof_pos[i, pole_dof_idx]
obs_buf[i, 3] = dof_vel[i, pole_dof_idx]
@wp.kernel
def calculate_metrics(obs_buf: wp.array(dtype=wp.float32, ndim=2),
rew_buf: wp.array(dtype=wp.float32),
reset_dist: float):
i = wp.tid()
cart_pos = obs_buf[i, 0]
cart_vel = obs_buf[i, 1]
pole_angle = obs_buf[i, 2]
pole_vel = obs_buf[i, 3]
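    # alive bonus of 1.0 minus penalties on pole angle and on cart/pole velocities;
    # out-of-bounds states are penalized below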
rew_buf[i] = 1.0 - pole_angle * pole_angle - 0.01 * wp.abs(cart_vel) - 0.005 * wp.abs(pole_vel)
if wp.abs(cart_pos) > reset_dist or wp.abs(pole_angle) > warp_utils.PI / 2.0:
rew_buf[i] = -2.0
@wp.kernel
def is_done(obs_buf: wp.array(dtype=wp.float32, ndim=2),
reset_buf: wp.array(dtype=wp.int32),
progress_buf: wp.array(dtype=wp.int32),
reset_dist: float,
max_episode_length: int):
i = wp.tid()
cart_pos = obs_buf[i, 0]
pole_pos = obs_buf[i, 2]
if wp.abs(cart_pos) > reset_dist or wp.abs(pole_pos) > warp_utils.PI / 2.0 or progress_buf[i] > max_episode_length:
reset_buf[i] = 1
else:
reset_buf[i] = 0
| 8,665 | Python | 38.390909 | 154 | 0.635661 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/warp/shared/locomotion.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import abstractmethod
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
import omni.isaac.core.utils.warp as warp_utils
from omniisaacgymenvs.tasks.base.rl_task import RLTaskWarp
import numpy as np
import torch
import warp as wp
class LocomotionTask(RLTaskWarp):
def __init__(
self,
name,
env,
offset=None
) -> None:
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["episodeLength"]
self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
self.angular_velocity_scale = self._task_cfg["env"]["angularVelocityScale"]
self.contact_force_scale = self._task_cfg["env"]["contactForceScale"]
self.power_scale = self._task_cfg["env"]["powerScale"]
self.heading_weight = self._task_cfg["env"]["headingWeight"]
self.up_weight = self._task_cfg["env"]["upWeight"]
self.actions_cost_scale = self._task_cfg["env"]["actionsCost"]
self.energy_cost_scale = self._task_cfg["env"]["energyCost"]
self.joints_at_limit_cost_scale = self._task_cfg["env"]["jointsAtLimitCost"]
self.death_cost = self._task_cfg["env"]["deathCost"]
self.termination_height = self._task_cfg["env"]["terminationHeight"]
self.alive_reward_scale = self._task_cfg["env"]["alive_reward_scale"]
self._num_sensors = 2
RLTaskWarp.__init__(self, name, env)
return
@abstractmethod
def set_up_scene(self, scene) -> None:
pass
@abstractmethod
def get_robot(self):
pass
def get_observations(self) -> dict:
torso_position, torso_rotation = self._robots.get_world_poses(clone=False)
velocities = self._robots.get_velocities(clone=False)
dof_pos = self._robots.get_joint_positions(clone=False)
dof_vel = self._robots.get_joint_velocities(clone=False)
# force sensors attached to the feet
sensor_force_torques = self._robots.get_measured_joint_forces()
wp.launch(get_observations, dim=self._num_envs,
inputs=[self.obs_buf, torso_position, torso_rotation, self._env_pos, velocities, dof_pos, dof_vel,
self.prev_potentials, self.potentials, self.dt, self.target,
self.basis_vec0, self.basis_vec1, self.dof_limits_lower, self.dof_limits_upper, self.dof_vel_scale,
sensor_force_torques, self.contact_force_scale, self.actions, self.angular_velocity_scale,
self._robots._num_dof, self._num_sensors, self._sensor_indices], device=self._device
)
observations = {
self._robots.name: {
"obs_buf": self.obs_buf
}
}
return observations
def pre_physics_step(self, actions) -> None:
self.reset_idx()
actions_wp = wp.from_torch(actions)
self.actions = actions_wp
wp.launch(compute_forces, dim=(self._num_envs, self._robots._num_dof),
inputs=[self.forces, self.actions, self.joint_gears, self.power_scale], device=self._device)
# applies joint torques
self._robots.set_joint_efforts(self.forces)
def reset_idx(self):
reset_env_ids = wp.to_torch(self.reset_buf).nonzero(as_tuple=False).squeeze(-1)
num_resets = len(reset_env_ids)
indices = wp.from_torch(reset_env_ids.to(dtype=torch.int32), dtype=wp.int32)
if num_resets > 0:
wp.launch(reset_dofs, dim=(num_resets, self._robots._num_dof),
inputs=[self.dof_pos, self.dof_vel, self.initial_dof_pos, self.dof_limits_lower, self.dof_limits_upper, indices, self._rand_seed],
device=self._device)
wp.launch(reset_idx, dim=num_resets,
inputs=[self.root_pos, self.root_rot, self.initial_root_pos, self.initial_root_rot, self._env_pos,
self.target, self.prev_potentials, self.potentials, self.dt,
self.reset_buf, self.progress_buf, indices, self._rand_seed],
device=self._device)
# apply resets
self._robots.set_joint_positions(self.dof_pos[indices], indices=indices)
self._robots.set_joint_velocities(self.dof_vel[indices], indices=indices)
self._robots.set_world_poses(self.root_pos[indices], self.root_rot[indices], indices=indices)
self._robots.set_velocities(self.root_vel[indices], indices=indices)
def post_reset(self):
self._robots = self.get_robot()
self.initial_root_pos, self.initial_root_rot = self._robots.get_world_poses()
self.initial_dof_pos = self._robots.get_joint_positions()
# initialize some data used later on
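        # basis_vec0 is the heading (forward) axis, basis_vec1 the up axis;
        # the walk target is a fixed point far along +x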
self.basis_vec0 = wp.vec3(1, 0, 0)
self.basis_vec1 = wp.vec3(0, 0, 1)
self.target = wp.vec3(1000, 0, 0)
self.dt = 1.0 / 60.0
# initialize potentials
self.potentials = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device)
self.prev_potentials = wp.zeros(self._num_envs, dtype=wp.float32, device=self._device)
wp.launch(init_potentials, dim=self._num_envs,
inputs=[self.potentials, self.prev_potentials, self.dt], device=self._device)
self.actions = wp.zeros((self.num_envs, self.num_actions), device=self._device, dtype=wp.float32)
self.forces = wp.zeros((self._num_envs, self._robots._num_dof), dtype=wp.float32, device=self._device)
self.dof_pos = wp.zeros((self.num_envs, self._robots._num_dof), device=self._device, dtype=wp.float32)
self.dof_vel = wp.zeros((self.num_envs, self._robots._num_dof), device=self._device, dtype=wp.float32)
self.root_pos = wp.zeros((self.num_envs, 3), device=self._device, dtype=wp.float32)
self.root_rot = wp.zeros((self.num_envs, 4), device=self._device, dtype=wp.float32)
self.root_vel = wp.zeros((self.num_envs, 6), device=self._device, dtype=wp.float32)
# randomize all env
self.reset_idx()
def calculate_metrics(self) -> None:
dof_at_limit_cost = self.get_dof_at_limit_cost()
wp.launch(calculate_metrics, dim=self._num_envs,
inputs=[self.rew_buf, self.obs_buf, self.actions, self.up_weight, self.heading_weight, self.potentials, self.prev_potentials,
self.actions_cost_scale, self.energy_cost_scale, self.termination_height,
self.death_cost, self._robots.num_dof, dof_at_limit_cost, self.alive_reward_scale, self.motor_effort_ratio],
device=self._device
)
def is_done(self) -> None:
wp.launch(is_done, dim=self._num_envs,
inputs=[self.obs_buf, self.termination_height, self.reset_buf, self.progress_buf, self._max_episode_length],
device=self._device
)
#####################################################################
###==========================warp kernels=========================###
#####################################################################
@wp.kernel
def init_potentials(potentials: wp.array(dtype=wp.float32),
prev_potentials: wp.array(dtype=wp.float32),
dt: float):
i = wp.tid()
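    # potential is -(distance to target)/dt; the target is initialized 1000 units along +x in post_reset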
potentials[i] = -1000.0 / dt
prev_potentials[i] = -1000.0 / dt
@wp.kernel
def reset_idx(root_pos: wp.array(dtype=wp.float32, ndim=2),
root_rot: wp.array(dtype=wp.float32, ndim=2),
initial_root_pos: wp.indexedarray(dtype=wp.float32, ndim=2),
initial_root_rot: wp.indexedarray(dtype=wp.float32, ndim=2),
env_pos: wp.array(dtype=wp.float32, ndim=2),
target: wp.vec3,
prev_potentials: wp.array(dtype=wp.float32),
potentials: wp.array(dtype=wp.float32),
dt: float,
reset_buf: wp.array(dtype=wp.int32),
progress_buf: wp.array(dtype=wp.int32),
indices: wp.array(dtype=wp.int32),
rand_seed: int):
i = wp.tid()
idx = indices[i]
# reset root states
for j in range(3):
root_pos[idx, j] = initial_root_pos[idx, j]
for j in range(4):
root_rot[idx, j] = initial_root_rot[idx, j]
# reset potentials
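    # the z-component cancels, so the potential only measures horizontal distance to the target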
to_target = target - wp.vec3(initial_root_pos[idx, 0] - env_pos[idx, 0], initial_root_pos[idx, 1] - env_pos[idx, 1], target[2])
prev_potentials[idx] = -wp.length(to_target) / dt
potentials[idx] = -wp.length(to_target) / dt
# bookkeeping
reset_buf[idx] = 0
progress_buf[idx] = 0
@wp.kernel
def reset_dofs(dof_pos: wp.array(dtype=wp.float32, ndim=2),
dof_vel: wp.array(dtype=wp.float32, ndim=2),
initial_dof_pos: wp.indexedarray(dtype=wp.float32, ndim=2),
dof_limits_lower: wp.array(dtype=wp.float32),
dof_limits_upper: wp.array(dtype=wp.float32),
indices: wp.array(dtype=wp.int32),
rand_seed: int):
i, j = wp.tid()
idx = indices[i]
rand_state = wp.rand_init(rand_seed, i * j + j)
# randomize DOF positions and velocities
dof_pos[idx, j] = wp.clamp(wp.randf(rand_state, -0.2, 0.2) + initial_dof_pos[idx, j], dof_limits_lower[j], dof_limits_upper[j])
dof_vel[idx, j] = wp.randf(rand_state, -0.1, 0.1)
@wp.kernel
def compute_forces(forces: wp.array(dtype=wp.float32, ndim=2),
actions: wp.array(dtype=wp.float32, ndim=2),
joint_gears: wp.array(dtype=wp.float32),
power_scale: float):
i, j = wp.tid()
forces[i, j] = actions[i, j] * joint_gears[j] * power_scale
@wp.func
def get_euler_xyz(q: wp.quat):
qx = 0
qy = 1
qz = 2
qw = 3
# roll (x-axis rotation)
sinr_cosp = 2.0 * (q[qw] * q[qx] + q[qy] * q[qz])
cosr_cosp = q[qw] * q[qw] - q[qx] * q[qx] - q[qy] * q[qy] + q[qz] * q[qz]
roll = wp.atan2(sinr_cosp, cosr_cosp)
# pitch (y-axis rotation)
sinp = 2.0 * (q[qw] * q[qy] - q[qz] * q[qx])
if wp.abs(sinp) >= 1:
pitch = warp_utils.PI / 2.0 * (wp.abs(sinp)/sinp)
else:
pitch = wp.asin(sinp)
# yaw (z-axis rotation)
siny_cosp = 2.0 * (q[qw] * q[qz] + q[qx] * q[qy])
cosy_cosp = q[qw] * q[qw] + q[qx] * q[qx] - q[qy] * q[qy] - q[qz] * q[qz]
yaw = wp.atan2(siny_cosp, cosy_cosp)
rpy = wp.vec3(roll % (2.0 * warp_utils.PI), pitch % (2.0 * warp_utils.PI), yaw % (2.0 * warp_utils.PI))
return rpy
@wp.func
def compute_up_vec(torso_rotation: wp.quat, vec1: wp.vec3):
up_vec = wp.quat_rotate(torso_rotation, vec1)
return up_vec
@wp.func
def compute_heading_vec(torso_rotation: wp.quat, vec0: wp.vec3):
heading_vec = wp.quat_rotate(torso_rotation, vec0)
return heading_vec
@wp.func
def unscale(x:float, lower:float, upper:float):
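    # map x from [lower, upper] to the normalized range [-1, 1]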
return (2.0 * x - upper - lower) / (upper - lower)
@wp.func
def normalize_angle(x: float):
return wp.atan2(wp.sin(x), wp.cos(x))
@wp.kernel
def get_observations(
obs_buf: wp.array(dtype=wp.float32, ndim=2),
torso_pos: wp.indexedarray(dtype=wp.float32, ndim=2),
torso_rot: wp.indexedarray(dtype=wp.float32, ndim=2),
env_pos: wp.array(dtype=wp.float32, ndim=2),
velocity: wp.indexedarray(dtype=wp.float32, ndim=2),
dof_pos: wp.indexedarray(dtype=wp.float32, ndim=2),
dof_vel: wp.indexedarray(dtype=wp.float32, ndim=2),
prev_potentials: wp.array(dtype=wp.float32),
potentials: wp.array(dtype=wp.float32),
dt: float,
target: wp.vec3,
basis_vec0: wp.vec3,
basis_vec1: wp.vec3,
dof_limits_lower: wp.array(dtype=wp.float32),
dof_limits_upper: wp.array(dtype=wp.float32),
dof_vel_scale: float,
sensor_force_torques: wp.indexedarray(dtype=wp.float32, ndim=3),
contact_force_scale: float,
actions: wp.array(dtype=wp.float32, ndim=2),
angular_velocity_scale: float,
num_dofs: int,
num_sensors: int,
sensor_indices: wp.array(dtype=wp.int32)
):
i = wp.tid()
torso_position_x = torso_pos[i, 0] - env_pos[i, 0]
torso_position_y = torso_pos[i, 1] - env_pos[i, 1]
torso_position_z = torso_pos[i, 2] - env_pos[i, 2]
to_target = target - wp.vec3(torso_position_x, torso_position_y, target[2])
prev_potentials[i] = potentials[i]
potentials[i] = -wp.length(to_target) / dt
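    # Isaac Sim returns quaternions as (w, x, y, z); wp.quat expects (x, y, z, w)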
torso_quat = wp.quat(torso_rot[i, 1], torso_rot[i, 2], torso_rot[i, 3], torso_rot[i, 0])
up_vec = compute_up_vec(torso_quat, basis_vec1)
up_proj = up_vec[2]
heading_vec = compute_heading_vec(torso_quat, basis_vec0)
target_dir = wp.normalize(to_target)
heading_proj = wp.dot(heading_vec, target_dir)
lin_velocity = wp.vec3(velocity[i, 0], velocity[i, 1], velocity[i, 2])
ang_velocity = wp.vec3(velocity[i, 3], velocity[i, 4], velocity[i, 5])
rpy = get_euler_xyz(torso_quat)
vel_loc = wp.quat_rotate_inv(torso_quat, lin_velocity)
angvel_loc = wp.quat_rotate_inv(torso_quat, ang_velocity)
walk_target_angle = wp.atan2(target[2] - torso_position_z, target[0] - torso_position_x)
angle_to_target = walk_target_angle - rpy[2] # yaw
# obs_buf shapes: 1, 3, 3, 1, 1, 1, 1, 1, num_dofs, num_dofs, num_sensors * 6, num_dofs
obs_offset = 0
obs_buf[i, 0] = torso_position_z
obs_offset = obs_offset + 1
for j in range(3):
obs_buf[i, j+obs_offset] = vel_loc[j]
obs_offset = obs_offset + 3
for j in range(3):
obs_buf[i, j+obs_offset] = angvel_loc[j] * angular_velocity_scale
obs_offset = obs_offset + 3
obs_buf[i, obs_offset+0] = normalize_angle(rpy[2])
obs_buf[i, obs_offset+1] = normalize_angle(rpy[0])
obs_buf[i, obs_offset+2] = normalize_angle(angle_to_target)
obs_buf[i, obs_offset+3] = up_proj
obs_buf[i, obs_offset+4] = heading_proj
obs_offset = obs_offset + 5
for j in range(num_dofs):
obs_buf[i, obs_offset+j] = unscale(dof_pos[i, j], dof_limits_lower[j], dof_limits_upper[j])
obs_offset = obs_offset + num_dofs
for j in range(num_dofs):
obs_buf[i, obs_offset+j] = dof_vel[i, j] * dof_vel_scale
obs_offset = obs_offset + num_dofs
for j in range(num_sensors):
sensor_idx = sensor_indices[j]
for k in range(6):
obs_buf[i, obs_offset+j*6+k] = sensor_force_torques[i, sensor_idx, k] * contact_force_scale
obs_offset = obs_offset + (num_sensors * 6)
for j in range(num_dofs):
obs_buf[i, obs_offset+j] = actions[i, j]
@wp.kernel
def is_done(
obs_buf: wp.array(dtype=wp.float32, ndim=2),
termination_height: float,
reset_buf: wp.array(dtype=wp.int32),
progress_buf: wp.array(dtype=wp.int32),
max_episode_length: int
):
i = wp.tid()
if obs_buf[i, 0] < termination_height or progress_buf[i] >= max_episode_length - 1:
reset_buf[i] = 1
else:
reset_buf[i] = 0
@wp.kernel
def calculate_metrics(
rew_buf: wp.array(dtype=wp.float32),
obs_buf: wp.array(dtype=wp.float32, ndim=2),
actions: wp.array(dtype=wp.float32, ndim=2),
up_weight: float,
heading_weight: float,
potentials: wp.array(dtype=wp.float32),
prev_potentials: wp.array(dtype=wp.float32),
actions_cost_scale: float,
energy_cost_scale: float,
termination_height: float,
death_cost: float,
num_dof: int,
dof_at_limit_cost: wp.array(dtype=wp.float32),
alive_reward_scale: float,
motor_effort_ratio: wp.array(dtype=wp.float32)
):
i = wp.tid()
# heading reward
if obs_buf[i, 11] > 0.8:
heading_reward = heading_weight
else:
heading_reward = heading_weight * obs_buf[i, 11] / 0.8
# aligning up axis of robot and environment
up_reward = 0.0
if obs_buf[i, 10] > 0.93:
up_reward = up_weight
# energy penalty for movement
actions_cost = float(0.0)
electricity_cost = float(0.0)
for j in range(num_dof):
actions_cost = actions_cost + (actions[i, j] * actions[i, j])
electricity_cost = electricity_cost + (wp.abs(actions[i, j] * obs_buf[i, 12+num_dof+j]) * motor_effort_ratio[j])
    # progress toward the target: change in potential since the previous step (the alive bonus is added separately below)
progress_reward = potentials[i] - prev_potentials[i]
total_reward = (
progress_reward
+ alive_reward_scale
+ up_reward
+ heading_reward
- actions_cost_scale * actions_cost
- energy_cost_scale * electricity_cost
- dof_at_limit_cost[i]
)
# adjust reward for fallen agents
if obs_buf[i, 0] < termination_height:
total_reward = death_cost
rew_buf[i] = total_reward
| 18,233 | Python | 39.52 | 147 | 0.624198 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/base/rl_task.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import asyncio
from abc import abstractmethod
import numpy as np
import omni.isaac.core.utils.warp.tensor as wp_utils
import omni.kit
import omni.usd
import torch
import warp as wp
from gym import spaces
from omni.isaac.cloner import GridCloner
from omni.isaac.core.tasks import BaseTask
from omni.isaac.core.utils.prims import define_prim
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.gym.tasks.rl_task import RLTaskInterface
from omniisaacgymenvs.utils.domain_randomization.randomize import Randomizer
from pxr import Gf, UsdGeom, UsdLux
class RLTask(RLTaskInterface):
"""This class provides a PyTorch RL-specific interface for setting up RL tasks.
It includes utilities for setting up RL task related parameters,
cloning environments, and data collection for RL algorithms.
"""
def __init__(self, name, env, offset=None) -> None:
"""Initializes RL parameters, cloner object, and buffers.
Args:
name (str): name of the task.
            env (VecEnvBase): an instance of the environment wrapper class with which to register the task.
offset (Optional[np.ndarray], optional): offset applied to all assets of the task. Defaults to None.
"""
BaseTask.__init__(self, name=name, offset=offset)
self._rand_seed = self._cfg["seed"]
# optimization flags for pytorch JIT
torch._C._jit_set_nvfuser_enabled(False)
self.test = self._cfg["test"]
self._device = self._cfg["sim_device"]
# set up randomizer for DR
self._dr_randomizer = Randomizer(self._cfg, self._task_cfg)
if self._dr_randomizer.randomize:
import omni.replicator.isaac as dr
self.dr = dr
# set up replicator for camera data collection
if self._task_cfg["sim"].get("enable_cameras", False):
from omni.replicator.isaac.scripts.writers.pytorch_writer import PytorchWriter
from omni.replicator.isaac.scripts.writers.pytorch_listener import PytorchListener
import omni.replicator.core as rep
self.rep = rep
self.PytorchWriter = PytorchWriter
self.PytorchListener = PytorchListener
print("Task Device:", self._device)
self.randomize_actions = False
self.randomize_observations = False
self.clip_obs = self._task_cfg["env"].get("clipObservations", np.Inf)
self.clip_actions = self._task_cfg["env"].get("clipActions", np.Inf)
self.rl_device = self._cfg.get("rl_device", "cuda:0")
self.control_frequency_inv = self._task_cfg["env"].get("controlFrequencyInv", 1)
self.rendering_interval = self._task_cfg.get("renderingInterval", 1)
print("RL device: ", self.rl_device)
self._env = env
if not hasattr(self, "_num_agents"):
self._num_agents = 1 # used for multi-agent environments
if not hasattr(self, "_num_states"):
self._num_states = 0
# initialize data spaces (defaults to gym.Box)
if not hasattr(self, "action_space"):
self.action_space = spaces.Box(
np.ones(self.num_actions, dtype=np.float32) * -1.0, np.ones(self.num_actions, dtype=np.float32) * 1.0
)
if not hasattr(self, "observation_space"):
self.observation_space = spaces.Box(
np.ones(self.num_observations, dtype=np.float32) * -np.Inf,
np.ones(self.num_observations, dtype=np.float32) * np.Inf,
)
if not hasattr(self, "state_space"):
self.state_space = spaces.Box(
np.ones(self.num_states, dtype=np.float32) * -np.Inf,
np.ones(self.num_states, dtype=np.float32) * np.Inf,
)
self.cleanup()
def cleanup(self) -> None:
"""Prepares torch buffers for RL data collection."""
# prepare tensors
self.obs_buf = torch.zeros((self._num_envs, self.num_observations), device=self._device, dtype=torch.float)
self.states_buf = torch.zeros((self._num_envs, self.num_states), device=self._device, dtype=torch.float)
self.rew_buf = torch.zeros(self._num_envs, device=self._device, dtype=torch.float)
self.reset_buf = torch.ones(self._num_envs, device=self._device, dtype=torch.long)
self.progress_buf = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
self.extras = {}
def set_up_scene(
self, scene, replicate_physics=True, collision_filter_global_paths=[], filter_collisions=True, copy_from_source=False
) -> None:
"""Clones environments based on value provided in task config and applies collision filters to mask
collisions across environments.
Args:
scene (Scene): Scene to add objects to.
replicate_physics (bool): Clone physics using PhysX API for better performance.
collision_filter_global_paths (list): Prim paths of global objects that should not have collision masked.
filter_collisions (bool): Mask off collision between environments.
copy_from_source (bool): Copy from source prim when cloning instead of inheriting.
"""
super().set_up_scene(scene)
self._cloner = GridCloner(spacing=self._env_spacing)
self._cloner.define_base_env(self.default_base_env_path)
stage = omni.usd.get_context().get_stage()
UsdGeom.Xform.Define(stage, self.default_zero_env_path)
if self._task_cfg["sim"].get("add_ground_plane", True):
self._ground_plane_path = "/World/defaultGroundPlane"
collision_filter_global_paths.append(self._ground_plane_path)
scene.add_default_ground_plane(prim_path=self._ground_plane_path)
prim_paths = self._cloner.generate_paths("/World/envs/env", self._num_envs)
self._env_pos = self._cloner.clone(
source_prim_path="/World/envs/env_0", prim_paths=prim_paths, replicate_physics=replicate_physics, copy_from_source=copy_from_source
)
self._env_pos = torch.tensor(np.array(self._env_pos), device=self._device, dtype=torch.float)
if filter_collisions:
self._cloner.filter_collisions(
self._env._world.get_physics_context().prim_path,
"/World/collisions",
prim_paths,
collision_filter_global_paths,
)
if self._env._render:
self.set_initial_camera_params(camera_position=[10, 10, 3], camera_target=[0, 0, 0])
if self._task_cfg["sim"].get("add_distant_light", True):
self._create_distant_light()
def set_initial_camera_params(self, camera_position=[10, 10, 3], camera_target=[0, 0, 0]):
from omni.kit.viewport.utility import get_viewport_from_window_name
from omni.kit.viewport.utility.camera_state import ViewportCameraState
viewport_api_2 = get_viewport_from_window_name("Viewport")
viewport_api_2.set_active_camera("/OmniverseKit_Persp")
camera_state = ViewportCameraState("/OmniverseKit_Persp", viewport_api_2)
camera_state.set_position_world(Gf.Vec3d(camera_position[0], camera_position[1], camera_position[2]), True)
camera_state.set_target_world(Gf.Vec3d(camera_target[0], camera_target[1], camera_target[2]), True)
def _create_distant_light(self, prim_path="/World/defaultDistantLight", intensity=5000):
stage = get_current_stage()
light = UsdLux.DistantLight.Define(stage, prim_path)
light.CreateIntensityAttr().Set(intensity)
def initialize_views(self, scene):
"""Optionally implemented by individual task classes to initialize views used in the task.
This API is required for the extension workflow, where tasks are expected to train on a pre-defined stage.
Args:
scene (Scene): Scene to remove existing views and initialize/add new views.
"""
self._cloner = GridCloner(spacing=self._env_spacing)
pos, _ = self._cloner.get_clone_transforms(self._num_envs)
self._env_pos = torch.tensor(np.array(pos), device=self._device, dtype=torch.float)
@property
def default_base_env_path(self):
"""Retrieves default path to the parent of all env prims.
Returns:
default_base_env_path(str): Defaults to "/World/envs".
"""
return "/World/envs"
@property
def default_zero_env_path(self):
"""Retrieves default path to the first env prim (index 0).
Returns:
default_zero_env_path(str): Defaults to "/World/envs/env_0".
"""
return f"{self.default_base_env_path}/env_0"
def reset(self):
"""Flags all environments for reset."""
self.reset_buf = torch.ones_like(self.reset_buf)
def post_physics_step(self):
"""Processes RL required computations for observations, states, rewards, resets, and extras.
Also maintains progress buffer for tracking step count per environment.
Returns:
obs_buf(torch.Tensor): Tensor of observation data.
rew_buf(torch.Tensor): Tensor of rewards data.
reset_buf(torch.Tensor): Tensor of resets/dones data.
extras(dict): Dictionary of extras data.
"""
self.progress_buf[:] += 1
if self._env._world.is_playing():
self.get_observations()
self.get_states()
self.calculate_metrics()
self.is_done()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
class RLTaskWarp(RLTask):
def cleanup(self) -> None:
"""Prepares torch buffers for RL data collection."""
# prepare tensors
self.obs_buf = wp.zeros((self._num_envs, self.num_observations), device=self._device, dtype=wp.float32)
self.states_buf = wp.zeros((self._num_envs, self.num_states), device=self._device, dtype=wp.float32)
self.rew_buf = wp.zeros(self._num_envs, device=self._device, dtype=wp.float32)
self.reset_buf = wp_utils.ones(self._num_envs, device=self._device, dtype=wp.int32)
self.progress_buf = wp.zeros(self._num_envs, device=self._device, dtype=wp.int32)
self.zero_states_buf_torch = torch.zeros(
(self._num_envs, self.num_states), device=self._device, dtype=torch.float32
)
self.extras = {}
def reset(self):
"""Flags all environments for reset."""
wp.launch(reset_progress, dim=self._num_envs, inputs=[self.progress_buf], device=self._device)
def post_physics_step(self):
"""Processes RL required computations for observations, states, rewards, resets, and extras.
Also maintains progress buffer for tracking step count per environment.
Returns:
obs_buf(torch.Tensor): Tensor of observation data.
rew_buf(torch.Tensor): Tensor of rewards data.
reset_buf(torch.Tensor): Tensor of resets/dones data.
extras(dict): Dictionary of extras data.
"""
wp.launch(increment_progress, dim=self._num_envs, inputs=[self.progress_buf], device=self._device)
if self._env._world.is_playing():
self.get_observations()
self.get_states()
self.calculate_metrics()
self.is_done()
self.get_extras()
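        # convert the warp buffers to torch tensors expected by the RL library interface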
obs_buf_torch = wp.to_torch(self.obs_buf)
rew_buf_torch = wp.to_torch(self.rew_buf)
reset_buf_torch = wp.to_torch(self.reset_buf)
return obs_buf_torch, rew_buf_torch, reset_buf_torch, self.extras
def get_states(self):
"""API for retrieving states buffer, used for asymmetric AC training.
Returns:
states_buf(torch.Tensor): States buffer.
"""
if self.num_states > 0:
return wp.to_torch(self.states_buf)
else:
return self.zero_states_buf_torch
def set_up_scene(self, scene) -> None:
"""Clones environments based on value provided in task config and applies collision filters to mask
collisions across environments.
Args:
scene (Scene): Scene to add objects to.
"""
super().set_up_scene(scene)
self._env_pos = wp.from_torch(self._env_pos)
@wp.kernel
def increment_progress(progress_buf: wp.array(dtype=wp.int32)):
i = wp.tid()
progress_buf[i] = progress_buf[i] + 1
@wp.kernel
def reset_progress(progress_buf: wp.array(dtype=wp.int32)):
i = wp.tid()
progress_buf[i] = 1
| 14,224 | Python | 41.717718 | 143 | 0.653121 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_base.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: base class.
Inherits Gym's RLTask class and abstract base class. Inherited by environment classes. Not directly executed.
Configuration defined in FactoryBase.yaml. Asset info defined in factory_asset_info_franka_table.yaml.
"""
import carb
import hydra
import math
import numpy as np
import torch
from omni.isaac.core.objects import FixedCuboid
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.factory_franka import FactoryFranka
from pxr import PhysxSchema, UsdPhysics
import omniisaacgymenvs.tasks.factory.factory_control as fc
from omniisaacgymenvs.tasks.factory.factory_schema_class_base import FactoryABCBase
from omniisaacgymenvs.tasks.factory.factory_schema_config_base import (
FactorySchemaConfigBase,
)
class FactoryBase(RLTask, FactoryABCBase):
def __init__(self, name, sim_config, env) -> None:
"""Initialize instance variables. Initialize RLTask superclass."""
# Set instance variables from base YAML
self._get_base_yaml_params()
self._env_spacing = self.cfg_base.env.env_spacing
# Set instance variables from task and train YAMLs
self._sim_config = sim_config
self._cfg = sim_config.config # CL args, task config, and train config
self._task_cfg = sim_config.task_config # just task config
self._num_envs = sim_config.task_config["env"]["numEnvs"]
self._num_observations = sim_config.task_config["env"]["numObservations"]
self._num_actions = sim_config.task_config["env"]["numActions"]
super().__init__(name, env)
def _get_base_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_base", node=FactorySchemaConfigBase)
config_path = (
"task/FactoryBase.yaml" # relative to Gym's Hydra search path (cfg dir)
)
self.cfg_base = hydra.compose(config_name=config_path)
self.cfg_base = self.cfg_base["task"] # strip superfluous nesting
asset_info_path = "../tasks/factory/yaml/factory_asset_info_franka_table.yaml" # relative to Gym's Hydra search path (cfg dir)
self.asset_info_franka_table = hydra.compose(config_name=asset_info_path)
self.asset_info_franka_table = self.asset_info_franka_table[""][""][""][
"tasks"
]["factory"][
"yaml"
] # strip superfluous nesting
def import_franka_assets(self, add_to_stage=True):
"""Set Franka and table asset options. Import assets."""
self._stage = get_current_stage()
if add_to_stage:
franka_translation = np.array([self.cfg_base.env.franka_depth, 0.0, 0.0])
franka_orientation = np.array([0.0, 0.0, 0.0, 1.0])
franka = FactoryFranka(
prim_path=self.default_zero_env_path + "/franka",
name="franka",
translation=franka_translation,
orientation=franka_orientation,
)
self._sim_config.apply_articulation_settings(
"franka",
get_prim_at_path(franka.prim_path),
self._sim_config.parse_actor_config("franka"),
)
for link_prim in franka.prim.GetChildren():
if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI):
rb = PhysxSchema.PhysxRigidBodyAPI.Get(
self._stage, link_prim.GetPrimPath()
)
rb.GetDisableGravityAttr().Set(True)
rb.GetRetainAccelerationsAttr().Set(False)
if self.cfg_base.sim.add_damping:
rb.GetLinearDampingAttr().Set(
1.0
) # default = 0.0; increased to improve stability
rb.GetMaxLinearVelocityAttr().Set(
1.0
) # default = 1000.0; reduced to prevent CUDA errors
rb.GetAngularDampingAttr().Set(
5.0
) # default = 0.5; increased to improve stability
rb.GetMaxAngularVelocityAttr().Set(
2 / math.pi * 180
) # default = 64.0; reduced to prevent CUDA errors
else:
rb.GetLinearDampingAttr().Set(0.0)
rb.GetMaxLinearVelocityAttr().Set(1000.0)
rb.GetAngularDampingAttr().Set(0.5)
rb.GetMaxAngularVelocityAttr().Set(64 / math.pi * 180)
table_translation = np.array(
[0.0, 0.0, self.cfg_base.env.table_height * 0.5]
)
table_orientation = np.array([1.0, 0.0, 0.0, 0.0])
table = FixedCuboid(
prim_path=self.default_zero_env_path + "/table",
name="table",
translation=table_translation,
orientation=table_orientation,
scale=np.array(
[
self.asset_info_franka_table.table_depth,
self.asset_info_franka_table.table_width,
self.cfg_base.env.table_height,
]
),
size=1.0,
color=np.array([0, 0, 0]),
)
self.parse_controller_spec(add_to_stage=add_to_stage)
def acquire_base_tensors(self):
"""Acquire tensors."""
self.num_dofs = 9
self.env_pos = self._env_pos
self.dof_pos = torch.zeros((self.num_envs, self.num_dofs), device=self.device)
self.dof_vel = torch.zeros((self.num_envs, self.num_dofs), device=self.device)
self.dof_torque = torch.zeros(
(self.num_envs, self.num_dofs), device=self.device
)
self.fingertip_contact_wrench = torch.zeros(
(self.num_envs, 6), device=self.device
)
self.ctrl_target_fingertip_midpoint_pos = torch.zeros(
(self.num_envs, 3), device=self.device
)
self.ctrl_target_fingertip_midpoint_quat = torch.zeros(
(self.num_envs, 4), device=self.device
)
self.ctrl_target_dof_pos = torch.zeros(
(self.num_envs, self.num_dofs), device=self.device
)
self.ctrl_target_gripper_dof_pos = torch.zeros(
(self.num_envs, 2), device=self.device
)
self.ctrl_target_fingertip_contact_wrench = torch.zeros(
(self.num_envs, 6), device=self.device
)
self.prev_actions = torch.zeros(
(self.num_envs, self.num_actions), device=self.device
)
def refresh_base_tensors(self):
"""Refresh tensors."""
if not self._env._world.is_playing():
return
self.dof_pos = self.frankas.get_joint_positions(clone=False)
self.dof_vel = self.frankas.get_joint_velocities(clone=False)
# Jacobian shape: [4, 11, 6, 9] (root has no Jacobian)
self.franka_jacobian = self.frankas.get_jacobians()
self.franka_mass_matrix = self.frankas.get_mass_matrices(clone=False)
self.arm_dof_pos = self.dof_pos[:, 0:7]
self.arm_mass_matrix = self.franka_mass_matrix[
:, 0:7, 0:7
] # for Franka arm (not gripper)
self.hand_pos, self.hand_quat = self.frankas._hands.get_world_poses(clone=False)
self.hand_pos -= self.env_pos
hand_velocities = self.frankas._hands.get_velocities(clone=False)
self.hand_linvel = hand_velocities[:, 0:3]
self.hand_angvel = hand_velocities[:, 3:6]
(
self.left_finger_pos,
self.left_finger_quat,
) = self.frankas._lfingers.get_world_poses(clone=False)
self.left_finger_pos -= self.env_pos
left_finger_velocities = self.frankas._lfingers.get_velocities(clone=False)
self.left_finger_linvel = left_finger_velocities[:, 0:3]
self.left_finger_angvel = left_finger_velocities[:, 3:6]
self.left_finger_jacobian = self.franka_jacobian[:, 8, 0:6, 0:7]
left_finger_forces = self.frankas._lfingers.get_net_contact_forces(clone=False)
self.left_finger_force = left_finger_forces[:, 0:3]
(
self.right_finger_pos,
self.right_finger_quat,
) = self.frankas._rfingers.get_world_poses(clone=False)
self.right_finger_pos -= self.env_pos
right_finger_velocities = self.frankas._rfingers.get_velocities(clone=False)
self.right_finger_linvel = right_finger_velocities[:, 0:3]
self.right_finger_angvel = right_finger_velocities[:, 3:6]
self.right_finger_jacobian = self.franka_jacobian[:, 9, 0:6, 0:7]
right_finger_forces = self.frankas._rfingers.get_net_contact_forces(clone=False)
self.right_finger_force = right_finger_forces[:, 0:3]
self.gripper_dof_pos = self.dof_pos[:, 7:9]
(
self.fingertip_centered_pos,
self.fingertip_centered_quat,
) = self.frankas._fingertip_centered.get_world_poses(clone=False)
self.fingertip_centered_pos -= self.env_pos
fingertip_centered_velocities = self.frankas._fingertip_centered.get_velocities(
clone=False
)
self.fingertip_centered_linvel = fingertip_centered_velocities[:, 0:3]
self.fingertip_centered_angvel = fingertip_centered_velocities[:, 3:6]
self.fingertip_centered_jacobian = self.franka_jacobian[:, 10, 0:6, 0:7]
self.finger_midpoint_pos = (self.left_finger_pos + self.right_finger_pos) / 2
self.fingertip_midpoint_pos = fc.translate_along_local_z(
pos=self.finger_midpoint_pos,
quat=self.hand_quat,
offset=self.asset_info_franka_table.franka_finger_length,
device=self.device,
)
self.fingertip_midpoint_quat = self.fingertip_centered_quat # always equal
# TODO: Add relative velocity term (see https://dynamicsmotioncontrol487379916.files.wordpress.com/2020/11/21-me258pointmovingrigidbody.pdf)
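        # v_mid = v_center + omega x (p_mid - p_center): rigid-body velocity transfer
        # from the fingertip-centered frame to the fingertip midpoint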
self.fingertip_midpoint_linvel = self.fingertip_centered_linvel + torch.cross(
self.fingertip_centered_angvel,
(self.fingertip_midpoint_pos - self.fingertip_centered_pos),
dim=1,
)
# From sum of angular velocities (https://physics.stackexchange.com/questions/547698/understanding-addition-of-angular-velocity),
# angular velocity of midpoint w.r.t. world is equal to sum of
# angular velocity of midpoint w.r.t. hand and angular velocity of hand w.r.t. world.
# Midpoint is in sliding contact (i.e., linear relative motion) with hand; angular velocity of midpoint w.r.t. hand is zero.
# Thus, angular velocity of midpoint w.r.t. world is equal to angular velocity of hand w.r.t. world.
self.fingertip_midpoint_angvel = self.fingertip_centered_angvel # always equal
self.fingertip_midpoint_jacobian = (
self.left_finger_jacobian + self.right_finger_jacobian
) * 0.5
def parse_controller_spec(self, add_to_stage):
"""Parse controller specification into lower-level controller configuration."""
cfg_ctrl_keys = {
"num_envs",
"jacobian_type",
"gripper_prop_gains",
"gripper_deriv_gains",
"motor_ctrl_mode",
"gain_space",
"ik_method",
"joint_prop_gains",
"joint_deriv_gains",
"do_motion_ctrl",
"task_prop_gains",
"task_deriv_gains",
"do_inertial_comp",
"motion_ctrl_axes",
"do_force_ctrl",
"force_ctrl_method",
"wrench_prop_gains",
"force_ctrl_axes",
}
self.cfg_ctrl = {cfg_ctrl_key: None for cfg_ctrl_key in cfg_ctrl_keys}
self.cfg_ctrl["num_envs"] = self.num_envs
self.cfg_ctrl["jacobian_type"] = self.cfg_task.ctrl.all.jacobian_type
self.cfg_ctrl["gripper_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.all.gripper_prop_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["gripper_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.all.gripper_deriv_gains, device=self.device
).repeat((self.num_envs, 1))
ctrl_type = self.cfg_task.ctrl.ctrl_type
if ctrl_type == "gym_default":
self.cfg_ctrl["motor_ctrl_mode"] = "gym"
self.cfg_ctrl["gain_space"] = "joint"
self.cfg_ctrl["ik_method"] = self.cfg_task.ctrl.gym_default.ik_method
self.cfg_ctrl["joint_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.gym_default.joint_prop_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["joint_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.gym_default.joint_deriv_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["gripper_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.gym_default.gripper_prop_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["gripper_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.gym_default.gripper_deriv_gains, device=self.device
).repeat((self.num_envs, 1))
elif ctrl_type == "joint_space_ik":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "joint"
self.cfg_ctrl["ik_method"] = self.cfg_task.ctrl.joint_space_ik.ik_method
self.cfg_ctrl["joint_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.joint_space_ik.joint_prop_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["joint_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.joint_space_ik.joint_deriv_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_inertial_comp"] = False
elif ctrl_type == "joint_space_id":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "joint"
self.cfg_ctrl["ik_method"] = self.cfg_task.ctrl.joint_space_id.ik_method
self.cfg_ctrl["joint_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.joint_space_id.joint_prop_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["joint_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.joint_space_id.joint_deriv_gains, device=self.device
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_inertial_comp"] = True
elif ctrl_type == "task_space_impedance":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "task"
self.cfg_ctrl["do_motion_ctrl"] = True
self.cfg_ctrl["task_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.task_space_impedance.task_prop_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["task_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.task_space_impedance.task_deriv_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_inertial_comp"] = False
self.cfg_ctrl["motion_ctrl_axes"] = torch.tensor(
self.cfg_task.ctrl.task_space_impedance.motion_ctrl_axes,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_force_ctrl"] = False
elif ctrl_type == "operational_space_motion":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "task"
self.cfg_ctrl["do_motion_ctrl"] = True
self.cfg_ctrl["task_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.operational_space_motion.task_prop_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["task_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.operational_space_motion.task_deriv_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_inertial_comp"] = True
self.cfg_ctrl["motion_ctrl_axes"] = torch.tensor(
self.cfg_task.ctrl.operational_space_motion.motion_ctrl_axes,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_force_ctrl"] = False
elif ctrl_type == "open_loop_force":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "task"
self.cfg_ctrl["do_motion_ctrl"] = False
self.cfg_ctrl["do_force_ctrl"] = True
self.cfg_ctrl["force_ctrl_method"] = "open"
self.cfg_ctrl["force_ctrl_axes"] = torch.tensor(
self.cfg_task.ctrl.open_loop_force.force_ctrl_axes, device=self.device
).repeat((self.num_envs, 1))
elif ctrl_type == "closed_loop_force":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "task"
self.cfg_ctrl["do_motion_ctrl"] = False
self.cfg_ctrl["do_force_ctrl"] = True
self.cfg_ctrl["force_ctrl_method"] = "closed"
self.cfg_ctrl["wrench_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.closed_loop_force.wrench_prop_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["force_ctrl_axes"] = torch.tensor(
self.cfg_task.ctrl.closed_loop_force.force_ctrl_axes, device=self.device
).repeat((self.num_envs, 1))
elif ctrl_type == "hybrid_force_motion":
self.cfg_ctrl["motor_ctrl_mode"] = "manual"
self.cfg_ctrl["gain_space"] = "task"
self.cfg_ctrl["do_motion_ctrl"] = True
self.cfg_ctrl["task_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.hybrid_force_motion.task_prop_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["task_deriv_gains"] = torch.tensor(
self.cfg_task.ctrl.hybrid_force_motion.task_deriv_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_inertial_comp"] = True
self.cfg_ctrl["motion_ctrl_axes"] = torch.tensor(
self.cfg_task.ctrl.hybrid_force_motion.motion_ctrl_axes,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["do_force_ctrl"] = True
self.cfg_ctrl["force_ctrl_method"] = "closed"
self.cfg_ctrl["wrench_prop_gains"] = torch.tensor(
self.cfg_task.ctrl.hybrid_force_motion.wrench_prop_gains,
device=self.device,
).repeat((self.num_envs, 1))
self.cfg_ctrl["force_ctrl_axes"] = torch.tensor(
self.cfg_task.ctrl.hybrid_force_motion.force_ctrl_axes,
device=self.device,
).repeat((self.num_envs, 1))
if add_to_stage:
if self.cfg_ctrl["motor_ctrl_mode"] == "gym":
for i in range(7):
joint_prim = self._stage.GetPrimAtPath(
self.default_zero_env_path
+ f"/franka/panda_link{i}/panda_joint{i+1}"
)
drive = UsdPhysics.DriveAPI.Apply(joint_prim, "angular")
drive.GetStiffnessAttr().Set(
self.cfg_ctrl["joint_prop_gains"][0, i].item() * np.pi / 180
)
drive.GetDampingAttr().Set(
self.cfg_ctrl["joint_deriv_gains"][0, i].item() * np.pi / 180
)
for i in range(2):
joint_prim = self._stage.GetPrimAtPath(
self.default_zero_env_path
+ f"/franka/panda_hand/panda_finger_joint{i+1}"
)
drive = UsdPhysics.DriveAPI.Apply(joint_prim, "linear")
drive.GetStiffnessAttr().Set(
self.cfg_ctrl["gripper_deriv_gains"][0, i].item()
)
drive.GetDampingAttr().Set(
self.cfg_ctrl["gripper_deriv_gains"][0, i].item()
)
elif self.cfg_ctrl["motor_ctrl_mode"] == "manual":
for i in range(7):
joint_prim = self._stage.GetPrimAtPath(
self.default_zero_env_path
+ f"/franka/panda_link{i}/panda_joint{i+1}"
)
joint_prim.RemoveAPI(UsdPhysics.DriveAPI, "angular")
drive = UsdPhysics.DriveAPI.Apply(joint_prim, "None")
drive.GetStiffnessAttr().Set(0.0)
drive.GetDampingAttr().Set(0.0)
for i in range(2):
joint_prim = self._stage.GetPrimAtPath(
self.default_zero_env_path
+ f"/franka/panda_hand/panda_finger_joint{i+1}"
)
joint_prim.RemoveAPI(UsdPhysics.DriveAPI, "linear")
drive = UsdPhysics.DriveAPI.Apply(joint_prim, "None")
drive.GetStiffnessAttr().Set(0.0)
drive.GetDampingAttr().Set(0.0)
def generate_ctrl_signals(self):
"""Get Jacobian. Set Franka DOF position targets or DOF torques."""
# Get desired Jacobian
if self.cfg_ctrl["jacobian_type"] == "geometric":
self.fingertip_midpoint_jacobian_tf = self.fingertip_midpoint_jacobian
elif self.cfg_ctrl["jacobian_type"] == "analytic":
self.fingertip_midpoint_jacobian_tf = fc.get_analytic_jacobian(
fingertip_quat=self.fingertip_quat,
fingertip_jacobian=self.fingertip_midpoint_jacobian,
num_envs=self.num_envs,
device=self.device,
)
# Set PD joint pos target or joint torque
if self.cfg_ctrl["motor_ctrl_mode"] == "gym":
self._set_dof_pos_target()
elif self.cfg_ctrl["motor_ctrl_mode"] == "manual":
self._set_dof_torque()
def _set_dof_pos_target(self):
"""Set Franka DOF position target to move fingertips towards target pose."""
self.ctrl_target_dof_pos = fc.compute_dof_pos_target(
cfg_ctrl=self.cfg_ctrl,
arm_dof_pos=self.arm_dof_pos,
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
jacobian=self.fingertip_midpoint_jacobian_tf,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos,
device=self.device,
)
self.frankas.set_joint_position_targets(positions=self.ctrl_target_dof_pos)
def _set_dof_torque(self):
"""Set Franka DOF torque to move fingertips towards target pose."""
self.dof_torque = fc.compute_dof_torque(
cfg_ctrl=self.cfg_ctrl,
dof_pos=self.dof_pos,
dof_vel=self.dof_vel,
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
fingertip_midpoint_linvel=self.fingertip_midpoint_linvel,
fingertip_midpoint_angvel=self.fingertip_midpoint_angvel,
left_finger_force=self.left_finger_force,
right_finger_force=self.right_finger_force,
jacobian=self.fingertip_midpoint_jacobian_tf,
arm_mass_matrix=self.arm_mass_matrix,
ctrl_target_gripper_dof_pos=self.ctrl_target_gripper_dof_pos,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
ctrl_target_fingertip_contact_wrench=self.ctrl_target_fingertip_contact_wrench,
device=self.device,
)
self.frankas.set_joint_efforts(efforts=self.dof_torque)
def enable_gravity(self, gravity_mag):
"""Enable gravity."""
gravity = [0.0, 0.0, -gravity_mag]
self._env._world._physics_sim_view.set_gravity(
carb.Float3(gravity[0], gravity[1], gravity[2])
)
def disable_gravity(self):
"""Disable gravity."""
gravity = [0.0, 0.0, 0.0]
self._env._world._physics_sim_view.set_gravity(
carb.Float3(gravity[0], gravity[1], gravity[2])
)
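# ---------------------------------------------------------------------------
# Hedged sketch, not part of the original controller code: a toy illustration
# of how the two motor_ctrl_mode conventions above divide the work. The dict
# key mirrors cfg_ctrl, but `_example_ctrl_dispatch` and its return strings
# are illustrative assumptions, not APIs of this repository.
def _example_ctrl_dispatch(motor_ctrl_mode="manual"):
    example_cfg_ctrl = {"motor_ctrl_mode": motor_ctrl_mode}
    if example_cfg_ctrl["motor_ctrl_mode"] == "gym":
        # PD gains live in the USD joint drives; the task only needs to set
        # DOF position targets (see _set_dof_pos_target above).
        return "set_joint_position_targets"
    # Drives are zeroed out, so the torques from fc.compute_dof_torque are the
    # only actuation (see _set_dof_torque above).
    return "set_joint_efforts"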
| 26,838 | Python | 45.921329 | 148 | 0.588419 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_schema_config_task.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: schema for task class configurations.
Used by Hydra. Defines template for task class YAML files. Not enforced.
"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class Sim:
use_gpu_pipeline: bool # use GPU pipeline
dt: float # timestep size
gravity: list[float] # gravity vector
@dataclass
class Env:
numObservations: int # number of observations per env; camel case required by VecTask
numActions: int # number of actions per env; camel case required by VecTask
numEnvs: int # number of envs; camel case required by VecTask
@dataclass
class Randomize:
franka_arm_initial_dof_pos: list[float] # initial Franka arm DOF position (7)
@dataclass
class RL:
pos_action_scale: list[
float
] # scale on pos displacement targets (3), to convert [-1, 1] to +- x m
rot_action_scale: list[
float
] # scale on rot displacement targets (3), to convert [-1, 1] to +- x rad
force_action_scale: list[
float
] # scale on force targets (3), to convert [-1, 1] to +- x N
torque_action_scale: list[
float
] # scale on torque targets (3), to convert [-1, 1] to +- x Nm
clamp_rot: bool # clamp small values of rotation actions to zero
clamp_rot_thresh: float # smallest acceptable value
max_episode_length: int # max number of timesteps in each episode
@dataclass
class All:
jacobian_type: str # map between joint space and task space via geometric or analytic Jacobian {geometric, analytic}
gripper_prop_gains: list[
float
] # proportional gains on left and right Franka gripper finger DOF position (2)
gripper_deriv_gains: list[
float
] # derivative gains on left and right Franka gripper finger DOF position (2)
@dataclass
class GymDefault:
joint_prop_gains: list[int] # proportional gains on Franka arm DOF position (7)
joint_deriv_gains: list[int] # derivative gains on Franka arm DOF position (7)
@dataclass
class JointSpaceIK:
ik_method: str # use Jacobian pseudoinverse, Jacobian transpose, damped least squares or adaptive SVD {pinv, trans, dls, svd}
joint_prop_gains: list[int]
joint_deriv_gains: list[int]
@dataclass
class JointSpaceID:
ik_method: str
joint_prop_gains: list[int]
joint_deriv_gains: list[int]
@dataclass
class TaskSpaceImpedance:
motion_ctrl_axes: list[bool] # axes for which to enable motion control {0, 1} (6)
task_prop_gains: list[float] # proportional gains on Franka fingertip pose (6)
task_deriv_gains: list[float] # derivative gains on Franka fingertip pose (6)
@dataclass
class OperationalSpaceMotion:
motion_ctrl_axes: list[bool]
task_prop_gains: list[float]
task_deriv_gains: list[float]
@dataclass
class OpenLoopForce:
force_ctrl_axes: list[bool] # axes for which to enable force control {0, 1} (6)
@dataclass
class ClosedLoopForce:
force_ctrl_axes: list[bool]
wrench_prop_gains: list[float] # proportional gains on Franka finger force (6)
@dataclass
class HybridForceMotion:
motion_ctrl_axes: list[bool]
task_prop_gains: list[float]
task_deriv_gains: list[float]
force_ctrl_axes: list[bool]
wrench_prop_gains: list[float]
@dataclass
class Ctrl:
ctrl_type: str # {gym_default,
# joint_space_ik,
# joint_space_id,
# task_space_impedance,
# operational_space_motion,
# open_loop_force,
# closed_loop_force,
# hybrid_force_motion}
gym_default: GymDefault
joint_space_ik: JointSpaceIK
joint_space_id: JointSpaceID
task_space_impedance: TaskSpaceImpedance
operational_space_motion: OperationalSpaceMotion
open_loop_force: OpenLoopForce
closed_loop_force: ClosedLoopForce
hybrid_force_motion: HybridForceMotion
@dataclass
class FactorySchemaConfigTask:
name: str
physics_engine: str
sim: Sim
env: Env
rl: RL
ctrl: Ctrl
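# ---------------------------------------------------------------------------
# Hedged sketch (illustration only): how the schema above is registered with
# Hydra's ConfigStore, mirroring the `cs.store(...)` calls in the task
# classes. `_example_register_schema` is an assumed helper name, not part of
# this repository's API.
def _example_register_schema():
    from hydra.core.config_store import ConfigStore
    cs = ConfigStore.instance()
    # Makes "factory_schema_config_task" resolvable as a structured config
    # that task YAML files may (optionally) conform to.
    cs.store(name="factory_schema_config_task", node=FactorySchemaConfigTask)
    return cs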
| 5,517 | Python | 30.895954 | 130 | 0.719413 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_task_nut_bolt_place.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: Class for nut-bolt place task.
Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with
PYTHON_PATH omniisaacgymenvs/scripts/rlgames_train.py task=FactoryTaskNutBoltPlace
"""
import asyncio
import hydra
import math
import omegaconf
import torch
from typing import Tuple
import omni.kit
from omni.isaac.core.simulation_context import SimulationContext
import omni.isaac.core.utils.torch as torch_utils
from omni.isaac.core.utils.torch.transformations import tf_combine
import omniisaacgymenvs.tasks.factory.factory_control as fc
from omniisaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt
from omniisaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from omniisaacgymenvs.tasks.factory.factory_schema_config_task import (
FactorySchemaConfigTask,
)
class FactoryTaskNutBoltPlace(FactoryEnvNutBolt, FactoryABCTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
"""Initialize environment superclass. Initialize instance variables."""
super().__init__(name, sim_config, env)
self._get_task_yaml_params()
def _get_task_yaml_params(self) -> None:
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_task", node=FactorySchemaConfigTask)
self.cfg_task = omegaconf.OmegaConf.create(self._task_cfg)
self.max_episode_length = (
self.cfg_task.rl.max_episode_length
) # required instance var for VecTask
asset_info_path = "../tasks/factory/yaml/factory_asset_info_nut_bolt.yaml" # relative to Gym's Hydra search path (cfg dir)
self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
self.asset_info_nut_bolt = self.asset_info_nut_bolt[""][""][""]["tasks"][
"factory"
][
"yaml"
] # strip superfluous nesting
ppo_path = "train/FactoryTaskNutBoltPlacePPO.yaml" # relative to Gym's Hydra search path (cfg dir)
self.cfg_ppo = hydra.compose(config_name=ppo_path)
self.cfg_ppo = self.cfg_ppo["train"] # strip superfluous nesting
def post_reset(self) -> None:
"""Reset the world. Called only once, before simulation begins."""
if self.cfg_task.sim.disable_gravity:
self.disable_gravity()
self.acquire_base_tensors()
self._acquire_task_tensors()
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
# Reset all envs
indices = torch.arange(self.num_envs, dtype=torch.int64, device=self.device)
asyncio.ensure_future(
self.reset_idx_async(indices, randomize_gripper_pose=False)
)
def _acquire_task_tensors(self) -> None:
"""Acquire tensors."""
# Nut-bolt tensors
self.nut_base_pos_local = self.bolt_head_heights * torch.tensor(
[0.0, 0.0, 1.0], device=self.device
).repeat((self.num_envs, 1))
bolt_heights = self.bolt_head_heights + self.bolt_shank_lengths
self.bolt_tip_pos_local = bolt_heights * torch.tensor(
[0.0, 0.0, 1.0], device=self.device
).repeat((self.num_envs, 1))
# Keypoint tensors
self.keypoint_offsets = (
self._get_keypoint_offsets(self.cfg_task.rl.num_keypoints)
* self.cfg_task.rl.keypoint_scale
)
self.keypoints_nut = torch.zeros(
(self.num_envs, self.cfg_task.rl.num_keypoints, 3),
dtype=torch.float32,
device=self.device,
)
self.keypoints_bolt = torch.zeros_like(self.keypoints_nut, device=self.device)
self.identity_quat = (
torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
self.actions = torch.zeros(
(self.num_envs, self.num_actions), device=self.device
)
def pre_physics_step(self, actions) -> None:
"""Reset environments. Apply actions from policy. Simulation step called after this method."""
if not self._env._world.is_playing():
return
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids, randomize_gripper_pose=True)
self.actions = actions.clone().to(
self.device
) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(
actions=self.actions, ctrl_target_gripper_dof_pos=0.0, do_scale=True
)
async def pre_physics_step_async(self, actions) -> None:
"""Reset environments. Apply actions from policy. Simulation step called after this method."""
if not self._env._world.is_playing():
return
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
await self.reset_idx_async(env_ids, randomize_gripper_pose=True)
self.actions = actions.clone().to(
self.device
) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(
actions=self.actions,
ctrl_target_gripper_dof_pos=0.0,
do_scale=True,
)
def reset_idx(self, env_ids, randomize_gripper_pose) -> None:
"""Reset specified environments."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
# Close gripper onto nut
self.disable_gravity() # to prevent nut from falling
self._close_gripper(sim_steps=self.cfg_task.env.num_gripper_close_sim_steps)
self.enable_gravity(gravity_mag=self.cfg_task.sim.gravity_mag)
if randomize_gripper_pose:
self._randomize_gripper_pose(
env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps
)
self._reset_buffers(env_ids)
async def reset_idx_async(self, env_ids, randomize_gripper_pose) -> None:
"""Reset specified environments."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
# Close gripper onto nut
self.disable_gravity() # to prevent nut from falling
await self._close_gripper_async(
sim_steps=self.cfg_task.env.num_gripper_close_sim_steps
)
self.enable_gravity(gravity_mag=self.cfg_task.sim.gravity_mag)
if randomize_gripper_pose:
await self._randomize_gripper_pose_async(
env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps
)
self._reset_buffers(env_ids)
def _reset_franka(self, env_ids) -> None:
"""Reset DOF states and DOF targets of Franka."""
self.dof_pos[env_ids] = torch.cat(
(
torch.tensor(
self.cfg_task.randomize.franka_arm_initial_dof_pos,
device=self.device,
).repeat((len(env_ids), 1)),
                (self.nut_widths_max[env_ids] * 0.5)
                * 1.1,  # buffer on gripper DOF pos to prevent initial contact
                (self.nut_widths_max[env_ids] * 0.5)
                * 1.1,  # buffer on gripper DOF pos to prevent initial contact
            ),
dim=-1,
) # shape = (num_envs, num_dofs)
self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs)
self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids]
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_positions(self.dof_pos[env_ids], indices=indices)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
def _reset_object(self, env_ids) -> None:
"""Reset root states of nut and bolt."""
# Randomize root state of nut within gripper
self.nut_pos[env_ids, 0] = 0.0
self.nut_pos[env_ids, 1] = 0.0
fingertip_midpoint_pos_reset = 0.58781 # self.fingertip_midpoint_pos at reset
nut_base_pos_local = self.bolt_head_heights.squeeze(-1)
self.nut_pos[env_ids, 2] = fingertip_midpoint_pos_reset - nut_base_pos_local
nut_noise_pos_in_gripper = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
nut_noise_pos_in_gripper = nut_noise_pos_in_gripper @ torch.diag(
torch.tensor(
self.cfg_task.randomize.nut_noise_pos_in_gripper, device=self.device
)
)
self.nut_pos[env_ids, :] += nut_noise_pos_in_gripper[env_ids]
nut_rot_euler = torch.tensor(
[0.0, 0.0, math.pi * 0.5], device=self.device
).repeat(len(env_ids), 1)
nut_noise_rot_in_gripper = 2 * (
torch.rand(self.num_envs, dtype=torch.float32, device=self.device) - 0.5
) # [-1, 1]
nut_noise_rot_in_gripper *= self.cfg_task.randomize.nut_noise_rot_in_gripper
nut_rot_euler[:, 2] += nut_noise_rot_in_gripper
nut_rot_quat = torch_utils.quat_from_euler_xyz(
nut_rot_euler[:, 0], nut_rot_euler[:, 1], nut_rot_euler[:, 2]
)
self.nut_quat[env_ids, :] = nut_rot_quat
self.nut_linvel[env_ids, :] = 0.0
self.nut_angvel[env_ids, :] = 0.0
indices = env_ids.to(dtype=torch.int32)
self.nuts.set_world_poses(
self.nut_pos[env_ids] + self.env_pos[env_ids],
self.nut_quat[env_ids],
indices,
)
self.nuts.set_velocities(
torch.cat((self.nut_linvel[env_ids], self.nut_angvel[env_ids]), dim=1),
indices,
)
# Randomize root state of bolt
bolt_noise_xy = 2 * (
torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
bolt_noise_xy = bolt_noise_xy @ torch.diag(
torch.tensor(
self.cfg_task.randomize.bolt_pos_xy_noise,
dtype=torch.float32,
device=self.device,
)
)
self.bolt_pos[env_ids, 0] = (
self.cfg_task.randomize.bolt_pos_xy_initial[0] + bolt_noise_xy[env_ids, 0]
)
self.bolt_pos[env_ids, 1] = (
self.cfg_task.randomize.bolt_pos_xy_initial[1] + bolt_noise_xy[env_ids, 1]
)
self.bolt_pos[env_ids, 2] = self.cfg_base.env.table_height
self.bolt_quat[env_ids, :] = torch.tensor(
[1.0, 0.0, 0.0, 0.0], dtype=torch.float32, device=self.device
).repeat(len(env_ids), 1)
indices = env_ids.to(dtype=torch.int32)
self.bolts.set_world_poses(
self.bolt_pos[env_ids] + self.env_pos[env_ids],
self.bolt_quat[env_ids],
indices,
)
def _reset_buffers(self, env_ids) -> None:
"""Reset buffers."""
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def _apply_actions_as_ctrl_targets(
self, actions, ctrl_target_gripper_dof_pos, do_scale
) -> None:
"""Apply actions from policy as position/rotation/force/torque targets."""
# Interpret actions as target pos displacements and set pos target
pos_actions = actions[:, 0:3]
if do_scale:
pos_actions = pos_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device)
)
self.ctrl_target_fingertip_midpoint_pos = (
self.fingertip_midpoint_pos + pos_actions
)
# Interpret actions as target rot (axis-angle) displacements
rot_actions = actions[:, 3:6]
if do_scale:
rot_actions = rot_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device)
)
# Convert to quat and set rot target
angle = torch.norm(rot_actions, p=2, dim=-1)
axis = rot_actions / angle.unsqueeze(-1)
rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis)
if self.cfg_task.rl.clamp_rot:
rot_actions_quat = torch.where(
angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh,
rot_actions_quat,
torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).repeat(
self.num_envs, 1
),
)
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(
rot_actions_quat, self.fingertip_midpoint_quat
)
if self.cfg_ctrl["do_force_ctrl"]:
# Interpret actions as target forces and target torques
force_actions = actions[:, 6:9]
if do_scale:
force_actions = force_actions @ torch.diag(
torch.tensor(
self.cfg_task.rl.force_action_scale, device=self.device
)
)
torque_actions = actions[:, 9:12]
if do_scale:
torque_actions = torque_actions @ torch.diag(
torch.tensor(
self.cfg_task.rl.torque_action_scale, device=self.device
)
)
self.ctrl_target_fingertip_contact_wrench = torch.cat(
(force_actions, torque_actions), dim=-1
)
self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos
self.generate_ctrl_signals()
def post_physics_step(
self,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.progress_buf[:] += 1
if self._env._world.is_playing():
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.get_observations()
self.calculate_metrics()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def _refresh_task_tensors(self) -> None:
"""Refresh tensors."""
# Compute pos of keypoints on gripper, nut, and bolt in world frame
for idx, keypoint_offset in enumerate(self.keypoint_offsets):
self.keypoints_nut[:, idx] = tf_combine(
self.nut_quat,
self.nut_pos,
self.identity_quat,
(keypoint_offset + self.nut_base_pos_local),
)[1]
self.keypoints_bolt[:, idx] = tf_combine(
self.bolt_quat,
self.bolt_pos,
self.identity_quat,
(keypoint_offset + self.bolt_tip_pos_local),
)[1]
def get_observations(self) -> dict:
"""Compute observations."""
# Shallow copies of tensors
obs_tensors = [
self.fingertip_midpoint_pos,
self.fingertip_midpoint_quat,
self.fingertip_midpoint_linvel,
self.fingertip_midpoint_angvel,
self.nut_pos,
self.nut_quat,
self.bolt_pos,
self.bolt_quat,
]
if self.cfg_task.rl.add_obs_bolt_tip_pos:
obs_tensors += [self.bolt_tip_pos_local]
self.obs_buf = torch.cat(
obs_tensors, dim=-1
) # shape = (num_envs, num_observations)
observations = {self.frankas.name: {"obs_buf": self.obs_buf}}
return observations
def calculate_metrics(self) -> None:
"""Update reset and reward buffers."""
self._update_reset_buf()
self._update_rew_buf()
def _update_reset_buf(self) -> None:
"""Assign environments for reset if successful or failed."""
# If max episode length has been reached
self.reset_buf[:] = torch.where(
self.progress_buf[:] >= self.max_episode_length - 1,
torch.ones_like(self.reset_buf),
self.reset_buf,
)
def _update_rew_buf(self) -> None:
"""Compute reward at current timestep."""
keypoint_reward = -self._get_keypoint_dist()
        action_penalty = torch.norm(self.actions, p=2, dim=-1)
self.rew_buf[:] = (
keypoint_reward * self.cfg_task.rl.keypoint_reward_scale
- action_penalty * self.cfg_task.rl.action_penalty_scale
)
# In this policy, episode length is constant across all envs
is_last_step = self.progress_buf[0] == self.max_episode_length - 1
if is_last_step:
# Check if nut is close enough to bolt
is_nut_close_to_bolt = self._check_nut_close_to_bolt()
self.rew_buf[:] += is_nut_close_to_bolt * self.cfg_task.rl.success_bonus
self.extras["successes"] = torch.mean(is_nut_close_to_bolt.float())
def _get_keypoint_offsets(self, num_keypoints) -> torch.Tensor:
"""Get uniformly-spaced keypoints along a line of unit length, centered at 0."""
keypoint_offsets = torch.zeros((num_keypoints, 3), device=self.device)
keypoint_offsets[:, -1] = (
torch.linspace(0.0, 1.0, num_keypoints, device=self.device) - 0.5
)
return keypoint_offsets
def _get_keypoint_dist(self) -> torch.Tensor:
"""Get keypoint distance between nut and bolt."""
keypoint_dist = torch.sum(
torch.norm(self.keypoints_bolt - self.keypoints_nut, p=2, dim=-1), dim=-1
)
return keypoint_dist
def _randomize_gripper_pose(self, env_ids, sim_steps) -> None:
"""Move gripper to random pose."""
# Step once to update PhysX with new joint positions and velocities from reset_franka()
SimulationContext.step(self._env._world, render=True)
# Set target pos above table
self.ctrl_target_fingertip_midpoint_pos = torch.tensor(
[0.0, 0.0, self.cfg_base.env.table_height], device=self.device
) + torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device
)
self.ctrl_target_fingertip_midpoint_pos = (
self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat(
self.num_envs, 1
)
)
fingertip_midpoint_pos_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device
)
)
self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise
# Set target rot
ctrl_target_fingertip_midpoint_euler = (
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_initial,
device=self.device,
)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
fingertip_midpoint_rot_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device
)
)
ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(
ctrl_target_fingertip_midpoint_euler[:, 0],
ctrl_target_fingertip_midpoint_euler[:, 1],
ctrl_target_fingertip_midpoint_euler[:, 2],
)
# Step sim and render
for _ in range(sim_steps):
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
pos_error, axis_angle_error = fc.get_pose_error(
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
jacobian_type=self.cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1)
actions = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
)
actions[:, :6] = delta_hand_pose
self._apply_actions_as_ctrl_targets(
actions=actions,
ctrl_target_gripper_dof_pos=0.0,
do_scale=False,
)
SimulationContext.step(self._env._world, render=True)
self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids])
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
# Step once to update PhysX with new joint velocities
SimulationContext.step(self._env._world, render=True)
async def _randomize_gripper_pose_async(self, env_ids, sim_steps) -> None:
"""Move gripper to random pose."""
# Step once to update PhysX with new joint positions and velocities from reset_franka()
self._env._world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
# Set target pos above table
self.ctrl_target_fingertip_midpoint_pos = torch.tensor(
[0.0, 0.0, self.cfg_base.env.table_height], device=self.device
) + torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device
)
self.ctrl_target_fingertip_midpoint_pos = (
self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat(
self.num_envs, 1
)
)
fingertip_midpoint_pos_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device
)
)
self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise
# Set target rot
ctrl_target_fingertip_midpoint_euler = (
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_initial,
device=self.device,
)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
fingertip_midpoint_rot_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device
)
)
ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(
ctrl_target_fingertip_midpoint_euler[:, 0],
ctrl_target_fingertip_midpoint_euler[:, 1],
ctrl_target_fingertip_midpoint_euler[:, 2],
)
# Step sim and render
for _ in range(sim_steps):
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
pos_error, axis_angle_error = fc.get_pose_error(
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
jacobian_type=self.cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1)
actions = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
)
actions[:, :6] = delta_hand_pose
self._apply_actions_as_ctrl_targets(
actions=actions,
ctrl_target_gripper_dof_pos=0.0,
do_scale=False,
)
self._env._world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids])
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
# Step once to update PhysX with new joint velocities
self._env._world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
def _close_gripper(self, sim_steps) -> None:
"""Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode)."""
self._move_gripper_to_dof_pos(gripper_dof_pos=0.0, sim_steps=sim_steps)
def _move_gripper_to_dof_pos(self, gripper_dof_pos, sim_steps) -> None:
"""Move gripper fingers to specified DOF position using controller."""
delta_hand_pose = torch.zeros(
(self.num_envs, 6), device=self.device
) # No hand motion
# Step sim
for _ in range(sim_steps):
self._apply_actions_as_ctrl_targets(
delta_hand_pose, gripper_dof_pos, do_scale=False
)
SimulationContext.step(self._env._world, render=True)
async def _close_gripper_async(self, sim_steps) -> None:
"""Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode)."""
await self._move_gripper_to_dof_pos_async(
gripper_dof_pos=0.0, sim_steps=sim_steps
)
async def _move_gripper_to_dof_pos_async(
self, gripper_dof_pos, sim_steps
) -> None:
"""Move gripper fingers to specified DOF position using controller."""
delta_hand_pose = torch.zeros(
(self.num_envs, 6), device=self.device
) # No hand motion
# Step sim
for _ in range(sim_steps):
self._apply_actions_as_ctrl_targets(
delta_hand_pose, gripper_dof_pos, do_scale=False
)
self._env._world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
def _check_nut_close_to_bolt(self) -> torch.Tensor:
"""Check if nut is close to bolt."""
keypoint_dist = torch.norm(
self.keypoints_bolt - self.keypoints_nut, p=2, dim=-1
)
is_nut_close_to_bolt = torch.where(
torch.sum(keypoint_dist, dim=-1) < self.cfg_task.rl.close_error_thresh,
torch.ones_like(self.progress_buf),
torch.zeros_like(self.progress_buf),
)
return is_nut_close_to_bolt
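# ---------------------------------------------------------------------------
# Hedged sketch (illustration only): a standalone version of the keypoint
# distance behind _get_keypoint_dist() and _check_nut_close_to_bolt(), using
# made-up tensors in place of simulator state.
def _example_keypoint_distance():
    import torch
    num_keypoints, keypoint_scale = 4, 0.5
    offsets = torch.zeros((num_keypoints, 3))
    offsets[:, -1] = torch.linspace(0.0, 1.0, num_keypoints) - 0.5  # unit line, centered at 0
    offsets = offsets * keypoint_scale
    # Pretend the nut frame sits 2 cm above the bolt frame along +z.
    keypoints_nut = offsets
    keypoints_bolt = offsets + torch.tensor([0.0, 0.0, 0.02])
    # Sum of per-keypoint L2 distances: 4 keypoints x 0.02 m = 0.08.
    return torch.sum(torch.norm(keypoints_bolt - keypoints_nut, p=2, dim=-1), dim=-1)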
| 29,034 | Python | 37.868809 | 131 | 0.594303 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_schema_class_task.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: abstract base class for task classes.
Inherits ABC class. Inherited by task classes. Defines template for task classes.
"""
from abc import ABC, abstractmethod
class FactoryABCTask(ABC):
@abstractmethod
def __init__(self):
"""Initialize instance variables. Initialize environment superclass."""
pass
@abstractmethod
def _get_task_yaml_params(self):
"""Initialize instance variables from YAML files."""
pass
@abstractmethod
def _acquire_task_tensors(self):
"""Acquire tensors."""
pass
@abstractmethod
def _refresh_task_tensors(self):
"""Refresh tensors."""
pass
@abstractmethod
def pre_physics_step(self):
"""Reset environments. Apply actions from policy as controller targets. Simulation step called after this method."""
pass
@abstractmethod
def post_physics_step(self):
"""Step buffers. Refresh tensors. Compute observations and reward."""
pass
@abstractmethod
def get_observations(self):
"""Compute observations."""
pass
@abstractmethod
def calculate_metrics(self):
"""Detect successes and failures. Update reward and reset buffers."""
pass
@abstractmethod
def _update_rew_buf(self):
"""Compute reward at current timestep."""
pass
@abstractmethod
def _update_reset_buf(self):
"""Assign environments for reset if successful or failed."""
pass
@abstractmethod
def reset_idx(self):
"""Reset specified environments."""
pass
@abstractmethod
def _reset_franka(self):
"""Reset DOF states and DOF targets of Franka."""
pass
@abstractmethod
def _reset_object(self):
"""Reset root state of object."""
pass
@abstractmethod
def _reset_buffers(self):
"""Reset buffers."""
pass
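# ---------------------------------------------------------------------------
# Hedged sketch (illustration only): what the ABC buys in practice. A concrete
# task must override every abstract method above before it can be
# instantiated; otherwise Python raises TypeError at construction time.
def _example_abc_enforcement():
    try:
        FactoryABCTask()  # type: ignore[abstract]
    except TypeError as err:
        return str(err)  # names the abstract methods that are still missing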
| 3,492 | Python | 31.342592 | 124 | 0.69559 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_schema_class_env.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: abstract base class for environment classes.
Inherits ABC class. Inherited by environment classes. Defines template for environment classes.
"""
from abc import ABC, abstractmethod
class FactoryABCEnv(ABC):
@abstractmethod
def __init__(self):
"""Initialize instance variables. Initialize base superclass. Acquire tensors."""
pass
@abstractmethod
def _get_env_yaml_params(self):
"""Initialize instance variables from YAML files."""
pass
@abstractmethod
def set_up_scene(self):
"""Set env options. Import assets. Create actors."""
pass
@abstractmethod
def _import_env_assets(self):
"""Set asset options. Import assets."""
pass
@abstractmethod
def refresh_env_tensors(self):
"""Refresh tensors."""
# NOTE: Tensor refresh functions should be called once per step, before setters.
pass
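# ---------------------------------------------------------------------------
# Hedged sketch (illustration only): the per-step ordering implied by the NOTE
# in refresh_env_tensors(). `env` is assumed to be a concrete subclass; the
# call order shown is a convention, not code taken from this repository.
def _example_step_ordering(env):
    env.refresh_env_tensors()  # getters first: read simulator state once per step
    # ...observations and rewards are computed from the refreshed tensors...
    # ...setters (e.g., joint targets) run afterwards, in the task's
    # pre_physics_step(), so they act on a consistent snapshot.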
| 2,489 | Python | 37.906249 | 95 | 0.73644 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_task_nut_bolt_screw.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: Class for nut-bolt screw task.
Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with
PYTHON_PATH omniisaacgymenvs/scripts/rlgames_train.py task=FactoryTaskNutBoltScrew
"""
import hydra
import math
import omegaconf
import torch
from typing import Tuple
import omni.isaac.core.utils.torch as torch_utils
import omniisaacgymenvs.tasks.factory.factory_control as fc
from omniisaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt
from omniisaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from omniisaacgymenvs.tasks.factory.factory_schema_config_task import (
FactorySchemaConfigTask,
)
class FactoryTaskNutBoltScrew(FactoryEnvNutBolt, FactoryABCTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
"""Initialize environment superclass. Initialize instance variables."""
super().__init__(name, sim_config, env)
self._get_task_yaml_params()
def _get_task_yaml_params(self) -> None:
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_task", node=FactorySchemaConfigTask)
self.cfg_task = omegaconf.OmegaConf.create(self._task_cfg)
self.max_episode_length = (
self.cfg_task.rl.max_episode_length
) # required instance var for VecTask
asset_info_path = "../tasks/factory/yaml/factory_asset_info_nut_bolt.yaml" # relative to Gym's Hydra search path (cfg dir)
self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
self.asset_info_nut_bolt = self.asset_info_nut_bolt[""][""][""]["tasks"][
"factory"
][
"yaml"
] # strip superfluous nesting
ppo_path = "train/FactoryTaskNutBoltScrewPPO.yaml" # relative to Gym's Hydra search path (cfg dir)
self.cfg_ppo = hydra.compose(config_name=ppo_path)
self.cfg_ppo = self.cfg_ppo["train"] # strip superfluous nesting
def post_reset(self) -> None:
"""Reset the world. Called only once, before simulation begins."""
if self.cfg_task.sim.disable_gravity:
self.disable_gravity()
self.acquire_base_tensors()
self._acquire_task_tensors()
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
# Reset all envs
indices = torch.arange(self.num_envs, dtype=torch.int64, device=self.device)
self.reset_idx(indices)
def _acquire_task_tensors(self) -> None:
"""Acquire tensors."""
target_heights = (
self.cfg_base.env.table_height
+ self.bolt_head_heights
+ self.nut_heights * 0.5
)
self.target_pos = target_heights * torch.tensor(
[0.0, 0.0, 1.0], device=self.device
).repeat((self.num_envs, 1))
self.identity_quat = (
torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
self.actions = torch.zeros(
(self.num_envs, self.num_actions), device=self.device
)
def pre_physics_step(self, actions) -> None:
"""Reset environments. Apply actions from policy. Simulation step called after this method."""
if not self._env._world.is_playing():
return
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.actions = actions.clone().to(
self.device
) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(
actions=self.actions, ctrl_target_gripper_dof_pos=0.0, do_scale=True
)
def reset_idx(self, env_ids) -> None:
"""Reset specified environments."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
self._reset_buffers(env_ids)
def _reset_franka(self, env_ids) -> None:
"""Reset DOF states and DOF targets of Franka."""
self.dof_pos[env_ids] = torch.cat(
(
torch.tensor(
self.cfg_task.randomize.franka_arm_initial_dof_pos,
device=self.device,
).repeat((len(env_ids), 1)),
(self.nut_widths_max[env_ids] * 0.5)
* 1.1, # buffer on gripper DOF pos to prevent initial contact
(self.nut_widths_max[env_ids] * 0.5) * 1.1,
), # buffer on gripper DOF pos to prevent initial contact
dim=-1,
) # shape = (num_envs, num_dofs)
self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs)
self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids]
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_positions(self.dof_pos[env_ids], indices=indices)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
def _reset_object(self, env_ids) -> None:
"""Reset root state of nut."""
nut_pos = self.cfg_base.env.table_height + self.bolt_shank_lengths[env_ids]
self.nut_pos[env_ids, :] = nut_pos * torch.tensor(
[0.0, 0.0, 1.0], device=self.device
).repeat(len(env_ids), 1)
nut_rot = (
self.cfg_task.randomize.nut_rot_initial
* torch.ones((len(env_ids), 1), device=self.device)
* math.pi
/ 180.0
)
self.nut_quat[env_ids, :] = torch.cat(
(
torch.cos(nut_rot * 0.5),
torch.zeros((len(env_ids), 1), device=self.device),
torch.zeros((len(env_ids), 1), device=self.device),
torch.sin(nut_rot * 0.5),
),
dim=-1,
)
self.nut_linvel[env_ids, :] = 0.0
self.nut_angvel[env_ids, :] = 0.0
indices = env_ids.to(dtype=torch.int32)
self.nuts.set_world_poses(
self.nut_pos[env_ids] + self.env_pos[env_ids],
self.nut_quat[env_ids],
indices,
)
self.nuts.set_velocities(
torch.cat((self.nut_linvel[env_ids], self.nut_angvel[env_ids]), dim=1),
indices,
)
def _reset_buffers(self, env_ids) -> None:
"""Reset buffers."""
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def _apply_actions_as_ctrl_targets(
self, actions, ctrl_target_gripper_dof_pos, do_scale
) -> None:
"""Apply actions from policy as position/rotation/force/torque targets."""
# Interpret actions as target pos displacements and set pos target
pos_actions = actions[:, 0:3]
if self.cfg_task.rl.unidirectional_pos:
pos_actions[:, 2] = -(pos_actions[:, 2] + 1.0) * 0.5 # [-1, 0]
if do_scale:
pos_actions = pos_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device)
)
self.ctrl_target_fingertip_midpoint_pos = (
self.fingertip_midpoint_pos + pos_actions
)
# Interpret actions as target rot (axis-angle) displacements
rot_actions = actions[:, 3:6]
if self.cfg_task.rl.unidirectional_rot:
rot_actions[:, 2] = -(rot_actions[:, 2] + 1.0) * 0.5 # [-1, 0]
if do_scale:
rot_actions = rot_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device)
)
# Convert to quat and set rot target
angle = torch.norm(rot_actions, p=2, dim=-1)
axis = rot_actions / angle.unsqueeze(-1)
rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis)
if self.cfg_task.rl.clamp_rot:
rot_actions_quat = torch.where(
angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh,
rot_actions_quat,
torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).repeat(
self.num_envs, 1
),
)
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(
rot_actions_quat, self.fingertip_midpoint_quat
)
if self.cfg_ctrl["do_force_ctrl"]:
# Interpret actions as target forces and target torques
force_actions = actions[:, 6:9]
if self.cfg_task.rl.unidirectional_force:
force_actions[:, 2] = -(force_actions[:, 2] + 1.0) * 0.5 # [-1, 0]
if do_scale:
force_actions = force_actions @ torch.diag(
torch.tensor(
self.cfg_task.rl.force_action_scale, device=self.device
)
)
torque_actions = actions[:, 9:12]
if do_scale:
torque_actions = torque_actions @ torch.diag(
torch.tensor(
self.cfg_task.rl.torque_action_scale, device=self.device
)
)
self.ctrl_target_fingertip_contact_wrench = torch.cat(
(force_actions, torque_actions), dim=-1
)
self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos
self.generate_ctrl_signals()
def post_physics_step(
self,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.progress_buf[:] += 1
if self._env._world.is_playing():
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.get_observations()
self.calculate_metrics()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def _refresh_task_tensors(self) -> None:
"""Refresh tensors."""
self.fingerpad_midpoint_pos = fc.translate_along_local_z(
pos=self.finger_midpoint_pos,
quat=self.hand_quat,
offset=self.asset_info_franka_table.franka_finger_length
- self.asset_info_franka_table.franka_fingerpad_length * 0.5,
device=self.device,
)
self.finger_nut_keypoint_dist = self._get_keypoint_dist(body="finger_nut")
self.nut_keypoint_dist = self._get_keypoint_dist(body="nut")
self.nut_dist_to_target = torch.norm(
self.target_pos - self.nut_com_pos, p=2, dim=-1
) # distance between nut COM and target
self.nut_dist_to_fingerpads = torch.norm(
self.fingerpad_midpoint_pos - self.nut_com_pos, p=2, dim=-1
) # distance between nut COM and midpoint between centers of fingerpads
self.was_success = torch.zeros_like(self.progress_buf, dtype=torch.bool)
def get_observations(self) -> dict:
"""Compute observations."""
# Shallow copies of tensors
obs_tensors = [
self.fingertip_midpoint_pos,
self.fingertip_midpoint_quat,
self.fingertip_midpoint_linvel,
self.fingertip_midpoint_angvel,
self.nut_com_pos,
self.nut_com_quat,
self.nut_com_linvel,
self.nut_com_angvel,
]
if self.cfg_task.rl.add_obs_finger_force:
obs_tensors += [self.left_finger_force, self.right_finger_force]
else:
obs_tensors += [
torch.zeros_like(self.left_finger_force),
torch.zeros_like(self.right_finger_force),
]
self.obs_buf = torch.cat(
obs_tensors, dim=-1
) # shape = (num_envs, num_observations)
observations = {self.frankas.name: {"obs_buf": self.obs_buf}}
return observations
def calculate_metrics(self) -> None:
"""Update reset and reward buffers."""
# Get successful and failed envs at current timestep
curr_successes = self._get_curr_successes()
curr_failures = self._get_curr_failures(curr_successes)
self._update_reset_buf(curr_successes, curr_failures)
self._update_rew_buf(curr_successes)
if torch.any(self.is_expired):
self.extras["successes"] = torch.mean(curr_successes.float())
def _update_reset_buf(self, curr_successes, curr_failures) -> None:
"""Assign environments for reset if successful or failed."""
self.reset_buf[:] = self.is_expired
def _update_rew_buf(self, curr_successes) -> None:
"""Compute reward at current timestep."""
keypoint_reward = -(self.nut_keypoint_dist + self.finger_nut_keypoint_dist)
action_penalty = torch.norm(self.actions, p=2, dim=-1)
self.rew_buf[:] = (
keypoint_reward * self.cfg_task.rl.keypoint_reward_scale
- action_penalty * self.cfg_task.rl.action_penalty_scale
+ curr_successes * self.cfg_task.rl.success_bonus
)
def _get_keypoint_dist(self, body) -> torch.Tensor:
"""Get keypoint distance."""
axis_length = (
self.asset_info_franka_table.franka_hand_length
+ self.asset_info_franka_table.franka_finger_length
)
if body == "finger" or body == "nut":
# Keypoint distance between finger/nut and target
if body == "finger":
self.keypoint1 = self.fingertip_midpoint_pos
self.keypoint2 = fc.translate_along_local_z(
pos=self.keypoint1,
quat=self.fingertip_midpoint_quat,
offset=-axis_length,
device=self.device,
)
elif body == "nut":
self.keypoint1 = self.nut_com_pos
self.keypoint2 = fc.translate_along_local_z(
pos=self.nut_com_pos,
quat=self.nut_com_quat,
offset=axis_length,
device=self.device,
)
self.keypoint1_targ = self.target_pos
self.keypoint2_targ = self.keypoint1_targ + torch.tensor(
[0.0, 0.0, axis_length], device=self.device
)
elif body == "finger_nut":
# Keypoint distance between finger and nut
self.keypoint1 = self.fingerpad_midpoint_pos
self.keypoint2 = fc.translate_along_local_z(
pos=self.keypoint1,
quat=self.fingertip_midpoint_quat,
offset=-axis_length,
device=self.device,
)
self.keypoint1_targ = self.nut_com_pos
self.keypoint2_targ = fc.translate_along_local_z(
pos=self.nut_com_pos,
quat=self.nut_com_quat,
offset=axis_length,
device=self.device,
)
self.keypoint3 = self.keypoint1 + (self.keypoint2 - self.keypoint1) * 1.0 / 3.0
self.keypoint4 = self.keypoint1 + (self.keypoint2 - self.keypoint1) * 2.0 / 3.0
self.keypoint3_targ = (
self.keypoint1_targ
+ (self.keypoint2_targ - self.keypoint1_targ) * 1.0 / 3.0
)
self.keypoint4_targ = (
self.keypoint1_targ
+ (self.keypoint2_targ - self.keypoint1_targ) * 2.0 / 3.0
)
keypoint_dist = (
torch.norm(self.keypoint1_targ - self.keypoint1, p=2, dim=-1)
+ torch.norm(self.keypoint2_targ - self.keypoint2, p=2, dim=-1)
+ torch.norm(self.keypoint3_targ - self.keypoint3, p=2, dim=-1)
+ torch.norm(self.keypoint4_targ - self.keypoint4, p=2, dim=-1)
)
return keypoint_dist
def _get_curr_successes(self) -> torch.Tensor:
"""Get success mask at current timestep."""
curr_successes = torch.zeros(
(self.num_envs,), dtype=torch.bool, device=self.device
)
# If nut is close enough to target pos
is_close = torch.where(
self.nut_dist_to_target < self.thread_pitches.squeeze(-1) * 5,
torch.ones_like(curr_successes),
torch.zeros_like(curr_successes),
)
curr_successes = torch.logical_or(curr_successes, is_close)
return curr_successes
def _get_curr_failures(self, curr_successes) -> torch.Tensor:
"""Get failure mask at current timestep."""
curr_failures = torch.zeros(
(self.num_envs,), dtype=torch.bool, device=self.device
)
# If max episode length has been reached
self.is_expired = torch.where(
self.progress_buf[:] >= self.cfg_task.rl.max_episode_length,
torch.ones_like(curr_failures),
curr_failures,
)
# If nut is too far from target pos
self.is_far = torch.where(
self.nut_dist_to_target > self.cfg_task.rl.far_error_thresh,
torch.ones_like(curr_failures),
curr_failures,
)
# If nut has slipped (distance-based definition)
self.is_slipped = torch.where(
self.nut_dist_to_fingerpads
> self.asset_info_franka_table.franka_fingerpad_length * 0.5
+ self.nut_heights.squeeze(-1) * 0.5,
torch.ones_like(curr_failures),
curr_failures,
)
self.is_slipped = torch.logical_and(
self.is_slipped, torch.logical_not(curr_successes)
) # ignore slip if successful
# If nut has fallen (i.e., if nut XY pos has drifted from center of bolt and nut Z pos has drifted below top of bolt)
self.is_fallen = torch.logical_and(
torch.norm(self.nut_com_pos[:, 0:2], p=2, dim=-1)
> self.bolt_widths.squeeze(-1) * 0.5,
self.nut_com_pos[:, 2]
< self.cfg_base.env.table_height
+ self.bolt_head_heights.squeeze(-1)
+ self.bolt_shank_lengths.squeeze(-1)
+ self.nut_heights.squeeze(-1) * 0.5,
)
curr_failures = torch.logical_or(curr_failures, self.is_expired)
curr_failures = torch.logical_or(curr_failures, self.is_far)
curr_failures = torch.logical_or(curr_failures, self.is_slipped)
curr_failures = torch.logical_or(curr_failures, self.is_fallen)
return curr_failures
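# ---------------------------------------------------------------------------
# Hedged sketch (illustration only): the success test in _get_curr_successes()
# with toy numbers. A 2 mm thread pitch gives a 10 mm band around the target
# height; the shapes mirror a 3-env batch.
def _example_success_threshold():
    import torch
    thread_pitches = torch.tensor([0.002, 0.002, 0.002])      # m
    nut_dist_to_target = torch.tensor([0.004, 0.012, 0.009])  # m
    is_close = nut_dist_to_target < thread_pitches * 5        # 10 mm threshold
    return is_close  # tensor([True, False, True])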
| 20,039 | Python | 37.390805 | 131 | 0.589051 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_task_nut_bolt_pick.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: Class for nut-bolt pick task.
Inherits nut-bolt environment class and abstract task class (not enforced). Can be executed with
PYTHON_PATH omniisaacgymenvs/scripts/rlgames_train.py task=FactoryTaskNutBoltPick
"""
import asyncio
import hydra
import omegaconf
import torch
import omni.kit
from omni.isaac.core.simulation_context import SimulationContext
from omni.isaac.core.utils.torch.transformations import tf_combine
from typing import Tuple
import omni.isaac.core.utils.torch as torch_utils
import omniisaacgymenvs.tasks.factory.factory_control as fc
from omniisaacgymenvs.tasks.factory.factory_env_nut_bolt import FactoryEnvNutBolt
from omniisaacgymenvs.tasks.factory.factory_schema_class_task import FactoryABCTask
from omniisaacgymenvs.tasks.factory.factory_schema_config_task import (
FactorySchemaConfigTask,
)
class FactoryTaskNutBoltPick(FactoryEnvNutBolt, FactoryABCTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
"""Initialize environment superclass. Initialize instance variables."""
super().__init__(name, sim_config, env)
self._get_task_yaml_params()
def _get_task_yaml_params(self) -> None:
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_task", node=FactorySchemaConfigTask)
self.cfg_task = omegaconf.OmegaConf.create(self._task_cfg)
self.max_episode_length = (
self.cfg_task.rl.max_episode_length
) # required instance var for VecTask
asset_info_path = "../tasks/factory/yaml/factory_asset_info_nut_bolt.yaml" # relative to Gym's Hydra search path (cfg dir)
self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
self.asset_info_nut_bolt = self.asset_info_nut_bolt[""][""][""]["tasks"][
"factory"
][
"yaml"
] # strip superfluous nesting
ppo_path = "train/FactoryTaskNutBoltPickPPO.yaml" # relative to Gym's Hydra search path (cfg dir)
self.cfg_ppo = hydra.compose(config_name=ppo_path)
self.cfg_ppo = self.cfg_ppo["train"] # strip superfluous nesting
def post_reset(self) -> None:
"""Reset the world. Called only once, before simulation begins."""
if self.cfg_task.sim.disable_gravity:
self.disable_gravity()
self.acquire_base_tensors()
self._acquire_task_tensors()
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
# Reset all envs
indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
asyncio.ensure_future(
self.reset_idx_async(indices, randomize_gripper_pose=False)
)
def _acquire_task_tensors(self) -> None:
"""Acquire tensors."""
# Grasp pose tensors
nut_grasp_heights = self.bolt_head_heights + self.nut_heights * 0.5 # nut COM
self.nut_grasp_pos_local = nut_grasp_heights * torch.tensor(
[0.0, 0.0, 1.0], device=self.device
).repeat((self.num_envs, 1))
self.nut_grasp_quat_local = (
torch.tensor([0.0, 0.0, 1.0, 0.0], device=self.device)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
# Keypoint tensors
self.keypoint_offsets = (
self._get_keypoint_offsets(self.cfg_task.rl.num_keypoints)
* self.cfg_task.rl.keypoint_scale
)
self.keypoints_gripper = torch.zeros(
(self.num_envs, self.cfg_task.rl.num_keypoints, 3),
dtype=torch.float32,
device=self.device,
)
self.keypoints_nut = torch.zeros_like(
self.keypoints_gripper, device=self.device
)
self.identity_quat = (
torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
self.actions = torch.zeros(
(self.num_envs, self.num_actions), device=self.device
)
def pre_physics_step(self, actions) -> None:
"""Reset environments. Apply actions from policy. Simulation step called after this method."""
if not self._env._world.is_playing():
return
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids, randomize_gripper_pose=True)
self.actions = actions.clone().to(
self.device
) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(
actions=self.actions,
ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max,
do_scale=True,
)
async def pre_physics_step_async(self, actions) -> None:
"""Reset environments. Apply actions from policy. Simulation step called after this method."""
if not self._env._world.is_playing():
return
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
await self.reset_idx_async(env_ids, randomize_gripper_pose=True)
self.actions = actions.clone().to(
self.device
) # shape = (num_envs, num_actions); values = [-1, 1]
self._apply_actions_as_ctrl_targets(
actions=self.actions,
ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max,
do_scale=True,
)
def reset_idx(self, env_ids, randomize_gripper_pose) -> None:
"""Reset specified environments."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
if randomize_gripper_pose:
self._randomize_gripper_pose(
env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps
)
self._reset_buffers(env_ids)
async def reset_idx_async(self, env_ids, randomize_gripper_pose) -> None:
"""Reset specified environments."""
self._reset_franka(env_ids)
self._reset_object(env_ids)
if randomize_gripper_pose:
await self._randomize_gripper_pose_async(
env_ids, sim_steps=self.cfg_task.env.num_gripper_move_sim_steps
)
self._reset_buffers(env_ids)
def _reset_franka(self, env_ids) -> None:
"""Reset DOF states and DOF targets of Franka."""
self.dof_pos[env_ids] = torch.cat(
(
torch.tensor(
self.cfg_task.randomize.franka_arm_initial_dof_pos,
device=self.device,
),
torch.tensor(
[self.asset_info_franka_table.franka_gripper_width_max],
device=self.device,
),
torch.tensor(
[self.asset_info_franka_table.franka_gripper_width_max],
device=self.device,
),
),
dim=-1,
) # shape = (num_envs, num_dofs)
self.dof_vel[env_ids] = 0.0 # shape = (num_envs, num_dofs)
self.ctrl_target_dof_pos[env_ids] = self.dof_pos[env_ids]
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_positions(self.dof_pos[env_ids], indices=indices)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
def _reset_object(self, env_ids) -> None:
"""Reset root states of nut and bolt."""
# Randomize root state of nut
nut_noise_xy = 2 * (
torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
nut_noise_xy = nut_noise_xy @ torch.diag(
torch.tensor(self.cfg_task.randomize.nut_pos_xy_noise, device=self.device)
)
self.nut_pos[env_ids, 0] = (
self.cfg_task.randomize.nut_pos_xy_initial[0] + nut_noise_xy[env_ids, 0]
)
self.nut_pos[env_ids, 1] = (
self.cfg_task.randomize.nut_pos_xy_initial[1] + nut_noise_xy[env_ids, 1]
)
self.nut_pos[
env_ids, 2
] = self.cfg_base.env.table_height - self.bolt_head_heights.squeeze(-1)
self.nut_quat[env_ids, :] = torch.tensor(
[1.0, 0.0, 0.0, 0.0], dtype=torch.float32, device=self.device
).repeat(len(env_ids), 1)
self.nut_linvel[env_ids, :] = 0.0
self.nut_angvel[env_ids, :] = 0.0
indices = env_ids.to(dtype=torch.int32)
self.nuts.set_world_poses(
self.nut_pos[env_ids] + self.env_pos[env_ids],
self.nut_quat[env_ids],
indices,
)
self.nuts.set_velocities(
torch.cat((self.nut_linvel[env_ids], self.nut_angvel[env_ids]), dim=1),
indices,
)
# Randomize root state of bolt
bolt_noise_xy = 2 * (
torch.rand((self.num_envs, 2), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
bolt_noise_xy = bolt_noise_xy @ torch.diag(
torch.tensor(self.cfg_task.randomize.bolt_pos_xy_noise, device=self.device)
)
self.bolt_pos[env_ids, 0] = (
self.cfg_task.randomize.bolt_pos_xy_initial[0] + bolt_noise_xy[env_ids, 0]
)
self.bolt_pos[env_ids, 1] = (
self.cfg_task.randomize.bolt_pos_xy_initial[1] + bolt_noise_xy[env_ids, 1]
)
self.bolt_pos[env_ids, 2] = self.cfg_base.env.table_height
self.bolt_quat[env_ids, :] = torch.tensor(
[1.0, 0.0, 0.0, 0.0], dtype=torch.float32, device=self.device
).repeat(len(env_ids), 1)
indices = env_ids.to(dtype=torch.int32)
self.bolts.set_world_poses(
self.bolt_pos[env_ids] + self.env_pos[env_ids],
self.bolt_quat[env_ids],
indices,
)
def _reset_buffers(self, env_ids) -> None:
"""Reset buffers."""
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def _apply_actions_as_ctrl_targets(
self, actions, ctrl_target_gripper_dof_pos, do_scale
) -> None:
"""Apply actions from policy as position/rotation/force/torque targets."""
# Interpret actions as target pos displacements and set pos target
pos_actions = actions[:, 0:3]
if do_scale:
pos_actions = pos_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.pos_action_scale, device=self.device)
)
self.ctrl_target_fingertip_midpoint_pos = (
self.fingertip_midpoint_pos + pos_actions
)
# Interpret actions as target rot (axis-angle) displacements
rot_actions = actions[:, 3:6]
if do_scale:
rot_actions = rot_actions @ torch.diag(
torch.tensor(self.cfg_task.rl.rot_action_scale, device=self.device)
)
# Convert to quat and set rot target
angle = torch.norm(rot_actions, p=2, dim=-1)
axis = rot_actions / angle.unsqueeze(-1)
rot_actions_quat = torch_utils.quat_from_angle_axis(angle, axis)
if self.cfg_task.rl.clamp_rot:
rot_actions_quat = torch.where(
angle.unsqueeze(-1).repeat(1, 4) > self.cfg_task.rl.clamp_rot_thresh,
rot_actions_quat,
torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device).repeat(
self.num_envs, 1
),
)
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_mul(
rot_actions_quat, self.fingertip_midpoint_quat
)
if self.cfg_ctrl["do_force_ctrl"]:
# Interpret actions as target forces and target torques
force_actions = actions[:, 6:9]
if do_scale:
force_actions = force_actions @ torch.diag(
torch.tensor(
self.cfg_task.rl.force_action_scale, device=self.device
)
)
torque_actions = actions[:, 9:12]
if do_scale:
torque_actions = torque_actions @ torch.diag(
torch.tensor(
self.cfg_task.rl.torque_action_scale, device=self.device
)
)
self.ctrl_target_fingertip_contact_wrench = torch.cat(
(force_actions, torque_actions), dim=-1
)
self.ctrl_target_gripper_dof_pos = ctrl_target_gripper_dof_pos
self.generate_ctrl_signals()
def post_physics_step(
self,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.progress_buf[:] += 1
if self._env._world.is_playing():
# In this policy, episode length is constant
is_last_step = self.progress_buf[0] == self.max_episode_length - 1
if is_last_step:
# At this point, robot has executed RL policy. Now close gripper and lift (open-loop)
if self.cfg_task.env.close_and_lift:
self._close_gripper(
sim_steps=self.cfg_task.env.num_gripper_close_sim_steps
)
self._lift_gripper(
franka_gripper_width=0.0,
lift_distance=0.3,
sim_steps=self.cfg_task.env.num_gripper_lift_sim_steps,
)
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.get_observations()
self.get_states()
self.calculate_metrics()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
async def post_physics_step_async(self):
"""Step buffers. Refresh tensors. Compute observations and reward. Reset environments."""
self.progress_buf[:] += 1
if self._env._world.is_playing():
# In this policy, episode length is constant
is_last_step = self.progress_buf[0] == self.max_episode_length - 1
if self.cfg_task.env.close_and_lift:
# At this point, robot has executed RL policy. Now close gripper and lift (open-loop)
if is_last_step:
await self._close_gripper_async(
sim_steps=self.cfg_task.env.num_gripper_close_sim_steps
)
await self._lift_gripper_async(
sim_steps=self.cfg_task.env.num_gripper_lift_sim_steps
)
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
self.get_observations()
self.get_states()
self.calculate_metrics()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
def _refresh_task_tensors(self):
"""Refresh tensors."""
# Compute pose of nut grasping frame
self.nut_grasp_quat, self.nut_grasp_pos = tf_combine(
self.nut_quat,
self.nut_pos,
self.nut_grasp_quat_local,
self.nut_grasp_pos_local,
)
# Compute pos of keypoints on gripper and nut in world frame
for idx, keypoint_offset in enumerate(self.keypoint_offsets):
self.keypoints_gripper[:, idx] = tf_combine(
self.fingertip_midpoint_quat,
self.fingertip_midpoint_pos,
self.identity_quat,
keypoint_offset.repeat(self.num_envs, 1),
)[1]
self.keypoints_nut[:, idx] = tf_combine(
self.nut_grasp_quat,
self.nut_grasp_pos,
self.identity_quat,
keypoint_offset.repeat(self.num_envs, 1),
)[1]
def get_observations(self) -> dict:
"""Compute observations."""
# Shallow copies of tensors
obs_tensors = [
self.fingertip_midpoint_pos,
self.fingertip_midpoint_quat,
self.fingertip_midpoint_linvel,
self.fingertip_midpoint_angvel,
self.nut_grasp_pos,
self.nut_grasp_quat,
]
self.obs_buf = torch.cat(
obs_tensors, dim=-1
) # shape = (num_envs, num_observations)
observations = {self.frankas.name: {"obs_buf": self.obs_buf}}
return observations
def calculate_metrics(self) -> None:
"""Update reward and reset buffers."""
self._update_reset_buf()
self._update_rew_buf()
def _update_reset_buf(self) -> None:
"""Assign environments for reset if successful or failed."""
# If max episode length has been reached
self.reset_buf[:] = torch.where(
self.progress_buf[:] >= self.max_episode_length - 1,
torch.ones_like(self.reset_buf),
self.reset_buf,
)
def _update_rew_buf(self) -> None:
"""Compute reward at current timestep."""
keypoint_reward = -self._get_keypoint_dist()
action_penalty = (
torch.norm(self.actions, p=2, dim=-1)
* self.cfg_task.rl.action_penalty_scale
)
        self.rew_buf[:] = (
            keypoint_reward * self.cfg_task.rl.keypoint_reward_scale - action_penalty
        )  # action_penalty is already scaled by action_penalty_scale above
# In this policy, episode length is constant across all envs
is_last_step = self.progress_buf[0] == self.max_episode_length - 1
if is_last_step:
# Check if nut is picked up and above table
lift_success = self._check_lift_success(height_multiple=3.0)
self.rew_buf[:] += lift_success * self.cfg_task.rl.success_bonus
self.extras["successes"] = torch.mean(lift_success.float())
def _get_keypoint_offsets(self, num_keypoints) -> torch.Tensor:
"""Get uniformly-spaced keypoints along a line of unit length, centered at 0."""
keypoint_offsets = torch.zeros((num_keypoints, 3), device=self.device)
keypoint_offsets[:, -1] = (
torch.linspace(0.0, 1.0, num_keypoints, device=self.device) - 0.5
)
return keypoint_offsets
def _get_keypoint_dist(self) -> torch.Tensor:
"""Get keypoint distance."""
keypoint_dist = torch.sum(
torch.norm(self.keypoints_nut - self.keypoints_gripper, p=2, dim=-1), dim=-1
)
return keypoint_dist
def _close_gripper(self, sim_steps=20) -> None:
"""Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode)."""
self._move_gripper_to_dof_pos(gripper_dof_pos=0.0, sim_steps=sim_steps)
def _move_gripper_to_dof_pos(self, gripper_dof_pos, sim_steps=20) -> None:
"""Move gripper fingers to specified DOF position using controller."""
delta_hand_pose = torch.zeros(
(self.num_envs, 6), device=self.device
) # No hand motion
self._apply_actions_as_ctrl_targets(
delta_hand_pose, gripper_dof_pos, do_scale=False
)
# Step sim
for _ in range(sim_steps):
SimulationContext.step(self._env._world, render=True)
def _lift_gripper(
self, franka_gripper_width=0.0, lift_distance=0.3, sim_steps=20
) -> None:
"""Lift gripper by specified distance. Called outside RL loop (i.e., after last step of episode)."""
delta_hand_pose = torch.zeros([self.num_envs, 6], device=self.device)
delta_hand_pose[:, 2] = lift_distance
# Step sim
for _ in range(sim_steps):
self._apply_actions_as_ctrl_targets(
delta_hand_pose, franka_gripper_width, do_scale=False
)
SimulationContext.step(self._env._world, render=True)
async def _close_gripper_async(self, sim_steps=20) -> None:
"""Fully close gripper using controller. Called outside RL loop (i.e., after last step of episode)."""
await self._move_gripper_to_dof_pos_async(
gripper_dof_pos=0.0, sim_steps=sim_steps
)
async def _move_gripper_to_dof_pos_async(
self, gripper_dof_pos, sim_steps=20
) -> None:
"""Move gripper fingers to specified DOF position using controller."""
delta_hand_pose = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
) # No hand motion
self._apply_actions_as_ctrl_targets(
delta_hand_pose, gripper_dof_pos, do_scale=False
)
# Step sim
for _ in range(sim_steps):
self._env._world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
async def _lift_gripper_async(
self, franka_gripper_width=0.0, lift_distance=0.3, sim_steps=20
) -> None:
"""Lift gripper by specified distance. Called outside RL loop (i.e., after last step of episode)."""
delta_hand_pose = torch.zeros([self.num_envs, 6], device=self.device)
delta_hand_pose[:, 2] = lift_distance
# Step sim
for _ in range(sim_steps):
self._apply_actions_as_ctrl_targets(
delta_hand_pose, franka_gripper_width, do_scale=False
)
self._env._world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
def _check_lift_success(self, height_multiple) -> torch.Tensor:
"""Check if nut is above table by more than specified multiple times height of nut."""
lift_success = torch.where(
self.nut_pos[:, 2]
> self.cfg_base.env.table_height
+ self.nut_heights.squeeze(-1) * height_multiple,
torch.ones((self.num_envs,), device=self.device),
torch.zeros((self.num_envs,), device=self.device),
)
return lift_success
def _randomize_gripper_pose(self, env_ids, sim_steps) -> None:
"""Move gripper to random pose."""
# step once to update physx with the newly set joint positions from reset_franka()
SimulationContext.step(self._env._world, render=True)
# Set target pos above table
self.ctrl_target_fingertip_midpoint_pos = torch.tensor(
[0.0, 0.0, self.cfg_base.env.table_height], device=self.device
) + torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device
)
self.ctrl_target_fingertip_midpoint_pos = (
self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat(
self.num_envs, 1
)
)
fingertip_midpoint_pos_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device
)
)
self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise
# Set target rot
ctrl_target_fingertip_midpoint_euler = (
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_initial,
device=self.device,
)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
fingertip_midpoint_rot_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device
)
)
ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(
ctrl_target_fingertip_midpoint_euler[:, 0],
ctrl_target_fingertip_midpoint_euler[:, 1],
ctrl_target_fingertip_midpoint_euler[:, 2],
)
# Step sim and render
for _ in range(sim_steps):
if not self._env._world.is_playing():
return
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
pos_error, axis_angle_error = fc.get_pose_error(
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
jacobian_type=self.cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1)
actions = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
)
actions[:, :6] = delta_hand_pose
self._apply_actions_as_ctrl_targets(
actions=actions,
ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max,
do_scale=False,
)
SimulationContext.step(self._env._world, render=True)
self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids])
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
# step once to update physx with the newly set joint velocities
SimulationContext.step(self._env._world, render=True)
async def _randomize_gripper_pose_async(self, env_ids, sim_steps) -> None:
"""Move gripper to random pose."""
# step once to update physx with the newly set joint positions from reset_franka()
await omni.kit.app.get_app().next_update_async()
# Set target pos above table
self.ctrl_target_fingertip_midpoint_pos = torch.tensor(
[0.0, 0.0, self.cfg_base.env.table_height], device=self.device
) + torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_initial, device=self.device
)
self.ctrl_target_fingertip_midpoint_pos = (
self.ctrl_target_fingertip_midpoint_pos.unsqueeze(0).repeat(
self.num_envs, 1
)
)
fingertip_midpoint_pos_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_pos_noise = fingertip_midpoint_pos_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_pos_noise, device=self.device
)
)
self.ctrl_target_fingertip_midpoint_pos += fingertip_midpoint_pos_noise
# Set target rot
ctrl_target_fingertip_midpoint_euler = (
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_initial,
device=self.device,
)
.unsqueeze(0)
.repeat(self.num_envs, 1)
)
fingertip_midpoint_rot_noise = 2 * (
torch.rand((self.num_envs, 3), dtype=torch.float32, device=self.device)
- 0.5
) # [-1, 1]
fingertip_midpoint_rot_noise = fingertip_midpoint_rot_noise @ torch.diag(
torch.tensor(
self.cfg_task.randomize.fingertip_midpoint_rot_noise, device=self.device
)
)
ctrl_target_fingertip_midpoint_euler += fingertip_midpoint_rot_noise
self.ctrl_target_fingertip_midpoint_quat = torch_utils.quat_from_euler_xyz(
ctrl_target_fingertip_midpoint_euler[:, 0],
ctrl_target_fingertip_midpoint_euler[:, 1],
ctrl_target_fingertip_midpoint_euler[:, 2],
)
# Step sim and render
for _ in range(sim_steps):
self.refresh_base_tensors()
self.refresh_env_tensors()
self._refresh_task_tensors()
pos_error, axis_angle_error = fc.get_pose_error(
fingertip_midpoint_pos=self.fingertip_midpoint_pos,
fingertip_midpoint_quat=self.fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=self.ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=self.ctrl_target_fingertip_midpoint_quat,
jacobian_type=self.cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_hand_pose = torch.cat((pos_error, axis_angle_error), dim=-1)
actions = torch.zeros(
(self.num_envs, self.cfg_task.env.numActions), device=self.device
)
actions[:, :6] = delta_hand_pose
self._apply_actions_as_ctrl_targets(
actions=actions,
ctrl_target_gripper_dof_pos=self.asset_info_franka_table.franka_gripper_width_max,
do_scale=False,
)
self._env._world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
self.dof_vel[env_ids, :] = torch.zeros_like(self.dof_vel[env_ids])
indices = env_ids.to(dtype=torch.int32)
self.frankas.set_joint_velocities(self.dof_vel[env_ids], indices=indices)
# step once to update physx with the newly set joint velocities
self._env._world.physics_sim_view.flush()
await omni.kit.app.get_app().next_update_async()
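# --- Illustrative sketch (not part of the original task class) ----------------
# A hedged, self-contained example of the keypoint-distance reward computed in
# _get_keypoint_dist() and _update_rew_buf() above: sum the per-keypoint L2
# distances between gripper and nut keypoints, then negate and scale. The helper
# name, shapes, and scale value below are made up for illustration only.
def _example_keypoint_reward(device="cpu"):
    num_envs, num_keypoints = 2, 4
    keypoints_gripper = torch.zeros((num_envs, num_keypoints, 3), device=device)
    keypoints_nut = 0.1 * torch.ones((num_envs, num_keypoints, 3), device=device)
    keypoint_dist = torch.sum(
        torch.norm(keypoints_nut - keypoints_gripper, p=2, dim=-1), dim=-1
    )  # shape (num_envs,); here 4 * sqrt(3) * 0.1 ~= 0.69 per env
    keypoint_reward_scale = 1.0  # assumed value; the real one comes from cfg_task.rl
    return -keypoint_dist * keypoint_reward_scale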
| 31,568 | Python | 37.926017 | 131 | 0.589268 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_schema_config_base.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: schema for base class configuration.
Used by Hydra. Defines template for base class YAML file.
"""
from dataclasses import dataclass
@dataclass
class Mode:
export_scene: bool # export scene to USD
export_states: bool # export states to NPY
@dataclass
class Sim:
dt: float # timestep size (default = 1.0 / 60.0)
num_substeps: int # number of substeps (default = 2)
num_pos_iters: int # number of position iterations for PhysX TGS solver (default = 4)
num_vel_iters: int # number of velocity iterations for PhysX TGS solver (default = 1)
gravity_mag: float # magnitude of gravitational acceleration
add_damping: bool # add damping to stabilize gripper-object interactions
@dataclass
class Env:
env_spacing: float # lateral offset between envs
franka_depth: float # depth offset of Franka base relative to env origin
table_height: float # height of table
franka_friction: float # coefficient of friction associated with Franka
table_friction: float # coefficient of friction associated with table
@dataclass
class FactorySchemaConfigBase:
mode: Mode
sim: Sim
env: Env
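# --- Illustrative sketch (not part of the original schema module) -------------
# Hedged example of how this schema is presumably registered with Hydra's
# ConfigStore so a base-class YAML file can be validated against it. The store
# name below is an assumption that mirrors the env/task schema registrations
# used elsewhere in this repository.
def _example_register_base_schema():
    import hydra  # local import; the original module only depends on dataclasses
    cs = hydra.core.config_store.ConfigStore.instance()
    cs.store(name="factory_schema_config_base", node=FactorySchemaConfigBase)
    return cs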
| 2,724 | Python | 39.073529 | 90 | 0.757342 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_env_nut_bolt.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: class for nut-bolt env.
Inherits base class and abstract environment class. Inherited by nut-bolt task classes. Not directly executed.
Configuration defined in FactoryEnvNutBolt.yaml. Asset info defined in factory_asset_info_nut_bolt.yaml.
"""
import hydra
import numpy as np
import torch
from omni.isaac.core.prims import RigidPrimView, XFormPrim
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omni.physx.scripts import physicsUtils, utils
from omniisaacgymenvs.robots.articulations.views.factory_franka_view import (
FactoryFrankaView,
)
import omniisaacgymenvs.tasks.factory.factory_control as fc
from omniisaacgymenvs.tasks.factory.factory_base import FactoryBase
from omniisaacgymenvs.tasks.factory.factory_schema_class_env import FactoryABCEnv
from omniisaacgymenvs.tasks.factory.factory_schema_config_env import (
FactorySchemaConfigEnv,
)
class FactoryEnvNutBolt(FactoryBase, FactoryABCEnv):
def __init__(self, name, sim_config, env) -> None:
"""Initialize base superclass. Initialize instance variables."""
super().__init__(name, sim_config, env)
self._get_env_yaml_params()
def _get_env_yaml_params(self):
"""Initialize instance variables from YAML files."""
cs = hydra.core.config_store.ConfigStore.instance()
cs.store(name="factory_schema_config_env", node=FactorySchemaConfigEnv)
config_path = (
"task/FactoryEnvNutBolt.yaml" # relative to Hydra search path (cfg dir)
)
self.cfg_env = hydra.compose(config_name=config_path)
self.cfg_env = self.cfg_env["task"] # strip superfluous nesting
asset_info_path = "../tasks/factory/yaml/factory_asset_info_nut_bolt.yaml"
self.asset_info_nut_bolt = hydra.compose(config_name=asset_info_path)
self.asset_info_nut_bolt = self.asset_info_nut_bolt[""][""][""]["tasks"][
"factory"
][
"yaml"
] # strip superfluous nesting
def update_config(self, sim_config):
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._num_observations = self._task_cfg["env"]["numObservations"]
self._num_actions = self._task_cfg["env"]["numActions"]
self._env_spacing = self.cfg_base["env"]["env_spacing"]
self._get_env_yaml_params()
def set_up_scene(self, scene) -> None:
"""Import assets. Add to scene."""
# Increase buffer size to prevent overflow for Place and Screw tasks
physxSceneAPI = self._env._world.get_physics_context()._physx_scene_api
physxSceneAPI.CreateGpuCollisionStackSizeAttr().Set(256 * 1024 * 1024)
self.import_franka_assets(add_to_stage=True)
self.create_nut_bolt_material()
RLTask.set_up_scene(self, scene, replicate_physics=False)
self._import_env_assets(add_to_stage=True)
self.frankas = FactoryFrankaView(
prim_paths_expr="/World/envs/.*/franka", name="frankas_view"
)
self.nuts = RigidPrimView(
prim_paths_expr="/World/envs/.*/nut/factory_nut.*",
name="nuts_view",
track_contact_forces=True,
)
self.bolts = RigidPrimView(
prim_paths_expr="/World/envs/.*/bolt/factory_bolt.*",
name="bolts_view",
track_contact_forces=True,
)
scene.add(self.nuts)
scene.add(self.bolts)
scene.add(self.frankas)
scene.add(self.frankas._hands)
scene.add(self.frankas._lfingers)
scene.add(self.frankas._rfingers)
scene.add(self.frankas._fingertip_centered)
return
def initialize_views(self, scene) -> None:
"""Initialize views for extension workflow."""
super().initialize_views(scene)
self.import_franka_assets(add_to_stage=False)
self._import_env_assets(add_to_stage=False)
if scene.object_exists("frankas_view"):
scene.remove_object("frankas_view", registry_only=True)
if scene.object_exists("nuts_view"):
scene.remove_object("nuts_view", registry_only=True)
if scene.object_exists("bolts_view"):
scene.remove_object("bolts_view", registry_only=True)
if scene.object_exists("hands_view"):
scene.remove_object("hands_view", registry_only=True)
if scene.object_exists("lfingers_view"):
scene.remove_object("lfingers_view", registry_only=True)
if scene.object_exists("rfingers_view"):
scene.remove_object("rfingers_view", registry_only=True)
if scene.object_exists("fingertips_view"):
scene.remove_object("fingertips_view", registry_only=True)
self.frankas = FactoryFrankaView(
prim_paths_expr="/World/envs/.*/franka", name="frankas_view"
)
self.nuts = RigidPrimView(
prim_paths_expr="/World/envs/.*/nut/factory_nut.*", name="nuts_view"
)
self.bolts = RigidPrimView(
prim_paths_expr="/World/envs/.*/bolt/factory_bolt.*", name="bolts_view"
)
scene.add(self.nuts)
scene.add(self.bolts)
scene.add(self.frankas)
scene.add(self.frankas._hands)
scene.add(self.frankas._lfingers)
scene.add(self.frankas._rfingers)
scene.add(self.frankas._fingertip_centered)
def create_nut_bolt_material(self):
"""Define nut and bolt material."""
self.nutboltPhysicsMaterialPath = "/World/Physics_Materials/NutBoltMaterial"
utils.addRigidBodyMaterial(
self._stage,
self.nutboltPhysicsMaterialPath,
density=self.cfg_env.env.nut_bolt_density,
staticFriction=self.cfg_env.env.nut_bolt_friction,
dynamicFriction=self.cfg_env.env.nut_bolt_friction,
restitution=0.0,
)
def _import_env_assets(self, add_to_stage=True):
"""Set nut and bolt asset options. Import assets."""
self.nut_heights = []
self.nut_widths_max = []
self.bolt_widths = []
self.bolt_head_heights = []
self.bolt_shank_lengths = []
self.thread_pitches = []
assets_root_path = get_assets_root_path()
for i in range(0, self._num_envs):
j = np.random.randint(0, len(self.cfg_env.env.desired_subassemblies))
subassembly = self.cfg_env.env.desired_subassemblies[j]
components = list(self.asset_info_nut_bolt[subassembly])
nut_translation = torch.tensor(
[
0.0,
self.cfg_env.env.nut_lateral_offset,
self.cfg_base.env.table_height,
],
device=self._device,
)
nut_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
nut_height = self.asset_info_nut_bolt[subassembly][components[0]]["height"]
nut_width_max = self.asset_info_nut_bolt[subassembly][components[0]][
"width_max"
]
self.nut_heights.append(nut_height)
self.nut_widths_max.append(nut_width_max)
nut_file = (
assets_root_path
+ self.asset_info_nut_bolt[subassembly][components[0]]["usd_path"]
)
if add_to_stage:
add_reference_to_stage(nut_file, f"/World/envs/env_{i}" + "/nut")
XFormPrim(
prim_path=f"/World/envs/env_{i}" + "/nut",
translation=nut_translation,
orientation=nut_orientation,
)
self._stage.GetPrimAtPath(
f"/World/envs/env_{i}" + f"/nut/factory_{components[0]}/collisions"
).SetInstanceable(
False
) # This is required to be able to edit physics material
physicsUtils.add_physics_material_to_prim(
self._stage,
self._stage.GetPrimAtPath(
f"/World/envs/env_{i}"
+ f"/nut/factory_{components[0]}/collisions/mesh_0"
),
self.nutboltPhysicsMaterialPath,
)
# applies articulation settings from the task configuration yaml file
self._sim_config.apply_articulation_settings(
"nut",
self._stage.GetPrimAtPath(f"/World/envs/env_{i}" + "/nut"),
self._sim_config.parse_actor_config("nut"),
)
bolt_translation = torch.tensor(
[0.0, 0.0, self.cfg_base.env.table_height], device=self._device
)
bolt_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self._device)
bolt_width = self.asset_info_nut_bolt[subassembly][components[1]]["width"]
bolt_head_height = self.asset_info_nut_bolt[subassembly][components[1]][
"head_height"
]
bolt_shank_length = self.asset_info_nut_bolt[subassembly][components[1]][
"shank_length"
]
self.bolt_widths.append(bolt_width)
self.bolt_head_heights.append(bolt_head_height)
self.bolt_shank_lengths.append(bolt_shank_length)
if add_to_stage:
bolt_file = (
assets_root_path
+ self.asset_info_nut_bolt[subassembly][components[1]]["usd_path"]
)
add_reference_to_stage(bolt_file, f"/World/envs/env_{i}" + "/bolt")
XFormPrim(
prim_path=f"/World/envs/env_{i}" + "/bolt",
translation=bolt_translation,
orientation=bolt_orientation,
)
self._stage.GetPrimAtPath(
f"/World/envs/env_{i}" + f"/bolt/factory_{components[1]}/collisions"
).SetInstanceable(
False
) # This is required to be able to edit physics material
physicsUtils.add_physics_material_to_prim(
self._stage,
self._stage.GetPrimAtPath(
f"/World/envs/env_{i}"
+ f"/bolt/factory_{components[1]}/collisions/mesh_0"
),
self.nutboltPhysicsMaterialPath,
)
# applies articulation settings from the task configuration yaml file
self._sim_config.apply_articulation_settings(
"bolt",
self._stage.GetPrimAtPath(f"/World/envs/env_{i}" + "/bolt"),
self._sim_config.parse_actor_config("bolt"),
)
thread_pitch = self.asset_info_nut_bolt[subassembly]["thread_pitch"]
self.thread_pitches.append(thread_pitch)
# For computing body COM pos
self.nut_heights = torch.tensor(
self.nut_heights, device=self._device
).unsqueeze(-1)
self.bolt_head_heights = torch.tensor(
self.bolt_head_heights, device=self._device
).unsqueeze(-1)
# For setting initial state
self.nut_widths_max = torch.tensor(
self.nut_widths_max, device=self._device
).unsqueeze(-1)
self.bolt_shank_lengths = torch.tensor(
self.bolt_shank_lengths, device=self._device
).unsqueeze(-1)
# For defining success or failure
self.bolt_widths = torch.tensor(
self.bolt_widths, device=self._device
).unsqueeze(-1)
self.thread_pitches = torch.tensor(
self.thread_pitches, device=self._device
).unsqueeze(-1)
def refresh_env_tensors(self):
"""Refresh tensors."""
# Nut tensors
self.nut_pos, self.nut_quat = self.nuts.get_world_poses(clone=False)
self.nut_pos -= self.env_pos
self.nut_com_pos = fc.translate_along_local_z(
pos=self.nut_pos,
quat=self.nut_quat,
offset=self.bolt_head_heights + self.nut_heights * 0.5,
device=self.device,
)
self.nut_com_quat = self.nut_quat # always equal
nut_velocities = self.nuts.get_velocities(clone=False)
self.nut_linvel = nut_velocities[:, 0:3]
self.nut_angvel = nut_velocities[:, 3:6]
self.nut_com_linvel = self.nut_linvel + torch.cross(
self.nut_angvel, (self.nut_com_pos - self.nut_pos), dim=1
)
self.nut_com_angvel = self.nut_angvel # always equal
self.nut_force = self.nuts.get_net_contact_forces(clone=False)
# Bolt tensors
self.bolt_pos, self.bolt_quat = self.bolts.get_world_poses(clone=False)
self.bolt_pos -= self.env_pos
self.bolt_force = self.bolts.get_net_contact_forces(clone=False)
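# --- Illustrative sketch (not part of the original environment class) ---------
# Hedged, self-contained example of the rigid-body relation used in
# refresh_env_tensors() above: the COM linear velocity equals the body-origin
# linear velocity plus the angular velocity crossed with the COM offset. All
# numbers are made up for illustration.
def _example_com_linvel(device="cpu"):
    linvel = torch.tensor([[0.10, 0.0, 0.0]], device=device)       # body-origin linear velocity
    angvel = torch.tensor([[0.0, 0.0, 1.0]], device=device)        # angular velocity about Z
    com_offset = torch.tensor([[0.0, 0.05, 0.0]], device=device)   # nut_com_pos - nut_pos
    com_linvel = linvel + torch.cross(angvel, com_offset, dim=1)
    return com_linvel  # ~[[0.05, 0.0, 0.0]]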
| 14,709 | Python | 39.30137 | 110 | 0.603372 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/factory/factory_control.py | # Copyright (c) 2018-2023, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Factory: control module.
Imported by base, environment, and task classes. Not directly executed.
"""
import math
import omni.isaac.core.utils.torch as torch_utils
import torch
def compute_dof_pos_target(
cfg_ctrl,
arm_dof_pos,
fingertip_midpoint_pos,
fingertip_midpoint_quat,
jacobian,
ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat,
ctrl_target_gripper_dof_pos,
device,
):
"""Compute Franka DOF position target to move fingertips towards target pose."""
ctrl_target_dof_pos = torch.zeros((cfg_ctrl["num_envs"], 9), device=device)
pos_error, axis_angle_error = get_pose_error(
fingertip_midpoint_pos=fingertip_midpoint_pos,
fingertip_midpoint_quat=fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat,
jacobian_type=cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1)
delta_arm_dof_pos = _get_delta_dof_pos(
delta_pose=delta_fingertip_pose,
ik_method=cfg_ctrl["ik_method"],
jacobian=jacobian,
device=device,
)
ctrl_target_dof_pos[:, 0:7] = arm_dof_pos + delta_arm_dof_pos
ctrl_target_dof_pos[:, 7:9] = ctrl_target_gripper_dof_pos # gripper finger joints
return ctrl_target_dof_pos
def compute_dof_torque(
cfg_ctrl,
dof_pos,
dof_vel,
fingertip_midpoint_pos,
fingertip_midpoint_quat,
fingertip_midpoint_linvel,
fingertip_midpoint_angvel,
left_finger_force,
right_finger_force,
jacobian,
arm_mass_matrix,
ctrl_target_gripper_dof_pos,
ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat,
ctrl_target_fingertip_contact_wrench,
device,
):
"""Compute Franka DOF torque to move fingertips towards target pose."""
# References:
# 1) https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf
# 2) Modern Robotics
dof_torque = torch.zeros((cfg_ctrl["num_envs"], 9), device=device)
if cfg_ctrl["gain_space"] == "joint":
pos_error, axis_angle_error = get_pose_error(
fingertip_midpoint_pos=fingertip_midpoint_pos,
fingertip_midpoint_quat=fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat,
jacobian_type=cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1)
# Set tau = k_p * joint_pos_error - k_d * joint_vel_error (ETH eq. 3.72)
delta_arm_dof_pos = _get_delta_dof_pos(
delta_pose=delta_fingertip_pose,
ik_method=cfg_ctrl["ik_method"],
jacobian=jacobian,
device=device,
)
dof_torque[:, 0:7] = cfg_ctrl[
"joint_prop_gains"
] * delta_arm_dof_pos + cfg_ctrl["joint_deriv_gains"] * (0.0 - dof_vel[:, 0:7])
if cfg_ctrl["do_inertial_comp"]:
# Set tau = M * tau, where M is the joint-space mass matrix
arm_mass_matrix_joint = arm_mass_matrix
dof_torque[:, 0:7] = (
arm_mass_matrix_joint @ dof_torque[:, 0:7].unsqueeze(-1)
).squeeze(-1)
elif cfg_ctrl["gain_space"] == "task":
task_wrench = torch.zeros((cfg_ctrl["num_envs"], 6), device=device)
if cfg_ctrl["do_motion_ctrl"]:
pos_error, axis_angle_error = get_pose_error(
fingertip_midpoint_pos=fingertip_midpoint_pos,
fingertip_midpoint_quat=fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos=ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat=ctrl_target_fingertip_midpoint_quat,
jacobian_type=cfg_ctrl["jacobian_type"],
rot_error_type="axis_angle",
)
delta_fingertip_pose = torch.cat((pos_error, axis_angle_error), dim=1)
# Set tau = k_p * task_pos_error - k_d * task_vel_error (building towards eq. 3.96-3.98)
task_wrench_motion = _apply_task_space_gains(
delta_fingertip_pose=delta_fingertip_pose,
fingertip_midpoint_linvel=fingertip_midpoint_linvel,
fingertip_midpoint_angvel=fingertip_midpoint_angvel,
task_prop_gains=cfg_ctrl["task_prop_gains"],
task_deriv_gains=cfg_ctrl["task_deriv_gains"],
)
if cfg_ctrl["do_inertial_comp"]:
# Set tau = Lambda * tau, where Lambda is the task-space mass matrix
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
arm_mass_matrix_task = torch.inverse(
jacobian @ torch.inverse(arm_mass_matrix) @ jacobian_T
) # ETH eq. 3.86; geometric Jacobian is assumed
task_wrench_motion = (
arm_mass_matrix_task @ task_wrench_motion.unsqueeze(-1)
).squeeze(-1)
task_wrench = (
task_wrench + cfg_ctrl["motion_ctrl_axes"] * task_wrench_motion
)
if cfg_ctrl["do_force_ctrl"]:
# Set tau = tau + F_t, where F_t is the target contact wrench
task_wrench_force = torch.zeros((cfg_ctrl["num_envs"], 6), device=device)
task_wrench_force = (
task_wrench_force + ctrl_target_fingertip_contact_wrench
) # open-loop force control (building towards ETH eq. 3.96-3.98)
if cfg_ctrl["force_ctrl_method"] == "closed":
force_error, torque_error = _get_wrench_error(
left_finger_force=left_finger_force,
right_finger_force=right_finger_force,
ctrl_target_fingertip_contact_wrench=ctrl_target_fingertip_contact_wrench,
num_envs=cfg_ctrl["num_envs"],
device=device,
)
# Set tau = tau + k_p * contact_wrench_error
task_wrench_force = task_wrench_force + cfg_ctrl[
"wrench_prop_gains"
] * torch.cat(
(force_error, torque_error), dim=1
) # part of Modern Robotics eq. 11.61
task_wrench = (
task_wrench
+ torch.tensor(cfg_ctrl["force_ctrl_axes"], device=device).unsqueeze(0)
* task_wrench_force
)
# Set tau = J^T * tau, i.e., map tau into joint space as desired
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
dof_torque[:, 0:7] = (jacobian_T @ task_wrench.unsqueeze(-1)).squeeze(-1)
dof_torque[:, 7:9] = cfg_ctrl["gripper_prop_gains"] * (
ctrl_target_gripper_dof_pos - dof_pos[:, 7:9]
) + cfg_ctrl["gripper_deriv_gains"] * (
0.0 - dof_vel[:, 7:9]
) # gripper finger joints
dof_torque = torch.clamp(dof_torque, min=-100.0, max=100.0)
return dof_torque
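# --- Illustrative sketch (not part of the original module) --------------------
# Hedged, self-contained example of the joint-space PD law used by the "joint"
# gain-space branch of compute_dof_torque() above:
#   tau = k_p * delta_q + k_d * (0 - q_dot)
# Gains and errors below are made-up numbers, not values from any config file.
def _example_joint_space_pd(device="cpu"):
    delta_arm_dof_pos = torch.tensor([[0.05, 0.0, -0.02, 0.0, 0.0, 0.01, 0.0]], device=device)
    dof_vel = torch.tensor([[0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]], device=device)
    joint_prop_gains = 40.0 * torch.ones((1, 7), device=device)   # assumed gains
    joint_deriv_gains = 8.0 * torch.ones((1, 7), device=device)   # assumed gains
    dof_torque = joint_prop_gains * delta_arm_dof_pos + joint_deriv_gains * (0.0 - dof_vel)
    return dof_torque  # first joint: 40 * 0.05 - 8 * 0.1 = 1.2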
def get_pose_error(
fingertip_midpoint_pos,
fingertip_midpoint_quat,
ctrl_target_fingertip_midpoint_pos,
ctrl_target_fingertip_midpoint_quat,
jacobian_type,
rot_error_type,
):
"""Compute task-space error between target Franka fingertip pose and current pose."""
# Reference: https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf
# Compute pos error
pos_error = ctrl_target_fingertip_midpoint_pos - fingertip_midpoint_pos
# Compute rot error
if (
jacobian_type == "geometric"
): # See example 2.9.8; note use of J_g and transformation between rotation vectors
# Compute quat error (i.e., difference quat)
        # Reference: https://personal.utdallas.edu/~sxb027100/dock/quaternion.html
fingertip_midpoint_quat_norm = torch_utils.quat_mul(
fingertip_midpoint_quat, torch_utils.quat_conjugate(fingertip_midpoint_quat)
)[
:, 0
] # scalar component
fingertip_midpoint_quat_inv = torch_utils.quat_conjugate(
fingertip_midpoint_quat
) / fingertip_midpoint_quat_norm.unsqueeze(-1)
quat_error = torch_utils.quat_mul(
ctrl_target_fingertip_midpoint_quat, fingertip_midpoint_quat_inv
)
# Convert to axis-angle error
axis_angle_error = axis_angle_from_quat(quat_error)
elif (
jacobian_type == "analytic"
): # See example 2.9.7; note use of J_a and difference of rotation vectors
# Compute axis-angle error
axis_angle_error = axis_angle_from_quat(
ctrl_target_fingertip_midpoint_quat
) - axis_angle_from_quat(fingertip_midpoint_quat)
if rot_error_type == "quat":
return pos_error, quat_error
elif rot_error_type == "axis_angle":
return pos_error, axis_angle_error
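# --- Illustrative sketch (not part of the original module) --------------------
# Hedged example of get_pose_error() on toy inputs: the current fingertip frame is
# at the origin with identity orientation (wxyz), and the target is translated by
# [0.1, 0, 0.2] and rotated by pi/4 about Z. All numbers are made up.
def _example_pose_error(device="cpu"):
    cur_pos = torch.zeros((1, 3), device=device)
    cur_quat = torch.tensor([[1.0, 0.0, 0.0, 0.0]], device=device)  # identity quaternion (w, x, y, z)
    target_pos = torch.tensor([[0.1, 0.0, 0.2]], device=device)
    half = math.pi / 8.0
    target_quat = torch.tensor([[math.cos(half), 0.0, 0.0, math.sin(half)]], device=device)  # pi/4 about Z
    pos_error, axis_angle_error = get_pose_error(
        fingertip_midpoint_pos=cur_pos,
        fingertip_midpoint_quat=cur_quat,
        ctrl_target_fingertip_midpoint_pos=target_pos,
        ctrl_target_fingertip_midpoint_quat=target_quat,
        jacobian_type="geometric",
        rot_error_type="axis_angle",
    )
    return pos_error, axis_angle_error  # ~[[0.1, 0.0, 0.2]] and ~[[0.0, 0.0, pi / 4]]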
def _get_wrench_error(
left_finger_force,
right_finger_force,
ctrl_target_fingertip_contact_wrench,
num_envs,
device,
):
"""Compute task-space error between target Franka fingertip contact wrench and current wrench."""
fingertip_contact_wrench = torch.zeros((num_envs, 6), device=device)
fingertip_contact_wrench[:, 0:3] = (
left_finger_force + right_finger_force
) # net contact force on fingers
    # Torque components (cols 3:6) remain zero; net contact torque on the fingers is not available here
force_error = ctrl_target_fingertip_contact_wrench[:, 0:3] - (
-fingertip_contact_wrench[:, 0:3]
)
torque_error = ctrl_target_fingertip_contact_wrench[:, 3:6] - (
-fingertip_contact_wrench[:, 3:6]
)
return force_error, torque_error
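# --- Illustrative sketch (not part of the original module) --------------------
# Hedged example of _get_wrench_error() on toy finger forces. Note the sign flip:
# the sensed wrench enters with a negative sign, so a downward force target and a
# downward sensed force add up in the error here. All numbers are made up.
def _example_wrench_error(device="cpu"):
    left_finger_force = torch.tensor([[0.0, 0.0, -2.0]], device=device)
    right_finger_force = torch.tensor([[0.0, 0.0, -2.0]], device=device)
    target_wrench = torch.zeros((1, 6), device=device)
    target_wrench[:, 2] = -10.0  # desired downward contact force (assumed value)
    force_error, torque_error = _get_wrench_error(
        left_finger_force=left_finger_force,
        right_finger_force=right_finger_force,
        ctrl_target_fingertip_contact_wrench=target_wrench,
        num_envs=1,
        device=device,
    )
    return force_error, torque_error  # ~[[0.0, 0.0, -14.0]] and zeros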
def _get_delta_dof_pos(delta_pose, ik_method, jacobian, device):
"""Get delta Franka DOF position from delta pose using specified IK method."""
# References:
# 1) https://www.cs.cmu.edu/~15464-s13/lectures/lecture6/iksurvey.pdf
# 2) https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf (p. 47)
if ik_method == "pinv": # Jacobian pseudoinverse
k_val = 1.0
jacobian_pinv = torch.linalg.pinv(jacobian)
delta_dof_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1)
delta_dof_pos = delta_dof_pos.squeeze(-1)
elif ik_method == "trans": # Jacobian transpose
k_val = 1.0
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
delta_dof_pos = k_val * jacobian_T @ delta_pose.unsqueeze(-1)
delta_dof_pos = delta_dof_pos.squeeze(-1)
elif ik_method == "dls": # damped least squares (Levenberg-Marquardt)
lambda_val = 0.1
jacobian_T = torch.transpose(jacobian, dim0=1, dim1=2)
lambda_matrix = (lambda_val**2) * torch.eye(
n=jacobian.shape[1], device=device
)
delta_dof_pos = (
jacobian_T
@ torch.inverse(jacobian @ jacobian_T + lambda_matrix)
@ delta_pose.unsqueeze(-1)
)
delta_dof_pos = delta_dof_pos.squeeze(-1)
elif ik_method == "svd": # adaptive SVD
k_val = 1.0
U, S, Vh = torch.linalg.svd(jacobian)
S_inv = 1.0 / S
min_singular_value = 1.0e-5
S_inv = torch.where(S > min_singular_value, S_inv, torch.zeros_like(S_inv))
jacobian_pinv = (
torch.transpose(Vh, dim0=1, dim1=2)[:, :, :6]
@ torch.diag_embed(S_inv)
@ torch.transpose(U, dim0=1, dim1=2)
)
delta_dof_pos = k_val * jacobian_pinv @ delta_pose.unsqueeze(-1)
delta_dof_pos = delta_dof_pos.squeeze(-1)
return delta_dof_pos
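# --- Illustrative sketch (not part of the original module) --------------------
# Hedged example of the damped-least-squares branch above on a toy batch: one env,
# a random 6x7 Jacobian, and a small uniform 6-DOF pose error. The shapes mirror
# what compute_dof_pos_target() passes in; the values themselves are made up.
def _example_dls_ik_step(device="cpu"):
    jacobian = torch.rand((1, 6, 7), device=device)         # (num_envs, 6, num_arm_dofs)
    delta_pose = 0.01 * torch.ones((1, 6), device=device)   # small pos + axis-angle error
    delta_dof_pos = _get_delta_dof_pos(
        delta_pose=delta_pose, ik_method="dls", jacobian=jacobian, device=device
    )
    return delta_dof_pos  # shape (1, 7): joint-position step toward the target pose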
def _apply_task_space_gains(
delta_fingertip_pose,
fingertip_midpoint_linvel,
fingertip_midpoint_angvel,
task_prop_gains,
task_deriv_gains,
):
"""Interpret PD gains as task-space gains. Apply to task-space error."""
task_wrench = torch.zeros_like(delta_fingertip_pose)
# Apply gains to lin error components
lin_error = delta_fingertip_pose[:, 0:3]
task_wrench[:, 0:3] = task_prop_gains[:, 0:3] * lin_error + task_deriv_gains[
:, 0:3
] * (0.0 - fingertip_midpoint_linvel)
# Apply gains to rot error components
rot_error = delta_fingertip_pose[:, 3:6]
task_wrench[:, 3:6] = task_prop_gains[:, 3:6] * rot_error + task_deriv_gains[
:, 3:6
] * (0.0 - fingertip_midpoint_angvel)
return task_wrench
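# --- Illustrative sketch (not part of the original module) --------------------
# Hedged example of _apply_task_space_gains() with zero fingertip velocity, so only
# the proportional terms contribute. The gain values are assumptions, not values
# from any config file.
def _example_task_space_gains(device="cpu"):
    delta_pose = torch.tensor([[0.1, 0.0, 0.0, 0.0, 0.0, 0.2]], device=device)  # lin + rot error
    linvel = torch.zeros((1, 3), device=device)
    angvel = torch.zeros((1, 3), device=device)
    task_prop_gains = 100.0 * torch.ones((1, 6), device=device)   # assumed gains
    task_deriv_gains = 10.0 * torch.ones((1, 6), device=device)   # assumed gains
    task_wrench = _apply_task_space_gains(
        delta_fingertip_pose=delta_pose,
        fingertip_midpoint_linvel=linvel,
        fingertip_midpoint_angvel=angvel,
        task_prop_gains=task_prop_gains,
        task_deriv_gains=task_deriv_gains,
    )
    return task_wrench  # ~[[10.0, 0.0, 0.0, 0.0, 0.0, 20.0]]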
def get_analytic_jacobian(fingertip_quat, fingertip_jacobian, num_envs, device):
"""Convert geometric Jacobian to analytic Jacobian."""
# Reference: https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf
# NOTE: Gym returns world-space geometric Jacobians by default
batch = num_envs
# Overview:
# x = [x_p; x_r]
# From eq. 2.189 and 2.192, x_dot = J_a @ q_dot = (E_inv @ J_g) @ q_dot
# From eq. 2.191, E = block(E_p, E_r); thus, E_inv = block(E_p_inv, E_r_inv)
# Eq. 2.12 gives an expression for E_p_inv
# Eq. 2.107 gives an expression for E_r_inv
# Compute E_inv_top (i.e., [E_p_inv, 0])
I = torch.eye(3, device=device)
E_p_inv = I.repeat((batch, 1)).reshape(batch, 3, 3)
E_inv_top = torch.cat((E_p_inv, torch.zeros((batch, 3, 3), device=device)), dim=2)
# Compute E_inv_bottom (i.e., [0, E_r_inv])
fingertip_axis_angle = axis_angle_from_quat(fingertip_quat)
fingertip_axis_angle_cross = get_skew_symm_matrix(
fingertip_axis_angle, device=device
)
fingertip_angle = torch.linalg.vector_norm(fingertip_axis_angle, dim=1)
factor_1 = 1 / (fingertip_angle**2)
factor_2 = 1 - fingertip_angle * 0.5 * torch.sin(fingertip_angle) / (
1 - torch.cos(fingertip_angle)
)
factor_3 = factor_1 * factor_2
E_r_inv = (
I
- 1 * 0.5 * fingertip_axis_angle_cross
+ (fingertip_axis_angle_cross @ fingertip_axis_angle_cross)
* factor_3.unsqueeze(-1).repeat((1, 3 * 3)).reshape((batch, 3, 3))
)
E_inv_bottom = torch.cat(
(torch.zeros((batch, 3, 3), device=device), E_r_inv), dim=2
)
E_inv = torch.cat(
(E_inv_top.reshape((batch, 3 * 6)), E_inv_bottom.reshape((batch, 3 * 6))), dim=1
).reshape((batch, 6, 6))
J_a = E_inv @ fingertip_jacobian
return J_a
def get_skew_symm_matrix(vec, device):
"""Convert vector to skew-symmetric matrix."""
# Reference: https://en.wikipedia.org/wiki/Cross_product#Conversion_to_matrix_multiplication
batch = vec.shape[0]
I = torch.eye(3, device=device)
skew_symm = torch.transpose(
torch.cross(
vec.repeat((1, 3)).reshape((batch * 3, 3)), I.repeat((batch, 1))
).reshape(batch, 3, 3),
dim0=1,
dim1=2,
)
return skew_symm
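# --- Illustrative sketch (not part of the original module) --------------------
# Hedged check of get_skew_symm_matrix(): multiplying the returned matrix by a
# vector should reproduce the cross product v x u. A batch of two is used so the
# shapes match the batched layout assumed by the function; values are made up.
def _example_skew_symm_cross(device="cpu"):
    v = torch.tensor([[1.0, 2.0, 3.0], [0.0, 1.0, 0.0]], device=device)
    u = torch.tensor([[0.5, -1.0, 2.0], [1.0, 0.0, 0.0]], device=device)
    skew = get_skew_symm_matrix(v, device=device)            # (2, 3, 3)
    matmul_result = (skew @ u.unsqueeze(-1)).squeeze(-1)     # [v]_x @ u
    cross_result = torch.cross(v, u, dim=-1)                 # v x u
    return matmul_result, cross_result  # expected to match up to floating point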
def translate_along_local_z(pos, quat, offset, device):
"""Translate global body position along local Z-axis and express in global coordinates."""
num_vecs = pos.shape[0]
offset_vec = offset * torch.tensor([0.0, 0.0, 1.0], device=device).repeat(
(num_vecs, 1)
)
_, translated_pos = torch_utils.tf_combine(
q1=quat,
t1=pos,
q2=torch.tensor([1.0, 0.0, 0.0, 0.0], device=device).repeat((num_vecs, 1)),
t2=offset_vec,
)
return translated_pos
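# --- Illustrative sketch (not part of the original module) --------------------
# Hedged example of translate_along_local_z() with an identity orientation, where
# the local Z-axis coincides with the world Z-axis, so the offset is simply added
# to the Z coordinate. All numbers are made up.
def _example_translate_along_local_z(device="cpu"):
    pos = torch.tensor([[0.2, 0.0, 0.4]], device=device)
    quat = torch.tensor([[1.0, 0.0, 0.0, 0.0]], device=device)  # identity (w, x, y, z)
    translated_pos = translate_along_local_z(pos=pos, quat=quat, offset=0.1, device=device)
    return translated_pos  # ~[[0.2, 0.0, 0.5]]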
def axis_angle_from_euler(euler):
"""Convert tensor of Euler angles to tensor of axis-angles."""
quat = torch_utils.quat_from_euler_xyz(
roll=euler[:, 0], pitch=euler[:, 1], yaw=euler[:, 2]
)
quat = quat * torch.sign(quat[:, 0]).unsqueeze(-1) # smaller rotation
axis_angle = axis_angle_from_quat(quat)
return axis_angle
def axis_angle_from_quat(quat, eps=1.0e-6):
"""Convert tensor of quaternions to tensor of axis-angles."""
# Reference: https://github.com/facebookresearch/pytorch3d/blob/bee31c48d3d36a8ea268f9835663c52ff4a476ec/pytorch3d/transforms/rotation_conversions.py#L516-L544
mag = torch.linalg.norm(quat[:, 1:4], dim=1)
half_angle = torch.atan2(mag, quat[:, 0])
angle = 2.0 * half_angle
sin_half_angle_over_angle = torch.where(
torch.abs(angle) > eps, torch.sin(half_angle) / angle, 1 / 2 - angle**2.0 / 48
)
axis_angle = quat[:, 1:4] / sin_half_angle_over_angle.unsqueeze(-1)
return axis_angle
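# --- Illustrative sketch (not part of the original module) --------------------
# Hedged round-trip check for axis_angle_from_quat(): build a w-first quaternion
# for a known rotation of `angle` about a unit axis and confirm the recovered
# axis-angle vector is approximately angle * axis. Values are made up.
def _example_axis_angle_round_trip(device="cpu"):
    angle = torch.tensor([0.5], device=device)             # radians
    axis = torch.tensor([[0.0, 0.0, 1.0]], device=device)  # unit Z-axis
    quat = torch.cat(
        (torch.cos(angle / 2).unsqueeze(-1), torch.sin(angle / 2).unsqueeze(-1) * axis), dim=1
    )  # (1, 4), w-first
    return axis_angle_from_quat(quat)  # ~[[0.0, 0.0, 0.5]]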
def axis_angle_from_quat_naive(quat):
"""Convert tensor of quaternions to tensor of axis-angles."""
    # Reference: https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Recovering_the_axis-angle_representation
# NOTE: Susceptible to undesirable behavior due to divide-by-zero
mag = torch.linalg.vector_norm(quat[:, 1:4], dim=1) # zero when quat = [1, 0, 0, 0]
axis = quat[:, 1:4] / mag.unsqueeze(-1)
angle = 2.0 * torch.atan2(mag, quat[:, 0])
axis_angle = axis * angle.unsqueeze(-1)
return axis_angle
def get_rand_quat(num_quats, device):
"""Generate tensor of random quaternions."""
# Reference: http://planning.cs.uiuc.edu/node198.html
u = torch.rand((num_quats, 3), device=device)
quat = torch.zeros((num_quats, 4), device=device)
quat[:, 0] = torch.sqrt(u[:, 0]) * torch.cos(2 * math.pi * u[:, 2])
quat[:, 1] = torch.sqrt(1 - u[:, 0]) * torch.sin(2 * math.pi * u[:, 1])
quat[:, 2] = torch.sqrt(1 - u[:, 0]) * torch.cos(2 * math.pi * u[:, 1])
quat[:, 3] = torch.sqrt(u[:, 0]) * torch.sin(2 * math.pi * u[:, 2])
return quat
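# --- Illustrative sketch (not part of the original module) --------------------
# Hedged sanity check for get_rand_quat(): the construction above follows the
# uniform-sampling scheme from the referenced page, so every sampled quaternion
# should have unit norm.
def _example_rand_quat_norm(device="cpu"):
    quat = get_rand_quat(num_quats=4, device=device)
    return torch.linalg.norm(quat, dim=1)  # each entry ~1.0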
def get_nonrand_quat(num_quats, rot_perturbation, device):
"""Generate tensor of non-random quaternions by composing random Euler rotations."""
quat = torch_utils.quat_from_euler_xyz(
torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0
- rot_perturbation,
torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0
- rot_perturbation,
torch.rand((num_quats, 1), device=device).squeeze() * rot_perturbation * 2.0
- rot_perturbation,
)
return quat
| 19,859 | Python | 37.864971 | 163 | 0.627574 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/utils/anymal_terrain_generator.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy as np
import torch
from omniisaacgymenvs.utils.terrain_utils.terrain_utils import *
# terrain generator
class Terrain:
def __init__(self, cfg, num_robots) -> None:
self.horizontal_scale = 0.1
self.vertical_scale = 0.005
self.border_size = 20
self.num_per_env = 2
self.env_length = cfg["mapLength"]
self.env_width = cfg["mapWidth"]
self.proportions = [np.sum(cfg["terrainProportions"][: i + 1]) for i in range(len(cfg["terrainProportions"]))]
self.env_rows = cfg["numLevels"]
self.env_cols = cfg["numTerrains"]
self.num_maps = self.env_rows * self.env_cols
self.num_per_env = int(num_robots / self.num_maps)
self.env_origins = np.zeros((self.env_rows, self.env_cols, 3))
self.width_per_env_pixels = int(self.env_width / self.horizontal_scale)
self.length_per_env_pixels = int(self.env_length / self.horizontal_scale)
self.border = int(self.border_size / self.horizontal_scale)
self.tot_cols = int(self.env_cols * self.width_per_env_pixels) + 2 * self.border
self.tot_rows = int(self.env_rows * self.length_per_env_pixels) + 2 * self.border
self.height_field_raw = np.zeros((self.tot_rows, self.tot_cols), dtype=np.int16)
if cfg["curriculum"]:
            self.curriculum(num_robots, num_terrains=self.env_cols, num_levels=self.env_rows)
else:
self.randomized_terrain()
self.heightsamples = self.height_field_raw
self.vertices, self.triangles = convert_heightfield_to_trimesh(
self.height_field_raw, self.horizontal_scale, self.vertical_scale, cfg["slopeTreshold"]
)
def randomized_terrain(self):
for k in range(self.num_maps):
# Env coordinates in the world
(i, j) = np.unravel_index(k, (self.env_rows, self.env_cols))
# Heightfield coordinate system from now on
start_x = self.border + i * self.length_per_env_pixels
end_x = self.border + (i + 1) * self.length_per_env_pixels
start_y = self.border + j * self.width_per_env_pixels
end_y = self.border + (j + 1) * self.width_per_env_pixels
terrain = SubTerrain(
"terrain",
width=self.width_per_env_pixels,
length=self.width_per_env_pixels,
vertical_scale=self.vertical_scale,
horizontal_scale=self.horizontal_scale,
)
choice = np.random.uniform(0, 1)
if choice < 0.1:
if np.random.choice([0, 1]):
pyramid_sloped_terrain(terrain, np.random.choice([-0.3, -0.2, 0, 0.2, 0.3]))
random_uniform_terrain(terrain, min_height=-0.1, max_height=0.1, step=0.05, downsampled_scale=0.2)
else:
pyramid_sloped_terrain(terrain, np.random.choice([-0.3, -0.2, 0, 0.2, 0.3]))
elif choice < 0.6:
# step_height = np.random.choice([-0.18, -0.15, -0.1, -0.05, 0.05, 0.1, 0.15, 0.18])
step_height = np.random.choice([-0.15, 0.15])
pyramid_stairs_terrain(terrain, step_width=0.31, step_height=step_height, platform_size=3.0)
elif choice < 1.0:
discrete_obstacles_terrain(terrain, 0.15, 1.0, 2.0, 40, platform_size=3.0)
self.height_field_raw[start_x:end_x, start_y:end_y] = terrain.height_field_raw
env_origin_x = (i + 0.5) * self.env_length
env_origin_y = (j + 0.5) * self.env_width
x1 = int((self.env_length / 2.0 - 1) / self.horizontal_scale)
x2 = int((self.env_length / 2.0 + 1) / self.horizontal_scale)
y1 = int((self.env_width / 2.0 - 1) / self.horizontal_scale)
y2 = int((self.env_width / 2.0 + 1) / self.horizontal_scale)
env_origin_z = np.max(terrain.height_field_raw[x1:x2, y1:y2]) * self.vertical_scale
self.env_origins[i, j] = [env_origin_x, env_origin_y, env_origin_z]
def curiculum(self, num_robots, num_terrains, num_levels):
num_robots_per_map = int(num_robots / num_terrains)
left_over = num_robots % num_terrains
idx = 0
for j in range(num_terrains):
for i in range(num_levels):
terrain = SubTerrain(
"terrain",
width=self.width_per_env_pixels,
length=self.width_per_env_pixels,
vertical_scale=self.vertical_scale,
horizontal_scale=self.horizontal_scale,
)
difficulty = i / num_levels
choice = j / num_terrains
slope = difficulty * 0.4
step_height = 0.05 + 0.175 * difficulty
discrete_obstacles_height = 0.025 + difficulty * 0.15
stepping_stones_size = 2 - 1.8 * difficulty
if choice < self.proportions[0]:
if choice < 0.05:
slope *= -1
pyramid_sloped_terrain(terrain, slope=slope, platform_size=3.0)
elif choice < self.proportions[1]:
if choice < 0.15:
slope *= -1
pyramid_sloped_terrain(terrain, slope=slope, platform_size=3.0)
random_uniform_terrain(terrain, min_height=-0.1, max_height=0.1, step=0.025, downsampled_scale=0.2)
elif choice < self.proportions[3]:
if choice < self.proportions[2]:
step_height *= -1
pyramid_stairs_terrain(terrain, step_width=0.31, step_height=step_height, platform_size=3.0)
elif choice < self.proportions[4]:
discrete_obstacles_terrain(terrain, discrete_obstacles_height, 1.0, 2.0, 40, platform_size=3.0)
else:
stepping_stones_terrain(
terrain, stone_size=stepping_stones_size, stone_distance=0.1, max_height=0.0, platform_size=3.0
)
# Heightfield coordinate system
start_x = self.border + i * self.length_per_env_pixels
end_x = self.border + (i + 1) * self.length_per_env_pixels
start_y = self.border + j * self.width_per_env_pixels
end_y = self.border + (j + 1) * self.width_per_env_pixels
self.height_field_raw[start_x:end_x, start_y:end_y] = terrain.height_field_raw
robots_in_map = num_robots_per_map
if j < left_over:
robots_in_map += 1
env_origin_x = (i + 0.5) * self.env_length
env_origin_y = (j + 0.5) * self.env_width
x1 = int((self.env_length / 2.0 - 1) / self.horizontal_scale)
x2 = int((self.env_length / 2.0 + 1) / self.horizontal_scale)
y1 = int((self.env_width / 2.0 - 1) / self.horizontal_scale)
y2 = int((self.env_width / 2.0 + 1) / self.horizontal_scale)
env_origin_z = np.max(terrain.height_field_raw[x1:x2, y1:y2]) * self.vertical_scale
self.env_origins[i, j] = [env_origin_x, env_origin_y, env_origin_z]
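# Example usage (a hedged sketch, not part of the original file): building the
# terrain generator from a config dict. The numeric values below are
# illustrative assumptions; only the keys are taken from the code above.
#
#   cfg = {
#       "mapLength": 8.0,
#       "mapWidth": 8.0,
#       "numLevels": 10,
#       "numTerrains": 20,
#       "terrainProportions": [0.1, 0.1, 0.35, 0.25, 0.2],
#       "curriculum": True,
#       "slopeTreshold": 0.5,
#   }
#   terrain = Terrain(cfg, num_robots=200)
#   # terrain.vertices and terrain.triangles can then be used to add the
#   # resulting trimesh to the simulation stage.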
| 8,852 | Python | 50.47093 | 119 | 0.591618 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/utils/usd_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from pxr import UsdLux, UsdPhysics
def set_drive_type(prim_path, drive_type):
joint_prim = get_prim_at_path(prim_path)
# set drive type ("angular" or "linear")
drive = UsdPhysics.DriveAPI.Apply(joint_prim, drive_type)
return drive
def set_drive_target_position(drive, target_value):
if not drive.GetTargetPositionAttr():
drive.CreateTargetPositionAttr(target_value)
else:
drive.GetTargetPositionAttr().Set(target_value)
def set_drive_target_velocity(drive, target_value):
if not drive.GetTargetVelocityAttr():
drive.CreateTargetVelocityAttr(target_value)
else:
drive.GetTargetVelocityAttr().Set(target_value)
def set_drive_stiffness(drive, stiffness):
if not drive.GetStiffnessAttr():
drive.CreateStiffnessAttr(stiffness)
else:
drive.GetStiffnessAttr().Set(stiffness)
def set_drive_damping(drive, damping):
if not drive.GetDampingAttr():
drive.CreateDampingAttr(damping)
else:
drive.GetDampingAttr().Set(damping)
def set_drive_max_force(drive, max_force):
if not drive.GetMaxForceAttr():
drive.CreateMaxForceAttr(max_force)
else:
drive.GetMaxForceAttr().Set(max_force)
def set_drive(prim_path, drive_type, target_type, target_value, stiffness, damping, max_force) -> None:
drive = set_drive_type(prim_path, drive_type)
# set target type ("position" or "velocity")
if target_type == "position":
set_drive_target_position(drive, target_value)
elif target_type == "velocity":
set_drive_target_velocity(drive, target_value)
set_drive_stiffness(drive, stiffness)
set_drive_damping(drive, damping)
set_drive_max_force(drive, max_force)
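# Example usage (a minimal sketch; the joint prim path and gain values below
# are illustrative assumptions, not taken from this repository):
#
#   set_drive(
#       prim_path="/World/envs/env_0/robot/joint1",
#       drive_type="angular",
#       target_type="position",
#       target_value=0.0,
#       stiffness=400.0,
#       damping=40.0,
#       max_force=100.0,
#   )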
| 3,403 | Python | 36.406593 | 103 | 0.740229 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/shared/reacher.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Ref: /omniisaacgymenvs/tasks/shared/reacher.py
import math
from abc import abstractmethod
import numpy as np
import torch
from omni.isaac.core.prims import RigidPrimView, XFormPrim
from omni.isaac.core.scenes.scene import Scene
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import add_reference_to_stage, get_current_stage
from omni.isaac.core.utils.torch import *
from omniisaacgymenvs.tasks.base.rl_task import RLTask
# `scale` maps [-1, 1] to [L, U]; `unscale` maps [L, U] to [-1, 1]
from omni.isaac.core.utils.torch import scale, unscale
from omni.isaac.gym.vec_env import VecEnvBase
class ReacherTask(RLTask):
def __init__(
self,
name: str,
env: VecEnvBase,
offset=None
) -> None:
ReacherTask.update_config(self)
RLTask.__init__(self, name, env)
self.x_unit_tensor = torch.tensor([1, 0, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.y_unit_tensor = torch.tensor([0, 1, 0], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.z_unit_tensor = torch.tensor([0, 0, 1], dtype=torch.float, device=self.device).repeat((self.num_envs, 1))
self.reset_goal_buf = self.reset_buf.clone()
self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)
self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device)
self.av_factor = torch.tensor(self.av_factor, dtype=torch.float, device=self.device)
self.total_successes = 0
self.total_resets = 0
def update_config(self):
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"]
self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"]
self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"]
self.success_tolerance = self._task_cfg["env"]["successTolerance"]
self.reach_goal_bonus = self._task_cfg["env"]["reachGoalBonus"]
self.rot_eps = self._task_cfg["env"]["rotEps"]
self.vel_obs_scale = self._task_cfg["env"]["velObsScale"]
self.reset_position_noise = self._task_cfg["env"]["resetPositionNoise"]
self.reset_rotation_noise = self._task_cfg["env"]["resetRotationNoise"]
self.reset_dof_pos_noise = self._task_cfg["env"]["resetDofPosRandomInterval"]
self.reset_dof_vel_noise = self._task_cfg["env"]["resetDofVelRandomInterval"]
self.arm_dof_speed_scale = self._task_cfg["env"]["dofSpeedScale"]
self.use_relative_control = self._task_cfg["env"]["useRelativeControl"]
self.act_moving_average = self._task_cfg["env"]["actionsMovingAverage"]
self.max_episode_length = self._task_cfg["env"]["episodeLength"]
self.reset_time = self._task_cfg["env"].get("resetTime", -1.0)
self.print_success_stat = self._task_cfg["env"]["printNumSuccesses"]
self.max_consecutive_successes = self._task_cfg["env"]["maxConsecutiveSuccesses"]
self.av_factor = self._task_cfg["env"].get("averFactor", 0.1)
self.dt = 1.0 / 60
control_freq_inv = self._task_cfg["env"].get("controlFrequencyInv", 1)
if self.reset_time > 0.0:
self.max_episode_length = int(round(self.reset_time / (control_freq_inv * self.dt)))
print("Reset time: ", self.reset_time)
print("New episode length: ", self.max_episode_length)
def set_up_scene(self, scene: Scene) -> None:
self._stage = get_current_stage()
self._assets_root_path = 'omniverse://localhost/Projects/J3soon/Isaac/2023.1.0'
self.get_arm()
self.object_start_translation = torch.tensor([0.0, 0.0, 0.0], device=self.device)
self.object_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
self.goal_displacement_tensor = torch.tensor([0.0, 0.0, 0.0], device=self.device)
self.goal_start_translation = torch.tensor([0.0, 0.0, 0.0], device=self.device) + self.goal_displacement_tensor
self.goal_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
self.get_object()
self.get_goal()
super().set_up_scene(scene)
self._arms = self.get_arm_view(scene)
scene.add(self._arms)
self._objects = RigidPrimView(
prim_paths_expr="/World/envs/env_.*/object/object",
name="object_view",
reset_xform_properties=False,
)
self._objects._non_root_link = True # hack to ignore kinematics
scene.add(self._objects)
self._goals = RigidPrimView(
prim_paths_expr="/World/envs/env_.*/goal/object", name="goal_view", reset_xform_properties=False
)
self._goals._non_root_link = True # hack to ignore kinematics
scene.add(self._goals)
def initialize_views(self, scene):
RLTask.initialize_views(self, scene)
if scene.object_exists("dofbot_view"):
scene.remove_object("dofbot_view", registry_only=True)
if scene.object_exists("ur10_view"):
scene.remove_object("ur10_view", registry_only=True)
if scene.object_exists("kuka_view"):
scene.remove_object("kuka_view", registry_only=True)
if scene.object_exists("hiwin_view"):
scene.remove_object("hiwin_view", registry_only=True)
if scene.object_exists("goal_view"):
scene.remove_object("goal_view", registry_only=True)
if scene.object_exists("object_view"):
scene.remove_object("object_view", registry_only=True)
self.object_start_translation = torch.tensor([0.0, 0.0, 0.0], device=self.device)
self.object_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
self.goal_displacement_tensor = torch.tensor([0.0, 0.0, 0.0], device=self.device)
self.goal_start_translation = torch.tensor([0.0, 0.0, 0.0], device=self.device) + self.goal_displacement_tensor
self.goal_start_orientation = torch.tensor([1.0, 0.0, 0.0, 0.0], device=self.device)
self._arms = self.get_arm_view(scene)
scene.add(self._arms)
self._objects = RigidPrimView(
prim_paths_expr="/World/envs/env_.*/object/object",
name="object_view",
reset_xform_properties=False,
)
self._objects._non_root_link = True # hack to ignore kinematics
scene.add(self._objects)
self._goals = RigidPrimView(
prim_paths_expr="/World/envs/env_.*/goal/object", name="goal_view", reset_xform_properties=False
)
self._goals._non_root_link = True # hack to ignore kinematics
scene.add(self._goals)
@abstractmethod
def get_num_dof(self):
pass
@abstractmethod
def get_arm(self):
pass
@abstractmethod
def get_arm_view(self):
pass
@abstractmethod
def get_observations(self):
pass
@abstractmethod
def get_reset_target_new_pos(self, n_reset_envs):
pass
@abstractmethod
def send_joint_pos(self, joint_pos):
pass
def get_object(self):
self.object_usd_path = f"{self._assets_root_path}/Isaac/Props/Blocks/block_instanceable.usd"
add_reference_to_stage(self.object_usd_path, self.default_zero_env_path + "/object")
obj = XFormPrim(
prim_path=self.default_zero_env_path + "/object/object",
name="object",
translation=self.object_start_translation,
orientation=self.object_start_orientation,
scale=self.object_scale,
)
self._sim_config.apply_articulation_settings(
"object", get_prim_at_path(obj.prim_path), self._sim_config.parse_actor_config("object")
)
def get_goal(self):
self.goal_usd_path = f"{self._assets_root_path}/Isaac/Props/Blocks/block_instanceable.usd"
add_reference_to_stage(self.goal_usd_path, self.default_zero_env_path + "/goal")
goal = XFormPrim(
prim_path=self.default_zero_env_path + "/goal/object",
name="goal",
translation=self.goal_start_translation,
orientation=self.goal_start_orientation,
scale=self.goal_scale
)
self._sim_config.apply_articulation_settings("goal", get_prim_at_path(goal.prim_path), self._sim_config.parse_actor_config("goal_object"))
def post_reset(self):
self.num_arm_dofs = self.get_num_dof()
self.actuated_dof_indices = torch.arange(self.num_arm_dofs, dtype=torch.long, device=self.device)
self.arm_dof_targets = torch.zeros((self.num_envs, self._arms.num_dof), dtype=torch.float, device=self.device)
self.prev_targets = torch.zeros((self.num_envs, self.num_arm_dofs), dtype=torch.float, device=self.device)
self.cur_targets = torch.zeros((self.num_envs, self.num_arm_dofs), dtype=torch.float, device=self.device)
dof_limits = self._dof_limits[:, :self.num_arm_dofs]
self.arm_dof_lower_limits, self.arm_dof_upper_limits = torch.t(dof_limits[0].to(self.device))
self.arm_dof_default_pos = torch.zeros(self.num_arm_dofs, dtype=torch.float, device=self.device)
self.arm_dof_default_vel = torch.zeros(self.num_arm_dofs, dtype=torch.float, device=self.device)
self.end_effectors_init_pos, self.end_effectors_init_rot = self._arms._end_effectors.get_world_poses()
self.goal_pos, self.goal_rot = self._goals.get_world_poses()
self.goal_pos -= self._env_pos
# randomize all envs
indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self):
self.fall_dist = 0
self.fall_penalty = 0
(
self.rew_buf[:],
self.reset_buf[:],
self.reset_goal_buf[:],
self.progress_buf[:],
self.successes[:],
self.consecutive_successes[:],
) = compute_arm_reward(
self.rew_buf,
self.reset_buf,
self.reset_goal_buf,
self.progress_buf,
self.successes,
self.consecutive_successes,
self.max_episode_length,
self.object_pos,
self.object_rot,
self.goal_pos,
self.goal_rot,
self.dist_reward_scale,
self.rot_reward_scale,
self.rot_eps,
self.actions,
self.action_penalty_scale,
self.success_tolerance,
self.reach_goal_bonus,
self.fall_dist,
self.fall_penalty,
self.max_consecutive_successes,
self.av_factor,
)
self.extras["consecutive_successes"] = self.consecutive_successes.mean()
if self.print_success_stat:
self.total_resets = self.total_resets + self.reset_buf.sum()
direct_average_successes = self.total_successes + self.successes.sum()
self.total_successes = self.total_successes + (self.successes * self.reset_buf).sum()
# The direct average shows the overall result more quickly, but slightly undershoots long term policy performance.
print(
"Direct average consecutive successes = {:.1f}".format(
direct_average_successes / (self.total_resets + self.num_envs)
)
)
if self.total_resets > 0:
print(
"Post-Reset average consecutive successes = {:.1f}".format(self.total_successes / self.total_resets)
)
def pre_physics_step(self, actions):
if not self._env._world.is_playing():
return
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)
end_effectors_pos, end_effectors_rot = self._arms._end_effectors.get_world_poses()
# Reverse the default rotation and rotate the displacement tensor according to the current rotation
self.object_pos = end_effectors_pos + quat_rotate(end_effectors_rot, quat_rotate_inverse(self.end_effectors_init_rot, self.get_object_displacement_tensor()))
self.object_pos -= self._env_pos # subtract world env pos
self.object_rot = end_effectors_rot
object_pos = self.object_pos + self._env_pos
object_rot = self.object_rot
self._objects.set_world_poses(object_pos, object_rot)
# if only goals need reset, then call set API
if len(goal_env_ids) > 0 and len(env_ids) == 0:
self.reset_target_pose(goal_env_ids)
elif len(goal_env_ids) > 0:
self.reset_target_pose(goal_env_ids)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.actions = actions.clone().to(self.device)
        # Reacher tasks don't require gripper actions, so disable them.
self.actions[:, 5] = 0.0
if self.use_relative_control:
targets = (
self.prev_targets[:, self.actuated_dof_indices] + self.arm_dof_speed_scale * self.dt * self.actions
)
self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(
targets,
self.arm_dof_lower_limits[self.actuated_dof_indices],
self.arm_dof_upper_limits[self.actuated_dof_indices],
)
else:
self.cur_targets[:, self.actuated_dof_indices] = scale(
self.actions[:, :self.num_arm_dofs],
self.arm_dof_lower_limits[self.actuated_dof_indices],
self.arm_dof_upper_limits[self.actuated_dof_indices],
)
self.cur_targets[:, self.actuated_dof_indices] = (
self.act_moving_average * self.cur_targets[:, self.actuated_dof_indices]
+ (1.0 - self.act_moving_average) * self.prev_targets[:, self.actuated_dof_indices]
)
self.cur_targets[:, self.actuated_dof_indices] = tensor_clamp(
self.cur_targets[:, self.actuated_dof_indices],
self.arm_dof_lower_limits[self.actuated_dof_indices],
self.arm_dof_upper_limits[self.actuated_dof_indices],
)
self.prev_targets[:, self.actuated_dof_indices] = self.cur_targets[:, self.actuated_dof_indices]
self._arms.set_joint_position_targets(
self.cur_targets[:, self.actuated_dof_indices], indices=None, joint_indices=self.actuated_dof_indices
)
if self._task_cfg['sim2real']['enabled'] and self.test and self.num_envs == 1:
# Only retrieve the 0-th joint position even when multiple envs are used
cur_joint_pos = self._arms.get_joint_positions(indices=[0], joint_indices=self.actuated_dof_indices)
# Send the current joint positions to the real robot
joint_pos = cur_joint_pos[0]
if torch.any(joint_pos < self.arm_dof_lower_limits) or torch.any(joint_pos > self.arm_dof_upper_limits):
print("get_joint_positions out of bound, send_joint_pos skipped")
else:
self.send_joint_pos(joint_pos)
def is_done(self):
pass
def reset_target_pose(self, env_ids):
# reset goal
indices = env_ids.to(dtype=torch.int32)
rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), 4), device=self.device)
new_pos = self.get_reset_target_new_pos(len(env_ids))
new_rot = randomize_rotation(
rand_floats[:, 0], rand_floats[:, 1], self.x_unit_tensor[env_ids], self.y_unit_tensor[env_ids]
)
self.goal_pos[env_ids] = new_pos
self.goal_rot[env_ids] = new_rot
goal_pos, goal_rot = self.goal_pos.clone(), self.goal_rot.clone()
goal_pos[env_ids] = (
self.goal_pos[env_ids] + self._env_pos[env_ids]
) # add world env pos
self._goals.set_world_poses(goal_pos[env_ids], goal_rot[env_ids], indices)
self.reset_goal_buf[env_ids] = 0
def reset_idx(self, env_ids):
indices = env_ids.to(dtype=torch.int32)
rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_arm_dofs * 2 + 5), device=self.device)
self.reset_target_pose(env_ids)
# reset arm
delta_max = self.arm_dof_upper_limits - self.arm_dof_default_pos
delta_min = self.arm_dof_lower_limits - self.arm_dof_default_pos
rand_delta = delta_min + (delta_max - delta_min) * (rand_floats[:, 5:5+self.num_arm_dofs] + 1.0) * 0.5
pos = self.arm_dof_default_pos + self.reset_dof_pos_noise * rand_delta
dof_pos = torch.zeros((self.num_envs, self._arms.num_dof), device=self.device)
dof_pos[env_ids, :self.num_arm_dofs] = pos
dof_vel = torch.zeros((self.num_envs, self._arms.num_dof), device=self.device)
dof_vel[env_ids, :self.num_arm_dofs] = self.arm_dof_default_vel + \
self.reset_dof_vel_noise * rand_floats[:, 5+self.num_arm_dofs:5+self.num_arm_dofs*2]
self.prev_targets[env_ids, :self.num_arm_dofs] = pos
self.cur_targets[env_ids, :self.num_arm_dofs] = pos
self.arm_dof_targets[env_ids, :self.num_arm_dofs] = pos
self._arms.set_joint_position_targets(self.arm_dof_targets[env_ids], indices)
# set_joint_positions doesn't seem to apply immediately.
self._arms.set_joint_positions(dof_pos[env_ids], indices)
self._arms.set_joint_velocities(dof_vel[env_ids], indices)
self.progress_buf[env_ids] = 0
self.reset_buf[env_ids] = 0
self.successes[env_ids] = 0
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def randomize_rotation(rand0, rand1, x_unit_tensor, y_unit_tensor):
return quat_mul(
quat_from_angle_axis(rand0 * np.pi, x_unit_tensor), quat_from_angle_axis(rand1 * np.pi, y_unit_tensor)
)
@torch.jit.script
def compute_arm_reward(
rew_buf,
reset_buf,
reset_goal_buf,
progress_buf,
successes,
consecutive_successes,
max_episode_length: float,
object_pos,
object_rot,
target_pos,
target_rot,
dist_reward_scale: float,
rot_reward_scale: float,
rot_eps: float,
actions,
action_penalty_scale: float,
success_tolerance: float,
reach_goal_bonus: float,
fall_dist: float,
fall_penalty: float,
max_consecutive_successes: int,
av_factor: float,
):
goal_dist = torch.norm(object_pos - target_pos, p=2, dim=-1)
    # Orientation alignment between the object and the goal
quat_diff = quat_mul(object_rot, quat_conjugate(target_rot))
rot_dist = 2.0 * torch.asin(
torch.clamp(torch.norm(quat_diff[:, 1:4], p=2, dim=-1), max=1.0)
) # changed quat convention
dist_rew = goal_dist * dist_reward_scale
rot_rew = 1.0 / (torch.abs(rot_dist) + rot_eps) * rot_reward_scale
action_penalty = torch.sum(actions**2, dim=-1)
    # Total reward is: position distance reward + action regularization; a success bonus is added below
reward = dist_rew + action_penalty * action_penalty_scale
# Find out which envs hit the goal and update successes count
goal_resets = torch.where(torch.abs(goal_dist) <= success_tolerance, torch.ones_like(reset_goal_buf), reset_goal_buf)
successes = successes + goal_resets
    # Success bonus: object position is within `success_tolerance` of the goal position
reward = torch.where(goal_resets == 1, reward + reach_goal_bonus, reward)
resets = reset_buf
if max_consecutive_successes > 0:
# Reset progress buffer on goal envs if max_consecutive_successes > 0
progress_buf = torch.where(
torch.abs(rot_dist) <= success_tolerance, torch.zeros_like(progress_buf), progress_buf
)
resets = torch.where(successes >= max_consecutive_successes, torch.ones_like(resets), resets)
resets = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(resets), resets)
num_resets = torch.sum(resets)
finished_cons_successes = torch.sum(successes * resets.float())
cons_successes = torch.where(
num_resets > 0,
av_factor * finished_cons_successes / num_resets + (1.0 - av_factor) * consecutive_successes,
consecutive_successes,
)
return reward, resets, goal_resets, progress_buf, successes, cons_successes
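# Worked example for the orientation-distance term above (illustrative only):
# if the object is rotated 90 degrees about z relative to the target, then
# quat_diff = (cos 45deg, 0, 0, sin 45deg), the norm of its vector part is
# sin 45deg ~= 0.7071, and rot_dist = 2 * asin(0.7071) = pi / 2, i.e. the
# geodesic angle between the two orientations. Note that only the position
# term and the action penalty enter the reward returned above.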
| 22,312 | Python | 42.836935 | 165 | 0.629482 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tasks/shared/locomotion.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
from abc import abstractmethod
import numpy as np
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch.maths import tensor_clamp, torch_rand_float, unscale
from omni.isaac.core.utils.torch.rotations import compute_heading_and_up, compute_rot, quat_conjugate
from omniisaacgymenvs.tasks.base.rl_task import RLTask
class LocomotionTask(RLTask):
def __init__(self, name, env, offset=None) -> None:
LocomotionTask.update_config(self)
RLTask.__init__(self, name, env)
return
def update_config(self):
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["episodeLength"]
self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
self.angular_velocity_scale = self._task_cfg["env"]["angularVelocityScale"]
self.contact_force_scale = self._task_cfg["env"]["contactForceScale"]
self.power_scale = self._task_cfg["env"]["powerScale"]
self.heading_weight = self._task_cfg["env"]["headingWeight"]
self.up_weight = self._task_cfg["env"]["upWeight"]
self.actions_cost_scale = self._task_cfg["env"]["actionsCost"]
self.energy_cost_scale = self._task_cfg["env"]["energyCost"]
self.joints_at_limit_cost_scale = self._task_cfg["env"]["jointsAtLimitCost"]
self.death_cost = self._task_cfg["env"]["deathCost"]
self.termination_height = self._task_cfg["env"]["terminationHeight"]
self.alive_reward_scale = self._task_cfg["env"]["alive_reward_scale"]
@abstractmethod
def set_up_scene(self, scene) -> None:
pass
@abstractmethod
def get_robot(self):
pass
def get_observations(self) -> dict:
torso_position, torso_rotation = self._robots.get_world_poses(clone=False)
velocities = self._robots.get_velocities(clone=False)
velocity = velocities[:, 0:3]
ang_velocity = velocities[:, 3:6]
dof_pos = self._robots.get_joint_positions(clone=False)
dof_vel = self._robots.get_joint_velocities(clone=False)
# force sensors attached to the feet
sensor_force_torques = self._robots.get_measured_joint_forces(joint_indices=self._sensor_indices)
(
self.obs_buf[:],
self.potentials[:],
self.prev_potentials[:],
self.up_vec[:],
self.heading_vec[:],
) = get_observations(
torso_position,
torso_rotation,
velocity,
ang_velocity,
dof_pos,
dof_vel,
self.targets,
self.potentials,
self.dt,
self.inv_start_rot,
self.basis_vec0,
self.basis_vec1,
self.dof_limits_lower,
self.dof_limits_upper,
self.dof_vel_scale,
sensor_force_torques,
self._num_envs,
self.contact_force_scale,
self.actions,
self.angular_velocity_scale,
)
observations = {self._robots.name: {"obs_buf": self.obs_buf}}
return observations
def pre_physics_step(self, actions) -> None:
if not self._env._world.is_playing():
return
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
self.actions = actions.clone().to(self._device)
forces = self.actions * self.joint_gears * self.power_scale
indices = torch.arange(self._robots.count, dtype=torch.int32, device=self._device)
# applies joint torques
self._robots.set_joint_efforts(forces, indices=indices)
def reset_idx(self, env_ids):
num_resets = len(env_ids)
# randomize DOF positions and velocities
dof_pos = torch_rand_float(-0.2, 0.2, (num_resets, self._robots.num_dof), device=self._device)
dof_pos[:] = tensor_clamp(self.initial_dof_pos[env_ids] + dof_pos, self.dof_limits_lower, self.dof_limits_upper)
dof_vel = torch_rand_float(-0.1, 0.1, (num_resets, self._robots.num_dof), device=self._device)
root_pos, root_rot = self.initial_root_pos[env_ids], self.initial_root_rot[env_ids]
root_vel = torch.zeros((num_resets, 6), device=self._device)
# apply resets
self._robots.set_joint_positions(dof_pos, indices=env_ids)
self._robots.set_joint_velocities(dof_vel, indices=env_ids)
self._robots.set_world_poses(root_pos, root_rot, indices=env_ids)
self._robots.set_velocities(root_vel, indices=env_ids)
to_target = self.targets[env_ids] - self.initial_root_pos[env_ids]
to_target[:, 2] = 0.0
self.prev_potentials[env_ids] = -torch.norm(to_target, p=2, dim=-1) / self.dt
self.potentials[env_ids] = self.prev_potentials[env_ids].clone()
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
num_resets = len(env_ids)
def post_reset(self):
self._robots = self.get_robot()
self.initial_root_pos, self.initial_root_rot = self._robots.get_world_poses()
self.initial_dof_pos = self._robots.get_joint_positions()
# initialize some data used later on
self.start_rotation = torch.tensor([1, 0, 0, 0], device=self._device, dtype=torch.float32)
self.up_vec = torch.tensor([0, 0, 1], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
self.heading_vec = torch.tensor([1, 0, 0], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
self.inv_start_rot = quat_conjugate(self.start_rotation).repeat((self.num_envs, 1))
self.basis_vec0 = self.heading_vec.clone()
self.basis_vec1 = self.up_vec.clone()
self.targets = torch.tensor([1000, 0, 0], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
self.target_dirs = torch.tensor([1, 0, 0], dtype=torch.float32, device=self._device).repeat((self.num_envs, 1))
self.dt = 1.0 / 60.0
self.potentials = torch.tensor([-1000.0 / self.dt], dtype=torch.float32, device=self._device).repeat(
self.num_envs
)
self.prev_potentials = self.potentials.clone()
self.actions = torch.zeros((self.num_envs, self.num_actions), device=self._device)
# randomize all envs
indices = torch.arange(self._robots.count, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None:
self.rew_buf[:] = calculate_metrics(
self.obs_buf,
self.actions,
self.up_weight,
self.heading_weight,
self.potentials,
self.prev_potentials,
self.actions_cost_scale,
self.energy_cost_scale,
self.termination_height,
self.death_cost,
self._robots.num_dof,
self.get_dof_at_limit_cost(),
self.alive_reward_scale,
self.motor_effort_ratio,
)
def is_done(self) -> None:
self.reset_buf[:] = is_done(
self.obs_buf, self.termination_height, self.reset_buf, self.progress_buf, self._max_episode_length
)
#####################################################################
###=========================jit functions=========================###
#####################################################################
@torch.jit.script
def normalize_angle(x):
return torch.atan2(torch.sin(x), torch.cos(x))
@torch.jit.script
def get_observations(
torso_position,
torso_rotation,
velocity,
ang_velocity,
dof_pos,
dof_vel,
targets,
potentials,
dt,
inv_start_rot,
basis_vec0,
basis_vec1,
dof_limits_lower,
dof_limits_upper,
dof_vel_scale,
sensor_force_torques,
num_envs,
contact_force_scale,
actions,
angular_velocity_scale,
):
# type: (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, float, Tensor, Tensor, Tensor, Tensor, Tensor, float, Tensor, int, float, Tensor, float) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]
to_target = targets - torso_position
to_target[:, 2] = 0.0
prev_potentials = potentials.clone()
potentials = -torch.norm(to_target, p=2, dim=-1) / dt
torso_quat, up_proj, heading_proj, up_vec, heading_vec = compute_heading_and_up(
torso_rotation, inv_start_rot, to_target, basis_vec0, basis_vec1, 2
)
vel_loc, angvel_loc, roll, pitch, yaw, angle_to_target = compute_rot(
torso_quat, velocity, ang_velocity, targets, torso_position
)
dof_pos_scaled = unscale(dof_pos, dof_limits_lower, dof_limits_upper)
# obs_buf shapes: 1, 3, 3, 1, 1, 1, 1, 1, num_dofs, num_dofs, num_sensors * 6, num_dofs
obs = torch.cat(
(
torso_position[:, 2].view(-1, 1),
vel_loc,
angvel_loc * angular_velocity_scale,
normalize_angle(yaw).unsqueeze(-1),
normalize_angle(roll).unsqueeze(-1),
normalize_angle(angle_to_target).unsqueeze(-1),
up_proj.unsqueeze(-1),
heading_proj.unsqueeze(-1),
dof_pos_scaled,
dof_vel * dof_vel_scale,
sensor_force_torques.reshape(num_envs, -1) * contact_force_scale,
actions,
),
dim=-1,
)
return obs, potentials, prev_potentials, up_vec, heading_vec
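# A hedged note on the observation size implied by the concatenation above:
# for a robot with D joints, S force sensors, and A actions, the observation
# dimension is 12 + 2 * D + 6 * S + A (for example, D = 12, S = 4, A = 12
# gives 72). The concrete numbers are illustrative assumptions, not values
# taken from this repository.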
@torch.jit.script
def is_done(obs_buf, termination_height, reset_buf, progress_buf, max_episode_length):
# type: (Tensor, float, Tensor, Tensor, float) -> Tensor
reset = torch.where(obs_buf[:, 0] < termination_height, torch.ones_like(reset_buf), reset_buf)
reset = torch.where(progress_buf >= max_episode_length - 1, torch.ones_like(reset_buf), reset)
return reset
@torch.jit.script
def calculate_metrics(
obs_buf,
actions,
up_weight,
heading_weight,
potentials,
prev_potentials,
actions_cost_scale,
energy_cost_scale,
termination_height,
death_cost,
num_dof,
dof_at_limit_cost,
alive_reward_scale,
motor_effort_ratio,
):
# type: (Tensor, Tensor, float, float, Tensor, Tensor, float, float, float, float, int, Tensor, float, Tensor) -> Tensor
heading_weight_tensor = torch.ones_like(obs_buf[:, 11]) * heading_weight
heading_reward = torch.where(obs_buf[:, 11] > 0.8, heading_weight_tensor, heading_weight * obs_buf[:, 11] / 0.8)
# aligning up axis of robot and environment
up_reward = torch.zeros_like(heading_reward)
up_reward = torch.where(obs_buf[:, 10] > 0.93, up_reward + up_weight, up_reward)
# energy penalty for movement
actions_cost = torch.sum(actions**2, dim=-1)
electricity_cost = torch.sum(
torch.abs(actions * obs_buf[:, 12 + num_dof : 12 + num_dof * 2]) * motor_effort_ratio.unsqueeze(0), dim=-1
)
# reward for duration of staying alive
alive_reward = torch.ones_like(potentials) * alive_reward_scale
progress_reward = potentials - prev_potentials
total_reward = (
progress_reward
+ alive_reward
+ up_reward
+ heading_reward
- actions_cost_scale * actions_cost
- energy_cost_scale * electricity_cost
- dof_at_limit_cost
)
# adjust reward for fallen agents
total_reward = torch.where(
obs_buf[:, 0] < termination_height, torch.ones_like(total_reward) * death_cost, total_reward
)
return total_reward
| 13,249 | Python | 37.294798 | 214 | 0.628802 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/sim2real/dofbot.py | # Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import socket
import struct
import time
import numpy as np
class RealWorldDofbot():
# Defined in dofbot.usd
sim_dof_angle_limits = [
(-90, 90, False),
(-90, 90, False),
(-90, 90, False),
(-90, 90, False),
(-90, 180, False),
(-30, 60, True),
# (-30, 60): /arm_01/link5/Finger_Left_01/Finger_Left_01_RevoluteJoint
# (-60, 30): /arm_01/link5/Finger_Right_01/Finger_Right_01_RevoluteJoint
    ] # sim_dof_angle_limits[:,2] == True indicates an inverted joint angle compared to the real robot
# Ref: Section `6.5 Control all servo` in http://www.yahboom.net/study/Dofbot-Jetson_nano
servo_angle_limits = [
(0, 180),
(0, 180),
(0, 180),
(0, 180),
(0, 270),
(0, 180),
]
def __init__(self, IP, PORT, fail_quietely=False, verbose=False) -> None:
print("Connecting to real-world Dofbot at IP:", IP, "and port:", PORT)
self.fail_quietely = fail_quietely
self.failed = False
self.last_sync_time = 0
self.sync_hz = 10000
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (IP, PORT)
self.sock.connect(server_address)
print("Connected to real-world Dofbot!")
except socket.error as e:
self.failed = True
print("Connection to real-world Dofbot failed!")
if self.fail_quietely:
print(e)
else:
raise e
def send_joint_pos(self, joint_pos):
if time.time() - self.last_sync_time < 1 / self.sync_hz:
return
self.last_sync_time = time.time()
if len(joint_pos) != 6:
raise Exception("The length of Dofbot joint_pos is {}, but should be 6!".format(len(joint_pos)))
# Convert Sim angles to Real angles
servo_angles = [90] * 6
for i, pos in enumerate(joint_pos):
if i == 5:
# Ignore the gripper joints for Reacher task
continue
# Map [L, U] to [A, B]
L, U, inversed = self.sim_dof_angle_limits[i]
A, B = self.servo_angle_limits[i]
angle = np.rad2deg(float(pos))
if not L <= angle <= U:
print("The {}-th simulation joint angle ({}) is out of range! Should be in [{}, {}]".format(i, angle, L, U))
angle = np.clip(angle, L, U)
servo_angles[i] = (angle - L) * ((B-A)/(U-L)) + A # Map [L, U] to [A, B]
if inversed:
servo_angles[i] = (B-A) - (servo_angles[i] - A) + A # Map [A, B] to [B, A]
if not A <= servo_angles[i] <= B:
raise Exception("(Should Not Happen) The {}-th real world joint angle ({}) is out of range! hould be in [{}, {}]".format(i, servo_angles[i], A, B))
print("Sending real-world Dofbot joint angles:", servo_angles)
if self.failed:
print("Cannot send joint states. Not connected to real-world Dofbot!")
return
packer = struct.Struct("f f f f f f")
packed_data = packer.pack(*servo_angles)
try:
self.sock.sendall(packed_data)
except socket.error as e:
self.failed = True
print("Send to real-world Dofbot failed!")
if self.fail_quietely:
print(e)
else:
raise e
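# Worked example of the angle mapping above (illustrative only): for the first
# joint, (L, U) = (-90, 90) and (A, B) = (0, 180), so a simulated angle of
# 45 degrees maps to (45 - (-90)) * (180 - 0) / (90 - (-90)) + 0 = 135 degrees
# on the real servo, and 0 degrees in simulation maps to the servo midpoint 90.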
if __name__ == "__main__":
IP = input("Enter Dofbot's IP: ")
PORT = input("Enter Dofbot's Port: ")
dofbot = RealWorldDofbot(IP, int(PORT))
pos = [np.deg2rad(0)] * 6
dofbot.send_joint_pos(pos)
print("Dofbot joint angles reset.")
| 5,238 | Python | 40.912 | 163 | 0.605766 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/scripts/rlgames_demo.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import os
import hydra
import torch
from omegaconf import DictConfig
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames
from omniisaacgymenvs.scripts.rlgames_train import RLGTrainer
from omniisaacgymenvs.utils.config_utils.path_utils import retrieve_checkpoint_path
from omniisaacgymenvs.utils.demo_util import initialize_demo
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
class RLGDemo(RLGTrainer):
def __init__(self, cfg, cfg_dict):
RLGTrainer.__init__(self, cfg, cfg_dict)
self.cfg.test = True
@hydra.main(version_base=None, config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
time_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
headless = cfg.headless
env = VecEnvRLGames(headless=headless, sim_device=cfg.device_id, enable_livestream=cfg.enable_livestream)
# ensure checkpoints can be specified as relative paths
if cfg.checkpoint:
cfg.checkpoint = retrieve_checkpoint_path(cfg.checkpoint)
if cfg.checkpoint is None:
quit()
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
    # sets seed. if seed is -1, a random one will be picked
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
cfg_dict["seed"] = cfg.seed
task = initialize_demo(cfg_dict, env)
if cfg.wandb_activate:
# Make sure to install WandB if you actually use this.
import wandb
run_name = f"{cfg.wandb_name}_{time_str}"
wandb.init(
project=cfg.wandb_project,
group=cfg.wandb_group,
entity=cfg.wandb_entity,
config=cfg_dict,
sync_tensorboard=True,
id=run_name,
resume="allow",
monitor_gym=True,
)
rlg_trainer = RLGDemo(cfg, cfg_dict)
rlg_trainer.launch_rlg_hydra(env)
rlg_trainer.run()
env.close()
if cfg.wandb_activate:
wandb.finish()
if __name__ == "__main__":
parse_hydra_configs()
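# Example invocation (a hedged sketch; the task name is an assumption and the
# checkpoint must point to an existing trained model):
#
#   PYTHON_PATH omniisaacgymenvs/scripts/rlgames_demo.py task=AnymalTerrain num_envs=64 checkpoint=<path-to-checkpoint>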
| 3,746 | Python | 35.735294 | 109 | 0.719434 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/scripts/rlgames_train.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import os
import hydra
import torch
from omegaconf import DictConfig
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames
from omniisaacgymenvs.utils.config_utils.path_utils import retrieve_checkpoint_path, get_experience
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver, RLGPUEnv
from omniisaacgymenvs.utils.task_util import initialize_task
from rl_games.common import env_configurations, vecenv
from rl_games.torch_runner import Runner
class RLGTrainer:
def __init__(self, cfg, cfg_dict):
self.cfg = cfg
self.cfg_dict = cfg_dict
def launch_rlg_hydra(self, env):
        # `create_rlgpu_env` is the environment construction function that is passed to RL Games and called internally.
# We use the helper function here to specify the environment config.
self.cfg_dict["task"]["test"] = self.cfg.test
# register the rl-games adapter to use inside the runner
vecenv.register("RLGPU", lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))
env_configurations.register("rlgpu", {"vecenv_type": "RLGPU", "env_creator": lambda **kwargs: env})
self.rlg_config_dict = omegaconf_to_dict(self.cfg.train)
def run(self):
# create runner and set the settings
runner = Runner(RLGPUAlgoObserver())
runner.load(self.rlg_config_dict)
runner.reset()
# dump config dict
experiment_dir = os.path.join("runs", self.cfg.train.params.config.name)
os.makedirs(experiment_dir, exist_ok=True)
with open(os.path.join(experiment_dir, "config.yaml"), "w") as f:
f.write(OmegaConf.to_yaml(self.cfg))
runner.run(
{"train": not self.cfg.test, "play": self.cfg.test, "checkpoint": self.cfg.checkpoint, "sigma": None}
)
@hydra.main(version_base=None, config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
time_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
headless = cfg.headless
    # local rank (GPU id) in the current multi-GPU mode
local_rank = int(os.getenv("LOCAL_RANK", "0"))
# global rank (GPU id) in multi-gpu multi-node mode
global_rank = int(os.getenv("RANK", "0"))
if cfg.multi_gpu:
cfg.device_id = local_rank
cfg.rl_device = f'cuda:{local_rank}'
enable_viewport = "enable_cameras" in cfg.task.sim and cfg.task.sim.enable_cameras
# select kit app file
experience = get_experience(headless, cfg.enable_livestream, enable_viewport, cfg.kit_app)
env = VecEnvRLGames(
headless=headless,
sim_device=cfg.device_id,
enable_livestream=cfg.enable_livestream,
enable_viewport=enable_viewport,
experience=experience
)
# ensure checkpoints can be specified as relative paths
if cfg.checkpoint:
cfg.checkpoint = retrieve_checkpoint_path(cfg.checkpoint)
if cfg.checkpoint is None:
quit()
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
    # sets seed. if seed is -1, a random one will be picked
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = cfg.seed + global_rank if cfg.seed != -1 else cfg.seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
cfg_dict["seed"] = cfg.seed
task = initialize_task(cfg_dict, env)
if cfg.wandb_activate and global_rank == 0:
# Make sure to install WandB if you actually use this.
import wandb
run_name = f"{cfg.wandb_name}_{time_str}"
wandb.init(
project=cfg.wandb_project,
group=cfg.wandb_group,
entity=cfg.wandb_entity,
config=cfg_dict,
sync_tensorboard=True,
name=run_name,
resume="allow",
)
torch.cuda.set_device(local_rank)
rlg_trainer = RLGTrainer(cfg, cfg_dict)
rlg_trainer.launch_rlg_hydra(env)
rlg_trainer.run()
env.close()
if cfg.wandb_activate and global_rank == 0:
wandb.finish()
if __name__ == "__main__":
parse_hydra_configs()
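# Example invocation (a hedged sketch; the task name and flags follow the usual
# OmniIsaacGymEnvs workflow and are not verified against this repository's
# configs):
#
#   PYTHON_PATH omniisaacgymenvs/scripts/rlgames_train.py task=DofbotReacher headless=True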
| 5,846 | Python | 37.721854 | 119 | 0.695689 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/scripts/dummy_dofbot_policy.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Ref: /omniisaacgymenvs/scripts/random_policy.py
import hydra
import numpy as np
import torch
from omegaconf import DictConfig
from omniisaacgymenvs.envs.vec_env_rlgames import VecEnvRLGames
from omniisaacgymenvs.utils.config_utils.path_utils import get_experience
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.task_util import initialize_task
@hydra.main(version_base=None, config_name="config", config_path="../cfg")
def parse_hydra_configs(cfg: DictConfig):
cfg_dict = omegaconf_to_dict(cfg)
print_dict(cfg_dict)
headless = cfg.headless
render = not headless
enable_viewport = "enable_cameras" in cfg.task.sim and cfg.task.sim.enable_cameras
# select kit app file
experience = get_experience(headless, cfg.enable_livestream, enable_viewport, cfg.kit_app)
env = VecEnvRLGames(
headless=headless,
sim_device=cfg.device_id,
enable_livestream=cfg.enable_livestream,
enable_viewport=enable_viewport,
experience=experience
)
    # sets seed. if seed is -1, a random one will be picked
from omni.isaac.core.utils.torch.maths import set_seed
cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)
cfg_dict["seed"] = cfg.seed
task = initialize_task(cfg_dict, env)
while env._simulation_app.is_running():
if env._world.is_playing():
if env._world.current_time_step_index == 0:
env._world.reset(soft=True)
actions = torch.tensor(
np.array([env.action_space.sample() for _ in range(env.num_envs)]), device=task.rl_device
)
actions[:, 0] = 1.0
actions[:, 1] = 1.0
actions[:, 2] = 1.0
actions[:, 3] = 1.0
actions[:, 4] = 1.0
actions[:, 5] = 0.0
env._task.pre_physics_step(actions)
env._world.step(render=render)
env.sim_frame_count += 1
env._task.post_physics_step()
else:
env._world.step(render=render)
env._simulation_app.close()
if __name__ == "__main__":
parse_hydra_configs()
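# Example invocation (a hedged sketch; the task name is an assumption based on
# this repository's naming):
#
#   PYTHON_PATH omniisaacgymenvs/scripts/dummy_dofbot_policy.py task=DofbotReacher num_envs=4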
| 3,841 | Python | 39.020833 | 105 | 0.702421 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/demos/anymal_terrain.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.tasks.anymal_terrain import AnymalTerrainTask, wrap_to_pi
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.core.utils.torch.transformations import tf_combine
import numpy as np
import torch
import math
import omni
import carb
from omni.kit.viewport.utility.camera_state import ViewportCameraState
from omni.kit.viewport.utility import get_viewport_from_window_name
from pxr import Sdf
class AnymalTerrainDemo(AnymalTerrainTask):
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
max_num_envs = 128
if sim_config.task_config["env"]["numEnvs"] >= max_num_envs:
print(f"num_envs reduced to {max_num_envs} for this demo.")
sim_config.task_config["env"]["numEnvs"] = max_num_envs
sim_config.task_config["env"]["learn"]["episodeLength_s"] = 120
AnymalTerrainTask.__init__(self, name, sim_config, env)
self.add_noise = False
self.knee_threshold = 0.05
self.create_camera()
self._current_command = [0.0, 0.0, 0.0, 0.0]
self.set_up_keyboard()
self._prim_selection = omni.usd.get_context().get_selection()
self._selected_id = None
self._previous_selected_id = None
return
def create_camera(self):
stage = omni.usd.get_context().get_stage()
self.view_port = get_viewport_from_window_name("Viewport")
# Create camera
self.camera_path = "/World/Camera"
self.perspective_path = "/OmniverseKit_Persp"
camera_prim = stage.DefinePrim(self.camera_path, "Camera")
camera_prim.GetAttribute("focalLength").Set(8.5)
coi_prop = camera_prim.GetProperty("omni:kit:centerOfInterest")
if not coi_prop or not coi_prop.IsValid():
camera_prim.CreateAttribute(
"omni:kit:centerOfInterest", Sdf.ValueTypeNames.Vector3d, True, Sdf.VariabilityUniform
).Set(Gf.Vec3d(0, 0, -10))
self.view_port.set_active_camera(self.perspective_path)
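    # Keyboard teleoperation: arrow keys command forward/lateral velocity, Z/X command
    # yaw, C toggles between the follow camera and the default perspective camera, and
    # ESC clears the current prim selection.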
def set_up_keyboard(self):
self._input = carb.input.acquire_input_interface()
self._keyboard = omni.appwindow.get_default_app_window().get_keyboard()
self._sub_keyboard = self._input.subscribe_to_keyboard_events(self._keyboard, self._on_keyboard_event)
T = 1
R = 1
self._key_to_control = {
"UP": [T, 0.0, 0.0, 0.0],
"DOWN": [-T, 0.0, 0.0, 0.0],
"LEFT": [0.0, T, 0.0, 0.0],
"RIGHT": [0.0, -T, 0.0, 0.0],
"Z": [0.0, 0.0, R, 0.0],
"X": [0.0, 0.0, -R, 0.0],
}
def _on_keyboard_event(self, event, *args, **kwargs):
if event.type == carb.input.KeyboardEventType.KEY_PRESS:
if event.input.name in self._key_to_control:
self._current_command = self._key_to_control[event.input.name]
elif event.input.name == "ESCAPE":
self._prim_selection.clear_selected_prim_paths()
elif event.input.name == "C":
if self._selected_id is not None:
if self.view_port.get_active_camera() == self.camera_path:
self.view_port.set_active_camera(self.perspective_path)
else:
self.view_port.set_active_camera(self.camera_path)
elif event.type == carb.input.KeyboardEventType.KEY_RELEASE:
self._current_command = [0.0, 0.0, 0.0, 0.0]
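    # Track which prim is selected in the viewport. Selecting an env_* prim makes that
    # robot keyboard-controlled and attaches the follow camera to it; the previously
    # selected robot is handed back to randomly sampled commands.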
def update_selected_object(self):
self._previous_selected_id = self._selected_id
selected_prim_paths = self._prim_selection.get_selected_prim_paths()
if len(selected_prim_paths) == 0:
self._selected_id = None
self.view_port.set_active_camera(self.perspective_path)
elif len(selected_prim_paths) > 1:
print("Multiple prims are selected. Please only select one!")
else:
prim_splitted_path = selected_prim_paths[0].split("/")
if len(prim_splitted_path) >= 4 and prim_splitted_path[3][0:4] == "env_":
self._selected_id = int(prim_splitted_path[3][4:])
if self._previous_selected_id != self._selected_id:
self.view_port.set_active_camera(self.camera_path)
self._update_camera()
else:
print("The selected prim was not an Anymal")
if self._previous_selected_id is not None and self._previous_selected_id != self._selected_id:
self.commands[self._previous_selected_id, 0] = np.random.uniform(self.command_x_range[0], self.command_x_range[1])
self.commands[self._previous_selected_id, 1] = np.random.uniform(self.command_y_range[0], self.command_y_range[1])
self.commands[self._previous_selected_id, 2] = 0.0
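    # Place the follow camera 1.8 m behind and 0.6 m above the selected robot's base,
    # aimed at a point 0.6 m above the base.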
def _update_camera(self):
base_pos = self.base_pos[self._selected_id, :].clone()
base_quat = self.base_quat[self._selected_id, :].clone()
camera_local_transform = torch.tensor([-1.8, 0.0, 0.6], device=self.device)
camera_pos = quat_apply(base_quat, camera_local_transform) + base_pos
camera_state = ViewportCameraState(self.camera_path, self.view_port)
eye = Gf.Vec3d(camera_pos[0].item(), camera_pos[1].item(), camera_pos[2].item())
target = Gf.Vec3d(base_pos[0].item(), base_pos[1].item(), base_pos[2].item()+0.6)
camera_state.set_position_world(eye, True)
camera_state.set_target_world(target, True)
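    # Largely mirrors the base task's post_physics_step, with two demo-specific
    # additions: the viewport selection is refreshed every step, and the selected robot
    # receives the keyboard command instead of a sampled one and is excluded from
    # resets and timeouts.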
def post_physics_step(self):
self.progress_buf[:] += 1
self.refresh_dof_state_tensors()
self.refresh_body_state_tensors()
self.update_selected_object()
self.common_step_counter += 1
if self.common_step_counter % self.push_interval == 0:
self.push_robots()
# prepare quantities
self.base_lin_vel = quat_rotate_inverse(self.base_quat, self.base_velocities[:, 0:3])
self.base_ang_vel = quat_rotate_inverse(self.base_quat, self.base_velocities[:, 3:6])
self.projected_gravity = quat_rotate_inverse(self.base_quat, self.gravity_vec)
forward = quat_apply(self.base_quat, self.forward_vec)
heading = torch.atan2(forward[:, 1], forward[:, 0])
self.commands[:, 2] = torch.clip(0.5*wrap_to_pi(self.commands[:, 3] - heading), -1., 1.)
self.check_termination()
if self._selected_id is not None:
self.commands[self._selected_id, :] = torch.tensor(self._current_command, device=self.device)
self.timeout_buf[self._selected_id] = 0
self.reset_buf[self._selected_id] = 0
self.get_states()
env_ids = self.reset_buf.nonzero(as_tuple=False).flatten()
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.get_observations()
if self.add_noise:
self.obs_buf += (2 * torch.rand_like(self.obs_buf) - 1) * self.noise_scale_vec
self.last_actions[:] = self.actions[:]
self.last_dof_vel[:] = self.dof_vel[:]
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras | 8,841 | Python | 44.577319 | 126 | 0.636127 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/tests/runner.py | # Copyright (c) 2018-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import asyncio
from datetime import date
import sys
import unittest
import weakref
import omni.kit.test
from omni.kit.test import AsyncTestSuite
from omni.kit.test.async_unittest import AsyncTextTestRunner
import omni.ui as ui
from omni.isaac.ui.menu import make_menu_item_description
from omni.isaac.ui.ui_utils import btn_builder
from omni.kit.menu.utils import MenuItemDescription, add_menu_items
import omni.timeline
import omni.usd
from omniisaacgymenvs import RLExtension, get_instance
class GymRLTests(omni.kit.test.AsyncTestCase):
def __init__(self, *args, **kwargs):
super(GymRLTests, self).__init__(*args, **kwargs)
self.ext = get_instance()
async def _train(self, task, load=True, experiment=None, max_iterations=None):
task_idx = self.ext._task_list.index(task)
self.ext._task_dropdown.get_item_value_model().set_value(task_idx)
if load:
self.ext._on_load_world()
while True:
_, files_loaded, total_files = omni.usd.get_context().get_stage_loading_status()
if files_loaded or total_files:
await omni.kit.app.get_app().next_update_async()
else:
break
for _ in range(100):
await omni.kit.app.get_app().next_update_async()
self.ext._render_dropdown.get_item_value_model().set_value(2)
overrides = None
if experiment is not None:
overrides = [f"experiment={experiment}"]
if max_iterations is not None:
if overrides is None:
overrides = [f"max_iterations={max_iterations}"]
else:
overrides += [f"max_iterations={max_iterations}"]
await self.ext._on_train_async(overrides=overrides)
async def test_train(self):
date_str = date.today()
tasks = self.ext._task_list
for task in tasks:
await self._train(task, load=True, experiment=f"{task}_{date_str}")
async def test_train_determinism(self):
date_str = date.today()
tasks = self.ext._task_list
for task in tasks:
for i in range(3):
await self._train(task, load=(i==0), experiment=f"{task}_{date_str}_{i}", max_iterations=100)
class TestRunner():
def __init__(self):
self._build_ui()
def _build_ui(self):
menu_items = [make_menu_item_description("RL Examples Tests", "RL Examples Tests", lambda a=weakref.proxy(self): a._menu_callback())]
add_menu_items(menu_items, "Isaac Examples")
self._window = omni.ui.Window(
"RL Examples Tests", width=250, height=0, visible=True, dockPreference=ui.DockPreference.LEFT_BOTTOM
)
with self._window.frame:
main_stack = ui.VStack(spacing=5, height=0)
with main_stack:
                btn_dict = {
                    "label": "Run Tests",
                    "type": "button",
                    "text": "Run Tests",
                    "tooltip": "Run all tests",
                    "on_clicked_fn": self._run_tests,
                }
                btn_builder(**btn_dict)
def _menu_callback(self):
self._window.visible = not self._window.visible
def _run_tests(self):
loader = unittest.TestLoader()
loader.SuiteClass = AsyncTestSuite
test_suite = AsyncTestSuite()
test_suite.addTests(loader.loadTestsFromTestCase(GymRLTests))
test_runner = AsyncTextTestRunner(verbosity=2, stream=sys.stdout)
async def single_run():
await test_runner.run(test_suite)
print("=======================================")
print(f"Running Tests")
print("=======================================")
asyncio.ensure_future(single_run())
TestRunner() | 4,254 | Python | 35.059322 | 141 | 0.607428 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/utils/task_util.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def import_tasks():
from omniisaacgymenvs.tasks.allegro_hand import AllegroHandTask
from omniisaacgymenvs.tasks.ant import AntLocomotionTask
from omniisaacgymenvs.tasks.anymal import AnymalTask
from omniisaacgymenvs.tasks.anymal_terrain import AnymalTerrainTask
from omniisaacgymenvs.tasks.ball_balance import BallBalanceTask
from omniisaacgymenvs.tasks.cartpole import CartpoleTask
from omniisaacgymenvs.tasks.cartpole_camera import CartpoleCameraTask
from omniisaacgymenvs.tasks.crazyflie import CrazyflieTask
from omniisaacgymenvs.tasks.factory.factory_task_nut_bolt_pick import FactoryTaskNutBoltPick
from omniisaacgymenvs.tasks.factory.factory_task_nut_bolt_place import FactoryTaskNutBoltPlace
from omniisaacgymenvs.tasks.factory.factory_task_nut_bolt_screw import FactoryTaskNutBoltScrew
from omniisaacgymenvs.tasks.franka_cabinet import FrankaCabinetTask
from omniisaacgymenvs.tasks.franka_deformable import FrankaDeformableTask
from omniisaacgymenvs.tasks.humanoid import HumanoidLocomotionTask
from omniisaacgymenvs.tasks.ingenuity import IngenuityTask
from omniisaacgymenvs.tasks.quadcopter import QuadcopterTask
from omniisaacgymenvs.tasks.shadow_hand import ShadowHandTask
from omniisaacgymenvs.tasks.dofbot_reacher import DofbotReacherTask
from omniisaacgymenvs.tasks.warp.ant import AntLocomotionTask as AntLocomotionTaskWarp
from omniisaacgymenvs.tasks.warp.cartpole import CartpoleTask as CartpoleTaskWarp
from omniisaacgymenvs.tasks.warp.humanoid import HumanoidLocomotionTask as HumanoidLocomotionTaskWarp
# Mappings from strings to environments
task_map = {
"AllegroHand": AllegroHandTask,
"Ant": AntLocomotionTask,
"Anymal": AnymalTask,
"AnymalTerrain": AnymalTerrainTask,
"BallBalance": BallBalanceTask,
"Cartpole": CartpoleTask,
"CartpoleCamera": CartpoleCameraTask,
"FactoryTaskNutBoltPick": FactoryTaskNutBoltPick,
"FactoryTaskNutBoltPlace": FactoryTaskNutBoltPlace,
"FactoryTaskNutBoltScrew": FactoryTaskNutBoltScrew,
"FrankaCabinet": FrankaCabinetTask,
"FrankaDeformable": FrankaDeformableTask,
"Humanoid": HumanoidLocomotionTask,
"Ingenuity": IngenuityTask,
"Quadcopter": QuadcopterTask,
"Crazyflie": CrazyflieTask,
"ShadowHand": ShadowHandTask,
"ShadowHandOpenAI_FF": ShadowHandTask,
"ShadowHandOpenAI_LSTM": ShadowHandTask,
"DofbotReacher": DofbotReacherTask,
}
task_map_warp = {
"Cartpole": CartpoleTaskWarp,
"Ant":AntLocomotionTaskWarp,
"Humanoid": HumanoidLocomotionTaskWarp
}
return task_map, task_map_warp
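# initialize_task is the entry point used by the training and demo scripts. A minimal
# sketch of a typical call site (assuming `cfg` is the resolved Hydra config and `env`
# is an already constructed vectorized environment):
#
#     cfg_dict = omegaconf_to_dict(cfg)
#     task = initialize_task(cfg_dict, env)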
def initialize_task(config, env, init_sim=True):
from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig
sim_config = SimConfig(config)
task_map, task_map_warp = import_tasks()
cfg = sim_config.config
if cfg["warp"]:
task_map = task_map_warp
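    # Note: with warp enabled, only the warp-ported tasks above (Cartpole, Ant,
    # Humanoid) are available; any other task name will raise a KeyError below.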
task = task_map[cfg["task_name"]](
name=cfg["task_name"], sim_config=sim_config, env=env
)
backend = "warp" if cfg["warp"] else "torch"
rendering_dt = sim_config.get_physics_params()["rendering_dt"]
env.set_task(
task=task,
sim_params=sim_config.get_physics_params(),
backend=backend,
init_sim=init_sim,
rendering_dt=rendering_dt,
)
return task
| 5,049 | Python | 42.913043 | 105 | 0.75302 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/utils/domain_randomization/randomize.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import numpy as np
import torch
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.utils.extensions import enable_extension
class Randomizer:
def __init__(self, main_config, task_config):
self._cfg = task_config
self._config = main_config
self.randomize = False
dr_config = self._cfg.get("domain_randomization", None)
self.distributions = dict()
self.active_domain_randomizations = dict()
self._observations_dr_params = None
self._actions_dr_params = None
if dr_config is not None:
randomize = dr_config.get("randomize", False)
randomization_params = dr_config.get("randomization_params", None)
if randomize and randomization_params is not None:
self.randomize = True
self.min_frequency = dr_config.get("min_frequency", 1)
# import DR extensions
enable_extension("omni.replicator.isaac")
import omni.replicator.core as rep
import omni.replicator.isaac as dr
self.rep = rep
self.dr = dr
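    # Rough sketch of the structure this class expects in the task YAML; view and
    # attribute names as well as the concrete values are task-specific placeholders:
    #
    #   domain_randomization:
    #     randomize: True
    #     randomization_params:
    #       observations:
    #         on_reset:    {operation: ..., distribution: ..., distribution_parameters: [...]}
    #         on_interval: {frequency_interval: ..., operation: ..., distribution: ..., distribution_parameters: [...]}
    #       actions: {on_reset: ..., on_interval: ...}
    #       simulation: {<attribute>: {on_reset: ..., on_interval: ...}}
    #       rigid_prim_views: {<view_name>: {<attribute>: {on_startup: ..., on_reset: ..., on_interval: ...}}}
    #       articulation_views: {<view_name>: {<attribute>: {on_startup: ..., on_reset: ..., on_interval: ...}}}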
def apply_on_startup_domain_randomization(self, task):
if self.randomize:
torch.manual_seed(self._config["seed"])
randomization_params = self._cfg["domain_randomization"]["randomization_params"]
for opt in randomization_params.keys():
if opt == "rigid_prim_views":
if randomization_params["rigid_prim_views"] is not None:
for view_name in randomization_params["rigid_prim_views"].keys():
if randomization_params["rigid_prim_views"][view_name] is not None:
for attribute, params in randomization_params["rigid_prim_views"][view_name].items():
params = randomization_params["rigid_prim_views"][view_name][attribute]
if attribute in ["scale", "mass", "density"] and params is not None:
if "on_startup" in params.keys():
if not set(
("operation", "distribution", "distribution_parameters")
).issubset(params["on_startup"]):
raise ValueError(
f"Please ensure the following randomization parameters for {view_name} {attribute} "
+ "on_startup are provided: operation, distribution, distribution_parameters."
)
view = task._env._world.scene._scene_registry.rigid_prim_views[view_name]
if attribute == "scale":
self.randomize_scale_on_startup(
view=view,
distribution=params["on_startup"]["distribution"],
distribution_parameters=params["on_startup"][
"distribution_parameters"
],
operation=params["on_startup"]["operation"],
sync_dim_noise=True,
)
elif attribute == "mass":
self.randomize_mass_on_startup(
view=view,
distribution=params["on_startup"]["distribution"],
distribution_parameters=params["on_startup"][
"distribution_parameters"
],
operation=params["on_startup"]["operation"],
)
elif attribute == "density":
self.randomize_density_on_startup(
view=view,
distribution=params["on_startup"]["distribution"],
distribution_parameters=params["on_startup"][
"distribution_parameters"
],
operation=params["on_startup"]["operation"],
)
if opt == "articulation_views":
if randomization_params["articulation_views"] is not None:
for view_name in randomization_params["articulation_views"].keys():
if randomization_params["articulation_views"][view_name] is not None:
for attribute, params in randomization_params["articulation_views"][view_name].items():
params = randomization_params["articulation_views"][view_name][attribute]
if attribute in ["scale"] and params is not None:
if "on_startup" in params.keys():
if not set(
("operation", "distribution", "distribution_parameters")
).issubset(params["on_startup"]):
raise ValueError(
f"Please ensure the following randomization parameters for {view_name} {attribute} "
+ "on_startup are provided: operation, distribution, distribution_parameters."
)
view = task._env._world.scene._scene_registry.articulated_views[view_name]
if attribute == "scale":
self.randomize_scale_on_startup(
view=view,
distribution=params["on_startup"]["distribution"],
distribution_parameters=params["on_startup"][
"distribution_parameters"
],
operation=params["on_startup"]["operation"],
sync_dim_noise=True,
)
else:
dr_config = self._cfg.get("domain_randomization", None)
if dr_config is None:
raise ValueError("No domain randomization parameters are specified in the task yaml config file")
randomize = dr_config.get("randomize", False)
randomization_params = dr_config.get("randomization_params", None)
if randomize == False or randomization_params is None:
print("On Startup Domain randomization will not be applied.")
def set_up_domain_randomization(self, task):
if self.randomize:
randomization_params = self._cfg["domain_randomization"]["randomization_params"]
self.rep.set_global_seed(self._config["seed"])
with self.dr.trigger.on_rl_frame(num_envs=self._cfg["env"]["numEnvs"]):
for opt in randomization_params.keys():
if opt == "observations":
self._set_up_observations_randomization(task)
elif opt == "actions":
self._set_up_actions_randomization(task)
elif opt == "simulation":
if randomization_params["simulation"] is not None:
self.distributions["simulation"] = dict()
self.dr.physics_view.register_simulation_context(task._env._world)
for attribute, params in randomization_params["simulation"].items():
self._set_up_simulation_randomization(attribute, params)
elif opt == "rigid_prim_views":
if randomization_params["rigid_prim_views"] is not None:
self.distributions["rigid_prim_views"] = dict()
for view_name in randomization_params["rigid_prim_views"].keys():
if randomization_params["rigid_prim_views"][view_name] is not None:
self.distributions["rigid_prim_views"][view_name] = dict()
self.dr.physics_view.register_rigid_prim_view(
rigid_prim_view=task._env._world.scene._scene_registry.rigid_prim_views[
view_name
],
)
for attribute, params in randomization_params["rigid_prim_views"][
view_name
].items():
if attribute not in ["scale", "density"]:
self._set_up_rigid_prim_view_randomization(view_name, attribute, params)
elif opt == "articulation_views":
if randomization_params["articulation_views"] is not None:
self.distributions["articulation_views"] = dict()
for view_name in randomization_params["articulation_views"].keys():
if randomization_params["articulation_views"][view_name] is not None:
self.distributions["articulation_views"][view_name] = dict()
self.dr.physics_view.register_articulation_view(
articulation_view=task._env._world.scene._scene_registry.articulated_views[
view_name
],
)
for attribute, params in randomization_params["articulation_views"][
view_name
].items():
if attribute not in ["scale"]:
self._set_up_articulation_view_randomization(view_name, attribute, params)
self.rep.orchestrator.run()
else:
dr_config = self._cfg.get("domain_randomization", None)
if dr_config is None:
raise ValueError("No domain randomization parameters are specified in the task yaml config file")
randomize = dr_config.get("randomize", False)
randomization_params = dr_config.get("randomization_params", None)
if randomize == False or randomization_params is None:
print("Domain randomization will not be applied.")
def _set_up_observations_randomization(self, task):
task.randomize_observations = True
self._observations_dr_params = self._cfg["domain_randomization"]["randomization_params"]["observations"]
if self._observations_dr_params is None:
raise ValueError(f"Observations randomization parameters are not provided.")
if "on_reset" in self._observations_dr_params.keys():
if not set(("operation", "distribution", "distribution_parameters")).issubset(
self._observations_dr_params["on_reset"].keys()
):
raise ValueError(
f"Please ensure the following observations on_reset randomization parameters are provided: "
+ "operation, distribution, distribution_parameters."
)
self.active_domain_randomizations[("observations", "on_reset")] = np.array(
self._observations_dr_params["on_reset"]["distribution_parameters"]
)
if "on_interval" in self._observations_dr_params.keys():
if not set(("frequency_interval", "operation", "distribution", "distribution_parameters")).issubset(
self._observations_dr_params["on_interval"].keys()
):
raise ValueError(
f"Please ensure the following observations on_interval randomization parameters are provided: "
+ "frequency_interval, operation, distribution, distribution_parameters."
)
self.active_domain_randomizations[("observations", "on_interval")] = np.array(
self._observations_dr_params["on_interval"]["distribution_parameters"]
)
self._observations_counter_buffer = torch.zeros(
(self._cfg["env"]["numEnvs"]), dtype=torch.int, device=self._config["rl_device"]
)
self._observations_correlated_noise = torch.zeros(
(self._cfg["env"]["numEnvs"], task.num_observations), device=self._config["rl_device"]
)
def _set_up_actions_randomization(self, task):
task.randomize_actions = True
self._actions_dr_params = self._cfg["domain_randomization"]["randomization_params"]["actions"]
if self._actions_dr_params is None:
raise ValueError(f"Actions randomization parameters are not provided.")
if "on_reset" in self._actions_dr_params.keys():
if not set(("operation", "distribution", "distribution_parameters")).issubset(
self._actions_dr_params["on_reset"].keys()
):
raise ValueError(
f"Please ensure the following actions on_reset randomization parameters are provided: "
+ "operation, distribution, distribution_parameters."
)
self.active_domain_randomizations[("actions", "on_reset")] = np.array(
self._actions_dr_params["on_reset"]["distribution_parameters"]
)
if "on_interval" in self._actions_dr_params.keys():
if not set(("frequency_interval", "operation", "distribution", "distribution_parameters")).issubset(
self._actions_dr_params["on_interval"].keys()
):
raise ValueError(
f"Please ensure the following actions on_interval randomization parameters are provided: "
+ "frequency_interval, operation, distribution, distribution_parameters."
)
self.active_domain_randomizations[("actions", "on_interval")] = np.array(
self._actions_dr_params["on_interval"]["distribution_parameters"]
)
self._actions_counter_buffer = torch.zeros(
(self._cfg["env"]["numEnvs"]), dtype=torch.int, device=self._config["rl_device"]
)
self._actions_correlated_noise = torch.zeros(
(self._cfg["env"]["numEnvs"], task.num_actions), device=self._config["rl_device"]
)
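    # on_reset noise is correlated: it is sampled once per environment at reset time and
    # re-applied every step until the next reset. on_interval noise is uncorrelated: a
    # fresh sample is drawn whenever an environment's step counter reaches
    # frequency_interval.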
def apply_observations_randomization(self, observations, reset_buf):
env_ids = reset_buf.nonzero(as_tuple=False).squeeze(-1)
self._observations_counter_buffer[env_ids] = 0
self._observations_counter_buffer += 1
if "on_reset" in self._observations_dr_params.keys():
observations[:] = self._apply_correlated_noise(
buffer_type="observations",
buffer=observations,
reset_ids=env_ids,
operation=self._observations_dr_params["on_reset"]["operation"],
distribution=self._observations_dr_params["on_reset"]["distribution"],
distribution_parameters=self._observations_dr_params["on_reset"]["distribution_parameters"],
)
if "on_interval" in self._observations_dr_params.keys():
randomize_ids = (
(self._observations_counter_buffer >= self._observations_dr_params["on_interval"]["frequency_interval"])
.nonzero(as_tuple=False)
.squeeze(-1)
)
self._observations_counter_buffer[randomize_ids] = 0
observations[:] = self._apply_uncorrelated_noise(
buffer=observations,
randomize_ids=randomize_ids,
operation=self._observations_dr_params["on_interval"]["operation"],
distribution=self._observations_dr_params["on_interval"]["distribution"],
distribution_parameters=self._observations_dr_params["on_interval"]["distribution_parameters"],
)
return observations
def apply_actions_randomization(self, actions, reset_buf):
env_ids = reset_buf.nonzero(as_tuple=False).squeeze(-1)
self._actions_counter_buffer[env_ids] = 0
self._actions_counter_buffer += 1
if "on_reset" in self._actions_dr_params.keys():
actions[:] = self._apply_correlated_noise(
buffer_type="actions",
buffer=actions,
reset_ids=env_ids,
operation=self._actions_dr_params["on_reset"]["operation"],
distribution=self._actions_dr_params["on_reset"]["distribution"],
distribution_parameters=self._actions_dr_params["on_reset"]["distribution_parameters"],
)
if "on_interval" in self._actions_dr_params.keys():
randomize_ids = (
(self._actions_counter_buffer >= self._actions_dr_params["on_interval"]["frequency_interval"])
.nonzero(as_tuple=False)
.squeeze(-1)
)
self._actions_counter_buffer[randomize_ids] = 0
actions[:] = self._apply_uncorrelated_noise(
buffer=actions,
randomize_ids=randomize_ids,
operation=self._actions_dr_params["on_interval"]["operation"],
distribution=self._actions_dr_params["on_interval"]["distribution"],
distribution_parameters=self._actions_dr_params["on_interval"]["distribution_parameters"],
)
return actions
def _apply_uncorrelated_noise(self, buffer, randomize_ids, operation, distribution, distribution_parameters):
if distribution == "gaussian" or distribution == "normal":
noise = torch.normal(
mean=distribution_parameters[0],
std=distribution_parameters[1],
size=(len(randomize_ids), buffer.shape[1]),
device=self._config["rl_device"],
)
elif distribution == "uniform":
noise = (distribution_parameters[1] - distribution_parameters[0]) * torch.rand(
(len(randomize_ids), buffer.shape[1]), device=self._config["rl_device"]
) + distribution_parameters[0]
elif distribution == "loguniform" or distribution == "log_uniform":
noise = torch.exp(
(np.log(distribution_parameters[1]) - np.log(distribution_parameters[0]))
* torch.rand((len(randomize_ids), buffer.shape[1]), device=self._config["rl_device"])
+ np.log(distribution_parameters[0])
)
else:
print(f"The specified {distribution} distribution is not supported.")
if operation == "additive":
buffer[randomize_ids] += noise
elif operation == "scaling":
buffer[randomize_ids] *= noise
else:
print(f"The specified {operation} operation type is not supported.")
return buffer
def _apply_correlated_noise(self, buffer_type, buffer, reset_ids, operation, distribution, distribution_parameters):
if buffer_type == "observations":
correlated_noise_buffer = self._observations_correlated_noise
elif buffer_type == "actions":
correlated_noise_buffer = self._actions_correlated_noise
if len(reset_ids) > 0:
if distribution == "gaussian" or distribution == "normal":
correlated_noise_buffer[reset_ids] = torch.normal(
mean=distribution_parameters[0],
std=distribution_parameters[1],
size=(len(reset_ids), buffer.shape[1]),
device=self._config["rl_device"],
)
elif distribution == "uniform":
correlated_noise_buffer[reset_ids] = (
distribution_parameters[1] - distribution_parameters[0]
) * torch.rand(
(len(reset_ids), buffer.shape[1]), device=self._config["rl_device"]
) + distribution_parameters[
0
]
elif distribution == "loguniform" or distribution == "log_uniform":
correlated_noise_buffer[reset_ids] = torch.exp(
(np.log(distribution_parameters[1]) - np.log(distribution_parameters[0]))
* torch.rand((len(reset_ids), buffer.shape[1]), device=self._config["rl_device"])
+ np.log(distribution_parameters[0])
)
else:
print(f"The specified {distribution} distribution is not supported.")
if operation == "additive":
buffer += correlated_noise_buffer
elif operation == "scaling":
buffer *= correlated_noise_buffer
else:
print(f"The specified {operation} operation type is not supported.")
return buffer
def _set_up_simulation_randomization(self, attribute, params):
if params is None:
raise ValueError(f"Randomization parameters for simulation {attribute} is not provided.")
if attribute in self.dr.SIMULATION_CONTEXT_ATTRIBUTES:
self.distributions["simulation"][attribute] = dict()
if "on_reset" in params.keys():
if not set(("operation", "distribution", "distribution_parameters")).issubset(params["on_reset"]):
raise ValueError(
f"Please ensure the following randomization parameters for simulation {attribute} on_reset are provided: "
+ "operation, distribution, distribution_parameters."
)
self.active_domain_randomizations[("simulation", attribute, "on_reset")] = np.array(
params["on_reset"]["distribution_parameters"]
)
kwargs = {"operation": params["on_reset"]["operation"]}
self.distributions["simulation"][attribute]["on_reset"] = self._generate_distribution(
dimension=self.dr.physics_view._simulation_context_initial_values[attribute].shape[0],
view_name="simulation",
attribute=attribute,
params=params["on_reset"],
)
kwargs[attribute] = self.distributions["simulation"][attribute]["on_reset"]
with self.dr.gate.on_env_reset():
self.dr.physics_view.randomize_simulation_context(**kwargs)
if "on_interval" in params.keys():
if not set(("frequency_interval", "operation", "distribution", "distribution_parameters")).issubset(
params["on_interval"]
):
raise ValueError(
f"Please ensure the following randomization parameters for simulation {attribute} on_interval are provided: "
+ "frequency_interval, operation, distribution, distribution_parameters."
)
self.active_domain_randomizations[("simulation", attribute, "on_interval")] = np.array(
params["on_interval"]["distribution_parameters"]
)
kwargs = {"operation": params["on_interval"]["operation"]}
self.distributions["simulation"][attribute]["on_interval"] = self._generate_distribution(
dimension=self.dr.physics_view._simulation_context_initial_values[attribute].shape[0],
view_name="simulation",
attribute=attribute,
params=params["on_interval"],
)
kwargs[attribute] = self.distributions["simulation"][attribute]["on_interval"]
with self.dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]):
self.dr.physics_view.randomize_simulation_context(**kwargs)
def _set_up_rigid_prim_view_randomization(self, view_name, attribute, params):
if params is None:
raise ValueError(f"Randomization parameters for rigid prim view {view_name} {attribute} is not provided.")
if attribute in self.dr.RIGID_PRIM_ATTRIBUTES:
self.distributions["rigid_prim_views"][view_name][attribute] = dict()
if "on_reset" in params.keys():
if not set(("operation", "distribution", "distribution_parameters")).issubset(params["on_reset"]):
raise ValueError(
f"Please ensure the following randomization parameters for {view_name} {attribute} on_reset are provided: "
+ "operation, distribution, distribution_parameters."
)
self.active_domain_randomizations[("rigid_prim_views", view_name, attribute, "on_reset")] = np.array(
params["on_reset"]["distribution_parameters"]
)
kwargs = {"view_name": view_name, "operation": params["on_reset"]["operation"]}
if attribute == "material_properties" and "num_buckets" in params["on_reset"].keys():
kwargs["num_buckets"] = params["on_reset"]["num_buckets"]
self.distributions["rigid_prim_views"][view_name][attribute]["on_reset"] = self._generate_distribution(
dimension=self.dr.physics_view._rigid_prim_views_initial_values[view_name][attribute].shape[1],
view_name=view_name,
attribute=attribute,
params=params["on_reset"],
)
kwargs[attribute] = self.distributions["rigid_prim_views"][view_name][attribute]["on_reset"]
with self.dr.gate.on_env_reset():
self.dr.physics_view.randomize_rigid_prim_view(**kwargs)
if "on_interval" in params.keys():
if not set(("frequency_interval", "operation", "distribution", "distribution_parameters")).issubset(
params["on_interval"]
):
raise ValueError(
f"Please ensure the following randomization parameters for {view_name} {attribute} on_interval are provided: "
+ "frequency_interval, operation, distribution, distribution_parameters."
)
self.active_domain_randomizations[("rigid_prim_views", view_name, attribute, "on_interval")] = np.array(
params["on_interval"]["distribution_parameters"]
)
kwargs = {"view_name": view_name, "operation": params["on_interval"]["operation"]}
if attribute == "material_properties" and "num_buckets" in params["on_interval"].keys():
kwargs["num_buckets"] = params["on_interval"]["num_buckets"]
self.distributions["rigid_prim_views"][view_name][attribute][
"on_interval"
] = self._generate_distribution(
dimension=self.dr.physics_view._rigid_prim_views_initial_values[view_name][attribute].shape[1],
view_name=view_name,
attribute=attribute,
params=params["on_interval"],
)
kwargs[attribute] = self.distributions["rigid_prim_views"][view_name][attribute]["on_interval"]
with self.dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]):
self.dr.physics_view.randomize_rigid_prim_view(**kwargs)
else:
raise ValueError(f"The attribute {attribute} for {view_name} is invalid for domain randomization.")
def _set_up_articulation_view_randomization(self, view_name, attribute, params):
if params is None:
raise ValueError(f"Randomization parameters for articulation view {view_name} {attribute} is not provided.")
if attribute in self.dr.ARTICULATION_ATTRIBUTES:
self.distributions["articulation_views"][view_name][attribute] = dict()
if "on_reset" in params.keys():
if not set(("operation", "distribution", "distribution_parameters")).issubset(params["on_reset"]):
raise ValueError(
f"Please ensure the following randomization parameters for {view_name} {attribute} on_reset are provided: "
+ "operation, distribution, distribution_parameters."
)
self.active_domain_randomizations[("articulation_views", view_name, attribute, "on_reset")] = np.array(
params["on_reset"]["distribution_parameters"]
)
kwargs = {"view_name": view_name, "operation": params["on_reset"]["operation"]}
if attribute == "material_properties" and "num_buckets" in params["on_reset"].keys():
kwargs["num_buckets"] = params["on_reset"]["num_buckets"]
self.distributions["articulation_views"][view_name][attribute][
"on_reset"
] = self._generate_distribution(
dimension=self.dr.physics_view._articulation_views_initial_values[view_name][attribute].shape[1],
view_name=view_name,
attribute=attribute,
params=params["on_reset"],
)
kwargs[attribute] = self.distributions["articulation_views"][view_name][attribute]["on_reset"]
with self.dr.gate.on_env_reset():
self.dr.physics_view.randomize_articulation_view(**kwargs)
if "on_interval" in params.keys():
if not set(("frequency_interval", "operation", "distribution", "distribution_parameters")).issubset(
params["on_interval"]
):
raise ValueError(
f"Please ensure the following randomization parameters for {view_name} {attribute} on_interval are provided: "
+ "frequency_interval, operation, distribution, distribution_parameters."
)
self.active_domain_randomizations[
("articulation_views", view_name, attribute, "on_interval")
] = np.array(params["on_interval"]["distribution_parameters"])
kwargs = {"view_name": view_name, "operation": params["on_interval"]["operation"]}
if attribute == "material_properties" and "num_buckets" in params["on_interval"].keys():
kwargs["num_buckets"] = params["on_interval"]["num_buckets"]
self.distributions["articulation_views"][view_name][attribute][
"on_interval"
] = self._generate_distribution(
dimension=self.dr.physics_view._articulation_views_initial_values[view_name][attribute].shape[1],
view_name=view_name,
attribute=attribute,
params=params["on_interval"],
)
kwargs[attribute] = self.distributions["articulation_views"][view_name][attribute]["on_interval"]
with self.dr.gate.on_interval(interval=params["on_interval"]["frequency_interval"]):
self.dr.physics_view.randomize_articulation_view(**kwargs)
else:
raise ValueError(f"The attribute {attribute} for {view_name} is invalid for domain randomization.")
def _generate_distribution(self, view_name, attribute, dimension, params):
dist_params = self._sanitize_distribution_parameters(attribute, dimension, params["distribution_parameters"])
if params["distribution"] == "uniform":
return self.rep.distribution.uniform(tuple(dist_params[0]), tuple(dist_params[1]))
elif params["distribution"] == "gaussian" or params["distribution"] == "normal":
return self.rep.distribution.normal(tuple(dist_params[0]), tuple(dist_params[1]))
elif params["distribution"] == "loguniform" or params["distribution"] == "log_uniform":
return self.rep.distribution.log_uniform(tuple(dist_params[0]), tuple(dist_params[1]))
else:
raise ValueError(
f"The provided distribution for {view_name} {attribute} is not supported. "
+ "Options: uniform, gaussian/normal, loguniform/log_uniform"
)
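    # distribution_parameters may be given as a single [low, high] (or [mean, std]) pair,
    # as one pair of per-dimension lists, or - for material_properties and body_inertias -
    # as one pair of 3-vectors that is broadcast across all shapes/bodies in the view.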
def _sanitize_distribution_parameters(self, attribute, dimension, params):
distribution_parameters = np.array(params)
if distribution_parameters.shape == (2,):
# if the user does not provide a set of parameters for each dimension
dist_params = [[distribution_parameters[0]] * dimension, [distribution_parameters[1]] * dimension]
elif distribution_parameters.shape == (2, dimension):
# if the user provides a set of parameters for each dimension in the format [[...], [...]]
dist_params = distribution_parameters.tolist()
elif attribute in ["material_properties", "body_inertias"] and distribution_parameters.shape == (2, 3):
# if the user only provides the parameters for one body in the articulation, assume the same parameters for all other links
dist_params = [
[distribution_parameters[0]] * (dimension // 3),
[distribution_parameters[1]] * (dimension // 3),
]
else:
raise ValueError(
f"The provided distribution_parameters for {view_name} {attribute} is invalid due to incorrect dimensions."
)
return dist_params
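    # The getters/setters below allow the active distribution parameters to be read and
    # updated at runtime, addressed by the same tuple keys stored in
    # active_domain_randomizations. Sketch (view/attribute names are placeholders):
    #
    #     randomizer.set_dr_distribution_parameters(
    #         [0.9, 1.1], "rigid_prim_views", "object_view", "mass", "on_reset"
    #     )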
def set_dr_distribution_parameters(self, distribution_parameters, *distribution_path):
if distribution_path not in self.active_domain_randomizations.keys():
raise ValueError(
f"Cannot find a valid domain randomization distribution using the path {distribution_path}."
)
if distribution_path[0] == "observations":
if len(distribution_parameters) == 2:
self._observations_dr_params[distribution_path[1]]["distribution_parameters"] = distribution_parameters
else:
raise ValueError(
f"Please provide distribution_parameters for observations {distribution_path[1]} "
+ "in the form of [dist_param_1, dist_param_2]"
)
elif distribution_path[0] == "actions":
if len(distribution_parameters) == 2:
self._actions_dr_params[distribution_path[1]]["distribution_parameters"] = distribution_parameters
else:
raise ValueError(
f"Please provide distribution_parameters for actions {distribution_path[1]} "
+ "in the form of [dist_param_1, dist_param_2]"
)
else:
replicator_distribution = self.distributions[distribution_path[0]][distribution_path[1]][
distribution_path[2]
]
if distribution_path[0] == "rigid_prim_views" or distribution_path[0] == "articulation_views":
replicator_distribution = replicator_distribution[distribution_path[3]]
if (
replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleUniform"
or replicator_distribution.node.get_node_type().get_node_type()
== "omni.replicator.core.OgnSampleLogUniform"
):
dimension = len(self.dr.utils.get_distribution_params(replicator_distribution, ["lower"])[0])
dist_params = self._sanitize_distribution_parameters(
distribution_path[-2], dimension, distribution_parameters
)
self.dr.utils.set_distribution_params(
replicator_distribution, {"lower": dist_params[0], "upper": dist_params[1]}
)
elif replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleNormal":
dimension = len(self.dr.utils.get_distribution_params(replicator_distribution, ["mean"])[0])
dist_params = self._sanitize_distribution_parameters(
distribution_path[-2], dimension, distribution_parameters
)
self.dr.utils.set_distribution_params(
replicator_distribution, {"mean": dist_params[0], "std": dist_params[1]}
)
def get_dr_distribution_parameters(self, *distribution_path):
if distribution_path not in self.active_domain_randomizations.keys():
raise ValueError(
f"Cannot find a valid domain randomization distribution using the path {distribution_path}."
)
if distribution_path[0] == "observations":
return self._observations_dr_params[distribution_path[1]]["distribution_parameters"]
elif distribution_path[0] == "actions":
return self._actions_dr_params[distribution_path[1]]["distribution_parameters"]
else:
replicator_distribution = self.distributions[distribution_path[0]][distribution_path[1]][
distribution_path[2]
]
if distribution_path[0] == "rigid_prim_views" or distribution_path[0] == "articulation_views":
replicator_distribution = replicator_distribution[distribution_path[3]]
if (
replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleUniform"
or replicator_distribution.node.get_node_type().get_node_type()
== "omni.replicator.core.OgnSampleLogUniform"
):
return self.dr.utils.get_distribution_params(replicator_distribution, ["lower", "upper"])
elif replicator_distribution.node.get_node_type().get_node_type() == "omni.replicator.core.OgnSampleNormal":
return self.dr.utils.get_distribution_params(replicator_distribution, ["mean", "std"])
def get_initial_dr_distribution_parameters(self, *distribution_path):
if distribution_path not in self.active_domain_randomizations.keys():
raise ValueError(
f"Cannot find a valid domain randomization distribution using the path {distribution_path}."
)
return self.active_domain_randomizations[distribution_path].copy()
def _generate_noise(self, distribution, distribution_parameters, size, device):
if distribution == "gaussian" or distribution == "normal":
noise = torch.normal(
mean=distribution_parameters[0], std=distribution_parameters[1], size=size, device=device
)
elif distribution == "uniform":
noise = (distribution_parameters[1] - distribution_parameters[0]) * torch.rand(
size, device=device
) + distribution_parameters[0]
elif distribution == "loguniform" or distribution == "log_uniform":
noise = torch.exp(
(np.log(distribution_parameters[1]) - np.log(distribution_parameters[0]))
* torch.rand(size, device=device)
+ np.log(distribution_parameters[0])
)
else:
print(f"The specified {distribution} distribution is not supported.")
return noise
def randomize_scale_on_startup(self, view, distribution, distribution_parameters, operation, sync_dim_noise=True):
scales = view.get_local_scales()
if sync_dim_noise:
dist_params = np.asarray(
self._sanitize_distribution_parameters(attribute="scale", dimension=1, params=distribution_parameters)
)
noise = (
self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device).repeat(3, 1).T
)
else:
dist_params = np.asarray(
self._sanitize_distribution_parameters(attribute="scale", dimension=3, params=distribution_parameters)
)
noise = torch.zeros((view.count, 3), device=view._device)
for i in range(3):
noise[:, i] = self._generate_noise(distribution, dist_params[:, i], (view.count,), view._device)
if operation == "additive":
scales += noise
elif operation == "scaling":
scales *= noise
elif operation == "direct":
scales = noise
else:
print(f"The specified {operation} operation type is not supported.")
view.set_local_scales(scales=scales)
def randomize_mass_on_startup(self, view, distribution, distribution_parameters, operation):
if isinstance(view, omni.isaac.core.prims.RigidPrimView) or isinstance(view, RigidPrimView):
masses = view.get_masses()
dist_params = np.asarray(
self._sanitize_distribution_parameters(
attribute=f"{view.name} mass", dimension=1, params=distribution_parameters
)
)
noise = self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device)
set_masses = view.set_masses
if operation == "additive":
masses += noise
elif operation == "scaling":
masses *= noise
elif operation == "direct":
masses = noise
else:
print(f"The specified {operation} operation type is not supported.")
set_masses(masses)
def randomize_density_on_startup(self, view, distribution, distribution_parameters, operation):
if isinstance(view, omni.isaac.core.prims.RigidPrimView) or isinstance(view, RigidPrimView):
densities = view.get_densities()
dist_params = np.asarray(
self._sanitize_distribution_parameters(
attribute=f"{view.name} density", dimension=1, params=distribution_parameters
)
)
noise = self._generate_noise(distribution, dist_params.squeeze(), (view.count,), view._device)
set_densities = view.set_densities
if operation == "additive":
densities += noise
elif operation == "scaling":
densities *= noise
elif operation == "direct":
densities = noise
else:
print(f"The specified {operation} operation type is not supported.")
set_densities(densities)
| 45,603 | Python | 58.691099 | 136 | 0.555051 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/utils/rlgames/rlgames_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Callable
import numpy as np
import torch
from rl_games.algos_torch import torch_ext
from rl_games.common import env_configurations, vecenv
from rl_games.common.algo_observer import AlgoObserver
class RLGPUAlgoObserver(AlgoObserver):
"""Allows us to log stats from the env along with the algorithm running stats."""
def __init__(self):
pass
def after_init(self, algo):
self.algo = algo
self.mean_scores = torch_ext.AverageMeter(1, self.algo.games_to_track).to(self.algo.ppo_device)
self.ep_infos = []
self.direct_info = {}
self.writer = self.algo.writer
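    # Collect per-episode statistics from infos["episode"] and any scalar entries in
    # infos; both are written to TensorBoard in after_print_stats.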
def process_infos(self, infos, done_indices):
assert isinstance(infos, dict), "RLGPUAlgoObserver expects dict info"
if isinstance(infos, dict):
if "episode" in infos:
self.ep_infos.append(infos["episode"])
if len(infos) > 0 and isinstance(infos, dict): # allow direct logging from env
self.direct_info = {}
for k, v in infos.items():
# only log scalars
if (
isinstance(v, float)
or isinstance(v, int)
or (isinstance(v, torch.Tensor) and len(v.shape) == 0)
):
self.direct_info[k] = v
def after_clear_stats(self):
self.mean_scores.clear()
def after_print_stats(self, frame, epoch_num, total_time):
if self.ep_infos:
for key in self.ep_infos[0]:
infotensor = torch.tensor([], device=self.algo.device)
for ep_info in self.ep_infos:
# handle scalar and zero dimensional tensor infos
if not isinstance(ep_info[key], torch.Tensor):
ep_info[key] = torch.Tensor([ep_info[key]])
if len(ep_info[key].shape) == 0:
ep_info[key] = ep_info[key].unsqueeze(0)
infotensor = torch.cat((infotensor, ep_info[key].to(self.algo.device)))
value = torch.mean(infotensor)
self.writer.add_scalar("Episode/" + key, value, epoch_num)
self.ep_infos.clear()
for k, v in self.direct_info.items():
self.writer.add_scalar(f"{k}/frame", v, frame)
self.writer.add_scalar(f"{k}/iter", v, epoch_num)
self.writer.add_scalar(f"{k}/time", v, total_time)
if self.mean_scores.current_size > 0:
mean_scores = self.mean_scores.get_mean()
self.writer.add_scalar("scores/mean", mean_scores, frame)
self.writer.add_scalar("scores/iter", mean_scores, epoch_num)
self.writer.add_scalar("scores/time", mean_scores, total_time)
class RLGPUEnv(vecenv.IVecEnv):
def __init__(self, config_name, num_actors, **kwargs):
self.env = env_configurations.configurations[config_name]["env_creator"](**kwargs)
def step(self, action):
return self.env.step(action)
def reset(self):
return self.env.reset()
def get_number_of_agents(self):
return self.env.get_number_of_agents()
def get_env_info(self):
info = {}
info["action_space"] = self.env.action_space
info["observation_space"] = self.env.observation_space
if self.env.num_states > 0:
info["state_space"] = self.env.state_space
print(info["action_space"], info["observation_space"], info["state_space"])
else:
print(info["action_space"], info["observation_space"])
return info
| 5,201 | Python | 40.951613 | 103 | 0.636801 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/utils/rlgames/rlgames_train_mt.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import datetime
import os
import queue
import threading
import traceback
import hydra
from omegaconf import DictConfig
from omni.isaac.gym.vec_env.vec_env_mt import TrainerMT
from omniisaacgymenvs.envs.vec_env_rlgames_mt import VecEnvRLGamesMT
from omniisaacgymenvs.utils.config_utils.path_utils import retrieve_checkpoint_path
from omniisaacgymenvs.utils.hydra_cfg.hydra_utils import *
from omniisaacgymenvs.utils.hydra_cfg.reformat import omegaconf_to_dict, print_dict
from omniisaacgymenvs.utils.rlgames.rlgames_utils import RLGPUAlgoObserver, RLGPUEnv
from omniisaacgymenvs.utils.task_util import initialize_task
from rl_games.common import env_configurations, vecenv
from rl_games.torch_runner import Runner
class RLGTrainer:
def __init__(self, cfg, cfg_dict):
self.cfg = cfg
self.cfg_dict = cfg_dict
# ensure checkpoints can be specified as relative paths
self._bad_checkpoint = False
if self.cfg.checkpoint:
self.cfg.checkpoint = retrieve_checkpoint_path(self.cfg.checkpoint)
if not self.cfg.checkpoint:
self._bad_checkpoint = True
def launch_rlg_hydra(self, env):
# `create_rlgpu_env` is environment construction function which is passed to RL Games and called internally.
# We use the helper function here to specify the environment config.
self.cfg_dict["task"]["test"] = self.cfg.test
# register the rl-games adapter to use inside the runner
vecenv.register("RLGPU", lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))
env_configurations.register("rlgpu", {"vecenv_type": "RLGPU", "env_creator": lambda **kwargs: env})
self.rlg_config_dict = omegaconf_to_dict(self.cfg.train)
def run(self):
# create runner and set the settings
runner = Runner(RLGPUAlgoObserver())
# add evaluation parameters
if self.cfg.evaluation:
player_config = self.rlg_config_dict["params"]["config"].get("player", {})
player_config["evaluation"] = True
player_config["update_checkpoint_freq"] = 100
player_config["dir_to_monitor"] = os.path.dirname(self.cfg.checkpoint)
self.rlg_config_dict["params"]["config"]["player"] = player_config
# load config
runner.load(copy.deepcopy(self.rlg_config_dict))
runner.reset()
# dump config dict
experiment_dir = os.path.join("runs", self.cfg.train.params.config.name)
os.makedirs(experiment_dir, exist_ok=True)
with open(os.path.join(experiment_dir, "config.yaml"), "w") as f:
f.write(OmegaConf.to_yaml(self.cfg))
time_str = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
if self.cfg.wandb_activate:
# Make sure to install WandB if you actually use this.
import wandb
run_name = f"{self.cfg.wandb_name}_{time_str}"
wandb.init(
project=self.cfg.wandb_project,
group=self.cfg.wandb_group,
entity=self.cfg.wandb_entity,
config=self.cfg_dict,
sync_tensorboard=True,
id=run_name,
resume="allow",
monitor_gym=True,
)
runner.run(
{"train": not self.cfg.test, "play": self.cfg.test, "checkpoint": self.cfg.checkpoint, "sigma": None}
)
if self.cfg.wandb_activate:
wandb.finish()
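# Multi-threaded trainer: rl_games runs inside a background PPOTrainer thread while the
# main thread keeps stepping the simulation, with actions and observations exchanged
# through the bounded queues created in run().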
class Trainer(TrainerMT):
def __init__(self, trainer, env):
self.ppo_thread = None
self.action_queue = None
self.data_queue = None
self.trainer = trainer
self.is_running = False
self.env = env
self.create_task()
self.run()
def create_task(self):
self.trainer.launch_rlg_hydra(self.env)
# task = initialize_task(self.trainer.cfg_dict, self.env, init_sim=False)
self.task = self.env._task
def run(self):
self.is_running = True
self.action_queue = queue.Queue(1)
self.data_queue = queue.Queue(1)
if "mt_timeout" in self.trainer.cfg_dict:
self.env.initialize(self.action_queue, self.data_queue, self.trainer.cfg_dict["mt_timeout"])
else:
self.env.initialize(self.action_queue, self.data_queue)
self.ppo_thread = PPOTrainer(self.env, self.task, self.trainer)
self.ppo_thread.daemon = True
self.ppo_thread.start()
def stop(self):
self.env.stop = True
self.env.clear_queues()
if self.action_queue:
self.action_queue.join()
if self.data_queue:
self.data_queue.join()
if self.ppo_thread:
self.ppo_thread.join()
self.action_queue = None
self.data_queue = None
self.ppo_thread = None
self.is_running = False
class PPOTrainer(threading.Thread):
def __init__(self, env, task, trainer):
super().__init__()
self.env = env
self.task = task
self.trainer = trainer
def run(self):
from omni.isaac.gym.vec_env import TaskStopException
print("starting ppo...")
try:
self.trainer.run()
# trainer finished - send stop signal to main thread
self.env.should_run = False
self.env.send_actions(None, block=False)
except TaskStopException:
print("Task Stopped!")
self.env.should_run = False
self.env.send_actions(None, block=False)
except Exception as e:
# an error occurred on the RL side - signal stop to main thread
print(traceback.format_exc())
self.env.should_run = False
self.env.send_actions(None, block=False)
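# --- Editor sketch (not part of the original file) ----------------------------
# A hedged example of how these classes would typically be wired together in a
# multi-threaded training entry point: the RL Games PPO loop runs inside the
# PPOTrainer thread while the simulator keeps stepping on the main thread. The
# names `cfg`, `cfg_dict`, and `env` (a multi-threaded VecEnv-style environment)
# are assumptions taken from the surrounding context, not verified API:
#
#   rlg_trainer = RLGTrainer(cfg, cfg_dict)   # resolves the checkpoint, holds configs
#   trainer = Trainer(rlg_trainer, env)       # registers the env and starts the PPO thread
#   env.run(trainer)                          # main thread services the action/data queues
# -------------------------------------------------------------------------------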
| 7,402 | Python | 36.770408 | 119 | 0.653337 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/utils/config_utils/sim_config.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import carb
import numpy as np
import omni.usd
import torch
from omni.isaac.core.utils.extensions import enable_extension
from omniisaacgymenvs.utils.config_utils.default_scene_params import *
class SimConfig:
def __init__(self, config: dict = None):
if config is None:
config = dict()
self._config = config
self._cfg = config.get("task", dict())
self._parse_config()
if self._config["test"] == True:
self._sim_params["enable_scene_query_support"] = True
if (
self._config["headless"] == True
and not self._sim_params["enable_cameras"]
and not self._config["enable_livestream"]
):
self._sim_params["use_fabric"] = False
self._sim_params["enable_viewport"] = False
else:
self._sim_params["enable_viewport"] = True
enable_extension("omni.kit.viewport.bundle")
if self._sim_params["enable_cameras"]:
enable_extension("omni.replicator.isaac")
self._sim_params["warp"] = self._config["warp"]
self._sim_params["sim_device"] = self._config["sim_device"]
self._adjust_dt()
if self._sim_params["disable_contact_processing"]:
carb.settings.get_settings().set_bool("/physics/disableContactProcessing", True)
carb.settings.get_settings().set_bool("/physics/physxDispatcher", True)
# Force the background grid off all the time for RL tasks, to avoid the grid showing up in any RL camera task
carb.settings.get_settings().set("/app/viewport/grid/enabled", False)
# Disable framerate limiting which might cause rendering slowdowns
carb.settings.get_settings().set("/app/runLoops/main/rateLimitEnabled", False)
import omni.ui
        # Dock floating UIs; this might not be needed anymore as extensions dock themselves
# Method for docking a particular window to a location
def dock_window(space, name, location, ratio=0.5):
window = omni.ui.Workspace.get_window(name)
if window and space:
window.dock_in(space, location, ratio=ratio)
return window
# Acquire the main docking station
main_dockspace = omni.ui.Workspace.get_window("DockSpace")
dock_window(main_dockspace, "Content", omni.ui.DockPosition.BOTTOM, 0.3)
window = omni.ui.Workspace.get_window("Content")
if window:
window.visible = False
def _parse_config(self):
        # general sim parameters
self._sim_params = copy.deepcopy(default_sim_params)
self._default_physics_material = copy.deepcopy(default_physics_material)
sim_cfg = self._cfg.get("sim", None)
if sim_cfg is not None:
for opt in sim_cfg.keys():
if opt in self._sim_params:
if opt == "default_physics_material":
for material_opt in sim_cfg[opt]:
self._default_physics_material[material_opt] = sim_cfg[opt][material_opt]
else:
self._sim_params[opt] = sim_cfg[opt]
else:
print("Sim params does not have attribute: ", opt)
self._sim_params["default_physics_material"] = self._default_physics_material
# physx parameters
self._physx_params = copy.deepcopy(default_physx_params)
if sim_cfg is not None and "physx" in sim_cfg:
for opt in sim_cfg["physx"].keys():
if opt in self._physx_params:
self._physx_params[opt] = sim_cfg["physx"][opt]
else:
print("Physx sim params does not have attribute: ", opt)
self._sanitize_device()
def _sanitize_device(self):
if self._sim_params["use_gpu_pipeline"]:
self._physx_params["use_gpu"] = True
# device should be in sync with pipeline
if self._sim_params["use_gpu_pipeline"]:
self._config["sim_device"] = f"cuda:{self._config['device_id']}"
else:
self._config["sim_device"] = "cpu"
# also write to physics params for setting sim device
self._physx_params["sim_device"] = self._config["sim_device"]
print("Pipeline: ", "GPU" if self._sim_params["use_gpu_pipeline"] else "CPU")
print("Pipeline Device: ", self._config["sim_device"])
print("Sim Device: ", "GPU" if self._physx_params["use_gpu"] else "CPU")
def parse_actor_config(self, actor_name):
actor_params = copy.deepcopy(default_actor_options)
if "sim" in self._cfg and actor_name in self._cfg["sim"]:
actor_cfg = self._cfg["sim"][actor_name]
for opt in actor_cfg.keys():
if actor_cfg[opt] != -1 and opt in actor_params:
actor_params[opt] = actor_cfg[opt]
elif opt not in actor_params:
print("Actor params does not have attribute: ", opt)
return actor_params
def _get_actor_config_value(self, actor_name, attribute_name, attribute=None):
actor_params = self.parse_actor_config(actor_name)
if attribute is not None:
if attribute_name not in actor_params:
return attribute.Get()
if actor_params[attribute_name] != -1:
return actor_params[attribute_name]
elif actor_params["override_usd_defaults"] and not attribute.IsAuthored():
return self._physx_params[attribute_name]
else:
if actor_params[attribute_name] != -1:
return actor_params[attribute_name]
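    # Editor note (not original code) on the resolution order in
    # _get_actor_config_value above: an attribute name missing from the actor
    # options falls back to the value currently stored in USD; an explicit
    # (non -1) value from the task config wins; otherwise, when
    # `override_usd_defaults` is set and the attribute was never authored in USD,
    # the PhysX default from default_physx_params is used; in all remaining cases
    # the method returns None.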
def _adjust_dt(self):
# re-evaluate rendering dt to simulate physics substeps
physics_dt = self.sim_params["dt"]
rendering_dt = self.sim_params["rendering_dt"]
# by default, rendering dt = physics dt
if rendering_dt <= 0:
rendering_dt = physics_dt
self.task_config["renderingInterval"] = max(round((1/physics_dt) / (1/rendering_dt)), 1)
# we always set rendering dt to be the same as physics dt, stepping is taken care of in VecEnvRLGames
self.sim_params["rendering_dt"] = physics_dt
@property
def sim_params(self):
return self._sim_params
@property
def config(self):
return self._config
@property
def task_config(self):
return self._cfg
@property
def physx_params(self):
return self._physx_params
def get_physics_params(self):
return {**self.sim_params, **self.physx_params}
def _get_physx_collision_api(self, prim):
from pxr import PhysxSchema, UsdPhysics
physx_collision_api = PhysxSchema.PhysxCollisionAPI(prim)
if not physx_collision_api:
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(prim)
return physx_collision_api
def _get_physx_rigid_body_api(self, prim):
from pxr import PhysxSchema, UsdPhysics
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI(prim)
if not physx_rb_api:
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Apply(prim)
return physx_rb_api
def _get_physx_articulation_api(self, prim):
from pxr import PhysxSchema, UsdPhysics
arti_api = PhysxSchema.PhysxArticulationAPI(prim)
if not arti_api:
arti_api = PhysxSchema.PhysxArticulationAPI.Apply(prim)
return arti_api
def set_contact_offset(self, name, prim, value=None):
physx_collision_api = self._get_physx_collision_api(prim)
contact_offset = physx_collision_api.GetContactOffsetAttr()
# if not contact_offset:
# contact_offset = physx_collision_api.CreateContactOffsetAttr()
if value is None:
value = self._get_actor_config_value(name, "contact_offset", contact_offset)
if value != -1:
contact_offset.Set(value)
def set_rest_offset(self, name, prim, value=None):
physx_collision_api = self._get_physx_collision_api(prim)
rest_offset = physx_collision_api.GetRestOffsetAttr()
# if not rest_offset:
# rest_offset = physx_collision_api.CreateRestOffsetAttr()
if value is None:
value = self._get_actor_config_value(name, "rest_offset", rest_offset)
if value != -1:
rest_offset.Set(value)
def set_position_iteration(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
solver_position_iteration_count = physx_rb_api.GetSolverPositionIterationCountAttr()
if value is None:
value = self._get_actor_config_value(
name, "solver_position_iteration_count", solver_position_iteration_count
)
if value != -1:
solver_position_iteration_count.Set(value)
def set_velocity_iteration(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
solver_velocity_iteration_count = physx_rb_api.GetSolverVelocityIterationCountAttr()
if value is None:
value = self._get_actor_config_value(
name, "solver_velocity_iteration_count", solver_velocity_iteration_count
)
if value != -1:
solver_velocity_iteration_count.Set(value)
def set_max_depenetration_velocity(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
max_depenetration_velocity = physx_rb_api.GetMaxDepenetrationVelocityAttr()
if value is None:
value = self._get_actor_config_value(name, "max_depenetration_velocity", max_depenetration_velocity)
if value != -1:
max_depenetration_velocity.Set(value)
def set_sleep_threshold(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
sleep_threshold = physx_rb_api.GetSleepThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "sleep_threshold", sleep_threshold)
if value != -1:
sleep_threshold.Set(value)
def set_stabilization_threshold(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
stabilization_threshold = physx_rb_api.GetStabilizationThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "stabilization_threshold", stabilization_threshold)
if value != -1:
stabilization_threshold.Set(value)
def set_gyroscopic_forces(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
enable_gyroscopic_forces = physx_rb_api.GetEnableGyroscopicForcesAttr()
if value is None:
value = self._get_actor_config_value(name, "enable_gyroscopic_forces", enable_gyroscopic_forces)
if value != -1:
enable_gyroscopic_forces.Set(value)
def set_density(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
density = physx_rb_api.GetDensityAttr()
if value is None:
value = self._get_actor_config_value(name, "density", density)
if value != -1:
density.Set(value)
# auto-compute mass
            self.set_mass(name, prim, 0.0)
def set_mass(self, name, prim, value=None):
physx_rb_api = self._get_physx_rigid_body_api(prim)
mass = physx_rb_api.GetMassAttr()
if value is None:
value = self._get_actor_config_value(name, "mass", mass)
if value != -1:
mass.Set(value)
def retain_acceleration(self, prim):
# retain accelerations if running with more than one substep
physx_rb_api = self._get_physx_rigid_body_api(prim)
if self._sim_params["substeps"] > 1:
physx_rb_api.GetRetainAccelerationsAttr().Set(True)
def make_kinematic(self, name, prim, cfg, value=None):
# make rigid body kinematic (fixed base and no collision)
from pxr import PhysxSchema, UsdPhysics
stage = omni.usd.get_context().get_stage()
if value is None:
value = self._get_actor_config_value(name, "make_kinematic")
if value == True:
# parse through all children prims
prims = [prim]
while len(prims) > 0:
cur_prim = prims.pop(0)
rb = UsdPhysics.RigidBodyAPI.Get(stage, cur_prim.GetPath())
if rb:
rb.CreateKinematicEnabledAttr().Set(True)
children_prims = cur_prim.GetPrim().GetChildren()
prims = prims + children_prims
def set_articulation_position_iteration(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
solver_position_iteration_count = arti_api.GetSolverPositionIterationCountAttr()
if value is None:
value = self._get_actor_config_value(
name, "solver_position_iteration_count", solver_position_iteration_count
)
if value != -1:
solver_position_iteration_count.Set(value)
def set_articulation_velocity_iteration(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
solver_velocity_iteration_count = arti_api.GetSolverVelocityIterationCountAttr()
if value is None:
value = self._get_actor_config_value(
name, "solver_velocity_iteration_count", solver_position_iteration_count
)
if value != -1:
solver_velocity_iteration_count.Set(value)
def set_articulation_sleep_threshold(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
sleep_threshold = arti_api.GetSleepThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "sleep_threshold", sleep_threshold)
if value != -1:
sleep_threshold.Set(value)
def set_articulation_stabilization_threshold(self, name, prim, value=None):
arti_api = self._get_physx_articulation_api(prim)
stabilization_threshold = arti_api.GetStabilizationThresholdAttr()
if value is None:
value = self._get_actor_config_value(name, "stabilization_threshold", stabilization_threshold)
if value != -1:
stabilization_threshold.Set(value)
def apply_rigid_body_settings(self, name, prim, cfg, is_articulation):
from pxr import PhysxSchema, UsdPhysics
stage = omni.usd.get_context().get_stage()
rb_api = UsdPhysics.RigidBodyAPI.Get(stage, prim.GetPath())
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Get(stage, prim.GetPath())
if not physx_rb_api:
physx_rb_api = PhysxSchema.PhysxRigidBodyAPI.Apply(prim)
# if it's a body in an articulation, it's handled at articulation root
if not is_articulation:
self.make_kinematic(name, prim, cfg, cfg["make_kinematic"])
self.set_position_iteration(name, prim, cfg["solver_position_iteration_count"])
self.set_velocity_iteration(name, prim, cfg["solver_velocity_iteration_count"])
self.set_max_depenetration_velocity(name, prim, cfg["max_depenetration_velocity"])
self.set_sleep_threshold(name, prim, cfg["sleep_threshold"])
self.set_stabilization_threshold(name, prim, cfg["stabilization_threshold"])
self.set_gyroscopic_forces(name, prim, cfg["enable_gyroscopic_forces"])
# density and mass
mass_api = UsdPhysics.MassAPI.Get(stage, prim.GetPath())
        if not mass_api:
mass_api = UsdPhysics.MassAPI.Apply(prim)
mass_attr = mass_api.GetMassAttr()
density_attr = mass_api.GetDensityAttr()
if not mass_attr:
mass_attr = mass_api.CreateMassAttr()
if not density_attr:
density_attr = mass_api.CreateDensityAttr()
if cfg["density"] != -1:
density_attr.Set(cfg["density"])
mass_attr.Set(0.0) # mass is to be computed
elif cfg["override_usd_defaults"] and not density_attr.IsAuthored() and not mass_attr.IsAuthored():
density_attr.Set(self._physx_params["density"])
self.retain_acceleration(prim)
def apply_rigid_shape_settings(self, name, prim, cfg):
from pxr import PhysxSchema, UsdPhysics
stage = omni.usd.get_context().get_stage()
# collision APIs
collision_api = UsdPhysics.CollisionAPI(prim)
if not collision_api:
collision_api = UsdPhysics.CollisionAPI.Apply(prim)
physx_collision_api = PhysxSchema.PhysxCollisionAPI(prim)
if not physx_collision_api:
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(prim)
self.set_contact_offset(name, prim, cfg["contact_offset"])
self.set_rest_offset(name, prim, cfg["rest_offset"])
def apply_articulation_settings(self, name, prim, cfg):
from pxr import PhysxSchema, UsdPhysics
stage = omni.usd.get_context().get_stage()
is_articulation = False
# check if is articulation
prims = [prim]
while len(prims) > 0:
prim_tmp = prims.pop(0)
articulation_api = UsdPhysics.ArticulationRootAPI.Get(stage, prim_tmp.GetPath())
physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Get(stage, prim_tmp.GetPath())
if articulation_api or physx_articulation_api:
is_articulation = True
children_prims = prim_tmp.GetPrim().GetChildren()
prims = prims + children_prims
# parse through all children prims
prims = [prim]
while len(prims) > 0:
cur_prim = prims.pop(0)
rb = UsdPhysics.RigidBodyAPI.Get(stage, cur_prim.GetPath())
collision_body = UsdPhysics.CollisionAPI.Get(stage, cur_prim.GetPath())
articulation = UsdPhysics.ArticulationRootAPI.Get(stage, cur_prim.GetPath())
if rb:
self.apply_rigid_body_settings(name, cur_prim, cfg, is_articulation)
if collision_body:
self.apply_rigid_shape_settings(name, cur_prim, cfg)
if articulation:
articulation_api = UsdPhysics.ArticulationRootAPI.Get(stage, cur_prim.GetPath())
physx_articulation_api = PhysxSchema.PhysxArticulationAPI.Get(stage, cur_prim.GetPath())
# enable self collisions
enable_self_collisions = physx_articulation_api.GetEnabledSelfCollisionsAttr()
if cfg["enable_self_collisions"] != -1:
enable_self_collisions.Set(cfg["enable_self_collisions"])
self.set_articulation_position_iteration(name, cur_prim, cfg["solver_position_iteration_count"])
self.set_articulation_velocity_iteration(name, cur_prim, cfg["solver_velocity_iteration_count"])
self.set_articulation_sleep_threshold(name, cur_prim, cfg["sleep_threshold"])
self.set_articulation_stabilization_threshold(name, cur_prim, cfg["stabilization_threshold"])
children_prims = cur_prim.GetPrim().GetChildren()
prims = prims + children_prims
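# --- Editor sketch (not part of the original file) ----------------------------
# A hedged example of how SimConfig is typically consumed by a task: build it
# from the merged Hydra config, query the physics parameters, then apply the
# per-actor options to the articulation prim. The actor name and prim path below
# are placeholders:
#
#   sim_config = SimConfig(cfg_dict)                        # cfg_dict = omegaconf_to_dict(cfg)
#   physics_params = sim_config.get_physics_params()        # merged sim + physx dict
#   actor_options = sim_config.parse_actor_config("cartpole")
#   sim_config.apply_articulation_settings(
#       "cartpole", stage.GetPrimAtPath("/World/envs/env_0/Cartpole"), actor_options
#   )
# -------------------------------------------------------------------------------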
| 20,932 | Python | 42.792887 | 117 | 0.632429 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/utils/config_utils/default_scene_params.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
default_physx_params = {
### Per-scene settings
"use_gpu": False,
"worker_thread_count": 4,
"solver_type": 1, # 0: PGS, 1:TGS
"bounce_threshold_velocity": 0.2,
"friction_offset_threshold": 0.04, # A threshold of contact separation distance used to decide if a contact
# point will experience friction forces.
"friction_correlation_distance": 0.025, # Contact points can be merged into a single friction anchor if the
# distance between the contacts is smaller than correlation distance.
# disabling these can be useful for debugging
"enable_sleeping": True,
"enable_stabilization": True,
# GPU buffers
"gpu_max_rigid_contact_count": 512 * 1024,
"gpu_max_rigid_patch_count": 80 * 1024,
"gpu_found_lost_pairs_capacity": 1024,
"gpu_found_lost_aggregate_pairs_capacity": 1024,
"gpu_total_aggregate_pairs_capacity": 1024,
"gpu_max_soft_body_contacts": 1024 * 1024,
"gpu_max_particle_contacts": 1024 * 1024,
"gpu_heap_capacity": 64 * 1024 * 1024,
"gpu_temp_buffer_capacity": 16 * 1024 * 1024,
"gpu_max_num_partitions": 8,
"gpu_collision_stack_size": 64 * 1024 * 1024,
### Per-actor settings ( can override in actor_options )
"solver_position_iteration_count": 4,
"solver_velocity_iteration_count": 1,
"sleep_threshold": 0.0, # Mass-normalized kinetic energy threshold below which an actor may go to sleep.
# Allowed range [0, max_float).
"stabilization_threshold": 0.0, # Mass-normalized kinetic energy threshold below which an actor may
# participate in stabilization. Allowed range [0, max_float).
### Per-body settings ( can override in actor_options )
"enable_gyroscopic_forces": False,
"density": 1000.0, # density to be used for bodies that do not specify mass or density
"max_depenetration_velocity": 100.0,
### Per-shape settings ( can override in actor_options )
"contact_offset": 0.02,
"rest_offset": 0.001,
}
default_physics_material = {"static_friction": 1.0, "dynamic_friction": 1.0, "restitution": 0.0}
default_sim_params = {
"gravity": [0.0, 0.0, -9.81],
"dt": 1.0 / 60.0,
"rendering_dt": -1.0, # we don't want to override this if it's set from cfg
"substeps": 1,
"use_gpu_pipeline": True,
"add_ground_plane": True,
"add_distant_light": True,
"use_fabric": True,
"enable_scene_query_support": False,
"enable_cameras": False,
"disable_contact_processing": False,
"default_physics_material": default_physics_material,
}
default_actor_options = {
    # -1 means use the value authored in USD, or the default value from default_physx_params
    # if the attribute is not explicitly authored in USD and override_usd_defaults is True
    # (in that case the attribute is set explicitly, overriding the USD default).
"override_usd_defaults": False,
"make_kinematic": -1,
"enable_self_collisions": -1,
"enable_gyroscopic_forces": -1,
"solver_position_iteration_count": -1,
"solver_velocity_iteration_count": -1,
"sleep_threshold": -1,
"stabilization_threshold": -1,
"max_depenetration_velocity": -1,
"density": -1,
"mass": -1,
"contact_offset": -1,
"rest_offset": -1,
}
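# --- Editor sketch (not part of the original file) ----------------------------
# A minimal illustration of how a task config's `sim` section conceptually
# overrides these defaults, mirroring SimConfig._parse_config: known keys replace
# the default value, unknown keys are only reported. The override values below
# are made up for the example.
if __name__ == "__main__":
    import copy

    task_sim_cfg = {"dt": 1.0 / 120.0, "use_gpu_pipeline": False, "not_a_real_option": 42}
    sim_params = copy.deepcopy(default_sim_params)
    for opt, val in task_sim_cfg.items():
        if opt in sim_params:
            sim_params[opt] = val
        else:
            print("Sim params does not have attribute: ", opt)
    # sim_params now carries dt=1/120 and use_gpu_pipeline=False on top of the defaults
    print(sim_params["dt"], sim_params["use_gpu_pipeline"])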
| 4,783 | Python | 44.132075 | 119 | 0.703951 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/utils/config_utils/path_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import carb
from hydra.utils import to_absolute_path
def is_valid_local_file(path):
return os.path.isfile(path)
def is_valid_ov_file(path):
import omni.client
result, entry = omni.client.stat(path)
return result == omni.client.Result.OK
def download_ov_file(source_path, target_path):
import omni.client
result = omni.client.copy(source_path, target_path)
if result == omni.client.Result.OK:
return True
return False
def break_ov_path(path):
import omni.client
return omni.client.break_url(path)
def retrieve_checkpoint_path(path):
# check if it's a local path
if is_valid_local_file(path):
return to_absolute_path(path)
# check if it's an OV path
elif is_valid_ov_file(path):
ov_path = break_ov_path(path)
file_name = os.path.basename(ov_path.path)
target_path = f"checkpoints/{file_name}"
copy_to_local = download_ov_file(path, target_path)
return to_absolute_path(target_path)
else:
carb.log_error(f"Invalid checkpoint path: {path}. Does the file exist?")
return None
def get_experience(headless, enable_livestream, enable_viewport, kit_app):
if kit_app == '':
if enable_viewport:
experience = os.path.abspath(os.path.join('../apps', 'omni.isaac.sim.python.gym.camera.kit'))
else:
experience = f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.gym.kit'
if headless and not enable_livestream:
experience = f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.gym.headless.kit'
else:
experience = kit_app
return experience
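# --- Editor sketch (not part of the original file) ----------------------------
# Hedged usage examples; the paths and flag values below are assumptions.
#
#   retrieve_checkpoint_path("runs/Cartpole/nn/Cartpole.pth")
#       -> absolute local path if the file exists, a local copy under
#          ./checkpoints/ for an omniverse:// URL, or None (with an error log)
#          if neither is valid.
#
#   get_experience(headless=True, enable_livestream=False, enable_viewport=False, kit_app="")
#       -> f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.gym.headless.kit'
# -------------------------------------------------------------------------------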
| 3,226 | Python | 34.855555 | 105 | 0.713887 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/utils/hydra_cfg/hydra_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import hydra
from omegaconf import DictConfig, OmegaConf
## OmegaConf & Hydra Config
# Resolvers used in hydra configs (see https://omegaconf.readthedocs.io/en/2.1_branch/usage.html#resolvers)
if not OmegaConf.has_resolver("eq"):
OmegaConf.register_new_resolver("eq", lambda x, y: x.lower() == y.lower())
if not OmegaConf.has_resolver("contains"):
OmegaConf.register_new_resolver("contains", lambda x, y: x.lower() in y.lower())
if not OmegaConf.has_resolver("if"):
OmegaConf.register_new_resolver("if", lambda pred, a, b: a if pred else b)
# allows us to resolve default arguments which are copied in multiple places in the config;
# used primarily for num_envs
if not OmegaConf.has_resolver("resolve_default"):
OmegaConf.register_new_resolver("resolve_default", lambda default, arg: default if arg == "" else arg)
| 2,394 | Python | 51.065216 | 110 | 0.767753 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/utils/terrain_utils/terrain_utils.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from math import sqrt
import numpy as np
from numpy.random import choice
from omni.isaac.core.prims import XFormPrim
from pxr import Gf, PhysxSchema, Sdf, UsdPhysics
from scipy import interpolate
def random_uniform_terrain(
terrain,
min_height,
max_height,
step=1,
downsampled_scale=None,
):
"""
Generate a uniform noise terrain
    Parameters:
terrain (SubTerrain): the terrain
min_height (float): the minimum height of the terrain [meters]
max_height (float): the maximum height of the terrain [meters]
step (float): minimum height change between two points [meters]
        downsampled_scale (float): distance between two randomly sampled points (must be larger than or equal to terrain.horizontal_scale)
"""
if downsampled_scale is None:
downsampled_scale = terrain.horizontal_scale
# switch parameters to discrete units
min_height = int(min_height / terrain.vertical_scale)
max_height = int(max_height / terrain.vertical_scale)
step = int(step / terrain.vertical_scale)
heights_range = np.arange(min_height, max_height + step, step)
height_field_downsampled = np.random.choice(
heights_range,
(
int(terrain.width * terrain.horizontal_scale / downsampled_scale),
int(terrain.length * terrain.horizontal_scale / downsampled_scale),
),
)
x = np.linspace(0, terrain.width * terrain.horizontal_scale, height_field_downsampled.shape[0])
y = np.linspace(0, terrain.length * terrain.horizontal_scale, height_field_downsampled.shape[1])
f = interpolate.RectBivariateSpline(y, x, height_field_downsampled)
x_upsampled = np.linspace(0, terrain.width * terrain.horizontal_scale, terrain.width)
y_upsampled = np.linspace(0, terrain.length * terrain.horizontal_scale, terrain.length)
z_upsampled = np.rint(f(y_upsampled, x_upsampled))
terrain.height_field_raw += z_upsampled.astype(np.int16)
return terrain
def sloped_terrain(terrain, slope=1):
"""
Generate a sloped terrain
Parameters:
terrain (SubTerrain): the terrain
slope (int): positive or negative slope
Returns:
        terrain (SubTerrain): updated terrain
"""
x = np.arange(0, terrain.width)
y = np.arange(0, terrain.length)
xx, yy = np.meshgrid(x, y, sparse=True)
xx = xx.reshape(terrain.width, 1)
max_height = int(slope * (terrain.horizontal_scale / terrain.vertical_scale) * terrain.width)
terrain.height_field_raw[:, np.arange(terrain.length)] += (max_height * xx / terrain.width).astype(
terrain.height_field_raw.dtype
)
return terrain
def pyramid_sloped_terrain(terrain, slope=1, platform_size=1.0):
"""
    Generate a pyramid sloped terrain
Parameters:
terrain (terrain): the terrain
slope (int): positive or negative slope
platform_size (float): size of the flat platform at the center of the terrain [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
x = np.arange(0, terrain.width)
y = np.arange(0, terrain.length)
center_x = int(terrain.width / 2)
center_y = int(terrain.length / 2)
xx, yy = np.meshgrid(x, y, sparse=True)
xx = (center_x - np.abs(center_x - xx)) / center_x
yy = (center_y - np.abs(center_y - yy)) / center_y
xx = xx.reshape(terrain.width, 1)
yy = yy.reshape(1, terrain.length)
max_height = int(slope * (terrain.horizontal_scale / terrain.vertical_scale) * (terrain.width / 2))
terrain.height_field_raw += (max_height * xx * yy).astype(terrain.height_field_raw.dtype)
platform_size = int(platform_size / terrain.horizontal_scale / 2)
x1 = terrain.width // 2 - platform_size
x2 = terrain.width // 2 + platform_size
y1 = terrain.length // 2 - platform_size
y2 = terrain.length // 2 + platform_size
min_h = min(terrain.height_field_raw[x1, y1], 0)
max_h = max(terrain.height_field_raw[x1, y1], 0)
terrain.height_field_raw = np.clip(terrain.height_field_raw, min_h, max_h)
return terrain
def discrete_obstacles_terrain(terrain, max_height, min_size, max_size, num_rects, platform_size=1.0):
"""
    Generate a terrain with randomly placed rectangular obstacles
Parameters:
terrain (terrain): the terrain
max_height (float): maximum height of the obstacles (range=[-max, -max/2, max/2, max]) [meters]
min_size (float): minimum size of a rectangle obstacle [meters]
max_size (float): maximum size of a rectangle obstacle [meters]
num_rects (int): number of randomly generated obstacles
platform_size (float): size of the flat platform at the center of the terrain [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
max_height = int(max_height / terrain.vertical_scale)
min_size = int(min_size / terrain.horizontal_scale)
max_size = int(max_size / terrain.horizontal_scale)
platform_size = int(platform_size / terrain.horizontal_scale)
(i, j) = terrain.height_field_raw.shape
height_range = [-max_height, -max_height // 2, max_height // 2, max_height]
width_range = range(min_size, max_size, 4)
length_range = range(min_size, max_size, 4)
for _ in range(num_rects):
width = np.random.choice(width_range)
length = np.random.choice(length_range)
start_i = np.random.choice(range(0, i - width, 4))
start_j = np.random.choice(range(0, j - length, 4))
terrain.height_field_raw[start_i : start_i + width, start_j : start_j + length] = np.random.choice(height_range)
x1 = (terrain.width - platform_size) // 2
x2 = (terrain.width + platform_size) // 2
y1 = (terrain.length - platform_size) // 2
y2 = (terrain.length + platform_size) // 2
terrain.height_field_raw[x1:x2, y1:y2] = 0
return terrain
def wave_terrain(terrain, num_waves=1, amplitude=1.0):
"""
Generate a wavy terrain
Parameters:
terrain (terrain): the terrain
num_waves (int): number of sine waves across the terrain length
Returns:
        terrain (SubTerrain): updated terrain
"""
amplitude = int(0.5 * amplitude / terrain.vertical_scale)
if num_waves > 0:
div = terrain.length / (num_waves * np.pi * 2)
x = np.arange(0, terrain.width)
y = np.arange(0, terrain.length)
xx, yy = np.meshgrid(x, y, sparse=True)
xx = xx.reshape(terrain.width, 1)
yy = yy.reshape(1, terrain.length)
terrain.height_field_raw += (amplitude * np.cos(yy / div) + amplitude * np.sin(xx / div)).astype(
terrain.height_field_raw.dtype
)
return terrain
def stairs_terrain(terrain, step_width, step_height):
"""
    Generate stairs
Parameters:
terrain (terrain): the terrain
step_width (float): the width of the step [meters]
step_height (float): the height of the step [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
step_width = int(step_width / terrain.horizontal_scale)
step_height = int(step_height / terrain.vertical_scale)
num_steps = terrain.width // step_width
height = step_height
for i in range(num_steps):
terrain.height_field_raw[i * step_width : (i + 1) * step_width, :] += height
height += step_height
return terrain
def pyramid_stairs_terrain(terrain, step_width, step_height, platform_size=1.0):
"""
Generate stairs
Parameters:
terrain (terrain): the terrain
step_width (float): the width of the step [meters]
step_height (float): the step_height [meters]
platform_size (float): size of the flat platform at the center of the terrain [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
step_width = int(step_width / terrain.horizontal_scale)
step_height = int(step_height / terrain.vertical_scale)
platform_size = int(platform_size / terrain.horizontal_scale)
height = 0
start_x = 0
stop_x = terrain.width
start_y = 0
stop_y = terrain.length
while (stop_x - start_x) > platform_size and (stop_y - start_y) > platform_size:
start_x += step_width
stop_x -= step_width
start_y += step_width
stop_y -= step_width
height += step_height
terrain.height_field_raw[start_x:stop_x, start_y:stop_y] = height
return terrain
def stepping_stones_terrain(terrain, stone_size, stone_distance, max_height, platform_size=1.0, depth=-10):
"""
Generate a stepping stones terrain
Parameters:
terrain (terrain): the terrain
stone_size (float): horizontal size of the stepping stones [meters]
        stone_distance (float): distance between stones (i.e. size of the holes) [meters]
max_height (float): maximum height of the stones (positive and negative) [meters]
platform_size (float): size of the flat platform at the center of the terrain [meters]
depth (float): depth of the holes (default=-10.) [meters]
Returns:
        terrain (SubTerrain): updated terrain
"""
# switch parameters to discrete units
stone_size = int(stone_size / terrain.horizontal_scale)
stone_distance = int(stone_distance / terrain.horizontal_scale)
max_height = int(max_height / terrain.vertical_scale)
platform_size = int(platform_size / terrain.horizontal_scale)
height_range = np.arange(-max_height - 1, max_height, step=1)
start_x = 0
start_y = 0
terrain.height_field_raw[:, :] = int(depth / terrain.vertical_scale)
if terrain.length >= terrain.width:
while start_y < terrain.length:
stop_y = min(terrain.length, start_y + stone_size)
start_x = np.random.randint(0, stone_size)
# fill first hole
stop_x = max(0, start_x - stone_distance)
terrain.height_field_raw[0:stop_x, start_y:stop_y] = np.random.choice(height_range)
# fill row
while start_x < terrain.width:
stop_x = min(terrain.width, start_x + stone_size)
terrain.height_field_raw[start_x:stop_x, start_y:stop_y] = np.random.choice(height_range)
start_x += stone_size + stone_distance
start_y += stone_size + stone_distance
elif terrain.width > terrain.length:
while start_x < terrain.width:
stop_x = min(terrain.width, start_x + stone_size)
start_y = np.random.randint(0, stone_size)
# fill first hole
stop_y = max(0, start_y - stone_distance)
terrain.height_field_raw[start_x:stop_x, 0:stop_y] = np.random.choice(height_range)
# fill column
while start_y < terrain.length:
stop_y = min(terrain.length, start_y + stone_size)
terrain.height_field_raw[start_x:stop_x, start_y:stop_y] = np.random.choice(height_range)
start_y += stone_size + stone_distance
start_x += stone_size + stone_distance
x1 = (terrain.width - platform_size) // 2
x2 = (terrain.width + platform_size) // 2
y1 = (terrain.length - platform_size) // 2
y2 = (terrain.length + platform_size) // 2
terrain.height_field_raw[x1:x2, y1:y2] = 0
return terrain
def convert_heightfield_to_trimesh(height_field_raw, horizontal_scale, vertical_scale, slope_threshold=None):
"""
Convert a heightfield array to a triangle mesh represented by vertices and triangles.
    Optionally, corrects vertical surfaces above the provided slope threshold:
If (y2-y1)/(x2-x1) > slope_threshold -> Move A to A' (set x1 = x2). Do this for all directions.
B(x2,y2)
/|
/ |
/ |
(x1,y1)A---A'(x2',y1)
Parameters:
height_field_raw (np.array): input heightfield
horizontal_scale (float): horizontal scale of the heightfield [meters]
vertical_scale (float): vertical scale of the heightfield [meters]
slope_threshold (float): the slope threshold above which surfaces are made vertical. If None no correction is applied (default: None)
Returns:
vertices (np.array(float)): array of shape (num_vertices, 3). Each row represents the location of each vertex [meters]
triangles (np.array(int)): array of shape (num_triangles, 3). Each row represents the indices of the 3 vertices connected by this triangle.
"""
hf = height_field_raw
num_rows = hf.shape[0]
num_cols = hf.shape[1]
y = np.linspace(0, (num_cols - 1) * horizontal_scale, num_cols)
x = np.linspace(0, (num_rows - 1) * horizontal_scale, num_rows)
yy, xx = np.meshgrid(y, x)
if slope_threshold is not None:
slope_threshold *= horizontal_scale / vertical_scale
move_x = np.zeros((num_rows, num_cols))
move_y = np.zeros((num_rows, num_cols))
move_corners = np.zeros((num_rows, num_cols))
move_x[: num_rows - 1, :] += hf[1:num_rows, :] - hf[: num_rows - 1, :] > slope_threshold
move_x[1:num_rows, :] -= hf[: num_rows - 1, :] - hf[1:num_rows, :] > slope_threshold
move_y[:, : num_cols - 1] += hf[:, 1:num_cols] - hf[:, : num_cols - 1] > slope_threshold
move_y[:, 1:num_cols] -= hf[:, : num_cols - 1] - hf[:, 1:num_cols] > slope_threshold
move_corners[: num_rows - 1, : num_cols - 1] += (
hf[1:num_rows, 1:num_cols] - hf[: num_rows - 1, : num_cols - 1] > slope_threshold
)
move_corners[1:num_rows, 1:num_cols] -= (
hf[: num_rows - 1, : num_cols - 1] - hf[1:num_rows, 1:num_cols] > slope_threshold
)
xx += (move_x + move_corners * (move_x == 0)) * horizontal_scale
yy += (move_y + move_corners * (move_y == 0)) * horizontal_scale
# create triangle mesh vertices and triangles from the heightfield grid
vertices = np.zeros((num_rows * num_cols, 3), dtype=np.float32)
vertices[:, 0] = xx.flatten()
vertices[:, 1] = yy.flatten()
vertices[:, 2] = hf.flatten() * vertical_scale
triangles = -np.ones((2 * (num_rows - 1) * (num_cols - 1), 3), dtype=np.uint32)
for i in range(num_rows - 1):
ind0 = np.arange(0, num_cols - 1) + i * num_cols
ind1 = ind0 + 1
ind2 = ind0 + num_cols
ind3 = ind2 + 1
start = 2 * i * (num_cols - 1)
stop = start + 2 * (num_cols - 1)
triangles[start:stop:2, 0] = ind0
triangles[start:stop:2, 1] = ind3
triangles[start:stop:2, 2] = ind1
triangles[start + 1 : stop : 2, 0] = ind0
triangles[start + 1 : stop : 2, 1] = ind2
triangles[start + 1 : stop : 2, 2] = ind3
return vertices, triangles
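    # Editor note on convert_heightfield_to_trimesh (not original code): for an
    # R x C heightfield this produces R*C vertices and 2*(R-1)*(C-1) triangles,
    # two per grid cell, wound so that the face normals point up (+Z).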
def add_terrain_to_stage(stage, vertices, triangles, position=None, orientation=None):
num_faces = triangles.shape[0]
terrain_mesh = stage.DefinePrim("/World/terrain", "Mesh")
terrain_mesh.GetAttribute("points").Set(vertices)
terrain_mesh.GetAttribute("faceVertexIndices").Set(triangles.flatten())
terrain_mesh.GetAttribute("faceVertexCounts").Set(np.asarray([3] * num_faces))
terrain = XFormPrim(prim_path="/World/terrain", name="terrain", position=position, orientation=orientation)
UsdPhysics.CollisionAPI.Apply(terrain.prim)
# collision_api = UsdPhysics.MeshCollisionAPI.Apply(terrain.prim)
# collision_api.CreateApproximationAttr().Set("meshSimplification")
physx_collision_api = PhysxSchema.PhysxCollisionAPI.Apply(terrain.prim)
physx_collision_api.GetContactOffsetAttr().Set(0.02)
physx_collision_api.GetRestOffsetAttr().Set(0.00)
class SubTerrain:
def __init__(self, terrain_name="terrain", width=256, length=256, vertical_scale=1.0, horizontal_scale=1.0):
self.terrain_name = terrain_name
self.vertical_scale = vertical_scale
self.horizontal_scale = horizontal_scale
self.width = width
self.length = length
self.height_field_raw = np.zeros((self.width, self.length), dtype=np.int16)
| 17,645 | Python | 41.215311 | 147 | 0.649306 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/utils/terrain_utils/create_terrain_demo.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(SCRIPT_DIR)
import omni
from omni.isaac.kit import SimulationApp
import numpy as np
import torch
simulation_app = SimulationApp({"headless": False})
from abc import abstractmethod
from omni.isaac.core.tasks import BaseTask
from omni.isaac.core.prims import RigidPrimView, RigidPrim, XFormPrim
from omni.isaac.core import World
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.utils.prims import define_prim, get_prim_at_path
from omni.isaac.core.utils.nucleus import find_nucleus_server
from omni.isaac.core.utils.stage import add_reference_to_stage, get_current_stage
from omni.isaac.core.materials import PreviewSurface
from omni.isaac.cloner import GridCloner
from pxr import UsdPhysics, UsdLux, UsdShade, Sdf, Gf, UsdGeom, PhysxSchema
from terrain_utils import *
class TerrainCreation(BaseTask):
def __init__(self, name, num_envs, num_per_row, env_spacing, config=None, offset=None,) -> None:
BaseTask.__init__(self, name=name, offset=offset)
self._num_envs = num_envs
self._num_per_row = num_per_row
self._env_spacing = env_spacing
self._device = "cpu"
self._cloner = GridCloner(self._env_spacing, self._num_per_row)
self._cloner.define_base_env(self.default_base_env_path)
define_prim(self.default_zero_env_path)
@property
def default_base_env_path(self):
return "/World/envs"
@property
def default_zero_env_path(self):
return f"{self.default_base_env_path}/env_0"
def set_up_scene(self, scene) -> None:
self._stage = get_current_stage()
distantLight = UsdLux.DistantLight.Define(self._stage, Sdf.Path("/World/DistantLight"))
distantLight.CreateIntensityAttr(2000)
self.get_terrain()
self.get_ball()
super().set_up_scene(scene)
prim_paths = self._cloner.generate_paths("/World/envs/env", self._num_envs)
print(f"cloning {self._num_envs} environments...")
self._env_pos = self._cloner.clone(
source_prim_path="/World/envs/env_0",
prim_paths=prim_paths
)
return
def get_terrain(self):
# create all available terrain types
        num_terrains = 8
terrain_width = 12.
terrain_length = 12.
horizontal_scale = 0.25 # [m]
vertical_scale = 0.005 # [m]
num_rows = int(terrain_width/horizontal_scale)
num_cols = int(terrain_length/horizontal_scale)
        heightfield = np.zeros((num_terrains*num_rows, num_cols), dtype=np.int16)
def new_sub_terrain():
return SubTerrain(width=num_rows, length=num_cols, vertical_scale=vertical_scale, horizontal_scale=horizontal_scale)
heightfield[0:num_rows, :] = random_uniform_terrain(new_sub_terrain(), min_height=-0.2, max_height=0.2, step=0.2, downsampled_scale=0.5).height_field_raw
heightfield[num_rows:2*num_rows, :] = sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw
heightfield[2*num_rows:3*num_rows, :] = pyramid_sloped_terrain(new_sub_terrain(), slope=-0.5).height_field_raw
heightfield[3*num_rows:4*num_rows, :] = discrete_obstacles_terrain(new_sub_terrain(), max_height=0.5, min_size=1., max_size=5., num_rects=20).height_field_raw
heightfield[4*num_rows:5*num_rows, :] = wave_terrain(new_sub_terrain(), num_waves=2., amplitude=1.).height_field_raw
heightfield[5*num_rows:6*num_rows, :] = stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw
heightfield[6*num_rows:7*num_rows, :] = pyramid_stairs_terrain(new_sub_terrain(), step_width=0.75, step_height=-0.5).height_field_raw
heightfield[7*num_rows:8*num_rows, :] = stepping_stones_terrain(new_sub_terrain(), stone_size=1.,
stone_distance=1., max_height=0.5, platform_size=0.).height_field_raw
vertices, triangles = convert_heightfield_to_trimesh(heightfield, horizontal_scale=horizontal_scale, vertical_scale=vertical_scale, slope_threshold=1.5)
position = np.array([-6.0, 48.0, 0])
orientation = np.array([0.70711, 0.0, 0.0, -0.70711])
add_terrain_to_stage(stage=self._stage, vertices=vertices, triangles=triangles, position=position, orientation=orientation)
def get_ball(self):
ball = DynamicSphere(prim_path=self.default_zero_env_path + "/ball",
name="ball",
translation=np.array([0.0, 0.0, 1.0]),
mass=0.5,
radius=0.2,)
def post_reset(self):
for i in range(self._num_envs):
ball_prim = self._stage.GetPrimAtPath(f"{self.default_base_env_path}/env_{i}/ball")
color = 0.5 + 0.5 * np.random.random(3)
visual_material = PreviewSurface(prim_path=f"{self.default_base_env_path}/env_{i}/ball/Looks/visual_material", color=color)
binding_api = UsdShade.MaterialBindingAPI(ball_prim)
binding_api.Bind(visual_material.material, bindingStrength=UsdShade.Tokens.strongerThanDescendants)
def get_observations(self):
pass
def calculate_metrics(self) -> None:
pass
def is_done(self) -> None:
pass
if __name__ == "__main__":
world = World(
stage_units_in_meters=1.0,
rendering_dt=1.0/60.0,
backend="torch",
device="cpu",
)
num_envs = 800
num_per_row = 80
env_spacing = 0.56*2
terrain_creation_task = TerrainCreation(name="TerrainCreation",
num_envs=num_envs,
num_per_row=num_per_row,
env_spacing=env_spacing,
)
world.add_task(terrain_creation_task)
world.reset()
while simulation_app.is_running():
if world.is_playing():
if world.current_time_step_index == 0:
world.reset(soft=True)
world.step(render=True)
else:
world.step(render=True)
    simulation_app.close()
| 7,869 | Python | 43.213483 | 166 | 0.650654
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/utils/usd_utils/create_instanceable_assets.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import omni.client
import omni.usd
from pxr import Sdf, UsdGeom
def update_reference(source_prim_path, source_reference_path, target_reference_path):
stage = omni.usd.get_context().get_stage()
prims = [stage.GetPrimAtPath(source_prim_path)]
while len(prims) > 0:
prim = prims.pop(0)
prim_spec = stage.GetRootLayer().GetPrimAtPath(prim.GetPath())
reference_list = prim_spec.referenceList
refs = reference_list.GetAddedOrExplicitItems()
if len(refs) > 0:
for ref in refs:
if ref.assetPath == source_reference_path:
prim.GetReferences().RemoveReference(ref)
prim.GetReferences().AddReference(assetPath=target_reference_path, primPath=prim.GetPath())
prims = prims + prim.GetChildren()
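# --- Editor sketch (not part of the original file) ----------------------------
# Hedged example of calling update_reference to swap the USD file referenced by
# every prim under a root; the prim path and asset paths are placeholders:
#
#   update_reference(
#       source_prim_path="/robot",
#       source_reference_path="omniverse://localhost/Projects/robot.usd",
#       target_reference_path="omniverse://localhost/Projects/robot_instanceable.usd",
#   )
# -------------------------------------------------------------------------------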
def create_parent_xforms(asset_usd_path, source_prim_path, save_as_path=None):
"""Adds a new UsdGeom.Xform prim for each Mesh/Geometry prim under source_prim_path.
Moves material assignment to new parent prim if any exists on the Mesh/Geometry prim.
Args:
asset_usd_path (str): USD file path for asset
source_prim_path (str): USD path of root prim
save_as_path (str): USD file path for modified USD stage. Defaults to None, will save in same file.
"""
omni.usd.get_context().open_stage(asset_usd_path)
stage = omni.usd.get_context().get_stage()
prims = [stage.GetPrimAtPath(source_prim_path)]
edits = Sdf.BatchNamespaceEdit()
while len(prims) > 0:
prim = prims.pop(0)
print(prim)
if prim.GetTypeName() in ["Mesh", "Capsule", "Sphere", "Box"]:
new_xform = UsdGeom.Xform.Define(stage, str(prim.GetPath()) + "_xform")
print(prim, new_xform)
edits.Add(Sdf.NamespaceEdit.Reparent(prim.GetPath(), new_xform.GetPath(), 0))
continue
children_prims = prim.GetChildren()
prims = prims + children_prims
stage.GetRootLayer().Apply(edits)
if save_as_path is None:
omni.usd.get_context().save_stage()
else:
omni.usd.get_context().save_as_stage(save_as_path)
def convert_asset_instanceable(asset_usd_path, source_prim_path, save_as_path=None, create_xforms=True):
"""Makes all mesh/geometry prims instanceable.
Can optionally add UsdGeom.Xform prim as parent for all mesh/geometry prims.
Makes a copy of the asset USD file, which will be used for referencing.
Updates asset file to convert all parent prims of mesh/geometry prims to reference cloned USD file.
Args:
asset_usd_path (str): USD file path for asset
source_prim_path (str): USD path of root prim
save_as_path (str): USD file path for modified USD stage. Defaults to None, will save in same file.
create_xforms (bool): Whether to add new UsdGeom.Xform prims to mesh/geometry prims.
"""
if create_xforms:
create_parent_xforms(asset_usd_path, source_prim_path, save_as_path)
asset_usd_path = save_as_path
instance_usd_path = ".".join(asset_usd_path.split(".")[:-1]) + "_meshes.usd"
omni.client.copy(asset_usd_path, instance_usd_path)
omni.usd.get_context().open_stage(asset_usd_path)
stage = omni.usd.get_context().get_stage()
prims = [stage.GetPrimAtPath(source_prim_path)]
while len(prims) > 0:
prim = prims.pop(0)
if prim:
if prim.GetTypeName() in ["Mesh", "Capsule", "Sphere", "Box"]:
parent_prim = prim.GetParent()
if parent_prim and not parent_prim.IsInstance():
parent_prim.GetReferences().AddReference(
assetPath=instance_usd_path, primPath=str(parent_prim.GetPath())
)
parent_prim.SetInstanceable(True)
continue
children_prims = prim.GetChildren()
prims = prims + children_prims
if save_as_path is None:
omni.usd.get_context().save_stage()
else:
omni.usd.get_context().save_as_stage(save_as_path)
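# --- Editor sketch (not part of the original file) ----------------------------
# Hedged usage example, intended for the Isaac Sim Script Editor; the asset
# paths and root prim path are placeholders, not verified project paths:
#
#   convert_asset_instanceable(
#       asset_usd_path="omniverse://localhost/Projects/robot.usd",
#       source_prim_path="/robot",
#       save_as_path="omniverse://localhost/Projects/robot_instanceable.usd",
#       create_xforms=True,
#   )
# -------------------------------------------------------------------------------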
| 5,627 | Python | 42.627907 | 111 | 0.67727 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/utils/usd_utils/create_instanceable_dofbot.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import omni.usd
import omni.client
from pxr import UsdGeom, Sdf, UsdPhysics, UsdShade
# Note: this script should be executed in Isaac Sim `Script Editor` window
def create_dofbot(asset_usd_path, dofbot_usd_path):
# Duplicate dofbot.usd file
omni.client.copy(asset_usd_path, dofbot_usd_path)
def create_dofbot_mesh(asset_usd_path, dofbot_mesh_usd_path):
# Create dofbot_mesh.usd file
omni.client.copy(asset_usd_path, dofbot_mesh_usd_path)
omni.usd.get_context().open_stage(dofbot_mesh_usd_path)
stage = omni.usd.get_context().get_stage()
edits = Sdf.BatchNamespaceEdit()
# Reparent joints in link5
for d in ['Left', 'Right']:
# Reparent finger 03 joint
new_parent_path = f'/arm/link5/Finger_{d}_03'
old_parent_path = f'{new_parent_path}/Finger_{d}_03'
joint_path = f'{old_parent_path}/Finger_{d}_03_RevoluteJoint'
edits.Add(Sdf.NamespaceEdit.Reparent(joint_path, new_parent_path, 0))
# Reparent finger 02 joint
new_parent_path = f'/arm/link5/Finger_{d}_02'
old_parent_path = f'{new_parent_path}/Finger_{d}_02'
joint_path = f'{old_parent_path}/Finger_{d}_02_RevoluteJoint'
edits.Add(Sdf.NamespaceEdit.Reparent(joint_path, new_parent_path, 0))
# Create parent Xforms
# Joint 1 & 2 & 3
reparent_tasks = [
# base_link
['/arm/base_link/visuals', 'visuals_xform'],
['/arm/base_link/PCB_01', 'visuals_xform'],
['/arm/base_link/Base01_01', 'visuals_xform'],
['/arm/base_link/Antennas_01', 'visuals_xform'],
['/arm/base_link/collisions', 'collisions_xform'],
# link1
['/arm/link1/visuals', 'visuals_xform'],
['/arm/link1/collisions', 'collisions_xform'],
# link2
['/arm/link2/visuals', 'visuals_xform'],
['/arm/link2/collisions', 'collisions_xform'],
# link3
['/arm/link3/visuals', 'visuals_xform'],
['/arm/link3/collisions', 'collisions_xform'],
# link4
['/arm/link4/Wrist_Lift', 'geoms_xform'],
['/arm/link4/Camera', 'geoms_xform'],
# link5
['/arm/link5/Wrist_Twist/Wrist_Twist', 'geoms_xform'],
['/arm/link5/Finger_Left_01/Finger_Left_01', 'geoms_xform'],
['/arm/link5/Finger_Right_01/Finger_Right_01', 'geoms_xform'],
['/arm/link5/Finger_Left_03/Finger_Left_03', 'geoms_xform'],
['/arm/link5/Finger_Right_03/Finger_Right_03', 'geoms_xform'],
['/arm/link5/Finger_Left_02/Finger_Left_02', 'geoms_xform'],
['/arm/link5/Finger_Right_02/Finger_Right_02', 'geoms_xform'],
] # [prim_path, parent_xform_name]
for task in reparent_tasks:
prim_path, parent_xform_name = task
old_parent_path = '/'.join(prim_path.split('/')[:-1])
new_parent_path = f'{old_parent_path}/{parent_xform_name}'
UsdGeom.Xform.Define(stage, new_parent_path)
edits.Add(Sdf.NamespaceEdit.Reparent(prim_path, new_parent_path, -1))
# Delete redundant materials
edits.Add(Sdf.NamespaceEdit.Remove('/arm/link5/Looks'))
stage.GetRootLayer().Apply(edits)
# Fix link5 joints
for d in ['Left', 'Right']:
# finger 01 revolute joints
joint_path = f'/arm/link5/Finger_{d}_01/Finger_{d}_01_RevoluteJoint'
joint = UsdPhysics.Joint.Get(stage, joint_path)
joint.GetBody1Rel().SetTargets(['/arm/link5/Wrist_Twist/geoms_xform/Wrist_Twist'])
# finger 03 revolute joints
joint_path = f'/arm/link5/Finger_{d}_03/Finger_{d}_03_RevoluteJoint'
joint = UsdPhysics.Joint.Get(stage, joint_path)
joint.GetBody0Rel().SetTargets([f'/arm/link5/Finger_{d}_03'])
joint.GetBody1Rel().SetTargets([f'/arm/link5/Finger_{d}_01/geoms_xform/Finger_{d}_01'])
# finger 02 spherical joints
joint_path = f'/arm/link5/Finger_{d}_02/Finger_{d}_02_SphericalJoint'
joint = UsdPhysics.Joint.Get(stage, joint_path)
joint.GetBody0Rel().SetTargets([f'/arm/link5/Finger_{d}_03/geoms_xform/Finger_{d}_03'])
joint.GetBody1Rel().SetTargets([f'/arm/link5/Finger_{d}_02/geoms_xform/Finger_{d}_02'])
# finger 02 revolute joints
joint_path = f'/arm/link5/Finger_{d}_02/Finger_{d}_02_RevoluteJoint'
joint = UsdPhysics.Joint.Get(stage, joint_path)
joint.GetBody0Rel().SetTargets([f'/arm/link5/Finger_{d}_02/geoms_xform/Finger_{d}_02'])
joint.GetBody1Rel().SetTargets(['/arm/link5/Wrist_Twist/geoms_xform/Wrist_Twist'])
for prim in stage.Traverse():
if prim.GetTypeName() == 'Xform':
# Copy Looks folder into visuals_xform and geoms_xform
path = str(prim.GetPath())
if path.endswith('visuals_xform') or path.endswith('geoms_xform'):
omni.usd.duplicate_prim(stage, '/arm/Looks', f'{path}/Looks')
ref = stage.GetPrimAtPath(f'{path}/Looks').GetReferences()
ref.ClearReferences()
ref.AddReference('./dofbot_materials.usd')
pass
elif prim.GetTypeName() == 'GeomSubset':
# Bind GeomSubset to local materials
path = str(prim.GetPath())
parent_xform_path = path.split('/')
while parent_xform_path[-1] != 'visuals_xform' and parent_xform_path[-1] != 'geoms_xform':
parent_xform_path.pop()
parent_xform_path = '/'.join(parent_xform_path)
name = path.split('/')[-1]
material = UsdShade.Material.Get(stage, f'{parent_xform_path}/Looks/{name}')
UsdShade.MaterialBindingAPI(prim).Bind(material) # , UsdShade.Tokens.strongerThanDescendants)
edits = Sdf.BatchNamespaceEdit()
edits.Add(Sdf.NamespaceEdit.Remove('/arm/Looks'))
stage.GetRootLayer().Apply(edits)
# Save to file
omni.usd.get_context().save_stage()
def create_dofbot_materials(asset_usd_path, dofbot_materials_usd_path):
# Create dofbot_materials.usd file
omni.client.copy(asset_usd_path, dofbot_materials_usd_path)
omni.usd.get_context().open_stage(dofbot_materials_usd_path)
stage = omni.usd.get_context().get_stage()
edits = Sdf.BatchNamespaceEdit()
# Extract Looks folder
edits.Add(Sdf.NamespaceEdit.Reparent('/arm/Looks', '/', 0))
# Remove everything else
edits.Add(Sdf.NamespaceEdit.Remove('/World'))
edits.Add(Sdf.NamespaceEdit.Remove('/arm'))
# Apply & save to file
stage.GetRootLayer().Apply(edits)
prim = stage.GetPrimAtPath('/Looks')
stage.SetDefaultPrim(prim)
omni.usd.get_context().save_stage()
def create_dofbot_instanceable(dofbot_mesh_usd_path, dofbot_instanceable_usd_path):
omni.client.copy(dofbot_mesh_usd_path, dofbot_instanceable_usd_path)
omni.usd.get_context().open_stage(dofbot_instanceable_usd_path)
stage = omni.usd.get_context().get_stage()
# Set up references and instanceables
for prim in stage.Traverse():
if prim.GetTypeName() != 'Xform':
continue
# Add reference to visuals_xform, collisions_xform, geoms_xform, and make them instanceable
path = str(prim.GetPath())
if path.endswith('visuals_xform') or path.endswith('collisions_xform') or path.endswith('geoms_xform'):
ref = prim.GetReferences()
ref.ClearReferences()
ref.AddReference('./dofbot_mesh.usd', path)
prim.SetInstanceable(True)
# Save to file
omni.usd.get_context().save_stage()
def create_block_indicator():
for suffix in ['', '_instanceable']:
asset_usd_path = f'omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Props/Blocks/block{suffix}.usd'
block_usd_path = f'omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Props/Blocks/block{suffix}.usd'
omni.client.copy(asset_usd_path, block_usd_path)
omni.usd.get_context().open_stage(block_usd_path)
stage = omni.usd.get_context().get_stage()
edits = Sdf.BatchNamespaceEdit()
edits.Add(Sdf.NamespaceEdit.Remove('/object/object/collisions'))
stage.GetRootLayer().Apply(edits)
omni.usd.get_context().save_stage()
if __name__ == '__main__':
asset_usd_path = 'omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Robots/Dofbot/dofbot.usd'
dofbot_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Robots/Dofbot/dofbot.usd'
dofbot_materials_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Robots/Dofbot/dofbot_materials.usd'
dofbot_mesh_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Robots/Dofbot/dofbot_mesh.usd'
dofbot_instanceable_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Robots/Dofbot/dofbot_instanceable.usd'
create_dofbot(asset_usd_path, dofbot_usd_path)
create_dofbot_materials(asset_usd_path, dofbot_materials_usd_path)
create_dofbot_mesh(asset_usd_path, dofbot_mesh_usd_path)
create_dofbot_instanceable(dofbot_mesh_usd_path, dofbot_instanceable_usd_path)
create_block_indicator()
print("Done!")
| 10,636 | Python | 49.174528 | 133 | 0.668578 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/utils/usd_utils/create_instanceable_dofbot_from_urdf.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Ref: https://docs.omniverse.nvidia.com/isaacsim/latest/advanced_tutorials/tutorial_advanced_import_urdf.html#importing-urdf-using-python
import os
import omni.kit.commands
import omni.usd
import omni.client
from omni.importer.urdf import _urdf
from omni.isaac.core.utils.extensions import get_extension_path_from_name
from pxr import Sdf, UsdGeom
def create_dofbot_from_urdf(urdf_path, usd_path, mesh_usd_path, instanceable_usd_path):
# Set the settings in the import config
import_config = _urdf.ImportConfig()
import_config.merge_fixed_joints = False
import_config.convex_decomp = False
import_config.import_inertia_tensor = False
import_config.fix_base = True
import_config.make_default_prim = True
import_config.self_collision = False
import_config.create_physics_scene = True
    # The two values below follow the Dofbot USD file provided by NVIDIA
# Joint 5 should be damping = 10, stiffness = 1000, but we ignore it for now
import_config.default_drive_strength = 1048.0
import_config.default_position_drive_damping = 53.0
import_config.default_drive_type = _urdf.UrdfJointTargetType.JOINT_DRIVE_POSITION
import_config.distance_scale = 1
import_config.density = 0.0
    # Import the robot & save it as USD
result, prim_path = omni.kit.commands.execute(
"URDFParseAndImportFile", urdf_path=urdf_path,
import_config=import_config, dest_path=usd_path,
)
    import_config.make_instanceable = True
    import_config.instanceable_usd_path = mesh_usd_path
    # Import the robot again & save it as an instanceable USD
result, prim_path = omni.kit.commands.execute(
"URDFParseAndImportFile", urdf_path=urdf_path,
import_config=import_config, dest_path=instanceable_usd_path,
)
def create_block_indicator():
for suffix in ['', '_instanceable']:
asset_usd_path = f'omniverse://localhost/NVIDIA/Assets/Isaac/2023.1.0/Isaac/Props/Blocks/block{suffix}.usd'
block_usd_path = f'omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Props/Blocks/block{suffix}.usd'
omni.client.copy(asset_usd_path, block_usd_path)
omni.usd.get_context().open_stage(block_usd_path)
stage = omni.usd.get_context().get_stage()
edits = Sdf.BatchNamespaceEdit()
edits.Add(Sdf.NamespaceEdit.Remove('/object/object/collisions'))
stage.GetRootLayer().Apply(edits)
omni.usd.get_context().save_stage()
if __name__ == '__main__':
dofbot_urdf_path = f'{os.path.expanduser("~")}/OmniIsaacGymEnvs-DofbotReacher/thirdparty/dofbot_info/urdf/dofbot.urdf'
dofbot_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Robots/Dofbot/dofbot_urdf.usd'
dofbot_mesh_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Robots/Dofbot/dofbot_urdf_instanceable_meshes.usd'
dofbot_instanceable_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2023.1.0/Isaac/Robots/Dofbot/dofbot_urdf_instanceable.usd'
create_dofbot_from_urdf(dofbot_urdf_path, dofbot_usd_path, dofbot_mesh_usd_path, dofbot_instanceable_usd_path)
create_block_indicator()
print("Done!")
| 4,749 | Python | 51.197802 | 138 | 0.744999 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/robots/articulations/shadow_hand.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
import carb
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omniisaacgymenvs.tasks.utils.usd_utils import set_drive
from pxr import Gf, PhysxSchema, Sdf, Usd, UsdGeom, UsdPhysics
class ShadowHand(Robot):
def __init__(
self,
prim_path: str,
name: Optional[str] = "shadow_hand",
usd_path: Optional[str] = None,
translation: Optional[torch.tensor] = None,
orientation: Optional[torch.tensor] = None,
) -> None:
self._usd_path = usd_path
self._name = name
if self._usd_path is None:
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
self._usd_path = assets_root_path + "/Isaac/Robots/ShadowHand/shadow_hand_instanceable.usd"
self._position = torch.tensor([0.0, 0.0, 0.5]) if translation is None else translation
self._orientation = torch.tensor([1.0, 0.0, 0.0, 0.0]) if orientation is None else orientation
add_reference_to_stage(self._usd_path, prim_path)
super().__init__(
prim_path=prim_path,
name=name,
translation=self._position,
orientation=self._orientation,
articulation_controller=None,
)
def set_shadow_hand_properties(self, stage, shadow_hand_prim):
for link_prim in shadow_hand_prim.GetChildren():
if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI):
rb = PhysxSchema.PhysxRigidBodyAPI.Get(stage, link_prim.GetPrimPath())
rb.GetDisableGravityAttr().Set(True)
rb.GetRetainAccelerationsAttr().Set(True)
def set_motor_control_mode(self, stage, shadow_hand_path):
joints_config = {
"robot0_WRJ1": {"stiffness": 5, "damping": 0.5, "max_force": 4.785},
"robot0_WRJ0": {"stiffness": 5, "damping": 0.5, "max_force": 2.175},
"robot0_FFJ3": {"stiffness": 1, "damping": 0.1, "max_force": 0.9},
"robot0_FFJ2": {"stiffness": 1, "damping": 0.1, "max_force": 0.9},
"robot0_FFJ1": {"stiffness": 1, "damping": 0.1, "max_force": 0.7245},
"robot0_MFJ3": {"stiffness": 1, "damping": 0.1, "max_force": 0.9},
"robot0_MFJ2": {"stiffness": 1, "damping": 0.1, "max_force": 0.9},
"robot0_MFJ1": {"stiffness": 1, "damping": 0.1, "max_force": 0.7245},
"robot0_RFJ3": {"stiffness": 1, "damping": 0.1, "max_force": 0.9},
"robot0_RFJ2": {"stiffness": 1, "damping": 0.1, "max_force": 0.9},
"robot0_RFJ1": {"stiffness": 1, "damping": 0.1, "max_force": 0.7245},
"robot0_LFJ4": {"stiffness": 1, "damping": 0.1, "max_force": 0.9},
"robot0_LFJ3": {"stiffness": 1, "damping": 0.1, "max_force": 0.9},
"robot0_LFJ2": {"stiffness": 1, "damping": 0.1, "max_force": 0.9},
"robot0_LFJ1": {"stiffness": 1, "damping": 0.1, "max_force": 0.7245},
"robot0_THJ4": {"stiffness": 1, "damping": 0.1, "max_force": 2.3722},
"robot0_THJ3": {"stiffness": 1, "damping": 0.1, "max_force": 1.45},
"robot0_THJ2": {"stiffness": 1, "damping": 0.1, "max_force": 0.99},
"robot0_THJ1": {"stiffness": 1, "damping": 0.1, "max_force": 0.99},
"robot0_THJ0": {"stiffness": 1, "damping": 0.1, "max_force": 0.81},
}
for joint_name, config in joints_config.items():
set_drive(
f"{self.prim_path}/joints/{joint_name}",
"angular",
"position",
0.0,
config["stiffness"] * np.pi / 180,
config["damping"] * np.pi / 180,
config["max_force"],
)
| 5,517 | Python | 46.982608 | 103 | 0.623527 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/robots/articulations/cabinet.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
from typing import Optional
import carb
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
class Cabinet(Robot):
def __init__(
self,
prim_path: str,
name: Optional[str] = "cabinet",
usd_path: Optional[str] = None,
translation: Optional[torch.tensor] = None,
orientation: Optional[torch.tensor] = None,
) -> None:
"""[summary]"""
self._usd_path = usd_path
self._name = name
if self._usd_path is None:
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
self._usd_path = assets_root_path + "/Isaac/Props/Sektion_Cabinet/sektion_cabinet_instanceable.usd"
add_reference_to_stage(self._usd_path, prim_path)
self._position = torch.tensor([0.0, 0.0, 0.4]) if translation is None else translation
self._orientation = torch.tensor([0.1, 0.0, 0.0, 0.0]) if orientation is None else orientation
super().__init__(
prim_path=prim_path,
name=name,
translation=self._position,
orientation=self._orientation,
articulation_controller=None,
)
| 1,819 | Python | 35.399999 | 111 | 0.660803 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/robots/articulations/franka.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import math
from typing import Optional
import carb
import numpy as np
import torch
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from omniisaacgymenvs.tasks.utils.usd_utils import set_drive
from pxr import PhysxSchema
class Franka(Robot):
def __init__(
self,
prim_path: str,
name: Optional[str] = "franka",
usd_path: Optional[str] = None,
translation: Optional[torch.tensor] = None,
orientation: Optional[torch.tensor] = None,
) -> None:
"""[summary]"""
self._usd_path = usd_path
self._name = name
self._position = torch.tensor([1.0, 0.0, 0.0]) if translation is None else translation
self._orientation = torch.tensor([0.0, 0.0, 0.0, 1.0]) if orientation is None else orientation
if self._usd_path is None:
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find Isaac Sim assets folder")
self._usd_path = assets_root_path + "/Isaac/Robots/Franka/franka_instanceable.usd"
add_reference_to_stage(self._usd_path, prim_path)
super().__init__(
prim_path=prim_path,
name=name,
translation=self._position,
orientation=self._orientation,
articulation_controller=None,
)
dof_paths = [
"panda_link0/panda_joint1",
"panda_link1/panda_joint2",
"panda_link2/panda_joint3",
"panda_link3/panda_joint4",
"panda_link4/panda_joint5",
"panda_link5/panda_joint6",
"panda_link6/panda_joint7",
"panda_hand/panda_finger_joint1",
"panda_hand/panda_finger_joint2",
]
drive_type = ["angular"] * 7 + ["linear"] * 2
default_dof_pos = [math.degrees(x) for x in [0.0, -1.0, 0.0, -2.2, 0.0, 2.4, 0.8]] + [0.02, 0.02]
stiffness = [400 * np.pi / 180] * 7 + [10000] * 2
damping = [80 * np.pi / 180] * 7 + [100] * 2
max_force = [87, 87, 87, 87, 12, 12, 12, 200, 200]
max_velocity = [math.degrees(x) for x in [2.175, 2.175, 2.175, 2.175, 2.61, 2.61, 2.61]] + [0.2, 0.2]
for i, dof in enumerate(dof_paths):
set_drive(
prim_path=f"{self.prim_path}/{dof}",
drive_type=drive_type[i],
target_type="position",
target_value=default_dof_pos[i],
stiffness=stiffness[i],
damping=damping[i],
max_force=max_force[i],
)
PhysxSchema.PhysxJointAPI(get_prim_at_path(f"{self.prim_path}/{dof}")).CreateMaxJointVelocityAttr().Set(
max_velocity[i]
)
def set_franka_properties(self, stage, prim):
for link_prim in prim.GetChildren():
if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI):
rb = PhysxSchema.PhysxRigidBodyAPI.Get(stage, link_prim.GetPrimPath())
rb.GetDisableGravityAttr().Set(True)
| 3,653 | Python | 37.0625 | 116 | 0.599781 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/robots/articulations/anymal.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
import carb
import numpy as np
import torch
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.robots.robot import Robot
from omni.isaac.core.utils.nucleus import get_assets_root_path
from omni.isaac.core.utils.stage import add_reference_to_stage
from pxr import PhysxSchema
class Anymal(Robot):
def __init__(
self,
prim_path: str,
name: Optional[str] = "Anymal",
usd_path: Optional[str] = None,
translation: Optional[np.ndarray] = None,
orientation: Optional[np.ndarray] = None,
) -> None:
"""[summary]"""
self._usd_path = usd_path
self._name = name
if self._usd_path is None:
assets_root_path = get_assets_root_path()
if assets_root_path is None:
carb.log_error("Could not find nucleus server with /Isaac folder")
self._usd_path = assets_root_path + "/Isaac/Robots/ANYbotics/anymal_instanceable.usd"
add_reference_to_stage(self._usd_path, prim_path)
super().__init__(
prim_path=prim_path,
name=name,
translation=translation,
orientation=orientation,
articulation_controller=None,
)
self._dof_names = [
"LF_HAA",
"LH_HAA",
"RF_HAA",
"RH_HAA",
"LF_HFE",
"LH_HFE",
"RF_HFE",
"RH_HFE",
"LF_KFE",
"LH_KFE",
"RF_KFE",
"RH_KFE",
]
@property
def dof_names(self):
return self._dof_names
def set_anymal_properties(self, stage, prim):
for link_prim in prim.GetChildren():
if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI):
rb = PhysxSchema.PhysxRigidBodyAPI.Get(stage, link_prim.GetPrimPath())
rb.GetDisableGravityAttr().Set(False)
rb.GetRetainAccelerationsAttr().Set(False)
rb.GetLinearDampingAttr().Set(0.0)
rb.GetMaxLinearVelocityAttr().Set(1000.0)
rb.GetAngularDampingAttr().Set(0.0)
rb.GetMaxAngularVelocityAttr().Set(64 / np.pi * 180)
def prepare_contacts(self, stage, prim):
for link_prim in prim.GetChildren():
if link_prim.HasAPI(PhysxSchema.PhysxRigidBodyAPI):
if "_HIP" not in str(link_prim.GetPrimPath()):
rb = PhysxSchema.PhysxRigidBodyAPI.Get(stage, link_prim.GetPrimPath())
rb.CreateSleepThresholdAttr().Set(0)
cr_api = PhysxSchema.PhysxContactReportAPI.Apply(link_prim)
cr_api.CreateThresholdAttr().Set(0)
| 4,273 | Python | 38.943925 | 97 | 0.648022 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/robots/articulations/views/cabinet_view.py | from typing import Optional
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
class CabinetView(ArticulationView):
def __init__(
self,
prim_paths_expr: str,
name: Optional[str] = "CabinetView",
) -> None:
"""[summary]"""
super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)
self._drawers = RigidPrimView(
prim_paths_expr="/World/envs/.*/cabinet/drawer_top", name="drawers_view", reset_xform_properties=False
)
| 586 | Python | 28.349999 | 114 | 0.653584 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/robots/articulations/views/shadow_hand_view.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
import torch
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
class ShadowHandView(ArticulationView):
def __init__(
self,
prim_paths_expr: str,
name: Optional[str] = "ShadowHandView",
) -> None:
super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)
self._fingers = RigidPrimView(
prim_paths_expr="/World/envs/.*/shadow_hand/robot0.*distal",
name="finger_view",
reset_xform_properties=False,
)
@property
def actuated_dof_indices(self):
return self._actuated_dof_indices
def initialize(self, physics_sim_view):
super().initialize(physics_sim_view)
self.actuated_joint_names = [
"robot0_WRJ1",
"robot0_WRJ0",
"robot0_FFJ3",
"robot0_FFJ2",
"robot0_FFJ1",
"robot0_MFJ3",
"robot0_MFJ2",
"robot0_MFJ1",
"robot0_RFJ3",
"robot0_RFJ2",
"robot0_RFJ1",
"robot0_LFJ4",
"robot0_LFJ3",
"robot0_LFJ2",
"robot0_LFJ1",
"robot0_THJ4",
"robot0_THJ3",
"robot0_THJ2",
"robot0_THJ1",
"robot0_THJ0",
]
self._actuated_dof_indices = list()
for joint_name in self.actuated_joint_names:
self._actuated_dof_indices.append(self.get_dof_index(joint_name))
self._actuated_dof_indices.sort()
limit_stiffness = torch.tensor([30.0] * self.num_fixed_tendons, device=self._device)
damping = torch.tensor([0.1] * self.num_fixed_tendons, device=self._device)
self.set_fixed_tendon_properties(dampings=damping, limit_stiffnesses=limit_stiffness)
fingertips = ["robot0_ffdistal", "robot0_mfdistal", "robot0_rfdistal", "robot0_lfdistal", "robot0_thdistal"]
self._sensor_indices = torch.tensor([self._body_indices[j] for j in fingertips], device=self._device, dtype=torch.long)
| 3,681 | Python | 38.591397 | 127 | 0.669383 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/robots/articulations/views/franka_view.py | from typing import Optional
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
class FrankaView(ArticulationView):
def __init__(
self,
prim_paths_expr: str,
name: Optional[str] = "FrankaView",
) -> None:
"""[summary]"""
super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)
self._hands = RigidPrimView(
prim_paths_expr="/World/envs/.*/franka/panda_link7", name="hands_view", reset_xform_properties=False
)
self._lfingers = RigidPrimView(
prim_paths_expr="/World/envs/.*/franka/panda_leftfinger", name="lfingers_view", reset_xform_properties=False
)
self._rfingers = RigidPrimView(
prim_paths_expr="/World/envs/.*/franka/panda_rightfinger",
name="rfingers_view",
reset_xform_properties=False,
)
def initialize(self, physics_sim_view):
super().initialize(physics_sim_view)
self._gripper_indices = [self.get_dof_index("panda_finger_joint1"), self.get_dof_index("panda_finger_joint2")]
@property
def gripper_indices(self):
return self._gripper_indices
| 1,241 | Python | 32.567567 | 120 | 0.637389 |
j3soon/OmniIsaacGymEnvs-DofbotReacher/omniisaacgymenvs/robots/articulations/views/factory_franka_view.py | from typing import Optional
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
class FactoryFrankaView(ArticulationView):
def __init__(
self,
prim_paths_expr: str,
name: Optional[str] = "FactoryFrankaView",
) -> None:
"""Initialize articulation view."""
super().__init__(
prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False
)
self._hands = RigidPrimView(
prim_paths_expr="/World/envs/.*/franka/panda_hand",
name="hands_view",
reset_xform_properties=False,
)
self._lfingers = RigidPrimView(
prim_paths_expr="/World/envs/.*/franka/panda_leftfinger",
name="lfingers_view",
reset_xform_properties=False,
track_contact_forces=True,
)
self._rfingers = RigidPrimView(
prim_paths_expr="/World/envs/.*/franka/panda_rightfinger",
name="rfingers_view",
reset_xform_properties=False,
track_contact_forces=True,
)
self._fingertip_centered = RigidPrimView(
prim_paths_expr="/World/envs/.*/franka/panda_fingertip_centered",
name="fingertips_view",
reset_xform_properties=False,
)
def initialize(self, physics_sim_view):
"""Initialize physics simulation view."""
super().initialize(physics_sim_view)
| 1,488 | Python | 31.369565 | 84 | 0.598118 |
j3soon/OmniIsaacGymEnvs-UR10Reacher/setup.py | """Installation script for the 'omniisaacgymenvs' python package."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from setuptools import setup, find_packages
import os
# Minimum dependencies required prior to installation
INSTALL_REQUIRES = [
"protobuf==3.20.1",
"omegaconf==2.1.1",
"hydra-core==1.1.1",
"redis==3.5.3", # needed by Ray on Windows
"rl-games==1.5.2"
]
# Installation operation
setup(
name="omniisaacgymenvs",
author="NVIDIA",
version="1.1.0",
description="RL environments for robot learning in NVIDIA Isaac Sim.",
keywords=["robotics", "rl"],
include_package_data=True,
install_requires=INSTALL_REQUIRES,
packages=find_packages("."),
classifiers=["Natural Language :: English", "Programming Language :: Python :: 3.7, 3.8"],
zip_safe=False,
)
# EOF
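# Example (added for clarity, not part of the original script): a typical editable install,
# assuming PYTHON_PATH is an alias for the Python interpreter bundled with Isaac Sim as
# described in the project README:
#   PYTHON_PATH -m pip install -e .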
| 890 | Python | 24.457142 | 94 | 0.678652 |
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/tasks/ur10_reacher.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from omniisaacgymenvs.sim2real.ur10 import RealWorldUR10
from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig
from omniisaacgymenvs.tasks.shared.reacher import ReacherTask
from omniisaacgymenvs.robots.articulations.views.ur10_view import UR10View
from omniisaacgymenvs.robots.articulations.ur10 import UR10
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.torch import *
from omni.isaac.gym.vec_env import VecEnvBase
import numpy as np
import torch
import math
class UR10ReacherTask(ReacherTask):
def __init__(
self,
name: str,
sim_config: SimConfig,
env: VecEnvBase,
offset=None
) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self.obs_type = self._task_cfg["env"]["observationType"]
if not (self.obs_type in ["full"]):
raise Exception(
"Unknown type of observations!\nobservationType should be one of: [full]")
print("Obs type:", self.obs_type)
self.num_obs_dict = {
"full": 29,
            # 6: UR10 joint positions (action space)
            # 6: UR10 joint velocities
# 3: goal position
# 4: goal rotation
# 4: goal relative rotation
# 6: previous action
}
self.object_scale = torch.tensor([1.0] * 3)
self.goal_scale = torch.tensor([2.0] * 3)
self._num_observations = self.num_obs_dict[self.obs_type]
self._num_actions = 6
self._num_states = 0
pi = math.pi
if self._task_cfg['safety']['enabled']:
# Depends on your real robot setup
self._dof_limits = torch.tensor([[
[np.deg2rad(-135), np.deg2rad(135)],
[np.deg2rad(-180), np.deg2rad(-60)],
[np.deg2rad(0), np.deg2rad(180)],
[np.deg2rad(-180), np.deg2rad(0)],
[np.deg2rad(-180), np.deg2rad(0)],
[np.deg2rad(-180), np.deg2rad(180)],
]], dtype=torch.float32, device=self._cfg["sim_device"])
else:
# For actions
self._dof_limits = torch.tensor([[
[-2*pi, 2*pi], # [-2*pi, 2*pi],
[-pi + pi/8, 0 - pi/8], # [-2*pi, 2*pi],
[-pi + pi/8, pi - pi/8], # [-2*pi, 2*pi],
[-pi, 0], # [-2*pi, 2*pi],
[-pi, pi], # [-2*pi, 2*pi],
[-2*pi, 2*pi], # [-2*pi, 2*pi],
]], dtype=torch.float32, device=self._cfg["sim_device"])
# The last action space cannot be [0, 0]
# It will introduce the following error:
# ValueError: Expected parameter loc (Tensor of shape (2048, 6)) of distribution Normal(loc: torch.Size([2048, 6]), scale: torch.Size([2048, 6])) to satisfy the constraint Real(), but found invalid values
ReacherTask.__init__(self, name=name, env=env)
# Setup Sim2Real
sim2real_config = self._task_cfg['sim2real']
if sim2real_config['enabled'] and self.test and self.num_envs == 1:
self.act_moving_average /= 5 # Reduce moving speed
self.real_world_ur10 = RealWorldUR10(
sim2real_config['fail_quietely'],
sim2real_config['verbose']
)
return
def get_num_dof(self):
return self._arms.num_dof
def get_arm(self):
ur10 = UR10(prim_path=self.default_zero_env_path + "/ur10", name="UR10")
self._sim_config.apply_articulation_settings(
"ur10",
get_prim_at_path(ur10.prim_path),
self._sim_config.parse_actor_config("ur10"),
)
def get_arm_view(self, scene):
arm_view = UR10View(prim_paths_expr="/World/envs/.*/ur10", name="ur10_view")
scene.add(arm_view._end_effectors)
return arm_view
def get_object_displacement_tensor(self):
return torch.tensor([0.0, 0.05, 0.0], device=self.device).repeat((self.num_envs, 1))
def get_observations(self):
self.arm_dof_pos = self._arms.get_joint_positions()
self.arm_dof_vel = self._arms.get_joint_velocities()
if self.obs_type == "full_no_vel":
self.compute_full_observations(True)
elif self.obs_type == "full":
self.compute_full_observations()
else:
print("Unkown observations type!")
observations = {
self._arms.name: {
"obs_buf": self.obs_buf
}
}
return observations
def get_reset_target_new_pos(self, n_reset_envs):
# Randomly generate goal positions, although the resulting goal may still not be reachable.
new_pos = torch_rand_float(-1, 1, (n_reset_envs, 3), device=self.device)
if self._task_cfg['sim2real']['enabled'] and self.test and self.num_envs == 1:
# Depends on your real robot setup
new_pos[:, 0] = torch.abs(new_pos[:, 0] * 0.1) + 0.35
new_pos[:, 1] = torch.abs(new_pos[:, 1] * 0.1) + 0.35
new_pos[:, 2] = torch.abs(new_pos[:, 2] * 0.5) + 0.3
else:
new_pos[:, 0] = new_pos[:, 0] * 0.4 + 0.5 * torch.sign(new_pos[:, 0])
new_pos[:, 1] = new_pos[:, 1] * 0.4 + 0.5 * torch.sign(new_pos[:, 1])
new_pos[:, 2] = torch.abs(new_pos[:, 2] * 0.8) + 0.1
if self._task_cfg['safety']['enabled']:
new_pos[:, 0] = torch.abs(new_pos[:, 0]) / 1.25
new_pos[:, 1] = torch.abs(new_pos[:, 1]) / 1.25
return new_pos
def compute_full_observations(self, no_vel=False):
if no_vel:
raise NotImplementedError()
else:
            # There is a lot of redundant information for the simple Reacher task, but we'll keep it for now.
self.obs_buf[:, 0:self.num_arm_dofs] = unscale(self.arm_dof_pos[:, :self.num_arm_dofs],
self.arm_dof_lower_limits, self.arm_dof_upper_limits)
self.obs_buf[:, self.num_arm_dofs:2*self.num_arm_dofs] = self.vel_obs_scale * self.arm_dof_vel[:, :self.num_arm_dofs]
base = 2 * self.num_arm_dofs
self.obs_buf[:, base+0:base+3] = self.goal_pos
self.obs_buf[:, base+3:base+7] = self.goal_rot
self.obs_buf[:, base+7:base+11] = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))
self.obs_buf[:, base+11:base+17] = self.actions
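            # Resulting observation layout (added for clarity, assuming num_arm_dofs == 6 for the UR10):
            #   [0:6]   scaled joint positions
            #   [6:12]  scaled joint velocities
            #   [12:15] goal position
            #   [15:19] goal rotation
            #   [19:23] relative rotation between object and goal
            #   [23:29] previous actions
            # for a total of 29 values, matching num_obs_dict["full"].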
def send_joint_pos(self, joint_pos):
self.real_world_ur10.send_joint_pos(joint_pos)
| 8,205 | Python | 42.882353 | 216 | 0.599878 |
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/tasks/base/rl_task.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import abstractmethod
import numpy as np
import torch
from gym import spaces
from omni.isaac.core.tasks import BaseTask
from omni.isaac.core.utils.types import ArticulationAction
from omni.isaac.core.utils.prims import define_prim
from omni.isaac.cloner import GridCloner
from omniisaacgymenvs.tasks.utils.usd_utils import create_distant_light
from omniisaacgymenvs.utils.domain_randomization.randomize import Randomizer
import omni.kit
class RLTask(BaseTask):
""" This class provides a PyTorch RL-specific interface for setting up RL tasks.
It includes utilities for setting up RL task related parameters,
cloning environments, and data collection for RL algorithms.
"""
def __init__(self, name, env, offset=None) -> None:
""" Initializes RL parameters, cloner object, and buffers.
Args:
name (str): name of the task.
env (VecEnvBase): an instance of the environment wrapper class to register task.
offset (Optional[np.ndarray], optional): offset applied to all assets of the task. Defaults to None.
"""
super().__init__(name=name, offset=offset)
self.test = self._cfg["test"]
self._device = self._cfg["sim_device"]
self._dr_randomizer = Randomizer(self._sim_config)
print("Task Device:", self._device)
self.randomize_actions = False
self.randomize_observations = False
self.clip_obs = self._cfg["task"]["env"].get("clipObservations", np.Inf)
self.clip_actions = self._cfg["task"]["env"].get("clipActions", np.Inf)
self.rl_device = self._cfg.get("rl_device", "cuda:0")
self.control_frequency_inv = self._cfg["task"]["env"].get("controlFrequencyInv", 1)
print("RL device: ", self.rl_device)
self._env = env
if not hasattr(self, "_num_agents"):
self._num_agents = 1 # used for multi-agent environments
if not hasattr(self, "_num_states"):
self._num_states = 0
# initialize data spaces (defaults to gym.Box)
if not hasattr(self, "action_space"):
self.action_space = spaces.Box(np.ones(self.num_actions) * -1.0, np.ones(self.num_actions) * 1.0)
if not hasattr(self, "observation_space"):
self.observation_space = spaces.Box(np.ones(self.num_observations) * -np.Inf, np.ones(self.num_observations) * np.Inf)
if not hasattr(self, "state_space"):
self.state_space = spaces.Box(np.ones(self.num_states) * -np.Inf, np.ones(self.num_states) * np.Inf)
self._cloner = GridCloner(spacing=self._env_spacing)
self._cloner.define_base_env(self.default_base_env_path)
define_prim(self.default_zero_env_path)
self.cleanup()
def cleanup(self) -> None:
""" Prepares torch buffers for RL data collection."""
# prepare tensors
self.obs_buf = torch.zeros((self._num_envs, self.num_observations), device=self._device, dtype=torch.float)
self.states_buf = torch.zeros((self._num_envs, self.num_states), device=self._device, dtype=torch.float)
self.rew_buf = torch.zeros(self._num_envs, device=self._device, dtype=torch.float)
self.reset_buf = torch.ones(self._num_envs, device=self._device, dtype=torch.long)
self.progress_buf = torch.zeros(self._num_envs, device=self._device, dtype=torch.long)
self.extras = {}
def set_up_scene(self, scene) -> None:
""" Clones environments based on value provided in task config and applies collision filters to mask
collisions across environments.
Args:
scene (Scene): Scene to add objects to.
"""
super().set_up_scene(scene)
collision_filter_global_paths = list()
if self._sim_config.task_config["sim"].get("add_ground_plane", True):
self._ground_plane_path = "/World/defaultGroundPlane"
collision_filter_global_paths.append(self._ground_plane_path)
scene.add_default_ground_plane(prim_path=self._ground_plane_path)
prim_paths = self._cloner.generate_paths("/World/envs/env", self._num_envs)
self._env_pos = self._cloner.clone(source_prim_path="/World/envs/env_0", prim_paths=prim_paths)
self._env_pos = torch.tensor(np.array(self._env_pos), device=self._device, dtype=torch.float)
self._cloner.filter_collisions(
self._env._world.get_physics_context().prim_path, "/World/collisions", prim_paths, collision_filter_global_paths)
self.set_initial_camera_params(camera_position=[10, 10, 3], camera_target=[0, 0, 0])
if self._sim_config.task_config["sim"].get("add_distant_light", True):
create_distant_light()
def set_initial_camera_params(self, camera_position=[10, 10, 3], camera_target=[0, 0, 0]):
if self._env._render:
viewport = omni.kit.viewport_legacy.get_default_viewport_window()
viewport.set_camera_position("/OmniverseKit_Persp", camera_position[0], camera_position[1], camera_position[2], True)
viewport.set_camera_target("/OmniverseKit_Persp", camera_target[0], camera_target[1], camera_target[2], True)
@property
def default_base_env_path(self):
""" Retrieves default path to the parent of all env prims.
Returns:
default_base_env_path(str): Defaults to "/World/envs".
"""
return "/World/envs"
@property
def default_zero_env_path(self):
""" Retrieves default path to the first env prim (index 0).
Returns:
default_zero_env_path(str): Defaults to "/World/envs/env_0".
"""
return f"{self.default_base_env_path}/env_0"
@property
def num_envs(self):
""" Retrieves number of environments for task.
Returns:
num_envs(int): Number of environments.
"""
return self._num_envs
@property
def num_actions(self):
""" Retrieves dimension of actions.
Returns:
num_actions(int): Dimension of actions.
"""
return self._num_actions
@property
def num_observations(self):
""" Retrieves dimension of observations.
Returns:
num_observations(int): Dimension of observations.
"""
return self._num_observations
@property
def num_states(self):
""" Retrieves dimesion of states.
Returns:
num_states(int): Dimension of states.
"""
return self._num_states
@property
def num_agents(self):
""" Retrieves number of agents for multi-agent environments.
Returns:
            num_agents(int): Number of agents.
"""
return self._num_agents
def get_states(self):
""" API for retrieving states buffer, used for asymmetric AC training.
Returns:
states_buf(torch.Tensor): States buffer.
"""
return self.states_buf
def get_extras(self):
""" API for retrieving extras data for RL.
Returns:
extras(dict): Dictionary containing extras data.
"""
return self.extras
def reset(self):
""" Flags all environments for reset.
"""
self.reset_buf = torch.ones_like(self.reset_buf)
def pre_physics_step(self, actions):
""" Optionally implemented by individual task classes to process actions.
Args:
actions (torch.Tensor): Actions generated by RL policy.
"""
pass
def post_physics_step(self):
""" Processes RL required computations for observations, states, rewards, resets, and extras.
Also maintains progress buffer for tracking step count per environment.
Returns:
obs_buf(torch.Tensor): Tensor of observation data.
rew_buf(torch.Tensor): Tensor of rewards data.
reset_buf(torch.Tensor): Tensor of resets/dones data.
extras(dict): Dictionary of extras data.
"""
self.progress_buf[:] += 1
if self._env._world.is_playing():
self.get_observations()
self.get_states()
self.calculate_metrics()
self.is_done()
self.get_extras()
return self.obs_buf, self.rew_buf, self.reset_buf, self.extras
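# Illustrative sketch (added for clarity, not part of the original file): concrete tasks subclass
# RLTask, set their buffer sizes before calling the base constructor, and override the hooks that
# post_physics_step drives each step. The task below is hypothetical and only shows the wiring;
# real tasks also create and register their articulation views in set_up_scene.
#
# class MyMinimalTask(RLTask):
#     def __init__(self, name, sim_config, env, offset=None):
#         self._sim_config = sim_config
#         self._cfg = sim_config.config
#         self._task_cfg = sim_config.task_config
#         self._num_envs = self._task_cfg["env"]["numEnvs"]
#         self._env_spacing = self._task_cfg["env"]["envSpacing"]
#         self._num_observations = 4
#         self._num_actions = 1
#         RLTask.__init__(self, name=name, env=env, offset=offset)
#
#     def pre_physics_step(self, actions):
#         self.actions = actions.clone().to(self._device)  # apply joint targets to the robot view here
#
#     def get_observations(self):
#         self.obs_buf[:] = 0.0  # fill with task-specific observations
#
#     def calculate_metrics(self):
#         self.rew_buf[:] = 0.0  # fill with task-specific rewards
#
#     def is_done(self):
#         self.reset_buf[:] = (self.progress_buf >= 500).long()  # flag environments to reset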
| 9,915 | Python | 38.349206 | 130 | 0.649723 |
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/sim2real/ur10.py | # Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import asyncio
import math
import numpy as np
try:
import rospy
# Ref: https://github.com/ros-controls/ros_controllers/blob/melodic-devel/rqt_joint_trajectory_controller/src/rqt_joint_trajectory_controller/joint_trajectory_controller.py
from control_msgs.msg import JointTrajectoryControllerState
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
except ImportError:
rospy = None
class RealWorldUR10():
# Defined in ur10.usd
sim_dof_angle_limits = [
(-360, 360, False),
(-360, 360, False),
(-360, 360, False),
(-360, 360, False),
(-360, 360, False),
(-360, 360, False),
    ] # sim_dof_angle_limits[:,2] == True indicates an inverted joint angle compared to the real robot
# Ref: https://github.com/ros-industrial/universal_robot/issues/112
pi = math.pi
servo_angle_limits = [
(-2*pi, 2*pi),
(-2*pi, 2*pi),
(-2*pi, 2*pi),
(-2*pi, 2*pi),
(-2*pi, 2*pi),
(-2*pi, 2*pi),
]
# ROS-related strings
state_topic = '/scaled_pos_joint_traj_controller/state'
cmd_topic = '/scaled_pos_joint_traj_controller/command'
joint_names = [
'elbow_joint',
'shoulder_lift_joint',
'shoulder_pan_joint',
'wrist_1_joint',
'wrist_2_joint',
'wrist_3_joint'
]
# Joint name mapping to simulation action index
joint_name_to_idx = {
'elbow_joint': 2,
'shoulder_lift_joint': 1,
'shoulder_pan_joint': 0,
'wrist_1_joint': 3,
'wrist_2_joint': 4,
'wrist_3_joint': 5
}
def __init__(self, fail_quietely=False, verbose=False) -> None:
print("Connecting to real-world UR10")
self.fail_quietely = fail_quietely
self.verbose = verbose
self.pub_freq = 10 # Hz
# Not really sure if current_pos and target_pos require mutex here.
self.current_pos = None
self.target_pos = None
if rospy is None:
if not self.fail_quietely:
raise ValueError("ROS is not installed!")
print("ROS is not installed!")
return
try:
rospy.init_node("custom_controller", anonymous=True, disable_signals=True, log_level=rospy.ERROR)
except rospy.exceptions.ROSException as e:
print("Node has already been initialized, do nothing")
if self.verbose:
print("Receiving real-world UR10 joint angles...")
print("If you didn't see any outputs, you may have set up UR5 or ROS incorrectly.")
self.sub = rospy.Subscriber(
self.state_topic,
JointTrajectoryControllerState,
self.sub_callback,
queue_size=1
)
self.pub = rospy.Publisher(
self.cmd_topic,
JointTrajectory,
queue_size=1
)
# self.min_traj_dur = 5.0 / self.pub_freq # Minimum trajectory duration
self.min_traj_dur = 0 # Minimum trajectory duration
# For catching exceptions in asyncio
def custom_exception_handler(loop, context):
print(context)
# Ref: https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.set_exception_handler
asyncio.get_event_loop().set_exception_handler(custom_exception_handler)
# Ref: https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/tutorial_ros_custom_message.html
asyncio.ensure_future(self.pub_task())
def sub_callback(self, msg):
# msg has type: JointTrajectoryControllerState
actual_pos = {}
for i in range(len(msg.joint_names)):
joint_name = msg.joint_names[i]
joint_pos = msg.actual.positions[i]
actual_pos[joint_name] = joint_pos
self.current_pos = actual_pos
if self.verbose:
print(f'(sub) {actual_pos}')
async def pub_task(self):
while not rospy.is_shutdown():
await asyncio.sleep(1.0 / self.pub_freq)
if self.current_pos is None:
                # Not ready yet (no UR joint state received)
continue
if self.target_pos is None:
# No command yet
continue
# Construct message
            dur = [] # move duration of each joint
traj = JointTrajectory()
traj.joint_names = self.joint_names
point = JointTrajectoryPoint()
moving_average = 1
for name in traj.joint_names:
pos = self.current_pos[name]
cmd = pos * (1-moving_average) + self.target_pos[self.joint_name_to_idx[name]] * moving_average
max_vel = 3.15 # from ur5.urdf (or ur5.urdf.xacro)
duration = abs(cmd - pos) / max_vel # time = distance / velocity
dur.append(max(duration, self.min_traj_dur))
point.positions.append(cmd)
point.time_from_start = rospy.Duration(max(dur))
traj.points.append(point)
self.pub.publish(traj)
print(f'(pub) {point.positions}')
def send_joint_pos(self, joint_pos):
if len(joint_pos) != 6:
raise Exception("The length of UR10 joint_pos is {}, but should be 6!".format(len(joint_pos)))
# Convert Sim angles to Real angles
target_pos = [0] * 6
for i, pos in enumerate(joint_pos):
if i == 5:
                # Skip the last joint (wrist_3); it is left at its default target for the Reacher task
continue
# Map [L, U] to [A, B]
L, U, inversed = self.sim_dof_angle_limits[i]
A, B = self.servo_angle_limits[i]
angle = np.rad2deg(float(pos))
if not L <= angle <= U:
print("The {}-th simulation joint angle ({}) is out of range! Should be in [{}, {}]".format(i, angle, L, U))
angle = np.clip(angle, L, U)
target_pos[i] = (angle - L) * ((B-A)/(U-L)) + A # Map [L, U] to [A, B]
if inversed:
target_pos[i] = (B-A) - (target_pos[i] - A) + A # Map [A, B] to [B, A]
if not A <= target_pos[i] <= B:
raise Exception("(Should Not Happen) The {}-th real world joint angle ({}) is out of range! hould be in [{}, {}]".format(i, target_pos[i], A, B))
self.target_pos = target_pos
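    # Worked example of the angle mapping in send_joint_pos above (added for clarity, with
    # made-up numbers): with sim limits [L, U] = [-360, 360] degrees and servo limits
    # [A, B] = [-2*pi, 2*pi] radians, a simulated joint angle of 90 degrees maps to
    #   (90 - (-360)) * ((2*pi - (-2*pi)) / (360 - (-360))) + (-2*pi)
    #   = 450 * (4*pi / 720) - 2*pi = 2.5*pi - 2*pi = 0.5*pi,
    # i.e. the same 90 degrees expressed in radians, which is what pub_task publishes to the
    # joint trajectory controller.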
if __name__ == "__main__":
print("Make sure you are running `roslaunch ur_robot_driver`.")
print("If the machine running Isaac is not the ROS master node, " + \
"make sure you have set the environment variables: " + \
"`ROS_MASTER_URI` and `ROS_HOSTNAME`/`ROS_IP` correctly.")
ur10 = RealWorldUR10(verbose=True)
rospy.spin()
| 8,268 | Python | 41.405128 | 176 | 0.608128 |
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/utils/usd_utils/create_instanceable_ur10.py | # Copyright (c) 2018-2022, NVIDIA Corporation
# Copyright (c) 2022-2023, Johnson Sun
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import omni.usd
import omni.client
from pxr import UsdGeom, Sdf, UsdPhysics, UsdShade
# Note: this script should be executed in Isaac Sim `Script Editor` window
def create_ur10(asset_dir_usd_path, ur10_dir_usd_path):
# Duplicate UR10 folder
omni.client.copy(asset_dir_usd_path, ur10_dir_usd_path)
def create_ur10_mesh(asset_usd_path, ur10_mesh_usd_path):
# Create ur10_mesh.usd file
omni.client.copy(asset_usd_path, ur10_mesh_usd_path)
omni.usd.get_context().open_stage(ur10_mesh_usd_path)
stage = omni.usd.get_context().get_stage()
edits = Sdf.BatchNamespaceEdit()
# Create parent Xforms
reparent_tasks = [
# base_link
['/ur10/base_link/cylinder', 'geoms_xform'],
['/ur10/base_link/ur10_base', 'geoms_xform'],
# shoulder_link
['/ur10/shoulder_link/cylinder', 'geoms_xform'],
['/ur10/shoulder_link/cylinder_0', 'geoms_xform'],
['/ur10/shoulder_link/ur10_shoulder', 'geoms_xform'],
# upper_arm_link
['/ur10/upper_arm_link/cylinder', 'geoms_xform'],
['/ur10/upper_arm_link/cylinder_0', 'geoms_xform'],
['/ur10/upper_arm_link/cylinder_1', 'geoms_xform'],
['/ur10/upper_arm_link/ur10_upper_arm', 'geoms_xform'],
# forearm_link
['/ur10/forearm_link/cylinder', 'geoms_xform'],
['/ur10/forearm_link/cylinder_0', 'geoms_xform'],
['/ur10/forearm_link/cylinder_1', 'geoms_xform'],
['/ur10/forearm_link/ur10_forearm', 'geoms_xform'],
# wrist_1_link
['/ur10/wrist_1_link/cylinder', 'geoms_xform'],
['/ur10/wrist_1_link/cylinder_0', 'geoms_xform'],
['/ur10/wrist_1_link/ur10_wrist_1', 'geoms_xform'],
# wrist_2_link
['/ur10/wrist_2_link/cylinder', 'geoms_xform'],
['/ur10/wrist_2_link/cylinder_0', 'geoms_xform'],
['/ur10/wrist_2_link/ur10_wrist_2', 'geoms_xform'],
# wrist_3_link
['/ur10/wrist_3_link/cylinder', 'geoms_xform'],
['/ur10/wrist_3_link/ur10_wrist_3', 'geoms_xform'],
] # [prim_path, parent_xform_name]
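    # Grouping each link's geometry under a 'geoms_xform' Xform lets create_ur10_instanceable() later reference those Xforms and mark them instanceable as single units.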
for task in reparent_tasks:
prim_path, parent_xform_name = task
old_parent_path = '/'.join(prim_path.split('/')[:-1])
new_parent_path = f'{old_parent_path}/{parent_xform_name}'
UsdGeom.Xform.Define(stage, new_parent_path)
edits.Add(Sdf.NamespaceEdit.Reparent(prim_path, new_parent_path, -1))
stage.GetRootLayer().Apply(edits)
# Save to file
omni.usd.get_context().save_stage()
def create_ur10_instanceable(ur10_mesh_usd_path, ur10_instanceable_usd_path):
omni.client.copy(ur10_mesh_usd_path, ur10_instanceable_usd_path)
omni.usd.get_context().open_stage(ur10_instanceable_usd_path)
stage = omni.usd.get_context().get_stage()
# Set up references and instanceables
for prim in stage.Traverse():
if prim.GetTypeName() != 'Xform':
continue
# Add reference to visuals_xform, collisions_xform, geoms_xform, and make them instanceable
path = str(prim.GetPath())
if path.endswith('visuals_xform') or path.endswith('collisions_xform') or path.endswith('geoms_xform'):
ref = prim.GetReferences()
ref.ClearReferences()
ref.AddReference('./ur10_mesh.usd', path)
prim.SetInstanceable(True)
# Save to file
omni.usd.get_context().save_stage()
def create_block_indicator():
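    # Copy the block assets and strip their collision prims so the blocks can be used as purely visual indicators.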
asset_usd_path = 'omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Props/Blocks/block.usd'
block_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2022.1/Isaac/Props/Blocks/block.usd'
omni.client.copy(asset_usd_path, block_usd_path)
omni.usd.get_context().open_stage(block_usd_path)
stage = omni.usd.get_context().get_stage()
edits = Sdf.BatchNamespaceEdit()
edits.Add(Sdf.NamespaceEdit.Remove('/object/object/collisions'))
stage.GetRootLayer().Apply(edits)
omni.usd.get_context().save_stage()
asset_usd_path = 'omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Props/Blocks/block_instanceable.usd'
block_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2022.1/Isaac/Props/Blocks/block_instanceable.usd'
omni.client.copy(asset_usd_path, block_usd_path)
omni.usd.get_context().open_stage(block_usd_path)
stage = omni.usd.get_context().get_stage()
edits = Sdf.BatchNamespaceEdit()
edits.Add(Sdf.NamespaceEdit.Remove('/object/object/collisions'))
stage.GetRootLayer().Apply(edits)
omni.usd.get_context().save_stage()
if __name__ == '__main__':
asset_dir_usd_path = 'omniverse://localhost/NVIDIA/Assets/Isaac/2022.1/Isaac/Robots/UR10'
ur10_dir_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2022.1/Isaac/Robots/UR10'
ur10_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2022.1/Isaac/Robots/UR10/ur10.usd'
ur10_mesh_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2022.1/Isaac/Robots/UR10/ur10_mesh.usd'
ur10_instanceable_usd_path = 'omniverse://localhost/Projects/J3soon/Isaac/2022.1/Isaac/Robots/UR10/ur10_instanceable.usd'
create_ur10(asset_dir_usd_path, ur10_dir_usd_path)
create_ur10_mesh(ur10_usd_path, ur10_mesh_usd_path)
create_ur10_instanceable(ur10_mesh_usd_path, ur10_instanceable_usd_path)
create_block_indicator()
print("Done!")
| 6,907 | Python | 48.342857 | 125 | 0.691617 |
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/views/cabinet_view.py |
from typing import Optional
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
class CabinetView(ArticulationView):
def __init__(
self,
prim_paths_expr: str,
name: Optional[str] = "CabinetView",
) -> None:
"""[summary]
"""
super().__init__(
prim_paths_expr=prim_paths_expr,
name=name,
reset_xform_properties=False
)
self._drawers = RigidPrimView(prim_paths_expr="/World/envs/.*/cabinet/drawer_top", name="drawers_view", reset_xform_properties=False) | 619 | Python | 25.956521 | 141 | 0.61874 |
j3soon/OmniIsaacGymEnvs-UR10Reacher/omniisaacgymenvs/robots/articulations/views/franka_view.py |
from typing import Optional
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.prims import RigidPrimView
class FrankaView(ArticulationView):
def __init__(
self,
prim_paths_expr: str,
name: Optional[str] = "FrankaView",
) -> None:
"""[summary]
"""
super().__init__(
prim_paths_expr=prim_paths_expr,
name=name,
reset_xform_properties=False
)
self._hands = RigidPrimView(prim_paths_expr="/World/envs/.*/franka/panda_link7", name="hands_view", reset_xform_properties=False)
self._lfingers = RigidPrimView(prim_paths_expr="/World/envs/.*/franka/panda_leftfinger", name="lfingers_view", reset_xform_properties=False)
self._rfingers = RigidPrimView(prim_paths_expr="/World/envs/.*/franka/panda_rightfinger", name="rfingers_view", reset_xform_properties=False)
def initialize(self, physics_sim_view):
super().initialize(physics_sim_view)
self._gripper_indices = [self.get_dof_index("panda_finger_joint1"), self.get_dof_index("panda_finger_joint2")]
@property
def gripper_indices(self):
return self._gripper_indices
| 1,220 | Python | 32.916666 | 150 | 0.648361 |
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/timedomain/ai/singer/instance.py | from .settings import BoolSetting, CategoricalSetting, SettingItem
class InstanceManagerBase:
def __init__(self):
self._settings = SettingItem("ace")
self._setting = CategoricalSetting("ace")
self.boolSetting = BoolSetting("ace")
def shutdown(self):
self._settings = None
self._setting = None
self.boolSetting = None | 375 | Python | 27.923075 | 66 | 0.653333 |
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/timedomain/ai/singer/utils_io.py | import os
import omni.client
A2F_SERVER_TYPE = "omniverse:"
def is_ov_path(path):
return A2F_SERVER_TYPE in path
def path_join(root, fname):
if A2F_SERVER_TYPE in root:
return f"{root}/{fname}"
else:
return os.path.normpath(os.path.join(root, fname))
def is_folder(path):
result, entry = omni.client.stat(path)
    # bitwise check: CAN_HAVE_CHILDREN is the folder flag (value 4)
return entry.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN
def is_valid_path(path):
result, entry = omni.client.stat(path)
return result == omni.client.Result.OK
def list_folder(path):
items = []
path = path.rstrip("/")
result, entries = omni.client.list(path)
if result != omni.client.Result.OK:
return items
for en in entries:
# Skip if it is a folder
if en.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN:
continue
name = en.relative_path
items.append(name)
return items
def read_file(fpath):
result, _str, bytes_data = omni.client.read_file(fpath)
if result != omni.client.Result.OK:
raise RuntimeError("Unable to read file: {}".format(fpath))
return bytes_data
def write_file(fpath, bytes_data):
result = omni.client.write_file(fpath, bytes_data)
if result != omni.client.Result.OK:
raise RuntimeError("Unable to write file: {}".format(fpath))
| 1,378 | Python | 23.625 | 68 | 0.650218 |
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/timedomain/ai/singer/styles.py | import os
import omni.ui as ui
from omni.ui import color as cl
ELEM_MARGIN = 4
BORDER_RADIUS = 4
VSPACING = ELEM_MARGIN * 2
RECORDER_BTN_WIDTH = 75
LABEL_WIDTH = 100
BTN_WIDTH = 40
BTN_HEIGHT = 16
WAVEFORM_HEIGHT = 22 * 2 + VSPACING + 10
ERROR_CLR = 0xCC7777FF
WARN_CLR = 0xCC77FFFF
KEYFRAME_CLR = 0xAAAA77FF
IMAGE_SIZE = 25
A2F_SERVER_TYPE = "omniverse:"
EXT_ROOT = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../"))
DATA_PATH = os.path.join(EXT_ROOT, "icons")
PlayBtnStyle = {"image_url": DATA_PATH + "/timeline_play.svg"}
PauseBtnStyle = {"image_url": DATA_PATH + "/timeline_pause.svg"}
ComposeBtnStyle = {"image_url": DATA_PATH + "/timeline_loop.svg"}
LoadingBtnStyle = {"image_url": DATA_PATH + "/loading.gif"}
LocationBtnStyle = {"image_url": DATA_PATH + "/folder.svg"}
AUDIO_FILE_TYPES = [".ufdata"]
StringFieldStyle = {"margin_height": 0, "margin_width": ELEM_MARGIN, "border_radius": BORDER_RADIUS}
ComboBoxStyle = {"border_radius": BORDER_RADIUS + 2}
HandlePlaybackStyle = {"border_radius": 0, "background_color": 0xFFEEEE33}
HandleRecordingStyle = {"border_radius": 0, "background_color": 0xFF3333EE}
HandleStreamingStyle = {"border_radius": 0, "background_color": 0xFF33EE33}
TrackWaveformStyle = {"margin_height": 0, "margin_width": 0, "border_radius": 0}
RangeStartSpacerStyle = {"border_width": 0, "padding": 0, "border_radius": 0, "margin_width": 0}
BigLableStyle = {"font_size": 16, "color": 0xFFFFFFFF}
SmallLableStyle = {"font_size": 14, "color": 0xFF4B4B4B}
ScrollingFrameStyle = {"background_color": 0xFF323232}
MainWindowStyle = {
"Image::header_frame": {"image_url": DATA_PATH + "/head.png"},
"Line::group_line": {"color": cl("#4B4B4B"), "margin_height": 0, "padding": 0},
"Slider::float_slider": {
"background_color": cl("#FF3300"),
"secondary_color": cl("#24211F"),
"border_radius": 3,
"corner_flag": ui.CornerFlag.ALL,
"draw_mode": ui.SliderDrawMode.FILLED,
},
}
PlaybackSliderBackgroundStyle = {
"background_color": 0xFF24211F,
"margin_height": 0,
"margin_width": 0,
"border_radius": 0,
}
LargeBtnStyle = {
"border_radius": BORDER_RADIUS,
"border_width": 0,
"font_size": 14,
"padding": ELEM_MARGIN * 2,
"margin_width": ELEM_MARGIN,
"margin_height": ELEM_MARGIN,
}
FileBrowseBtnStyle = {
"image_url": DATA_PATH + "/folder.svg",
"background_color": 0xFF333333,
":hovered": {"background_color": 0xFF9E9E9E},
}
ModalBtnStyle = {
"border_radius": BORDER_RADIUS,
"border_width": 0,
"font_size": 14,
"padding": ELEM_MARGIN * 2,
"margin_width": ELEM_MARGIN,
"margin_height": ELEM_MARGIN,
}
TrashBtnStyle = {
"image_url": "${glyphs}/trash.svg",
"background_color": 0xFF333333,
":hovered": {"background_color": 0xFF9E9E9E},
":disabled": {"color": 0x60FFFFFF},
}
TrashDarkBtnStyle = {
"image_url": "${glyphs}/trash.svg",
":hovered": {"background_color": 0xFF9E9E9E},
":disabled": {"color": 0x60FFFFFF},
}
PlusBtnStyle = {
"image_url": "${glyphs}/plus.svg",
"background_color": 0xFF333333,
":hovered": {"background_color": 0xFF9E9E9E},
":disabled": {"color": 0x60FFFFFF},
}
PlusDarkBtnStyle = {
"image_url": "${glyphs}/plus.svg",
":hovered": {"background_color": 0xFF9E9E9E},
":disabled": {"color": 0x60FFFFFF},
}
PlusDarkExcitedBtnStyle = {
"image_url": "${glyphs}/plus.svg",
"color": WARN_CLR,
":hovered": {"background_color": 0xFF9E9E9E},
":disabled": {"color": 0x60FFFFFF},
}
MinusDarkBtnStyle = {
"image_url": "${omni_audio2face_common_resources}/minus.png",
":hovered": {"background_color": 0xFF9E9E9E},
":disabled": {"color": 0x60FFFFFF},
}
AngleLeftDarkBtnStyle = {
"image_url": "${glyphs}/angle_left.svg",
":hovered": {"background_color": 0xFF9E9E9E},
":disabled": {"color": 0x60FFFFFF},
}
AngleRightDarkBtnStyle = {
"image_url": "${glyphs}/angle_right.svg",
":hovered": {"background_color": 0xFF9E9E9E},
":disabled": {"color": 0x60FFFFFF},
}
FileBrowseBtnStyle = {
"image_url": "resources/glyphs/folder.svg",
"background_color": 0xFF333333,
":hovered": {"background_color": 0xFF9E9E9E},
}
RangeRectStyle = {
"background_color": 0x30BBAB58,
"padding": 0,
"margin_width": 0,
"margin_height": 0,
"border_radius": 0,
"border_color": 0x70BBAB58,
"border_width": 1,
}
RangeRectRecordingStyle = {
"background_color": 0x305858BB,
"padding": 0,
"margin_width": 0,
"margin_height": 0,
"border_radius": 0,
"border_color": 0x705858BB,
"border_width": 1,
}
RangeRectStreamingStyle = {
"background_color": 0x3058BB58,
"padding": 0,
"margin_width": 0,
"margin_height": 0,
"border_radius": 0,
"border_color": 0x7058BB58,
"border_width": 1,
}
| 4,854 | Python | 26.275281 | 100 | 0.639885 |
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/timedomain/ai/singer/extension.py | from .styles import VSPACING, BigLableStyle, MainWindowStyle
from .ui import (
WAVEFORM_HEIGHT,
ButtonComposing,
ButtonLocation,
ButtonPlayPause,
CategoricalSettingWidgetWithReset,
PathWidgetWithReset,
FemaleEntertainerWidger,
TimecodeWidget,
TimelineWidget,
)
import omni.ext
import omni.ui as ui
import omni.client
class MyExtension(omni.ext.IExt):
def on_startup(self, ext_id):
print("[timedomain.ai.singer] MyExtension startup")
self._window = ui.Window("TIMEDOMAIN AI SINGER", width=840, height=650)
self._window.frame.set_build_fn(self.show_window)
self._window.frame.style = MainWindowStyle
def on_shutdown(self):
print("[timedomain.ai.singer] MyExtension shutdown")
self._root_path_widget = None
self._track_widget = None
self._range_widget = None
self.frame = None
self._btn_loop = None
self._timecode_widget.shutdown()
self._timecode_widget = None
self._btn_play.shutdown()
self._btn_play = None
self._timeline_widget.shutdown()
self._timeline_widget = None
self._btn_recorder = None
if self._window:
self._window.destroy()
self._window = None
def show_window(self):
with self._window.frame:
with ui.VStack(spacing=10):
self._root_path_widget = PathWidgetWithReset()
self._root_path_widget._build_content()
self._track_widget = CategoricalSettingWidgetWithReset()
self._track_widget._build_content()
with ui.VStack(height=5):
ui.Line(name="group_line", alignment=ui.Alignment.CENTER)
self.frame = FemaleEntertainerWidger()
self.frame._build_glyph()
with ui.HStack(height=0):
ui.Line(name="group_line", alignment=ui.Alignment.CENTER)
with ui.VStack(height=20):
ui.Label("Mix Your Voice Style", style=BigLableStyle)
self.frame._build_content()
self._btn_loop = ButtonComposing()
self._btn_loop._build_widget()
with ui.HStack(height=WAVEFORM_HEIGHT):
self._timeline_widget = TimelineWidget()
self._timeline_widget._build_content()
ui.Spacer(width=4)
with ui.VStack(spacing=VSPACING, width=0):
self._timecode_widget = TimecodeWidget()
self._timecode_widget._build_content()
with ui.HStack():
self._btn_play = ButtonPlayPause()
self._btn_play._build_content()
self._btn_recorder = ButtonLocation()
self._btn_recorder._build_widget()
| 2,911 | Python | 38.351351 | 79 | 0.567846 |
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/timedomain/ai/singer/settings.py | from typing import TypeVar
from pxr import Sdf
SettingType = TypeVar("SettingType", bound="SettingItem")
class SettingItem:
_val = None
_filename = None
_state = None
_mix_info = {
"duration": [],
"pitch": [],
"air": [],
"falsetto": [],
"tension": [],
"energy": [],
"mel": [],
}
def __init__(self, name):
self._name = name
self._init_fn = None
self._changed_fn = None
self._prim = None
self._default_val = None
self._org_default_val = None
self._initialized = False
def shutdown(self):
self._prim = None
def init(self, default_val=None, init_fn=None, changed_fn=None, prim=None):
self._init_fn = init_fn
self._changed_fn = changed_fn
self._prim = prim
self._default_val = self._check(default_val)
self._org_default_val = self._default_val
        SettingItem._val = self._default_val  # Fallback in case set_val(val) below fails
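        # Restore a previously saved value from the USD prim attribute if one exists; otherwise start from the default.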
if self._prim is not None and self._prim.HasAttribute(self.get_usd_attr_name()):
val = self._prim.GetAttribute(self.get_usd_attr_name()).Get()
else:
val = self._default_val
self.set_val(val, use_callback=True, use_init_fn=True)
self._initialized = True
def initialized(self):
return self._initialized
def get_name(self):
return self._name
def get_ui_name(self):
return self._name.replace("_", " ").title()
def get_usd_attr_name(self):
return f"state:setting_{self._name}"
def get_val(self):
if SettingItem._filename is not None:
SettingItem._state = False
return SettingItem._val
def get_default(self):
return self._default_val
def is_default(self):
return SettingItem._val == self._default_val
def set_val(self, val, use_callback=True, use_init_fn=False):
# val_checked = self._check(val)
# callback_fn = self._init_fn if use_init_fn else self._changed_fn
# val_prev = SettingItem._val
SettingItem._val = val
# if use_callback and callback_fn is not None:
# try:
# callback_fn(val_checked)
# except Exception as e:
# SettingItem._val = val_prev
# print(e)
# raise
# self._update_usd_prim_attr()
def set_default(self, default_val):
self._default_val = self._check(default_val)
def reset_default(self):
self._default_val = self._get_safe_default()
def reset(self):
self.set_val(self._default_val, use_callback=True, use_init_fn=False)
def get_usd_type(self):
raise NotImplementedError
def get_arr_usd_type(self):
raise NotImplementedError # Should be implemented in derived class
def to_arr_usd_data(self, arr):
raise NotImplementedError # Should be implemented in derived class
def from_arr_usd_data(self, arr, arr_len):
raise NotImplementedError # Should be implemented in derived class
def interpolate(self, val1, val2, alpha):
raise NotImplementedError # Should be implemented in derived class
def _update_usd_prim_attr(self):
if self._prim is not None and self._prim.IsValid():
if SettingItem._val is not None:
self._prim.CreateAttribute(self.get_usd_attr_name(), self.get_usd_type()).Set(SettingItem._val)
def _check(self, val):
return val
class CategoricalSetting(SettingItem):
def __init__(self, name, options=[], value=None):
self.options = options
self._value = value
super().__init__(name)
def init(self, default_val, init_fn, changed_fn, prim):
super().init(default_val, init_fn, changed_fn, prim)
def get_options(self):
if len(self._options) > 0:
SettingItem._filename = self._options[0]
return self._options
def set_options_and_keep(self, options):
self._options = options
# if SettingItem._val not in self._options:
# # log_warn(
# # f"Setting [{self.get_name()}]: Old value [{self._val}]
# # is not in the new list [{self._options}], resetting to default"
# # )
# self.reset_default()
# self.reset()
def set_options_and_reset(self, options):
self._options = options
self.reset_default()
self.reset()
def set_value(self, val):
self._value = val
SettingItem._filename = val
SettingItem._state = False
def get_value(self):
return self._value
def set_options_and_val(self, options, val):
self._options = options
self.reset_default()
self.set_value(val, use_callback=True, use_init_fn=False)
def get_index(self):
if self._value is not None:
BoolSetting._filename = self._value
return self._options.index(self._value)
else:
return None
def set_index(self, val_index):
val = self._options[val_index]
self.set_value(val)
def get_usd_type(self):
return Sdf.ValueTypeNames.String
def get_arr_usd_type(self):
return Sdf.ValueTypeNames.StringArray
def to_arr_usd_data(self, arr):
return list(arr)
def from_arr_usd_data(self, arr, arr_len):
return list(arr)
def interpolate(self, val1, val2, alpha):
return val1
def _get_safe_default(self):
if len(self._options) > 0:
return self._options[0]
else:
return None
def _check(self, val):
if val is None:
return self._get_safe_default()
if val not in self._options:
raise AttributeError(
f"Setting [{self.get_name()}]: value '{val}' is not in the list of options {self._options}"
)
return val
class BoolSetting(SettingItem):
def __init__(self, name):
super().__init__(name)
def init(self, default_val, init_fn, changed_fn, prim):
super().init(default_val, init_fn, changed_fn, prim)
def get_usd_type(self):
return Sdf.ValueTypeNames.Bool
def get_arr_usd_type(self):
return Sdf.ValueTypeNames.BoolArray
def to_arr_usd_data(self, arr):
return list(arr)
def from_arr_usd_data(self, arr, arr_len):
return list(arr)
def interpolate(self, val1, val2, alpha):
return val1
def toggle(self, use_callback=True):
pass
def get_state(self):
return SettingItem._state
def _get_safe_default(self):
return False
def _check(self, val):
if val is None:
return self._get_safe_default()
return bool(val)
| 6,831 | Python | 27.827004 | 111 | 0.580003 |
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/timedomain/ai/singer/ui.py | import os
import pathlib
import json
import omni.kit.pipapi
from .scripts.ui import BoolSettingWidgetBase, SimpleWidget
from threading import Thread
from .styles import (
A2F_SERVER_TYPE,
AUDIO_FILE_TYPES,
BTN_HEIGHT,
BTN_WIDTH,
DATA_PATH,
EXT_ROOT,
LABEL_WIDTH,
WAVEFORM_HEIGHT,
ComboBoxStyle,
FileBrowseBtnStyle,
HandlePlaybackStyle,
HandleRecordingStyle,
HandleStreamingStyle,
BigLableStyle,
LargeBtnStyle,
LocationBtnStyle,
PauseBtnStyle,
PlayBtnStyle,
PlaybackSliderBackgroundStyle,
RangeRectRecordingStyle,
RangeRectStreamingStyle,
RangeRectStyle,
RangeStartSpacerStyle,
ScrollingFrameStyle,
SmallLableStyle,
StringFieldStyle,
TrackWaveformStyle,
)
from .instance import InstanceManagerBase
import omni.client
import omni.ui as ui
import numpy as np
import scipy.ndimage
os.environ["PATH"] += os.pathsep + os.path.join(EXT_ROOT, "dep/ffmpeg")
omni.kit.pipapi.install("pydub")
omni.kit.pipapi.install("requests")
from pydub import AudioSegment
import requests
from .requestData import GetData
class PathWidgetWithReset(InstanceManagerBase):
def __init__(self):
super().__init__()
self._lbl = None
self._field_model = None
self._field = None
self._browse_btn = None
self._browse_dialog = None
def _on_browse_selected(self, filename, dirname):
if self._field is not None:
self._settings.set_val(dirname, use_callback=True)
if self._browse_dialog is not None:
self._browse_dialog.hide()
self._field_model.set_value(self._settings.get_val())
def _on_browse_canceled(self, filename, dirname):
if self._browse_dialog is not None:
self._browse_dialog.hide()
def _on_browse(self):
if self._browse_dialog is None:
self._browse_dialog = omni.kit.window.filepicker.FilePickerDialog(
"Select Audio Directory",
allow_multi_selection=False,
apply_button_label="Select",
click_apply_handler=self._on_browse_selected,
click_cancel_handler=self._on_browse_canceled,
current_directory=str(pathlib.Path.home()),
enable_filename_input=False,
)
else:
self._browse_dialog.show()
self._browse_dialog.refresh_current_directory()
def _on_changed(self, val):
self._settings.set_val(val, use_callback=True)
self._field_model.set_value(self._settings.get_val())
def _on_begin_edit(self, *_):
pass
def _build_content(self):
with ui.VStack(height=28):
ui.Label("Import Your Score", style=BigLableStyle)
ui.Label("Support format: ufdata", style=SmallLableStyle)
with ui.HStack(height=20):
ui.Label("Score Root Path", width=LABEL_WIDTH)
value = self._settings.get_val()
self._field_model = StringFieldModel(value, self._on_changed)
self._field_model.add_begin_edit_fn(self._on_begin_edit)
self._field_model.set_value(self._settings.get_val())
self._field = ui.StringField(self._field_model, style=StringFieldStyle)
self._browse_btn = ui.Button(
width=BTN_WIDTH, image_height=BTN_HEIGHT, style=FileBrowseBtnStyle, clicked_fn=self._on_browse
)
class CategoricalSettingWidgetWithReset(InstanceManagerBase):
def __init__(self):
super().__init__()
self._lbl = None
self._combo_model = None
self._combo = None
self._update_sub = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_update)
self._frame = None
def shutdown(self):
self._update_sub = None
self._lbl = None
if self._combo_model is not None:
self._combo_model.shutdown()
self._combo_model = None
self._combo = None
super().shutdown()
def _build_content(self):
self._frame = ui.HStack(height=20)
with self._frame:
self._lbl = ui.Label("Score Name", width=LABEL_WIDTH)
            # options: a list (array) of option strings
tracks = self._load_track_list(self.get_abs_track_root_path())
self._setting.set_options_and_keep(tracks)
options = self._setting.get_options()
cur_option = self._setting.get_index()
self._combo_model = ComboBoxMinimalModel(options, cur_option, self._on_changed)
if len(self._setting.get_options()) == 0 or self._setting.get_val() is None:
self._combo = None
ui.Label("No options")
else:
self._combo = ui.ComboBox(self._combo_model, style=ComboBoxStyle)
def _on_changed(self, val_index):
self._setting.set_index(val_index)
def _on_update(self, *_):
if self.get_abs_track_root_path():
tracks = self._load_track_list(self.get_abs_track_root_path())
if tracks != self._setting.get_options():
self._setting.set_options_and_keep(tracks)
if self._combo_model is not None:
if self._setting.get_val() is not None:
self._combo_model.set_index(self._setting.get_index())
if self._combo_model.get_options() != self._setting.get_options():
self._refresh()
def _load_track_list(self, path: str):
# path = path.replace("\\", "/")
if not self.is_folder(path):
print(f"Unable to load list of tracks from {path}")
return []
dir_files = self.list_folder(path)
return [x for x in dir_files if (os.path.splitext(x)[1] in AUDIO_FILE_TYPES)]
def is_folder(self, path):
result, entry = omni.client.stat(path)
        # bitwise check: CAN_HAVE_CHILDREN is the folder flag (value 4)
return entry.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN
def list_folder(self, path):
items = []
        # rstrip() removes the given trailing characters from the string; by default it strips whitespace (spaces, newlines, carriage returns, tabs).
# path = path.rstrip("/")
result, entries = omni.client.list(path)
if result != omni.client.Result.OK:
return items
for en in entries:
# Skip if it is a folder
if en.flags & omni.client.ItemFlags.CAN_HAVE_CHILDREN:
continue
name = en.relative_path
items.append(name)
return items
    def is_ov_path(self, path):
return A2F_SERVER_TYPE in path
def get_abs_track_root_path(self):
"""normpath if it is local path
for ov path not apply normpath
"""
path = self._setting.get_val()
# path = self._setting._val
# if not self.is_ov_path(path):
# if not os.path.isabs(path):
# path = os.path.abspath(os.path.join(PLAYER_DEPS_ROOT, path))
# return os.path.normpath(path).replace("\\", "/")
return path
def _changed_fn(self, model):
index = model.as_int
self._item_changed(None)
if not self._from_set_index:
if self._changed_callback_fn is not None:
self._changed_callback_fn(index)
    def _build_content_wrapper(self):  # Required for extra UI wrappers in intermediate derived classes
self._build_content()
def _refresh(self):
if self._frame is not None:
self._frame.clear()
with self._frame:
self._build_content_wrapper()
class StringFieldModel(ui.AbstractValueModel):
def __init__(self, initial_value, changed_callback_fn=None):
super().__init__()
self._value = initial_value
self._changed_callback_fn = changed_callback_fn
self.add_end_edit_fn(self._end_edit_fn)
def shutdown(self):
self._changed_callback_fn = None
def get_value(self):
return self._value
def get_value_as_string(self):
return str(self._value)
def set_value(self, value):
self._value = value
self._value_changed()
def _end_edit_fn(self, model):
value = model.get_value()
if self._changed_callback_fn is not None:
self._changed_callback_fn(value)
class ComboBoxMinimalItem(ui.AbstractItem):
def __init__(self, text):
super().__init__()
self.model = ui.SimpleStringModel(text)
class ComboBoxMinimalModel(ui.AbstractItemModel):
def __init__(self, options, initial_index, changed_callback_fn=None):
super().__init__()
self._options = options
self._changed_callback_fn = changed_callback_fn
self._items = [ComboBoxMinimalItem(text) for text in self._options]
self._current_index = ui.SimpleIntModel()
if initial_index is not None:
self._current_index.as_int = initial_index
self._from_set_index = False
self._current_index.add_value_changed_fn(self._changed_fn)
def shutdown(self):
self._changed_callback_fn = None
self._current_index = None
self._items = None
def get_options(self):
return self._options
def get_item_children(self, item):
return self._items
def get_item_value_model(self, item, column_id):
if item is None:
return self._current_index
return item.model
def get_index(self):
return self._current_index.as_int
def set_index(self, index):
if index is not None:
if index >= 0 and index < len(self._items):
self._from_set_index = True
self._current_index.as_int = index
self._from_set_index = False
def _changed_fn(self, model):
index = model.as_int
self._item_changed(None)
if not self._from_set_index:
if self._changed_callback_fn is not None:
self._changed_callback_fn(index)
class FemaleEntertainerWidger(InstanceManagerBase):
list_array_name = []
list_array_id = []
list_array_float = []
list_array_avatar = []
def __init__(self):
self._btn_create_timedomain_pipeline = None
self._btn_create_audio_palyer = None
self._btn_create_a2f_core = None
self._btn_create_head_template = None
self._frame = None
self._female_entertainer_data = None
self._id = None
def shutdown(self):
self._btn_create_timedomain_pipeline = None
self._btn_create_audio_palyer = None
self._btn_create_a2f_core = None
self._btn_create_head_template = None
self._frame = None
self._female_entertainer_data = None
self._id = None
def _add_menu_item(self, *args, **kwargs):
editor_menu = omni.kit.ui.get_editor_menu()
self._menu_items.append(editor_menu.add_item(*args, **kwargs))
def _build_content(self):
if self._frame is None:
self._frame = ui.ScrollingFrame(height=ui.Percent(35), style=ScrollingFrameStyle)
self._frame.set_build_fn(self._build_fn)
self._frame.rebuild()
def _build_fn(self):
with self._frame:
with ui.VStack(spacing=5):
sliders = [self.create_ui_float_slider(i) for i in range(len(FemaleEntertainerWidger.list_array_name))]
if len(FemaleEntertainerWidger.list_array_name) > 0:
for i in range(len(FemaleEntertainerWidger.list_array_name)):
with ui.HStack(height=25):
IMAGE = FemaleEntertainerWidger.list_array_avatar[i]
ui.Image(IMAGE, width=25, height=25)
ui.Label(
f"{FemaleEntertainerWidger.list_array_name[i]}",
width=ui.Percent(8),
name="text",
)
sliders[i]()
else:
ui.Label("No Voiceseed Selected", alignment=ui.Alignment.CENTER)
def _build_glyph(self):
self._request_female_entertainer_data()
with ui.VStack(height=28):
ui.Label("Choose Your Voice Style (up to 10)", style=BigLableStyle)
ui.Label("Choose one or more voiceseeds to mix a voice", style=SmallLableStyle)
with ui.ScrollingFrame(height=ui.Percent(15)):
with ui.VGrid(column_width=200):
glyph_plus = ui.get_custom_glyph_code("${glyphs}/plus.svg")
if isinstance(self._female_entertainer_data["data"], list):
functions = [
self.create_female_entertainer_clicked(i) for i in range(len(self._female_entertainer_data["data"]))
]
for index in range(len(self._female_entertainer_data["data"])):
_name = self._female_entertainer_data["data"][index]["name_chn"]
_tooltip = self._female_entertainer_data["data"][index]["characteristic"]
with ui.HStack():
ui.Button(
f"{_name} {glyph_plus}",
style=LargeBtnStyle,
clicked_fn=functions[index],
tooltip=_tooltip
)
def _refresh(self):
if self._frame is not None:
self._frame.rebuild()
    def _build_content_wrapper(self):  # Required for extra UI wrappers in intermediate derived classes
self._build_content()
def create_ui_float_slider(self, index):
def set_value(value, index):
value = round(value, 2)
FemaleEntertainerWidger.list_array_float[index] = value
def _delete_avatar():
del FemaleEntertainerWidger.list_array_name[index]
del FemaleEntertainerWidger.list_array_id[index]
del FemaleEntertainerWidger.list_array_avatar[index]
del FemaleEntertainerWidger.list_array_float[index]
self._refresh()
def _click_get_model_value():
IMAGE_DELETE = DATA_PATH + "/delete.svg"
slider = ui.FloatSlider(name="float_slider", min=0, max=1).model
slider.set_value(0.5)
FemaleEntertainerWidger.list_array_float[index] = 0.5
slider.add_value_changed_fn(lambda m: set_value(m.get_value_as_float(), index))
ui.Button(width=25, height=25, image_url=IMAGE_DELETE, clicked_fn=_delete_avatar)
return _click_get_model_value
def create_female_entertainer_clicked(self, index):
name = self._female_entertainer_data["data"][index]["name_chn"]
id = self._female_entertainer_data["data"][index]["id"]
avatar = self._female_entertainer_data["data"][index]["avatar"]
def _on_btn_create_female_entertainer_clicked():
if name not in FemaleEntertainerWidger.list_array_name:
FemaleEntertainerWidger.list_array_name.append(name)
FemaleEntertainerWidger.list_array_id.append(id)
FemaleEntertainerWidger.list_array_avatar.append(avatar)
FemaleEntertainerWidger.list_array_float.append([])
self._refresh()
return _on_btn_create_female_entertainer_clicked
def _request_female_entertainer_data(self):
self._female_entertainer_data = GetData._get_female_entertainer_data()
def _get_female_data():
_array = []
for i in range(len(FemaleEntertainerWidger.list_array_name)):
_array.append([])
_array[i] = [FemaleEntertainerWidger.list_array_id[i], FemaleEntertainerWidger.list_array_float[i]]
return _array
class ScalarSliderModel(ui.AbstractValueModel):
def __init__(self, initial_value, min_val, max_val, changed_callback_fn=None, fast_change=True):
super().__init__()
self._value = initial_value
self._min_val = min_val
self._max_val = max_val
self._changed_callback_fn = changed_callback_fn
self._fast_change = fast_change
if not self._fast_change:
self.add_end_edit_fn(self._end_edit_fn)
def shutdown(self):
self._changed_callback_fn = None
def get_value(self):
return self._value
def get_min(self):
return self._min_val
def get_max(self):
return self._max_val
def get_value_as_int(self):
return int(self._value)
def get_value_as_float(self):
return float(self._value)
def set_value(self, value):
self._value = value
self._value_changed()
if self._fast_change and self._changed_callback_fn is not None:
self._changed_callback_fn(self._value)
def set_field(self, value):
if value is not None:
self._value = value
self._value_changed()
def _end_edit_fn(self, model):
value = model.get_value()
if self._changed_callback_fn is not None:
self._changed_callback_fn(value)
class WaveformWidget(SimpleWidget):
def __init__(self, height):
super().__init__()
self._height = height
self._waveform_image_provider = None
self._waveform_image = None
self._canvas = None
self._canvas_width = 1024
self._canvas_height = WAVEFORM_HEIGHT
def shutdown(self):
self._waveform_image_provider = None
self._waveform_image = None
self._canvas = None
super().shutdown()
def update_track_waveform(self, track):
num_samples = track.get_num_samples()
width, height = self._canvas_width, self._canvas_height
ex_factor = 1
width_ex = width * ex_factor
shrink_factor = max(num_samples // width_ex, 1)
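        # Downsample: roughly shrink_factor audio samples map to one pixel column; each column keeps its group's peak amplitude below.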
if 0:
volume = np.abs(track.data[::shrink_factor][:width_ex])
else:
if num_samples >= shrink_factor * width_ex:
volume = track.data[: shrink_factor * width_ex].reshape(width_ex, shrink_factor)
else:
tmp = np.zeros((shrink_factor * width_ex), np.float32)
tmp[:num_samples] = track.data
volume = tmp.reshape(width_ex, shrink_factor)
volume = np.abs(np.max(volume, axis=1))
# volume /= max(np.max(volume), 1e-8)
# dB logarithmic scale
if 0:
volume = np.maximum(volume, 1e-6)
volume = 20.0 * np.log10(volume / 1.0)
# [-50, 0] dB
volume = np.maximum((volume / 50.0) + 1.0, 0.0)
volume *= 0.7
canvas = np.zeros((height, width_ex, 4), dtype=np.uint8)
print("canvas.shape[1]======>", canvas.shape[1])
for x in range(canvas.shape[1]):
start = int(round((1.0 - volume[x]) * float(height) / 2))
end = int(round((1.0 + volume[x]) * float(height) / 2))
canvas[start:end, x, :] = [255, 255, 255, 130]
if start == end:
canvas[start: end + 1, x, :] = [255, 255, 255, 60]
if ex_factor > 1:
canvas = scipy.ndimage.zoom(canvas.astype(np.float32), (1, 1.0 / ex_factor, 1), order=1).astype(np.uint8)
self._canvas = canvas.flatten().tolist()
if self._waveform_image_provider is not None:
self._waveform_image_provider.set_bytes_data(self._canvas, [self._canvas_width, self._canvas_height])
def _build_content(self):
self._waveform_image_provider = ui.ByteImageProvider()
if self._canvas is not None:
self._waveform_image_provider.set_bytes_data(self._canvas, [self._canvas_width, self._canvas_height])
with ui.HStack():
self._waveform_image = ui.ImageWithProvider(
self._waveform_image_provider,
height=self._height,
style=TrackWaveformStyle,
fill_policy=ui.IwpFillPolicy.IWP_STRETCH,
)
class TimelineRangeWidget(InstanceManagerBase):
def __init__(self, height):
super().__init__()
self._height = height
self._rect_range_start = None
self._rect_range = None
def shutdown(self):
self._rect_range_start = None
self._rect_range = None
super().shutdown()
def set_rect_style(self, style):
if self._rect_range is not None:
self._rect_range.set_style(style)
def update_range_rect(self, range_start, range_end, track_len):
if self._rect_range_start is not None and self._rect_range is not None:
if track_len == 0:
start_perc = 0
rect_perc = 0
else:
start_perc = range_start / track_len * 100.0
rect_perc = (range_end - range_start) / track_len * 100.0
self._rect_range_start.width = ui.Percent(start_perc)
self._rect_range.width = ui.Percent(rect_perc)
def _build_content(self):
with ui.HStack(height=self._height):
self._rect_range_start = ui.Spacer(width=omni.ui.Percent(0), style=RangeStartSpacerStyle)
self._rect_range = ui.Rectangle(width=omni.ui.Percent(100), height=self._height, style=RangeRectStyle)
class PlaybackSliderWidget(SimpleWidget):
def __init__(self, height, on_changed_fn=None, on_changed_from_mouse_fn=None):
super().__init__()
self._height = height
self._on_changed_fn = on_changed_fn
self._on_changed_from_mouse_fn = on_changed_from_mouse_fn
self._max_value = 0.001
self._value = 0.0
self._handle_width = 1
self._pressed = False
self._mouse_catcher = None
self._slider_placer = None
self._handle = None
self._update_sub = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_update)
def shutdown(self):
self._update_sub = None
self._on_changed_fn = None
self._on_changed_from_mouse_fn = None
self._max_value = 0.001
self._value = 0.0
self._pressed = False
self._mouse_catcher = None
self._slider_placer = None
self._handle = None
super().shutdown()
def set_value(self, value):
if self._pressed:
return # pressed mouse overrides external change of the value
self._value = value
if self._value < 0.0:
self._value = 0.0
elif self._value > self._max_value:
self._value = self._max_value
if self._on_changed_fn is not None:
self._on_changed_fn(self._value)
if self._max_value > 0:
rel_x_perc = self._value / self._max_value
self._set_slider_position(rel_x_perc)
elif self._max_value == 0:
self._set_slider_position(0)
def get_value(self):
return self._value
def set_max(self, max_value):
if max_value < 0:
raise ValueError("Playback Slider max value can't be less than zero")
self._max_value = max_value if max_value > 0 else 0.001
def set_handle_style(self, style):
if self._handle is not None:
self._handle.set_style(style)
def _set_slider_position(self, rel_x_perc):
if self._slider_placer is not None:
self._slider_placer.offset_x = ui.Percent(rel_x_perc * 100.0)
def _on_mouse_moved(self, x, y, _, btn):
if btn is True:
self._update_from_mouse(x)
def _on_mouse_pressed(self, x, y, btn, *args):
if btn == 0:
self._pressed = True
self._update_from_mouse(x)
def _on_mouse_released(self, x, y, btn, *args):
if btn == 0:
self._pressed = False
def _update_from_mouse(self, x):
if self._mouse_catcher is not None and self._slider_placer is not None:
rel_x = x - self._mouse_catcher.screen_position_x
if rel_x < 0:
rel_x = 0
elif rel_x >= self._mouse_catcher.computed_width:
rel_x = self._mouse_catcher.computed_width
rel_x_perc = rel_x / self._mouse_catcher.computed_width
self._set_slider_position(rel_x_perc)
self._value = self._max_value * rel_x_perc
if self._on_changed_fn is not None:
self._on_changed_fn(self._value)
def _build_content(self):
with ui.ZStack():
self._mouse_catcher = ui.Rectangle(
height=self._height,
style={
"background_color": 0x0,
"padding": 0,
"margin_width": 0,
"margin_height": 0,
"border_radius": 0,
"border_color": 0x0,
"border_width": 0,
},
mouse_moved_fn=self._on_mouse_moved,
mouse_pressed_fn=self._on_mouse_pressed,
mouse_released_fn=self._on_mouse_released,
)
with ui.HStack():
self._slider_placer = ui.Placer(draggable=False, stable_size=True)
with self._slider_placer:
with ui.HStack():
self._handle = ui.Rectangle(
width=self._handle_width, height=self._height, style=HandlePlaybackStyle
)
ui.Spacer()
def _on_update(self, *_):
if self._pressed:
if self._on_changed_from_mouse_fn is not None:
self._on_changed_from_mouse_fn(self._value)
class TimelineWidget(BoolSettingWidgetBase):
_frame = None
def __init__(self):
super().__init__()
self._waveform_widget = WaveformWidget(height=WAVEFORM_HEIGHT)
self._timeline_range_widget = TimelineRangeWidget(height=WAVEFORM_HEIGHT)
self._playback_slider_widget = PlaybackSliderWidget(
height=WAVEFORM_HEIGHT, on_changed_fn=None, on_changed_from_mouse_fn=self._on_changed
)
self._update_sub = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_update)
def shutdown(self):
self._update_sub = None
self._waveform_widget.shutdown()
self._waveform_widget = None
self._timeline_range_widget.shutdown()
self._timeline_range_widget = None
self._playback_slider_widget.shutdown()
self._playback_slider_widget = None
# super().shutdown()
def set_style(self, style):
if style == "regular":
self._playback_slider_widget.set_handle_style(HandlePlaybackStyle)
self._timeline_range_widget.set_rect_style(RangeRectStyle)
elif style == "streaming":
self._playback_slider_widget.set_handle_style(HandleStreamingStyle)
self._timeline_range_widget.set_rect_style(RangeRectStreamingStyle)
elif style == "recording":
self._playback_slider_widget.set_handle_style(HandleRecordingStyle)
self._timeline_range_widget.set_rect_style(RangeRectRecordingStyle)
def update_track_waveform(self):
track = self._audio_player.get_track_ref()
self._waveform_widget.update_track_waveform(track)
def _build_content(self):
TimelineWidget._frame = ui.ZStack()
with TimelineWidget._frame:
ui.Rectangle(style=PlaybackSliderBackgroundStyle)
self._waveform_widget._build_content()
self._timeline_range_widget._build_content()
self._playback_slider_widget._build_content()
def _refresh(self):
if TimelineWidget._frame is not None:
TimelineWidget._frame.clear()
with TimelineWidget._frame:
self._build_content_wrapper()
    def _build_content_wrapper(self):  # Required for extra UI wrappers in intermediate derived classes
self._build_content()
def _on_changed(self, t):
if self._track is not None:
track_len = self._track.get_length()
self._playback_slider_widget.set_max(track_len)
self._playback_slider_widget.set_value(t)
seek_sample = self._track.sec_to_sample(t)
self._audio_player.seek(seek_sample)
def _on_update(self, *_):
if self._track is not None and self._audio_player is not None:
self._pressed = False
track_len = self._track.get_length()
self._playback_slider_widget.set_max(track_len)
t = self._audio_player.get_current_time()
self._playback_slider_widget.set_value(t)
# if t == track_len and not self.boolSetting._state:
# self.boolSetting._state = True
# self._on_toggled()
class TimecodeWidget(BoolSettingWidgetBase):
def __init__(self):
super().__init__()
self.ts = None
self._timecode_lbl = None
self._timecode_tms_lbl = None
self._timecode_max_lbl = None
self._timecode_max_tms_lbl = None
self._button_play_pause = ButtonPlayPause()
self._update_sub = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_update)
def shutdown(self):
self.ts = None
self._update_sub = None
self._timecode_lbl = None
self._timecode_tms_lbl = None
self._timecode_max_lbl = None
self._timecode_max_tms_lbl = None
# super().shutdown()
def _build_content(self):
with ui.HStack(height=22, style={"margin_width": 0}):
ui.Spacer()
self._timecode_lbl = ui.Label("0:00", width=0)
self._timecode_tms_lbl = ui.Label(".00", width=0, style={"color": 0x50FFFFFF})
ui.Label(" | ", style={"color": 0x70FFFFFF})
self._timecode_max_lbl = ui.Label("0:00", width=0)
self._timecode_max_tms_lbl = ui.Label(".00", width=0, style={"color": 0x50FFFFFF})
ui.Spacer()
def _set_timecode(self, t, m_sec_lbl, tms_lbl):
tmss = int(round(t * 100))
secs = tmss // 100
mins = secs // 60
secs_sub = secs % 60
tmss_sub = tmss % 100
m_sec_lbl.text = "{}:{:02d}".format(mins, secs_sub)
tms_lbl.text = ".{:02d}".format(tmss_sub)
if self.ts is not None and t == self.ts:
self._button_play_pause._update_from_state(is_playing=False)
else:
self.ts = t
def _on_update(self, *_):
if self._timecode_lbl is not None and self._timecode_tms_lbl is not None:
t = self._audio_player.get_current_time()
self._set_timecode(t, self._timecode_lbl, self._timecode_tms_lbl)
if self._timecode_max_lbl is not None and self._timecode_max_tms_lbl is not None and self._track is not None:
track_len = self._track.get_length()
self._set_timecode(track_len, self._timecode_max_lbl, self._timecode_max_tms_lbl)
class ButtonPlayPause(BoolSettingWidgetBase):
_btn = None
def __init__(self):
super().__init__()
def shutdown(self):
ButtonPlayPause._btn = None
super().shutdown()
def _build_widget(self):
with ui.HStack(width=BTN_WIDTH, height=30):
ButtonPlayPause._btn = ui.Button(width=BTN_WIDTH, style=PlayBtnStyle, tooltip="Play/Pause (P)")
ButtonPlayPause._btn.set_clicked_fn(self._on_toggled)
def _update_from_state(self, is_playing):
if ButtonPlayPause._btn is not None:
if is_playing is True:
ButtonPlayPause._btn.set_style(PauseBtnStyle)
else:
ButtonPlayPause._btn.set_style(PlayBtnStyle)
class ButtonComposing(BoolSettingWidgetBase):
def __init__(self):
super().__init__()
self._btn = None
self._compose_data = None
self._timeline_widget = TimelineWidget()
def shutdown(self):
self._btn = None
super().shutdown()
def _build_widget(self):
with ui.VStack():
self._btn = ui.Button('Synthesis your song', height=BTN_HEIGHT*2.5, tooltip="Synthesized Voice")
self._btn.set_clicked_fn(self._on_compound)
def _on_compound(self):
thread = Thread(target=self._request_compose_data)
thread.start()
def _update_from_state(self, is_looping):
if self._btn is not None:
self._btn.selected = is_looping
def _request_compose_data(self):
_array = FemaleEntertainerWidger._get_female_data()
path = os.path.join(self.boolSetting._val, self.boolSetting._filename)
files = {"file": open(path, "rb")}
mix_str = json.dumps(
{
"duration": _array,
"pitch": _array,
"air": _array,
"falsetto": _array,
"tension": _array,
"energy": _array,
"mel": _array,
},
)
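        # mix_info assigns the same [voiceseed id, weight] pairs collected from the sliders to every voice attribute.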
data_dict = {"flag": 135, "is_male": 1, "mix_info": mix_str}
try:
self._btn.text = 'processing...'
res = GetData._get_compose_data(files, data_dict)
if res["code"] == 200:
r = requests.get(res["data"][-1]["audio"], stream=True)
if not os.path.exists(os.path.join(EXT_ROOT, "voice")):
os.makedirs(os.path.join(EXT_ROOT, "voice"))
memory_address_ogg = os.path.join(EXT_ROOT, "voice\\voice.ogg")
memory_address_wav = os.path.join(EXT_ROOT, "voice\\voice.wav")
with open(memory_address_ogg, "wb") as ace_music:
for chunk in r.iter_content(chunk_size=1024): # 1024 bytes
if chunk:
ace_music.write(chunk)
song = AudioSegment.from_ogg(memory_address_ogg)
song.export(memory_address_wav, format="wav")
self._load_track(memory_address_wav)
self._timeline_widget.update_track_waveform()
self._timeline_widget._refresh()
else:
print(res)
except BaseException as e:
print(e)
self._btn.text = 'Synthesis your song'
self._btn.set_style({})
class ButtonLocation(BoolSettingWidgetBase):
def __init__(self):
self._btn = None
def shutdown(self):
self._btn = None
super().shutdown()
def _build_widget(self):
with ui.HStack(width=BTN_WIDTH, height=30):
self._btn = ui.Button(width=BTN_WIDTH, style=LocationBtnStyle, tooltip="Locate the composite file")
self._btn.set_clicked_fn(self.get_location)
def get_location(self):
        # memory_address is the path of the folder to open
if not os.path.exists(os.path.join(EXT_ROOT, "voice")):
os.makedirs(os.path.join(EXT_ROOT, "voice"))
memory_address = os.path.join(EXT_ROOT, "voice")
os.startfile(memory_address)
def _update_from_state(self, recorder_enabled):
if self._btn is not None:
self._btn.selected = recorder_enabled
| 35,085 | Python | 36.848975 | 124 | 0.57774 |
timedomain-tech/Timedomain-Ai-Singer-Extension/exts/timedomain.ai.singer/timedomain/ai/singer/scripts/ui.py | from timedomain.ai.singer.instance import InstanceManagerBase
from timedomain.ai.singer.utils_io import read_file
import omni.ui as ui
import omni.kit.ui
import omni.kit.app
import omni.kit.window.filepicker
import omni.kit.pipapi
a2f_audio = omni.audio2face.player_deps.import_a2f_audio()
class Refreshable:
def __init__(self):
self.__need_refresh = False
self.__update_sub = (
omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self.__on_update)
)
def shutdown(self):
self.__update_sub = None
def refresh(self):
# We can't call self._refresh() directly, since it will clear the UI
# while the caller of this function could be that UI too
self.__need_refresh = True
def __on_update(self, *_):
if self.__need_refresh:
self.__need_refresh = False
self._refresh()
def _refresh(self): # Should be implemented in the derived class
raise NotImplementedError
class SimpleWidget(Refreshable):
def __init__(self):
super().__init__()
self._frame = None
def shutdown(self):
self._frame = None
super().shutdown()
def build(self):
self._frame = ui.VStack(height=0, spacing=0)
with self._frame:
self._build_content_wrapper()
def show(self):
if self._frame is not None:
self._frame.visible = True
def hide(self):
if self._frame is not None:
self._frame.visible = False
def enable(self):
if self._frame is not None:
self._frame.enabled = True
def disable(self):
if self._frame is not None:
self._frame.enabled = False
def clear(self):
if self._frame is not None:
self._frame.clear()
def _refresh(self):
if self._frame is not None:
self._frame.clear()
with self._frame:
self._build_content_wrapper()
    def _build_content_wrapper(self):  # Required for extra UI wrappers in intermediate derived classes
self._build_content()
def _build_content(self): # Should be implemented in the derived class
raise NotImplementedError
class BoolSettingWidgetBase(InstanceManagerBase):
_track = None
_audio_player = a2f_audio.AudioPlayer(verbose=True)
def __init__(self):
super().__init__()
self._update_sub = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(self._on_update)
def shutdown(self):
self._update_sub = None
BoolSettingWidgetBase._audio_player.pause()
BoolSettingWidgetBase._audio_player = None
super().shutdown()
def _build_content(self):
self._build_widget()
if self.boolSetting._state is not None:
self._update_from_state(self.boolSetting._state)
def _on_toggled(self):
self.boolSetting._state = not self.boolSetting._state
if self.boolSetting._state:
if self.boolSetting._val is not None and self.boolSetting._filename is not None:
BoolSettingWidgetBase._audio_player.play()
self._update_from_state(True)
self.boolSetting._state = True
else:
self._update_from_state(False)
BoolSettingWidgetBase._audio_player.pause()
self.boolSetting._state = False
else:
self._update_from_state(False)
BoolSettingWidgetBase._audio_player.pause()
def _load_track(self, track_fpath):
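        # Read the audio file bytes (local or Omniverse path) and hand the decoded track to the shared audio player.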
bytes_data = read_file(track_fpath)
track = a2f_audio.read_track_from_bytes(bytes_data)
BoolSettingWidgetBase._track = track
BoolSettingWidgetBase._audio_player.set_track(track)
def _on_update(self, *_):
if self.boolSetting._state:
self.boolSetting.toggle()
def _build_widget(self): # Should be implemented in the derived class
raise NotImplementedError
def _update_from_state(self): # Should be implemented in the derived class
raise NotImplementedError | 4,136 | Python | 30.340909 | 119 | 0.617505 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/extension.py | ############# omniverse import ##################
import omni.ext
import omni.ui as ui
import carb
import pxr
############# python import ##################
import asyncio
import os
import time
import random
import math
import json
import numpy as np
############# VRKitchen import ##################
from .param import *
# from .layout.house import House
from .layout.randomizer import Randomizer
from .layout.utils import add_semantics
from .layout.house_new import House as HouseNew
from .autotask.auto import AutoTasker
# from .autotask.auto_label import AutoLabeler
from .render.helper import CustomSyntheticDataHelper
###################### ui import ################
from .ui.indoorkit_ui_widget import TaskTypeComboboxWidget, CustomRecordGroup, CustomControlGroup, CustomBoolWidget, CustomSliderWidget, \
CustomSkySelectionGroup, CustomIdNotice, CustomPathButtonWidget, CustomRenderTypeSelectionGroup
from omni.kit.window.popup_dialog import MessageDialog
# Any class derived from `omni.ext.IExt` in top level module (defined in `python.modules` of `extension.toml`) will be
# instantiated when extension gets enabled and `on_startup(ext_id)` will be called. Later when extension gets disabled
# on_shutdown() is called.
class MyExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
print("[vrkitchen.indoor.kit] VRKitchen2.0-Indoor-Kit startup")
# set rendering settings:
carb.settings.get_settings().set_bool("/rtx/ecoMode/enabled", True)
FPS = 60.0
carb.settings.get_settings().set_bool("/app/runLoops/main/rateLimitEnabled", True)
carb.settings.get_settings().set_int("/app/runLoops/main/rateLimitFrequency", int( FPS))
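        # Cap the main run loop at FPS iterations per second.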
# carb.settings.get_settings().set_int("/persistent/simulation/minFrameRate", int(FPS))
# stage and timeline
self.stage = omni.usd.get_context().get_stage()
pxr.UsdGeom.SetStageUpAxis(self.stage, pxr.UsdGeom.Tokens.y)
self.timeline = omni.timeline.get_timeline_interface()
# robot
self.franka = None
# self.auto_labeler = AutoLabeler(None)
self.task_type = None
# set up render
self.use_isosurface = False # use isosurface
self.render_folder = RENDER_ROOT
self.render_helper = CustomSyntheticDataHelper()
# build windows
self.build_setup_layout_window()
################################################################################################
######################################## Build omni ui window ##################################
################################################################################################
def build_setup_layout_window(self):
"""
Build a window to control/debug layout
"""
from .ui.style import julia_modeler_style
self._window = ui.Window("VRKitchen2.0-Indoor-Kit", width=390)
with self._window.frame:
self._window.frame.style = julia_modeler_style
with ui.ScrollingFrame():
with ui.VStack(height=0):
# ui.Button("Debug", clicked_fn = self.debug)
self.task_desc_ui = ui.StringField(height=20, style={ "margin_height": 2})
self.task_desc_ui.model.set_value(" Welcome to VRKitchen2.0 Indoor Kit!")
ui.Spacer(height=10)
ui.Line(style_type_name_override="HeaderLine")
self.task_layout_collapse_ui = ui.CollapsableFrame("TASK LAYOUT", build_header_fn=self._build_custom_frame_header)
# self.task_layout_collapse_ui.set_collapsed_changed_fn(lambda x:self.on_task_layout_ui_collapse(x))
with self.task_layout_collapse_ui:
with ui.VStack(height=0, spacing=0):
ui.Line(style_type_name_override="HeaderLine")
ui.Spacer(height = 12)
with ui.HStack(height=30):
# set up tasks
self.task_types = TASK_TYPES
# ui.Label(" Task type: ", width = 30, style={ "margin": 2 , "color": "cornflowerblue", "font_size":18})
# default_task_index = self.task_types.index("pickup_object")
# self.task_type_ui = ui.ComboBox(default_task_index, width = 200, *self.task_types, style={ "margin": 8, "color": "cornflowerblue", "font_size":18})
self.task_type_ui = TaskTypeComboboxWidget(label="Task type:\t", options=self.task_types, on_restore_fn=self.fill_task_info)
# ui.Button(" + ", clicked_fn=self.auto_next_task, width = 20, style={ "margin_height": 8})
# ui.Button("+ object id", clicked_fn=self.auto_next_obj_only, style={ "margin": 8})
self.annotators = ANNOTATORS
ui.Label(" Annotator: ", width = 30, style={ "font_size": 12 , "color": "PowderBlue"}, visible = False)
annotator_index = ANNOTATORS.index("MyLuckyUser")
self.annotator_ui = ui.ComboBox(annotator_index, width = 100, *self.annotators, style={ "margin_height": 8, "font_size": 12, "color": "PowderBlue" }, visible=False)
# self.auto_suggest.annotator_ui = self.annotator_ui
with ui.HStack(height=30):
with ui.HStack():
ui.Label("\tObject id: ", width=30, style={"color": "DarkSalmon"})
self.task_id_ui = omni.ui.IntField(width = 30, name = "choose_id", style={ "color": "DarkSalmon"})
ui.Button("+", width = 30, style={"margin_height": 8, "color": "DarkSalmon", "border_color": 1, "border_width": 1},
clicked_fn=lambda: self.task_id_ui.model.set_value(min(self.task_id_ui.model.get_value_as_int() + 1, 19)))
ui.Button("-", width = 30, style={ "margin_height": 8, "color": "DarkSalmon", "border_color": 1, "border_width": 1},
clicked_fn=lambda: self.task_id_ui.model.set_value(max(self.task_id_ui.model.get_value_as_int() - 1, 0 )))
ui.Button("Add object", name = "add_button", clicked_fn=self.auto_add_obj, style={ "color": "DarkSalmon"})
ui.Label(" Object ", width=20, visible = False)
self.object_id_ui = omni.ui.IntField(height=20, width = 25, style={ "margin_height": 8 , "margin_width": 4}, visible = False)
self.object_id_ui.model.set_value(0)
ui.Button("+", width = 20, style={"margin_height": 8, "font_size": 12},
clicked_fn=lambda: self.object_id_ui.model.set_value(self.object_id_ui.model.get_value_as_int() + 1), visible = False)
ui.Button("-", width = 20, style={ "margin_height": 8, "font_size": 12},
clicked_fn=lambda: self.object_id_ui.model.set_value(self.object_id_ui.model.get_value_as_int() - 1), visible = False)
ui.Label(" Anchor:", width=20, visible = False)
self.anchor_id_ui = omni.ui.IntField(height=20, width = 25, style={ "margin_height": 8 , "margin_width": 4}, visible = False)
self.anchor_id_ui.model.set_value(0)
ui.Button("+", width = 20, style={"margin_height": 8, "font_size": 12},
clicked_fn=lambda: self.anchor_id_ui.model.set_value(self.anchor_id_ui.model.get_value_as_int() + 1), visible = False)
ui.Button("-", width = 20, style={ "margin_height": 8, "font_size": 12},
clicked_fn=lambda: self.anchor_id_ui.model.set_value(self.anchor_id_ui.model.get_value_as_int() - 1), visible = False)
ui.Label(" Robot:", width=20, visible = False)
self.robot_id_ui = omni.ui.IntField(height=20, width = 25, style={ "margin_height": 8 , "margin_width": 4}, visible = False)
ui.Button("+", width = 20, style={"margin_height": 8, "font_size": 12},
clicked_fn=lambda: self.robot_id_ui.model.set_value(self.robot_id_ui.model.get_value_as_int() + 1), visible = False)
ui.Button("-", width = 20, style={ "margin_height": 8, "font_size": 12},
clicked_fn=lambda: self.robot_id_ui.model.set_value(self.robot_id_ui.model.get_value_as_int() - 1), visible = False)
ui.Label("Mission ", width=20, visible = False)
self.mission_id_ui = omni.ui.IntField(height=20, width = 40, style={ "margin": 8 }, visible = False)
with ui.HStack():
ui.Label("\tHouse id: ", width = 30, style = { "color": "Plum", "font_size": 14})
self.house_id_ui = omni.ui.IntField(width = 30, name = "choose_id", style={"color": "Plum"})
self.house_id_ui.model.set_value(0)
ui.Button("+", width = 30, style={"margin_height": 8, "font_size": 14, "color": "Plum", "border_color": 1, "border_width": 1},
clicked_fn=lambda: self.house_id_ui.model.set_value(min(self.house_id_ui.model.get_value_as_int() + 1, 2)))
ui.Button("-", width = 30, style={ "margin_height": 8, "font_size": 14, "color": "Plum", "border_color": 1, "border_width": 1},
clicked_fn=lambda: self.house_id_ui.model.set_value(max(self.house_id_ui.model.get_value_as_int() - 1, 0)))
ui.Button("Add house", name = "add_button", clicked_fn=self.auto_add_house, style={ "color": "Plum"})
with ui.HStack(height=20, visible = False):
ui.Button("Add robot", clicked_fn=self.auto_add_robot, style={ "margin": 4})
ui.Button("Add mission", clicked_fn=self.auto_add_mission, style={ "margin": 4})
# ui.Label(" |", width=10)
with ui.HStack(height=20, visible = False):
ui.Button("Record object", name = "record_button", clicked_fn=self.record_obj_new, style={ "margin": 4})
ui.Button("Record robot", name = "record_button", clicked_fn=self.record_robot_new, style={ "margin": 4})
ui.Label(" |", width=10)
ui.Button("Record house", name = "record_button", clicked_fn=self.record_house_new, style={ "margin": 4})
with ui.HStack(height=20):
ui.Button("Record scene", height = 40, name = "record_button", clicked_fn=self.record_scene, style={ "margin": 4})
with ui.HStack(height=20, visible = False):
ui.Button("Load object", clicked_fn=self.load_obj_new, style={ "margin": 4})
ui.Button("Load robot", clicked_fn=self.load_robot_new, style={ "margin": 4})
# ui.Button("Load mission", clicked_fn=self.load_mission, style={ "margin": 4})
ui.Label(" |", width=10)
ui.Button("Load house", clicked_fn=self.load_house_new, style={ "margin": 4})
ui.Spacer(height = 10)
ui.Line(style_type_name_override="HeaderLine")
with ui.CollapsableFrame("SCENE UTILITY"):
with ui.VStack(height=0, spacing=4):
ui.Line(style_type_name_override="HeaderLine")
# open a new stage
ui.Button("New scene", height = 40, name = "load_button", clicked_fn=lambda : omni.kit.window.file.new(), style={ "margin": 4}, tooltip = "open a new empty stage")
# load recorded scene
ui.Button("Load scene", height = 40, name = "load_button", clicked_fn=self.load_scene, style={ "margin": 4})
# ground plan
CustomBoolWidget(label="Visible ground:", default_value=False, on_checked_fn = self.auto_add_ground)
# light intensity
CustomSliderWidget(min=0, max=3000, label="Light intensity:", default_val=1000, on_slide_fn = self.change_light_intensity)
# sky selection
CustomSkySelectionGroup(on_select_fn=self.randomize_sky)
# house material
CustomBoolWidget(label="Random house material:", default_value=False, on_checked_fn = self.randomize_material)
# water isosurface
CustomBoolWidget(label="Enable isosurface:", default_value=False, on_checked_fn = self.enable_isosurface)
# PLAY group
ui.Spacer(height = 10)
ui.Line(style_type_name_override="HeaderLine")
with ui.CollapsableFrame("PLAY"):
with ui.VStack(height=0, spacing=0):
ui.Line(style_type_name_override="HeaderLine")
ui.Spacer(height = 12)
# play and record
record_group = CustomRecordGroup(
on_click_record_fn=self.start_record,
on_click_stop_fn=self.stop_record,
on_click_replay_fn=self.replay_record,
)
# robot control
control_group = CustomControlGroup()
record_group.control_group = control_group
with ui.CollapsableFrame("Render"):
with ui.VStack(height=0, spacing=0):
CustomRenderTypeSelectionGroup(on_select_fn=self.set_render_type)
ui.Button("Capture image", height = 40, name = "tool_button", clicked_fn=self.render_an_image, style={ "margin": 4}, tooltip = "Capture current screenshot")
# PATH group
ui.Spacer(height = 10)
ui.Line(style_type_name_override="HeaderLine")
with ui.CollapsableFrame("PATH", collapsed = True):
with ui.VStack(height=0, spacing=0):
ui.Line(style_type_name_override="HeaderLine")
ui.Spacer(height = 12)
CustomPathButtonWidget(label="Task folder:", path=DATA_PATH_NEW)
CustomPathButtonWidget(label="Record folder:", path=SAVE_ROOT)
CustomPathButtonWidget(label="Render folder:", path=self.render_folder)
################################################################################################
######################################## Auto task labeling ####################################
################################################################################################
def fill_task_info(self, reset = False):
"""
        Automatically (randomly) fill task type, house id, and object id
:: params:
reset: if true, set all to zeros
"""
task_type_id = np.random.randint(len(self.task_types)) if not reset else 0
object_id = np.random.randint(20) if not reset else 0 # task id
house_id = np.random.randint(3) if not reset else 0 # house id
self.task_type_ui.model.get_item_value_model().set_value(task_type_id)
self.task_id_ui.model.set_value(object_id)
self.house_id_ui.model.set_value(house_id)
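    # A minimal sketch (not wired into the extension) of how the two model types used
    # above are read back: ui.ComboBox wraps its selected index in an item value model,
    # while ui.IntField exposes a plain value model. Assuming widgets built as in
    # build_setup_layout_window():
    #
    #   task_index = self.task_type_ui.model.get_item_value_model().get_value_as_int()
    #   task_type = self.task_types[task_index]
    #   object_id = self.task_id_ui.model.get_value_as_int()
    #   house_id = self.house_id_ui.model.get_value_as_int()
    #
    # This is the same access pattern used by init_auto_tasker() and init_new_house().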
def init_auto_tasker(self):
"""
Initialize auto task labeling tool
"""
# update stage
self.stage = omni.usd.get_context().get_stage()
pxr.UsdGeom.SetStageUpAxis(self.stage, pxr.UsdGeom.Tokens.y)
task_index = self.task_type_ui.model.get_item_value_model().get_value_as_int()
task_type = self.task_types[task_index]
task_id = self.task_id_ui.model.get_value_as_int()
robot_id = self.robot_id_ui.model.get_value_as_int()
anchor_id = self.anchor_id_ui.model.get_value_as_int()
mission_id = self.mission_id_ui.model.get_value_as_int()
house_id = self.house_id_ui.model.get_value_as_int()
# meta_id = self.meta_id_ui.model.get_value_as_int()
# FIXME: add annotator
# annotator_index = self.annotator_ui.model.get_item_value_model().get_value_as_int()
annotator = "MyLuckyUser" # self.annotators[annotator_index]
self.auto_tasker = AutoTasker(task_type, task_id, robot_id, mission_id, house_id, anchor_id, annotator=annotator)
AutoTasker.TASK_DESCRIPTION = self.task_desc_ui.model.get_value_as_string()
def auto_next_obj_only(self):
"""
retrieve the next object index for current task
"""
# new scene
AutoTasker.new_scene()
global OBJ_INDEX
OBJ_INDEX = self.object_id_ui.model.get_value_as_int()
OBJ_INDEX += 1
self.object_id_ui.model.set_value(OBJ_INDEX)
self.init_auto_tasker()
self.auto_tasker.reconfig(OBJ_INDEX)
self.task_desc_ui.model.set_value(AutoTasker.TASK_DESCRIPTION)
def auto_next_task(self):
"""
next task
"""
task_id = self.task_id_ui.model.get_value_as_int()
self.task_id_ui.model.set_value(task_id + 1)
AutoTasker.new_scene()
self.init_auto_tasker()
self.auto_tasker.reconfig(0)
self.task_desc_ui.model.set_value(AutoTasker.TASK_DESCRIPTION)
def auto_add_obj(self):
self.init_auto_tasker()
if self.stage.GetPrimAtPath("/World/game"):
dialog = MessageDialog(
title="Add Object",
message=f"Already have `/World/game` in the scene. Please start a new stage.",
disable_cancel_button=True,
ok_handler=lambda dialog: dialog.hide()
)
dialog.show()
return
self.auto_tasker.add_obj()
# self.auto_tasker.build_HUD()
if self.stage.GetPrimAtPath("/World/game"):
self.task_desc_ui.model.set_value("Task object added!")
self.auto_add_robot()
def auto_add_robot(self):
self.init_auto_tasker()
self.auto_tasker.add_robot()
franka_prim = self.stage.GetPrimAtPath("/World/game/franka")
if franka_prim:
self.task_desc_ui.model.set_value("Feel free to move the robot, \nthen you can `Add house`")
selection = omni.usd.get_context().get_selection()
selection.clear_selected_prim_paths()
selection.set_prim_path_selected(franka_prim.GetPath().pathString, True, True, True, True)
viewport = omni.kit.viewport_legacy.get_viewport_interface()
viewport = viewport.get_viewport_window() if viewport else None
if viewport:
viewport.focus_on_selected()
else:
from omni.kit.viewport.utility import frame_viewport_selection
frame_viewport_selection(force_legacy_api=True)
def auto_add_house(self):
self.init_auto_tasker()
if self.stage.GetPrimAtPath("/World/layout"):
dialog = MessageDialog(
title="Add house",
message=f"Already have `/World/layout` in the scene. Please start a new stage.",
disable_cancel_button=True,
ok_handler=lambda dialog: dialog.hide()
)
dialog.show()
return
self.auto_tasker.add_house()
layout_prim = self.stage.GetPrimAtPath("/World/layout")
if layout_prim:
self.task_desc_ui.model.set_value("House added! Feel feel to move the /World/game and record scene.")
selection = omni.usd.get_context().get_selection()
selection.clear_selected_prim_paths()
selection.set_prim_path_selected("/World/game", True, True, True, True)
floor_prim = self.stage.GetPrimAtPath("/World/layout/floor")
def auto_add_mission(self):
self.init_auto_tasker()
self.auto_tasker.add_task()
################################################################################################
######################################## Modify Scene ##########################################
################################################################################################
def auto_add_ground(self, visible = False):
"""
Add ground to the scene
"""
self.stage = omni.usd.get_context().get_stage()
if not self.stage.GetPrimAtPath("/World/game"):
carb.log_error("Please add /World/game first!")
self.task_desc_ui.model.set_value(f"Please `Add Object`")
return
from .layout.modify import add_ground_plane
add_ground_plane(visiable=visible)
self.task_desc_ui.model.set_value(f"Add ground to scene (visible : {visible})")
selection = omni.usd.get_context().get_selection()
selection.clear_selected_prim_paths()
selection.set_prim_path_selected("/World/groundPlane", True, True, True, True)
def randomize_material(self, rand = True):
"""
Randomize house materials
"""
self.stage = omni.usd.get_context().get_stage()
if not self.stage.GetPrimAtPath("/World/layout"):
carb.log_error("Please add /World/layout (load scene) first!")
self.task_desc_ui.model.set_value(f"Please `Load Scene`")
return
self.randomizer = Randomizer()
self.randomizer.randomize_house(rand = rand)
self.task_desc_ui.model.set_value("Added floor/wall material")
def randomize_sky(self, sky_type = None):
"""
        Randomize the sky texture
"""
self.randomizer = Randomizer()
self.randomizer.randomize_sky(sky_type = sky_type)
self.task_desc_ui.model.set_value("Sky added.")
def randomize_light(self):
"""
        Randomize the lighting
"""
self.randomizer = Randomizer()
self.randomizer.randomize_light()
self.task_desc_ui.model.set_value("Random light")
def change_light_intensity(self, intensity):
"""
Change default light intensity
"""
self.stage = omni.usd.get_context().get_stage()
light_prim = self.stage.GetPrimAtPath("/World/defaultLight")
if not light_prim:
# Create basic DistantLight
omni.kit.commands.execute(
"CreatePrim",
prim_path="/World/defaultLight",
prim_type="DistantLight",
select_new_prim=False,
attributes={pxr.UsdLux.Tokens.angle: 1.0, pxr.UsdLux.Tokens.intensity: 1000},
create_default_xform=True,
)
light_prim = self.stage.GetPrimAtPath("/World/defaultLight")
light_prim.GetAttribute("intensity").Set(float(intensity))
def enable_isosurface(self, enable = False):
"""
enable isosurface for water scene
"""
self.use_isosurface = enable
dialog = MessageDialog(
title="Isosurface",
message=f"Enabled iso surface: {self.use_isosurface} \n Please a [New Scene] and [Load Scene] for water task again.",
disable_cancel_button=True,
ok_handler=lambda dialog: dialog.hide()
)
dialog.show()
################################################################################################
######################################## Load / Record #########################################
################################################################################################
def init_new_house(self):
"""
        Initialize HouseNew for recording/loading task info
"""
task_index = self.task_type_ui.model.get_item_value_model().get_value_as_int()
task_type = self.task_types[task_index]
task_id = self.task_id_ui.model.get_value_as_int()
robot_id = self.robot_id_ui.model.get_value_as_int()
anchor_id = self.anchor_id_ui.model.get_value_as_int()
mission_id = self.mission_id_ui.model.get_value_as_int()
house_id = self.house_id_ui.model.get_value_as_int()
annotator_index = self.annotator_ui.model.get_item_value_model().get_value_as_int()
annotator = self.annotators[annotator_index]
self.house = HouseNew(task_type, task_id, robot_id, mission_id, house_id, anchor_id, annotator)
# self.house.build_HUD()
# print("robot", self.house.robot_id)
def record_scene(self):
"""
Record obj + robot + house
"""
self.init_new_house()
self.house.record_obj_info()
self.house.record_robot_info()
self.house.record_house_info()
self.task_desc_ui.model.set_value("Scene recorded! Please start a new empty scene [Load scene] \n Note: you don't have to save the current stage.")
dialog = MessageDialog(
title="Scene Recorded",
message=f"Scene recorded! \nPlease start a [New scene] and then [Load scene] \nNote: you don't have to save the current stage.",
disable_cancel_button=True,
ok_handler=lambda dialog: dialog.hide()
)
dialog.show()
def record_obj_new(self):
"""
New pipeline to record game objects
"""
self.init_new_house()
self.house.record_obj_info()
self.task_desc_ui.model.set_value("object location recorded!")
def record_robot_new(self):
"""
New pipeline to record game robots
"""
self.init_new_house()
self.house.record_robot_info()
# if BaseChecker.SUCCESS_UI:
# BaseChecker.SUCCESS_UI.model.set_value("robot id (robot variation) recorded")
self.task_desc_ui.model.set_value("robot location recorded!")
def record_house_new(self):
self.init_new_house()
self.house.record_house_info()
# if BaseChecker.SUCCESS_UI:
# BaseChecker.SUCCESS_UI.model.set_value("house-anchor recorded")
self.task_desc_ui.model.set_value("game location in house recorded!")
def load_scene(self):
"""
Load obj + robot + house
"""
self.stage = omni.usd.get_context().get_stage()
pxr.UsdGeom.SetStageUpAxis(self.stage, pxr.UsdGeom.Tokens.y)
if self.stage.GetPrimAtPath("/World/game"):
dialog = MessageDialog(
title="Load scene",
message=f"Already have `/World/game` in the scene. Please start a new stage.",
disable_cancel_button=True,
ok_handler=lambda dialog: dialog.hide()
)
dialog.show()
return
dialog = MessageDialog(
title="Loading scene ......",
message=f"Please wait ......",
disable_cancel_button=True,
ok_handler=lambda dialog: dialog.hide()
)
dialog.show()
self.load_obj_new()
self.load_robot_new()
self.load_house_new()
# focus on game
selection = omni.usd.get_context().get_selection()
selection.clear_selected_prim_paths()
selection.set_prim_path_selected("/World/game", True, True, True, True)
viewport = omni.kit.viewport_legacy.get_viewport_interface()
viewport = viewport.get_viewport_window() if viewport else None
if viewport:
viewport.focus_on_selected()
else:
from omni.kit.viewport.utility import frame_viewport_selection
frame_viewport_selection(force_legacy_api=True)
selection.clear_selected_prim_paths()
dialog.hide()
dialog2 = MessageDialog(
title="Loading scene ......",
message=f"Loading scene complete!",
disable_cancel_button=True,
ok_handler=lambda dialog2: dialog2.hide()
)
dialog2.show()
def load_obj_new(self):
"""
New pipeline to load game objs
"""
stage = omni.usd.get_context().get_stage()
default_prim_path = stage.GetDefaultPrim().GetPath()
if default_prim_path.pathString == '':
# default_prim_path = pxr.Sdf.Path('/World')
root = pxr.UsdGeom.Xform.Define(stage, "/World").GetPrim()
stage.SetDefaultPrim(root)
default_prim_path = stage.GetDefaultPrim().GetPath()
self.init_new_house()
self.house.load_obj_info(relative=True)
task_index = self.task_type_ui.model.get_item_value_model().get_value_as_int()
task_type = self.task_types[task_index]
# fix linear joint scale
if task_type in ["open_drawer","open_cabinet", "open_door", \
"close_drawer", "close_cabinet", "close_door", "tap_water"]:
if task_type in ["open_door", "close_door"]:
self.fix_linear_joint(fix_driver=True, damping_cofficient=1000)
elif task_type in ["tap_water"]:
self.fix_linear_joint(fix_driver=True, damping_cofficient=100)
else:
self.fix_linear_joint(fix_driver=True, damping_cofficient=10)
if task_type in ["pour_water", "transfer_water", "tap_water"]:
self.add_liquid_to_cup(task_type, self.use_isosurface)
def load_robot_new(self):
"""
        New pipeline to load the robot
"""
self.is_initial_setup = False
self.init_new_house()
self.setup_robot(new_method=True)
franka_prim = omni.usd.get_context().get_stage().GetPrimAtPath("/World/game/franka")
if franka_prim:
add_semantics(franka_prim, "franka")
def load_house_new(self):
self.stage = omni.usd.get_context().get_stage()
self.init_new_house()
self.load_house_successful = self.house.load_house_info()
# if load house successfully, randomize sky, floor, and wall
if self.load_house_successful:
floor_prim = self.stage.GetPrimAtPath("/World/layout/floor")
if floor_prim:
add_semantics(floor_prim, "floor")
furniture_prim = self.stage.GetPrimAtPath("/World/layout/furniture")
if furniture_prim:
add_semantics(furniture_prim, "furniture")
wall_prim = self.stage.GetPrimAtPath("/World/layout/roomStruct")
if wall_prim:
add_semantics(wall_prim, "wall")
# from .layout.randomizer import Randomizer
# if not hasattr(self, "house_randomizer"):
# self.house_randomizer = Randomizer(None)
# self.house_randomizer.randomize_house(randomize_floor=True, randomize_wall=True)
# if IS_IN_CREAT:
# self.house_randomizer.randomize_sky()
self.randomize_material(rand=True)
# self.randomize_sky(sky_type="")
################################################################################################
######################################## Second window #########################################
################################################################################################
# pass
###################################################################################
################################ Robot ######################################
###################################################################################
def setup_robot(self, new_method = False):
"""
        Set up the robot in the current example
"""
# get the game xform as the parent for the robot
self.stage = omni.usd.get_context().get_stage()
#game_xform = self.stage.GetPrimAtPath("/World/game")
robot_parent_path = "/World/game"
has_game_xform = True
if not self.stage.GetPrimAtPath(robot_parent_path):
has_game_xform = False
xform_game = pxr.UsdGeom.Xform.Define(self.stage, robot_parent_path)
xform_game.AddTranslateOp().Set(pxr.Gf.Vec3f(0.0, 0.0, 0.0))
xform_game.AddOrientOp().Set(pxr.Gf.Quatf(1.0, 0.0, 0.0, 0.0))
xform_game.AddScaleOp().Set(pxr.Gf.Vec3f(1.0, 1.0, 1.0))
        # retrieve timeline
# _timeline = omni.timeline.get_timeline_interface()
# _timeline.play() # default not playing
if not new_method:
# old method
# load json info from example
task_index = self.task_type_ui.model.get_item_value_model().get_value_as_int()
task_type = self.task_types[task_index]
task_id = self.task_id_ui.model.get_value_as_int()
house_id = self.house_id_ui.model.get_value_as_int()
object_id = self.object_id_ui.model.get_value_as_int()
task_json = os.path.join(DATA_PATH_ROOT, "tasks", task_type, str(house_id), str(object_id), str(task_id) + ".json")
print("task json: ", task_json)
has_robot_info = False
if os.path.exists(task_json):
# raise Exception( "The json file at path {} provided wasn't found".format(room_layout_json) )
layout = json.load(open(task_json))
if "robot" in layout:
position = layout["robot"]["position"]
rotation = layout["robot"]["rotation"]
has_robot_info = True
# if there is no robot information / or no game_xform
if not has_robot_info or not has_game_xform:
carb.log_warn("Don't know the location/rotation for the robot")
position = [0,0,0]
rotation = [-0.5,0.5,0.5,0.5]
# new robot loading method
else:
#from .layout.house_new import HouseNew
self.init_new_house()
position, rotation = self.house.load_robot_info()
# print("position, rotation ", np.array(position), np.array(rotation))
if False: # (not self.is_initial_setup) and IS_IN_ISAAC_SIM:
# target_path = "/World/game/mobility_Bottle_3618"
target_path = None
for target_prim in self.stage.GetPrimAtPath("/World/game").GetChildren():
if "mobility" in target_prim.GetPath().pathString:
target_path = target_prim.GetPath().pathString
if target_path is None:
raise Exception("Must have a game object with mobility in the scene")
# self.franka = FrankabotKeyboard()
self.franka = FrankabotGamePad(target_path, position=np.array(position), rotation=np.array(rotation), parent_path=robot_parent_path)
else:
franka_path = os.path.join(ROBOT_PATH, "franka/franka.usd")
# load robot
robot_prim = self.stage.GetPrimAtPath(robot_parent_path + "/franka")
if not robot_prim.IsValid():
robot_prim = self.stage.DefinePrim(robot_parent_path + "/franka")
success_bool = robot_prim.GetReferences().AddReference(franka_path)
if not success_bool:
raise Exception("The usd file at path {} provided wasn't found".format(franka_path))
# set robot xform
# robot_xform = pxr.UsdGeom.Xformable.Get(self.stage, robot_prim.GetPath())
# print("position $ rotation: ", position[0], position[1], position[2], rotation)
robot_xform_mat = pxr.Gf.Matrix4d().SetScale([1,1,1]) * \
pxr.Gf.Matrix4d().SetRotate(pxr.Gf.Quatf(float(rotation[0]), float(rotation[1]), float(rotation[2]), float(rotation[3]))) * \
pxr.Gf.Matrix4d().SetTranslate([float(position[0]), float(position[1]), float(position[2])])
omni.kit.commands.execute(
"TransformPrimCommand",
path=robot_prim.GetPath().pathString,
new_transform_matrix=robot_xform_mat,
)
# robot_xform.AddTranslateOp().Set(pxr.Gf.Vec3f(float(position[0]), float(position[1]), float(position[2])))
# robot_xform.AddOrientOp().Set(pxr.Gf.Quatf(float(rotation[0]), float(rotation[1]), float(rotation[2]), float(rotation[3])))
# robot_xform.AddScaleOp().Set(pxr.Gf.Vec3f(1.0, 1.0, 1.0))
# selection = omni.usd.get_context().get_selection()
# selection.clear_selected_prim_paths()
# selection.set_prim_path_selected(robot_parent_path + "/franka", True, True, True, True)
# setup physics
from pxr import PhysxSchema, UsdPhysics
physicsScenePath = "/World/physicsScene"
scene = UsdPhysics.Scene.Get(self.stage, physicsScenePath)
if not scene:
scene = UsdPhysics.Scene.Define(self.stage, physicsScenePath)
self._gravityDirection = pxr.Gf.Vec3f(0.0, -1.0, 0.0)
self._gravityMagnitude = 981
scene.CreateGravityDirectionAttr().Set(self._gravityDirection)
scene.CreateGravityMagnitudeAttr().Set(self._gravityMagnitude)
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Apply(scene.GetPrim())
physxSceneAPI.CreateEnableCCDAttr().Set(True)
physxSceneAPI.GetTimeStepsPerSecondAttr().Set(60)
physxSceneAPI.CreateEnableGPUDynamicsAttr().Set(True)
physxSceneAPI.CreateEnableEnhancedDeterminismAttr().Set(True)
physxSceneAPI.CreateEnableStabilizationAttr().Set(True)
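    # The physics-scene block above follows a reusable pattern; a standalone sketch
    # (the helper name and default path are illustrative, not part of this extension):
    #
    #   from pxr import Gf, PhysxSchema, UsdPhysics
    #   def ensure_physics_scene(stage, path="/World/physicsScene"):
    #       scene = UsdPhysics.Scene.Get(stage, path)
    #       if not scene:
    #           scene = UsdPhysics.Scene.Define(stage, path)
    #       scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, -1.0, 0.0))
    #       scene.CreateGravityMagnitudeAttr().Set(981)
    #       api = PhysxSchema.PhysxSceneAPI.Apply(scene.GetPrim())
    #       api.CreateEnableCCDAttr().Set(True)
    #       api.GetTimeStepsPerSecondAttr().Set(60)
    #       return scene
    #
    # The magnitude 981 with a -Y direction suggests a centimeter-scale, Y-up stage,
    # consistent with the pxr.UsdGeom.SetStageUpAxis call in on_startup().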
def fix_linear_joint(self, fix_driver = True, damping_cofficient = 1):
"""
Fix the linear joint limit when scaling an object
"""
self.stage = omni.usd.get_context().get_stage()
prim_list = self.stage.TraverseAll()
for prim in prim_list:
if "joint_" in str(prim.GetPath()):
if fix_driver:
# find linear drive
joint_driver = pxr.UsdPhysics.DriveAPI.Get(prim, "linear")
if joint_driver:
joint_driver.CreateDampingAttr(damping_cofficient)
                    # find angular drive
joint_driver = pxr.UsdPhysics.DriveAPI.Get(prim, "angular")
if joint_driver:
joint_driver.CreateDampingAttr(damping_cofficient)
                # find linear joint upper limit
joint = pxr.UsdPhysics.PrismaticJoint.Get(self.stage, prim.GetPath())
if joint:
upper_limit = joint.GetUpperLimitAttr().Get() #GetAttribute("xformOp:translate").Get()
print(prim.GetPath(), "upper_limit", upper_limit)
mobility_prim = prim.GetParent().GetParent()
mobility_xform = pxr.UsdGeom.Xformable.Get(self.stage, mobility_prim.GetPath())
scale_factor = mobility_xform.GetOrderedXformOps()[2].Get()[0]
print("scale_factor", scale_factor)
joint.CreateUpperLimitAttr(upper_limit * scale_factor / 100)
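    # Worked example for the limit rescaling above (numbers are illustrative): if a
    # drawer's prismatic joint was authored with upper_limit = 40 and the mobility
    # xform's third xform op (assumed here to be the scale op) reports 50, the new
    # limit becomes 40 * 50 / 100 = 20, so the joint travel shrinks in proportion to
    # the object's scale. The division by 100 appears to normalize the authored scale
    # value back to a unitless ratio.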
###################################################################################
################################ Liquid ######################################
###################################################################################
def init_fluid_helper(self, use_isosurface = False):
from .layout.fluid.cup_setup import CupFluidHelper
# cup_id = 0 # self.cup_id_ui.model.get_value_as_int()
# r = self.r_ui.model.get_value_as_float()
# g = self.g_ui.model.get_value_as_float()
# b = self.b_ui.model.get_value_as_float()
self.cup_fluid_helper = CupFluidHelper(use_isosurface)
# def set_up_fluid_helper(self):
# # Fluid System setup
# self.init_fluid_helper()
# self.cup_fluid_helper.create()
def add_liquid_to_cup(self, task_type, use_isosurface = False):
self.init_fluid_helper(use_isosurface)
self.stage = omni.usd.get_context().get_stage()
game_prim = self.stage.GetPrimAtPath("/World/game")
enable_physics = True
if task_type == 'tap_water':
enable_physics = False
for prim in game_prim.GetChildren():
if "mobility_" in prim.GetPath().pathString and task_type in ["pour_water", "transfer_water"]:
self.cup_fluid_helper.modify_cup_scene(prim, add_liquid = True, set_physics = enable_physics)
elif "container_" in prim.GetPath().pathString:
self.cup_fluid_helper.modify_cup_scene(prim, add_liquid = False, set_physics = enable_physics)
###################################################################################
################################ Play and Record #############################
###################################################################################
def init_franka_tensor(self):
"""
Init franka tensor controller
"""
from .param import APP_VERION
assert APP_VERION >= "2022.0.0", "need Omniverse Isaac-Sim/Create in 2022"
task_index = self.task_type_ui.model.get_item_value_model().get_value_as_int()
task_type = self.task_types[task_index]
task_id = self.task_id_ui.model.get_value_as_int()
# robot_id = self.robot_id_ui.model.get_value_as_int()
# mission_id = self.mission_id_ui.model.get_value_as_int()
house_id = self.house_id_ui.model.get_value_as_int()
# anchor_id = self.anchor_id_ui.model.get_value_as_int()
annotator_index = self.annotator_ui.model.get_item_value_model().get_value_as_int()
annotator = ANNOTATORS[annotator_index]
root_dir = '-'.join([str(os.path.join(SAVE_ROOT, annotator, task_type)),str(task_id), str(house_id)])#, \
#str(robot_id), str(mission_id), str(house_id), str(anchor_id)])
traj_dir = os.path.join(root_dir, TRAJ_FOLDER)
# print("traj_dir", traj_dir)
from .robot_setup.franka_tensor import FrankaTensor
self.ft = FrankaTensor(save_path=traj_dir)
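    # Illustrative example of the directory layout produced above, assuming SAVE_ROOT
    # ends in "data_record", annotator "MyLuckyUser", task_type "pickup_object",
    # task_id 3 and house_id 1:
    #
    #   root_dir = ".../data_record/MyLuckyUser/pickup_object-3-1"
    #   traj_dir = ".../data_record/MyLuckyUser/pickup_object-3-1/trajectory"
    #
    # i.e. the take folder joins the task type, object id and house id with "-", and
    # FrankaTensor is expected to write its trajectory files under TRAJ_FOLDER.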
def stop_record(self):
"""
Stop recording button
"""
if not hasattr(self, "ft"):
self.timeline.stop()
carb.log_error( "please load layout and start recording first")
return
self.ft.is_record = False
self.ft.is_replay = False
self.timeline.stop()
self.task_desc_ui.model.set_value("Stop.")
def replay_record(self):
"""
Replay recording button
"""
self.init_franka_tensor()
self.ft.is_replay = True
self.ft.is_record = False
self.ft.load_record()
self.timeline.play()
self.task_desc_ui.model.set_value("Start replaying...")
def start_record(self):
"""
Play and record
"""
self.init_franka_tensor()
self.ft.is_replay = False
self.ft.is_record = True
import shutil
if os.path.exists(self.ft.save_path):
shutil.rmtree(self.ft.save_path)
os.makedirs(self.ft.save_path, exist_ok=True)
self.timeline.play()
self.task_desc_ui.model.set_value("Start recording...")
def set_render_type(self, render_type):
"""
Set up rendering type for current camera
"""
self.render_helper.reset()
self.render_helper.render_type = render_type
print("Setting render_type", self.render_helper.render_type)
def render_an_image(self):
"""
Render an image to render folder according render type
"""
task_index = self.task_type_ui.model.get_item_value_model().get_value_as_int()
task_type = self.task_types[task_index]
task_id = self.task_id_ui.model.get_value_as_int()
house_id = self.house_id_ui.model.get_value_as_int()
self.render_helper.render_image(self.render_folder, prefix = f"{task_type}_{task_id}_{house_id}")
self.task_desc_ui.model.set_value("image captured!")
######################## ui ###############################
def _build_custom_frame_header(self, collapsed, text):
"""
        When the task layout UI collapses, show an ID notice for the task, object, and house ids
"""
if collapsed:
alignment = ui.Alignment.RIGHT_CENTER
width = 8
height = 8
else:
alignment = ui.Alignment.CENTER_BOTTOM
width = 8
height = 8
with ui.HStack():
ui.Spacer(width=8)
with ui.VStack(width=0):
ui.Spacer()
ui.Triangle(
style = {"Triangle": {"background_color": 0xDDDDDDDD}}, width=width, height=height, alignment=alignment
)
ui.Spacer()
ui.Spacer(width=8)
ui.Label(text, width = 100)
if collapsed:
self.id_note_ui = CustomIdNotice()
# print("on_task_layout_ui_collapse", task_block_collapsed)
self.id_note_ui.ui.visible = collapsed
task_index = self.task_type_ui.model.get_item_value_model().get_value_as_int()
task_type = self.task_types[task_index]
task_id = self.task_id_ui.model.get_value_as_int()
robot_id = self.robot_id_ui.model.get_value_as_int()
anchor_id = self.anchor_id_ui.model.get_value_as_int()
mission_id = self.mission_id_ui.model.get_value_as_int()
house_id = self.house_id_ui.model.get_value_as_int()
self.id_note_ui.task_ui.text = task_type
self.id_note_ui.object_ui.text = f"Object: {task_id}"
self.id_note_ui.house_ui.text = f"House: {house_id}"
    ############################# shut down #########################
def on_shutdown(self):
print("[vrkitchen.indoor.kit] VRKitchen2.0-Indoor-Kit shutdown")
############################# debug #############################
def debug(self):
"""
Debug
"""
print("debug") | 48,704 | Python | 45.697028 | 196 | 0.528129 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/param.py | import omni
import carb
import os
from pathlib import Path
EXTENSION_FOLDER_PATH = Path(
omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__)
)
ROOT = str(EXTENSION_FOLDER_PATH.parent.parent.resolve())
# ROOT = str(Path(__file__).parent.joinpath("../../../../../").resolve())
print("EXTENSION_FOLDER_PATH", EXTENSION_FOLDER_PATH, "ROOT", ROOT)
IS_IN_ISAAC_SIM = str(carb.settings.get_settings().get("/app/window/title")).startswith("Isaac Sim")
IS_IN_CREAT = str(carb.settings.get_settings().get("/app/window/title")).startswith("Create")
IS_IN_CODE = str(carb.settings.get_settings().get("/app/window/title")).startswith("Code")
APP_VERION = str(carb.settings.get_settings().get("/app/version"))
assert APP_VERION >= "2022.1.0", "Please start Isaac-Sim/Create/Code with version no small than 2022.1.0"
print("APP name: ", str(carb.settings.get_settings().get("/app/window/title")), APP_VERION)
# root = '/home/yizhou/Research/'
# root = '/home/vince/Documents/Research/'
# ROOT = '/home/nikepupu/Desktop' if IS_IN_ISAAC_SIM else 'E:/researches'
# Asset paths
ASSET_PATH = ROOT + "/exts/vrkitchen.indoor.kit/asset/"
SAPIEN_ASSET_PATH = ASSET_PATH + "/Sapien/"
HOUSE_INFO_PATH = ASSET_PATH + "/3DFront/"
CUSTOM_ASSET_PATH = ASSET_PATH + "/Custom/"
# STORAGE_ASSET_PATH = ROOT + "/asset/sapien_parsed/StorageFurniture/"
# Data path
DATA_PATH_ROOT = ROOT + "/data/"
DATA_PATH_NEW = DATA_PATH_ROOT + "/data_auto/"
SAVE_ROOT = DATA_PATH_ROOT + '/data_record/'
RENDER_ROOT = DATA_PATH_ROOT + '/data_render/'
#
ROBOT_PATH = ASSET_PATH + "Robot/"
ORIGINAL_IMAGES_FORLDER = "raw_images"
TRAJ_FOLDER = "trajectory"
DEPTH_IMAGES_FOLDER = "depth_images"
SEMANTIC_IMAGES_FOLDER = "semantic_images"
USE_ISO_SURFACE = False
#Annotator
ANNOTATORS = [
"MyLuckyUser",
]
# Task
TASK_TYPES = ["pickup_object","reorient_object", "pour_water",
"open_drawer"] # ,"open_cabinet", "put_object_into_box", "open_door", "transfer_water",
#"close_drawer", "close_cabinet", "close_door", "take_object_out_box"]
#Objects
OBJECT_TYPES = ["Bottle", "Box", "Door", "Faucet", "LightSwitch", "Microwave", "StorageFurniture"]
# Task objects
GAME_OBJ_NAMES = ["mobility", "switch", "SM_", "watercup", "fluid"]
CONTAINER_NAMES = ["box", "cup"]
OTHER_OBJ_NAMES = ["basin"]
# Physics
RIGIDBODY_OBJ_TYPES = ["Bottle", "SM_"] | 2,364 | Python | 32.309859 | 105 | 0.681895 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/render/helper.py | import math
import time
import typing
import asyncio
import carb
import omni
import numpy as np
from PIL import Image
import os
import omni.syntheticdata as syn
from omni.kit.window.popup_dialog import MessageDialog
class CustomSyntheticDataHelper:
def __init__(self):
# initialize syntheticdata extension
self.app = omni.kit.app.get_app_interface()
ext_manager = self.app.get_extension_manager()
if not ext_manager.is_extension_enabled("omni.syntheticdata"):
ext_manager.set_extension_enabled("omni.syntheticdata", True)
self.reset()
def reset(self):
# viewport
self.render_type = "Rgb"
# viewport = omni.kit.viewport_legacy.get_viewport_interface()
# viewport_handle = viewport.get_instance("Viewport")
from omni.kit.viewport.utility import get_active_viewport
self.viewport = get_active_viewport()
self.viewport_window = omni.kit.viewport.utility.get_viewport_from_window_name() # viewport.get_viewport_window(None)
self.timeline = omni.timeline.get_timeline_interface()
def render_image(self, export_folder = None, prefix = ""):
print("rendering image...")
self.stage = omni.usd.get_context().get_stage()
# get camera
# self.viewport_window.set_texture_resolution(*resolution)
camera_name = self.viewport_window.get_active_camera().pathString.replace("/","")
# set up export folder
if export_folder:
if not os.path.exists(export_folder):
os.makedirs(export_folder, exist_ok=True)
time_str = str(int(self.timeline.get_current_time() * self.stage.GetTimeCodesPerSecond()))
img_save_path = f"{export_folder}/{prefix}_{camera_name}_{self.render_type}_{time_str}.png"
# get render type
# synthetic_type = syn._syntheticdata.SensorType.Rgb
# if self.render_type == "Depth":
# synthetic_type = syn._syntheticdata.SensorType.DepthLinear
# elif self.render_type == "Semantic":
# synthetic_type = syn._syntheticdata.SensorType.SemanticSegmentation
# render and export
async def render_img():
# Render one frame
await omni.kit.app.get_app().next_update_async()
syn.sensors.enable_sensors(
self.viewport,
[
syn._syntheticdata.SensorType.Rgb,
syn._syntheticdata.SensorType.DepthLinear,
syn._syntheticdata.SensorType.SemanticSegmentation,
syn._syntheticdata.SensorType.InstanceSegmentation
],
)
# # await syn.sensors.initialize_async(self.viewport_window, [])
# await syn.sensors.next_sensor_data_async(self.viewport, True)
# if self.render_type == "Depth":
# from omni.syntheticdata.scripts.visualize import get_depth
# data = get_depth(self.viewport_window, mode = "linear")
# # print("img", data.shape)
# img = Image.fromarray(data.astype(np.uint8))
if self.render_type == "Depth":
await syn.sensors.next_sensor_data_async(self.viewport)
data = syn.sensors.get_depth_linear(self.viewport)
print("depthimg", data.shape)
img = Image.fromarray(data.astype(np.uint8))
elif self.render_type == "Semantic":
await syn.sensors.next_sensor_data_async(self.viewport)
data = syn.sensors.get_instance_segmentation(self.viewport, parsed = True)
img = Image.fromarray(data.astype(np.uint8))
else:
await syn.sensors.next_sensor_data_async(self.viewport)
data = syn.sensors.get_rgb(self.viewport)
print("img", data.shape, data.dtype)
img = Image.fromarray(data)
if export_folder:
img.save(img_save_path)
print("image saved at path: ", img_save_path)
dialog = MessageDialog(
title="Image capture",
message=f"Screenshot captured!",
disable_cancel_button=True,
ok_handler=lambda dialog: dialog.hide()
)
dialog.show()
asyncio.ensure_future(render_img())
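    # A minimal usage sketch, assuming this helper runs inside a Kit app with an active
    # viewport (the export folder and prefix are illustrative):
    #
    #   helper = CustomSyntheticDataHelper()
    #   helper.render_type = "Depth"  # one of "Rgb", "Depth", "Semantic"
    #   helper.render_image("/tmp/renders", prefix="pour_water_0_0")
    #
    # render_image() schedules the capture asynchronously, so the PNG only appears
    # after the next few app updates rather than immediately on return.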
| 4,492 | Python | 36.756302 | 126 | 0.585931 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/ui/style.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["julia_modeler_style"]
from omni.ui import color as cl
from omni.ui import constant as fl
from omni.ui import url
import omni.kit.app
import omni.ui as ui
import pathlib
EXTENSION_FOLDER_PATH = pathlib.Path(
omni.kit.app.get_app().get_extension_manager().get_extension_path_by_module(__name__)
)
ATTR_LABEL_WIDTH = 150
BLOCK_HEIGHT = 22
TAIL_WIDTH = 35
WIN_WIDTH = 400
WIN_HEIGHT = 930
# Pre-defined constants. It's possible to change them at runtime.
cl.window_bg_color = cl(0.2, 0.2, 0.2, 1.0)
cl.window_title_text = cl(.9, .9, .9, .9)
cl.collapsible_header_text = cl(.8, .8, .8, .8)
cl.collapsible_header_text_hover = cl(.95, .95, .95, 1.0)
cl.main_attr_label_text = cl(.65, .65, .65, 1.0)
cl.main_attr_label_text_hover = cl(.9, .9, .9, 1.0)
cl.multifield_label_text = cl(.65, .65, .65, 1.0)
cl.combobox_label_text = cl(.65, .65, .65, 1.0)
cl.field_bg = cl(0.18, 0.18, 0.18, 1.0)
cl.field_border = cl(1.0, 1.0, 1.0, 0.2)
cl.btn_border = cl(1.0, 1.0, 1.0, 0.4)
cl.slider_fill = cl(1.0, 1.0, 1.0, 0.3)
cl.revert_arrow_enabled = cl(.25, .5, .75, 1.0)
cl.revert_arrow_disabled = cl(.75, .75, .75, 1.0)
cl.transparent = cl(0, 0, 0, 0)
fl.main_label_attr_hspacing = 10
fl.attr_label_v_spacing = 3
fl.collapsable_group_spacing = 2
fl.outer_frame_padding = 15
fl.tail_icon_width = 15
fl.border_radius = 3
fl.border_width = 1
fl.window_title_font_size = 18
fl.field_text_font_size = 14
fl.main_label_font_size = 14
fl.multi_attr_label_font_size = 14
fl.radio_group_font_size = 14
fl.collapsable_header_font_size = 13
fl.range_text_size = 10
url.closed_arrow_icon = f"{EXTENSION_FOLDER_PATH}/icons/closed.svg"
url.open_arrow_icon = f"{EXTENSION_FOLDER_PATH}/icons/opened.svg"
url.revert_arrow_icon = f"{EXTENSION_FOLDER_PATH}/icons/revert_arrow.svg"
url.checkbox_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/checkbox_on.svg"
url.checkbox_off_icon = f"{EXTENSION_FOLDER_PATH}/icons/checkbox_off.svg"
url.radio_btn_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/radio_btn_on.svg"
url.radio_btn_off_icon = f"{EXTENSION_FOLDER_PATH}/icons/radio_btn_off.svg"
url.diag_bg_lines_texture = f"{EXTENSION_FOLDER_PATH}/icons/diagonal_texture_screenshot.png"
####################### Indoor Kit ###########################################
# url.start_btn_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/random.svg"
url.start_btn_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/toolbar_play.svg"
url.replay_btn_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/toolbar_replay.svg"
url.stop_btn_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/toolbar_stop.svg"
url.pause_btn_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/timeline_pause.svg"
url.pencil_btn_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/pencil.svg"
url.open_folder_btn_on_icon = f"{EXTENSION_FOLDER_PATH}/icons/open_folder.svg"
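# Because widgets reference these cl.*, fl.* and url.* entries as named constants,
# a theme tweak only needs to override the constant; a minimal sketch with an
# arbitrary illustrative color:
#
#   from omni.ui import color as cl
#   cl.btn_border = cl(1.0, 0.5, 0.0, 0.6)  # give all bordered buttons an orange edge
#
# As noted above, the constants are designed to be changeable at runtime, so widgets
# styled with julia_modeler_style pick up the new value without editing this dict.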
# The main style dict
julia_modeler_style = {
"Button::tool_button": {
"background_color": cl.field_bg,
"margin_height": 8,
"margin_width": 6,
"border_color": cl.btn_border,
"border_width": fl.border_width,
"font_size": fl.field_text_font_size,
},
"CollapsableFrame::group": {
"margin_height": fl.collapsable_group_spacing,
"background_color": cl.transparent,
},
# TODO: For some reason this ColorWidget style doesn't respond much, if at all (ie, border_radius, corner_flag)
"ColorWidget": {
"border_radius": fl.border_radius,
"border_color": cl(0.0, 0.0, 0.0, 0.0),
},
"Field": {
"background_color": cl.field_bg,
"border_radius": fl.border_radius,
"border_color": cl.field_border,
"border_width": fl.border_width,
},
"Field::attr_field": {
"corner_flag": ui.CornerFlag.RIGHT,
"font_size": 2, # fl.field_text_font_size, # Hack to allow for a smaller field border until field padding works
},
"Field::attribute_color": {
"font_size": fl.field_text_font_size,
},
"Field::multi_attr_field": {
"padding": 4, # TODO: Hacky until we get padding fix
"font_size": fl.field_text_font_size,
},
"Field::path_field": {
"corner_flag": ui.CornerFlag.RIGHT,
"font_size": fl.field_text_font_size,
},
"HeaderLine": {"color": cl(.5, .5, .5, .5)},
"Image::collapsable_opened": {
"color": cl.collapsible_header_text,
"image_url": url.open_arrow_icon,
},
"Image::collapsable_opened:hovered": {
"color": cl.collapsible_header_text_hover,
"image_url": url.open_arrow_icon,
},
"Image::collapsable_closed": {
"color": cl.collapsible_header_text,
"image_url": url.closed_arrow_icon,
},
"Image::collapsable_closed:hovered": {
"color": cl.collapsible_header_text_hover,
"image_url": url.closed_arrow_icon,
},
"Image::radio_on": {"image_url": url.radio_btn_on_icon},
"Image::radio_off": {"image_url": url.radio_btn_off_icon},
"Image::revert_arrow": {
"image_url": url.revert_arrow_icon,
"color": cl.revert_arrow_enabled,
},
"Image::revert_arrow:disabled": {
"image_url": url.revert_arrow_icon,
"color": cl.revert_arrow_disabled
},
"Image::revert_arrow_task_type": {
"image_url": url.revert_arrow_icon,
"color": cl.revert_arrow_enabled,
},
"Image::revert_arrow_task_type:disabled": {
"image_url": url.pencil_btn_on_icon,
"color": cl.revert_arrow_disabled
},
"Image::open_folder": {
"image_url": url.open_folder_btn_on_icon,
"color": cl.revert_arrow_disabled
},
"Image::checked": {"image_url": url.checkbox_on_icon},
"Image::unchecked": {"image_url": url.checkbox_off_icon},
"Image::slider_bg_texture": {
"image_url": url.diag_bg_lines_texture,
"border_radius": fl.border_radius,
"corner_flag": ui.CornerFlag.LEFT,
},
"Label::attribute_name": {
"alignment": ui.Alignment.RIGHT_TOP,
"margin_height": fl.attr_label_v_spacing,
"margin_width": fl.main_label_attr_hspacing,
# "color": "lightsteelblue",
"font_size": fl.main_label_font_size,
},
"Label::attribute_name:hovered": {"color": cl.main_attr_label_text_hover},
"Label::collapsable_name": {"font_size": fl.collapsable_header_font_size},
"Label::multi_attr_label": {
"color": cl.multifield_label_text,
"font_size": fl.multi_attr_label_font_size,
},
"Label::radio_group_name": {
"font_size": fl.radio_group_font_size,
"alignment": ui.Alignment.CENTER,
"color": cl.main_attr_label_text,
},
"Label::range_text": {
"font_size": fl.range_text_size,
},
"Label::window_title": {
"font_size": fl.window_title_font_size,
"color": cl.window_title_text,
},
"ScrollingFrame::window_bg": {
"background_color": cl.window_bg_color,
"padding": fl.outer_frame_padding,
"border_radius": 20 # Not obvious in a window, but more visible with only a frame
},
"Slider::attr_slider": {
"draw_mode": ui.SliderDrawMode.FILLED,
"padding": 0,
"color": cl.transparent,
# Meant to be transparent, but completely transparent shows opaque black instead.
"background_color": cl(0.28, 0.28, 0.28, 0.01),
"secondary_color": cl.slider_fill,
"border_radius": fl.border_radius,
"corner_flag": ui.CornerFlag.LEFT, # TODO: Not actually working yet OM-53727
},
# Combobox workarounds
"Rectangle::combobox": { # TODO: remove when ComboBox can have a border
"background_color": cl.field_bg,
"border_radius": fl.border_radius,
"border_color": cl.btn_border,
"border_width": fl.border_width,
},
"ComboBox::dropdown_menu": {
"color": "lightsteelblue", # label color
"padding_height": 1.25,
"margin": 2,
"background_color": cl.field_bg,
"border_radius": fl.border_radius,
"font_size": fl.field_text_font_size,
"secondary_color": cl.transparent, # button background color
},
"Rectangle::combobox_icon_cover": {"background_color": cl.field_bg},
################## VRKitchen Indoor Kit ###############
"Field::choose_id": {
"margin": 8,
},
"Button::record_button": {
"background_color": cl.field_bg,
"border_color": cl.btn_border,
"border_width": fl.border_width,
"border_radius": 6,
"margin": 4,
"corner_flag": ui.CornerFlag.ALL,
},
"Button::load_button": {
"background_color": cl.field_bg,
"border_color": cl.btn_border,
"border_width": fl.border_width,
"border_radius": 10,
"margin": 4,
"corner_flag": ui.CornerFlag.ALL,
},
"Button::add_button": {
"background_color": cl.field_bg,
"border_color": cl.btn_border,
"border_width": fl.border_width,
"border_radius": 2,
"margin": 8,
"corner_flag": ui.CornerFlag.ALL,
},
"Button::control_button": {
"background_color": cl.field_bg,
"border_color": cl.btn_border,
"border_width": fl.border_width,
"border_radius": 4,
"margin": 2,
"corner_flag": ui.CornerFlag.ALL,
},
"Button::control_button_disabled": {
"background_color": cl(0.1, 0.7, 0.3, 0.4),
"border_color": cl.btn_border,
"border_width": fl.border_width,
"border_radius": 4,
"margin": 2,
"corner_flag": ui.CornerFlag.ALL,
},
"Button::control_button_pressed1": {
"background_color": cl( 0.7, 0.1, 0.3, 0.3),
"border_color": cl.btn_border,
"border_width": fl.border_width,
"border_radius": 4,
"margin": 2,
"corner_flag": ui.CornerFlag.ALL,
},
"Button::control_button_pressed2": {
"background_color": cl(0.1, 0.3, 0.7, 0.3),
"border_color": cl.btn_border,
"border_width": fl.border_width,
"border_radius": 4,
"margin": 2,
"corner_flag": ui.CornerFlag.ALL,
},
"Button::control_button_pressed3": {
"background_color": cl(0.7, 0.3, 0.7, 0.3),
"border_color": cl.btn_border,
"border_width": fl.border_width,
"border_radius": 4,
"margin": 2,
"corner_flag": ui.CornerFlag.ALL,
},
"Image::start_on": {
"image_url": url.start_btn_on_icon,
},
"Image::replay_on": {
"image_url": url.replay_btn_on_icon,
},
"Image::stop_on": {
"image_url": url.stop_btn_on_icon,
},
"Image::pause_on": {
"image_url": url.pause_btn_on_icon,
},
# "Image::radio_off": {"image_url": url.radio_btn_off_icon},
}
| 11,216 | Python | 33.943925 | 121 | 0.601373 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/ui/indoorkit_ui_widget.py | from typing import List, Optional
import omni
import omni.ui as ui
from .style import ATTR_LABEL_WIDTH, cl, fl
from .custom_base_widget import CustomBaseWidget
from ..robot_setup.controller import Controller
SPACING = 5
class TaskTypeComboboxWidget():
"""A customized combobox widget"""
def __init__(self,
model: ui.AbstractItemModel = None,
options: List[str] = None,
default_value=0,
on_restore_fn: callable = None,
**kwargs):
"""
        Set up the task type combo box widget
        ::params:
            :on_restore_fn: called when writing/restoring the widget
"""
self.__default_val = default_value
self.__options = options or ["1", "2", "3"]
self.__combobox_widget = None
self.on_restore_fn = on_restore_fn
# Call at the end, rather than start, so build_fn runs after all the init stuff
# CustomBaseWidget.__init__(self, model=model, **kwargs)
self.existing_model: Optional[ui.AbstractItemModel] = kwargs.pop("model", None)
self.revert_img = None
self.__attr_label: Optional[str] = kwargs.pop("label", "")
self.__frame = ui.Frame()
with self.__frame:
self._build_fn()
def destroy(self):
self.existing_model = None
self.revert_img = None
self.__attr_label = None
self.__frame = None
self.__options = None
self.__combobox_widget = None
@property
def model(self) -> Optional[ui.AbstractItemModel]:
"""The widget's model"""
if self.__combobox_widget:
return self.__combobox_widget.model
@model.setter
def model(self, value: ui.AbstractItemModel):
"""The widget's model"""
self.__combobox_widget.model = value
def _on_value_changed(self, *args):
"""Set revert_img to correct state."""
model = self.__combobox_widget.model
index = model.get_item_value_model().get_value_as_int()
self.revert_img.enabled = self.__default_val != index
def _restore_default(self):
"""Restore the default value."""
if self.revert_img.enabled:
# self.__combobox_widget.model.get_item_value_model().set_value(
# self.__default_val)
self.revert_img.enabled = False
if self.on_restore_fn:
self.on_restore_fn(True)
else:
self.revert_img.enabled = True
if self.on_restore_fn:
self.on_restore_fn(False)
def _build_body(self):
"""Main meat of the widget. Draw the Rectangle, Combobox, and
set up callbacks to keep them updated.
"""
with ui.HStack():
with ui.ZStack():
# TODO: Simplify when borders on ComboBoxes work in Kit!
# and remove style rule for "combobox" Rect
# Use the outline from the Rectangle for the Combobox
ui.Rectangle(name="combobox",
height=22)
option_list = list(self.__options)
self.__combobox_widget = ui.ComboBox(
0, *option_list,
name="dropdown_menu",
# Abnormal height because this "transparent" combobox
# has to fit inside the Rectangle behind it
height=10
)
# Swap for different dropdown arrow image over current one
with ui.HStack():
ui.Spacer() # Keep it on the right side
with ui.VStack(width=0): # Need width=0 to keep right-aligned
ui.Spacer(height=5)
with ui.ZStack():
ui.Rectangle(width=15, height=15, name="combobox_icon_cover")
ui.Image(name="collapsable_closed", width=12, height=12)
ui.Spacer(width=2) # Right margin
ui.Spacer(width=ui.Percent(5))
self.__combobox_widget.model.add_item_changed_fn(self._on_value_changed)
def _build_head(self):
"""Build the left-most piece of the widget line (label in this case)"""
ui.Label(
self.__attr_label,
width=80,
style = {"color": "lightsteelblue", "margin_height": 2, "alignment": ui.Alignment.RIGHT_TOP}
)
def _build_tail(self):
"""Build the right-most piece of the widget line. In this case,
we have a Revert Arrow button at the end of each widget line.
"""
with ui.HStack(width=0):
# ui.Spacer(width=5)
with ui.VStack(height=0):
ui.Spacer(height=3)
self.revert_img = ui.Image(
name="revert_arrow_task_type",
fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT,
width=12,
height=13,
enabled=False,
tooltip="randomly fill (or reset) task type, object id, and house id."
)
ui.Spacer(width=5)
# call back for revert_img click, to restore the default value
self.revert_img.set_mouse_pressed_fn(
lambda x, y, b, m: self._restore_default())
def _build_fn(self):
"""Puts the 3 pieces together."""
with ui.HStack():
self._build_head()
self._build_body()
self._build_tail()
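# A minimal usage sketch for TaskTypeComboboxWidget (the window name, options and
# callback are illustrative; the real wiring lives in the extension's
# build_setup_layout_window):
#
#   with ui.Window("demo", width=300).frame:
#       widget = TaskTypeComboboxWidget(
#           label="Task type:",
#           options=["pickup_object", "pour_water"],
#           on_restore_fn=lambda reset: print("reset" if reset else "randomize"),
#       )
#   index = widget.model.get_item_value_model().get_value_as_int()
#
# The revert arrow on the right toggles between "randomly fill" and "reset", which is
# why on_restore_fn receives a boolean rather than the selected index.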
class CustomRecordGroup:
STYLE = {
"Rectangle::image_button": {
"background_color": 0x0,
"border_width": 1.5,
"border_radius": 2.0,
"margin": 4,
"border_color": cl.btn_border,
"corner_flag": ui.CornerFlag.RIGHT,
},
"Rectangle::image_button:hovered": {
"background_color": 0xAAB8B8B8,
"border_width": 0,
"border_radius": 2.0,
},
"Rectangle::image_button:selected": {
"background_color": 0x0,
"border_width": 1,
"border_color": 0xFFC5911A,
"border_radius": 2.0,
},
}
def __init__(self, width = 60, height = 60,
on_click_record_fn: callable = None,
on_click_stop_fn: callable = None,
on_click_replay_fn: callable = None,
):
self.timeline = omni.timeline.get_timeline_interface()
self.on_click_record_fn = on_click_record_fn
self.on_click_stop_fn = on_click_stop_fn
self.on_click_replay_fn = on_click_replay_fn
# another ui for control
self.control_group : CustomControlGroup = None
self._selected = False
with ui.HStack():
with ui.HStack():
with ui.ZStack(width=0, height=0, spacing=0): #
with ui.Placer(offset_x=width, offset_y=0):
self.play_label = ui.Label("Record", width = 60)
with ui.Placer(offset_x=0, offset_y=0):
self.rect_play = ui.Rectangle(name="image_button", width=2 * width, height=height, style=CustomRecordGroup.STYLE)
with ui.Placer(offset_x=5, offset_y=5):
self.image_play = ui.Image(
name="start_on", width=width - 10, height=height - 10, fill_policy=ui.FillPolicy.STRETCH
)
self.rect_play.set_mouse_pressed_fn(lambda x, y, btn, a: self._on_mouse_pressed_play(btn))
with ui.ZStack(width=0, height=0, spacing=0): #
with ui.Placer(offset_x=width, offset_y=0):
self.stop_label = ui.Label("Stop", width = 60)
with ui.Placer(offset_x=0, offset_y=0):
self.rect_stop = ui.Rectangle(name="image_button", width=2 * width, height=height, style=CustomRecordGroup.STYLE)
with ui.Placer(offset_x=5, offset_y=5):
self.image_stop = ui.Image(
name="stop_on", width=width - 10, height=height - 10, fill_policy=ui.FillPolicy.STRETCH
)
self.rect_stop.set_mouse_pressed_fn(lambda x, y, btn, a: self._on_mouse_pressed_stop(btn)) #
with ui.HStack():
with ui.ZStack(width=0, height=0, spacing=0):
with ui.Placer(offset_x=width, offset_y=0):
self.replay_label = ui.Label("Replay", width = 60)
with ui.Placer(offset_x=0, offset_y=0):
self.rect_replay = ui.Rectangle(name="image_button", width= 2 * width, height=height, style=CustomRecordGroup.STYLE)
with ui.Placer(offset_x=10, offset_y=10):
self.image_replay = ui.Image(
name="replay_on", width=width - 20, height=height - 20, fill_policy=ui.FillPolicy.STRETCH
)
self.rect_replay.set_mouse_pressed_fn(lambda x, y, btn, a: self._on_mouse_pressed_replay(btn))
def __del__(self):
# set ui.Image objects to None explicitly to avoid this error:
# Client omni.ui Failed to acquire interface [omni::kit::renderer::IGpuFoundation v0.2] while unloading all plugins
self.image_play = None
def _on_mouse_pressed_play(self, key):
# 0 is for mouse left button
if key == 0:
if self.timeline.is_stopped(): # if stopped, start recording
self.play_label.text = "Pause"
self.image_play.name = "pause_on"
self.on_click_record_fn()
if self.control_group:
self.control_group.enable()
elif self.timeline.is_playing(): # if is playing, pause
self.play_label.text = "Continue"
self.image_play.name = "start_on"
self.timeline.pause()
else: # if is paused, just play
self.play_label.text = "Pause"
self.image_play.name = "pause_on"
self.timeline.play()
def _on_mouse_pressed_replay(self, key):
# 0 is for mouse left button
if key == 0:
if self.timeline.is_stopped(): # if stopped, start recording
self.replay_label.text = "Pause"
self.image_replay.name = "pause_on"
self.on_click_replay_fn()
elif self.timeline.is_playing(): # if is playing, pause
self.replay_label.text = "Continue"
self.image_replay.name = "replay_on"
self.timeline.pause()
else: # if is paused, just play
self.replay_label.text = "Pause"
self.image_replay.name = "pause_on"
self.timeline.play()
def _on_mouse_pressed_stop(self, key):
# print("press stop button", self.timeline.is_playing(), self.timeline.is_stopped())
# 0 is for mouse left button
if key == 0:
self.play_label.text = "Record"
self.image_play.name = "start_on"
self.replay_label.text = "Replay"
self.image_replay.name = "replay_on"
self.on_click_stop_fn()
if self.control_group:
self.control_group.disable()
@property
def selected(self):
return self._selected
@selected.setter
def selected(self, value):
self._selected = value
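# A minimal usage sketch for CustomRecordGroup, assuming an enclosing window/frame
# build context; the three callbacks below are hypothetical placeholders for the
# extension's actual record/stop/replay handlers.
#
#   with ui.VStack():
#       record_group = CustomRecordGroup(
#           on_click_record_fn=lambda: print("start recording"),
#           on_click_stop_fn=lambda: print("stop recording"),
#           on_click_replay_fn=lambda: print("replay recording"),
#       )
#       control_group = CustomControlGroup()
#       record_group.control_group = control_group  # Record then enables the control buttons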
class CustomControlGroup():
def __init__(self) -> None:
self.collapse_frame = ui.CollapsableFrame("Robot control")
self.collapse_frame.collapsed = False
self.collapse_frame.enabled = True
# ui
with self.collapse_frame:
with ui.VStack(height=0, spacing=0):
with ui.HStack():
ui.Label("position control: ")
                self.button_w = ui.Button("W", name = "control_button", tooltip = "move end effector forward")
                self.button_s = ui.Button("S", name = "control_button", tooltip = "move end effector backward")
                self.button_a = ui.Button("A", name = "control_button", tooltip = "move end effector to the left")
                self.button_d = ui.Button("D", name = "control_button", tooltip = "move end effector to the right")
                self.button_q = ui.Button("Q", name = "control_button", tooltip = "move end effector down")
                self.button_e = ui.Button("E", name = "control_button", tooltip = "move end effector up")
with ui.HStack():
ui.Label("rotation control: ")
                self.button_up = ui.Button("UP", name = "control_button", tooltip = "Rotate hand upward")
                self.button_down = ui.Button("DOWN", name = "control_button", tooltip = "Rotate hand downward")
                self.button_left = ui.Button("LEFT", name = "control_button", tooltip = "Rotate hand to the left")
                self.button_right = ui.Button("RIGHT", name = "control_button", tooltip = "Rotate hand to the right")
with ui.HStack():
ui.Label("gripper control: ")
self.button_control = ui.Button("LEFT CTRL", name = "control_button", tooltip = "Close/Open gripper")
self.button_list = [self.button_w, self.button_s, self.button_a, self.button_d, self.button_q, self.button_e,
self.button_up, self.button_down, self.button_left, self.button_right,
]
self.button_w.set_clicked_fn(lambda : self._on_button("w"))
self.button_s.set_clicked_fn(lambda : self._on_button("s"))
self.button_a.set_clicked_fn(lambda : self._on_button("a"))
self.button_d.set_clicked_fn(lambda : self._on_button("d"))
self.button_q.set_clicked_fn(lambda : self._on_button("q"))
self.button_e.set_clicked_fn(lambda : self._on_button("e"))
self.button_up.set_clicked_fn(lambda : self._on_button("up", 2))
self.button_down.set_clicked_fn(lambda : self._on_button("down", 2))
self.button_left.set_clicked_fn(lambda : self._on_button("left", 2))
self.button_right.set_clicked_fn(lambda : self._on_button("right", 2))
self.button_control.set_clicked_fn(lambda: self._on_button_control())
self.disable()
def enable(self):
"""
        Enable the group by expanding the frame and enabling the robot control buttons
"""
self.collapse_frame.collapsed = False
self.collapse_frame.enabled = True
self.enable_buttons()
def disable(self):
"""
        Disable the group by collapsing the robot control buttons
"""
self.collapse_frame.collapsed = True
# self.collapse_frame.enabled = False
def disable_buttons(self):
for button in self.button_list:
button.name = "control_button_disabled"
# button.enabled = False
Controller.reset_movement()
def enable_buttons(self):
for button in self.button_list:
button.enabled = True
button.name = "control_button"
Controller.reset_movement()
def _on_button(self, attr_name:str, style = 1):
attr = getattr(Controller, attr_name)
# print("attr", attr_name, attr)
button = getattr(self, f"button_{attr_name}")
if attr:
setattr(Controller, attr_name, False)
button.name = "control_button"
self.enable_buttons()
else:
self.disable_buttons()
setattr(Controller, attr_name, True)
button.enabled = True
button.name = f"control_button_pressed{style}"
def _on_button_control(self):
if Controller.left_control:
Controller.left_control = False
self.button_control.text = "LEFT CTRL"
self.button_control.name = "control_button"
else:
Controller.left_control = True
self.button_control.text = "Gripper closed"
self.button_control.name = "control_button_pressed3"
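# Summary of the control flow above: each WASDQE/arrow button toggles the matching
# boolean attribute on the global Controller class; pressing a button first calls
# disable_buttons()/Controller.reset_movement() (assumed to clear the other movement
# flags), then sets its own flag, so only one direction is active at a time.
# LEFT CTRL toggles Controller.left_control for the gripper.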
class CustomBoolWidget(CustomBaseWidget):
"""A custom checkbox or switch widget"""
def __init__(self,
model: ui.AbstractItemModel = None,
default_value: bool = True,
on_checked_fn: callable = None,
**kwargs):
self.__default_val = default_value
self.__bool_image = None
self.on_checked_fn = on_checked_fn
# Call at the end, rather than start, so build_fn runs after all the init stuff
CustomBaseWidget.__init__(self, model=model, **kwargs)
def destroy(self):
        CustomBaseWidget.destroy(self)
self.__bool_image = None
def _restore_default(self):
"""Restore the default value."""
if self.revert_img.enabled:
self.__bool_image.checked = self.__default_val
self.__bool_image.name = (
"checked" if self.__bool_image.checked else "unchecked"
)
self.revert_img.enabled = False
def _on_value_changed(self):
"""Swap checkbox images and set revert_img to correct state."""
self.__bool_image.checked = not self.__bool_image.checked
self.__bool_image.name = (
"checked" if self.__bool_image.checked else "unchecked"
)
self.revert_img.enabled = self.__default_val != self.__bool_image.checked
if self.on_checked_fn:
self.on_checked_fn(self.__bool_image.checked)
def _build_body(self):
"""Main meat of the widget. Draw the appropriate checkbox image, and
set up callback.
"""
with ui.HStack():
with ui.VStack():
# Just shift the image down slightly (2 px) so it's aligned the way
# all the other rows are.
ui.Spacer(height=2)
self.__bool_image = ui.Image(
name="checked" if self.__default_val else "unchecked",
fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT,
height=16, width=16, checked=self.__default_val
)
# Let this spacer take up the rest of the Body space.
ui.Spacer()
self.__bool_image.set_mouse_pressed_fn(
lambda x, y, b, m: self._on_value_changed())
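# A minimal usage sketch for CustomBoolWidget, assuming an enclosing frame build
# context; the label text and callback are illustrative placeholders.
#
#   CustomBoolWidget(
#       label="Show ground plane:",
#       default_value=False,
#       on_checked_fn=lambda checked: print("checked =", checked),
#   )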
NUM_FIELD_WIDTH = 50
SLIDER_WIDTH = ui.Percent(100)
FIELD_HEIGHT = 22 # TODO: Once Field padding is fixed, this should be 18
SPACING = 4
TEXTURE_NAME = "slider_bg_texture"
class CustomSliderWidget(CustomBaseWidget):
"""A compound widget for scalar slider input, which contains a
Slider and a Field with text input next to it.
"""
def __init__(self,
model: ui.AbstractItemModel = None,
num_type: str = "int",
min=0.0,
max=1.0,
default_val=0.0,
display_range: bool = False,
on_slide_fn: callable = None,
**kwargs):
self.__slider: Optional[ui.AbstractSlider] = None
self.__numberfield: Optional[ui.AbstractField] = None
self.__min = min
self.__max = max
self.__default_val = default_val
self.__num_type = num_type
self.__display_range = display_range
self.on_slide_fn = on_slide_fn
# Call at the end, rather than start, so build_fn runs after all the init stuff
CustomBaseWidget.__init__(self, model=model, **kwargs)
def destroy(self):
        CustomBaseWidget.destroy(self)
self.__slider = None
self.__numberfield = None
@property
def model(self) -> Optional[ui.AbstractItemModel]:
"""The widget's model"""
if self.__slider:
return self.__slider.model
@model.setter
def model(self, value: ui.AbstractItemModel):
"""The widget's model"""
self.__slider.model = value
self.__numberfield.model = value
def _on_value_changed(self, *args):
"""Set revert_img to correct state."""
if self.__num_type == "float":
index = self.model.as_float
else:
index = self.model.as_int
self.revert_img.enabled = self.__default_val != index
if self.on_slide_fn:
self.on_slide_fn(index)
def _restore_default(self):
"""Restore the default value."""
if self.revert_img.enabled:
self.model.set_value(self.__default_val)
self.revert_img.enabled = False
def _build_display_range(self):
"""Builds just the tiny text range under the slider."""
with ui.HStack():
ui.Label(str(self.__min), alignment=ui.Alignment.LEFT, name="range_text")
if self.__min < 0 and self.__max > 0:
# Add middle value (always 0), but it may or may not be centered,
# depending on the min/max values.
total_range = self.__max - self.__min
# subtract 25% to account for end number widths
left = 100 * abs(0 - self.__min) / total_range - 25
right = 100 * abs(self.__max - 0) / total_range - 25
ui.Spacer(width=ui.Percent(left))
ui.Label("0", alignment=ui.Alignment.CENTER, name="range_text")
ui.Spacer(width=ui.Percent(right))
else:
ui.Spacer()
ui.Label(str(self.__max), alignment=ui.Alignment.RIGHT, name="range_text")
ui.Spacer(height=.75)
def _build_body(self):
"""Main meat of the widget. Draw the Slider, display range text, Field,
and set up callbacks to keep them updated.
"""
with ui.HStack(spacing=0):
# the user provided a list of default values
with ui.VStack(spacing=3, width=ui.Fraction(3)):
with ui.ZStack():
# Put texture image here, with rounded corners, then make slider
# bg be fully transparent, and fg be gray and partially transparent
with ui.Frame(width=SLIDER_WIDTH, height=FIELD_HEIGHT,
horizontal_clipping=True):
# Spacing is negative because "tileable" texture wasn't
# perfectly tileable, so that adds some overlap to line up better.
with ui.HStack(spacing=-12):
for i in range(50): # tiling the texture
ui.Image(name=TEXTURE_NAME,
fill_policy=ui.FillPolicy.PRESERVE_ASPECT_CROP,
width=50,)
slider_cls = (
ui.FloatSlider if self.__num_type == "float" else ui.IntSlider
)
self.__slider = slider_cls(
height=FIELD_HEIGHT,
min=self.__min, max=self.__max, name="attr_slider"
)
if self.__display_range:
self._build_display_range()
with ui.VStack(width=ui.Fraction(1)):
model = self.__slider.model
model.set_value(self.__default_val)
field_cls = (
ui.FloatField if self.__num_type == "float" else ui.IntField
)
# Note: This is a hack to allow for text to fill the Field space more, as there was a bug
# with Field padding. It is fixed, and will be available in the next release of Kit.
with ui.ZStack():
# height=FIELD_HEIGHT-1 to account for the border, so the field isn't
# slightly taller than the slider
ui.Rectangle(
style_type_name_override="Field",
name="attr_field",
height=FIELD_HEIGHT - 1
)
with ui.HStack(height=0):
ui.Spacer(width=2)
self.__numberfield = field_cls(
model,
height=0,
style={
"background_color": cl.transparent,
"border_color": cl.transparent,
"padding": 4,
"font_size": fl.field_text_font_size,
},
)
if self.__display_range:
ui.Spacer()
model.add_value_changed_fn(self._on_value_changed)
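# A minimal usage sketch for CustomSliderWidget, assuming an enclosing frame build
# context; the label, range, and callback are illustrative placeholders.
#
#   CustomSliderWidget(
#       label="Light intensity:",
#       num_type="float",
#       min=0.0, max=2.0, default_val=1.0,
#       display_range=True,
#       on_slide_fn=lambda value: print("slider value =", value),
#   )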
class CustomSkySelectionGroup(CustomBaseWidget):
def __init__(self,
on_select_fn: callable = None
) -> None:
self.on_select_fn = on_select_fn
self.sky_type = ""
CustomBaseWidget.__init__(self, label = "Sky type:")
def _build_body(self):
with ui.HStack():
self.button_clear = ui.Button("Sunny", name = "control_button")
self.button_cloudy = ui.Button("Cloudy", name = "control_button")
self.button_overcast = ui.Button("Overcast", name = "control_button")
self.button_night = ui.Button("Night", name = "control_button")
self.button_clear.set_clicked_fn(lambda : self._on_button("clear"))
self.button_cloudy.set_clicked_fn(lambda : self._on_button("cloudy"))
self.button_overcast.set_clicked_fn(lambda : self._on_button("overcast"))
self.button_night.set_clicked_fn(lambda : self._on_button("night"))
self.button_list = [self.button_clear, self.button_cloudy, self.button_overcast, self.button_night]
def enable_buttons(self):
for button in self.button_list:
button.enabled = True
button.name = "control_button"
def _on_button(self, sky_type:str):
if self.on_select_fn:
self.on_select_fn(sky_type.capitalize())
self.enable_buttons()
button = getattr(self, f"button_{sky_type}")
button.name = f"control_button_pressed{2}"
self.revert_img.enabled = True
def _restore_default(self):
"""Restore the default value."""
if self.revert_img.enabled:
self.revert_img.enabled = False
self.enable_buttons()
self.on_select_fn("")
class CustomIdNotice():
def __init__(self) -> None:
self.ui = ui.HStack()
with self.ui:
ui.Spacer(width=4)
self.task_ui = ui.Button("pickup_object", name = "control_button", style = {"color": "lightsteelblue", "border_color": "lightsteelblue"}, enabled = False)
ui.Spacer(width=4)
self.object_ui = ui.Button("object: 0", name = "control_button", style = {"color": "DarkSalmon", "border_color": "DarkSalmon"}, enabled = False)
ui.Spacer(width=4)
self.house_ui = ui.Button("house: 1", name = "control_button", style = {"color": "Plum", "border_color": "Plum"}, enabled = False)
self.ui.visible = False
class CustomRenderTypeSelectionGroup(CustomBaseWidget):
def __init__(self,
on_select_fn: callable = None
) -> None:
self.on_select_fn = on_select_fn
self.sky_type = ""
CustomBaseWidget.__init__(self, label = "Render type:")
def _build_body(self):
with ui.HStack():
self.button_rgb = ui.Button("RGB", name = "control_button_pressed3")
self.button_depth= ui.Button("Depth", name = "control_button")
self.button_semantic = ui.Button("Semantic", name = "control_button")
self.button_rgb.set_clicked_fn(lambda : self._on_button("rgb"))
self.button_depth.set_clicked_fn(lambda : self._on_button("depth"))
self.button_semantic.set_clicked_fn(lambda : self._on_button("semantic"))
self.button_list = [self.button_rgb, self.button_depth, self.button_semantic]
def enable_buttons(self):
for button in self.button_list:
button.enabled = True
button.name = "control_button"
def _on_button(self, render_type:str):
if self.on_select_fn:
self.on_select_fn(render_type.capitalize())
self.enable_buttons()
button = getattr(self, f"button_{render_type}")
button.name = f"control_button_pressed{3}"
self.revert_img.enabled = True
def _restore_default(self):
"""Restore the default value."""
if self.revert_img.enabled:
self.revert_img.enabled = False
self.enable_buttons()
self._on_button("rgb")
import subprocess, os, platform
class CustomPathButtonWidget:
"""A compound widget for holding a path in a StringField, and a button
that can perform an action.
    TODO: Get text elision working in the path field, so long paths start with "..."
"""
def __init__(self,
label: str,
path: str,
btn_callback: callable = None):
self.__attr_label = label
self.__pathfield: ui.StringField = None
self.__path = path
self.__btn = None
self.__callback = btn_callback
self.__frame = ui.Frame()
with self.__frame:
self._build_fn()
def destroy(self):
self.__pathfield = None
self.__btn = None
self.__callback = None
self.__frame = None
@property
def model(self) -> Optional[ui.AbstractItem]:
"""The widget's model"""
if self.__pathfield:
return self.__pathfield.model
@model.setter
def model(self, value: ui.AbstractItem):
"""The widget's model"""
self.__pathfield.model = value
def get_path(self):
return self.model.as_string
def _build_fn(self):
"""Draw all of the widget parts and set up callbacks."""
with ui.HStack():
ui.Label(
self.__attr_label,
name="attribute_name",
width=120,
)
self.__pathfield = ui.StringField(
name="path_field",
enabled = False,
)
ui.Spacer(width = 8)
            # TODO: Add clippingType=ELLIPSIS_LEFT for long paths
self.__pathfield.model.set_value(self.__path)
self.folder_img = ui.Image(
name="open_folder",
fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT,
width=12,
height=18,
)
self.folder_img.set_mouse_pressed_fn(lambda x, y, b, m: self.open_path(self.__path))
def open_path(self, path):
if platform.system() == "Darwin": # macOS
subprocess.call(("open", path))
elif platform.system() == "Windows": # Windows
os.startfile(path)
else: # linux variants
subprocess.call(("xdg-open", path))
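# A minimal usage sketch for CustomPathButtonWidget; the path below is an
# illustrative placeholder. Clicking the folder icon opens the path with the
# platform file browser (open / os.startfile / xdg-open, as in open_path above).
#
#   CustomPathButtonWidget(
#       label="Save folder:",
#       path="C:/temp/vrkitchen_output",
#   )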
| 31,182 | Python | 38.12547 | 166 | 0.540344 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/ui/custom_base_widget.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
__all__ = ["CustomBaseWidget"]
from typing import Optional
import omni.ui as ui
from .style import ATTR_LABEL_WIDTH
class CustomBaseWidget:
"""The base widget for custom widgets that follow the pattern of Head (Label),
Body Widgets, Tail Widget"""
def __init__(self, *args, model=None, **kwargs):
        self.existing_model: Optional[ui.AbstractItemModel] = model
self.revert_img = None
self.__attr_label: Optional[str] = kwargs.pop("label", "")
self.__frame = ui.Frame()
with self.__frame:
self._build_fn()
def destroy(self):
self.existing_model = None
self.revert_img = None
self.__attr_label = None
self.__frame = None
def __getattr__(self, attr):
"""Pretend it's self.__frame, so we have access to width/height and
callbacks.
"""
return getattr(self.__frame, attr)
def _build_head(self):
"""Build the left-most piece of the widget line (label in this case)"""
ui.Label(
self.__attr_label,
name="attribute_name",
width=120,
)
def _build_body(self):
"""Build the custom part of the widget. Most custom widgets will
override this method, as it is where the meat of the custom widget is.
"""
ui.Spacer()
def _build_tail(self):
"""Build the right-most piece of the widget line. In this case,
we have a Revert Arrow button at the end of each widget line.
"""
with ui.HStack(width=0):
ui.Spacer(width=5)
with ui.VStack(height=0):
ui.Spacer(height=3)
self.revert_img = ui.Image(
name="revert_arrow",
fill_policy=ui.FillPolicy.PRESERVE_ASPECT_FIT,
width=12,
height=13,
enabled=False,
)
ui.Spacer(width=5)
# call back for revert_img click, to restore the default value
self.revert_img.set_mouse_pressed_fn(
lambda x, y, b, m: self._restore_default())
def _build_fn(self):
"""Puts the 3 pieces together."""
with ui.HStack():
self._build_head()
self._build_body()
self._build_tail()
| 2,769 | Python | 32.373494 | 87 | 0.590105 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/autotask/auto_config.py | # automatically generation configs meta data for task generation
import json
import copy
g_meta_json_path = "./configs.json"
# initial and target value pairs for continuous tasks
g_init_target_value_pair = [
(0, 0.25), (0, 0.5), (0, 0.75), (0, 1),
(0.25, 0.5), (0.25, 0.75), (0.25, 1),
(0.5, 0.75), (0.5, 1),
(0.75, 1)
]
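# Each tuple is an (initial, target) fraction of the fully-open / fully-filled
# state on a 0-1 scale, e.g. (0, 0.25) means "start closed/empty and reach 25%";
# the close_* and pour_water branches below swap the two ends when reading a pair.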
g_mission_template = {
"size": 0,
"orient": [0, 0, 0.7071068, 0.7071068],
"robot_offset": [-40, 0, 0],
"robot_orient": [0.7071068, -0.7071068,0, 0],
"task_type": "",
"task_id": "",
"robot_id": "",
"mission_id": "",
"goal":{
"description":"Open the door a little.",
"condition": {
"init_value": -1,
"type": "rotation",
"target": "",
"joint":"",
"target_value": 0
}
}
}
def add_continuous_meta_open_mission(task_type, meta_json_path = g_meta_json_path):
"""
    Add continuous mission types for the given task type
"""
# load json
assert task_type in ["open_door", "open_drawer", "open_cabinet", "close_door", "pour_water",
"close_drawer", "close_cabinet", "transfer_water", "tap_water"]
meta_json = json.load(open(meta_json_path))
# if task_type not in meta_json:
# clean
meta_json[task_type] = []
task_missions = meta_json[task_type]
for init_value, target_value in g_init_target_value_pair:
mission = copy.deepcopy(g_mission_template)
goal = mission["goal"]
condition = goal["condition"]
if task_type == "open_door":
#mission["robot_offset"] = [-40, 0, 0]
mission["robot_offset"] = [50, 0, 0]
mission["robot_orient"] = [0,0,0.7071068,0.7071068]
goal["description"] = "Open the door"
condition["type"] = "rotation"
condition["init_value"] = init_value
condition["target_value"] = target_value
elif task_type == "close_door":
mission["robot_offset"] = [70, 0, 0]
mission["robot_orient"] = [0,0,0.7071068,0.7071068]
goal["description"] = "close the door"
condition["type"] = "rotation"
condition["init_value"] = target_value
condition["target_value"] = init_value
elif task_type == "pour_water":
# only pour half and empty
if not (init_value, target_value) in [(0.5, 1), (0, 1)]:
continue
mission["robot_offset"] = [-30, 0, 0]
goal["description"] = "Pour the liquid out of the contrainer."
condition["type"] = "liquid"
condition["init_value"] = target_value
condition["target_value"] = init_value
mission["size"] = 1.0
mission["orient"] = [1, 0, 0, 0]
elif task_type == "transfer_water":
# only pour half and empty
if not (init_value, target_value) in [(0, 0.25), (0, 0.5), (0, 0.75), (0, 1)]:
continue
mission["robot_offset"] = [-30, 0, 0]
goal["description"] = "Pour the liquid into another contrainer."
condition["type"] = "liquid"
# condition["init_value"] = target_value
condition["target_value"] = target_value
mission["size"] = 1.0
mission["orient"] = [1, 0, 0, 0]
elif task_type == "close_drawer":
condition["type"] = "linear"
mission["robot_offset"] = [-70, 0, 0]
goal["description"] = "close the drawer"
condition["init_value"] = target_value
condition["target_value"] = init_value
mission["size"] = 70
elif task_type == "open_drawer":
condition["type"] = "linear"
mission["robot_offset"] = [-50, 0, 0]
goal["description"] = "Open the drawer"
condition["init_value"] = init_value
condition["target_value"] = target_value
mission["size"] = 70
elif task_type == "open_cabinet":
condition["type"] = "rotation"
mission["robot_offset"] = [-50, 0, 0]
goal["description"] = "Open the cabinet"
condition["init_value"] = init_value
condition["target_value"] = target_value
mission["size"] = 70
elif task_type == "close_cabinet":
condition["type"] = "rotation"
mission["robot_offset"] = [-870, 0, 0]
goal["description"] = "Close the cabinet"
condition["init_value"] = target_value
condition["target_value"] = init_value
mission["size"] = 70
elif task_type == "tap_water":
# only pour half and empty
if not (init_value, target_value) in [(0, 0.25), (0, 0.5), (0, 0.75), (0, 1)]:
continue
mission["robot_offset"] = [-30, 0, 0]
goal["description"] = "Get tap water."
condition["type"] = "liquid"
condition["init_value"] = init_value
condition["target_value"] = target_value
mission["size"] = 20
mission["orient"] = [0.7071068,-0.7071068,0,0]
task_missions.append(mission)
print("task_missions", task_missions)
with open(meta_json_path, "w") as f:
json.dump(meta_json, f, indent = 4)
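# For reference, the first "open_door" entry generated from the pair (0, 0.25)
# ends up roughly as (other fields keep the template defaults):
#   {"size": 0, "orient": [0, 0, 0.7071068, 0.7071068],
#    "robot_offset": [50, 0, 0], "robot_orient": [0, 0, 0.7071068, 0.7071068],
#    "task_type": "", "task_id": "", "robot_id": "", "mission_id": "",
#    "goal": {"description": "Open the door",
#             "condition": {"init_value": 0, "type": "rotation",
#                           "target": "", "joint": "", "target_value": 0.25}}}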
if __name__ == "__main__":
print("genrating continous mission")
add_continuous_meta_open_mission("open_door")
| 5,589 | Python | 34.605095 | 97 | 0.510646 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/autotask/auto.py | # auto task generating
import os
import json
import numpy as np
import asyncio
import omni
import pxr
import carb
from omni.physx.scripts import physicsUtils
from ..param import IS_IN_ISAAC_SIM, DATA_PATH_NEW, CUSTOM_ASSET_PATH, ROBOT_PATH, SAPIEN_ASSET_PATH, IS_IN_CREAT, \
GAME_OBJ_NAMES, CONTAINER_NAMES, OTHER_OBJ_NAMES, HOUSE_INFO_PATH
from ..task_check import BaseChecker #, JointChecker, GraspChecker, OrientChecker, ContainerChecker
from .meta import AUTOTASK_META
# if IS_IN_CREAT:
# import omni.kit.viewport_widgets_manager as wm
# from ..ui.hud import LabelWidget
class AutoTasker():
TASK_DESCRIPTION = ""
TASK_ID = ""
def __init__(self,
task_type:str,
task_id:int,
robot_id:int = 0,
mission_id:int = 0,
house_id:int = 0,
anchor_id:int = 0,
meta_id : int = 0, # to retrieve which config from meta data
annotator : int = 0,
) -> None:
self.task_type = task_type
self.task_id = str(task_id)
self.robot_id = str(robot_id)
self.mission_id = str(mission_id)
self.house_id = str(house_id)
self.anchor_id = str(anchor_id)
self.meta_id = mission_id # meta_id
self.data_path = DATA_PATH_NEW
# scene
self.stage = omni.usd.get_context().get_stage()
##
self.annotator = annotator
# get objects
self.probe_obj_folder()
def probe_obj_folder(self):
"""
check task folder
"""
task_type_folder = os.path.join(self.data_path, self.annotator, "task", self.task_type)
if not os.path.exists(task_type_folder):
os.makedirs(task_type_folder)
task_folder = os.path.join(self.data_path, self.annotator, "task", self.task_type, str(self.task_id))
if not os.path.exists(task_folder):
os.makedirs(task_folder)
"""
Get furniture
"""
if self.task_type in ["open_drawer", "open_cabinet", "close_drawer", "close_cabinet"]:
self.obj_type = "StorageFurniture"
self.obj_folder = os.path.join(SAPIEN_ASSET_PATH, self.obj_type)
elif self.task_type in ["pickup_object", "reorient_object"]:
self.obj_type = "Bottle"
self.obj_folder = os.path.join(CUSTOM_ASSET_PATH, self.obj_type)
elif self.task_type in ["put_object_into_box", "take_object_out_box"]:
self.obj_type = "Box"
self.obj_folder = os.path.join(SAPIEN_ASSET_PATH, self.obj_type)
elif self.task_type in ["open_door", "close_door"]:
self.obj_type = "Door"
self.obj_folder = os.path.join(SAPIEN_ASSET_PATH, self.obj_type)
elif self.task_type in ["pour_water", "transfer_water"]:
self.obj_type = "Cup"
self.obj_folder = os.path.join(CUSTOM_ASSET_PATH, self.obj_type)
elif self.task_type in ["tap_water"]:
self.obj_type = "Faucet"
self.obj_folder = os.path.join(SAPIEN_ASSET_PATH, self.obj_type)
else:
raise Exception(f"current task type not supported: {self.task_type}")
objs = [ item for item in os.listdir(self.obj_folder) if item.isnumeric() ]
self.obj_list = sorted( objs, key=lambda x: int(x))
self.obj_id = self.obj_list[int(self.task_id)]
self.target_obj_path = "/mobility_" + self.obj_type + "_" + str(self.obj_id)
def reconfig(self, obj_index):
"""
Reconfig obj from object index
"""
self.obj_index = obj_index
self.obj_id = self.obj_list[int(obj_index)]
self.target_obj_path = "/mobility_" + self.obj_type + "_" + str(self.obj_id)
print("AUTOTASK_META[self.task_type][self.meta_id]", AUTOTASK_META[self.task_type][self.meta_id])
AutoTasker.TASK_DESCRIPTION = AUTOTASK_META[self.task_type][self.meta_id]["goal"]["description"]
print("AutoTasker.TASK_DESCRIPTION", AutoTasker.TASK_DESCRIPTION)
def add_obj(self):
"""
Add object to the scene
"""
self.stage = omni.usd.get_context().get_stage()
# set up game root
default_prim_path_str = self.stage.GetDefaultPrim().GetPath().pathString
## this is necessary because for standalone this might not be /World
if not default_prim_path_str:
default_prim_path_str = "/World"
self.xform_game_path = default_prim_path_str + "/game" # omni.usd.get_stage_next_free_path(self.stage, "/World/game", True)
# move obj to the correct place
xform_game = self.stage.GetPrimAtPath(self.xform_game_path)
if not xform_game:
xform_game = pxr.UsdGeom.Xform.Define(self.stage, self.xform_game_path)
# set game xform
game_xform = pxr.Gf.Matrix4d().SetScale([1, 1, 1]) * \
pxr.Gf.Matrix4d().SetRotate(pxr.Gf.Quatf(1.0,0.0,0.0,0.0)) * pxr.Gf.Matrix4d().SetTranslate([0,0,0])
omni.kit.commands.execute(
"TransformPrimCommand",
path=self.xform_game_path,
new_transform_matrix=game_xform,
)
# set obj prim path
mobility_prim_path = xform_game.GetPath().pathString + self.target_obj_path
print("mobility_prim_path", mobility_prim_path)
prim = self.stage.GetPrimAtPath(mobility_prim_path)
if not prim.IsValid():
prim = self.stage.DefinePrim(mobility_prim_path)
if self.task_type in ["pour_water", "transfer_water"]:
obj_usd_path = os.path.join(self.obj_folder, self.obj_id, "cup.usd")
else:
obj_usd_path = os.path.join(self.obj_folder, self.obj_id, "mobility.usd")
# import obj
success_bool = prim.GetReferences().AddReference(obj_usd_path)
if not success_bool:
raise Exception(f"Cannot import obj usd at path {obj_usd_path}")
# set up scale
if self.task_type in ["open_door", "close_door"]:
from .utils import calculate_door_size
scale = calculate_door_size(prim)
else:
scale = [AUTOTASK_META[self.task_type][self.meta_id]["size"]]*3
if prim.HasAttribute("xformOp:scale"):
prim.GetAttribute("xformOp:scale").Set(pxr.Gf.Vec3f(scale))
else:
obj_xform = pxr.Gf.Matrix4d().SetScale(scale)
omni.kit.commands.execute(
"TransformPrimCommand",
path=prim.GetPath().pathString,
new_transform_matrix=obj_xform,
)
# set up orient
#if self.task_type "reorient_object":
orient = AUTOTASK_META[self.task_type][self.meta_id]["orient"]
print("orient: ", orient)
mat = pxr.Gf.Matrix4f(pxr.UsdGeom.Xformable(prim).ComputeLocalToWorldTransform(0))
obj_xform = pxr.Gf.Matrix4f().SetScale(scale) * pxr.Gf.Matrix4f().SetRotate(pxr.Gf.Quatf(*orient))
new_xform = obj_xform
# new_xform = obj_xform * mat
print("new_xform", prim, obj_xform, mat, "rot", new_xform.ExtractRotationQuat(), "scale:", scale)
omni.kit.commands.execute(
"TransformPrimCommand",
path=prim.GetPath().pathString,
new_transform_matrix=new_xform,
)
# other imports
if self.task_type in ["put_object_into_box", "transfer_water", "tap_water"]:
self.add_auxilary_object()
# unbind material
if self.task_type in ["transfer_water", "pour_water"]:
print("unbind material")
omni.kit.commands.execute(
'BindMaterial',
prim_path=prim.GetPath().pathString + "/cupShape",
material_path=None,
strength=pxr.UsdShade.Tokens.strongerThanDescendants
)
def add_auxilary_object(self):
"""
        Add the auxiliary object (standalone object or extra cup container) to the scene
"""
self.stage = omni.usd.get_context().get_stage()
# set up game root
default_prim_path_str = self.stage.GetDefaultPrim().GetPath().pathString
## this is necessary because for standalone this might not be /World
if not default_prim_path_str:
default_prim_path_str = "/World"
self.xform_game_path = default_prim_path_str + "/game" # omni.usd.get_stage_next_free_path(self.stage, "/World/game", True)
# move obj to the correct place
xform_game = self.stage.GetPrimAtPath(self.xform_game_path)
if not xform_game:
raise Exception(f"must have /World/game prim")
if self.task_type == "put_object_into_box":
aux_folder = os.path.join(CUSTOM_ASSET_PATH, "standalone")
aux_folder_objs = os.listdir(aux_folder)
aux_obj_name = aux_folder_objs[self.obj_index + 12]
aux_prim_path = xform_game.GetPath().pathString + "/mobility_standalone_" + aux_obj_name
obj_usd_path = os.path.join(aux_folder, aux_obj_name, "mobility.usd")
position = [-20,0,0]
else:
aux_folder = os.path.join(CUSTOM_ASSET_PATH, "Cup")
aux_folder_objs = sorted(os.listdir(aux_folder), key=lambda x:int(x))
aux_obj_name = str(int(self.task_id))
aux_prim_path = xform_game.GetPath().pathString + "/container_Cup_" + aux_obj_name
obj_usd_path = os.path.join(aux_folder, aux_obj_name, "cup.usd")
position = [0,0,-20]
# print("aux_prim_path", aux_prim_path)
prim = self.stage.GetPrimAtPath(aux_prim_path)
if not prim.IsValid():
prim = self.stage.DefinePrim(aux_prim_path)
success_bool = prim.GetReferences().AddReference(obj_usd_path)
if not success_bool:
raise Exception(f"Cannot import obj usd at path {obj_usd_path}")
# offset
if True:
purposes = [pxr.UsdGeom.Tokens.default_]
bboxcache = pxr.UsdGeom.BBoxCache(pxr.Usd.TimeCode.Default(), purposes)
game_prim = self.stage.GetPrimAtPath(self.xform_game_path)
bboxes = bboxcache.ComputeWorldBound(game_prim)
# print("bboxes", bboxes)
game_bboxes = [bboxes.ComputeAlignedRange().GetMin(),bboxes.ComputeAlignedRange().GetMax()]
else:
game_bboxes = omni.usd.get_context().compute_path_world_bounding_box(self.xform_game_path)
position[1] += game_bboxes[0][1] # the same y
position[0] += game_bboxes[0][0] # offset x
position[2] += game_bboxes[0][2] # offset x
# set up scale
obj_xform = pxr.Gf.Matrix4d().SetScale([1,1,1]).SetRotate(pxr.Gf.Quatf(1,0,0,0)).SetTranslate(position)
omni.kit.commands.execute(
"TransformPrimCommand",
path=prim.GetPath().pathString,
new_transform_matrix=obj_xform,
)
# unbind material
if self.task_type in ["transfer_water", "pour_water"]:
print("unbind material")
omni.kit.commands.execute(
'BindMaterial',
prim_path=prim.GetPath().pathString + "/cupShape",
material_path=None,
strength=pxr.UsdShade.Tokens.strongerThanDescendants
)
def add_robot(self):
"""
Add robot to the scene:
1. load robot
2. calculate position
"""
self.stage = omni.usd.get_context().get_stage()
franka_path = os.path.join(ROBOT_PATH, "franka/franka.usd")
self.xform_game_path = "/World/game"
# position, rotation
position = [i for i in AUTOTASK_META[self.task_type][self.meta_id]["robot_offset"]]
rotation = [i for i in AUTOTASK_META[self.task_type][self.meta_id]["robot_orient"]]
# offset
if True: ##IS_IN_ISAAC_SIM:
purposes = [pxr.UsdGeom.Tokens.default_]
bboxcache = pxr.UsdGeom.BBoxCache(pxr.Usd.TimeCode.Default(), purposes)
prim = self.stage.GetPrimAtPath(self.xform_game_path)
bboxes = bboxcache.ComputeWorldBound(prim)
# print("bboxes", bboxes)
game_bboxes = [bboxes.ComputeAlignedRange().GetMin(),bboxes.ComputeAlignedRange().GetMax()]
else:
game_bboxes = omni.usd.get_context().compute_path_world_bounding_box(self.xform_game_path)
print("game_bboxes", game_bboxes)
position[1] += game_bboxes[0][1]
# print("game_bboxes", game_bboxes, position)
if position[0] != 0 :
position[0] += game_bboxes[0][0]
if position[2] != 0 :
position[2] += game_bboxes[0][2]
# load robot
robot_prim = self.stage.GetPrimAtPath(self.xform_game_path + "/franka")
if not robot_prim.IsValid():
robot_prim = self.stage.DefinePrim(self.xform_game_path + "/franka")
print("add robot at path: ", franka_path)
success_bool = robot_prim.GetReferences().AddReference(franka_path)
if not success_bool:
raise Exception("The usd file at path {} provided wasn't found".format(franka_path))
# set robot xform
robot_xform = pxr.UsdGeom.Xformable.Get(self.stage, robot_prim.GetPath())
robot_xform.ClearXformOpOrder()
# print("position $ rotation: ", position[0], position[1], position[2], rotation)
robot_xform.AddTranslateOp().Set(pxr.Gf.Vec3f(float(position[0]), float(position[1]), float(position[2])))
robot_xform.AddOrientOp().Set(pxr.Gf.Quatf(float(rotation[0]), float(rotation[1]), float(rotation[2]), float(rotation[3])))
robot_xform.AddScaleOp().Set(pxr.Gf.Vec3f(1.0, 1.0, 1.0))
#selection = omni.usd.get_context().get_selection()
#selection.clear_selected_prim_paths()
#selection.set_prim_path_selected(robot_parent_path + "/franka", True, True, True, True)
def add_house(self):
"""
        Add house from house_id
"""
print("auto add house??")
# scene
self.stage = omni.usd.get_context().get_stage()
self.layer = self.stage.GetRootLayer()
house_path = os.path.join(HOUSE_INFO_PATH, self.house_id, "layout.usd")
# omni.kit.commands.execute(
# "CreateSublayer",
# layer_identifier=self.layer.identifier,
# sublayer_position=0,
# new_layer_path=house_path,
# transfer_root_content=False,
# create_or_insert=False,
# layer_name="house",
# )
# move obj to the correct place
house_prim_path = "/World/layout"
house_prim = self.stage.GetPrimAtPath(house_prim_path)
if not house_prim.IsValid():
house_prim = self.stage.DefinePrim(house_prim_path)
success_bool = house_prim.GetReferences().AddReference(house_path)
if not success_bool:
raise Exception(f"The house is not load at {house_path}")
if not self.task_type in ["tap_water", "transfer_water", "pour_water"]:
from omni.physx.scripts.utils import setStaticCollider
# static collider
furniture_prim = self.stage.GetPrimAtPath(house_prim_path + "/furniture")
setStaticCollider(furniture_prim, approximationShape="none")
# TODO: check room_struct collider
room_struct_prim = self.stage.GetPrimAtPath(house_prim_path + "/roomStruct")
setStaticCollider(room_struct_prim, approximationShape="none")
# put game onto ground
game_prim_path = "/World/game"
game_prim = self.stage.GetPrimAtPath(game_prim_path)
if game_prim:
if True: #IS_IN_ISAAC_SIM:
purposes = [pxr.UsdGeom.Tokens.default_]
bboxcache = pxr.UsdGeom.BBoxCache(pxr.Usd.TimeCode.Default(), purposes)
bboxes = bboxcache.ComputeWorldBound(game_prim)
# print("bboxes", bboxes)
y = bboxes.ComputeAlignedRange().GetMin()[1]
else:
# prim_path = stage.GetDefaultPrim().GetPath().pathString
usd_context = omni.usd.get_context()
bboxes = usd_context.compute_path_world_bounding_box(game_prim_path)
y = bboxes[0][1]
game_xform = pxr.Gf.Matrix4d().SetScale([1, 1, 1]) * \
pxr.Gf.Matrix4d().SetRotate(pxr.Gf.Quatf(1.0,0.0,0.0,0.0)) * pxr.Gf.Matrix4d().SetTranslate([0,-y,0])
omni.kit.commands.execute(
"TransformPrimCommand",
path=game_prim_path,
new_transform_matrix=game_xform,
)
# add ground
ground_prim = self.stage.GetPrimAtPath("/World/groundPlane")
if not ground_prim:
physicsUtils.add_ground_plane(self.stage, "/World/groundPlane", "Y", 1000.0,
pxr.Gf.Vec3f(0.0, 0.0, 0), pxr.Gf.Vec3f(0.2))
ground_prim = self.stage.GetPrimAtPath("/World/groundPlane")
# prim_list = list(self.stage.TraverseAll())
# prim_list = [ item for item in prim_list if 'groundPlane' in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ]
# for prim in prim_list:
ground_prim.GetAttribute('visibility').Set('invisible')
def add_task(self):
"""
Add task to current scene
"""
self.stage = omni.usd.get_context().get_stage()
self.task_checker = BaseChecker(self.task_type, self.task_id, self.robot_id, self.mission_id, annotator = "Yizhou", run_time = False)
# if self.task_type in ["open_drawer", "open_cabinet", "open_door", "close_door"]:
# self.task_checker = JointChecker(self.task_type, self.task_id, self.robot_id, self.mission_id)
# elif self.task_type == "pickup_object":
# self.task_checker = GraspChecker(self.task_type, self.task_id, self.robot_id, self.mission_id)
# elif self.task_type == "reorient_object":
# self.task_checker = OrientChecker(self.task_type, self.task_id, self.robot_id, self.mission_id)
# elif self.task_type in ["put_object_into_box"]:
# self.task_checker = ContainerChecker(self.task_type, self.task_id, self.robot_id, self.mission_id)
# else:
# raise Exception(f"Current task type {self.task_type} not supported")
# modify task from template
# print(AUTOTASK_META[self.task_type][self.meta_index]["task_template"])
self.task_checker.current_mission = AUTOTASK_META[self.task_type][self.meta_id]
condition = self.task_checker.current_mission["goal"]["condition"]
# get target
target_prim = None
for prim in self.stage.GetPrimAtPath("/World/game").GetChildren():
for game_name in GAME_OBJ_NAMES:
if game_name in prim.GetPath().pathString:
target_prim = prim
break
condition["target"] = target_prim.GetPath().pathString.split("/")[-1]
# other condition
if self.task_type in ["open_drawer", "open_cabinet", "open_door", "close_door", "close_drawer", "close_cabinet"]:
selection = omni.usd.get_context().get_selection()
assert len(selection.get_selected_prim_paths()) == 1, "Please select one joint!"
joint_path = selection.get_selected_prim_paths()[0]
joint_name = joint_path.split("/")[-1]
# print("joint_name:", joint_name)
self.task_checker.current_mission["goal"]
condition["joint"] = joint_name
elif self.task_type in ["put_object_into_box", "transfer_water", "take_object_out_box", "tap_water"]:
container_prim = None
for prim in self.stage.GetPrimAtPath("/World/game").GetChildren():
for game_name in CONTAINER_NAMES:
if game_name in prim.GetPath().pathString.lower():
container_prim = prim
break
if not container_prim:
raise Exception(f"Container prim must exist at under /World/game")
condition["container"] = container_prim.GetPath().pathString.split("/")[-1]
# save mission
self.task_checker.current_mission["goal"]["description"] = AutoTasker.TASK_DESCRIPTION
print("current_mission", self.task_checker.current_mission)
self.task_checker.current_mission["goal"]["condition"] = condition
self.task_checker.save_mission()
@classmethod
def new_scene(cls):
async def open_new_scene():
await omni.usd.get_context().new_stage_async()
await omni.kit.app.get_app().next_update_async()
asyncio.ensure_future(open_new_scene())
# def build_HUD(self):
# if IS_IN_CREAT or IS_IN_ISAAC_SIM:
# gui_path = self.stage.GetDefaultPrim().GetPath().pathString + "/GUI"
# gui = self.stage.GetPrimAtPath(gui_path)
# if not gui:
# gui = pxr.UsdGeom.Xform.Define(self.stage, gui_path)
# gui_location = pxr.Gf.Vec3f(0, 50, 0)
# gui.AddTranslateOp().Set(gui_location)
# self.wiget_id = wm.add_widget(gui_path, LabelWidget(f"Object id: {self.obj_id}"), wm.WidgetAlignment.TOP)
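# A minimal usage sketch of the scene-building order implied by the methods above;
# in the extension each step is normally triggered from a separate UI action, and
# the concrete ids below are illustrative placeholders.
#
#   tasker = AutoTasker("open_drawer", task_id=0, robot_id=0, mission_id=0,
#                       house_id=6, anchor_id=0, annotator="Yizhou")
#   AutoTasker.new_scene()   # async: schedules opening an empty stage
#   tasker.add_obj()         # reference the target mobility under /World/game
#   tasker.add_robot()       # place the Franka relative to the game bounding box
#   tasker.add_house()       # reference the 3D-FRONT layout and ground plane
#   tasker.add_task()        # write the mission json via BaseChecker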
| 21,575 | Python | 41.98008 | 141 | 0.584056 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/autotask/utils.py | # utility function
import re
import omni
import pxr
from ..param import IS_IN_CREAT
def calculate_door_size(prim, scale = 1):
"""
    Calculate the door scale needed to match the 3D-FRONT door frame size
"""
target_box_size = [10, 73.157, 209] # 3D-FRONT door frame size
if False: #IS_IN_CREAT:
usd_context = omni.usd.get_context()
prim_bboxes = usd_context.compute_path_world_bounding_box(prim.GetPath().pathString)
# In create
else:
purposes = [pxr.UsdGeom.Tokens.default_]
bboxcache = pxr.UsdGeom.BBoxCache(pxr.Usd.TimeCode.Default(), purposes)
bboxes = bboxcache.ComputeWorldBound(prim)
# print("bboxes", bboxes)
prim_bboxes = [bboxes.ComputeAlignedRange().GetMin(), bboxes.ComputeAlignedRange().GetMax()]
print("prim_bboxes", prim_bboxes)
s_x = target_box_size[0] / (prim_bboxes[1][0] - prim_bboxes[0][0]) * scale
s_y = target_box_size[1] / (prim_bboxes[1][1] - prim_bboxes[0][1]) * scale
s_z = target_box_size[2] / (prim_bboxes[1][2] - prim_bboxes[0][2]) * scale
# if prim_bboxes[1][1] - prim_bboxes[0][1] < prim_bboxes[1][2] - prim_bboxes[0][2]:
# s_y, s_z = s_z, s_y
print("[1, s_y, s_z]", s_x, s_y, s_z)
return [1, s_y, s_z]
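# Hedged worked example: for a door whose world bounds span 100 (x) by 4 (y) by
# 210 (z) units, the returned scale is [1, 73.157 / 4, 209 / 210] ~ [1, 18.3, 1.0],
# i.e. only the y/z extents are rescaled to the 3D-FRONT frame size and x stays 1.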
| 1,263 | Python | 37.303029 | 100 | 0.610451 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/autotask/meta.py | import json
from pathlib import Path
import os
auto_folder = str(Path(__file__).parent.resolve()).replace("\\", "/")
# print("auto_folder", auto_folder)
AUTOTASK_META = json.load(open(os.path.join(auto_folder,"configs.json"))) | 231 | Python | 22.199998 | 74 | 0.692641 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/autotask/auto_suggest.py | # task labeling suggestion
from logging import root
from omni import ui
import os
import json
import carb
from ..param import DATA_PATH_NEW, TASK_TYPES, ANNOTATORS
def generate_suggestion_text_from_list(id_list):
if len(id_list) == 0:
return "no suggestion"
return ",".join([str(_) for _ in id_list])
class AutoSuggest():
def __init__(self) -> None:
pass
def read_ui(self):
self.task_type_index = self.suggest_task_type_ui.model.get_item_value_model().get_value_as_int()
self.task_type = TASK_TYPES[self.task_type_index - 1]
self.task_id = self.suggest_task_id_ui.model.get_value_as_int()
self.robot_id = self.suggest_robot_id_ui.model.get_value_as_int()
self.mission_id = self.suggest_mission_id_ui.model.get_value_as_int()
self.house_id = self.suggest_house_id_ui.model.get_value_as_int()
self.anchor_id = self.suggest_anchor_id_ui.model.get_value_as_int()
self.annotator_index = self.annotator_ui.model.get_item_value_model().get_value_as_int()
self.annotator = ANNOTATORS[self.annotator_index]
def reset_ui(self):
self.suggest_task_type_ui.model.get_item_value_model().set_value(0)
self.suggest_task_id_ui.model.set_value(-1)
self.suggest_robot_id_ui.model.set_value(-1)
self.suggest_mission_id_ui.model.set_value(-1)
self.suggest_house_id_ui.model.set_value(-1)
self.suggest_anchor_id_ui.model.set_value(-1)
self.suggest_task_id_text_ui.model.set_value("")
self.suggest_robot_id_text_ui.model.set_value("")
self.suggest_mission_id_text_ui.model.set_value("")
self.suggest_anchor_id_text_ui.model.set_value("")
self.suggest_house_id_text_ui.model.set_value("")
self.info_ui.model.set_value("")
def suggest_trial_num(self):
from ..param import SAVE_ROOT
root_dir = '-'.join([self.task_type, str(self.task_id), str(self.robot_id), str(self.mission_id), str(self.house_id), \
str(self.anchor_id) ])
folders = os.listdir(SAVE_ROOT)
folders = [folder for folder in folders if folder.startswith(root_dir)]
return len(folders)
def suggest_task(self):
self.read_ui()
task_ids = os.listdir(os.path.join(DATA_PATH_NEW, self.annotator, "task", self.task_type))
task_ids.sort(key=lambda x: int(x))
self.suggest_task_id_text_ui.model.set_value(generate_suggestion_text_from_list(task_ids))
def suggest_robot(self):
self.read_ui()
robot_file = os.path.join(DATA_PATH_NEW, self.annotator, "task", self.task_type, str(self.task_id), "robots.json")
if os.path.exists(robot_file):
robot_ids = list(json.load(open(robot_file)).keys())
else:
carb.log_warn(f"No robots found for task {self.task_type}: {self.task_id}")
robot_ids = []
# print(robot_ids)
self.suggest_robot_id_text_ui.model.set_value(generate_suggestion_text_from_list(robot_ids))
def suggest_anchor_id(self):
self.read_ui()
house_folder = os.path.join(DATA_PATH_NEW, self.annotator, "house")
house_folders = os.listdir(house_folder)
keys = []
# folder: 0, 1, 2 etc...
display = []
for folder in house_folders:
path = str(os.path.join(house_folder, folder, "anchor.json" ))
if os.path.exists(path):
with open(path) as f:
data = json.load(f)
keys.extend(list(data.keys()))
for name in keys:
tmp = name.split()
assert (len(tmp) == 4)
task_type = tmp[0]
task_id = tmp[1]
robot_id = tmp[2]
anchor_id = tmp[3]
if task_type == self.task_type and str(task_id) == str(self.task_id) and str(robot_id) == str(self.robot_id):
display.append(anchor_id)
self.suggest_anchor_id_text_ui.model.set_value(generate_suggestion_text_from_list(display))
def suggest_houseID(self):
self.read_ui()
house_folder = os.path.join(DATA_PATH_NEW, self.annotator, "house")
house_folders = os.listdir(house_folder)
keys = []
# folder: 0, 1, 2 etc...
display = []
for folder in house_folders:
path = str(os.path.join(house_folder, folder, "anchor.json" ))
if os.path.exists(path):
with open(path) as f:
data = json.load(f)
keys.extend(list(data.keys()))
for name in keys:
tmp = name.split()
assert (len(tmp) == 4)
task_type = tmp[0]
task_id = tmp[1]
robot_id = tmp[2]
anchor_id = tmp[3]
if task_type == self.task_type and str(task_id) == str(self.task_id) and str(robot_id) == str(self.robot_id):
display.append(folder)
self.suggest_house_id_text_ui.model.set_value(generate_suggestion_text_from_list(display))
def suggest_mission(self):
self.read_ui()
mission_file = os.path.join(DATA_PATH_NEW, self.annotator, "task", self.task_type, str(self.task_id), "missions.json")
mission_ids = []
if os.path.exists(mission_file):
mission_info = json.load(open(mission_file))
# identifier_prefix = self.task_type + " " + str(self.task_id) + " " + str(self.robot_id)
identifier_prefix = self.task_type + " " + str(self.task_id) #+ " " + str(self.robot_id)
for key in mission_info:
if key.startswith(identifier_prefix):
mission_ids.append(key.split()[-1])
else:
carb.log_warn(f"No mission found for task {self.task_type}: {self.task_id}")
self.suggest_mission_id_text_ui.model.set_value(generate_suggestion_text_from_list(mission_ids))
def suggest_goal(self):
self.read_ui()
task_folder = os.path.join(DATA_PATH_NEW, self.annotator, "task", self.task_type, str(self.task_id))
if not os.path.exists(task_folder):
carb.log_warn(f"Task folder not exist at {task_folder}")
self.info_ui.model.set_value("Please add mission.")
mission_file_path = os.path.join(task_folder, "missions.json")
if os.path.exists(mission_file_path):
missions = json.load(open(mission_file_path))
carb.log_info(f"Loading missions.json at path {mission_file_path}")
mission_identifier_prefix = self.task_type + " " + str(self.task_id) + " "
mission_identifier_suffix = str(self.mission_id)
for key, value in missions.items():
if key.startswith(mission_identifier_prefix) and key.endswith(mission_identifier_suffix):
current_task = missions[key]
self.info_ui.model.set_value(json.dumps(current_task["goal"], indent = 2))
else:
self.info_ui.model.set_value("Please add mission.")
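# The suggestion queries above assume an on-disk layout of the following shape
# (inferred from the os.path.join calls in this class):
#   <DATA_PATH_NEW>/<annotator>/task/<task_type>/<task_id>/robots.json
#   <DATA_PATH_NEW>/<annotator>/task/<task_type>/<task_id>/missions.json
#   <DATA_PATH_NEW>/<annotator>/house/<house_id>/anchor.json
# where each anchor.json key is "<task_type> <task_id> <robot_id> <anchor_id>".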
| 7,252 | Python | 41.415204 | 129 | 0.576117 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/autotask/auto_label.py | import omni
import numpy as np
try:
import pandas as pd
except:
omni.kit.pipapi.install("pandas")
import pandas as pd
GOODLE_SHEET_INFO = {
"close_cabinet": "187VN5J70tEH6ByemAs60FRA2uxE5UmtMr2rBZ0DCOAs",
"close_door": "1Lm-nqYdeUfjGZc2WyqJCG5JcI1z5zDhfeoxZiUX7VKE",
"close_drawer": "1OMmuQNKcvbc-CQm67CQbQSmiQGRMVXtNYYXgTsNg9NE",
"open_cabinet": "1SWXaK5v701wMklIMu4MTgh8Wes5WS9bd_YTrH9-DPdw",
"open_drawer": "1DHYxbRRs0i11rEmDKJ7XK4H0UTTct2QpPTpIPkHnImU",
"pickup_object": "1mq7qCTsJWKnr1-MWA7kzOehZM6fw-o8iHpqKAS6PM44",
"pour_water": "1mS1HUljpu2tZCfiHNvHc2FfrsvGFzwyXRm6pqj3uzZU",
"reorient_object": "1VyoSXjUxp5ef2RPGRxovIv3SA5rr-gm66sjABegqcwM",
"transfer_water": "1fKLFHfF3LsYIWlheqQwGHIf6Bpn05BnT-AQheANyO6o",
"tap_water": "1kgXT6baclDuvyCe4ijJgrR1xTDbkZggxP7d5gQpWR8w",
"open_door": "1fKp1vzDMeoR0lPspqtVZTaHdNhCyXdJ6SN2EnIjQ6CA",
}
# for key in GOODLE_SHEET_INFO:
# sheet_id = GOODLE_SHEET_INFO[key]
# test = pd.read_csv(f"https://docs.google.com/spreadsheets/d/{sheet_id}/export?format=csv")
# print(test.head())
class AutoLabeler():
def __init__(self, task_type) -> None:
# load task
self.task_type = task_type
self.cache = {}
# for task_type_cache in GOODLE_SHEET_INFO.keys():
# cache_id = GOODLE_SHEET_INFO[task_type_cache]
# try:
# self.cache[task_type_cache] = pd.read_csv(f"https://docs.google.com/spreadsheets/d/{cache_id}/export?format=csv")
# except:
# print("service not available: ", task_type_cache)
# load data
if self.task_type:
sheet_id = GOODLE_SHEET_INFO[self.task_type]
self.data = pd.read_csv(f"https://docs.google.com/spreadsheets/d/{sheet_id}/export?format=csv")
self.cache[task_type] = self.data
# load id
self.current_id = -1
def set_task_type(self, task_type):
if task_type not in self.cache:
cache_id = GOODLE_SHEET_INFO[task_type]
try:
self.cache[task_type] = pd.read_csv(f"https://docs.google.com/spreadsheets/d/{cache_id}/export?format=csv")
except:
print("service not available: ", task_type)
self.data = self.cache[task_type]
def set_id(self, id):
"""
set current id
"""
self.current_id = id
def find_row_num(self, task_id, robot_id, mission_id, house_id, trial_id):
cond = np.where( (self.data['task_id'] == int(task_id)) & (self.data['robot_id'] == int(robot_id)) &
(self.data['mission_id'] == int(mission_id)) & (self.data['house_id'] == int(house_id)) & (self.data['trial_id'] == int(trial_id))
)
try:
return int(cond[0])+2
except:
return -1
def load_row(self):
"""
Load task information from row_id
"""
assert self.current_id >= 0
if self.current_id >= len(self.data):
raise Exception(f"Note: current labeling is done {self.task_type}: {self.current_id} / {len(self.data)}")
id = self.current_id
task_id = self.data["task_id"][id]
robot_id = self.data["robot_id"][id]
mission_id = self.data["mission_id"][id]
house_id = self.data["house_id"][id]
trial_id = self.data["trial_id"][id]
return int(task_id), int(robot_id), int(mission_id), int(house_id), int(trial_id)
def next(self):
"""
find next id
"""
if self.current_id >= 0:
self.current_id += 1
else:
"""
find current labeling index
"""
for i in range(len(self.data)):
if pd.isnull(self.data['progress'][i]):
self.current_id = i
return
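# A minimal usage sketch for AutoLabeler; the task type below is one illustrative
# key from GOODLE_SHEET_INFO, and the sheet is assumed to contain a "progress"
# column (as read in next()).
#
#   labeler = AutoLabeler("open_drawer")   # downloads the sheet as csv
#   labeler.next()                         # seek the first row with empty progress
#   task_id, robot_id, mission_id, house_id, trial_id = labeler.load_row()
#   row_num = labeler.find_row_num(task_id, robot_id, mission_id, house_id, trial_id)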
| 3,942 | Python | 33.893805 | 144 | 0.576865 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/house.py | import os
import json
import omni
import pxr
import carb
# phyxc
from omni.physx.scripts.utils import setCollider, setRigidBody, setStaticCollider, set_physics_scene_asyncsimrender
from ..param import SAPIEN_ASSET_PATH, HOUSE_INFO_PATH, DATA_PATH_ROOT, RIGIDBODY_OBJ_TYPES, GAME_OBJ_NAMES
from .utils import rename_prim, rotationXYZ_to_quaternion
# from omni.isaac.core.utils.stage import (
# get_current_stage,
# )
from pxr import UsdGeom, UsdLux, Gf, Vt, UsdPhysics, PhysxSchema, Usd, UsdShade, Sdf
class House():
def __init__(self,
data_path:str = DATA_PATH_ROOT,
sapien_asset_path:str = SAPIEN_ASSET_PATH,
house_info_path:str = HOUSE_INFO_PATH):
self.data_path = data_path
self.sapien_asset_path = sapien_asset_path
self.house_info_path = house_info_path
self.layout = {
"id":0,
"params":{
# "SCENE_ASSET_PATH":self.data_path,
"SAPIEN_ASSET_PATH":self.sapien_asset_path,
"HOUSE_INFO_PATH":self.house_info_path,
},
"asset":{
"room_name":"",
"sapien":[],
},
"layout_offsets":[]
}
def set_id(self, example_id):
"""
Set up example id
"""
self.example_id = example_id
self.layout["id"] = example_id
def set_task(self, task_type, task_id = None):
"""
Set up task type
"""
self.layout["task"] = task_type
def get_furniture_info(self):
"""
Get furniture information especially for collision from current scene
"""
self.stage = omni.usd.get_context().get_stage()
# furniture parent
furni_parent = self.stage.GetPrimAtPath("/World/layout/furniture")
additional_collisions = []
for prim in furni_parent.GetChildren():
if prim.HasAPI(pxr.UsdPhysics.RigidBodyAPI) or prim.HasAPI(pxr.UsdPhysics.CollisionAPI):
# prim.GetAttribute("physics:rigidBodyEnabled").Set(False)
print("collision prim name", prim.GetPath(), prim.GetAttribute("physics:rigidBodyEnabled").Get())
# robot_prim.GetAttribute("xformOp:orient").Get()
additional_collisions.append(prim.GetPath().pathString)
self.layout["asset"]["furniture_collisions"] = additional_collisions
def get_robot_info(self, robot_prim_path = "/World/game/franka"):
"""
Get robot information at robot_prim_path
"""
self.stage = omni.usd.get_context().get_stage()
robot_prim = self.stage.GetPrimAtPath(robot_prim_path)
if not robot_prim or not pxr.UsdGeom.Xform.Get(self.stage, robot_prim_path):
raise Exception(f"Must have a robot with XForm at path {robot_prim_path}")
quad = robot_prim.GetAttribute("xformOp:orient").Get()
if not quad:
rotateXYZ = robot_prim.GetAttribute("xformOp:rotateXYZ").Get()
quad = rotationXYZ_to_quaternion(rotateXYZ)
translate = robot_prim.GetAttribute("xformOp:translate").Get()
scale = robot_prim.GetAttribute("xformOp:scale").Get()
quad = eval(str(quad))
# print(quad)
robot_info = {
"position": [round(translate[0], 3), round(translate[1],3), round(translate[2], 3)],
"rotation": [round(quad[0], 3), round(quad[1], 3), round(quad[2], 3), round(quad[3], 3)],
}
return robot_info
def add_asset_info(self):
"""
        Add other asset information
"""
# move to randomizer
pass
def get_asset_info(self, append = False):
"""
Get mobility, and furniture information from current scene
        :param append: append room information if True, else delete the existing json
"""
self.stage = omni.usd.get_context().get_stage()
room_layout_json = os.path.join(self.data_path, "house", str(self.example_id) + ".json")
# if layout json already exists, record game/parent offset as obj randomization
if os.path.exists(room_layout_json):
carb.log_warn(f"room info already exists at {room_layout_json}")
# append other information into json
if append:
self.layout = json.load(open(room_layout_json))
self.add_asset_info()
return
else:
# delete json and start another
os.remove(room_layout_json)
# Get room name
room_path = self.stage.GetRootLayer().realPath
# print("room_path: ", room_path)
if room_path:
relative_path = omni.client.make_relative_url(self.house_info_path, room_path)
print("room_name: ", relative_path)
self.layout["asset"]["room_name"] = relative_path
else:
self.layer = self.stage.GetRootLayer()
# print("layer: ", )
for ref in self.layer.GetExternalReferences():
if "layout" in str(ref):
#PathUtils.compute_relative_path(self.house_info_path,str(ref))
relative_path = omni.client.make_relative_url(self.house_info_path, str(ref))
relative_path.replace("\\\\", "/")
self.layout["asset"]["room_name"] = relative_path
break
# Get sapien asset name
prims = [self.stage.GetDefaultPrim()]
game_prim = self.stage.GetPrimAtPath("/World/game")
if game_prim:
prims.append(game_prim)
for game_prim in prims:
for prim in game_prim.GetChildren():
# if prim is game obj, record information
is_game_obj = False
for game_name in GAME_OBJ_NAMES:
if game_name in prim.GetPath().pathString:
is_game_obj = True
break
if is_game_obj:
reference, _ = omni.usd.get_composed_references_from_prim(prim)[0]
print("mobility reference: ", reference.assetPath)
# get obj type from paths
path_splits = reference.assetPath.split("/")
if 'sapien_parsed' in path_splits:
# sapien objs
obj_type = reference.assetPath.split("/")[-3]
obj_id = int(reference.assetPath.split("/")[-2])
assetPath = None
elif 'omniverse:' in path_splits:
# obj from omniverse cloud
assetPath = reference.assetPath
obj_type = path_splits[-2]
obj_id = 0
else:
# custom objs
assetPath = "/".join(path_splits[-3:])
obj_type = path_splits[-3]
obj_id = path_splits[-2]
obj_info = {
"asset_path": assetPath,
"obj_type": obj_type,
"obj_id": obj_id,
}
# for attr in prim.GetAttributes():
# print(attr)
if prim.HasAttribute("xformOp:orient"):
quad = prim.GetAttribute("xformOp:orient").Get()
else:
rotateXYZ = prim.GetAttribute("xformOp:rotateXYZ").Get()
quad = rotationXYZ_to_quaternion(rotateXYZ)
translate = prim.GetAttribute("xformOp:translate").Get()
scale = prim.GetAttribute("xformOp:scale").Get()
quad = eval(str(quad))
# print("quad", quad)
obj_info["xformOp:translate"] = [translate[0], translate[1], translate[2]]
obj_info["xformOp:orient"] = [quad[0], quad[1], quad[2], quad[3]]
obj_info["xformOp:scale"] = [scale[0],scale[1],scale[2]]
self.layout["asset"]["sapien"].append(obj_info)
# print("get mobility info ???")
# get robot information if don't have
# if "robot" not in self.layout:
# if self.stage.GetPrimAtPath("/World/game/franka"):
# # if has robot
# self.get_robot_info()
# get additional furniture collision information if don't have
# if "furniture_collisions" not in self.layout["asset"]:
# self.get_furniture_info()
print("get mobility info", self.layout)
def save_asset_info(self):
"""
Save asset at data_path
"""
print("saveing file at " + str(self.layout["id"]) + ".json")
with open(os.path.join(self.data_path, "house", str(self.layout["id"]) + ".json"), "w") as output_file:
json.dump(self.layout, output_file, sort_keys=True, indent=4)
def _setup_physics_material(self, path):
"""
Set up physics material for the prim at path
"""
# def _setup_physics_material(self, path: Sdf.Path):
from pxr import UsdGeom, UsdLux, Gf, Vt, UsdPhysics, PhysxSchema, Usd, UsdShade, Sdf
from omni.physx.scripts import physicsUtils
stage = omni.usd.get_context().get_stage()
_material_static_friction = 1.0
_material_dynamic_friction = 1.0
_material_restitution = 0.0
_physicsMaterialPath = None
if _physicsMaterialPath is None:
_physicsMaterialPath = stage.GetDefaultPrim().GetPath().AppendChild("physicsMaterial")
UsdShade.Material.Define(stage, _physicsMaterialPath)
material = UsdPhysics.MaterialAPI.Apply(stage.GetPrimAtPath(_physicsMaterialPath))
material.CreateStaticFrictionAttr().Set(_material_static_friction)
material.CreateDynamicFrictionAttr().Set(_material_dynamic_friction)
material.CreateRestitutionAttr().Set(_material_restitution)
collisionAPI = UsdPhysics.CollisionAPI.Get(stage, path)
prim = stage.GetPrimAtPath(path)
if not collisionAPI:
collisionAPI = UsdPhysics.CollisionAPI.Apply(prim)
# apply material
# physicsUtils.add_physics_material_to_prim(stage, prim, _physicsMaterialPath)
def load_asset_info(self, house_id, object_id = None):
"""
load asset from data path
"""
room_layout_json = os.path.join(self.data_path, "house", str(house_id) + ".json")
print("hosue id", str(house_id), "data path: wtf", room_layout_json)
if not os.path.exists(room_layout_json):
raise Exception( "The json file at path {} provided wasn't found".format(room_layout_json) )
# load json
self.layout = json.load(open(room_layout_json))
# get current stage and layer
self.stage = omni.usd.get_context().get_stage()
self.layer = self.stage.GetRootLayer()
# load house info
house_path = os.path.join(self.house_info_path, self.layout["asset"]["room_name"].replace("\\","/"))
# print('self.layout["asset"]["room_name"]',self.layout["asset"]["room_name"])
print("house_path: ", house_path)
omni.kit.commands.execute(
"CreateSublayer",
layer_identifier=self.layer.identifier,
sublayer_position=0,
new_layer_path=house_path,
transfer_root_content=False,
create_or_insert=False,
layer_name="",
)
# set up furniture root
default_prim_path_str = self.stage.GetDefaultPrim().GetPath().pathString
## this is necessary because for standalone this might not be /World
if not default_prim_path_str:
default_prim_path_str = "/World"
self.xform_game_path = default_prim_path_str + "/game" # omni.usd.get_stage_next_free_path(self.stage, "/World/game", True)
if not self.stage.GetPrimAtPath(self.xform_game_path):
xform_game = pxr.UsdGeom.Xform.Define(self.stage, self.xform_game_path)
xform_game.AddTranslateOp().Set(pxr.Gf.Vec3f(0.0, 0.0, 0.0))
xform_game.AddOrientOp().Set(pxr.Gf.Quatf(1.0, 0.0, 0.0, 0.0))
xform_game.AddScaleOp().Set(pxr.Gf.Vec3f(1.0, 1.0, 1.0))
# # Everything has to have collision
# furni_parent = self.stage.GetPrimAtPath("/World/furniture")
# for prim in furni_parent.GetChildren():
# setCollider(prim, "convexDecomposition")
# floor_prim = self.stage.GetPrimAtPath("/World/floors")
# setCollider(floor_prim, "convexDecomposition")
# add collision information
if "furniture_collisions" in self.layout["asset"]:
for furni_path in self.layout["asset"]["furniture_collisions"]:
prim = self.stage.GetPrimAtPath(furni_path)
setCollider(prim, "convexDecomposition")
print("try to set collider: ", furni_path)
setRigidBody(prim, "convexDecomposition", False)
physicsAPI = UsdPhysics.RigidBodyAPI.Apply(prim)
physicsAPI.CreateRigidBodyEnabledAttr(False)
# physicsAPI.CreateDisableGravityAttr(True)
print("set rigid body: ", furni_path)
# load furniture info
for obj in self.layout["asset"]["sapien"]:
# filter objects: only keep the one needed for the current task
if object_id is not None:
if obj['obj_id'] != object_id:
continue
# get asset path
if "asset_path" in obj and obj["asset_path"] is not None:
if "omniverse:" in obj["asset_path"]:
# cloud obj
obj_usd_path = obj["asset_path"]
else:
# custom object
obj_usd_path = os.path.join(self.sapien_asset_path, "../custom", obj["asset_path"])
else:
# sapien object
obj_usd_path = os.path.join(self.sapien_asset_path, obj["obj_type"], str(obj["obj_id"]), "mobility.usd")
print("obj_usd_path", obj_usd_path)
# load data
mobility_prim_path = self.xform_game_path + "/mobility" # use the stored path so this also works when the game xform already existed
prim = self.stage.GetPrimAtPath(mobility_prim_path)
if not prim.IsValid():
prim = self.stage.DefinePrim(mobility_prim_path)
success_bool = prim.GetReferences().AddReference(obj_usd_path)
if not success_bool:
raise Exception("The usd file at path {} provided wasn't found".format(obj_usd_path))
# set xform
# obj_xform = pxr.UsdGeom.Xformable.Get(self.stage, prim.GetPath())
# translate_component = obj_xform.GetOrderedXformOps()[0]
# orient_component = obj_xform.GetOrderedXformOps()[1]
# scale_component = obj_xform.GetOrderedXformOps()[2]
translate = obj["xformOp:translate"]
# translate_component.Set(tuple(translate))
orient = eval(obj["xformOp:orient"]) if isinstance(obj["xformOp:orient"], str) else obj["xformOp:orient"]
rotation = pxr.Gf.Quatd(orient[0], orient[1], orient[2], orient[3])
# orient_component.Set(rotation)
scale = obj["xformOp:scale"]
# scale_component.Set(tuple(scale))
xform = pxr.Gf.Matrix4d().SetScale(scale) * pxr.Gf.Matrix4d().SetRotate(rotation) * pxr.Gf.Matrix4d().SetTranslate(translate)
omni.kit.commands.execute(
"TransformPrimCommand",
path=prim.GetPath(),
new_transform_matrix=xform,
)
## or
# xform_geom.AddTranslateOp().Set(position)
# xform_geom.AddOrientOp().Set(orientation)
# xform_geom.AddScaleOp().Set(scale)
# set collision & rigidbody
should_add_rigidbody = False
for collision_type in RIGIDBODY_OBJ_TYPES:
if collision_type in obj["obj_type"]:
should_add_rigidbody = True
break
if should_add_rigidbody:
setRigidBody(prim, "convexDecomposition", False)
# set up physical materials
# self._setup_physics_material(prim.GetPath())
# rename path
# TODO: set up name rules
old_prim_name = prim.GetPath().pathString
new_prim_path = prim.GetPath().GetParentPath().AppendChild("mobility_" + obj["obj_type"] + "_" + str(obj["obj_id"]))
new_prim_name = omni.usd.get_stage_next_free_path(self.stage, new_prim_path.pathString, False)
carb.log_info("rename:" + old_prim_name + ";" + new_prim_name)
rename_prim(old_prim_name, new_prim_name)
default_prim_path_str = self.stage.GetDefaultPrim().GetPath().pathString
## this is necessary because for standalone this might not be /World
if not default_prim_path_str:
default_prim_path_str = "/World"
#set up physics scene
# from omni.physx.scripts import utils
_gravityMagnitude = 100.0 # IN CM/s2 - use a lower gravity to avoid fluid compression at 60 FPS
_gravityDirection = Gf.Vec3f(0.0, -1.0, 0.0)
_solver = "TGS"
_gpuMaxNumPartitions = 4
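# Note: the physics scene assumes a Y-up stage in centimeter units; 100 cm/s2 is
# deliberately far below real gravity (~981 cm/s2) for the fluid-stability reason noted above.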
physicsScenePath = default_prim_path_str + "/physicsScene" # USD prim paths always use forward slashes
scene = UsdPhysics.Scene.Define(self.stage, physicsScenePath)
scene.CreateGravityDirectionAttr().Set(_gravityDirection)
scene.CreateGravityMagnitudeAttr().Set(_gravityMagnitude)
set_physics_scene_asyncsimrender(scene.GetPrim())
physxAPI = PhysxSchema.PhysxSceneAPI.Apply(scene.GetPrim())
physxAPI.CreateSolverTypeAttr(_solver)
physxAPI.CreateGpuMaxNumPartitionsAttr(_gpuMaxNumPartitions)
def add_distraction_objects(self):
pass
| 18,324 | Python | 40.647727 | 137 | 0.560194 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/utils.py | # utility functions
import omni
import pxr
from pxr import Gf, Semantics
import carb
import json
import numpy as np
def add_semantics(prim, semantic_label):
if not prim.HasAPI(Semantics.SemanticsAPI):
sem = Semantics.SemanticsAPI.Apply(prim, "Semantics")
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
sem.GetSemanticTypeAttr().Set("class")
sem.GetSemanticDataAttr().Set(semantic_label)
def rename_prim(old_prim_name, new_prim_name):
# old_prim_name = prim.GetPath().pathString
# new_prim_name = prim.GetPath().GetParentPath()
# new_prim_name = new_prim_name.AppendChild("Door1")
# new_prim_name = omni.usd.get_stage_next_free_path(self.stage, new_prim_name.pathString, False)
# print("new_prim_name: ", new_prim_name)
move_dict = {old_prim_name: new_prim_name}
if pxr.Sdf.Path.IsValidPathString(new_prim_name):
move_dict = {old_prim_name: new_prim_name}
omni.kit.commands.execute("MovePrims", paths_to_move=move_dict, on_move_fn=None)
else:
carb.log_error(f"Cannot rename {old_prim_name} to {new_prim_name} as its not a valid USD path")
def freeze_prim(prim, scale = [1, 1, 1]):
"""
Perform free transform command to current x_form_prim
"""
stage = omni.usd.get_context().get_stage()
omni.kit.undo.begin_group()
prim_name = prim.GetPath().pathString
temp_name = prim_name + "_temp"
rename_prim(prim_name, temp_name)
temp_prim = stage.GetPrimAtPath(temp_name)
# transform to the correct scale
prim_xform = Gf.Matrix4d().SetScale(scale)
omni.kit.commands.execute(
"TransformPrimCommand",
path=temp_name,
new_transform_matrix=prim_xform,
)
# create a unit xform
omni.kit.commands.execute(
"CreatePrim",
prim_path=prim_name,
prim_type="Xform",
select_new_prim=False,
)
move_dict = {}
for prim in temp_prim.GetChildren():
old_prim_name = prim.GetPath().pathString
new_prim_name = old_prim_name.replace("_temp", "")
move_dict[old_prim_name] = new_prim_name
omni.kit.commands.execute("MovePrims", paths_to_move=move_dict, keep_world_transform = True, on_move_fn=None)
# print(0/0)
omni.kit.commands.execute("DeletePrims", paths=[temp_prim.GetPath()])
# return new root prim
return stage.GetPrimAtPath(prim_name)
def rotationXYZ_to_quaternion(rotationXYZ):
translate = Gf.Vec3d(0, 0, 0)
euler = rotationXYZ
scale = Gf.Vec3d(1, 1, 1)
rotation = (
Gf.Rotation(Gf.Vec3d.ZAxis(), euler[2])
* Gf.Rotation(Gf.Vec3d.YAxis(), euler[1])
* Gf.Rotation(Gf.Vec3d.XAxis(), euler[0])
)
xform = Gf.Matrix4d().SetScale(scale) * Gf.Matrix4d().SetRotate(rotation) * Gf.Matrix4d().SetTranslate(translate)
return xform.ExtractRotationQuat()
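# Usage sketch (illustrative input): a 90-degree yaw stored as rotateXYZ becomes the
# quaternion expected by "xformOp:orient":
#   rotationXYZ_to_quaternion(Gf.Vec3d(0, 0, 90))  # roughly (0.707, 0, 0, 0.707), real part first, up to sign
#
# The NpEncoder below lets json.dump serialize numpy scalars and arrays directly, e.g.
#   json.dumps({"pos": np.array([1.0, 2.0, 3.0]), "id": np.int64(7)}, cls=NpEncoder)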
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
if isinstance(obj, np.floating):
# 👇️ alternatively use str()
return float(obj)
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj) | 3,272 | Python | 31.73 | 117 | 0.640587 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/randomizer.py | import omni
import pxr
from pxr import Gf
import carb
import os
import random
import json
from omni.kit.material.library import get_material_prim_path, create_mdl_material
from ..param import IS_IN_ISAAC_SIM, SAPIEN_ASSET_PATH, HOUSE_INFO_PATH, DATA_PATH_ROOT
class Randomizer():
def __init__(self, task_json_path=None, random_seed = 1) -> None:
# self.house = house
# self.layout = self.house.layout if house is not None else {}
self.task_json_path = task_json_path
self.random_seed = random_seed
# randomize index
self.light_rnd = -1 # light randomized index
self.location_rnd = -1 # game loc randomized index
self.material_rnd = -1 # material randomized index
if task_json_path:
if not os.path.exists(self.task_json_path):
raise Exception( "The json file at path {} provided wasn't found".format(self.task_json_path))
self.task_json = json.load(open(self.task_json_path))
else:
self.task_json = {}
# init randomization
if "random" not in self.task_json:
self.random_info = {
"lights":[],
"materials":{},
"locations":[{
"translate":[0,0,0],
"orient":[1,0,0,0],
"scale":[1.0,1.0,1.0]
}],
}
self.task_json["random"] = self.random_info
else:
self.random_info = self.task_json["random"]
# material
self.material_dict = {}
# @staticmethod
def get_water_material(self):
from pxr import Tf, Sdf, Usd, UsdShade
# self.setup_material_helper()
# print()
water_url = 'http://localhost:8080/omniverse://127.0.0.1/NVIDIA/Materials/Base/Natural/Water.mdl'
water_mtl_name = water_url.split("/")[-1][:-4]
# print("material dict: ", self.material_dict)
water_material_prim_path = get_material_prim_path(water_mtl_name)
# omni.kit.commands.execute(
# "CreatePrim", prim_path=water_material_prim_path, prim_type="Scope", select_new_prim=False
# )
def on_create(path):
pass
return create_mdl_material(omni.usd.get_context().get_stage(), water_url, water_mtl_name, on_create)
# stage = omni.usd.get_context().get_stage()
# if stage.HasDefaultPrim():
# mtl_path = omni.usd.get_stage_next_free_path(
# stage, "{}/Looks/{}".format(stage.GetDefaultPrim().GetPath(), Tf.MakeValidIdentifier(water_mtl_name)), False
# )
# else:
# mtl_path = omni.usd.get_stage_next_free_path(
# stage, "/Looks/{}".format(Tf.MakeValidIdentifier(water_mtl_name)), False
# )
# omni.kit.commands.execute("CreateMdlMaterialPrim", mtl_url=water_url, mtl_name=water_mtl_name,
# mtl_path=water_material_prim_path, select_new_prim=False)
# return water_material_prim_path
# omni.kit.commands.execute(
# "CreateMdlMaterialPrim",
# mtl_url=water_url,
# mtl_name=water_mtl_name,
# mtl_path=water_material_prim_path,
# select_new_prim=False,
# )
# omni.kit.commands.execute(
# 'BindMaterial',
# prim_path=prim.GetPath(),
# material_path = water_material_prim_path,
# strength=pxr.UsdShade.Tokens.strongerThanDescendants
# )
return water_material_prim_path
def set_seed(self, seed):
self.random_seed = seed
def randomize_light(self):
"""
Randomize light intensity
"""
self.random_info["lights"] = [0, 200, 400, 600, 800, 1000] # light intensity indexes
self.light_rnd = random.choice([_ for _ in range(len(self.random_info["lights"]))])
self.stage = omni.usd.get_context().get_stage()
self.default_prim = self.stage.GetDefaultPrim()
# print("?", self.default_prim.GetPath().pathString + "/defaultLight")
light_prim = self.stage.GetPrimAtPath(self.default_prim.GetPath().pathString + "/defaultLight")
assert light_prim.GetTypeName() == "DistantLight"
light_prim.GetAttribute("intensity").Set(self.random_info["lights"][self.light_rnd])
def randomize_game_location(self):
"""
Randomize the game root location (translate/orient chosen from the recorded locations)
"""
assert len(self.random_info["locations"]) > 0
self.location_rnd = (self.location_rnd + 1) % len(self.random_info["locations"])
self.stage = omni.usd.get_context().get_stage()
self.default_prim = self.stage.GetDefaultPrim()
game_prim = self.stage.GetPrimAtPath(self.default_prim.GetPath().pathString + "/game")
game_layout = self.random_info["locations"][self.location_rnd]
assert "translate" in game_layout and "orient" in game_layout
translate = game_layout["translate"]
orient = game_layout["orient"]
rotation = Gf.Quatd(orient[0], orient[1], orient[2], orient[3])
# TODO: check whether scale can be randomized
scale = (1.0, 1.0, 1.0)
print("location")
xform = Gf.Matrix4d().SetScale(scale) * Gf.Matrix4d().SetRotate(rotation) * Gf.Matrix4d().SetTranslate(translate)
omni.kit.commands.execute(
"TransformPrimCommand",
path=game_prim.GetPath(),
new_transform_matrix=xform,
)
def setup_material_helper(self):
"""
set up material randomizer
"""
self.stage = omni.usd.get_context().get_stage()
# check if has material
if len(self.material_dict) > 0:
return
carb.log_info("loading necleu materials")
# load from saved params
try:
# load the materials from nucleus url link
mat_root_path = "http://localhost:8080/omniverse://127.0.0.1/NVIDIA/Materials/"
carb.log_info(f"Collecting files for {mat_root_path}")
result1, entries = omni.client.list(mat_root_path)
from .material.param import NECLEUS_MATERIALS
self.material_dict = NECLEUS_MATERIALS
except:
# load the materials from nucleus url link
mat_root_path = "http://localhost:8080/omniverse://127.0.0.1/NVIDIA/Materials/"
carb.log_info(f"Collecting files for {mat_root_path}")
result1, entries = omni.client.list(mat_root_path)
if result1 != omni.client.Result.OK:
raise Exception(f"nucleus connect error at path: {mat_root_path}")
for e in entries:
print("result: ", e.relative_path)
material_type_folder = mat_root_path + e.relative_path + "/"
result2, mat_type_entries = omni.client.list(material_type_folder)
for mat_type_e in mat_type_entries:
if mat_type_e.relative_path not in self.material_dict:
self.material_dict[mat_type_e.relative_path] = []
material_folder = material_type_folder + mat_type_e.relative_path + "/"
result3, mat_entries = omni.client.list(material_folder)
for mat_e in mat_entries:
if mat_e.relative_path.endswith(".mdl"):
mat_path = material_folder + mat_e.relative_path
self.material_dict[mat_type_e.relative_path].append(mat_path)
# filter_out_empty
temp_dict = {}
for key in self.material_dict:
if len(self.material_dict[key]) > 0:
temp_dict[key] = self.material_dict[key]
self.material_dict = temp_dict
# mtl_created_list = []
# omni.kit.commands.execute(
# "CreateAndBindMdlMaterialFromLibrary",
# mdl_name='http://localhost:8080/omniverse://127.0.0.1/NVIDIA/Materials/Base/Architecture/Ceiling_Tiles.mdl',
# mtl_name='Ceiling_Tiles',
# mtl_created_list=mtl_created_list,
# bind_selected_prims=True,
# select_new_prim=False,
# )
def randomize_house(self, rand = True, randomize_floor =True, randomize_wall = True):
"""
randomize house's floor and wall
by default, we only randomize floor
"""
self.setup_material_helper()
floor_parent = self.stage.GetPrimAtPath("/World/layout/floors")
wall_parent = self.stage.GetPrimAtPath("/World/layout/structure") # roomStruct
self.random_info["floor_materials"] = [x for k in ["Wood"] for x in self.material_dict[k]] # Carpet
self.random_info["wall_materials"] = [x for k in ["Wall_Board"] for x in self.material_dict[k]] # "Masonry", "Architecture"
# print(self.random_info["floor_materials"])
# len_floor = len(self.random_info["floor_materials"])
# len_wall = len(self.random_info["wall_materials"])
wall_mtl_url = random.choice(self.random_info["wall_materials"]) if rand else self.random_info["wall_materials"][0]
floor_mtl_url = random.choice(self.random_info["floor_materials"]) if rand else self.random_info["floor_materials"][0]
wall_mtl_name = wall_mtl_url.split("/")[-1][:-4]
floor_mtl_name = floor_mtl_url.split("/")[-1][:-4]
# change mtl
new_looks_path1, wall_material_prim_path = get_material_prim_path(wall_mtl_name)
if new_looks_path1 and randomize_wall:
omni.kit.commands.execute(
"CreatePrim", prim_path=new_looks_path1, prim_type="Scope", select_new_prim=False
)
new_looks_path2, floor_material_prim_path = get_material_prim_path(floor_mtl_name)
if new_looks_path2 and randomize_floor:
omni.kit.commands.execute(
"CreatePrim", prim_path=new_looks_path2, prim_type="Scope", select_new_prim=False
)
for prim in floor_parent.GetChildren():
if prim is None:
raise Exception("no house in scene!")
carb.log_info("changing material at path: " + prim.GetPath().pathString)
if floor_material_prim_path:
omni.kit.commands.execute(
"CreateMdlMaterialPrim",
mtl_url=floor_mtl_url,
mtl_name=floor_mtl_name,
mtl_path=floor_material_prim_path,
select_new_prim=False,
)
omni.kit.commands.execute(
'BindMaterial',
prim_path=prim.GetPath(),
material_path=floor_material_prim_path,
strength=pxr.UsdShade.Tokens.strongerThanDescendants
)
for prim in wall_parent.GetChildren():
if prim is None:
raise Exception("no house in scene!")
carb.log_info("changing material at path: " + prim.GetPath().pathString)
if wall_material_prim_path:
omni.kit.commands.execute(
"CreateMdlMaterialPrim",
mtl_url=wall_mtl_url,
mtl_name=wall_mtl_name,
mtl_path=wall_material_prim_path,
select_new_prim=False,
)
omni.kit.commands.execute(
'BindMaterial',
prim_path=prim.GetPath(),
material_path=wall_material_prim_path,
strength=pxr.UsdShade.Tokens.strongerThanDescendants
)
def randomize_material(self):
"""
randomize material for mobility
"""
self.setup_material_helper()
# print("house material_dict: ", self.material_dict)
# print(os.getcwd())
# if selected, update selection materials
prim_paths = omni.usd.get_context().get_selection().get_selected_prim_paths()
if prim_paths and len(prim_paths) > 0:
pass
else:
# find target object
target_obj_id = str(self.task_json["object_id"])
obj_prim = None
self.stage = omni.usd.get_context().get_stage()
game_parent = self.stage.GetPrimAtPath("/World/game")
for prim in game_parent.GetChildren():
# if no materials
if target_obj_id in prim.GetPath().pathString:
obj_prim = prim
break
# print("obj_path_string", obj_prim.GetPath().pathString)
if len(self.random_info["materials"]) == 0:
material_list = [x for v in self.material_dict.values() for x in v]
mat_urls = random.sample(material_list, 10) # random sample ten materials 80% train 20% test
self.random_info["materials"] = {"train":mat_urls[:8], "test":mat_urls[8:]}
# self.save_asset_info()
# if has materials, load train material type
self.material_rnd = (1 + self.material_rnd) % len(self.random_info["materials"]["train"])
mtl_url = self.random_info["materials"]["train"][self.material_rnd] #random.choice(self.random_info["materials"]["train"])
mtl_name = mtl_url.split("/")[-1][:-4]
if obj_prim is None:
raise Exception(f"must load mobility first (object id){target_obj_id}")
carb.log_info("changing material at path: " + obj_prim.GetPath().pathString)
# change mtl
new_looks_path, material_prim_path = get_material_prim_path(mtl_name)
if new_looks_path:
omni.kit.commands.execute(
"CreatePrim", prim_path=new_looks_path, prim_type="Scope", select_new_prim=False
)
if material_prim_path:
omni.kit.commands.execute(
"CreateMdlMaterialPrim",
mtl_url=mtl_url,
mtl_name=mtl_name,
mtl_path=material_prim_path,
select_new_prim=False,
)
omni.kit.commands.execute(
'BindMaterial',
prim_path=obj_prim.GetPath(),
material_path=material_prim_path,
strength=pxr.UsdShade.Tokens.strongerThanDescendants
)
# mat_type = random.choice(list(self.material_dict.keys()))
# mtl_url = random.choice(self.material_dict[mat_type])
# mtl_name = mtl_url.split("/")[-1][:-4]
# # mtl_url = "http://localhost:8080/omniverse://127.0.0.1/NVIDIA/Materials/Base/Architecture/Ceiling_Tiles.mdl"
# # mtl_name = "Ceiling_Tiles"
# new_looks_path, material_prim_path = get_material_prim_path(mtl_name)
# if new_looks_path:
# omni.kit.commands.execute(
# "CreatePrim", prim_path=new_looks_path, prim_type="Scope", select_new_prim=False
# )
# if material_prim_path:
# omni.kit.commands.execute(
# "CreateMdlMaterialPrim",
# mtl_url=mtl_url,
# mtl_name=mtl_name,
# mtl_path=material_prim_path,
# select_new_prim=False,
# )
# for prim_path in prim_paths:
# omni.kit.commands.execute(
# 'BindMaterial',
# prim_path=prim_path,
# material_path=material_prim_path,
# strength=pxr.UsdShade.Tokens.strongerThanDescendants
# )
def record_game_offset(self):
# record game xform position and rotation
self.stage = omni.usd.get_context().get_stage()
game_prim = self.stage.GetPrimAtPath("/World/game") #pxr.UsdGeom.Xform.Get(self.stage, "/World/game")
if game_prim:
quad = game_prim.GetAttribute("xformOp:orient").Get()
translate = game_prim.GetAttribute("xformOp:translate").Get()
# print("game_prim", game_prim, eval(str(quad)))
quad = eval(str(quad))
layout_offset = {
"translate": [translate[0], translate[1], translate[2]],
"orient": [quad[0], quad[1], quad[2], quad[3]],
"scale": [1.0, 1.0, 1.0],
}
# check if the current layout offset is already recorded
layout_offset_already_recorded = False
#if "layout_offsets" in self.random_info["locations"]:
for offset in self.random_info["locations"]:
#if offset == layout_offset:
print("offset", offset)
if offset["translate"] == layout_offset["translate"] and \
offset["orient"] == layout_offset["orient"] and \
offset["scale"] == layout_offset["scale"]:
layout_offset_already_recorded = True
break
# if not in record, add offset record
if not layout_offset_already_recorded:
self.random_info["locations"].append(layout_offset)
print("New game offset recorded at: ", layout_offset)
def record_randomization(self):
with open(self.task_json_path, "w") as f:
json.dump(self.task_json, f, indent=4)
def randomize_sky(self, sky_type:str = None, url= "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Skies/Dynamic/"):
"""
Add sky to the environment
"""
# return
# FIXME: not compatible with new version
self.stage = omni.usd.get_context().get_stage()
ENVIRONMENT_ROOT = "/Environment"
sky_prim_path = f"{ENVIRONMENT_ROOT}/sky"
# disable light
# light_prim_path = "/World/defaultLight"
# light_prim = self.stage.GetPrimAtPath(light_prim_path)
# if light_prim:
# light_prim.GetAttribute('visibility').Set('invisible')
if sky_type:
sky_name = f"{sky_type}Sky" if not sky_type == "Overcast" else "Overcast"
else:
sky_list = ["ClearSky","CloudySky","Overcast","NightSky"]
sky_name = random.choice(sky_list)
sky_url = f"{url}{sky_name}.usd"
# if found existing env, return
sky_prim = self.stage.GetPrimAtPath(sky_prim_path)
if sky_prim:
carb.log_warn("Sky already in the env")
sky_prim.GetReferences().ClearReferences()
else:
sky_prim = self.stage.DefinePrim(sky_prim_path, "Xform")
if sky_type is not None and len(sky_type) == 0:
# an empty sky type string is invalid
return
sky_prim.GetReferences().AddReference(sky_url)
rot = pxr.Gf.Vec3d(0, 0, 0)
properties = sky_prim.GetPropertyNames()
if "xformOp:rotateXYZ" in properties:
rotation = sky_prim.GetAttribute("xformOp:rotateXYZ")
rotation.Set(rot)
elif "xformOp:rotateZYX" in properties:
rotation = sky_prim.GetAttribute("xformOp:rotateZYX")
rotation.Set(rot)
elif "xformOp:transform" in properties:
carb.log_info("Object missing rotation op. Adding it.")
xform = pxr.UsdGeom.Xformable(sky_prim)
xform_op = xform.AddXformOp(pxr.UsdGeom.XformOp.TypeRotateXYZ, pxr.UsdGeom.XformOp.PrecisionDouble, "")
rotate = Gf.Vec3d(rot[0], rot[1], rot[2])
xform_op.Set(rotate)
# if IS_IN_ISAAC_SIM:
# from omni.isaac.core.utils.stage import add_reference_to_stage
# add_reference_to_stage(sky_url ,sky_prim_path)
# else:
# omni.kit.commands.execute("CreateUsdSkyPrimCommand", sky_url=sky_url, sky_path=sky_prim_path)
# too bright; lower the intensity to protect eyes
#
# domelight_prim = self.stage.GetPrimAtPath("/Environment/sky/DomeLight")
# domelight_prim.GetAttribute("intensity").Set(0)
| 20,304 | Python | 40.354379 | 142 | 0.556984 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/house_new.py | from cgitb import enable
import os
import json
from typing import Container
import numpy as np
import asyncio
import omni
import pxr
import carb
from omni.physx.scripts import physicsUtils
from omni.physx.scripts.utils import setCollider, setRigidBody, setStaticCollider
from omni.usd import get_world_transform_matrix, get_local_transform_matrix
from ..param import DATA_PATH_NEW, ASSET_PATH, HOUSE_INFO_PATH, IS_IN_ISAAC_SIM, RIGIDBODY_OBJ_TYPES, GAME_OBJ_NAMES, \
IS_IN_CREAT, CONTAINER_NAMES, OTHER_OBJ_NAMES
from .utils import rename_prim, rotationXYZ_to_quaternion, freeze_prim
from .modify import modify_game_obj_prim
# if IS_IN_CREAT or IS_IN_ISAAC_SIM:
# import omni.kit.viewport_widgets_manager as wm
# from ..ui.hud import LabelWidget
from .utils import NpEncoder
class House():
def __init__(self,
task_type:str,
task_id:int,
robot_id:int = 0,
mission_id:int = 0,
house_id:int = 0,
anchor_id:int=0,
annotator="",
):
self.task_type = task_type
self.task_id = str(task_id)
self.data_path = DATA_PATH_NEW
self.robot_id = str(robot_id)
self.anchor_id = str(anchor_id)
self.mission_id = str(mission_id)
self.house_id = str(house_id)
self.annotator = str(annotator)
# task saving dicts/lists
self.object_info = []
self.robot_info = {}
self.make_task_saving_folder()
# house saving dict
self.house_appearance = {}
self.house_task_anchor = {}
self.object_prims = []
def make_task_saving_folder(self):
"""
check task saving folder
"""
task_type_folder = os.path.join(self.data_path, self.annotator, "task", self.task_type)
if not os.path.exists(task_type_folder):
os.makedirs(task_type_folder)
task_folder = os.path.join(self.data_path, self.annotator, "task", self.task_type, str(self.task_id))
if not os.path.exists(task_folder):
os.makedirs(task_folder)
def record_obj_info(self):
"""
record game object information and save
"""
# scene
self.stage = omni.usd.get_context().get_stage()
# Get sapien asset name
#prims = [self.stage.GetDefaultPrim()]
game_prim = self.stage.GetPrimAtPath("/World/game")
if not game_prim:
raise Exception("Please move object and robot under /World/game")
#print("prims", prims)
for prim in game_prim.GetChildren():
# print("prim ", prim.GetPath())
# if prim is game obj, record information
is_game_obj = False
for game_name in GAME_OBJ_NAMES + CONTAINER_NAMES + OTHER_OBJ_NAMES:
if game_name in prim.GetPath().pathString.lower():
is_game_obj = True
break
if is_game_obj:
reference, _ = omni.usd.get_composed_references_from_prim(prim)[0]
print("mobility reference: ", reference.assetPath)
relative_path = omni.client.make_relative_url(ASSET_PATH, reference.assetPath)
relative_path = relative_path.replace("\\\\","/").replace("\\","/")
# get obj type from paths
path_splits = relative_path.split("/")
# print("path_splits", path_splits)
# asset_path = "/".join(path_splits[2:])
obj_info = {
"asset_path": relative_path,
"obj_type": path_splits[-3],
"obj_id": path_splits[-2],
"materials":[],
}
mat = get_world_transform_matrix(prim)
if prim.HasAttribute("xformOp:orient"):
quad = prim.GetAttribute("xformOp:orient").Get()
else:
rotateXYZ = prim.GetAttribute("xformOp:rotateXYZ").Get()
quad = rotationXYZ_to_quaternion(rotateXYZ)
# quad = prim.GetAttribute("xformOp:orient").Get() # eval(str(mat.ExtractRotationQuat())) #eval(str(mat.ExtractRotation().GetQuat()))
quad = eval(str(quad))
translate = mat.ExtractTranslation()
scale = prim.GetAttribute("xformOp:scale").Get()
#print("translate", translate)
#print("quad", prim.GetPath(), quad)
obj_info["translate"] = [translate[0], translate[1], translate[2]]
obj_info["orient"] = [quad[0], quad[1], quad[2], quad[3]]
obj_info["scale"] = [scale[0],scale[1],scale[2]]
print("obj_info", obj_info)
# task_identity = obj_info["obj_type"] + obj_info["obj_id"]
self.object_info.append(obj_info)
# IMPORTANT: if the object has a non-uniform scale, freeze the object by:
# 1. creating a new unit xform
# 2. moving the object under that xform
# 3. saving the object as another usd variant
# The condition below is deliberately strict: only open/close door tasks with a
# clearly disproportionate scale trigger the freeze.
game_obj_info = self.object_info[0]
game_obj_scale = game_obj_info["scale"]
if self.task_type in ["open_door", "close_door"]:
need_freeze = abs(game_obj_scale[0] / game_obj_scale[1]) > 1.2 or \
abs(game_obj_scale[0] / game_obj_scale[1]) < 0.8 or \
abs(game_obj_scale[1] / game_obj_scale[2]) > 1.2 or \
abs(game_obj_scale[1] / game_obj_scale[2]) < 0.8 or \
abs(game_obj_scale[0] / game_obj_scale[2]) > 1.2 or \
abs(game_obj_scale[0] / game_obj_scale[2]) < 0.8
if need_freeze:
carb.log_warn("Found non-unit scale object, freezing transfrom...")
original_usd_path = os.path.join(ASSET_PATH, game_obj_info["asset_path"])
var_usd_path = original_usd_path.replace("mobility",
f"mobility_{self.annotator}_{self.task_type}_{self.task_id}_{self.robot_id}_{self.mission_id}_{self.house_id}_{self.anchor_id}")
import shutil
shutil.copyfile(original_usd_path, var_usd_path)
omni.usd.get_context().close_stage()
omni.usd.get_context().open_stage(var_usd_path)
stage = omni.usd.get_context().get_stage()
default_prim = stage.GetDefaultPrim()
# default_prim.GetAttribute("xformOp:scale").Set(pxr.Gf.Vec3f(1, 2, 1))
new_prim = freeze_prim(default_prim, game_obj_scale)
pxr.UsdPhysics.ArticulationRootAPI.Apply(new_prim)
stage.SetDefaultPrim(new_prim)
omni.usd.get_context().save_stage()
# time.sleep(1.0)
# omni.usd.get_context().close_stage()
relative_path = omni.client.make_relative_url(ASSET_PATH, var_usd_path)
relative_path = relative_path.replace("\\", "/")
game_obj_info["asset_path"] = relative_path
new_size = (game_obj_scale[0] * game_obj_scale[1] * game_obj_scale[2]) ** (1/3)
game_obj_info["scale"] = [1 / new_size , 1 / new_size , 1 / new_size]
# save obj info
if len(self.object_info) > 0:
if self.house_id != "-1" and self.anchor_id != "-1":
obj_identifier = f"{self.house_id} {self.anchor_id}"
task_obj_path = os.path.join(self.data_path, self.annotator,"task", self.task_type, self.task_id, "objects_with_rooms.json")
objects_with_rooms = {} if not os.path.exists(task_obj_path) else json.load(open(task_obj_path))
objects_with_rooms[obj_identifier] = self.object_info
with open(task_obj_path, "w") as f:
json.dump(objects_with_rooms, f, indent=4, cls=NpEncoder)
else:
task_obj_path = os.path.join(self.data_path, self.annotator,"task", self.task_type, self.task_id, "objects.json")
with open(task_obj_path, "w") as f:
json.dump(self.object_info, f, indent=4, cls=NpEncoder)
carb.log_info(f"current objects info saving at: {task_obj_path}")
def load_obj_info(self, relative = False):
"""
Load objects for the task
if relative: put obj at the original position
"""
# scene
self.stage = omni.usd.get_context().get_stage()
# set up game root
default_prim_path_str = "/World"
self.xform_game_path = default_prim_path_str + "/game" # omni.usd.get_stage_next_free_path(self.stage, "/World/game", True)
# check if in house
self.object_info = None
if self.house_id != "-1" and self.anchor_id != "-1":
obj_identifier = f"{self.house_id} {self.anchor_id}"
task_obj_path = os.path.join(self.data_path, self.annotator,"task", self.task_type, self.task_id, "objects_with_rooms.json")
objects_with_rooms = {} if not os.path.exists(task_obj_path) else json.load(open(task_obj_path))
if obj_identifier in objects_with_rooms:
self.object_info = objects_with_rooms[obj_identifier]
if self.object_info is None:
task_obj_path = os.path.join(self.data_path, self.annotator, "task", self.task_type, self.task_id, "objects.json")
if not os.path.exists(task_obj_path):
raise Exception( "The json file at path {} provided wasn't found".format(task_obj_path) )
# load object info
self.object_info = json.load(open(task_obj_path))
for obj_idx, obj in enumerate(self.object_info):
# load object usd
obj_usd_path = os.path.join(ASSET_PATH, obj["asset_path"])
translate = obj["translate"]
orient = obj["orient"]
rotation = pxr.Gf.Quatd(orient[0], orient[1], orient[2], orient[3])
scale = obj["scale"]
# move game xform to the first object
# set up parent
if obj_idx == 0:
xform_game = self.stage.GetPrimAtPath(self.xform_game_path)
if not xform_game:
xform_game = pxr.UsdGeom.Xform.Define(self.stage, self.xform_game_path)
self.game_translate = translate if not relative else [0,0,0]
game_xform = pxr.Gf.Matrix4d().SetScale([1,1,1]) * \
pxr.Gf.Matrix4d().SetRotate(pxr.Gf.Quatf(1.0,0.0,0.0,0.0)) * pxr.Gf.Matrix4d().SetTranslate(self.game_translate)
omni.kit.commands.execute(
"TransformPrimCommand",
path=self.xform_game_path,
new_transform_matrix=game_xform,
)
# xform_game.AddTranslateOp().Set(pxr.Gf.Vec3f(*translate))
# xform_game.AddOrientOp().Set()
# xform_game.AddScaleOp().Set(pxr.Gf.Vec3f(1.0, 1.0, 1.0))
# move obj to the correct place
mobility_prim_path = xform_game.GetPath().pathString + "/mobility"
prim = self.stage.GetPrimAtPath(mobility_prim_path)
if not prim.IsValid():
prim = self.stage.DefinePrim(mobility_prim_path)
success_bool = prim.GetReferences().AddReference(obj_usd_path)
# print("get prim children", prim.GetChildren())
if not success_bool:
raise Exception("The usd file at path {} provided wasn't found".format(obj_usd_path))
# relative translate
if obj_idx == 0: # main object
rel_translate = [0,0,0]
else:
rel_translate = [self.game_translate[i] + obj["translate"][i] for i in range(3)]
xform = pxr.Gf.Matrix4d().SetScale(scale) * pxr.Gf.Matrix4d().SetRotate(rotation) * pxr.Gf.Matrix4d().SetTranslate(rel_translate)
omni.kit.commands.execute(
"TransformPrimCommand",
path=prim.GetPath(),
new_transform_matrix=xform,
)
if obj["obj_type"].lower() in GAME_OBJ_NAMES or obj_idx == 0: # main object
obj_prefix = "mobility_"
elif obj["obj_type"].lower() in CONTAINER_NAMES:
obj_prefix = "container_"
else:
obj_prefix = "other_"
# if IS_IN_ISAAC_SIM:
# add_update_semantics(prim, obj["obj_type"])
# TODO: set up name rules
old_prim_name = prim.GetPath().pathString
new_prim_path = prim.GetPath().GetParentPath().AppendChild(obj_prefix + obj["obj_type"] + "_" + str(obj["obj_id"]))
new_prim_name = omni.usd.get_stage_next_free_path(self.stage, new_prim_path.pathString, False)
# carb.log_info("rename:" + old_prim_name + ";" + new_prim_name ";" + prim.GetPath().pathString)
rename_prim(old_prim_name, new_prim_name)
target_obj_prim = self.stage.GetPrimAtPath(new_prim_name)
modify_game_obj_prim(target_obj_prim)
print("modify prim name: ", new_prim_name)
self.object_prims.append(new_prim_name)
def record_robot_info(self, robot_prim_path = "/World/game/franka"):
"""
Record robot information and save its position RELATIVE to the main game object
:params:
robot_prim_path: default robot path
"""
self.stage = omni.usd.get_context().get_stage()
# Get sapien asset name
#prims = [self.stage.GetDefaultPrim()]
game_prim = self.stage.GetPrimAtPath("/World/game")
if not game_prim:
raise Exception("Please move object and robot under /World/game")
#for game_prim in prims:
game_translate = None
for prim in game_prim.GetChildren():
# print("prim ", prim.GetPath())
# if prim is game obj, record information
is_game_obj = False
for game_name in GAME_OBJ_NAMES:
if game_name in prim.GetPath().pathString:
is_game_obj = True
break
if is_game_obj:
mat = omni.usd.utils.get_world_transform_matrix(prim)
game_translate = mat.ExtractTranslation()
break
if not game_translate:
raise Exception("Before recording robot, there must be a game object")
# then, find the robot and calculate its relative position
"""
Get robot information at robot_prim_path
"""
robot_prim = self.stage.GetPrimAtPath(robot_prim_path)
if not robot_prim or not pxr.UsdGeom.Xform.Get(self.stage, robot_prim_path):
raise Exception(f"Must have a robot with XForm at path {robot_prim_path}")
# get robot world transform
# if IS_IN_ISAAC_SIM:
# from omni.isaac.core.prims import XFormPrim
# pos, rot = XFormPrim(robot_prim_path).get_local_pose()
# translate = np.array(pos)
# quad = np.array(rot)
# else:
mat = get_local_transform_matrix(robot_prim)
translate = mat.ExtractTranslation()
quad = eval(str(mat.ExtractRotation().GetQuat()))
rob_info = {
"type":"franka",
"translate": [round(translate[0], 3), round(translate[1],3), round(translate[2], 3)],
"orient": [round(quad[0], 3), round(quad[1], 3), round(quad[2], 3), round(quad[3], 3)],
}
if self.house_id != "-1" and self.anchor_id != "-1":
task_robot_path = os.path.join(self.data_path, self.annotator, "task", self.task_type, self.task_id, "robots_with_rooms.json")
robot_identifier = f"{self.robot_id} {self.house_id} {self.anchor_id} {self.mission_id}"
objects_with_rooms = {} if not os.path.exists(task_robot_path) else json.load(open(task_robot_path))
objects_with_rooms[robot_identifier] = rob_info
with open(task_robot_path, "w") as f:
json.dump(objects_with_rooms, f, indent=4, cls=NpEncoder)
else:
task_robot_path = os.path.join(self.data_path, self.annotator, "task", self.task_type, self.task_id, "robots.json")
if os.path.exists(task_robot_path):
self.robot_info = json.load(open(task_robot_path))
robot_identifier = str(self.robot_id)
self.robot_info[robot_identifier] = rob_info
with open(task_robot_path, "w") as f:
json.dump(self.robot_info, f, indent=4, cls=NpEncoder)
carb.log_info(f"Saving robot json file at {task_robot_path}")
def load_robot_info(self):
"""
Load the robot for the current task
"""
# if append house and anchor info
rot_info = None
if self.house_id != "-1" and self.anchor_id != "-1":
task_robot_path = os.path.join(self.data_path, self.annotator, "task", self.task_type, self.task_id, "robots_with_rooms.json")
robot_identifier = f"{self.robot_id} {self.house_id} {self.anchor_id}"
robot_identifier = f"{self.robot_id} {self.house_id} {self.anchor_id} {self.mission_id}"
objects_with_rooms = {} if not os.path.exists(task_robot_path) else json.load(open(task_robot_path))
if robot_identifier in objects_with_rooms:
rot_info = objects_with_rooms[robot_identifier]
if rot_info is None:
task_robot_path = os.path.join(self.data_path, self.annotator, "task", self.task_type, self.task_id, "robots.json")
if not os.path.exists(task_robot_path):
raise Exception( "The json file at path {} provided wasn't found".format(task_robot_path) )
# load json information
self.robot_info = json.load(open(task_robot_path))
# assert self.robot_id in self.robot_info, \
# f"Please record robot id variation first {self.task_type}, task_id {self.task_id}, robot_id {self.robot_id}"
if self.robot_id in self.robot_info:
rot_info = self.robot_info[self.robot_id]
else:
return None, None
return rot_info["translate"], rot_info["orient"]
def record_house_info(self):
"""
Record house information
:params:
anchor_id: position of the game root
"""
# scene
self.stage = omni.usd.get_context().get_stage()
relative_path = None # house/layer asset relative path
# Get room name
room_path = self.stage.GetRootLayer().realPath
# print("room_path: ", room_path)
if room_path:
relative_path = omni.client.make_relative_url(HOUSE_INFO_PATH, room_path)
relative_path = relative_path.replace("\\\\", "/").replace("\\", "/")
# print("room_name: ", relative_path)
# self.layout["asset"]["room_name"] = relative_path
else:
self.layer = self.stage.GetRootLayer()
# print("layer: ", )
for ref in self.layer.GetExternalReferences():
if "layout" in str(ref):
#PathUtils.compute_relative_path(self.house_info_path,str(ref))
relative_path = omni.client.make_relative_url(HOUSE_INFO_PATH, str(ref))
relative_path = relative_path.replace("\\\\", "/").replace("\\", "/")
# print("relative_path", relative_path)
# self.layout["asset"]["room_name"] = relative_path
break
# make house saving folder
assert relative_path is not None
house_id = relative_path.split("/")[-2]
house_folder = os.path.join(self.data_path, self.annotator,"house", house_id)
if not os.path.exists(house_folder):
os.makedirs(house_folder)
# # make appearance
# appearance_json_path = os.path.join(house_folder, "appearance.json")
# if os.path.exists(appearance_json_path):
# self.house_appearance = json.load(open(appearance_json_path))
# self.house_appearance["asset_path"] = relative_path
# with open(appearance_json_path, "w") as f:
# json.dump(self.house_appearance, f, indent=4)
# carb.log_info(f"Saving hosue appearce json file at {appearance_json_path}")
# find game, task, anchor information
default_prim_path_str = "/World" #self.stage.GetDefaultPrim().GetPath().pathString
game_prim = self.stage.GetPrimAtPath(default_prim_path_str + "/game")
# if game information exists
if game_prim:
# load anchor
anchor_json_path = os.path.join(house_folder, "anchor.json")
if os.path.exists(anchor_json_path):
self.house_task_anchor = json.load(open(anchor_json_path))
# get game transform
mat = omni.usd.utils.get_world_transform_matrix(game_prim)
quad = eval(str(mat.ExtractRotation().GetQuat()))
translate = mat.ExtractTranslation()
translate = [i for i in translate]
anchor_info = {
"task_type": self.task_type,
"task_id": self.task_id,
"robot_id": self.robot_id,
"anchor_id": self.anchor_id,
"game_location": {
"translate": translate,
"orient":quad,
}
}
anchor_info["additional_collisions"] = [] # self.get_furniture_collisions()
# print("anchor_info", anchor_info)
anchor_identifier = self.task_type + " " + self.task_id + " " + self.robot_id + " " + self.anchor_id
self.house_task_anchor[anchor_identifier] = anchor_info
with open(anchor_json_path, "w") as f:
json.dump(self.house_task_anchor, f, indent=4, cls=NpEncoder)
carb.log_info(f"Saving anchor json file at {anchor_json_path}")
def load_house_info(self, enable_collision=True):
"""
load house infomation from house_id, and anchor_id
"""
print("loading house")
# scene
self.stage = omni.usd.get_context().get_stage()
# self.layer = self.stage.GetRootLayer()
house_path = os.path.join(HOUSE_INFO_PATH, self.house_id, "layout.usd")
# omni.kit.commands.execute(
# "CreateSublayer",
# layer_identifier=self.layer.identifier,
# sublayer_position=0,
# new_layer_path=house_path,
# transfer_root_content=False,
# create_or_insert=False,
# layer_name="house",
# )
# Check anchor exists, if not, then only the scene
house_folder = os.path.join(self.data_path, self.annotator, "house", self.house_id)
anchor_json_path = os.path.join(house_folder, "anchor.json")
if not os.path.exists(anchor_json_path):
carb.log_warn("No anchor file found, record anchor information first")
return False
# print("anchor_json_path: ", anchor_json_path)
try:
self.house_task_anchor = json.load(open(anchor_json_path))
except:
carb.log_error("anchro_json path not correct: " + str(anchor_json_path))
return False
anchor_identifier_prefix = self.task_type + " " + self.task_id # + " " + self.robot_id + " " + self.anchor_id
has_anchor = False
for key in self.house_task_anchor:
if key.startswith(anchor_identifier_prefix):
has_anchor = True
anchor_identifier = key
break
if not has_anchor:
carb.log_warn(f"No anchor id: {self.anchor_id}, please record anchor at {anchor_json_path}")
return False
# move obj to the correct place
house_prim_path = "/World/layout"
house_prim = self.stage.GetPrimAtPath(house_prim_path)
if not house_prim.IsValid():
house_prim = self.stage.DefinePrim(house_prim_path)
success_bool = house_prim.GetReferences().AddReference(house_path)
if not success_bool:
raise Exception(f"The house is not load at {house_path}")
# static collider
# print("set collisiton")
# furniture_prim = self.stage.GetPrimAtPath(house_prim_path + "/furniture/furniture_87879")
# setStaticCollider(furniture_prim, approximationShape="convexDecomposition")
furniture_prim = self.stage.GetPrimAtPath(house_prim_path + "/furniture")
# if furniture_prim:
# setStaticCollider(furniture_prim, approximationShape="convexHull")
# else:
# return False
# if not self.task_type in ["tap_water", "transfer_water", "pour_water"] and enable_collision:
# room_struct_prim = self.stage.GetPrimAtPath(house_prim_path + "/roomStruct")
# setStaticCollider(room_struct_prim, approximationShape="none")
# check task/task_type/robot
anchor_info = self.house_task_anchor[anchor_identifier]
# if anchor_info["task_type"] != self.task_type or \
# anchor_info["task_id"] != self.task_id or \
# anchor_info["robot_id"] != self.robot_id:
# raise Exception("Anchor information at {} does not match UI inputs".format(anchor_json_path))
# find game, task, anchor information
default_prim_path_str = "/World"
game_prim = self.stage.GetPrimAtPath(default_prim_path_str + "/game")
# if game information exists
if not game_prim:
carb.log_error(f"must have game obj at path {default_prim_path_str} + /game ")
return False
print("anchor_info", anchor_info)
orient = anchor_info["game_location"]["orient"]
translate = anchor_info["game_location"]["translate"]
rotation = pxr.Gf.Quatd(orient[0], orient[1], orient[2], orient[3])
game_xform = pxr.Gf.Matrix4d().SetScale([1,1,1]) * \
pxr.Gf.Matrix4d().SetRotate(rotation) * pxr.Gf.Matrix4d().SetTranslate(translate)
omni.kit.commands.execute(
"TransformPrimCommand",
path=default_prim_path_str + "/game",
new_transform_matrix=game_xform,
)
# set up additional collision
# for furni_path in anchor_info["additional_collisions"]:
# prim = self.stage.GetPrimAtPath(furni_path)
# # set rigidbody and disable it, only leave with collision
# setRigidBody(prim, "convexDecomposition", False)
# prim.GetAttribute("physics:rigidBodyEnabled").Set(False)
# print("try to set collider: ", furni_path)
## add ground
ground_prim = self.stage.GetPrimAtPath(default_prim_path_str + '/groundPlane')
if not ground_prim:
physicsUtils.add_ground_plane(self.stage, '/groundPlane', "Y", 1000.0,
pxr.Gf.Vec3f(0.0, 0.0, 0), pxr.Gf.Vec3f(0.2))
ground_prim = self.stage.GetPrimAtPath(default_prim_path_str + '/groundPlane')
# prim_list = list(self.stage.TraverseAll())
# prim_list = [ item for item in prim_list if 'groundPlane' in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ]
# for prim in prim_list:
ground_prim.GetAttribute('visibility').Set('invisible')
# if ground_prim:
# omni.kit.commands.execute("DeletePrims", paths=[ground_prim.GetPath()])
# ground_prim = self.stage.GetPrimAtPath("/World/groundPlane")
# if ground_prim:
# omni.kit.commands.execute("DeletePrims", paths=[ground_prim.GetPath()])
# gui = self.stage.GetPrimAtPath("/World/GUI")
# if gui:
# omni.kit.commands.execute("DeletePrims", paths=[gui.GetPath()])
return True
#----------------------------------------utils---------------------------------------------
def get_furniture_collisions(self):
"""
Get furniture information especially for collision from current scene
"""
# scene # furniture parent
self.stage = omni.usd.get_context().get_stage()
additional_collisions = []
furni_parent = self.stage.GetPrimAtPath("/World/furniture")
# if has furniture
if furni_parent:
for prim in furni_parent.GetChildren():
if prim.HasAPI(pxr.UsdPhysics.RigidBodyAPI) or prim.HasAPI(pxr.UsdPhysics.CollisionAPI):
# prim.GetAttribute("physics:rigidBodyEnabled").Set(False)
print("collision prim name", prim.GetPath(), prim.GetAttribute("physics:rigidBodyEnabled").Get())
# robot_prim.GetAttribute("xformOp:orient").Get()
additional_collisions.append(prim.GetPath().pathString)
return additional_collisions
def regularizing_game_robot_obj_location(self):
"""
Regularizing game/robot/obj locations: put the /World/game translate at the main object's location
"""
carb.log_info("Regularizing game/robot/obj locations")
# move game to main object
stage = omni.usd.get_context().get_stage()
game_prim = stage.GetPrimAtPath("/World/game")
if game_prim:
for obj_prim in game_prim.GetChildren():
if "mobility" in obj_prim.GetPath().pathString:
pos = pxr.UsdGeom.Xformable(obj_prim).ComputeLocalToWorldTransform(0).ExtractTranslation()
# rot = pos = pxr.UsdGeom.Xformable(obj_prim).ComputeLocalToWorldTransform(0).ExtractRotation().GetQuat()
# print("pos", pos, "rot", rot)
pos = [i for i in pos]
game_xform = pxr.Gf.Matrix4d().SetScale([1,1,1]) * \
pxr.Gf.Matrix4d().SetRotate(pxr.Gf.Quatf(1.0,0.0,0.0,0.0)) * pxr.Gf.Matrix4d().SetTranslate(pos)
omni.kit.commands.execute(
"TransformPrimCommand",
path=game_prim.GetPath().pathString,
new_transform_matrix=game_xform,
)
obj_prim.GetAttribute("xformOp:translate").Set(pxr.Gf.Vec3f(0.0, 0.0, 0.0))
# also transfer the location of the robot
robot_prim = stage.GetPrimAtPath("/World/game/franka")
if robot_prim:
robot_translate = robot_prim.GetAttribute("xformOp:translate").Get()
new_robot_translate = [robot_translate[i] - pos[i] for i in range(3)]
robot_prim.GetAttribute("xformOp:translate").Set(pxr.Gf.Vec3f(*new_robot_translate))
break
def house_anchor_id_suggestion(self):
"""
Get house ids that are possible for current task_type/task_id/anchor
"""
suggested_house_ids = []
suggested_anchor_ids = []
anchor_identifier_prefix = self.task_type + " " + self.task_id + " " + self.robot_id
house_root = os.path.join(self.data_path, self.annotator, "house")
print("os.listdir(house_root)", house_root)
for house_name in os.listdir(house_root):
anchor_json_path = os.path.join(house_root, house_name, "anchor.json")
if not os.path.exists(anchor_json_path):
carb.log_warn("please add anchor.json to current task")
return ""
with open(anchor_json_path, "r") as f:
anchor_info = json.load(f)
for identifier in anchor_info.keys():
if identifier.startswith(anchor_identifier_prefix):
suggested_house_ids.append(house_name)
anchor_id = identifier.split()[-1]
suggested_anchor_ids.append(anchor_id)
return [str((i,j)) for i,j in zip(suggested_house_ids, suggested_anchor_ids)]
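# Example return value (hypothetical ids): ["('612', '0')", "('613', '2')"]; each entry
# is str((house_id, anchor_id)) for an anchor matching the current task/robot prefix.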
# def build_HUD(self):
# if IS_IN_CREAT or IS_IN_ISAAC_SIM:
# self.stage = omni.usd.get_context().get_stage()
# gui_path = self.stage.GetDefaultPrim().GetPath().pathString + "/GUI"
# gui = self.stage.GetPrimAtPath(gui_path)
# if not gui:
# gui = pxr.UsdGeom.Xform.Define(self.stage, gui_path)
# gui_location = pxr.Gf.Vec3f(0, 100, 100)
# gui.AddTranslateOp().Set(gui_location)
# self.wiget_id = wm.add_widget(gui_path, LabelWidget(f"House id: {self.house_id}"), wm.WidgetAlignment.TOP)
| 33,507 | Python | 45.474341 | 152 | 0.555854 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/modify.py | import omni
import pxr
import carb
from pxr import UsdPhysics, UsdShade, Gf, Semantics
from omni.physx.scripts import physicsUtils
from omni.physx.scripts.utils import setCollider, setRigidBody, setStaticCollider, removeCollider
from ..param import IS_IN_ISAAC_SIM
from .utils import add_semantics
if IS_IN_ISAAC_SIM:
from omni.isaac.core.utils.semantics import add_update_semantics
def modify_game_obj_prim(prim):
"""
modify game object attributes:
if Bottle, add rigid body, physical material, and mass
"""
# add game object semantic
add_semantics(prim, "game_obj")
# print("modifyiing: " + prim.GetPath().pathString)
if "Bottle" in prim.GetPath().pathString or "standalone" in prim.GetPath().pathString:
"""
Set bottle rigid body and physical material
"""
setRigidBody(prim, "convexDecomposition", False)
#prim.GetAttribute("physics:rigidBodyEnabled").Set(False)
setup_physics_material(prim)
add_mass_to_prim(prim)
# stage = omni.usd.get_context().get_stage()
# physicsUtils.add_ground_plane(stage, "/groundPlane", "Y", 750.0, Gf.Vec3f(0.0, -10.0, 0), Gf.Vec3f(0.5))
# if 'Faucet' in prim.GetPath().pathString:
# setup_physics_material(prim)
# add_mass_to_prim(prim)
if IS_IN_ISAAC_SIM and "Bottle" in prim.GetPath().pathString :
add_update_semantics(prim, "Bottle")
if "StorageFurniture" in prim.GetPath().pathString:
"""
Set up physical material for handles
"""
# setup_physics_material(prim)
# add_physical_material_to("coll")
fix_handle('StorageFurniture')
# remove_collider_to("visuals")
# if IS_IN_ISAAC_SIM:
# add_update_semantics(prim, "StorageFurniture")
# add_semantics("handle")
if "Basin" in prim.GetPath().pathString:
approximationShape = "convexDecomposition"
# convex decomp basin
stage = omni.usd.get_context().get_stage()
collision_api = UsdPhysics.MeshCollisionAPI.Get(stage, prim.GetPath())
if not collision_api:
collision_api = UsdPhysics.MeshCollisionAPI.Apply(prim)
collision_api.CreateApproximationAttr().Set(approximationShape)
# set up physical material
# add_physical_material_to("Basin")
if IS_IN_ISAAC_SIM:
add_update_semantics(prim, "Basin")
elif "Faucet" in prim.GetPath().pathString:
from .fluid.cup_data import FAUCET_INFO
faucet_id = prim.GetPath().pathString.split("_")[-1]
inflow_position = FAUCET_INFO[faucet_id]["inflow_pos"]
omni.kit.commands.execute(
"CreatePrim",
prim_path="/World/game/inflow",
prim_type="Xform",
select_new_prim=False,
)
inflow_xform = pxr.Gf.Matrix4d().SetTranslate(inflow_position)
omni.kit.commands.execute(
"TransformPrimCommand",
path="/World/game/inflow",
new_transform_matrix=inflow_xform,
)
stage = omni.usd.get_context().get_stage()
import re
link_pattern = re.compile('.*'+'link_[0-9]+$')
links = list(filter( lambda x : link_pattern.findall(x.GetPath().pathString) , list(stage.TraverseAll()) ))
for link in links:
add_mass_to_prim(link, 0.1)
if IS_IN_ISAAC_SIM:
add_update_semantics(prim, "Faucet")
def add_mass_to_prim(prim, mass:float=0.02, density:float=1):
stage = omni.usd.get_context().get_stage()
mass_api = UsdPhysics.MassAPI.Get(stage, prim.GetPath())
if not mass_api:
mass_api = UsdPhysics.MassAPI.Apply(prim)
mass_api.CreateMassAttr().Set(mass)
# mass_api.CreateDensityAttr().Set(density)
else:
mass_api.GetMassAttr().Set(mass)
# mass_api.GetDensityAttr().Set(density)
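# Usage sketch (hypothetical prim path):
#   add_mass_to_prim(stage.GetPrimAtPath("/World/game/mobility_Bottle_1"), mass=0.05)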
def setup_physics_material(prim):
"""
Set up physics material for the prim at path
"""
# def _setup_physics_material(self, path: Sdf.Path):
stage = omni.usd.get_context().get_stage()
_material_static_friction = 100.0
_material_dynamic_friction = 100.0
_material_restitution = 0.0
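# Restitution 0.0 disables bouncing; the very high friction coefficients (real-world
# materials are usually below ~1.5) make contacts effectively non-slipping.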
_physicsMaterialPath = None
if _physicsMaterialPath is None:
# _physicsMaterialPath = stage.GetDefaultPrim().GetPath().AppendChild("physicsMaterial")
_physicsMaterialPath = prim.GetPath().AppendChild("physicsMaterial")
# print("physics_material_path: ", _physicsMaterialPath)
UsdShade.Material.Define(stage, _physicsMaterialPath)
material = UsdPhysics.MaterialAPI.Apply(stage.GetPrimAtPath(_physicsMaterialPath))
material.CreateStaticFrictionAttr().Set(_material_static_friction)
material.CreateDynamicFrictionAttr().Set(_material_dynamic_friction)
material.CreateRestitutionAttr().Set(_material_restitution)
collisionAPI = UsdPhysics.CollisionAPI.Get(stage, prim.GetPath())
# prim = stage.GetPrimAtPath(path)
if not collisionAPI:
collisionAPI = UsdPhysics.CollisionAPI.Apply(prim)
# apply material
physicsUtils.add_physics_material_to_prim(stage, prim, _physicsMaterialPath)
print("physics material: path: ", _physicsMaterialPath)
def add_ground_plane(prim_path = "/World/game", visiable = False):
stage = omni.usd.get_context().get_stage()
ground_prim = stage.GetPrimAtPath("/World/groundPlane")
if not ground_prim: #IS_IN_ISAAC_SIM:
purposes = [pxr.UsdGeom.Tokens.default_]
bboxcache = pxr.UsdGeom.BBoxCache(pxr.Usd.TimeCode.Default(), purposes)
prim = stage.GetPrimAtPath(prim_path)
bboxes = bboxcache.ComputeWorldBound(prim)
# print("bboxes", bboxes)
y = bboxes.ComputeAlignedRange().GetMin()[1]
physicsUtils.add_ground_plane(stage, "/World/groundPlane", "Y", 750.0, pxr.Gf.Vec3f(0.0, y, 0), pxr.Gf.Vec3f(0.2))
# select ground
selection = omni.usd.get_context().get_selection()
selection.clear_selected_prim_paths()
selection.set_prim_path_selected("/World/groundPlane", True, True, True, True)
ground_prim = stage.GetPrimAtPath("/World/groundPlane")
visibility = "visible" if visiable else 'invisible'
ground_prim.GetAttribute('visibility').Set(visibility)
# prim_list = list(stage.TraverseAll())
# prim_list = [ item for item in prim_list if 'groundPlane' in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ]
# for prim in prim_list:
# prim.GetAttribute('visibility').Set('invisible')
# else:
# # prim_path = stage.GetDefaultPrim().GetPath().pathString
# usd_context = omni.usd.get_context()
# bboxes = usd_context.compute_path_world_bounding_box(prim_path)
# physicsUtils.add_ground_plane(stage, "/groundPlane", "Y", 750.0, pxr.Gf.Vec3f(0.0, bboxes[0][1], 0), pxr.Gf.Vec3f(0.2))
def add_physical_material_to(keyword:str):
"""
Set up physics material and a static convex-decomposition collider for every prim whose path contains the keyword (excluding 'visuals')
"""
stage = omni.usd.get_context().get_stage()
prim_list = list(stage.TraverseAll())
prim_list = [ item for item in prim_list if keyword in item.GetPath().pathString and 'visuals' not in item.GetPath().pathString ]
for prim in prim_list:
setup_physics_material(prim)
print("add physics material to handle")
setStaticCollider(prim, approximationShape = "convexDecomposition")
def fix_handle(keyword):
"""
Set up physics material for handle meshes
and change the collision type to convex decomposition
"""
stage = omni.usd.get_context().get_stage()
prim_list = list(stage.TraverseAll())
#=========================
prim_list = [ item for item in prim_list if keyword in item.GetPath().pathString and \
'handle' in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ]
# print("prim_list: ", prim_list)
for prim in prim_list:
setStaticCollider(prim, approximationShape = "convexDecomposition")
setup_physics_material(prim)
# table = {}
# for prim_path in prim_list:
# prefix, suffix = "/".join(prim_path.split('/')[:-1]), prim_path.split('/')[-1]
# if prefix not in table:
# table[prefix] = []
# table[prefix].append(suffix)
# for prefix, value in table.items():
# handle = value[-1]
# import os
# from omni.isaac.core.utils.prims import get_prim_at_path
# handle_path =str(os.path.join(prefix, handle))
# handle_prim = get_prim_at_path(handle_path)
# setup_physics_material(handle_prim)
# setStaticCollider(handle_prim, approximationShape = "convexDecomposition")
#=================================
# prim_list = list(stage.TraverseAll())
# prim_list = [ item for item in prim_list if keyword in item.GetPath().pathString and \
# 'visuals' in item.GetPath().pathString and item.GetTypeName() == 'Mesh' ]
# print(prim_list)
# for prim in prim_list:
# setup_physics_material(prim)
# setStaticCollider(prim, approximationShape = "convexDecomposition")
def remove_collider_to(keyword:str):
"""
Remove colliders from every prim whose path contains the keyword
"""
stage = omni.usd.get_context().get_stage()
prim_list = list(stage.TraverseAll())
prim_list = [ item for item in prim_list if keyword in item.GetPath().pathString ]
for prim in prim_list:
removeCollider(prim.GetPath().pathString)
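# --- Example usage (a minimal sketch, not part of the original file) ---------
# How the helpers above might be combined from a script running inside
# Omniverse Kit with a scene loaded. The prim path is hypothetical and the
# function is only defined, never called on import.
def _example_apply_physics(prim_path: str = "/World/game/example_obj"):
    stage = omni.usd.get_context().get_stage()
    prim = stage.GetPrimAtPath(prim_path)
    if prim:
        setup_physics_material(prim)       # high-friction material + collider
        add_mass_to_prim(prim, mass=0.02)  # explicit mass so PhysX does not infer it
    add_ground_plane("/World/game", False) # invisible ground under the game prims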
| 9,558 | Python | 38.829167 | 133 | 0.633919 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/param.py | from ..param import ROOT, APP_VERION | 36 | Python | 35.999964 | 36 | 0.777778 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/fluid/cup_setup.py | import math
import os
from ..param import ROOT as root
from ...param import IS_IN_ISAAC_SIM, APP_VERION, USE_ISO_SURFACE
import carb
import omni
import pxr
from pxr import Gf, UsdPhysics, Sdf, Usd, UsdGeom, PhysxSchema, Vt
from omni.physx.scripts import utils, physicsUtils
if APP_VERION.startswith("2022"):
from omni.physx.scripts import particleUtils
import numpy as np
from .constants import PARTICLE_PROPERTY
# from omni.isaac.core.utils.stage import add_reference_to_stage
from .schemaHelpers import addPhysxParticleSystem, addPhysxParticlesSimple, PhysxParticleInstancePrototype
from .utils import generate_cylinder_y, generate_inside_point_cloud, get_quat_from_extrinsic_xyz_rotation
from .cup_data import CUP_PARTICLE_INFO
def setGridFilteringPass(gridFilteringFlags: int, passIndex: int, operation: int, numRepetitions: int = 1):
numRepetitions = max(0, numRepetitions - 1)
shift = passIndex * 4
gridFilteringFlags &= ~(3 << shift)
gridFilteringFlags |= (((operation) << 2) | numRepetitions) << shift
return gridFilteringFlags
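# Worked example (sketch): each filtering pass occupies a 4-bit slot of the
# flags integer; inside a slot the operation is shifted left by 2 and
# (numRepetitions - 1) fills the low two bits. Two smoothing passes
# (operation = 1) at pass indices 0 and 1 therefore pack to 0b1000100 == 68:
_example_grid_filtering_flags = setGridFilteringPass(setGridFilteringPass(0, 0, 1), 1, 1)  # == 68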
class CupFluidHelper():
def __init__(self, use_isosurface = USE_ISO_SURFACE, cup_id = 0, r = 0.1, g = 0.4, b =0.6, material = None, height = None) -> None:
self.stage = omni.usd.get_context().get_stage()
self.cup_id = cup_id
self.rgb =[r,g,b]
self.material = material
self.height = height
self.use_isosurface = use_isosurface
def create(self):
# needs to be called first: set_up_fluid_physical_scene
self.set_up_fluid_physical_scene()
self.set_cup()
self.set_up_particle_system()
self.set_color()
self.set_particle_offset()
def modify_cup_scene(self, cup_prim, add_liquid = True, set_physics=True):
"""
Modify the cup scene given the cup_prim:
1. set up the physical scene and fluid scene
2. add particles
:param cup_prim: prim of the cup to modify
"""
print("modify cup at path: ", cup_prim.GetPath().pathString)
game_prim = cup_prim.GetParent()
# set up physical
self.set_up_fluid_physical_scene()
carb.log_warn("APP_VERION 1: " + APP_VERION)
# modify particleSystemStr
if add_liquid:
particleSystemStr = "/World/Fluid" # game_prim.GetPath().AppendPath("Fluid").pathString
self.particleSystemPath = pxr.Sdf.Path(particleSystemStr)
self.particleInstanceStr = game_prim.GetPath().AppendPath("Particles").pathString
# modify cup
cup_shape_prim_path = cup_prim.GetPath().AppendPath("cupShape").pathString
cup_shape_prim = self.stage.GetPrimAtPath(cup_shape_prim_path)
cup_volume_prim_path = cup_prim.GetPath().AppendPath("cup_volume").pathString
cup_volume_prim = self.stage.GetPrimAtPath(cup_volume_prim_path)
if not cup_shape_prim:
raise Exception(f"Cup shape must exist at path {cup_shape_prim_path}")
# if IS_IN_ISAAC_SIM :
# from omni.isaac.core.utils.semantics import add_update_semantics
# add_update_semantics(cup_shape_prim, "Cup")
# utils.setPhysics(prim=cup_shape_prim, kinematic=False)
# utils.setCollider(prim=cup_shape_prim, approximationShape="convexDecomposition")
# if not set_physics:
# physicsAPI = UsdPhysics.RigidBodyAPI.Apply(cup_shape_prim)
# physicsAPI.CreateRigidBodyEnabledAttr(False)
physxCollisionAPI = pxr.PhysxSchema.PhysxCollisionAPI.Get(self.stage, cup_shape_prim.GetPath())
if not physxCollisionAPI:
physxCollisionAPI = pxr.PhysxSchema.PhysxCollisionAPI.Apply(cup_shape_prim)
self._setup_physics_material(cup_shape_prim.GetPath())
# Mug parameters
restOffset = PARTICLE_PROPERTY._cup_rest_offset
contactOffset = PARTICLE_PROPERTY._cup_contact_offset
assert physxCollisionAPI.GetRestOffsetAttr().Set(restOffset)
assert physxCollisionAPI.GetContactOffsetAttr().Set(contactOffset)
assert cup_shape_prim.CreateAttribute("physxMeshCollision:minThickness", pxr.Sdf.ValueTypeNames.Float).Set(0.001)
self._fluidPositionOffset = Gf.Vec3f(0,0,0)
massAPI = UsdPhysics.MassAPI.Apply(cup_shape_prim)
massAPI.GetMassAttr().Set(PARTICLE_PROPERTY._cup_mass)
# utils.setPhysics(prim=cup_prim, kinematic=False)
utils.removeRigidBody(cup_shape_prim)
utils.setRigidBody(cup_prim, "convexDecomposition", False)
utils.removeCollider(cup_volume_prim)
# add material
# create material 2
mtl_created_list = []
omni.kit.commands.execute(
"CreateAndBindMdlMaterialFromLibrary",
mdl_name="OmniGlass.mdl",
mtl_name="OmniGlass",
mtl_created_list=mtl_created_list,
)
mtl_path = mtl_created_list[0]
omni.kit.commands.execute(
"BindMaterial", prim_path=pxr.Sdf.Path(cup_shape_prim_path), material_path=mtl_path, strength=pxr.UsdShade.Tokens.strongerThanDescendants
)
if add_liquid:
self.volume_mesh = pxr.UsdGeom.Mesh.Get(self.stage, cup_prim.GetPath().AppendPath(f"cup_volume"))
self.set_up_particle_system()
carb.log_warn("APP_VERION 1: " + APP_VERION)
self.set_color()
from omni.physx import acquire_physx_interface
physx = acquire_physx_interface()
physx.overwrite_gpu_setting(1)
physx.reset_simulation()
def set_up_fluid_physical_scene(self, gravityMagnitude = PARTICLE_PROPERTY._gravityMagnitude):
"""
Fluid / PhysicsScene
"""
default_prim_path = self.stage.GetDefaultPrim().GetPath()
if default_prim_path.pathString == '':
# default_prim_path = pxr.Sdf.Path('/World')
root = UsdGeom.Xform.Define(self.stage, "/World").GetPrim()
self.stage.SetDefaultPrim(root)
default_prim_path = self.stage.GetDefaultPrim().GetPath()
# if self.stage.GetPrimAtPath("/World/physicsScene"):
# self.physicsScenePath = default_prim_path.AppendChild("physicsScene")
# return
particleSystemStr = default_prim_path.AppendPath("Fluid").pathString
self.physicsScenePath = default_prim_path.AppendChild("physicsScene")
self.particleSystemPath = pxr.Sdf.Path(particleSystemStr)
self.particleInstanceStr = default_prim_path.AppendPath("Particles").pathString
# Physics scene
self._gravityMagnitude = gravityMagnitude # IN CM/s2 - use a lower gravity to avoid fluid compression at 60 FPS
self._gravityDirection = Gf.Vec3f(0.0, -1.0, 0.0)
physicsScenePath = default_prim_path.AppendChild("physicsScene")
if self.stage.GetPrimAtPath("/World/physicsScene"):
scene = UsdPhysics.Scene.Get(self.stage, physicsScenePath)
else:
scene = UsdPhysics.Scene.Define(self.stage, physicsScenePath)
scene.CreateGravityDirectionAttr().Set(self._gravityDirection)
scene.CreateGravityMagnitudeAttr().Set(self._gravityMagnitude)
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Apply(scene.GetPrim())
physxSceneAPI.CreateEnableCCDAttr().Set(True)
physxSceneAPI.GetTimeStepsPerSecondAttr().Set(60)
physxSceneAPI.CreateEnableGPUDynamicsAttr().Set(True)
physxSceneAPI.CreateEnableEnhancedDeterminismAttr().Set(True)
def set_up_particle_system(self):
self._fluidSphereDiameter = PARTICLE_PROPERTY._fluidSphereDiameter
self._particleSystemSchemaParameters = PARTICLE_PROPERTY._particleSystemSchemaParameters
self._particleSystemAttributes = PARTICLE_PROPERTY._particleSystemAttributes
if APP_VERION.startswith("2022"):
self._particleSystem = particleUtils.add_physx_particle_system(
self.stage, self.particleSystemPath, **self._particleSystemSchemaParameters, simulation_owner=Sdf.Path(self.physicsScenePath.pathString)
)
# materialPathStr = "/World/Looks/OmniGlass"
# particleUtils.add_pbd_particle_material(self.stage, materialPathStr, **PARTICLE_PROPERTY._particleMaterialAttributes)
# physicsUtils.add_physics_material_to_prim(self.stage, self._particleSystem.GetPrim(), materialPathStr)
else:
addPhysxParticleSystem(
self.stage, self.particleSystemPath, **self._particleSystemSchemaParameters, \
scenePath=pxr.Sdf.Path(self.physicsScenePath.pathString)
)
particleSystem = self.stage.GetPrimAtPath(self.particleSystemPath)
if APP_VERION.startswith("2022"):
pass
else:
for key, value in self._particleSystemAttributes.items():
particleSystem.GetAttribute(key).Set(value)
particleInstancePath = pxr.Sdf.Path(self.particleInstanceStr)
proto = PhysxParticleInstancePrototype()
proto.selfCollision = True
proto.fluid = True
proto.collisionGroup = 0
proto.mass = PARTICLE_PROPERTY._particle_mass
protoArray = [proto]
positions_list = []
velocities_list = []
protoIndices_list = []
lowerCenter = pxr.Gf.Vec3f(0, 0, 0)
particle_rest_offset = self._particleSystemSchemaParameters["fluid_rest_offset"]
####################################
if not hasattr(self, "volume_mesh") or self.volume_mesh is None: # not "volume_container" in CUP_PARTICLE_INFO[self.cup_id]:
################DATA####################
if self.height is None:
cylinder_height = CUP_PARTICLE_INFO[self.cup_id]["cylinder_height"]
else:
cylinder_height = self.height
cylinder_radius = CUP_PARTICLE_INFO[self.cup_id]["cylinder_radius"]
positions_list = generate_cylinder_y(lowerCenter, h=cylinder_height, radius=cylinder_radius, sphereDiameter=particle_rest_offset * 2.0)
# positions_list = generate_inside_mesh(lowerCenter, h=cylinder_height, radius=cylinder_radius,
# sphereDiameter=particle_rest_offset * 2.0, mesh= self.mesh, scale=self.scale)
else:
self.cloud_points = np.array(self.volume_mesh.GetPointsAttr().Get())
# particles are too crowded otherwise; add 0.08 to the spacing
positions_list = generate_inside_point_cloud(sphereDiameter=particle_rest_offset * (2.0 + 0.08), cloud_points = self.cloud_points, scale=1.0)
for _ in range(len(positions_list)):
# print("position:", positions_list[_])
velocities_list.append(pxr.Gf.Vec3f(0, 0, 0))
protoIndices_list.append(0)
# print("positions_list", len(positions_list))
# positions_list -= np.array([228, 0, -231])
# positions_list = positions_list.tolist()
self.positions_list = positions_list
protoIndices = pxr.Vt.IntArray(protoIndices_list)
positions = pxr.Vt.Vec3fArray(positions_list)
velocities = pxr.Vt.Vec3fArray(velocities_list)
# if APP_VERION.startswith("2022"):
# particleUtils.add_physx_particleset_pointinstancer(
# self.stage,
# particleInstancePath,
# positions,
# velocities,
# self.particleSystemPath,
# self_collision=True,
# fluid=True,
# particle_group=0,
# particle_mass=PARTICLE_PROPERTY._particle_mass,
# density=0.0,
# )
# else:
# addPhysxParticlesSimple(
# self.stage, particleInstancePath, protoArray, protoIndices, positions, velocities, self.particleSystemPath
# )
if self.use_isosurface:
print("isosurface settings")
particle_system = self._particleSystem
mtl_created = []
omni.kit.commands.execute(
"CreateAndBindMdlMaterialFromLibrary",
mdl_name="OmniSurfacePresets.mdl",
mtl_name="OmniSurface_ClearWater",
mtl_created_list=mtl_created,
)
pbd_particle_material_path = mtl_created[0]
omni.kit.commands.execute(
"BindMaterial", prim_path=self.particleSystemPath, material_path=pbd_particle_material_path
)
# Create a pbd particle material and set it on the particle system
particleUtils.add_pbd_particle_material(
self.stage,
pbd_particle_material_path,
cohesion=0.01,
viscosity=0.0091,
surface_tension=0.0074,
friction=0.1,
)
physicsUtils.add_physics_material_to_prim(self.stage, particle_system.GetPrim(), pbd_particle_material_path)
particle_system.CreateMaxVelocityAttr().Set(20)
# add particle anisotropy
anisotropyAPI = PhysxSchema.PhysxParticleAnisotropyAPI.Apply(particle_system.GetPrim())
anisotropyAPI.CreateParticleAnisotropyEnabledAttr().Set(True)
aniso_scale = 5.0
anisotropyAPI.CreateScaleAttr().Set(aniso_scale)
anisotropyAPI.CreateMinAttr().Set(1.0)
anisotropyAPI.CreateMaxAttr().Set(2.0)
# add particle smoothing
smoothingAPI = PhysxSchema.PhysxParticleSmoothingAPI.Apply(particle_system.GetPrim())
smoothingAPI.CreateParticleSmoothingEnabledAttr().Set(True)
smoothingAPI.CreateStrengthAttr().Set(0.5)
fluidRestOffset = self._particleSystemSchemaParameters["rest_offset"]
# apply isosurface params
isosurfaceAPI = PhysxSchema.PhysxParticleIsosurfaceAPI.Apply(particle_system.GetPrim())
isosurfaceAPI.CreateIsosurfaceEnabledAttr().Set(True)
isosurfaceAPI.CreateMaxVerticesAttr().Set(1024 * 1024)
isosurfaceAPI.CreateMaxTrianglesAttr().Set(2 * 1024 * 1024)
isosurfaceAPI.CreateMaxSubgridsAttr().Set(1024 * 4)
isosurfaceAPI.CreateGridSpacingAttr().Set(fluidRestOffset * 1.5)
isosurfaceAPI.CreateSurfaceDistanceAttr().Set(fluidRestOffset * 1.6)
isosurfaceAPI.CreateGridFilteringPassesAttr().Set("")
isosurfaceAPI.CreateGridSmoothingRadiusAttr().Set(fluidRestOffset * 2)
isosurfaceAPI.CreateNumMeshSmoothingPassesAttr().Set(1)
primVarsApi = UsdGeom.PrimvarsAPI(particle_system)
primVarsApi.CreatePrimvar("doNotCastShadows", Sdf.ValueTypeNames.Bool).Set(True)
self.stage.SetInterpolationType(Usd.InterpolationTypeHeld)
particleUtils.add_physx_particleset_pointinstancer(
stage=self.stage,
path= particleInstancePath, #
positions=Vt.Vec3fArray(positions),
velocities=Vt.Vec3fArray(velocities),
particle_system_path=self.particleSystemPath,
self_collision=True,
fluid=True,
particle_group=0,
particle_mass=PARTICLE_PROPERTY._particle_mass,
density=0.0,
)
# if self.use_isosurface:
# particle_instance_prim = self.stage.GetPrimAtPath(particleInstancePath.pathString)
# # set particle up offset
# particles = pxr.UsdGeom.Xformable(particle_instance_prim)
# particles.AddTranslateOp()
def set_color(self):
# Set color
color_rgb = self.rgb#[0.1, 0.4, 0.6]
color = pxr.Vt.Vec3fArray([pxr.Gf.Vec3f(color_rgb[0], color_rgb[1], color_rgb[2])])
colorPathStr = self.particleInstanceStr + "/particlePrototype0"
gprim = pxr.UsdGeom.Sphere.Define(self.stage, pxr.Sdf.Path(colorPathStr))
gprim.CreateDisplayColorAttr(color)
# prototypePathStr = particleInstanceStr + "/particlePrototype0"
# gprim = UsdGeom.Sphere.Define(stage, Sdf.Path(prototypePathStr))
# gprim.CreateVisibilityAttr("invisible")
# TODO: debug transparency
gprim.CreateDisplayOpacityAttr([float(0.1)])
if self.use_isosurface:
gprim.GetPrim().GetAttribute('visibility').Set('invisible')
# usdPrim = stage.GetPrimAtPath(particleInstancePath)
usdPrim = self.stage.GetPrimAtPath(colorPathStr)
usdPrim.CreateAttribute("enableAnisotropy", pxr.Sdf.ValueTypeNames.Bool, True).Set(True)
usdPrim.CreateAttribute("radius", pxr.Sdf.ValueTypeNames.Double, True).Set(0.3)
gprim.GetRadiusAttr().Set(self._fluidSphereDiameter)
def set_cup(self):
# get cup info from data
abspath = CUP_PARTICLE_INFO[self.cup_id]["usd_path"]
mesh_name = CUP_PARTICLE_INFO[self.cup_id]["mesh_name"]
scale = CUP_PARTICLE_INFO[self.cup_id]["scale"]
particle_offset = CUP_PARTICLE_INFO[self.cup_id]["particle_offset"]
cup_offset = CUP_PARTICLE_INFO[self.cup_id]["cup_offset"]
self.scale = scale
default_prim_path = self.stage.GetDefaultPrim().GetPath()
self.stage.DefinePrim(default_prim_path.AppendPath(f"Cup")).GetReferences().AddReference(abspath)
mug = pxr.UsdGeom.Mesh.Get(self.stage, default_prim_path.AppendPath(f"Cup/{mesh_name}"))
utils.setPhysics(prim=mug.GetPrim(), kinematic=False)
utils.setCollider(prim=mug.GetPrim(), approximationShape="convexDecomposition")
if "volume_container" in CUP_PARTICLE_INFO[self.cup_id]:
volume_container = CUP_PARTICLE_INFO[self.cup_id]["volume_container"]
self.volume_mesh = pxr.UsdGeom.Mesh.Get(self.stage, default_prim_path.AppendPath(f"Cup/{volume_container}"))
prim = mug.GetPrim()
self.mug = mug
# self._setup_rb_collision_parameters(mug.GetPrim(), restOffset=self._mugRestOffset, contactOffset=self._mugContactOffset)
physxCollisionAPI = pxr.PhysxSchema.PhysxCollisionAPI.Get(self.stage, prim.GetPath())
if not physxCollisionAPI:
physxCollisionAPI = pxr.PhysxSchema.PhysxCollisionAPI.Apply(prim)
self._setup_physics_material(prim.GetPath())
# Mug parameters
restOffset = 0.0
contactOffset = 1.0
assert physxCollisionAPI.GetRestOffsetAttr().Set(restOffset)
assert physxCollisionAPI.GetContactOffsetAttr().Set(contactOffset)
assert prim.CreateAttribute("physxMeshCollision:minThickness", pxr.Sdf.ValueTypeNames.Float).Set(0.001)
# assert (
# mug.GetPrim().CreateAttribute("physxMeshCollision:maxConvexHulls", Sdf.ValueTypeNames.Float).Set(32)
# )
self._mugInitPos = Gf.Vec3f(cup_offset[0], cup_offset[1], cup_offset[2]) * scale
self._mugInitRot = get_quat_from_extrinsic_xyz_rotation(angleYrad=-0.7 * math.pi)
self._fluidPositionOffset = Gf.Vec3f(particle_offset[0], particle_offset[1], particle_offset[2])
self._mugScale = Gf.Vec3f(scale)
self._mugOffset = Gf.Vec3f(0, 0, 0) * scale
self.transform_mesh(mug, self._mugInitPos + self._mugOffset * 0, self._mugInitRot, self._mugScale)
massAPI = UsdPhysics.MassAPI.Apply(prim)
massAPI.GetMassAttr().Set(PARTICLE_PROPERTY._cup_mass)
def transform_mesh(self, mesh, loc, orient=pxr.Gf.Quatf(1.0), scale=pxr.Gf.Vec3d(1.0, 1.0, 1.0)):
for op in mesh.GetOrderedXformOps():
if op.GetOpType() == pxr.UsdGeom.XformOp.TypeTranslate:
op.Set(loc)
if op.GetOpType() == pxr.UsdGeom.XformOp.TypeOrient:
op.Set(orient)
if op.GetOpType() == pxr.UsdGeom.XformOp.TypeScale:
op.Set(scale)
def _setup_physics_material(self, path: pxr.Sdf.Path):
# and ground plane
self._material_static_friction = 10.0
self._material_dynamic_friction = 10.0
self._material_restitution = 0.0
self._physicsMaterialPath = None
if self._physicsMaterialPath is None:
self._physicsMaterialPath = self.stage.GetDefaultPrim().GetPath().AppendChild("physicsMaterial")
pxr.UsdShade.Material.Define(self.stage, self._physicsMaterialPath)
material = pxr.UsdPhysics.MaterialAPI.Apply(self.stage.GetPrimAtPath(self._physicsMaterialPath))
material.CreateStaticFrictionAttr().Set(self._material_static_friction)
material.CreateDynamicFrictionAttr().Set(self._material_dynamic_friction)
material.CreateRestitutionAttr().Set(self._material_restitution)
collisionAPI = pxr.UsdPhysics.CollisionAPI.Get(self.stage, path)
prim = self.stage.GetPrimAtPath(path)
if not collisionAPI:
collisionAPI = pxr.UsdPhysics.CollisionAPI.Apply(prim)
# apply material
physicsUtils.add_physics_material_to_prim(self.stage, prim, self._physicsMaterialPath)
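# --- Example usage (a minimal sketch, not part of the original file) ---------
# How CupFluidHelper is typically driven from a script running inside
# Omniverse Kit with a stage open; cup_id indexes into CUP_PARTICLE_INFO.
# Defined only, never called on import.
def _example_fill_cup():
    helper = CupFluidHelper(cup_id=0, r=0.1, g=0.4, b=0.6)
    helper.create()  # physics scene, cup, particle system, color and offsets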
| 21,056 | Python | 43.144654 | 154 | 0.645802 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/fluid/new_faucet.py | import carb
import math
from pathlib import Path
import pxr
from pxr import Usd, UsdLux, UsdGeom, Sdf, Gf, Vt, UsdPhysics, PhysxSchema
import sys
#put schemaHelpers.py into path
from omni.kitchen.asset.layout.fluid.schemaHelpers import PhysxParticleInstancePrototype, \
addPhysxParticleSystem, addPhysxParticlesSimple
import omni.timeline
from typing import List
from omni.kitchen.asset.task_check.newJointCheck import JointCheck
import math
from .utils import generate_cylinder_y, point_sphere
from ...param import IS_IN_ISAAC_SIM
from .constants import PARTICLE_PROPERTY, particel_scale
from omni.physx.scripts import particleUtils
def setGridFilteringPass(gridFilteringFlags: int, passIndex: int, operation: int, numRepetitions: int = 1):
numRepetitions = max(0, numRepetitions - 1)
shift = passIndex * 4
gridFilteringFlags &= ~(3 << shift)
gridFilteringFlags |= (((operation) << 2) | numRepetitions) << shift
return gridFilteringFlags
class Faucet():
def __init__(self,
liquid_material_path = "/World/Looks/OmniSurface_ClearWater", inflow_path:str = "/World/faucet/inflow",
link_paths:List[str] = ["/World/faucet/link_0"]
):
"""! Faucet class
@param liquid_material_path: material path used for the liquid surface
@param inflow_path: used to compute the location of water drops
@param link_paths: used to compute the rotation of the faucet handle, which determines the speed and size of water drops
@return an instance of Faucet class
"""
# particle Instance path
# self.particleInstanceStr_tmp = "/particlesInstance"
# self.particle_params = particle_params
# self.iso_surface_params = iso_surface_params
self.liquid_material_path = liquid_material_path
# inflow position
self.stage = omni.usd.get_context().get_stage()
self.inflow_path = inflow_path
self.inflow_prim = self.stage.GetPrimAtPath(inflow_path)
mat = omni.usd.utils.get_world_transform_matrix(self.inflow_prim)
# if IS_IN_ISAAC_SIM:
# from omni.isaac.core.prims import XFormPrim
# self.inflow_position, _ = XFormPrim(self.inflow_path).get_world_pose()
# self.inflow_position = Gf.Vec3f(*self.inflow_position.tolist())
# else:
self.inflow_position = Gf.Vec3f(*mat.ExtractTranslation())
self.link_paths = link_paths
self.list_of_point_instancers = []
self.active_indexes_for_point_instancers = []
self.rate_checkers = []
for link in link_paths:
path = Path(link)
self.rate_checkers.append(JointCheck( str(path.parent), str(path.name) ))
self.create()
# print("particleSystemPath", self.particleSystemPath)
def is_off(self):
rate = self.rate_checkers[0].compute_distance()/100.0
return rate < 0.1
def point_sphere(self, samples, scale):
"""! create locations for each particles
@param samples: the number of particles per sphere
@param scale: the scale(radius) of the water drop
"""
indices = [x + 0.5 for x in range(0, samples)]
phi = [math.acos(1 - 2 * x / samples) for x in indices]
theta = [math.pi * (1 + 5**0.5) * x for x in indices]
x = [math.cos(th) * math.sin(ph) * scale for (th, ph) in zip(theta, phi)]
y = [math.sin(th) * math.sin(ph) * scale for (th, ph) in zip(theta, phi)]
z = [math.cos(ph) * scale for ph in phi]
points = [Gf.Vec3f(x, y, z) for (x, y, z) in zip(x, y, z)]
return points
def create_ball(self, rate = 1):
"""! create a water drop
@param rate: faucet opening rate in [0, 1], forwarded to set_up_particle_system
"""
# create sphere on points
self.set_up_particle_system(rate)
def set_up_particle_system(self, rate):
self.particleInstanceStr_tmp = self.particleInstanceStr + "/particlesInstance" + str(self.it)
particleInstancePath = omni.usd.get_stage_next_free_path(self.stage, self.particleInstanceStr_tmp, False)
particleInstancePath = pxr.Sdf.Path(particleInstancePath)
proto = PhysxParticleInstancePrototype()
proto.selfCollision = True
proto.fluid = True
proto.collisionGroup = 0
proto.mass = PARTICLE_PROPERTY._particle_mass
protoArray = [proto]
positions_list = []
velocities_list = []
protoIndices_list = []
cylinder_height = 2
cylinder_radius = 1.5
lowerCenter = Gf.Vec3f(0, -cylinder_height, 0) # self.inflow_position
# lowerCenter = self.inflow_position
particle_rest_offset = self._particleSystemSchemaParameters["fluid_rest_offset"]
positions_list = generate_cylinder_y(lowerCenter, h=cylinder_height, radius=cylinder_radius, sphereDiameter=particle_rest_offset * 4.0)
for _ in range(len(positions_list)):
velocities_list.append(pxr.Gf.Vec3f(0, 0, 0))
protoIndices_list.append(0)
# print("positions_list", len(positions_list))
self.positions_list = positions_list
protoIndices = pxr.Vt.IntArray(protoIndices_list)
positions = pxr.Vt.Vec3fArray(positions_list)
velocities = pxr.Vt.Vec3fArray(velocities_list)
print("particleInstancePath", particleInstancePath.pathString)
particleUtils.add_physx_particleset_pointinstancer(
self.stage,
particleInstancePath,
positions,
velocities,
self.particleSystemPath,
self_collision=True,
fluid=True,
particle_group=0,
particle_mass=PARTICLE_PROPERTY._particle_mass,
density=0.0,
)
prototypePath = particleInstancePath.pathString + "/particlePrototype0"
sphere = UsdGeom.Sphere.Define(self.stage, Sdf.Path(prototypePath))
spherePrim = sphere.GetPrim()
# spherePrim.GetAttribute('visibility').Set('invisible')
color_rgb = [207/255.0, 244/255.0, 254/255.0]
color = pxr.Vt.Vec3fArray([pxr.Gf.Vec3f(color_rgb[0], color_rgb[1], color_rgb[2])])
sphere.CreateDisplayColorAttr(color)
# spherePrim.CreateAttribute("enableAnisotropy", Sdf.ValueTypeNames.Bool, True).Set(True)
def create(self):
"""! initialize the related parameters for faucet
register timeline/physics callbacks and set up the physics scene for the fluid;
particle systems are created per water drop in create_ball()
"""
self._setup_callbacks()
self.it = 0
self.counter = 10
self.set_up_fluid_physical_scene()
def set_up_fluid_physical_scene(self, gravityMagnitude = 100.0):
"""
Fluid / PhysicsScene
"""
default_prim_path = self.stage.GetDefaultPrim().GetPath()
if default_prim_path.pathString == '':
# default_prim_path = pxr.Sdf.Path('/World')
root = UsdGeom.Xform.Define(self.stage, "/World").GetPrim()
self.stage.SetDefaultPrim(root)
default_prim_path = self.stage.GetDefaultPrim().GetPath()
self.stage = omni.usd.get_context().get_stage()
particleSystemStr = default_prim_path.AppendPath("Fluid").pathString
self.physicsScenePath = default_prim_path.AppendChild("physicsScene")
self.particleSystemPath = Sdf.Path(particleSystemStr)
self.particleInstanceStr = "/World/game/inflow"
# print("particleInstanceStr", self.particleInstanceStr)
# Physics scene
self._gravityMagnitude = gravityMagnitude
self._gravityDirection = Gf.Vec3f(0.0, -1.0, 0.0)
physicsScenePath = default_prim_path.AppendChild("physicsScene")
if self.stage.GetPrimAtPath('/World/physicsScene'):
scene = UsdPhysics.Scene.Get(self.stage, physicsScenePath)
else:
scene = UsdPhysics.Scene.Define(self.stage, physicsScenePath)
scene.CreateGravityDirectionAttr().Set(self._gravityDirection)
scene.CreateGravityMagnitudeAttr().Set(self._gravityMagnitude)
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Apply(scene.GetPrim())
physxSceneAPI.CreateEnableCCDAttr().Set(True)
physxSceneAPI.GetTimeStepsPerSecondAttr().Set(120)
self._fluidSphereDiameter = PARTICLE_PROPERTY._fluidSphereDiameter #0.24
# solver parameters:
# self._solverPositionIterations = 10
# self._solverVelocityIterations = 10
# self._particleSystemSchemaParameters = {
# "contact_offset": 0.3,
# "particle_contact_offset": 0.25,
# "rest_offset": 0.25,
# "solid_rest_offset": 0,
# "fluid_rest_offset": 0.5 * self._fluidSphereDiameter + 0.03,
# "solver_position_iterations": self._solverPositionIterations,
# "solver_velocity_iterations": self._solverVelocityIterations,
# "wind": Gf.Vec3f(0, 0, 0),
# }
self._particleSystemSchemaParameters = PARTICLE_PROPERTY._particleSystemSchemaParameters
# self._particleSystemAttributes = {
# "cohesion": 7.4,
# "smoothing": 0.8,
# "anisotropyScale": 1.0,
# "anisotropyMin": 0.2,
# "anisotropyMax": 2.0,
# "surfaceTension": 2.0, #0.74,
# "vorticityConfinement": 0.5,
# "viscosity": 5.0,
# "particleFriction": 0.34,
# "maxParticles": 20000,
# }
self._particleSystemAttributes = PARTICLE_PROPERTY._particleSystemAttributes
self._particleSystemAttributes["maxParticles"] = 2000
self._particleSystemAttributes["viscosity"] = 0.001
self._particleSystem = particleUtils.add_physx_particle_system(
self.stage, self.particleSystemPath, **self._particleSystemSchemaParameters, simulation_owner=Sdf.Path(self.physicsScenePath.pathString)
)
# addPhysxParticleSystem(
# self.stage, self.particleSystemPath, **self._particleSystemSchemaParameters, \
# scenePath=pxr.Sdf.Path(self.physicsScenePath.pathString)
# )
# particleSystem = self.stage.GetPrimAtPath(self.particleSystemPath)
# for key, value in self._particleSystemAttributes.items():
# particleSystem.GetAttribute(key).Set(value)
# filterSmooth = 1
# filtering = 0
# passIndex = 0
# filtering = setGridFilteringPass(filtering, passIndex, filterSmooth)
# passIndex = passIndex + 1
# filtering = setGridFilteringPass(filtering, passIndex, filterSmooth)
# passIndex = passIndex + 1
# self.iso_surface_params = {
# "maxIsosurfaceVertices": [Sdf.ValueTypeNames.Int, True, 1024 * 1024],
# "maxIsosurfaceTriangles": [Sdf.ValueTypeNames.Int, True, 2 * 1024 * 1024],
# "maxNumIsosurfaceSubgrids": [Sdf.ValueTypeNames.Int, True, 1024 * 4],
# "isosurfaceGridSpacing": [Sdf.ValueTypeNames.Float, True, 0.2],
# "isosurfaceKernelRadius": [Sdf.ValueTypeNames.Float, True, 0.5 ],
# "isosurfaceLevel": [ Sdf.ValueTypeNames.Float, True, -0.3 ],
# "isosurfaceGridFilteringFlags": [Sdf.ValueTypeNames.Int, True, filtering ],
# "isosurfaceGridSmoothingRadiusRelativeToCellSize": [Sdf.ValueTypeNames.Float, True, 0.3 ],
# "isosurfaceEnableAnisotropy": [Sdf.ValueTypeNames.Bool, True, False ],
# "isosurfaceAnisotropyMin": [ Sdf.ValueTypeNames.Float, True, 0.1 ],
# "isosurfaceAnisotropyMax": [ Sdf.ValueTypeNames.Float, True, 2.0 ],
# "isosurfaceAnisotropyRadius": [ Sdf.ValueTypeNames.Float, True, 0.5 ],
# "numIsosurfaceMeshSmoothingPasses": [ Sdf.ValueTypeNames.Int, True, 5 ],
# "numIsosurfaceMeshNormalSmoothingPasses": [ Sdf.ValueTypeNames.Int, True, 5 ],
# "isosurfaceDoNotCastShadows": [Sdf.ValueTypeNames.Bool, True, True ]
# }
# particleSystem.CreateAttribute("enableIsosurface", Sdf.ValueTypeNames.Bool, True).Set(True)
# for key,value in self.iso_surface_params.items():
# if isinstance(value, list):
# particleSystem.CreateAttribute(key, value[0], value[1]).Set(value[2])
# else:
# particleSystem.GetAttribute(key).Set(value)
# self.stage.SetInterpolationType(Usd.InterpolationTypeHeld)
def _setup_callbacks(self):
"""! callbacks registered with timeline and physics steps to drop water
"""
# callbacks
self._timeline = omni.timeline.get_timeline_interface()
stream = self._timeline.get_timeline_event_stream()
self._timeline_subscription = stream.create_subscription_to_pop(self._on_timeline_event)
# subscribe to Physics updates:
self._physics_update_subscription = omni.physx.get_physx_interface().subscribe_physics_step_events(
self.on_physics_step
)
# events = omni.physx.get_physx_interface().get_simulation_event_stream()
# self._simulation_event_sub = events.create_subscription_to_pop(self._on_simulation_event)
def _on_timeline_event(self, e):
if e.type == int(omni.timeline.TimelineEventType.STOP):
self.it = 0
self._physics_update_subscription = None
self._timeline_subscription = None
def on_physics_step(self, dt):
xformCache = UsdGeom.XformCache()
# compute location to dispense water
pose = xformCache.GetLocalToWorldTransform(self.stage.GetPrimAtPath(self.inflow_path))
pos_faucet = Gf.Vec3f(pose.ExtractTranslation())
## TODO: handle multiple faucet handles
rate = self.rate_checkers[0].compute_distance()/100.0
if rate > 1:
rate = 1
# if self.it == 0:
# iso2Prim = self.stage.GetPrimAtPath(self.particleSystemPath.pathString +"/Isosurface")
# rel = iso2Prim.CreateRelationship("material:binding", False)
# # rel.SetTargets([Sdf.Path(self.liquid_material_path)])
# rel.SetTargets([Sdf.Path("/World/game/other_Basin_1/Looks/OmniSurface_ClearWater")])
#TODO we can have the water keep running, but we should delete some particles that are too old and not in containers.
#this implementation will stop after 200 balls
if self.it > 200:
return
if rate < 0.1:
return
# emit a ball based on rate
rate = min(0.35, rate)
if (self.counter < 100 - rate*200 ):
self.counter = self.counter + 1
return
self.counter = 0
self.it = self.it + 1
self.create_ball(rate)
def __del__(self):
self._physics_update_subscription = None
self._timeline_subscription = None
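# --- Example usage (a minimal sketch, not part of the original file) ---------
# Constructing a Faucet inside Omniverse Kit; the prim paths below are the
# class defaults and may differ per scene. The constructor registers the
# timeline/physics callbacks itself. Defined only, never called on import.
def _example_create_faucet():
    return Faucet(inflow_path="/World/faucet/inflow",
                  link_paths=["/World/faucet/link_0"])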
| 15,294 | Python | 40.675749 | 152 | 0.628874 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/fluid/constants.py | from ...param import APP_VERION
from pxr import Gf
particel_scale = 2.5
if APP_VERION.startswith("2022"):
class PARTICLE_PROPERTY:
_fluidSphereDiameter = 0.24 * particel_scale
_particleSystemSchemaParameters = {
"contact_offset": 0.3 * particel_scale,
"particle_contact_offset": 0.25 * particel_scale,
"rest_offset": 0.25 * particel_scale,
"solid_rest_offset": 0,
"fluid_rest_offset": 0.5 * _fluidSphereDiameter + 0.03 * particel_scale,
"solver_position_iterations": 10,
"wind": Gf.Vec3f(0, 0, 0),
"max_velocity": 40 ,
}
_particleMaterialAttributes = {
"friction": 0.34,
"viscosity": 0.0,
"vorticity_confinement": 0.5,
"surface_tension": 0.74,
"cohesion": 0.1,
# "cfl_coefficient": 1.0,
}
_particleSystemAttributes = {
"cohesion": 0.0,
"smoothing": 0.8,
"anisotropyScale": 1.0,
"anisotropyMin": 0.2,
"anisotropyMax": 2.0,
"surfaceTension": 0.74,
"vorticityConfinement": 0.5,
"viscosity": 0.0,
"particleFriction": 0.34,
"maxVelocity": 40,
}
_particle_mass = 1e-6 * particel_scale*particel_scale
_particle_scale = (0.5, 0.5, 0.5)
_cup_rest_offset = 0.0
_cup_contact_offset = 1.0
_cup_mass = 1
_gravityMagnitude = 100
else:
class PARTICLE_PROPERTY:
_fluidSphereDiameter = 0.24 * particel_scale
_particleSystemSchemaParameters = {
"contact_offset": 0.3 * particel_scale,
"particle_contact_offset": 0.25 * particel_scale,
"rest_offset": 0.25 * particel_scale,
"solid_rest_offset": 0,
"fluid_rest_offset": 0.5 * _fluidSphereDiameter + 0.03 * particel_scale,
"solver_position_iterations": 10,
"solver_velocity_iterations": 10,
"wind": Gf.Vec3f(0, 0, 0),
}
_particleSystemAttributes = {
"cohesion": 7.4,
"smoothing": 0.8,
"anisotropyScale": 1.0,
"anisotropyMin": 0.2,
"anisotropyMax": 2.0,
"surfaceTension": 0.74,
"vorticityConfinement": 0.5,
"viscosity": 5.0,
"particleFriction": 0.34,
"maxVelocity": 40,
}
_particle_mass = 1e-6 * particel_scale
_particle_scale = (0.5, 0.5, 0.5)
_cup_rest_offset = 0.0
_cup_contact_offset = 1.0
_cup_mass = 1
_gravityMagnitude = 100
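# Example (sketch): these constants are consumed by unpacking them into the
# particle-system helpers, e.g.
#   add_physx_particle_system(stage, path, **PARTICLE_PROPERTY._particleSystemSchemaParameters, ...)
_example_fluid_rest_offset = PARTICLE_PROPERTY._particleSystemSchemaParameters["fluid_rest_offset"]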
| 2,780 | Python | 32.506024 | 84 | 0.496403 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/fluid/cup_data.py | from ..param import ROOT
CUP_ROOT = ROOT + "/3dmodels/cup/"
NEW_CUP_ROOT = ROOT + "/sample/custom/Cup/"
FAUCET_INFO = {
"1028": {
"inflow_pos": [-17.4121, 4.63152, 0],
"joints":[
"link_2/joint_0",
"link_2/joint_1",
]
},
"148": {
"inflow_pos": [-17.30, 4.10 , 0],
"joints":[
"link_1/joint_0",
]
},
"149": {
"inflow_pos": [-10.80, 7.0 , 0],
"joints":[
"link_3/joint_0",
"link_3/joint_1",
]
},
"153": {
"inflow_pos": [-13.4587, 7.00 , -2.5],
"joints":[
"link_1/joint_0",
]
},
"154": {
"inflow_pos": [-7.0, 19.00 , 0.0],
"joints":[
"link_2/joint_0",
"link_2/joint_1",
]
},
"156": {
"inflow_pos": [-17.00, 6.00 , 0.0],
"joints":[
"link_1/joint_0",
]
},
"693": {
"inflow_pos": [-14.3453, -6.21179, -0.20894],
"joints":[
"link_2/joint_1",
]
},
"1034": {
"inflow_pos": [-17.967, 4.04622, 4.11386],
"joints":[
"link_1/joint_0",
]
},
"1052": {
"inflow_pos": [-14.8737, 4.21977, 1.06383],
"joints":[
"link_2/joint_0",
]
},
"1053": {
"inflow_pos": [-9.99254, 1.0, 0],
"joints":[
"link_1/joint_0",
]
}
}
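# Example (sketch): FAUCET_INFO is keyed by the faucet id string parsed from the
# prim path (e.g. ".../Faucet_148" -> "148"); the lookup yields the inflow
# position and the handle joints used to drive the water drops.
_example_faucet_inflow = FAUCET_INFO["148"]["inflow_pos"]  # -> [-17.30, 4.10, 0]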
CUP_PARTICLE_INFO = [
{
"usd_path": NEW_CUP_ROOT + "0/cup.usd",
"mesh_name": "cupShape",
#"volume_container": "cup_volume",
"cylinder_height": 15.0,
"cylinder_radius": 4.5,
"particle_offset": [0, 1.05, 0],
"cup_offset": [0, 0, 0],
"scale": 1.0
},
{
"usd_path": NEW_CUP_ROOT + "1/cup.usd",
"mesh_name": "cupShape",
"volume_container": "cup_volume",
"cylinder_height": 15.0,
"cylinder_radius": 4.5,
"particle_offset": [0, 1.05, 0],
"cup_offset": [0, 0, 0],
"scale": 1.0
},
{
"usd_path": CUP_ROOT + "bottle0.usd",
"mesh_name": "D_printable_bottle",
"cylinder_height": 15.0,
"cylinder_radius": 4.5,
"particle_offset": [2.0, 1.05, 0],
"cup_offset": [0, 0, 0],
"scale": 0.25
},
{
"usd_path": CUP_ROOT + "bottle1.usd",
"mesh_name": "bioshock_salts_bottle_final",
"cylinder_height": 14.0,
"cylinder_radius": 3.0,
"particle_offset": [0.0, -10, -2.7],
# "particle_offset": [0.0, 0, -5],
"cup_offset": [0, 2.1, 0],
# "cup_offset": [0, 0, 0],
"scale": 5.0
},
{
"usd_path": CUP_ROOT + "mug0.usd",
"mesh_name": "geom",
"cylinder_height": 15.0,
"cylinder_radius": 3.0,
"particle_offset": [0.0, 1.05, 0],
"cup_offset": [0, 0, 0],
"scale": 1.2
},
{
"usd_path": CUP_ROOT + "mug1.usd",
"mesh_name": "SM_mug_2_mesh",
"cylinder_height": 15.0,
"cylinder_radius": 3.0,
"particle_offset": [0.0, 1.05, 0],
"cup_offset": [0, 0, 0],
"scale": 1.2
},
{
"usd_path": CUP_ROOT + "jar0.usd",
"mesh_name": "mesh",
"cylinder_height": 18.0,
"cylinder_radius": 5.0,
"particle_offset": [0.0, 1.05, 0],
"cup_offset": [0, 0, 0],
"scale": 1.2
},
] | 3,753 | Python | 24.026667 | 55 | 0.375966 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/fluid/__init__.py | # from .faucet import Faucet, particle_params, iso_surface_params | 65 | Python | 64.999935 | 65 | 0.8 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/fluid/utils.py | import math
from pxr import Gf
import numpy as np
import copy
def point_sphere(samples, scale):
indices = [x + 0.5 for x in range(0, samples)]
phi = [math.acos(1 - 2 * x / samples) for x in indices]
theta = [math.pi * (1 + 5**0.5) * x for x in indices]
x = [math.cos(th) * math.sin(ph) * scale for (th, ph) in zip(theta, phi)]
y = [math.sin(th) * math.sin(ph) * scale for (th, ph) in zip(theta, phi)]
z = [math.cos(ph) * scale for ph in phi]
points = [Gf.Vec3f(x, y, z) for (x, y, z) in zip(x, y, z)]
return points
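# Example (sketch): point_sphere() is a Fibonacci (golden-angle) sphere sampling;
# the call below would return 100 roughly uniform points on a sphere of radius 1.
# Defined only, never called on import.
def _example_point_sphere():
    return point_sphere(100, 1.0)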
#generate inside mesh
def swapPositions(list, pos1, pos2):
list[pos1], list[pos2] = list[pos2], list[pos1]
return list
def generate_inside_mesh(lowerCenter: Gf.Vec3f, h: float, radius: float, sphereDiameter: float, mesh, scale):
# print("bounds: ", mesh.bounds)
# samples = generate_hcp_samples(Gf.Vec3f(-radius, 0, -radius), Gf.Vec3f(radius, h, radius), sphereDiameter)
min_bound = list(mesh.bounds[0])
max_bound = list(mesh.bounds[1])
min_bound = [min_bound[0], min_bound[2], min_bound[1]]
max_bound = [max_bound[0], max_bound[2], max_bound[1]]
min_bound = (item * scale for item in min_bound)
max_bound = (item * scale for item in max_bound)
samples = generate_hcp_samples(Gf.Vec3f(*min_bound), Gf.Vec3f(*max_bound), sphereDiameter*2)
finalSamples = []
import copy
import trimesh
samples_copy = copy.deepcopy(samples)
samples_copy = [ [ sample_copy[0]/scale, sample_copy[1]/scale, sample_copy[2]/scale ] for sample_copy in samples_copy ]
samples_copy = [ [ sample_copy[0], sample_copy[2], sample_copy[1] ] for sample_copy in samples_copy ]
# print("num particles: ", len(samples_copy))
print("eva contains:")
contains = mesh.contains(samples_copy)
# signed_distance = trimesh.proximity.ProximityQuery(mesh).signed_distance(samples_copy)
# contains = signed_distance >= 0
print("eva done:")
for contain, sample in zip(contains, samples):
if contain:
finalSamples.append(sample)
print("length: ", len(finalSamples) )
return finalSamples
def in_hull(p, hull):
"""
Test if points in `p` are in `hull`
`p` should be a `NxK` coordinates of `N` points in `K` dimensions
`hull` is either a scipy.spatial.Delaunay object or the `MxK` array of the
coordinates of `M` points in `K`dimensions for which Delaunay triangulation
will be computed
"""
try:
from scipy.spatial import Delaunay
except:
import omni
omni.kit.pipapi.install("scipy")
from scipy.spatial import Delaunay
if not isinstance(hull,Delaunay):
hull = Delaunay(hull)
return hull.find_simplex(p)>=0
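# Example (sketch, hypothetical values): check which query points fall inside
# the convex hull of a random point cloud. Wrapped in a function so the scipy
# import inside in_hull() is not triggered on module import.
def _example_in_hull():
    cloud = np.random.rand(20, 3)                            # 20 random points in the unit cube
    queries = np.array([[0.5, 0.5, 0.5], [2.0, 2.0, 2.0]])
    return in_hull(queries, cloud)                           # second entry is always False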
def generate_inside_point_cloud(sphereDiameter, cloud_points, scale = 1):
"""
Generate a sphere packing inside the convex hull of a point cloud
"""
offset = 2
min_x = np.min(cloud_points[:, 0]) + offset
min_y = np.min(cloud_points[:, 1]) + offset
min_z = np.min(cloud_points[:, 2]) + offset
max_x = np.max(cloud_points[:, 0])
max_y = np.max(cloud_points[:, 1])
max_z = np.max(cloud_points[:, 2])
min_bound = [min_x, min_y, min_z]
max_bound = [max_x, max_y, max_z]
min_bound = [item * scale for item in min_bound]
max_bound = [item * scale for item in max_bound]
samples = generate_hcp_samples(Gf.Vec3f(*min_bound), Gf.Vec3f(*max_bound), sphereDiameter)
samples_copy = np.array(copy.deepcopy(samples))
print("samples_copy", samples_copy.shape)
finalSamples = []
contains = in_hull(samples, cloud_points)
max_particles = 2000
for contain, sample in zip(contains, samples):
if contain and len(finalSamples) < max_particles:
finalSamples.append(sample)
print("length: ", len(finalSamples) )
return finalSamples
# generate cylinder points
def generate_cylinder_y(lowerCenter: Gf.Vec3f, h: float, radius: float, sphereDiameter: float):
samples = generate_hcp_samples(Gf.Vec3f(-radius, 0, -radius), Gf.Vec3f(radius, h, radius), sphereDiameter)
finalSamples = []
for p in samples:
r2 = p[0] * p[0] + p[2] * p[2]
if r2 <= radius * radius:
finalSamples.append(p + lowerCenter)
return finalSamples
# Generates hexagonal close packed samples inside an axis aligned bounding box
def generate_hcp_samples(boxMin: Gf.Vec3f, boxMax: Gf.Vec3f, sphereDiameter: float):
layerDistance = math.sqrt(2.0 / 3.0) * sphereDiameter
rowShift = math.sqrt(3.0) / 2.0 * sphereDiameter
result = []
layer1Offset = (1.0 / 3.0) * (
Gf.Vec2f(0, 0) + Gf.Vec2f(0.5 * sphereDiameter, rowShift) + Gf.Vec2f(sphereDiameter, 0)
)
zIndex = 0
while True:
z = boxMin[2] + zIndex * layerDistance
if z > boxMax[2]:
break
yOffset = layer1Offset[1] if zIndex % 2 == 1 else 0
yIndex = 0
while True:
y = boxMin[1] + yIndex * rowShift + yOffset
if y > boxMax[1]:
break
xOffset = 0
if zIndex % 2 == 1:
xOffset += layer1Offset[0]
if yIndex % 2 == 1:
xOffset -= 0.5 * sphereDiameter
elif yIndex % 2 == 1:
xOffset += 0.5 * sphereDiameter
xIndex = 0
while True:
x = boxMin[0] + xIndex * sphereDiameter + xOffset
if x > boxMax[0]:
break
result.append(Gf.Vec3f(x, y, z))
xIndex += 1
yIndex += 1
zIndex += 1
return result
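# Example (sketch): pack sphere centres of diameter 0.5 into a 2 x 2 x 2 box;
# the result is a hexagonal-close-packed lattice, so neighbours within a row are
# exactly one diameter apart. Defined only, never called on import.
def _example_hcp_packing():
    return generate_hcp_samples(Gf.Vec3f(0, 0, 0), Gf.Vec3f(2, 2, 2), 0.5)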
def get_quat_from_extrinsic_xyz_rotation(angleXrad: float = 0.0, angleYrad: float = 0.0, angleZrad: float = 0.0):
def rotate_around_axis(x, y, z, angle):
s = math.sin(0.5 * angle)
return Gf.Quatf(math.cos(0.5 * angle), s * x, s * y, s * z)
# angles are in radians
rotX = rotate_around_axis(1, 0, 0, angleXrad)
rotY = rotate_around_axis(0, 1, 0, angleYrad)
rotZ = rotate_around_axis(0, 0, 1, angleZrad)
return rotZ * rotY * rotX | 6,155 | Python | 31.230366 | 123 | 0.59805 |
yizhouzhao/VRKitchen2.0-IndoorKit/exts/vrkitchen.indoor.kit/vrkitchen/indoor/kit/layout/fluid/fluid_setup.py | import carb
import math
from pxr import Usd, UsdGeom, Sdf, Gf, Vt, UsdPhysics, PhysxSchema
import omni.timeline
import omni.physxdemos as demo
from .schemaHelpers import PhysxParticleInstancePrototype, addPhysxParticleSystem
ASYNC_SIMULATION = "/persistent/physics/asyncSimRender"
def setGridFilteringPass(gridFilteringFlags: int, passIndex: int, operation: int, numRepetitions: int = 1):
numRepetitions = max(0, numRepetitions - 1)
shift = passIndex * 4
gridFilteringFlags &= ~(3 << shift)
gridFilteringFlags |= (((operation) << 2) | numRepetitions) << shift
return gridFilteringFlags
class FluidFill(demo.Base):
def __init__(self, pos = Gf.Vec3f(0 , 20, 0.0)):
self.stage = omni.usd.get_context().get_stage()
self.pos = pos
xformCache = UsdGeom.XformCache()
pose = xformCache.GetLocalToWorldTransform(self.stage.GetPrimAtPath("/World/mobility/link_0"))
pos_link = Gf.Vec3f(pose.ExtractTranslation())
self.rot_link_init = Gf.Quatf(pose.ExtractRotationQuat())
# print("attributes: ", self.stage.GetPrimAtPath("/World/faucet/link_0").GetAttributes())
self.init_orient = self.stage.GetPrimAtPath("/World/mobility/link_0").GetAttribute("xformOp:orient").Get()
def point_sphere(self, samples, scale):
indices = [x + 0.5 for x in range(0, samples)]
phi = [math.acos(1 - 2 * x / samples) for x in indices]
theta = [math.pi * (1 + 5**0.5) * x for x in indices]
x = [math.cos(th) * math.sin(ph) * scale for (th, ph) in zip(theta, phi)]
y = [math.sin(th) * math.sin(ph) * scale for (th, ph) in zip(theta, phi)]
z = [math.cos(ph) * scale for ph in phi]
points = [Gf.Vec3f(x, y, z) for (x, y, z) in zip(x, y, z)]
return points
def create_ball(self, stage, pos, rate = 1):
# create sphere on points
# print("scale: ", rate)
points = self.point_sphere( 10+int(90 * rate), 1)
# points = self.point_sphere( int(80 * rate), 1)
# basePos = Gf.Vec3f(11.0, 12.0, 35.0) + pos
basePos = pos
positions = [Gf.Vec3f(x) + Gf.Vec3f(basePos) for x in points]
radius = 0.1
# particleSpacing = 2.0 * radius * 0.6
particleSpacing = 2.0 * radius * 0.6
positions_list = positions
velocities_list = [Gf.Vec3f(0.0, 0.0, 0.0)] * len(positions)
protoIndices_list = [0] * len(positions)
protoIndices = Vt.IntArray(protoIndices_list)
positions = Vt.Vec3fArray(positions_list)
velocities = Vt.Vec3fArray(velocities_list)
particleInstanceStr = "/particlesInstance" + str(self.it)
particleInstancePath = Sdf.Path(particleInstanceStr)
# Create point instancer
pointInstancer = UsdGeom.PointInstancer.Define(stage, particleInstancePath)
prototypeRel = pointInstancer.GetPrototypesRel()
# Create particle instance prototypes
particlePrototype = PhysxParticleInstancePrototype()
particlePrototype.selfCollision = True
particlePrototype.fluid = True
particlePrototype.collisionGroup = 0
particlePrototype.mass = 0.001
prototypePath = particleInstancePath.pathString + "/particlePrototype"
sphere = UsdGeom.Sphere.Define(stage, Sdf.Path(prototypePath))
spherePrim = sphere.GetPrim()
sphere.GetRadiusAttr().Set(particleSpacing)
# color_rgb = [0.0, 0.08, 0.30]
# color = Vt.Vec3fArray([Gf.Vec3f(color_rgb[0], color_rgb[1], color_rgb[2])])
# sphere.CreateDisplayColorAttr(color)
spherePrim = sphere.GetPrim()
spherePrim.GetAttribute('visibility').Set('invisible')
# spherePrim.GetVisibilityAttr().Set(False)
spherePrim.CreateAttribute("enableAnisotropy", Sdf.ValueTypeNames.Bool, True).Set(True)
particleInstanceApi = PhysxSchema.PhysxParticleAPI.Apply(spherePrim)
particleInstanceApi.CreateSelfCollisionAttr().Set(particlePrototype.selfCollision)
particleInstanceApi.CreateFluidAttr().Set(particlePrototype.fluid)
particleInstanceApi.CreateParticleGroupAttr().Set(particlePrototype.collisionGroup)
particleInstanceApi.CreateMassAttr().Set(particlePrototype.mass)
# Reference simulation owner using PhysxPhysicsAPI
physicsApi = PhysxSchema.PhysxPhysicsAPI.Apply(spherePrim)
physicsApi.CreateSimulationOwnerRel().SetTargets([self.particleSystemPath])
# add prototype references to point instancer
prototypeRel.AddTarget(Sdf.Path(prototypePath))
# Set active particle indices
activeIndices = []
for i in range(len(positions)):
activeIndices.append(protoIndices[i])
orientations = [Gf.Quath(1.0, Gf.Vec3h(0.0, 0.0, 0.0))] * len(positions)
angular_velocities = [Gf.Vec3f(0.0, 0.0, 0.0)] * len(positions)
pointInstancer.GetProtoIndicesAttr().Set(activeIndices)
pointInstancer.GetPositionsAttr().Set(positions)
pointInstancer.GetOrientationsAttr().Set(orientations)
pointInstancer.GetVelocitiesAttr().Set(velocities)
pointInstancer.GetAngularVelocitiesAttr().Set(angular_velocities)
def create(self, stage):
self._setup_callbacks()
self.stage = stage
self.it = 0
self.counter = 10
# set stage up axis to Y
UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y)
UsdGeom.SetStageMetersPerUnit(stage, 0.01)
# light
# sphereLight = UsdLux.SphereLight.Define(stage, Sdf.Path("/SphereLight"))
# sphereLight.CreateRadiusAttr(150)
# sphereLight.CreateIntensityAttr(30000)
# sphereLight.AddTranslateOp().Set(Gf.Vec3f(650.0, 0.0, 1150.0))
# Physics scene
scenePath = Sdf.Path("/physicsScene")
scene = UsdPhysics.Scene.Define(stage, scenePath)
scene.CreateGravityDirectionAttr().Set(Gf.Vec3f(0.0, -1.0, 0.0))
scene.CreateGravityMagnitudeAttr().Set(9.81)
# Particle System
particleSystemPath = Sdf.Path("/particleSystem0")
self.particleSystemPath = particleSystemPath
particleSpacing = 0.2
restOffset = particleSpacing * 0.9
solidRestOffset = restOffset
fluidRestOffset = restOffset * 0.6
particleContactOffset = max(solidRestOffset + 0.001, fluidRestOffset / 0.6)
contactOffset = restOffset + 0.001
addPhysxParticleSystem(
stage,
particleSystemPath,
contactOffset,
restOffset,
particleContactOffset,
solidRestOffset,
fluidRestOffset,
4,
1,
Gf.Vec3f(0, 0, 0),
scenePath
)
particleSystem = stage.GetPrimAtPath(particleSystemPath)
# particle system settings
particleSystem.GetAttribute("cohesion").Set(0.002)
particleSystem.GetAttribute("smoothing").Set(0.8)
particleSystem.GetAttribute("anisotropyScale").Set(1.0)
particleSystem.GetAttribute("anisotropyMin").Set(0.2)
particleSystem.GetAttribute("anisotropyMax").Set(2.0)
particleSystem.GetAttribute("viscosity").Set(0.0091)
particleSystem.GetAttribute("surfaceTension").Set(0.0074)
particleSystem.GetAttribute("particleFriction").Set(0.1)
particleSystem.CreateAttribute("maxParticleNeighborhood", Sdf.ValueTypeNames.Int, True).Set(64)
particleSystem.GetAttribute("maxParticles").Set(20000)
# apply isoSurface params
particleSystem.CreateAttribute("enableIsosurface", Sdf.ValueTypeNames.Bool, True).Set(True)
particleSystem.CreateAttribute("maxIsosurfaceVertices", Sdf.ValueTypeNames.Int, True).Set(1024 * 1024)
particleSystem.CreateAttribute("maxIsosurfaceTriangles", Sdf.ValueTypeNames.Int, True).Set(2 * 1024 * 1024)
particleSystem.CreateAttribute("maxNumIsosurfaceSubgrids", Sdf.ValueTypeNames.Int, True).Set(1024 * 4)
particleSystem.CreateAttribute("isosurfaceGridSpacing", Sdf.ValueTypeNames.Float, True).Set(0.2)
filterSmooth = 1
filtering = 0
passIndex = 0
filtering = setGridFilteringPass(filtering, passIndex, filterSmooth)
passIndex = passIndex + 1
filtering = setGridFilteringPass(filtering, passIndex, filterSmooth)
passIndex = passIndex + 1
particleSystem.CreateAttribute("isosurfaceKernelRadius", Sdf.ValueTypeNames.Float, True).Set(0.5)
particleSystem.CreateAttribute("isosurfaceLevel", Sdf.ValueTypeNames.Float, True).Set(-0.3)
particleSystem.CreateAttribute("isosurfaceGridFilteringFlags", Sdf.ValueTypeNames.Int, True).Set(filtering)
particleSystem.CreateAttribute(
"isosurfaceGridSmoothingRadiusRelativeToCellSize", Sdf.ValueTypeNames.Float, True
).Set(0.3)
particleSystem.CreateAttribute("isosurfaceEnableAnisotropy", Sdf.ValueTypeNames.Bool, True).Set(False)
particleSystem.CreateAttribute("isosurfaceAnisotropyMin", Sdf.ValueTypeNames.Float, True).Set(0.1)
particleSystem.CreateAttribute("isosurfaceAnisotropyMax", Sdf.ValueTypeNames.Float, True).Set(2.0)
particleSystem.CreateAttribute("isosurfaceAnisotropyRadius", Sdf.ValueTypeNames.Float, True).Set(0.5)
particleSystem.CreateAttribute("numIsosurfaceMeshSmoothingPasses", Sdf.ValueTypeNames.Int, True).Set(5)
particleSystem.CreateAttribute("numIsosurfaceMeshNormalSmoothingPasses", Sdf.ValueTypeNames.Int, True).Set(5)
particleSystem.CreateAttribute("isosurfaceDoNotCastShadows", Sdf.ValueTypeNames.Bool, True).Set(True)
stage.SetInterpolationType(Usd.InterpolationTypeHeld)
def _setup_callbacks(self):
# callbacks
self._timeline = omni.timeline.get_timeline_interface()
stream = self._timeline.get_timeline_event_stream()
self._timeline_subscription = stream.create_subscription_to_pop(self._on_timeline_event)
# subscribe to Physics updates:
self._physics_update_subscription = omni.physx.get_physx_interface().subscribe_physics_step_events(
self.on_physics_step
)
def _on_timeline_event(self, e):
if e.type == int(omni.timeline.TimelineEventType.STOP):
self.it = 0
self.on_shutdown()
def step(self):
self.on_physics_step(None)
def on_physics_step(self, dt):
# import transforms3d
import math
xformCache = UsdGeom.XformCache()
# stop after 80 balls
# if (self.it > 80):
# return
pose = xformCache.GetLocalToWorldTransform(self.stage.GetPrimAtPath("/World/faucet/inflow"))
pos_faucet = Gf.Vec3f(pose.ExtractTranslation())
rot_faucet = Gf.Quatf(pose.ExtractRotationQuat())
pose = xformCache.GetLocalToWorldTransform(self.stage.GetPrimAtPath("/World/faucet/link_0"))
pos_link = Gf.Vec3f(pose.ExtractTranslation())
rot_link = Gf.Quatf(pose.ExtractRotationQuat())
diff = rot_link * self.rot_link_init.GetInverse()
real = diff.GetReal()
img = [diff.GetImaginary()[0],diff.GetImaginary()[1], diff.GetImaginary()[2] ]
#angle = transforms3d.euler.quat2euler([real, img[0], img[1], img[2]], axes='sxyz')
#sum_angle = abs(math.degrees(angle[0])) + abs(math.degrees(angle[1])) + abs(math.degrees(angle[2]))
rate = 1 #(sum_angle/30.0)
# print("pre rate:", rate)
if rate > 1:
rate = 1
# print("rate: ", rate)
# print("sum_angle", sum_angle)
if self.it == 0:
iso2Prim = self.stage.GetPrimAtPath("/particleSystem0/Isosurface")
rel = iso2Prim.CreateRelationship("material:binding", False)
rel.SetTargets([Sdf.Path("/World/Looks/OmniSurface_ClearWater")])
# rel.SetTargets([Sdf.Path("/World/Looks/OmniSurface_OrangeJuice")])
if self.it > 200:
return
# emit a ball roughly every 20 physics steps (slightly more often for larger rate)
if (self.counter < 20 - rate):
self.counter = self.counter + 1
return
self.counter = 0
self.it = self.it + 1
# print(faucet_prim.GetAttribute('xformOp:translate'))
# openness = 0.6 + 0.5 * rate
# print("openess", openness)
if rate < 0.1:
return
self.create_ball(self.stage, pos_faucet, rate)
def on_shutdown(self):
self._physics_update_subscription = None
self._timeline_subscription = None
# restore settings
# isregistry = carb.settings.acquire_settings_interface()
# isregistry.set_bool(ASYNC_SIMULATION, self._async_simulation)
def on_startup(self):
isregistry = carb.settings.acquire_settings_interface()
self._async_simulation = carb.settings.get_settings().get_as_bool(ASYNC_SIMULATION)
isregistry.set_bool(ASYNC_SIMULATION, True)
isregistry.set_int("persistent/simulation/minFrameRate", 60)
from omni.physx import acquire_physx_interface
physx = acquire_physx_interface()
physx.overwrite_gpu_setting(1)
physx.reset_simulation()
fluid_fill = FluidFill()
stage = omni.usd.get_context().get_stage()
fluid_fill.create(stage)
_timeline = omni.timeline.get_timeline_interface()
stream = _timeline.get_timeline_event_stream()
def _on_timeline_event(e):
if e.type == int(omni.timeline.TimelineEventType.STOP):
fluid_fill.on_shutdown()
_timeline_subscription = stream.create_subscription_to_pop(_on_timeline_event)
# for i in range(10):
# fluid_fill.step()
| 13,709 | Python | 39.56213 | 117 | 0.6584 |