diff --git a/mppiisaac/planner/isaacgym_wrapper.py b/mppiisaac/planner/isaacgym_wrapper.py
index e6d13ff..8351624 100644
--- a/mppiisaac/planner/isaacgym_wrapper.py
+++ b/mppiisaac/planner/isaacgym_wrapper.py
@@ -4,6 +4,8 @@
 import torch
 import numpy as np
+import pathlib
+file_path = pathlib.Path(__file__).parent.resolve()
 
 
 @dataclass
 class IsaacGymConfig(object):
@@ -80,7 +82,7 @@ def start_sim(self):
         print(asset_file)
         self._robot_asset = self.load_robot_asset_from_urdf(
             asset_file=asset_file,
-            asset_root="../assets",
+            asset_root=f"{file_path}/../../assets",
             fix_base_link=self._fix_base,
             flip_visual_attachments=self._flip_visual,
         )
@@ -203,6 +205,7 @@ def add_box(
         pos: gymapi.Vec3,
         color: gymapi.Vec3,
         fixed: bool = True,
+        mass: float = 1.0
     ) -> int:
         asset_options_objects = gymapi.AssetOptions()
         asset_options_objects.fix_base_link = fixed
@@ -226,6 +229,9 @@ def add_box(
         self.gym.set_rigid_body_color(
             env, handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, color
         )
+        props = self.gym.get_actor_rigid_body_properties(env, handle)
+        props[0].mass = mass
+        self.gym.set_actor_rigid_body_properties(env, handle, props)
         return handle
 
     def add_sphere(
@@ -272,7 +278,7 @@ def add_ground_plane(self):
     def load_robot_asset_from_urdf(
         self,
         asset_file,
-        asset_root="../assets",
+        asset_root=f"{file_path}/../../assets",
         fix_base_link=False,
         flip_visual_attachments=False,
     ):
@@ -355,7 +361,7 @@ def update_root_state_tensor_by_obstacles(self, obstacles):
             )
 
             # Note: reset simulator if size changed, because this cannot be done at runtime.
-            if not all(o_size == self.env_cfg[obst_idx]["size"]):
+            if not all([a == b for a, b in zip(o_size, self.env_cfg[obst_idx]["size"]) ]):
                 env_cfg_changed = True
                 self.env_cfg[obst_idx]["size"] = o_size
 
@@ -370,3 +376,12 @@ def update_root_state_tensor_by_obstacles(self, obstacles):
         self.gym.set_actor_root_state_tensor(
             self.sim, gymtorch.unwrap_tensor(self.root_state)
         )
+
+    def update_root_state_tensor_by_obstacles_tensor(self, obst_tensor):
+        for i, o_tensor in enumerate(obst_tensor):
+            self.root_state[:, i+3] = o_tensor.repeat(self.num_envs, 1)
+
+        self.gym.set_actor_root_state_tensor(
+            self.sim, gymtorch.unwrap_tensor(self.root_state)
+        )
+
diff --git a/mppiisaac/planner/mppi.py b/mppiisaac/planner/mppi.py
index 9684a40..8f9c1d8 100644
--- a/mppiisaac/planner/mppi.py
+++ b/mppiisaac/planner/mppi.py
@@ -68,7 +68,7 @@ class MPPIConfig(object):
     u_min: float = -1.0
     u_max: float = 1.0
     u_init: float = 0.0
-    U_init: Optional[List[float]] = None
+    U_init: Optional[List[List[float]]] = None
     u_scale: float = 1
     u_per_command: int = 1
     rollout_var_discount: float = 0.95
diff --git a/mppiisaac/planner/mppi_isaac.py b/mppiisaac/planner/mppi_isaac.py
index a8c174f..982f852 100644
--- a/mppiisaac/planner/mppi_isaac.py
+++ b/mppiisaac/planner/mppi_isaac.py
@@ -64,13 +64,9 @@ def running_cost(self, _):
         # Note: again normally mppi passes the state as a parameter in the running cost call, but using isaacgym the state is already saved and accesible in the simulator itself, so we ignore it and pass a handle to the simulator.
         return self.objective.compute_cost(self.sim)
 
-    def compute_action(self, q, qdot, obst=None):
+    def compute_action(self, q, qdot, obst=None, obst_tensor=None):
         self.sim.reset_root_state()
 
-        if obst:
-            # NOTE: for now this updates based on id in the list of obstacles
-            self.sim.update_root_state_tensor_by_obstacles(obst)
-
         # Deal with non fixed base link robots, that we need to reset the root_state position / velocity of.
         # Currently only differential drive bases are non fixed. We also have to deal with the kinematic transforms.
         if self.cfg.differential_drive:
@@ -107,6 +103,14 @@ def compute_action(self, q, qdot, obst=None):
         state = state.repeat(self.sim.num_envs, 1)
         self.sim.set_dof_state_tensor(state)
 
+        # NOTE: There are two different ways of updating obstacle root_states
+        # Both update based on id in the list of obstacles
+        if obst:
+            self.sim.update_root_state_tensor_by_obstacles(obst)
+
+        if obst_tensor:
+            self.sim.update_root_state_tensor_by_obstacles_tensor(obst_tensor)
+
         self.sim.save_root_state()
         actions = self.mppi.command(self.state_place_holder).cpu()
         return actions
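
Reviewer note: a self-contained sketch of what the new update_root_state_tensor_by_obstacles_tensor does to the root-state buffer, using plain torch. The i + 3 offset comes from the diff itself; the env/actor counts and the assumption that the first three actor slots belong to the robot are illustrative only.

import torch

# Assumed shapes for illustration: 4 parallel envs, 6 actors per env.
# Isaac Gym stores 13 floats per actor root state:
# position (3), rotation quaternion (4), linear velocity (3), angular velocity (3).
num_envs, num_actors = 4, 6
root_state = torch.zeros(num_envs, num_actors, 13)

# One root-state row per obstacle, as passed via the new obst_tensor argument.
obst_tensor = [
    torch.tensor([1.0, 0.5, 0.2, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
    torch.tensor([2.0, -0.5, 0.2, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
]

# Mirrors the added loop: obstacle i overwrites actor slot i + 3 in every env,
# i.e. obstacle actors are assumed to start at root-state index 3.
for i, o_tensor in enumerate(obst_tensor):
    root_state[:, i + 3] = o_tensor.repeat(num_envs, 1)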
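
On the size-comparison rewrite in update_root_state_tensor_by_obstacles: with plain Python sequences, o_size == cfg_size evaluates to a single bool, which all() cannot iterate, presumably the failure the element-wise form fixes. A minimal reproduction with made-up values:

o_size = [0.2, 0.2, 0.5]
cfg_size = [0.2, 0.2, 0.5]

# Old form: `o_size == cfg_size` yields one bool (True here), and
# all(True) raises "TypeError: 'bool' object is not iterable".
# New form compares element by element, also across mixed container types:
unchanged = all([a == b for a, b in zip(o_size, cfg_size)])
print(unchanged)  # True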
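
The U_init type change in MPPIConfig means a warm start now carries a full control sequence, one inner list per horizon step, instead of a single flat vector. A hypothetical example (the 3-step / 2-DOF shape and the assumption that the remaining config fields have usable defaults are mine, not from the diff):

from mppiisaac.planner.mppi import MPPIConfig

# One row per horizon step, one entry per actuated DOF (here: 3 steps, 2 DOFs).
cfg = MPPIConfig(U_init=[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])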