Skip to content

Commit

Permalink
Merge pull request #19 from tud-airlab/ft-obstacle-tensor-update
Browse files Browse the repository at this point in the history
ft[Extension]: Adds additional way of updating obstacle root_state
  • Loading branch information
cpezzato authored Feb 23, 2023
2 parents f2866c8 + bbe0240 commit f9815c8
Show file tree
Hide file tree
Showing 3 changed files with 28 additions and 9 deletions.
21 changes: 18 additions & 3 deletions mppiisaac/planner/isaacgym_wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@
import torch
import numpy as np

import pathlib
file_path = pathlib.Path(__file__).parent.resolve()

@dataclass
class IsaacGymConfig(object):
Expand Down Expand Up @@ -80,7 +82,7 @@ def start_sim(self):
print(asset_file)
self._robot_asset = self.load_robot_asset_from_urdf(
asset_file=asset_file,
asset_root="../assets",
asset_root=f"{file_path}/../../assets",
fix_base_link=self._fix_base,
flip_visual_attachments=self._flip_visual,
)
Expand Down Expand Up @@ -203,6 +205,7 @@ def add_box(
pos: gymapi.Vec3,
color: gymapi.Vec3,
fixed: bool = True,
mass: float = 1.0
) -> int:
asset_options_objects = gymapi.AssetOptions()
asset_options_objects.fix_base_link = fixed
Expand All @@ -226,6 +229,9 @@ def add_box(
self.gym.set_rigid_body_color(
env, handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, color
)
props = self.gym.get_actor_rigid_body_properties(env, handle)
props[0].mass = mass
self.gym.set_actor_rigid_body_properties(env, handle, props)
return handle

def add_sphere(
Expand Down Expand Up @@ -272,7 +278,7 @@ def add_ground_plane(self):
def load_robot_asset_from_urdf(
self,
asset_file,
asset_root="../assets",
asset_root=f"{file_path}/../../assets",
fix_base_link=False,
flip_visual_attachments=False,
):
Expand Down Expand Up @@ -355,7 +361,7 @@ def update_root_state_tensor_by_obstacles(self, obstacles):
)

# Note: reset simulator if size changed, because this cannot be done at runtime.
if not all(o_size == self.env_cfg[obst_idx]["size"]):
if not all([a == b for a, b in zip(o_size, self.env_cfg[obst_idx]["size"]) ]):
env_cfg_changed = True
self.env_cfg[obst_idx]["size"] = o_size

Expand All @@ -370,3 +376,12 @@ def update_root_state_tensor_by_obstacles(self, obstacles):
self.gym.set_actor_root_state_tensor(
self.sim, gymtorch.unwrap_tensor(self.root_state)
)

def update_root_state_tensor_by_obstacles_tensor(self, obst_tensor):
for i, o_tensor in enumerate(obst_tensor):
self.root_state[:, i+3] = o_tensor.repeat(self.num_envs, 1)

self.gym.set_actor_root_state_tensor(
self.sim, gymtorch.unwrap_tensor(self.root_state)
)

2 changes: 1 addition & 1 deletion mppiisaac/planner/mppi.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ class MPPIConfig(object):
u_min: float = -1.0
u_max: float = 1.0
u_init: float = 0.0
U_init: Optional[List[float]] = None
U_init: Optional[List[List[float]]] = None
u_scale: float = 1
u_per_command: int = 1
rollout_var_discount: float = 0.95
Expand Down
14 changes: 9 additions & 5 deletions mppiisaac/planner/mppi_isaac.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,13 +64,9 @@ def running_cost(self, _):
        # Note: again normally mppi passes the state as a parameter in the running cost call, but using isaacgym the state is already saved and accessible in the simulator itself, so we ignore it and pass a handle to the simulator.
return self.objective.compute_cost(self.sim)

def compute_action(self, q, qdot, obst=None):
def compute_action(self, q, qdot, obst=None, obst_tensor=None):
self.sim.reset_root_state()

if obst:
# NOTE: for now this updates based on id in the list of obstacles
self.sim.update_root_state_tensor_by_obstacles(obst)

# Deal with non fixed base link robots, that we need to reset the root_state position / velocity of.
# Currently only differential drive bases are non fixed. We also have to deal with the kinematic transforms.
if self.cfg.differential_drive:
Expand Down Expand Up @@ -107,6 +103,14 @@ def compute_action(self, q, qdot, obst=None):
state = state.repeat(self.sim.num_envs, 1)
self.sim.set_dof_state_tensor(state)

# NOTE: There are two different ways of updating obstacle root_states
# Both update based on id in the list of obstacles
if obst:
self.sim.update_root_state_tensor_by_obstacles(obst)

if obst_tensor:
self.sim.update_root_state_tensor_by_obstacles_tensor(obst_tensor)

self.sim.save_root_state()
actions = self.mppi.command(self.state_place_holder).cpu()
return actions

0 comments on commit f9815c8

Please sign in to comment.