Fix seed shape issue #249

Open · wants to merge 25 commits into main

Commits (25)
17e9231  Fix seed shape issue (scharalambous-sony, May 2, 2024)
3aa52d0  Minor fix (scharalambous-sony, May 3, 2024)
85cf2d3  Augmented state (position and velocity) property (scharalambous-sony, Apr 22, 2024)
0c57010  Specify js TO seeds (scharalambous-sony, May 3, 2024)
5f591e8  Specify finetune js TO iters (scharalambous-sony, May 3, 2024)
30b6216  Resolve js finetune fails (experimental) (scharalambous-sony, May 3, 2024)
e5ebbf9  Resolve repeated application of limits scaling (scharalambous-sony, May 6, 2024)
f8965ac  Resolve bug for any other than linear interpolation type (scharalambous-sony, May 7, 2024)
1fe73d7  Cubic interpolation kernel (WIP), add utility to compare solutions (scharalambous-sony, May 10, 2024)
d4dce02  Debug TO fails (scharalambous-sony, May 10, 2024)
0f9cad6  Move joint limit scaling to JointLimits class (scharalambous-sony, May 10, 2024)
196b2c8  Add contiguous property to JointState (scharalambous-sony, May 17, 2024)
3f60599  Add plot util (scharalambous-sony, May 17, 2024)
dc6bf28  Differentiable cubic interpolation kernel (scharalambous-sony, May 17, 2024)
6bdca2f  Remove scale from differentiable interpolation kernel. Add linear dif… (scharalambous-sony, Jun 6, 2024)
41a89d5  Update plot util (scharalambous-sony, Jun 10, 2024)
a250683  Add self collision constraint member to RobotWorld (scharalambous-sony, Jun 10, 2024)
568210c  Resolve bug when not optimizing dt (scharalambous-sony, Jun 10, 2024)
0c781ce  Fix bug in time optimization (experimental) (scharalambous-sony, Jun 10, 2024)
e38c20d  Revert "Fix bug in time optimization (experimental)" (scharalambous-sony, Jun 11, 2024)
31e4ab2  Attempt to fix bug when optimize dt is False (experimental) (scharalambous-sony, Jun 13, 2024)
37194d3  Minor fixes and annotation to BoundCost kernels and functions (scharalambous-sony, Jun 18, 2024)
61c5696  Add get_link_spheres_tensor function to enable jit tracing (scharalambous-sony, Jul 15, 2024)
d9d32a4  Resolve 'not hashable' error (scharalambous-sony, Jul 17, 2024)
308b6fc  Merge pull request #1 from scharalambous-sony/dev_v0.7.2 (scharalambous-sony, Jul 22, 2024)
8 changes: 2 additions & 6 deletions src/curobo/cuda_robot_model/cuda_robot_generator.py
@@ -969,11 +969,7 @@ def _update_joint_limits(self):
                 self.cspace.max_acceleration.unsqueeze(0),
             ]
         )
-        # clip joint position:
-        # NOTE: change this to be per joint
-        joint_limits["position"][0] += self.cspace.position_limit_clip
-        joint_limits["position"][1] -= self.cspace.position_limit_clip
-        joint_limits["velocity"][0] *= self.cspace.velocity_scale
-        joint_limits["velocity"][1] *= self.cspace.velocity_scale
 
         self._joint_limits = JointLimits(joint_names=self.joint_names, **joint_limits)
+        self._joint_limits.clip_position_Limits(self.cspace.position_limit_clip)
+        self._joint_limits.apply_scale(velocity_scale=self.cspace.velocity_scale)
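
The clip-and-scale post-processing now lives on JointLimits behind one-shot flags, so re-running this generator path can no longer shrink the limits a second time. A minimal sketch of the new flow, assuming JointLimits can be built directly from plain tensors (all values here are made up):

import torch

from curobo.cuda_robot_model.types import JointLimits

# hypothetical 2-DOF limits: row 0 holds lower bounds, row 1 upper bounds
limits = JointLimits(
    joint_names=["joint_0", "joint_1"],
    position=torch.tensor([[-2.0, -2.0], [2.0, 2.0]]),
    velocity=torch.tensor([[-1.0, -1.0], [1.0, 1.0]]),
    acceleration=torch.tensor([[-10.0, -10.0], [10.0, 10.0]]),
    jerk=torch.tensor([[-500.0, -500.0], [500.0, 500.0]]),
)
limits.clip_position_Limits(0.1)        # position becomes [-1.9, 1.9]
limits.apply_scale(velocity_scale=0.5)  # velocity becomes [-0.5, 0.5]
limits.apply_scale(velocity_scale=0.5)  # no-op: velocity_scaled is already True
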
37 changes: 25 additions & 12 deletions src/curobo/cuda_robot_model/types.py
@@ -11,7 +11,7 @@
 from __future__ import annotations
 
 # Standard Library
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from enum import Enum
 from typing import Any, Dict, List, Optional, Union
 
@@ -53,6 +53,11 @@ class JointLimits:
     effort: Optional[torch.Tensor] = None
     tensor_args: TensorDeviceType = TensorDeviceType()
 
+    position_clipped: bool = field(default=False, init=False)
+    velocity_scaled: bool = field(default=False, init=False)
+    acceleration_scaled: bool = field(default=False, init=False)
+    jerk_scaled: bool = field(default=False, init=False)
+
     @staticmethod
     def from_data_dict(data: Dict, tensor_args: TensorDeviceType = TensorDeviceType()):
         p = tensor_args.to_device(data["position"])
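
The new flags use field(default=False, init=False), so they never appear in the generated __init__ and cannot be set from configuration dictionaries; they only flip when the clip/scale methods added below run. A standalone illustration of the dataclass behaviour (class and field names here are illustrative):

from dataclasses import dataclass, field

@dataclass
class Limits:
    position: float
    position_clipped: bool = field(default=False, init=False)

lim = Limits(position=1.0)   # position_clipped is not a constructor argument
print(lim.position_clipped)  # False
lim.position_clipped = True  # but it remains a normal mutable attribute
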
@@ -84,6 +89,24 @@ def copy_(self, new_jl: JointLimits):
         self.effort = copy_if_not_none(new_jl.effort, self.effort)
         return self
 
+    def clip_position_Limits(self, position_clip: float):
+        if not self.position_clipped:
+            # clip the position limits once; the flag prevents repeated shrinking
+            self.position[0] += position_clip
+            self.position[1] -= position_clip
+            self.position_clipped = True
+
+    def apply_scale(self, velocity_scale: Optional[float] = None, acceleration_scale: Optional[float] = None, jerk_scale: Optional[float] = None):
+        if velocity_scale is not None and not self.velocity_scaled:
+            self.velocity = self.velocity * velocity_scale
+            self.velocity_scaled = True
+        if acceleration_scale is not None and not self.acceleration_scaled:
+            self.acceleration = self.acceleration * acceleration_scale
+            self.acceleration_scaled = True
+        if jerk_scale is not None and not self.jerk_scaled:
+            self.jerk = self.jerk * jerk_scale
+            self.jerk_scaled = True
+
 
 @dataclass
 class CSpaceConfig:
@@ -196,16 +219,6 @@ def clone(self) -> CSpaceConfig:
             jerk_scale=self.jerk_scale.clone(),
         )
 
-    def scale_joint_limits(self, joint_limits: JointLimits):
-        if self.velocity_scale is not None:
-            joint_limits.velocity = joint_limits.velocity * self.velocity_scale
-        if self.acceleration_scale is not None:
-            joint_limits.acceleration = joint_limits.acceleration * self.acceleration_scale
-        if self.jerk_scale is not None:
-            joint_limits.jerk = joint_limits.jerk * self.jerk_scale
-
-        return joint_limits
-
     @staticmethod
     def load_from_joint_limits(
         joint_position_upper: torch.Tensor,
@@ -315,7 +328,7 @@ def __post_init__(self):
         if self.cspace is None and self.joint_limits is not None:
             self.load_cspace_cfg_from_kinematics()
         if self.joint_limits is not None and self.cspace is not None:
-            self.joint_limits = self.cspace.scale_joint_limits(self.joint_limits)
+            self.joint_limits.apply_scale(self.cspace.velocity_scale, self.cspace.acceleration_scale, self.cspace.jerk_scale)
         if self.link_spheres is not None and self.reference_link_spheres is None:
             self.reference_link_spheres = self.link_spheres.clone()
64 changes: 44 additions & 20 deletions src/curobo/rollout/cost/bound_cost.py
@@ -37,7 +37,7 @@ class BoundCostType(Enum):
     BOUNDS_SMOOTH = 2
 
 
-@dataclass
+@dataclass(eq=False)
 class BoundCostConfig(CostConfig):
     joint_limits: Optional[JointLimits] = None
     smooth_weight: Optional[List[float]] = None
@@ -298,12 +298,15 @@ def forward_all_bound_cost(
     p,
     v,
     a,
+    j,
     p_lower_bounds,
     p_upper_bounds,
     v_lower_bounds,
     v_upper_bounds,
     a_lower_bounds,
     a_upper_bounds,
+    j_lower_bounds,
+    j_upper_bounds,
     weight,
 ):
     # c = torch.sum(
@@ -325,7 +328,9 @@
             + torch.nn.functional.relu(v - v_upper_bounds)
             + torch.nn.functional.relu(a_lower_bounds - a)
             + torch.nn.functional.relu(a - a_upper_bounds)
-        ),
+            + torch.nn.functional.relu(j_lower_bounds - j)
+            + torch.nn.functional.relu(j - j_upper_bounds)
+        ),
         dim=-1,
     )
 
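The added jerk terms follow the same pattern as the existing ones: relu(lower - x) is positive only below the lower bound and relu(x - upper) only above the upper bound, so states inside the limits contribute zero cost. A minimal PyTorch equivalent (function name, shapes, and bounds are illustrative):

import torch

def hinge_bound_cost(states, bounds, weight):
    # states: iterable of (batch, horizon, dof) tensors, e.g. (p, v, a, j)
    # bounds: matching iterable of (lower, upper) limit tensors
    c = torch.zeros(states[0].shape[:-1])
    for x, (lo, hi) in zip(states, bounds):
        c = c + torch.sum(
            torch.nn.functional.relu(lo - x) + torch.nn.functional.relu(x - hi),
            dim=-1,
        )
    return weight * c

p = v = a = j = torch.zeros(4, 30, 7)            # batch x horizon x dof
bounds = [(-torch.ones(7), torch.ones(7))] * 4   # same limits for all states
print(hinge_bound_cost((p, v, a, j), bounds, 1.0).shape)  # torch.Size([4, 30])
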
@@ -661,7 +666,7 @@ def forward_bound_pos_warp(
 
     w = wp.float32(0.0)
     c_p = wp.float32(0.0)
-    g_p = wp.float32(0.0)
+    g_p = wp.float32(0.0)  # gradient for position
     c_total = wp.float32(0.0)
 
     # we launch batch * horizon * dof kernels
@@ -670,12 +675,13 @@
     d_id = tid - (b_id * horizon * dof + h_id * dof)
     if b_id >= batch_size or h_id >= horizon or d_id >= dof:
         return
 
-    # read weights:
+    # read activation distance (a ratio):
     eta_p = activation_distance[0]
+    # read weights:
     w = weight[0]
 
-    n_w = wp.float32(0.0)
+    # read null space weight:
     n_w = null_weight[0]
     target_p = wp.float32(0.0)
     target_id = wp.int32(0.0)
@@ -684,11 +690,14 @@
         target_id = retract_idx[b_id]
         target_id = target_id * dof + d_id
         target_p = retract_config[target_id]
+    # read lower and upper bounds:
     p_l = p_b[d_id]
     p_u = p_b[dof + d_id]
 
+    # compute activation distance as ratio of limits range:
     p_range = p_u - p_l
     eta_p = eta_p * (p_range)
+    # shrink lower and upper limits by activation distance:
     p_l += eta_p
     p_u -= eta_p
     if p_range < 1.0:
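
The activation distance is interpreted as a fraction of each joint's limit range, so the penalty begins before the hard limit is hit. A small standalone check, using made-up limits of [-1, 1] and activation_distance[0] = 0.05:

p_l, p_u, eta_ratio = -1.0, 1.0, 0.05
eta_p = eta_ratio * (p_u - p_l)  # 0.05 * 2.0 = 0.1
p_l, p_u = p_l + eta_p, p_u - eta_p
print(p_l, p_u)  # -0.9 0.9: the cost activates 0.1 before each limit
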
@@ -697,17 +706,16 @@
     # compute cost:
     b_addrs = b_id * horizon * dof + h_id * dof + d_id
 
-    # read buffers:
-
+    # read current position:
     c_p = pos[b_addrs]
 
+    # compute null space cost and its gradient:
     if n_w > 0.0:
         error = c_p - target_p
         c_total = n_w * error * error
         g_p = 2.0 * n_w * error
 
     # bound cost:
-
     delta = 0.0
     if c_p < p_l:
         delta = c_p - p_l
@@ -756,18 +764,25 @@
     d_id = wp.int32(0)
     b_addrs = int(0)
 
-    w = wp.float32(0.0)
-    c_p = wp.float32(0.0)
     c_v = wp.float32(0.0)
     c_a = wp.float32(0.0)
+    c_p = wp.float32(0.0)
+    c_j = wp.float32(0.0)
 
     g_p = wp.float32(0.0)
     g_v = wp.float32(0.0)
     g_a = wp.float32(0.0)
-    b_wv = float(0.0)
-    b_wa = float(0.0)
-    b_wj = float(0.0)
+    g_j = wp.float32(0.0)
+
+    b_wp = wp.float32(0.0)
+    b_wv = wp.float32(0.0)
+    b_wa = wp.float32(0.0)
+    b_wj = wp.float32(0.0)
+
     c_total = wp.float32(0.0)
+
+    delta = float(0.0)
+
     # we launch batch * horizon * dof kernels
     b_id = tid / (horizon * dof)
     h_id = (tid - (b_id * horizon * dof)) / dof
@@ -785,7 +800,7 @@
         target_p = retract_config[target_id]
 
     # read weights:
-    w = weight[0]
+    b_wp = weight[0]
     b_wv = weight[1]
     b_wa = weight[2]
     b_wj = weight[3]
@@ -794,18 +809,17 @@
     b_addrs = b_id * horizon * dof + h_id * dof + d_id
 
     # read buffers:
-    c_p = pos[b_addrs]
     c_v = vel[b_addrs]
     c_a = acc[b_addrs]
+    c_p = pos[b_addrs]
+    c_j = jerk[b_addrs]
 
-    # if w_j > 0.0:
     eta_p = activation_distance[0]
     eta_v = activation_distance[1]
     eta_a = activation_distance[2]
     eta_j = activation_distance[3]
 
-    c_j = jerk[b_addrs]
 
     p_l = p_b[d_id]
     p_u = p_b[dof + d_id]
     p_range = p_u - p_l
Expand Down Expand Up @@ -834,7 +848,17 @@ def forward_bound_warp(
     j_l += eta_j
     j_u -= eta_j
 
-    delta = float(0.0)
+    # TODO: Is this necessary?
+    # TODO: Why is (1 / range) sometimes used and (1 / range**2) other times?
+    if p_range < 1.0:
+        b_wp = (1.0 / p_range) * b_wp
+    if v_range < 1.0:
+        b_wv = b_wv * (1.0 / v_range)
+    if a_range < 1.0:
+        b_wa = b_wa * (1.0 / a_range)
+    if j_range < 1.0:
+        b_wj = b_wj * (1.0 / j_range)
+
     if n_w > 0.0:
         error = c_p - target_p
         c_total = n_w * error * error
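
One way to read the second TODO: with cost b_w * delta**2 and a violation delta that tends to grow with the limit range, dividing the weight by range keeps the gradient 2 * b_w * delta roughly range-free, while dividing by range**2 makes the cost itself range-invariant; the kernels currently mix the two conventions. A quick numeric check of the difference:

for r in (0.5, 2.0):
    delta = 0.1 * r  # a violation proportional to the range
    print(r, (1.0 / r) * delta**2, (1.0 / r**2) * delta**2)
# 1/range:    0.005 vs 0.02 -- cost still grows with the range
# 1/range**2: 0.01  vs 0.01 -- cost is range-invariant
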
@@ -848,8 +872,8 @@
     elif c_p > p_u:
         delta = c_p - p_u
 
-    c_total += w * delta * delta
-    g_p += 2.0 * w * delta
+    c_total += b_wp * delta * delta
+    g_p += 2.0 * b_wp * delta
 
     # bound cost:
     delta = 0.0
2 changes: 1 addition & 1 deletion src/curobo/rollout/cost/cost_base.py
@@ -19,7 +19,7 @@
 from curobo.types.base import TensorDeviceType
 
 
-@dataclass
+@dataclass(eq=False)
 class CostConfig:
     weight: Union[torch.Tensor, float, List[float]]
     tensor_args: TensorDeviceType = None
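
eq=False matters here because a dataclass that generates __eq__ (the default) also sets __hash__ = None, so instances stop working as dictionary keys or with functools caches; this is the 'not hashable' error fixed in commit d9d32a4. A standalone reproduction (class names are illustrative):

from dataclasses import dataclass

@dataclass
class WithEq:        # default eq=True generates __eq__ and sets __hash__ = None
    weight: float = 1.0

try:
    {WithEq(): "cost"}
except TypeError as err:
    print(err)       # unhashable type: 'WithEq'

@dataclass(eq=False)
class WithoutEq:     # keeps object's identity-based __eq__ and __hash__
    weight: float = 1.0

print({WithoutEq(): "cost"})  # works
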
2 changes: 1 addition & 1 deletion src/curobo/rollout/cost/primitive_collision_cost.py
@@ -23,7 +23,7 @@
 from curobo.util.torch_utils import get_torch_jit_decorator
 
 
-@dataclass
+@dataclass(eq=False)
 class PrimitiveCollisionCostConfig(CostConfig):
     """Create Collision Cost Configuration."""
 
2 changes: 1 addition & 1 deletion src/curobo/rollout/cost/self_collision_cost.py
@@ -23,7 +23,7 @@
 from .cost_base import CostBase, CostConfig
 
 
-@dataclass
+@dataclass(eq=False)
 class SelfCollisionCostConfig(CostConfig):
     self_collision_kin_config: Optional[SelfCollisionKinematicsConfig] = None
 
19 changes: 19 additions & 0 deletions src/curobo/types/state.py
@@ -193,6 +193,13 @@ def get_state_tensor(self):
         state_tensor = torch.cat((self.position, velocity, acceleration, jerk), dim=-1)
         return state_tensor
 
+    @property
+    def augmented_state(self):
+        velocity = self.velocity
+        if velocity is None:
+            velocity = self.position * 0.0
+        return torch.cat((self.position, velocity), dim=-1)
+
     @staticmethod
     def from_state_tensor(state_tensor, joint_names=None, dof=7):
         return JointState(
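
The new property concatenates position and velocity along the last dimension, substituting zeros when velocity is missing, which matches the augmented state a position-velocity system expects. A usage sketch, assuming a JointState built via from_position and made-up shapes:

import torch

from curobo.types.state import JointState

js = JointState.from_position(torch.zeros(4, 30, 7))  # batch x horizon x dof
print(js.augmented_state.shape)  # torch.Size([4, 30, 14]): [position | velocity]
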
@@ -371,6 +378,18 @@ def zeros(
             joint_names=joint_names,
         )
 
+    def contiguous(self):
+        p = v = a = j = None
+        if self.position is not None:
+            p = self.position.contiguous()
+        if self.velocity is not None:
+            v = self.velocity.contiguous()
+        if self.acceleration is not None:
+            a = self.acceleration.contiguous()
+        if self.jerk is not None:
+            j = self.jerk.contiguous()
+        return JointState(p, v, a, self.joint_names, jerk=j)
+
     def detach(self):
         self.position = self.position.detach()
         self.velocity = self.velocity.detach()
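
A non-contiguous tensor (for example the result of a transpose) cannot be handed straight to kernels that index flat memory, which is what the Warp kernels and the jit-traced paths in this PR do; the helper applies .contiguous() field by field while leaving missing (None) entries alone. The underlying torch behaviour:

import torch

x = torch.zeros(30, 7).t()  # a transposed view is not contiguous
print(x.is_contiguous())               # False
print(x.contiguous().is_contiguous())  # True: contiguous() makes a packed copy
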