forked from maxspahn/gym_envs_urdf
point_robot.py
import gym
import numpy as np

from urdfenvs.robots.generic_urdf import GenericUrdfReacher
from urdfenvs.sensors.lidar import Lidar  # imported for optional use; not attached below
# from urdfenvs.urdf_common.differential_drive_robot import DifferentialDriveRobot


def run_point_robot(n_steps=1000, render=True, goal=False, obstacles=False):
    # Velocity-controlled point robot loaded from its URDF description.
    robots = [
        GenericUrdfReacher(urdf="loadPointRobot.urdf", mode="vel"),
    ]
    env = gym.make(
        "urdf-env-v0",
        dt=0.01, robots=robots, render=render
    )
    # Constant velocity action and the initial position/velocity of the robot.
    action = np.array([0.1, 0.0, 0.5])
    pos0 = np.array([1.0, 0.1, 0.0])
    vel0 = np.array([1.0, 0.0, 0.0])
    ob = env.reset(pos=pos0, vel=vel0)
    print(f"Initial observation : {ob}")
    env.add_walls()

    if obstacles:
        from examples.scene_objects.obstacles import (
            sphereObst1,
            sphereObst2,
            urdfObst1,
            dynamicSphereObst3,
        )
        env.add_obstacle(sphereObst1)
        env.add_obstacle(sphereObst2)
        env.add_obstacle(urdfObst1)
        env.add_obstacle(dynamicSphereObst3)
    if goal:
        from examples.scene_objects.goal import splineGoal
        env.add_goal(splineGoal)

    # Step the simulation with the constant action and record the observations.
    history = []
    for _ in range(n_steps):
        ob, _, _, _ = env.step(action)
        history.append(ob)
    env.close()
    return history


if __name__ == "__main__":
    run_point_robot(render=True)
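
# Optional sensor attachment (sketch, not part of the original script): the
# Lidar import above is currently unused. Based on the lidar examples in the
# upstream gym_envs_urdf repository, a lidar could be mounted on a robot link
# right after the environment is created, along the lines of the commented
# snippet below. The constructor arguments (link index, nb_rays, raw_data) and
# the env.add_sensor call are assumptions and should be checked against the
# installed urdfenvs version.
#
#     lidar = Lidar(4, nb_rays=4, raw_data=False)
#     env.add_sensor(lidar)
#     ob = env.reset(pos=pos0, vel=vel0)  # reset again so lidar data shows up in ob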