-
Notifications
You must be signed in to change notification settings - Fork 21
Pragnay/randomagents #292
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Pragnay/randomagents #292
Changes from all commits
42b8955
c49b15e
8c9376c
87bd891
5466e9e
7b376fd
bc5b942
5cbf4c4
7237135
d62e586
932d727
ef50fd4
2ade546
257d721
ff6a94f
7ceeeb1
0aa0a87
0c8d21b
b6c64c9
dc972e5
974b93e
2f5df66
e94420b
9e37dcf
ca4a3ae
5ae06e7
9e5de03
3433c77
ff2b769
1044152
2971422
21f6940
1eb3a6b
7b5111a
1fe73a9
b8aeed9
3a25ac7
8fd053f
392905e
e5436bf
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -50,16 +50,27 @@ offroad_behavior = 0 | |
| episode_length = 300 | ||
| resample_frequency = 300 | ||
| termination_mode = 1 # 0 - terminate at episode_length, 1 - terminate after all agents have been reset | ||
| map_dir = "resources/drive/binaries/carla_3D" | ||
| num_maps = 10000 | ||
| map_dir = "resources/drive/binaries/carla_2D" | ||
| num_maps = 3 | ||
| ; If True, allows training with fewer maps than requested (warns instead of erroring) | ||
| allow_fewer_maps = True | ||
| ; Determines which step of the trajectory to initialize the agents at upon reset | ||
| init_steps = 0 | ||
| ; Options: "control_vehicles", "control_agents", "control_wosac", "control_sdc_only" | ||
| control_mode = "control_vehicles" | ||
| ; Options: "create_all_valid", "create_only_controlled" | ||
| init_mode = "create_all_valid" | ||
| ; Options: "create_all_valid", "create_only_controlled", "init_variable_agent_number"(creates random number of controlled agents per env) | ||
| init_mode = "init_variable_agent_number" | ||
| ; Below options only valid for "init_variable_agent_number" init_mode | ||
| min_agents_per_env = 1 | ||
| max_agents_per_env = 128 | ||
| ; Dimension Ranges for agents | ||
| spawn_width_min = 1.5 | ||
| spawn_width_max = 2.5 | ||
| spawn_length_min = 2.0 | ||
| spawn_length_max = 5.5 | ||
| spawn_height = 1.5 | ||
|
|
||
| ; Reward settings | ||
| reward_randomization = 1 | ||
| ; Options: 0 - Fixed reward values, 1 - Random reward values | ||
| reward_conditioning = 1 | ||
|
|
@@ -144,11 +155,11 @@ vf_clip_coef = 0.1999999999999999 | |
| vf_coef = 2 | ||
| vtrace_c_clip = 1 | ||
| vtrace_rho_clip = 1 | ||
| checkpoint_interval = 250 | ||
| checkpoint_interval = 1000 | ||
| ; Rendering options | ||
| render = True | ||
| render_async = False # A render interval below 50 might cause process starvation and slowness in training | ||
| render_interval = 250 | ||
| render_interval = 1000 | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. why change this?
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Rendering was taking a really long amount of time even with async on so I just increased the interval
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This only happens when max_agents is very high; previously, we were working with just 32 so it was not a problem |
||
| ; If True, show exactly what the agent sees in agent observation | ||
| obs_only = True | ||
| ; Show grid lines | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -42,6 +42,9 @@ void demo() { | |
| exit(1); | ||
| } | ||
|
|
||
| // Set different seed each time | ||
| srand(time(NULL)); | ||
|
|
||
| // Note: Use below hardcoded settings for 2.0 demo purposes. Since the policy was | ||
| // trained with these exact settings, changing them may lead to | ||
| // weird behavior. | ||
|
|
@@ -68,6 +71,15 @@ void demo() { | |
| // .map_name = "resources/drive/map_town_02_carla.bin", | ||
| // }; | ||
|
|
||
| AgentSpawnSettings spawn_settings = { | ||
| .max_agents_in_sim = conf.max_agents_per_env, | ||
| .min_w = conf.spawn_width_min, | ||
| .max_w = conf.spawn_width_max, | ||
| .min_l = conf.spawn_length_min, | ||
| .max_l = conf.spawn_length_max, | ||
| .h = conf.spawn_height, | ||
mpragnay marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| }; | ||
|
|
||
| Drive env = { | ||
| .human_agent_idx = 0, | ||
| .action_type = 0, // Demo doesn't support continuous action space | ||
|
|
@@ -90,13 +102,19 @@ void demo() { | |
| .init_steps = conf.init_steps, | ||
| .init_mode = conf.init_mode, | ||
| .control_mode = conf.control_mode, | ||
| .map_name = "resources/drive/binaries/carla/carla_3D/map_001.bin", | ||
| .reward_conditioning = 1, | ||
| .spawn_settings = spawn_settings, | ||
| .map_name = "resources/drive/binaries/carla_2D/map_000.bin", | ||
| .reward_conditioning = conf.reward_conditioning, | ||
| }; | ||
|
|
||
| if (conf.init_mode == INIT_VARIABLE_AGENT_NUMBER) { | ||
| env.num_agents = conf.min_agents_per_env + rand() % (conf.max_agents_per_env - conf.min_agents_per_env + 1); | ||
| } | ||
|
|
||
| allocate(&env); | ||
| c_reset(&env); | ||
| c_render(&env); | ||
| Weights *weights = load_weights("resources/drive/puffer_drive_resampling_speed_lane.bin"); | ||
| Weights *weights = load_weights("best_policy_with_reward_conditioning.bin"); | ||
|
||
| DriveNet *net = init_drivenet(weights, env.active_agent_count, env.dynamics_model, env.reward_conditioning); | ||
|
|
||
| int accel_delta = 1; | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.