diff --git a/vmas/scenarios/joint_passage.py b/vmas/scenarios/joint_passage.py
index cea4268a..64d54a43 100644
--- a/vmas/scenarios/joint_passage.py
+++ b/vmas/scenarios/joint_passage.py
@@ -458,11 +458,10 @@ def reward(self, agent: Agent):
                         self.world.get_distance(a, passage)
                         <= self.min_collision_distance
                     ] += self.collision_reward
-                for wall in self.walls:
-                    self.collision_rew[
-                        self.world.get_distance(a, wall)
-                        <= self.min_collision_distance
-                    ] += self.collision_reward
+            for wall in self.walls:
+                self.collision_rew[
+                    self.world.get_distance(a, wall) <= self.min_collision_distance
+                ] += self.collision_reward
 
         # Joint collisions
         for p in self.passages:
diff --git a/vmas/scenarios/joint_passage_size.py b/vmas/scenarios/joint_passage_size.py
index befaf8b3..d22d8a55 100644
--- a/vmas/scenarios/joint_passage_size.py
+++ b/vmas/scenarios/joint_passage_size.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022-2024.
+# Copyright (c) 2022-2025.
 # ProrokLab (https://www.proroklab.org/)
 # All rights reserved.
 import math
@@ -453,11 +453,10 @@ def reward(self, agent: Agent):
                         self.world.get_distance(a, passage)
                         <= self.min_collision_distance
                     ] += self.collision_reward
-                for wall in self.walls:
-                    self.collision_rew[
-                        self.world.get_distance(a, wall)
-                        <= self.min_collision_distance
-                    ] += self.collision_reward
+            for wall in self.walls:
+                self.collision_rew[
+                    self.world.get_distance(a, wall) <= self.min_collision_distance
+                ] += self.collision_reward
 
         # Energy reward
         if self.energy_reward_coeff != 0: