24 changes: 12 additions & 12 deletions server.py
@@ -26,7 +26,7 @@


class GrpcAgent:
def __init__(self, agent_type, uniform_number, logger) -> None:
def __init__(self, agent_type, uniform_number, logger, debug) -> None:
self.agent_type: pb2.AgentType = agent_type
self.uniform_number: int = uniform_number
self.agent: IAgent = None
@@ -37,8 +37,8 @@ def __init__(self, agent_type, uniform_number, logger) -> None:
self.agent = SampleCoachAgent(self.logger)
elif self.agent_type == pb2.AgentType.TrainerT:
self.agent = SampleTrainerAgent(self.logger)
self.agent.set_debug_mode(debug)
self.debug_mode: bool = False


def GetAction(self, state: pb2.State):
self.logger.debug(f"================================= cycle={state.world_model.cycle}.{state.world_model.stoped_cycle} =================================")
@@ -57,9 +57,7 @@ def GetAction(self, state: pb2.State):

def GetPlayerActions(self, state: pb2.State):
self.agent.update_actions(state.world_model)
res = pb2.PlayerActions()
res.actions.extend(self.agent.get_actions())
return res
return self.agent.get_actions()

def GetBestPlannerAction(self, request: pb2.BestPlannerActionRequest) -> int:
self.logger.debug(f"GetBestPlannerAction cycle:{request.state.world_model.cycle} pairs:{len(request.pairs)} unum:{request.state.register_response.uniform_number}")
@@ -74,11 +72,11 @@ def GetBestPlannerAction(self, request: pb2.BestPlannerActionRequest) -> int:

def GetCoachActions(self, state: pb2.State):
self.agent.update_actions(state.world_model)
return pb2.CoachActions(actions=self.agent.get_actions())
return self.agent.get_actions()

def GetTrainerActions(self, state: pb2.State):
self.agent.update_actions(state.world_model)
return pb2.TrainerActions(actions=self.agent.get_actions())
return self.agent.get_actions()

def SetServerParams(self, server_params: pb2.ServerParam):
try:
@@ -108,10 +106,11 @@ def SetPlayerType(self, player_type: pb2.PlayerType):
return pb2.PlayerActions()

class GameHandler(pb2_grpc.GameServicer):
def __init__(self, shared_lock, shared_number_of_connections) -> None:
def __init__(self, shared_lock, shared_number_of_connections, debug) -> None:
self.agents: dict[int, GrpcAgent] = {}
self.shared_lock = shared_lock
self.shared_number_of_connections = shared_number_of_connections
self.debug = debug

def GetPlayerActions(self, state: pb2.State, context):
main_logger.debug(f"GetPlayerActions unum {state.register_response.uniform_number} at {state.world_model.cycle}")
@@ -169,7 +168,7 @@ def Register(self, register_request: pb2.RegisterRequest, context):
agent_type=agent_type)
logger = setup_logger(f"agent{register_response.uniform_number}_{register_response.client_id}", log_dir,
console_level=player_console_logging_level, file_level=player_file_logging_level)
self.agents[self.shared_number_of_connections.value] = GrpcAgent(agent_type, uniform_number, logger)
self.agents[self.shared_number_of_connections.value] = GrpcAgent(agent_type, uniform_number, logger, self.debug)
return register_response
except Exception as e:
main_logger.error(f"Error in Register: {e}")
@@ -190,9 +189,9 @@ def GetBestPlannerAction(self, pairs: pb2.BestPlannerActionRequest, context):
return res


def serve(port, shared_lock, shared_number_of_connections):
def serve(port, shared_lock, shared_number_of_connections, debug):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=22))
game_service = GameHandler(shared_lock, shared_number_of_connections)
game_service = GameHandler(shared_lock, shared_number_of_connections, debug)
pb2_grpc.add_GameServicer_to_server(game_service, server)
server.add_insecure_port(f'[::]:{port}')
server.start()
@@ -208,6 +207,7 @@ def main():
parser.add_argument('-l', '--log-dir', required=False, help='The directory of the log file',
default=f'logs/{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}')
parser.add_argument('--disable-log-file', required=False, help='Disable logging to a file', default=False, action='store_true')
parser.add_argument('-d', '--debug', required=False, help='Enable debug mode for agents', default=False, action='store_true')

args = parser.parse_args()

@@ -222,7 +222,7 @@ def main():
shared_lock = Lock() # Create a Lock for synchronization
shared_number_of_connections = manager.Value('i', 0)

serve(args.rpc_port, shared_lock, shared_number_of_connections)
serve(args.rpc_port, shared_lock, shared_number_of_connections, args.debug)

if __name__ == '__main__':
main()
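With this change, agent debug mode is controlled from the command line: the new --debug flag is parsed in main(), handed through serve() and GameHandler to each GrpcAgent, and forwarded to the underlying agent via set_debug_mode. A minimal sketch of launching the server with debug enabled, mirroring the sys.argv approach used by start.py below; the port value is an illustrative assumption, not a project default:

import sys
from server import main

# Hypothetical invocation; 50051 is only an example port.
sys.argv = ['server.py', '--rpc-port', '50051', '--debug']
main()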
2 changes: 1 addition & 1 deletion src/behaviors/bhv_block.py
@@ -81,7 +81,7 @@ def execute(self, agent: "SamplePlayerAgent") -> bool:
max_dash_power=100.0,
distance_threshold=0.5)))
agent.add_action(PlayerAction(neck_turn_to_ball_or_scan=Neck_TurnToBallOrScan(count_threshold=0)))
agent.add_log_circle(LoggerLevel.BLOCK, future_ball_pos.x, future_ball_pos.y, 0.5, 'red', True)
agent.add_log_circle(LoggerLevel.BLOCK, future_ball_pos.x(), future_ball_pos.y(), 0.5, 'red', True)
return True
else:
agent.logger.debug(f'Bhv_Block: False: tm {our_player.uniform_number} can block in {block_cycles} in {future_ball_pos=}')
3 changes: 3 additions & 0 deletions src/behaviors/bhv_kick_planner.py
@@ -4,6 +4,7 @@
from pyrusgeom.soccer_math import *
from pyrusgeom.geom_2d import *
from service_pb2 import *
from src.behaviors.bhv_shoot import BhvShoot

if TYPE_CHECKING:
from src.sample_player_agent import SamplePlayerAgent
@@ -39,6 +40,8 @@ def __init__(self):
def execute(self, agent: "SamplePlayerAgent"):
agent.logger.debug("--- WithBallDecisionMaker ---")

BhvShoot().execute(agent)

agent.add_action(
PlayerAction(helios_offensive_planner=self._get_helios_offensive_planner(agent))
)
21 changes: 21 additions & 0 deletions src/behaviors/bhv_shoot.py
@@ -0,0 +1,21 @@
from typing import TYPE_CHECKING
from src.interfaces.IAgent import IAgent
from src.utils.tools import Tools
from pyrusgeom.geom_2d import *
from pyrusgeom.soccer_math import *
from service_pb2 import *
from src.interfaces.IBehavior import IBehavior

if TYPE_CHECKING:
from src.sample_player_agent import SamplePlayerAgent

class BhvShoot(IBehavior):
def __init__(self):
pass

def execute(self, agent: "SamplePlayerAgent"):
agent.logger.debug("BhvShoot.execute")
# To enable this behavior, set ignore_shootInPreprocess to True in sample_player_agent.py;
# otherwise, the proxy will execute the shoot action automatically.

agent.add_action(PlayerAction(helios_shoot=HeliosShoot()))
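As the comment above notes, BhvShoot only takes effect when the proxy's built-in shoot preprocessing is disabled. A minimal sketch of the corresponding get_actions override on SamplePlayerAgent, following the ignore_shootInPreprocess flag introduced in sample_player_agent.py in this PR; setting the flag unconditionally is an assumption for illustration (the PR itself only sets it on the starter-code path):

def get_actions(self) -> PlayerActions:
    # Sketch: let BhvShoot handle shooting instead of the proxy's preprocess step.
    res = PlayerActions()
    res.actions.extend(self.actions)
    res.ignore_shootInPreprocess = True  # assumption: always disable the automatic shoot
    return res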
51 changes: 26 additions & 25 deletions src/behaviors/starter/setplay/bhv_starter_setplay_kickin.py
@@ -201,33 +201,34 @@ def do_move(self, agent: "SamplePlayerAgent"):
# If the agent has sufficient stamina and the nearest opponent is close to the target point, move away from that opponent
if wm.self.stamina > agent.server_params.stamina_max * 0.9:
nearest_opp = Tools.get_opponent_nearest_to_self(agent)
nearest_opp_pos = Vector2D(nearest_opp.position.x, nearest_opp.position.y)

if nearest_opp and nearest_opp_pos.dist(target_point) < 3.0:
add_vec = ball_position - target_point
add_vec.set_length(3.0)

time_val = wm.cycle % 60
if time_val < 20:
pass
elif time_val < 40:
target_point += add_vec.rotated_vector(90.0)
else:
target_point += add_vec.rotated_vector(-90.0)

target_point.set_x(
min(
max(-agent.server_params.pitch_half_length, target_point.x()),
agent.server_params.pitch_half_length,

if nearest_opp:
nearest_opp_pos = Vector2D(nearest_opp.position.x, nearest_opp.position.y)
if nearest_opp_pos.dist(target_point) < 3.0:
add_vec = ball_position - target_point
add_vec.set_length(3.0)

time_val = wm.cycle % 60
if time_val < 20:
pass
elif time_val < 40:
target_point += add_vec.rotated_vector(90.0)
else:
target_point += add_vec.rotated_vector(-90.0)

target_point.set_x(
min(
max(-agent.server_params.pitch_half_length, target_point.x()),
agent.server_params.pitch_half_length,
)
)
)
target_point.set_y(
min(
max(-agent.server_params.pitch_half_width, target_point.y()),
agent.server_params.pitch_half_width,
target_point.set_y(
min(
max(-agent.server_params.pitch_half_width, target_point.y()),
agent.server_params.pitch_half_width,
)
)
)
avoid_opponent = True
avoid_opponent = True

dash_power = setplay.get_set_play_dash_power(agent)
dist_thr = wm.ball.dist_from_self * 0.07
30 changes: 27 additions & 3 deletions src/interfaces/IAgent.py
@@ -1,10 +1,15 @@
from typing import TYPE_CHECKING
from typing import Union
from abc import ABC, abstractmethod
from typing import Union
from service_pb2 import *
import logging
from src.utils.memory import Memory


if TYPE_CHECKING:
from src.utils.tools import Tools

class IAgent(ABC):
def __init__(self, logger) -> None:
super().__init__()
@@ -14,6 +19,7 @@ def __init__(self, logger) -> None:
self.player_params: Union[PlayerParam, None] = None
self.player_types: dict[PlayerType] = {}
self.debug_mode: bool = False
self.memory: Memory = Memory()
self.logger: logging.Logger = logger

def set_server_params(self, server_param: ServerParam):
@@ -24,6 +30,8 @@ def set_player_params(self, player_param: PlayerParam):

def set_player_types(self, player_type: PlayerType):
self.player_types[player_type.id] = player_type
from src.utils.tools import Tools
Tools.update_dash_distance_table(player_type, self)

def get_player_type(self, id: int) -> PlayerType:
if id < 0:
@@ -82,9 +90,25 @@ def add_log_circle(self, level: LoggerLevel, center_x: float, center_y: float, r
)
)
))

def add_log_line(self, level: LoggerLevel, start_x: float, start_y: float, end_x: float, end_y: float, color: str):
if not self.debug_mode:
return
self.add_action(PlayerAction(
log=Log(
add_line=AddLine(
level=level,
start=RpcVector2D(x=start_x, y=start_y),
end=RpcVector2D(x=end_x, y=end_y),
color=color,
)
)
)
)

def add_action(self, action: Union[PlayerAction, CoachAction, TrainerAction]):
self.actions.append(action)

def get_actions(self) -> list[Union[PlayerAction, CoachAction, TrainerAction]]:
return self.actions

@abstractmethod
def get_actions(self) -> Union[PlayerActions, CoachActions, TrainerActions]:
pass
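The new add_log_line helper complements add_log_circle: it appends a Log action carrying an AddLine message and returns early when debug_mode is False, so drawing calls can stay in behavior code and only produce output in debug runs. A hedged usage sketch inside a behavior's execute method, modeled on the add_log_circle call in bhv_block.py; the coordinates and colors are illustrative:

def execute(self, agent: "SamplePlayerAgent") -> bool:
    # Draw a small circle and a line for debugging; intended for runs started with --debug.
    agent.add_log_circle(LoggerLevel.BLOCK, -10.0, 5.0, 0.5, 'red', True)
    agent.add_log_line(LoggerLevel.BLOCK, -10.0, 5.0, 0.0, 0.0, 'blue')
    return True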
9 changes: 8 additions & 1 deletion src/sample_coach_agent.py
@@ -65,4 +65,11 @@ def update_actions(self, wm:WorldModel) -> CoachActions:
# do_helios_substitute=DoHeliosSubstitute()
# ))

self.logger.debug(f'actions: {self.actions}')
self.logger.debug(f'actions: {self.actions}')

def get_actions(self) -> CoachActions:
"""
Get the list of coach actions to be executed
"""

return CoachActions(actions=self.actions)
19 changes: 18 additions & 1 deletion src/sample_player_agent.py
@@ -61,4 +61,21 @@ def get_strategy(self):
Returns:
Strategy: Current strategy instance (FormationStrategy or StarterStrategy)
"""
return self.strategy
return self.strategy

def get_actions(self) -> PlayerActions:
"""
Get the list of player actions to be executed
"""

res = PlayerActions()
res.actions.extend(self.actions)

if self.use_starter_code:
res.ignore_doHeardPassRecieve = True
res.ignore_doIntention = True
res.ignore_shootInPreprocess = True
else:
pass
# res.ignore_shootInPreprocess = True
return res
9 changes: 8 additions & 1 deletion src/sample_trainer_agent.py
@@ -68,4 +68,11 @@ def set_params(self, params):
elif isinstance(params, PlayerType):
self.playerTypes[params.id] = params
else:
raise Exception("Unknown params type")
raise Exception("Unknown params type")

def get_actions(self) -> TrainerActions:
"""
Get the list of trainer actions to be executed
"""

return TrainerActions(actions=self.actions)
4 changes: 4 additions & 0 deletions src/utils/memory.py
@@ -0,0 +1,4 @@

class Memory:
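"""Per-agent storage for cached data, e.g. dash distance tables keyed by player type id."""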
def __init__(self):
self.dash_distance_tables: dict[int, list[float]] = {}
44 changes: 31 additions & 13 deletions src/utils/tools.py
@@ -156,21 +156,39 @@ def estimate_virtual_dash_distance(player: Player, real_speed_max: float):
return d

@staticmethod
def cycles_to_reach_distance(dash_dist, real_speed_max):
def update_dash_distance_table(pt: PlayerType, agent: IAgent):
sp: ServerParam = agent.server_params
agent.memory.dash_distance_tables[pt.id] = [0] * 50
speed = 0.0
reach_dist = 0.0
accel = sp.max_dash_power * pt.dash_power_rate * pt.effort_max
for counter in range(50):
if speed + accel > pt.player_speed_max:
accel = pt.player_speed_max - speed

speed += accel
reach_dist += speed
agent.memory.dash_distance_tables[pt.id][counter] = reach_dist
speed *= pt.player_decay

@staticmethod
def cycles_to_reach_distance(agent: IAgent, player: Player, dash_dist: float):
if dash_dist <= 0.001:
return 0
return int(math.ceil(dash_dist / real_speed_max))
# todo: implement this
# ddc = 0
# for dd in self._dash_distance_table:
# if dash_dist <= dd:
# return ddc
# ddc += 1

# cycle = len(self._dash_distance_table)
# rest_dist = dash_dist - self._dash_distance_table[cycle - 1]
# cycle += int(math.ceil(rest_dist / self.real_speed_max()))
# return cycle
player_type = agent.get_player_type(player.type_id)
dash_distance_table = agent.memory.dash_distance_tables[player_type.id]

it = next((i for i, dist in enumerate(dash_distance_table) if dist >= dash_dist - 0.001), len(dash_distance_table))

if it < len(dash_distance_table):
return it + 1

rest_dist = dash_dist - dash_distance_table[-1]
cycle = len(dash_distance_table)

cycle += math.ceil(rest_dist / player_type.real_speed_max)

return cycle

@staticmethod
def predict_player_turn_cycle(sp: ServerParam, ptype: PlayerType, player_body: AngleDeg, player_speed, target_dist,
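The rewrite precomputes, per player type, the cumulative distance reachable after 1..50 consecutive full-power dashes (acceleration from max_dash_power, dash_power_rate and effort_max, capped at player_speed_max, with speed multiplied by player_decay each cycle), and cycles_to_reach_distance then becomes a table lookup with a real_speed_max fallback beyond the table. A self-contained sketch of the same construction and lookup; the parameter values are illustrative assumptions, not real server or player settings:

import math

# Illustrative parameters (assumptions, not actual server/player values).
MAX_DASH_POWER = 100.0
DASH_POWER_RATE = 0.006
EFFORT_MAX = 1.0
PLAYER_SPEED_MAX = 1.05
PLAYER_DECAY = 0.4

def build_dash_distance_table(n: int = 50) -> list[float]:
    table = []
    speed = 0.0
    reach_dist = 0.0
    accel = MAX_DASH_POWER * DASH_POWER_RATE * EFFORT_MAX
    for _ in range(n):
        if speed + accel > PLAYER_SPEED_MAX:  # cap acceleration at the speed limit
            accel = PLAYER_SPEED_MAX - speed
        speed += accel
        reach_dist += speed
        table.append(reach_dist)
        speed *= PLAYER_DECAY  # speed decays before the next cycle
    return table

def cycles_to_reach(table: list[float], dash_dist: float, real_speed_max: float) -> int:
    if dash_dist <= 0.001:
        return 0
    for i, dist in enumerate(table):
        if dist >= dash_dist - 0.001:
            return i + 1
    # Past the table: assume the remaining distance is covered at top speed.
    return len(table) + math.ceil((dash_dist - table[-1]) / real_speed_max)

table = build_dash_distance_table()
print(cycles_to_reach(table, 10.0, PLAYER_SPEED_MAX))  # prints 11 with these values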
2 changes: 2 additions & 0 deletions start.py
@@ -34,6 +34,8 @@ def server_main():
sys.argv = ['server.py', '--rpc-port', str(rpc_port), '--log-dir', log_dir]
if args.disable_log_file:
sys.argv += ['--disable-log-file']
if args.debug:
sys.argv += ['--debug']
main()

# Start the main function as a new process