diff --git a/Cargo.toml b/Cargo.toml
index 876dd20..e9c0c89 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -11,3 +11,4 @@ edition = "2018"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 [dependencies]
+rand = "0.7.2"
diff --git a/src/ai.rs b/src/ai.rs
index 90c8e7b..af83c7f 100644
--- a/src/ai.rs
+++ b/src/ai.rs
@@ -1,57 +1,190 @@
+//! Provides functionality for creating single player games.
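+//!
+//! # Examples
+//!
+//! A minimal sketch of a single player game loop. For brevity the AI picks
+//! every move here; a real application would ask a human player for one
+//! side's positions instead.
+//!
+//! ```
+//! use open_ttt_lib::ai;
+//! use open_ttt_lib::game;
+//!
+//! let mut game = game::Game::new();
+//! let ai_opponent = ai::AIOpponent::new(0.0);
+//!
+//! // Keep asking the AI for moves until the game is over.
+//! while let Some(position) = ai_opponent.get_move(&game) {
+//!     game.do_move(position).unwrap();
+//! }
+//!
+//! assert!(game.state().is_game_over());
+//! ```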
+
+use rand::seq::SliceRandom;
+use rand::Rng;
+use std::collections::HashMap;
 use std::collections::HashSet;
+use std::hash::BuildHasher;
 
 use crate::game;
 
-pub struct AIOpponent {}
+/// Provides a computer-controlled AI opponent.
+///
+/// This can be used to create single player games or implement a hint system
+/// for human users.
+pub struct AIOpponent {
+    mistake_probability: f64,
+}
 
 impl AIOpponent {
+    /// Constructs a new AI opponent.
+    ///
+    /// The mistake probability indicates how likely the AI is to fail to consider
+    /// various situations. A value of 0.0 makes the AI play a perfect game.
+    /// A value of 1.0 causes the AI to always pick a random position. Values
+    /// less than 0.0 are set to 0.0 and values greater than 1.0 are set to 1.0.
+    ///
+    /// # Examples
+    ///
+    /// Construct an unbeatable AI opponent:
+    /// ```
+    /// use open_ttt_lib::ai;
+    ///
+    /// let mistake_probability = 0.0;
+    /// let unbeatable_opponent = ai::AIOpponent::new(mistake_probability);
+    /// ```
+    ///
+    /// Construct an AI opponent that randomly picks a position:
+    /// ```
+    /// use open_ttt_lib::ai;
+    ///
+    /// let mistake_probability = 1.0;
+    /// let rando = ai::AIOpponent::new(mistake_probability);
+    /// ```
     pub fn new(mistake_probability: f64) -> Self {
-        unimplemented!();
+        Self {
+            // Clamp to [0.0, 1.0] as promised by the documentation;
+            // `gen_bool` panics for probabilities outside this range.
+            mistake_probability: mistake_probability.max(0.0).min(1.0),
+        }
     }
 
-    pub fn get_move(self, game: &game::Game) -> Option<game::Position> {
-        unimplemented!();
+    /// Gets the position the AI opponent wishes to move, based on the provided game.
+    ///
+    /// `None` is returned if the game is over. The AI opponent never tries to
+    /// select an invalid position, that is, a position that is not free.
+    ///
+    /// # Examples
+    /// ```
+    /// use open_ttt_lib::ai;
+    /// use open_ttt_lib::game;
+    ///
+    /// let game = game::Game::new();
+    /// let ai_opponent = ai::AIOpponent::new(0.0);
+    ///
+    /// match ai_opponent.get_move(&game) {
+    ///     Some(position) => assert!(game.can_move(position)),
+    ///     None => panic!("The game is over so the AI opponent cannot make a move."),
+    /// };
+    /// ```
+    pub fn get_move(&self, game: &game::Game) -> Option<game::Position> {
+        // Return the best position based on evaluating the game.
+        let outcomes = self.evaluate_game(game);
+        best_position(&outcomes)
     }
 
+    /// Evaluates each free position in the provided game.
+    ///
+    /// Each free position in the game is mapped to an outcome for the AI opponent.
+    /// If the game is over, an empty map is returned.
+    ///
+    /// This functionality is useful if you wish to examine how the AI opponent
+    /// viewed the game. For example, this can be helpful for creating a hint
+    /// system to help human players pick a position or when fine-tuning the
+    /// AI difficulty settings.
+    ///
+    /// # Examples
+    /// ```
+    /// use open_ttt_lib::ai;
+    /// use open_ttt_lib::game;
+    ///
+    /// let game = game::Game::new();
+    /// let ai_opponent = ai::AIOpponent::new(0.0);
+    ///
+    /// let position_map = ai_opponent.evaluate_game(&game);
+    ///
+    /// for (position, outcome) in position_map {
+    ///     assert!(game.can_move(position));
+    ///     println!("position: {:?} outcome: {:?}", position, outcome);
+    /// }
+    /// ```
+    pub fn evaluate_game(&self, game: &game::Game) -> HashMap<game::Position, AIOutcome> {
+        let mut outcomes = HashMap::new();
+
+        // We only evaluate the game if it is not over; otherwise we cannot
+        // determine which player the AI is playing as.
+        if !game.state().is_game_over() {
+            // Determine which player the AI is playing as.
+            let ai_player = AIPlayer::from_game_state(game.state());
+
+            // For each free position, evaluate the consequences of using that
+            // position, recording the position along with its outcome.
+            for position in game.free_positions() {
+                let outcome = self.evaluate_position(&game, position, ai_player);
+                outcomes.insert(position, outcome);
+            }
+        }
+
+        outcomes
+    }
+
+    // Evaluates what the outcome of the game would be if a specific position
+    // is selected.
+    //
+    // **Note** this is a recursive function.
     fn evaluate_position(
-        self,
+        &self,
         game: &game::Game,
         position: game::Position,
         ai_player: AIPlayer,
     ) -> AIOutcome {
-        unimplemented!();
-    }
-}
+        debug_assert!(
+            game.can_move(position),
+            "Cannot move into the provided position, {:?}. Thus, the position \
+             cannot be evaluated. Ensure the game is not over and the position \
+             is free. This condition is the result of a bug in the open_ttt_lib \
+             used by this application.",
+            position
+        );
+
+        // Check to see if the AI should make a mistake. If so, don't consider
+        // this position.
+        if self.should_make_mistake() {
+            return AIOutcome::Unknown;
+        }
 
-#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
-enum AIPlayer {
-    PlayerX,
-    PlayerO,
-}
+        // Clone the game so we can try out the move without modifying the original game.
+        let mut game = game.clone();
+        let state = game.do_move(position).unwrap();
 
-impl AIPlayer {
-    // Determines which player the AI is playing as, X or O, based on the current
-    // state of the game.
-    //
-    // Panics if the game is over.
-    fn from_game_state(state: game::State) -> Self {
-        match state {
-            game::State::PlayerXMove => Self::PlayerX,
-            game::State::PlayerOMove => Self::PlayerO,
-            _ => panic!(
-                "Cannot determine the AI player since the game is over. \
-                 This condition is the result of a bug in the \
-                 open_ttt_lib used by this application."
-            ),
+        // Check to see if the game is over. If so, return the outcome of the
+        // game from the AI's perspective, e.g. win, loss, or cat's game.
+        if state.is_game_over() {
+            return AIOutcome::from_game_state(state, ai_player);
+        }
+
+        // The game is not over, so evaluate each of the remaining free positions.
+        // Note: the game automatically takes care of switching between each
+        // player's turn.
+        let mut outcomes = HashSet::new();
+        for free_position in game.free_positions() {
+            let outcome = self.evaluate_position(&game, free_position, ai_player);
+            outcomes.insert(outcome);
        }
+
+        // The AI assumes the other player plays a perfect game, so return the
+        // worst outcome that was found.
+        worst_outcome(&outcomes)
+    }
+
+    // Indicates if the AI opponent should make a mistake by skipping examining
+    // part of the tree.
+    fn should_make_mistake(&self) -> bool {
+        // Use a random number generator to get a boolean per the mistake probability.
+        rand::thread_rng().gen_bool(self.mistake_probability)
     }
 }
 
+/// Represents a game outcome for the AI opponent.
 #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
-enum AIOutcome {
+pub enum AIOutcome {
+    /// The AI player wins the game.
     Win,
+
+    /// The AI player loses the game.
     Loss,
+
+    /// The game results in a cat's game.
     CatsGame,
+
+    /// The outcome of the game is unknown to the AI player.
     Unknown,
 }
@@ -80,6 +213,92 @@ impl AIOutcome {
     }
 }
 
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
+enum AIPlayer {
+    PlayerX,
+    PlayerO,
+}
+
+impl AIPlayer {
+    // Determines which player the AI is playing as, X or O, based on the current
+    // state of the game.
+    //
+    // Panics if the game is over.
+    fn from_game_state(state: game::State) -> Self {
+        match state {
+            game::State::PlayerXMove => Self::PlayerX,
+            game::State::PlayerOMove => Self::PlayerO,
+            _ => panic!(
+                "Cannot determine the AI player since the game is over. \
+                 This condition is the result of a bug in the \
+                 open_ttt_lib used by this application."
+            ),
+        }
+    }
+}
+
+/// Picks a position with the best outcome based on the provided mapping of
+/// positions to outcomes.
+///
+/// The ordering of outcomes from best to worst is: `Win`, `CatsGame`,
+/// `Unknown`, `Loss`. A cat's game is considered better than an unknown
+/// outcome as the AI would rather have the game end in a draw than risk a
+/// loss. If there are multiple positions with the same outcome, one of the
+/// positions is picked at random.
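+///
+/// # Examples
+///
+/// A small illustration of the ordering (the positions used here are arbitrary):
+/// ```
+/// use open_ttt_lib::ai;
+/// use open_ttt_lib::game;
+/// use std::collections::HashMap;
+///
+/// let mut outcomes = HashMap::new();
+/// outcomes.insert(game::Position { row: 0, column: 0 }, ai::AIOutcome::Loss);
+/// outcomes.insert(game::Position { row: 0, column: 1 }, ai::AIOutcome::CatsGame);
+///
+/// // The cat's game position is preferred over the losing one.
+/// assert_eq!(
+///     Some(game::Position { row: 0, column: 1 }),
+///     ai::best_position(&outcomes)
+/// );
+/// ```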
+pub fn best_position<S: BuildHasher>(
+    outcomes: &HashMap<game::Position, AIOutcome, S>,
+) -> Option<game::Position> {
+    // Build a mapping from outcomes to positions so one of the positions with
+    // the best outcome can be selected.
+    let mut outcome_to_position_map = HashMap::new();
+
+    for (position, outcome) in outcomes {
+        if !outcome_to_position_map.contains_key(outcome) {
+            outcome_to_position_map.insert(outcome, Vec::new());
+        }
+        outcome_to_position_map
+            .get_mut(outcome)
+            .unwrap()
+            .push(position);
+    }
+
+    if outcome_to_position_map.contains_key(&AIOutcome::Win) {
+        Some(
+            **outcome_to_position_map
+                .get(&AIOutcome::Win)
+                .unwrap()
+                .choose(&mut rand::thread_rng())
+                .unwrap(),
+        )
+    } else if outcome_to_position_map.contains_key(&AIOutcome::CatsGame) {
+        Some(
+            **outcome_to_position_map
+                .get(&AIOutcome::CatsGame)
+                .unwrap()
+                .choose(&mut rand::thread_rng())
+                .unwrap(),
+        )
+    } else if outcome_to_position_map.contains_key(&AIOutcome::Unknown) {
+        Some(
+            **outcome_to_position_map
+                .get(&AIOutcome::Unknown)
+                .unwrap()
+                .choose(&mut rand::thread_rng())
+                .unwrap(),
+        )
+    } else if outcome_to_position_map.contains_key(&AIOutcome::Loss) {
+        Some(
+            **outcome_to_position_map
+                .get(&AIOutcome::Loss)
+                .unwrap()
+                .choose(&mut rand::thread_rng())
+                .unwrap(),
+        )
+    } else {
+        None
+    }
+}
+
 // Gets the worst possible outcome based on the provided outcomes.
 //
 // The ordering of outcomes returned are: `Loss`, `CatsGame`, `Win`.
@@ -104,6 +323,266 @@ fn worst_outcome(outcomes: &HashSet<AIOutcome>) -> AIOutcome {
 mod tests {
     use super::*;
 
+    // Create several game boards for use with the unit tests. An asterisk (*)
+    // marks the last position placed.
+
+    // +---+---+---+
+    // | X | O | X |
+    // +---+---+---+
+    // |   | O |   |
+    // +---+---+---+
+    // | X |   | O*|
+    // +---+---+---+
+    const PLAYER_X_MOVE_WITH_WIN_AVAILABLE: [game::Position; 6] = [
+        game::Position { row: 0, column: 0 },
+        game::Position { row: 0, column: 1 },
+        game::Position { row: 0, column: 2 },
+        game::Position { row: 1, column: 1 },
+        game::Position { row: 2, column: 0 },
+        game::Position { row: 2, column: 2 },
+    ];
+
+    // +---+---+---+
+    // | X | O | X |
+    // +---+---+---+
+    // | X*| O |   |
+    // +---+---+---+
+    // | X |   | O |
+    // +---+---+---+
+    const PLAYER_X_WIN: [game::Position; 7] = [
+        game::Position { row: 0, column: 0 },
+        game::Position { row: 0, column: 1 },
+        game::Position { row: 0, column: 2 },
+        game::Position { row: 1, column: 1 },
+        game::Position { row: 2, column: 0 },
+        game::Position { row: 2, column: 2 },
+        game::Position { row: 1, column: 0 },
+    ];
+
+    // Helper function that creates a game where the provided positions are
+    // owned. The positions are marked in the order contained in the slice.
+    //
+    // # Panics
+    // Panics if the game's `do_move` method returns an error.
+    fn create_game(owned_positions: &[game::Position]) -> game::Game {
+        let mut game = game::Game::new();
+        for position in owned_positions {
+            game.do_move(*position).unwrap();
+        }
+
+        game
+    }
+
+    #[test]
+    fn ai_opponent_get_move_when_game_is_over_should_be_none() {
+        // Create a game where the game is over.
+        let game = create_game(&PLAYER_X_WIN);
+        let ai_opponent = AIOpponent::new(0.0);
+        let expected_position = None;
+
+        let actual_position = ai_opponent.get_move(&game);
+
+        assert_eq!(
+            expected_position,
+            actual_position,
+            "\nGame board used for this test: \n{}",
+            game.board()
+        );
+    }
+
+    #[test]
+    fn ai_opponent_get_move_when_zero_mistake_probability_should_pick_winning_position() {
+        // Create a game where the AI player has a winning move available.
+        // The flawless AI should pick this position.
+        let game = create_game(&PLAYER_X_MOVE_WITH_WIN_AVAILABLE);
+        let ai_opponent = AIOpponent::new(0.0);
+        let expected_position = game::Position { row: 1, column: 0 };
+
+        let actual_position = ai_opponent.get_move(&game).unwrap();
+
+        assert_eq!(
+            expected_position,
+            actual_position,
+            "\nGame board used for this test: \n{}",
+            game.board()
+        );
+    }
+
+    #[test]
+    fn ai_opponent_evaluate_game_when_game_over_should_be_empty_map() {
+        let game = create_game(&PLAYER_X_WIN);
+        let mistake_probability = 0.0;
+        let ai_opponent = AIOpponent::new(mistake_probability);
+        let expected_outcomes = HashMap::new();
+
+        let actual_outcomes = ai_opponent.evaluate_game(&game);
+
+        assert_eq!(
+            expected_outcomes,
+            actual_outcomes,
+            "\nGame board used for this test: \n{}",
+            game.board()
+        );
+    }
+
+    #[test]
+    fn ai_opponent_evaluate_game_when_zero_mistake_probability_should_evaluate_all_positions() {
+        // Create a game where the AI player has a winning move available.
+        // The flawless AI should determine the outcome of all remaining positions.
+        let game = create_game(&PLAYER_X_MOVE_WITH_WIN_AVAILABLE);
+        let mistake_probability = 0.0;
+        let ai_opponent = AIOpponent::new(mistake_probability);
+        let mut expected_outcomes = HashMap::new();
+        expected_outcomes.insert(game::Position { row: 1, column: 0 }, AIOutcome::Win);
+        expected_outcomes.insert(game::Position { row: 1, column: 2 }, AIOutcome::Loss);
+        expected_outcomes.insert(game::Position { row: 2, column: 1 }, AIOutcome::CatsGame);
+
+        let actual_outcomes = ai_opponent.evaluate_game(&game);
+
+        assert_eq!(
+            expected_outcomes,
+            actual_outcomes,
+            "\nGame board used for this test: \n{}",
+            game.board()
+        );
+    }
+
+    #[test]
+    fn ai_opponent_evaluate_game_when_one_mistake_probability_should_see_unknown_outcome_for_all_positions(
+    ) {
+        // Create a game where the AI player has a winning move available.
+        // The AI that always makes mistakes should see the outcome as unknown for all positions.
+        let game = create_game(&PLAYER_X_MOVE_WITH_WIN_AVAILABLE);
+        let mistake_probability = 1.0;
+        let ai_opponent = AIOpponent::new(mistake_probability);
+        let mut expected_outcomes = HashMap::new();
+        expected_outcomes.insert(game::Position { row: 1, column: 0 }, AIOutcome::Unknown);
+        expected_outcomes.insert(game::Position { row: 1, column: 2 }, AIOutcome::Unknown);
+        expected_outcomes.insert(game::Position { row: 2, column: 1 }, AIOutcome::Unknown);
+
+        let actual_outcomes = ai_opponent.evaluate_game(&game);
+
+        assert_eq!(
+            expected_outcomes,
+            actual_outcomes,
+            "\nGame board used for this test: \n{}",
+            game.board()
+        );
+    }
+
+    #[test]
+    fn ai_opponent_best_position_when_outcomes_empty_should_be_none() {
+        let outcomes = HashMap::new();
+        let expected_position = None;
+
+        let actual_position = best_position(&outcomes);
+
+        assert_eq!(expected_position, actual_position);
+    }
+
+    #[test]
+    fn ai_opponent_best_position_when_win_and_cats_game_should_be_win() {
+        let mut outcomes = HashMap::new();
+        outcomes.insert(game::Position { row: 0, column: 0 }, AIOutcome::CatsGame);
+        let expected_position = game::Position { row: 0, column: 1 };
+        outcomes.insert(expected_position, AIOutcome::Win);
+
+        let actual_position = best_position(&outcomes);
+
+        assert_eq!(Some(expected_position), actual_position);
+    }
+
+    #[test]
+    fn ai_opponent_best_position_when_win_and_unknown_should_be_win() {
+        let mut outcomes = HashMap::new();
+        outcomes.insert(game::Position { row: 0, column: 0 }, AIOutcome::Unknown);
+        let expected_position = game::Position { row: 0, column: 1 };
+        outcomes.insert(expected_position, AIOutcome::Win);
+
+        let actual_position = best_position(&outcomes);
+
+        assert_eq!(Some(expected_position), actual_position);
+    }
+
+    #[test]
+    fn ai_opponent_best_position_when_win_and_loss_should_be_win() {
+        let mut outcomes = HashMap::new();
+        outcomes.insert(game::Position { row: 0, column: 0 }, AIOutcome::Loss);
+        let expected_position = game::Position { row: 0, column: 1 };
+        outcomes.insert(expected_position, AIOutcome::Win);
+
+        let actual_position = best_position(&outcomes);
+
+        assert_eq!(Some(expected_position), actual_position);
+    }
+
+    #[test]
+    fn ai_opponent_best_position_when_cats_game_and_loss_should_be_cats_game() {
+        let mut outcomes = HashMap::new();
+        outcomes.insert(game::Position { row: 0, column: 0 }, AIOutcome::Loss);
+        let expected_position = game::Position { row: 0, column: 1 };
+        outcomes.insert(expected_position, AIOutcome::CatsGame);
+
+        let actual_position = best_position(&outcomes);
+
+        assert_eq!(Some(expected_position), actual_position);
+    }
+
+    #[test]
+    fn ai_opponent_best_position_when_cats_game_and_unknown_should_be_cats_game() {
+        let mut outcomes = HashMap::new();
+        outcomes.insert(game::Position { row: 0, column: 0 }, AIOutcome::Unknown);
+        let expected_position = game::Position { row: 0, column: 1 };
+        outcomes.insert(expected_position, AIOutcome::CatsGame);
+
+        let actual_position = best_position(&outcomes);
+
+        assert_eq!(Some(expected_position), actual_position);
+    }
+
+    #[test]
+    fn ai_opponent_best_position_when_unknown_and_loss_should_be_unknown() {
+        let mut outcomes = HashMap::new();
+        outcomes.insert(game::Position { row: 0, column: 0 }, AIOutcome::Loss);
+        let expected_position = game::Position { row: 0, column: 1 };
+        outcomes.insert(expected_position, AIOutcome::Unknown);
+
+        let actual_position = best_position(&outcomes);
+
+        assert_eq!(Some(expected_position), actual_position);
+    }
+
+    #[test]
+    fn ai_opponent_best_position_when_same_outcome_should_pick_random_position() {
+        let mut outcomes = HashMap::new();
+        outcomes.insert(game::Position { row: 0, column: 0 }, AIOutcome::CatsGame);
+        outcomes.insert(game::Position { row: 0, column: 1 }, AIOutcome::CatsGame);
+        outcomes.insert(game::Position { row: 0, column: 2 }, AIOutcome::CatsGame);
+        // A set is used to see which positions were picked.
+        let mut positions_set = HashSet::new();
+
+        // This test exercises code that has random behavior. Therefore, the
+        // action is repeated multiple times to hopefully cover the distribution
+        // of outcomes.
+        const NUM_TIMES: i32 = 300;
+        for _ in 0..NUM_TIMES {
+            let position = best_position(&outcomes);
+            positions_set.insert(position);
+        }
+
+        // Given a sufficient number of times getting the best position, we expect
+        // each position to be returned at least once.
+        assert_eq!(
+            outcomes.len(),
+            positions_set.len(),
+            "This test relies on random behavior. Therefore, it is possible, \
+             although highly unlikely, that the test could fail even if the \
+             code is working as expected. If this happens, try re-running the \
+             test a few times. Continual failures indicate there is a problem \
+             that needs to be addressed in the code, as the requirement of \
+             picking random positions is not being fulfilled."
+        );
+    }
+
     #[test]
     fn ai_player_from_game_state_when_player_X_move_should_be_player_X() {
         let game_state = game::State::PlayerXMove;