Skip to content

Commit

Permalink
Merge pull request #131 from kiudee/128_fix_concurrency
Browse files Browse the repository at this point in the history
Fix match parser not working correctly with concurrency > 1
  • Loading branch information
kiudee committed Jun 26, 2021
2 parents 8fe3da7 + cdf822f commit bd89f3d
Show file tree
Hide file tree
Showing 3 changed files with 34 additions and 0 deletions.
2 changes: 2 additions & 0 deletions HISTORY.rst
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@ History

0.7.3 (2021-06-26)
------------------
* Fix the match parser producing incorrect results when concurrency > 1 is
  used for playing matches.
* Fix the server for distributed tuning trying to compute the current optimum
before a model has been fit.

Expand Down
26 changes: 26 additions & 0 deletions tests/test_local.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,32 @@ def test_parse_experiment_result():
assert_almost_equal(score, -2.7958800173440745)
assert_almost_equal(error, 1.9952678343378125)

# Test that the result is correct when games finish out of the order in which
# they were started. This can happen with concurrency > 1.
teststr = """Started game 1 of 4 (engine1 vs engine2)
Started game 2 of 4 (engine2 vs engine1)
Started game 3 of 4 (engine1 vs engine2)
Started game 4 of 4 (engine2 vs engine1)
Finished game 4 (engine2 vs engine1): 0-1 {Black mates}
Score of engine1 vs engine2: 1 - 0 - 0 [0.375] 1
Finished game 1 (engine1 vs engine2): 1/2-1/2 {Draw by stalemate}
Score of engine1 vs engine2: 1 - 0 - 1 [0.000] 2
Finished game 2 (engine2 vs engine1): 1-0 {White mates}
Score of engine1 vs engine2: 1 - 1 - 1 [0.250] 3
Finished game 3 (engine1 vs engine2): 0-1 {Black mates}
Score of engine1 vs engine2: 1 - 2 - 1 [0.167] 4
... engine1 playing White: 0 - 2 - 0 [0.000] 2
... engine1 playing Black: 1 - 0 - 1 [0.750] 2
... White vs Black: 0 - 3 - 1 [0.125] 4
Elo difference: -88.7 +/- nan, LOS: 28.2 %, DrawRatio: 25.0 %
Finished match
"""
score, error = parse_experiment_result(
teststr, n_dirichlet_samples=1000, random_state=0
)
assert_almost_equal(score, 0.38764005203222596)
assert_almost_equal(error, 0.6255020676255081)


def test_reduce_ranges():
space = normalize_dimensions([(0.0, 1.0), ("a", "b", "c")])
Expand Down
6 changes: 6 additions & 0 deletions tune/local.py
Original file line number Diff line number Diff line change
Expand Up @@ -126,6 +126,12 @@ def parse_experiment_result(
)
diffs = np.diff(array, axis=0, prepend=np.array([[0, 0, 0]]))

# Parse the order in which games finished, so the pentanomial scores can be
# computed from game pairs in their original (started) order.
finished = np.array(
[int(x) - 1 for x in re.findall(r"Finished game ([0-9]+)", outstr)]
)
diffs = diffs[np.argsort(finished)]

counts = {"WW": 0, "WD": 0, "WL/DD": 0, "LD": 0, "LL": 0}
for i in range(0, len(diffs) - 1, 2):
match = diffs[i] + diffs[i + 1]
Expand Down

0 comments on commit bd89f3d

Please sign in to comment.