Skip to content

Commit

Permalink
Add pypy support (#80)
Browse files Browse the repository at this point in the history
* Add pypy support

* Switch to builtin statistics library. Drop scipy requirement due to negligible difference in benchmark performance.

* Using custom ranking function

* Drop 3.7 support due to nearing EOL

* Skip doctest for *.rst files

* Adjust relative tolerance of doctest number comparison

* Change doctest glob to *.py

* Add changelog fragments
  • Loading branch information
vivekjoshy committed Dec 11, 2022
1 parent ff33d76 commit 35e42ed
Show file tree
Hide file tree
Showing 13 changed files with 45 additions and 19 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ jobs:
fail-fast: false
matrix:
os: [Ubuntu, MacOS, Windows]
python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"]
python-version: ["3.8", "3.9", "3.10", "3.11", "pypy3.8", "pypy3.9"]
experimental: [false]
include:
- os: Ubuntu
Expand Down Expand Up @@ -42,7 +42,7 @@ jobs:
python -m build
twine check dist/*
black --check .
pytest . --cov --cov-report=term-missing -vv
pytest . --cov --cov-report=term-missing -vv --doctest-glob="*.py"
coverage report
coverage html
coverage xml --ignore-errors
Expand Down
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,7 @@ Sometimes you want to know what the likelihood is someone will place at a partic
>>> team_1, team_2, team_3 = [a1, a2, a3], [b1, b2, b3], [c1, c2, c3]
>>> draw_probability = predict_draw(teams=[team_1, team_2, team_3])
>>> draw_probability
0.3295385074666581
0.329538507466658
>>> rank_probability = predict_rank(teams=[team_1, team_2, team_3])
>>> rank_probability
[(1, 0.4450361350569973), (2, 0.19655022513040032), (3, 0.028875132345944337)]
Expand Down
1 change: 0 additions & 1 deletion benchmark/benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.validation import ValidationError, Validator

from benchmark.processors import Competition
from openskill.models import (
BradleyTerryFull,
BradleyTerryPart,
Expand Down
1 change: 1 addition & 0 deletions changes/80.breaking.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Drop support for Python 3.7
1 change: 1 addition & 0 deletions changes/80.doc.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Improve SEO of Documentation
1 change: 1 addition & 0 deletions changes/80.feature.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Add support for PyPy 3.8 and 3.9
4 changes: 2 additions & 2 deletions docs/manual.rst
Original file line number Diff line number Diff line change
Expand Up @@ -137,10 +137,10 @@ Sometimes you want to know what the likelihood is someone will place at a partic
>>> team_1, team_2, team_3 = [a1, a2, a3], [b1, b2, b3], [c1, c2, c3]
>>> draw_probability = predict_draw(teams=[team_1, team_2, team_3])
>>> draw_probability
0.3295385074666581
0.329538507466658
>>> rank_probability = predict_rank(teams=[team_1, team_2, team_3])
>>> rank_probability
[(1, 0.4450361350569973), (2, 0.19655022513040032), (3, 0.028875132345944337)]
[(1, 0.4450361350569973), (2, 0.19655022513040035), (3, 0.02887513234594437)]
>>> sum([y for x, y in rank_probability]) + draw_probability
1.0
Expand Down
6 changes: 2 additions & 4 deletions openskill/rate.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,12 @@
from functools import reduce
from typing import List, Optional, Tuple, Union

from scipy.stats import rankdata

from openskill.constants import Constants, beta
from openskill.constants import mu as default_mu
from openskill.constants import sigma as default_sigma
from openskill.models.plackett_luce import PlackettLuce
from openskill.statistics import phi_major, phi_major_inverse
from openskill.util import rankings, unwind
from openskill.util import rank_minimum, rankings, unwind


class Rating:
Expand Down Expand Up @@ -461,7 +459,7 @@ def predict_rank(
]

ranked_probability = [abs(_) for _ in win_probability]
ranks = list(rankdata(ranked_probability, method="min"))
ranks = list(rank_minimum(ranked_probability))
max_ordinal = max(ranks)
ranks = [abs(_ - max_ordinal) + 1 for _ in ranks]
predictions = list(zip(ranks, ranked_probability))
Expand Down
7 changes: 3 additions & 4 deletions openskill/statistics.py
Original file line number Diff line number Diff line change
@@ -1,16 +1,15 @@
import sys
from statistics import NormalDist

import scipy.stats

normal = scipy.stats.norm(0, 1)
normal = NormalDist()


def phi_major(x):
    """Cumulative distribution function of the standard normal distribution.

    ``normal`` is the module-level ``statistics.NormalDist()`` instance.
    """
    return normal.cdf(x)


def phi_major_inverse(x):
    """Quantile function (inverse CDF) of the standard normal distribution.

    :param x: A probability in the open interval (0, 1).
    :return: The value whose standard-normal CDF equals ``x``.
    """
    # ``NormalDist.inv_cdf`` is the stdlib equivalent of SciPy's
    # ``norm.ppf``; the stale ``normal.ppf(x)`` line (which made this
    # call unreachable) is removed.
    return normal.inv_cdf(x)


def phi_minor(x):
Expand Down
22 changes: 22 additions & 0 deletions openskill/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,3 +119,25 @@ def sorter(teams):
return [x for x, _ in sorted_list], [x for _, x in sorted_list]

return sorter(teams) if isinstance(teams, list) else sorter


def rank_simple(vector):
    """Return the indices that would sort *vector* in ascending order."""
    # Stable sort: ties keep their original relative order, exactly as
    # sorting index positions by ``vector.__getitem__`` would.
    ordered_pairs = sorted(enumerate(vector), key=lambda pair: pair[1])
    return [index for index, _ in ordered_pairs]


def rank_minimum(a):
    """Rank the values of ``a`` with "minimum" tie handling.

    Drop-in replacement for ``scipy.stats.rankdata(a, method="min")``:
    each element gets a 1-based rank, and all members of a tie group
    receive the smallest rank of that group.

    :param a: A sequence of mutually comparable values.
    :return: A list of integer ranks, one per element of ``a``.
    """
    n = len(a)
    # Stable argsort of ``a`` (inlined so this function stands alone).
    i_vec = sorted(range(n), key=a.__getitem__)
    s_vec = [a[rank] for rank in i_vec]
    duplicate_count = 0
    new_array = [0] * n
    for i in range(n):
        duplicate_count += 1
        # A tie group ends at the last element or when the next sorted
        # value differs from the current one.
        if i == n - 1 or s_vec[i] != s_vec[i + 1]:
            # Assign every member of the group its minimum (first) rank.
            for j in range(i - duplicate_count + 1, i + 1):
                new_array[i_vec[j]] = i - duplicate_count + 2
            duplicate_count = 0
    return new_array
3 changes: 2 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ type = [

[tool.pytest.ini_options]
minversion = "6.0"
addopts = "-ra --strict-markers --ignore=docs/conf.py --ignore=setup.py --ignore=benchmark --ignore=ci --ignore=.eggs --doctest-modules --doctest-glob=/*.rst --tb=short"
addopts = "-ra --strict-markers --ignore=docs/conf.py --ignore=setup.py --ignore=benchmark --ignore=ci --ignore=.eggs --doctest-modules --doctest-glob=/*.py --tb=short"
norecursedirs = [
".git",
".tox",
Expand All @@ -31,6 +31,7 @@ python_files = [
"*_test.py",
"tests.py"
]
doctest_optionflags = "NUMBER"

[build-system]
requires = ["wheel", "setuptools"]
Expand Down
5 changes: 2 additions & 3 deletions setup.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -16,11 +16,12 @@ classifiers =
License :: OSI Approved :: MIT License
Natural Language :: English
Operating System :: OS Independent
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: 3.11
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
keywords =
ranking
trueskill
Expand All @@ -36,8 +37,6 @@ project_urls =
[options]
packages = find:
python_requires = >=3.7
install_requires =
scipy

[options.extras_require]
docs =
Expand Down
7 changes: 6 additions & 1 deletion tests/predictions/test_predict_rank.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
def test_predict_rank():
a1 = Rating(mu=34, sigma=0.25)
a2 = Rating(mu=32, sigma=0.25)
a3 = Rating(mu=34, sigma=0.25)
a3 = Rating(mu=30, sigma=0.25)

b1 = Rating(mu=24, sigma=0.5)
b2 = Rating(mu=22, sigma=0.5)
Expand All @@ -22,5 +22,10 @@ def test_predict_rank():
draw_probability = predict_draw(teams=[team_1, team_2, team_3])
assert total_rank_probability + draw_probability == pytest.approx(1)

ranks = predict_rank(teams=[team_1, team_1, team_1])
total_rank_probability = sum([y for x, y in ranks])
draw_probability = predict_draw(teams=[team_1, team_1, team_1])
assert total_rank_probability + draw_probability == pytest.approx(1)

with pytest.raises(ValueError):
predict_rank(teams=[team_1])

0 comments on commit 35e42ed

Please sign in to comment.