diff --git a/app/elo/README.md b/app/elo/README.md
new file mode 100644
index 0000000..3c144f5
--- /dev/null
+++ b/app/elo/README.md
@@ -0,0 +1,5 @@
+Implementation of an Elo-like algorithm.
+
+(Yes, [this](https://github.com/sublee/elo/) exists, and a [more mathematical treatment](https://cs.stanford.edu/people/paulliu/files/www-2021-elor.pdf), but eh, a bit of self implementation doesn't hurt in a personal project)
+
+Ref. [here](http://web.archive.org/web/20130308190719/http://elo.divergentinformatics.com/) for testing, and [here](http://sradack.blogspot.com/2008/06/elo-rating-system-multiple-players.html) for implementation.
\ No newline at end of file
diff --git a/app/elo/__init__.py b/app/elo/__init__.py
new file mode 100644
index 0000000..732abb5
--- /dev/null
+++ b/app/elo/__init__.py
@@ -0,0 +1 @@
+from .elo import rerank  # noqa
diff --git a/app/elo/elo.py b/app/elo/elo.py
new file mode 100644
index 0000000..f01a53e
--- /dev/null
+++ b/app/elo/elo.py
@@ -0,0 +1,38 @@
+from typing import List, Sequence
+
+K_FACTOR = 10
+BETA = 200
+
+
+def rerank(ratings: Sequence[int], winning_player_idx: int) -> List[float]:
+    """Return the post-game rating of every player, given the winner's index."""
+    expectations = _expectations(ratings)
+    return [
+        rating
+        + (K_FACTOR * ((1 if winning_player_idx == idx else 0) - expectations[idx]))
+        for idx, rating in enumerate(ratings)
+    ]
+
+
+def _expectations(ratings: Sequence[int]) -> List[float]:
+    """Expected score of each player against the rest of the field."""
+    return [
+        _calculate_expectation(rating, ratings[:idx] + ratings[idx + 1 :])
+        for idx, rating in enumerate(ratings)
+    ]
+
+
+def _calculate_expectation(rating: int, other_ratings: Sequence[int]) -> float:
+    """Expected score of `rating`, normalised by the number of pairings."""
+    return sum(
+        [_pairwise_expectation(rating, other_rating) for other_rating in other_ratings]
+    ) / (float(len(other_ratings) + 1) * len(other_ratings) / 2)
+
+
+def _pairwise_expectation(rating: int, other_rating: int) -> float:
+    """
+    Gives the expected score of `rating` against `other_rating`
+    """
+    diff = float(other_rating) - float(rating)
+    f_factor = 2 * BETA  # rating disparity
+    return 1.0 / (1 + 10 ** (diff / f_factor))
diff --git a/tests/elo/test_elo.py b/tests/elo/test_elo.py
new file mode 100644
index 0000000..0ce44ef
--- /dev/null
+++ b/tests/elo/test_elo.py
@@ -0,0 +1,19 @@
+from typing import Sequence
+
+from app.elo import rerank
+
+
+def test():
+    # From https://github.com/sublee/elo/blob/master/elotests.py
+    assert _almost_equal(rerank([1200, 800], 1), [1190.909, 809.091])
+    # Couldn't find any test-cases for multiplayer games.
+
+
+def _almost_equal(actual: Sequence, expected: Sequence) -> bool:
+    assert len(actual) == len(expected)
+    for f, s in zip(actual, expected):
+        # abs() on both sides: the original one-sided check `(s - f) > tol`
+        # passed whenever `actual` overshot `expected`, however wrong it was.
+        if abs(s - f) > (0.00001 * abs(f)):
+            return False
+    return True