
Commit

Fix test suite and small bug (#37) (#38)
- Adds repeats to the test suite (see the usage sketch below)
- Adds random players to the test suite
- Fuzz testing for running complete games until the end
- Fixes a small bug where multiple pots with different parities of
  winners would not add to starting_pot
SirRender00 authored Mar 7, 2022
1 parent fbe6b74 commit 97e05f3
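
A minimal sketch of how a test opts into the new `repeat(n)` marker registered in tests/conftest.py below (the test name and repeat count here are hypothetical, for illustration only):

```python
import random

import pytest


# Hypothetical example test, not part of this commit. With the new
# tests/conftest.py in place, pytest_generate_tests expands this into
# 5 independently reported runs with ids like [1-5] ... [5-5].
@pytest.mark.repeat(5)
def test_shuffled_deck_keeps_all_cards():
    deck = list(range(52))  # stand-in for a 52-card deck
    random.shuffle(deck)
    assert sorted(deck) == list(range(52))
```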
Showing 9 changed files with 347 additions and 194 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -1,7 +1,7 @@
 # texasholdem
 A python package for Texas Hold 'Em Poker.
 
-[Current Release Version v0.4.3](https://github.com/SirRender00/texasholdem/releases/tag/v0.4.3)
+[Current Release Version v0.4.4](https://github.com/SirRender00/texasholdem/releases/tag/v0.4.4)
 
 [v1.0.0 Roadmap](https://github.com/SirRender00/texasholdem/wiki/Version-1.0.0-Roadmap)

2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "texasholdem"
-version = "0.4.3"
+version = "0.4.4"
 description = "A texasholdem python package"
 authors = ["Evyn Machi <[email protected]>"]
 keywords = ['texasholdem', 'holdem', 'poker']
47 changes: 47 additions & 0 deletions tests/conftest.py
@@ -7,6 +7,8 @@
 from typing import Union
 from pathlib import Path
 
+import pytest
+
 import tests


@@ -26,6 +28,51 @@
 """
 
 
+def pytest_configure(config):
+    """ Configure pytest """
+    config.addinivalue_line(
+        'markers',
+        'repeat(n): run the given test function `n` times.')
+
+
+@pytest.fixture()
+def __pytest_repeat_step_number(request):
+    """ Internal marker for how many times to repeat a test """
+    marker = request.node.get_closest_marker("repeat")
+    count = marker and marker.args[0]
+    if count > 1:
+        return request.param
+    return None
+
+
+@pytest.hookimpl(trylast=True)
+def pytest_generate_tests(metafunc):
+    """ Generate number of tests corresponding to repeat marker """
+    marker = metafunc.definition.get_closest_marker('repeat')
+    count = int(marker.args[0]) if marker else 1
+    if count > 1:
+        metafunc.fixturenames.append("__pytest_repeat_step_number")
+
+        def make_progress_id(curr, total=count):
+            return f'{curr + 1}-{total}'
+
+        metafunc.parametrize(
+            '__pytest_repeat_step_number',
+            range(count),
+            indirect=True,
+            ids=make_progress_id
+        )
+
+
+@pytest.hookimpl(tryfirst=True, hookwrapper=True)
+def pytest_runtest_makereport(item, call):
+    # pylint: disable=unused-argument
+    """ Used for attaching reports to make available to fixtures """
+    outcome = yield
+    rep = outcome.get_result()
+    setattr(item, 'rep_' + rep.when, rep)
+
+
 def strip_comments(history_path: Union[str, os.PathLike]) -> str:
     """
     Arguments:
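The `pytest_runtest_makereport` hookwrapper above attaches `rep_setup`, `rep_call`, and `rep_teardown` reports to each test item so that fixtures can inspect the outcome after the test body runs. A minimal sketch of that consumption pattern, with a hypothetical fixture name not present in this commit:

```python
import pytest


# Hypothetical fixture reading the rep_* attributes that
# pytest_runtest_makereport attaches to the test item.
@pytest.fixture()
def dump_history_on_failure(request):
    yield  # let the test body run first
    call_report = getattr(request.node, "rep_call", None)
    if call_report is not None and call_report.failed:
        print(f"{request.node.name} failed; a game-history dump could go here")
```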
88 changes: 44 additions & 44 deletions tests/evaluator/test_evaluator.py
@@ -23,77 +23,77 @@
                                       MAX_HAND_RANK)
 
 
+@pytest.mark.repeat(FUZZ_COMPARE_WITH_BOARD)
 @pytest.mark.parametrize("board_len", (0, 3, 4, 5))
 def test_fuzz_compare_with_board(board_len):
     """
     Tests if evaluate returns proper comparisons for two random hands, each with
     two or five cards behind and sharing zero, three, four, or five cards on the board.
     """
-    for _ in range(FUZZ_COMPARE_WITH_BOARD):
-        class_choices = list(evaluator.LOOKUP_TABLE.RANK_CLASS_TO_STRING.keys())
-        class1, class2 = random.choices(class_choices, k=2)
+    class_choices = list(evaluator.LOOKUP_TABLE.RANK_CLASS_TO_STRING.keys())
+    class1, class2 = random.choices(class_choices, k=2)
 
-        board = [] if board_len == 0 else generate_sample_hand(class1)[:board_len]
+    board = [] if board_len == 0 else generate_sample_hand(class1)[:board_len]
 
-        hand1 = generate_sample_hand(class1, board=board)
+    hand1 = generate_sample_hand(class1, board=board)
 
-        hand2 = None
-        while not hand2:
-            try:
-                hand2 = generate_sample_hand(class2, board=board)
-            except ValueError:
-                class_choices.remove(class2)
-                class2 = random.choice(class_choices)
+    hand2 = None
+    while not hand2:
+        try:
+            hand2 = generate_sample_hand(class2, board=board)
+        except ValueError:
+            class_choices.remove(class2)
+            class2 = random.choice(class_choices)
 
-        score1, score2 = evaluator.evaluate(hand1, board), evaluator.evaluate(hand2, board)
+    score1, score2 = evaluator.evaluate(hand1, board), evaluator.evaluate(hand2, board)
 
-        hand1 = list(min(itertools.combinations(board + hand1, 5),
-                         key=lambda hand: evaluator.evaluate([], list(hand))))
-        hand2 = list(min(itertools.combinations(board + hand2, 5),
-                         key=lambda hand: evaluator.evaluate([], list(hand))))
+    hand1 = list(min(itertools.combinations(board + hand1, 5),
+                     key=lambda hand: evaluator.evaluate([], list(hand))))
+    hand2 = list(min(itertools.combinations(board + hand2, 5),
+                     key=lambda hand: evaluator.evaluate([], list(hand))))
 
-        if class1 > class2:
-            assert score1 > score2, f"Expected {hand2} to be better than {hand1}"
-        elif class1 < class2:
-            assert score1 < score2, f"Expected {hand1} to be better than {hand2}"
-        elif less_hands_same_class(class1, hand1, hand2):
-            assert score1 > score2, f"Expected {hand2} to be better than {hand1}"
-        elif less_hands_same_class(class1, hand2, hand1):
-            assert score1 < score2, f"Expected {hand1} to be better than {hand2}"
-        else:
-            assert score1 == score2, f"Expected {hand1} and {hand2} to have the same score"
+    if class1 > class2:
+        assert score1 > score2, f"Expected {hand2} to be better than {hand1}"
+    elif class1 < class2:
+        assert score1 < score2, f"Expected {hand1} to be better than {hand2}"
+    elif less_hands_same_class(class1, hand1, hand2):
+        assert score1 > score2, f"Expected {hand2} to be better than {hand1}"
+    elif less_hands_same_class(class1, hand2, hand1):
+        assert score1 < score2, f"Expected {hand1} to be better than {hand2}"
+    else:
+        assert score1 == score2, f"Expected {hand1} and {hand2} to have the same score"
 
 
+@pytest.mark.repeat(GETTER_CONVENIENCE_RUNS)
 def test_get_rank_class():
     """
     Tests if get_rank_class() returns the correct rank class
     """
-    for _ in range(GETTER_CONVENIENCE_RUNS):
-        class1 = random.choice(list(evaluator.LOOKUP_TABLE.RANK_CLASS_TO_STRING.keys()))
-        hand = generate_sample_hand(class1)
-        score = evaluator.evaluate([], hand)
-        assert evaluator.get_rank_class(score) == class1
+    class1 = random.choice(list(evaluator.LOOKUP_TABLE.RANK_CLASS_TO_STRING.keys()))
+    hand = generate_sample_hand(class1)
+    score = evaluator.evaluate([], hand)
+    assert evaluator.get_rank_class(score) == class1
 
 
+@pytest.mark.repeat(GETTER_CONVENIENCE_RUNS)
 def test_rank_to_string():
     """
     Tests if rank_to_string() returns the correct string
     """
-    for _ in range(GETTER_CONVENIENCE_RUNS):
-        class1 = random.choice(list(evaluator.LOOKUP_TABLE.RANK_CLASS_TO_STRING.keys()))
-        hand = generate_sample_hand(class1)
-        score = evaluator.evaluate([], hand)
-        assert evaluator.rank_to_string(score) \
-            == evaluator.LOOKUP_TABLE.RANK_CLASS_TO_STRING[class1]
+    class1 = random.choice(list(evaluator.LOOKUP_TABLE.RANK_CLASS_TO_STRING.keys()))
+    hand = generate_sample_hand(class1)
+    score = evaluator.evaluate([], hand)
+    assert evaluator.rank_to_string(score) \
+        == evaluator.LOOKUP_TABLE.RANK_CLASS_TO_STRING[class1]
 
 
+@pytest.mark.repeat(GETTER_CONVENIENCE_RUNS)
 def test_five_card_percentage():
     """
     Tests if get_five_card_rank_percentage() returns the correct percentage
     """
-    for _ in range(GETTER_CONVENIENCE_RUNS):
-        class1 = random.choice(list(evaluator.LOOKUP_TABLE.RANK_CLASS_TO_STRING.keys()))
-        hand = generate_sample_hand(class1)
-        score = evaluator.evaluate([], hand)
-        assert math.isclose(evaluator.get_five_card_rank_percentage(score),
-                            1 - float(score) / float(MAX_HAND_RANK))
+    class1 = random.choice(list(evaluator.LOOKUP_TABLE.RANK_CLASS_TO_STRING.keys()))
+    hand = generate_sample_hand(class1)
+    score = evaluator.evaluate([], hand)
+    assert math.isclose(evaluator.get_five_card_rank_percentage(score),
+                        1 - float(score) / float(MAX_HAND_RANK))
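
Because each repetition is now a separate parametrized case, the ids produced by `make_progress_id` can be selected like any other parametrized test. A small sketch of a programmatic run, assuming the repository layout above (the `-k` filter is illustrative, and the total in ids such as `test_get_rank_class[3-100]` depends on `GETTER_CONVENIENCE_RUNS`):

```python
import pytest

# Illustrative invocation: run only the repeated rank-class tests; each
# repetition reports under its own id, e.g. test_get_rank_class[1-N].
if __name__ == "__main__":
    raise SystemExit(pytest.main(
        ["-v", "tests/evaluator/test_evaluator.py", "-k", "test_get_rank_class"]
    ))
```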