Commit
Merge pull request AstuteSource#116 from AstuteSource/test-total-amount
Added a test for the `total_amount_passed` function in `util.py`
laurennevill authored Nov 17, 2023
2 parents 8ec04f6 + 56a5666 commit 0247b00
Showing 4 changed files with 29 additions and 12 deletions.
2 changes: 2 additions & 0 deletions chasten/constants.py
@@ -220,6 +220,7 @@ class Markers:
     Xml: str
     Zero: int
     Zero_Exit: int
+    Percent_Multiplier: int


 markers = Markers(
@@ -246,6 +247,7 @@ class Markers:
     Xml="xml",
     Zero=0,
     Zero_Exit=0,
+    Percent_Multiplier=100,
 )

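For context, `Markers` is the named-constant container that the rest of chasten reads from, and `Percent_Multiplier` gives the magic number 100 a name that the percentage calculation in `util.py` can reference. A minimal sketch of the pattern, assuming `Markers` is a frozen dataclass; its full definition is not part of this diff, so the fields and docstring below are illustrative:

```python
from dataclasses import dataclass


@dataclass(frozen=True)
class Markers:
    """Hold named constant values (illustrative subset of fields)."""

    Zero: int
    Zero_Exit: int
    Percent_Multiplier: int


markers = Markers(Zero=0, Zero_Exit=0, Percent_Multiplier=100)

# the named constant replaces a hard-coded 100 in percentage math
print((3 / 4) * markers.Percent_Multiplier)  # 75.0
```
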
4 changes: 2 additions & 2 deletions chasten/main.py
@@ -726,7 +726,7 @@ def analyze( # noqa: PLR0912, PLR0913, PLR0915
     # add the amount of total matches in each check to the end of each checks output
     output.console.print(f" = {len(match_generator_list)} total matches\n")
     # calculate the final count of matches found
-    total_result = util.total_amount_passed(chasten_results_save, len(check_list))
+    total_result = util.total_amount_passed(check_status_list)
     # display checks passed, total amount of checks, and percentage of checks passed
     output.console.print(
         f":computer: {total_result[0]} / {total_result[1]} checks passed ({total_result[2]}%)\n"
@@ -747,7 +747,7 @@ def analyze( # noqa: PLR0912, PLR0913, PLR0915
     elapsed_time = end_time - start_time

     if not all_checks_passed:
-        output.console.print("\n:sweat: At least one check did not pass.")
+        output.console.print(":sweat: At least one check did not pass.")
     if store_result:
         # writes results of analyze into a markdown file
         analysis_file_dir.write_text(analysis_result, encoding="utf-8")

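With this change, `analyze` hands `total_amount_passed` a plain list of per-check booleans rather than the saved results object and a separate count, so the helper no longer needs to know anything about the result structure. A small sketch of the reporting pattern the diff shows, assuming `output.console` is importable the way `main.py` uses it and using a stand-in list for the statuses gathered during analysis:

```python
from chasten import output, util

# stand-in for the per-check pass/fail statuses collected by analyze()
check_status_list = [True, False]

# one call returns (checks passed, total checks, percentage passed)
total_result = util.total_amount_passed(check_status_list)
output.console.print(
    f":computer: {total_result[0]} / {total_result[1]} checks passed ({total_result[2]}%)\n"
)
```
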
19 changes: 12 additions & 7 deletions chasten/util.py
@@ -68,15 +68,20 @@ def join_and_preserve(data, start, end):
     return constants.markers.Newline.join(data[start:end])


-def total_amount_passed(analyze_result, count_total) -> tuple[int, int, float]:
+def total_amount_passed(check_status_list: list[bool]) -> tuple[int, int, float]:
     """Calculate amount of checks passed in analyze"""
     # attempt calculations for percentage of checks passed
     try:
-        # iterate through check sources to find checks passed
-        list_passed = [x.check.passed for x in analyze_result.sources]
-        # set variables to count true checks and total counts
-        count_true = list_passed.count(True)
+        # calculate total amount of checks in list
+        count_total = len(check_status_list)
+        # count total amount of checks counted as true
+        count_passed = check_status_list.count(True)
         # return tuple of checks passed, total checks, percentage of checks passed
-        return (count_true, count_total, (count_true / count_total) * 100)
-    # return exception when dividing by zero
+        return (
+            count_passed,
+            count_total,
+            (count_passed / count_total) * constants.markers.Percent_Multiplier,
+        )
+    # return exception of zeros when dividing by zero
     except ZeroDivisionError:
         return (0, 0, 0.0)

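Because the refactored helper now operates on an ordinary list of booleans, its behavior is easy to verify in isolation; a quick usage sketch:

```python
from chasten import util

# three of four checks passed -> (passed, total, percentage)
assert util.total_amount_passed([True, True, False, True]) == (3, 4, 75.0)

# an empty list would divide by zero, so the handler returns all zeros
assert util.total_amount_passed([]) == (0, 0, 0.0)
```
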
16 changes: 13 additions & 3 deletions tests/test_util.py
@@ -3,7 +3,8 @@
 import shutil

 import pytest
-from hypothesis import given, strategies
+from hypothesis import given
+from hypothesis import strategies as st

 from chasten import constants, util

@@ -14,14 +15,14 @@ def test_human_readable_boolean() -> None:
     assert util.get_human_readable_boolean(answer=False) == "No"


-@given(answer=strategies.booleans())
+@given(answer=st.booleans())
 @pytest.mark.fuzz
 def test_fuzz_human_readable_boolean(answer: bool) -> None:
     """Use Hypothesis to confirm that the function does not crash."""
     util.get_human_readable_boolean(answer=answer)


-@given(answer=strategies.booleans())
+@given(answer=st.booleans())
 @pytest.mark.fuzz
 def test_fuzz_human_readable_boolean_correct_string(answer: bool) -> None:
     """Use Hypothesis to confirm that the conversion to human-readable works."""
@@ -32,6 +33,15 @@ def test_fuzz_human_readable_boolean_correct_string(answer: bool) -> None:
         assert str_answer == "No"


+@given(check_status_list=st.lists(st.booleans()))
+@pytest.mark.fuzz
+def test_total_amount_passed(check_status_list: list[bool]):
+    stats = util.total_amount_passed(check_status_list)
+
+    assert constants.markers.Zero <= stats[2] <= constants.markers.Percent_Multiplier
+    assert stats[0] <= stats[1]
+
+
 OpSystem = util.get_OS()
 datasette_exec = constants.datasette.Datasette_Executable

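The new Hypothesis test draws arbitrary boolean lists from `st.lists(st.booleans())`, including the empty list, and asserts two invariants: the percentage stays between `Zero` and `Percent_Multiplier`, and the passed count never exceeds the total. A deterministic companion check, shown purely as an illustration rather than part of this commit, would pin exact values for one known input (it reuses the `util` import already at the top of `tests/test_util.py`):

```python
def test_total_amount_passed_known_values() -> None:
    """Confirm exact counts and percentage for a fixed list of check results."""
    # one of four checks passed -> 25.0 percent
    assert util.total_amount_passed([True, False, False, False]) == (1, 4, 25.0)
```
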
