diff --git a/chasten/constants.py b/chasten/constants.py
index 600f42d0..5eca7352 100644
--- a/chasten/constants.py
+++ b/chasten/constants.py
@@ -220,6 +220,7 @@ class Markers:
     Xml: str
     Zero: int
     Zero_Exit: int
+    Percent_Multiplier: int
 
 
 markers = Markers(
@@ -246,6 +247,7 @@ class Markers:
     Xml="xml",
     Zero=0,
     Zero_Exit=0,
+    Percent_Multiplier=100,
 )
 
 
diff --git a/chasten/main.py b/chasten/main.py
index 109e8e54..839a78a2 100644
--- a/chasten/main.py
+++ b/chasten/main.py
@@ -726,7 +726,7 @@ def analyze(  # noqa: PLR0912, PLR0913, PLR0915
         # add the amount of total matches in each check to the end of each checks output
         output.console.print(f" = {len(match_generator_list)} total matches\n")
     # calculate the final count of matches found
-    total_result = util.total_amount_passed(chasten_results_save, len(check_list))
+    total_result = util.total_amount_passed(check_status_list)
     # display checks passed, total amount of checks, and percentage of checks passed
     output.console.print(
         f":computer: {total_result[0]} / {total_result[1]} checks passed ({total_result[2]}%)\n"
     )
@@ -747,7 +747,7 @@ def analyze(  # noqa: PLR0912, PLR0913, PLR0915
     elapsed_time = end_time - start_time
 
     if not all_checks_passed:
-        output.console.print("\n:sweat: At least one check did not pass.")
+        output.console.print(":sweat: At least one check did not pass.")
     if store_result:
         # writes results of analyze into a markdown file
         analysis_file_dir.write_text(analysis_result, encoding="utf-8")
diff --git a/chasten/util.py b/chasten/util.py
index 70e94d64..d763e5f5 100644
--- a/chasten/util.py
+++ b/chasten/util.py
@@ -68,15 +68,20 @@ def join_and_preserve(data, start, end):
     return constants.markers.Newline.join(data[start:end])
 
 
-def total_amount_passed(analyze_result, count_total) -> tuple[int, int, float]:
+def total_amount_passed(check_status_list: list[bool]) -> tuple[int, int, float]:
     """Calculate amount of checks passed in analyze"""
+    # attempt calculations for percentage of checks passed
     try:
-        # iterate through check sources to find checks passed
-        list_passed = [x.check.passed for x in analyze_result.sources]
-        # set variables to count true checks and total counts
-        count_true = list_passed.count(True)
+        # calculate total amount of checks in list
+        count_total = len(check_status_list)
+        # count total amount of checks counted as true
+        count_passed = check_status_list.count(True)
         # return tuple of checks passed, total checks, percentage of checks passed
-        return (count_true, count_total, (count_true / count_total) * 100)
-    # return exception when dividing by zero
+        return (
+            count_passed,
+            count_total,
+            (count_passed / count_total) * constants.markers.Percent_Multiplier,
+        )
+    # return exception of zeros when dividing by zero
     except ZeroDivisionError:
         return (0, 0, 0.0)
diff --git a/tests/test_util.py b/tests/test_util.py
index 86e75d62..5b263bdd 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -3,7 +3,8 @@
 import shutil
 
 import pytest
-from hypothesis import given, strategies
+from hypothesis import given
+from hypothesis import strategies as st
 
 from chasten import constants, util
 
@@ -14,14 +15,14 @@ def test_human_readable_boolean() -> None:
     assert util.get_human_readable_boolean(answer=False) == "No"
 
 
-@given(answer=strategies.booleans())
+@given(answer=st.booleans())
 @pytest.mark.fuzz
 def test_fuzz_human_readable_boolean(answer: bool) -> None:
     """Use Hypothesis to confirm that the function does not crash."""
     util.get_human_readable_boolean(answer=answer)
 
 
-@given(answer=strategies.booleans())
+@given(answer=st.booleans())
 @pytest.mark.fuzz
 def test_fuzz_human_readable_boolean_correct_string(answer: bool) -> None:
     """Use Hypothesis to confirm that the conversion to human-readable works."""
@@ -32,6 +33,15 @@ def test_fuzz_human_readable_boolean_correct_string(answer: bool) -> None:
         assert str_answer == "No"
 
 
+@given(check_status_list=st.lists(st.booleans()))
+@pytest.mark.fuzz
+def test_total_amount_passed(check_status_list: list[bool]):
+    stats = util.total_amount_passed(check_status_list)
+
+    assert constants.markers.Zero <= stats[2] <= constants.markers.Percent_Multiplier
+    assert stats[0] <= stats[1]
+
+
 OpSystem = util.get_OS()
 datasette_exec = constants.datasette.Datasette_Executable
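
Note (not part of the patch): a minimal usage sketch of the reworked helper; the
input list below is made up for illustration.

    from chasten import util

    # three checks, two of which passed
    passed, total, percentage = util.total_amount_passed([True, True, False])
    assert (passed, total) == (2, 3)
    assert round(percentage, 2) == 66.67

    # an empty list no longer divides by zero: the except branch returns zeros
    assert util.total_amount_passed([]) == (0, 0, 0.0)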