From 216c675c0e74a87d0ecfe7d6deb3f12075bf19e0 Mon Sep 17 00:00:00 2001
From: Chezka Quinola
Date: Thu, 24 Oct 2024 15:27:54 -0400
Subject: [PATCH 1/7] add: added --check-include, --check-exclude, and --show-failures flags

---
 gatorgrade/main.py          | 50 +++++++++++++++++++--
 gatorgrade/output/output.py | 88 ++++++++++++++++++++++++++++---------
 2 files changed, 114 insertions(+), 24 deletions(-)

diff --git a/gatorgrade/main.py b/gatorgrade/main.py
index 42bbbd81..e7a90317 100644
--- a/gatorgrade/main.py
+++ b/gatorgrade/main.py
@@ -45,24 +45,68 @@ def gatorgrade(
         3. the name of the file or environment variable\
         4. use 'env md GITHUB_STEP_SUMMARY' to create GitHub job summary in GitHub Action",
     ),
+    output_limit: int = typer.Option(
+        None,
+        "--output-limit",
+        "-l",
+        help="The maximum number of lines to store in an environment variable. Example: '--output-limit 1000'",
+    ),
+    check_include: str = typer.Option(
+        None,
+        "--check-include",
+        "-i",
+        help="Description of the checks to include. Example: '--check-include \"Complete all TODOs\"'",
+    ),
+    check_exclude: str = typer.Option(
+        None,
+        "--check-exclude",
+        "-e",
+        help="Description of the checks to exclude. Example: '--check-exclude \"Complete all TODOs\"'",
+    ),
+    check_status: str = typer.Option(
+        None,
+        "--check-status",
+        "-s",
+        help="Filter checks by their status (pass or fail). Example: '--check-status pass'",
+    ),
+    show_failures: bool = typer.Option(
+        False,
+        "--show-failures",
+        "-f",
+        help="Only show the failed checks.",
+    ),
 ):
     """Run the GatorGrader checks in the specified gatorgrade.yml file."""
     # if ctx.subcommand is None then this means
     # that, by default, gatorgrade should run in checking mode
     if ctx.invoked_subcommand is None:
         # parse the provided configuration file
-        checks = parse_config(filename)
+        (checks, match) = parse_config(filename, check_include, check_exclude)
         # there are valid checks and thus the
         # tool should run them with run_checks
         if len(checks) > 0:
-            checks_status = run_checks(checks, report)
+            checks_status = run_checks(
+                checks, report, output_limit, check_status, show_failures
+            )
         # no checks were created and this means
         # that, most likely, the file was not
         # valid and thus the tool cannot run checks
         else:
             checks_status = False
             console.print()
-            console.print(f"The file {filename} either does not exist or is not valid.")
+            if match is False:
+                if check_include:
+                    console.print(
+                        f"The check {check_include} does not exist in the file {filename}."
+                    )
+                if check_exclude:
+                    console.print(
+                        f"The check {check_exclude} does not exist in the file {filename}."
+                    )
+            else:
+                console.print(
+                    f"The file {filename} either does not exist or is not valid."
+                )
             console.print("Exiting now!")
             console.print()
         # at least one of the checks did not pass or
diff --git a/gatorgrade/output/output.py b/gatorgrade/output/output.py
index 8fc94c5b..1da289f5 100644
--- a/gatorgrade/output/output.py
+++ b/gatorgrade/output/output.py
@@ -285,7 +285,13 @@ def write_json_or_md_file(file_name, content_type, content):
 
 
 def run_checks(
-    checks: List[Union[ShellCheck, GatorGraderCheck]], report: Tuple[str, str, str]
+    checks: List[Union[ShellCheck, GatorGraderCheck]],
+    report: Tuple[str, str, str],
+    output_limit: int = None,
+    check_status: str = None,
+    show_failures: bool = False,  # Added this parameter
+    check_include: str = None,  # Added this parameter
+    check_exclude: str = None,  # Added this parameter
 ) -> bool:
     """Run shell and GatorGrader checks and display whether each has passed or failed.
 
@@ -330,29 +336,69 @@ def run_checks(
         # there were results from running checks
         # and thus they must be displayed
         if result is not None:
-            result.print()
-            results.append(result)
+            if check_status:
+                if check_status == "pass" and result.passed:
+                    result.print()
+                    results.append(result)
+                elif check_status == "fail" and not result.passed:
+                    result.print()
+                    results.append(result)
+            else:
+                results.append(result)
     # determine if there are failures and then display them
-    failed_results = list(filter(lambda result: not result.passed, results))
     # print failures list if there are failures to print
     # and print what ShellCheck command that Gatorgrade ran
-    if len(failed_results) > 0:
-        print("\n-~- FAILURES -~-\n")
-        for result in failed_results:
-            # main.console.print("This is a result")
-            # main.console.print(result)
-            result.print(show_diagnostic=True)
-            # this result is an instance of CheckResult
-            # that has a run_command field that is some
-            # value that is not the default of an empty
-            # string and thus it should be displayed;
-            # the idea is that displaying this run_command
-            # will give the person using Gatorgrade a way
-            # to quickly run the command that failed
-            if result.run_command != "":
-                rich.print(
-                    f"[blue]   → Run this command: [green]{result.run_command}\n"
-                )
+    if show_failures:
+        failed_results = list(filter(lambda result: not result.passed, results))
+        if len(failed_results) > 0:
+            print("\n-~- FAILURES -~-\n")
+            for result in failed_results:
+                # main.console.print("This is a result")
+                # main.console.print(result)
+                result.print(show_diagnostic=True)
+                # this result is an instance of CheckResult
+                # that has a run_command field that is some
+                # value that is not the default of an empty
+                # string and thus it should be displayed;
+                # the idea is that displaying this run_command
+                # will give the person using Gatorgrade a way
+                # to quickly run the command that failed
+                if result.run_command != "":
+                    rich.print(
+                        f"[blue]   → Run this command: [green]{result.run_command}\n"
+                    )
+        else:
+            for result in results:  # Print all results if show_failures is False
+                result.print()
+            # Check for included and excluded checks
+            if check_include or check_exclude:
+                filtered_results = results
+
+                if check_include:
+                    filtered_results = [
+                        r for r in results if check_include in r.description
+                    ]
+
+                if check_exclude:
+                    filtered_results = [
+                        r for r in filtered_results if check_exclude not in r.description
+                    ]
+
+                if len(filtered_results) > 0:
+                    print("\n-~- INCLUDED / EXCLUDED CHECKS -~-\n")
+                    for result in filtered_results:
+                        if not result.passed:
+                            result.print(
+                                show_diagnostic=True
+                            )  # Show diagnostics for failing included/excluded checks
+                        else:
+                            result.print()  # Print normally for passing checks
+                        # Append results (from the other branch)
+                        results.append(result)
+
+    # Determine if there are failures and then display them
+    failed_results = list(filter(lambda result: not result.passed, results))
+
     # determine how many of the checks passed and then
     # compute the total percentage of checks passed
     passed_count = len(results) - len(failed_results)
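
A quick way to exercise the options added in PATCH 1/7 is through Typer's test runner. The sketch below assumes that `gatorgrade.main` exposes the Typer `app` object and that a valid `gatorgrade.yml` sits in the current working directory; it is an illustration of how the flags are meant to be used, not part of the patch itself.

```python
# Minimal sketch: drive the flags added in PATCH 1/7 via Typer's CliRunner.
# Assumes gatorgrade.main exposes the Typer `app` object and that a valid
# gatorgrade.yml file exists in the current working directory.
from typer.testing import CliRunner

from gatorgrade.main import app

runner = CliRunner()

# run only the checks whose description mentions the TODO task
result = runner.invoke(app, ["--check-include", "Complete all TODOs"])
print(result.output)

# display only the failing checks and cap the stored output at 1000 lines
result = runner.invoke(app, ["--show-failures", "--output-limit", "1000"])
print(result.output)

# keep only the checks that currently pass
result = runner.invoke(app, ["--check-status", "pass"])
print(result.output)
```

The same flags can also be passed directly on the command line once the package is installed, for example `gatorgrade --show-failures`.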

From 153f1613c3c8b4b03208fee3e6529008eacff207 Mon Sep 17 00:00:00 2001
From: Chezka Quinola
Date: Wed, 30 Oct 2024 11:54:33 -0400
Subject: [PATCH 2/7] fix: show summary when --show-failures is run

---
 gatorgrade/output/output.py | 90 ++++++++++++++++++-------------------
 1 file changed, 45 insertions(+), 45 deletions(-)

diff --git a/gatorgrade/output/output.py b/gatorgrade/output/output.py
index 1da289f5..989a80ef 100644
--- a/gatorgrade/output/output.py
+++ b/gatorgrade/output/output.py
@@ -284,6 +284,10 @@ def write_json_or_md_file(file_name, content_type, content):
         ) from e
 
 
+from pathlib import Path
+from typing import List, Union, Tuple
+
+
 def run_checks(
     checks: List[Union[ShellCheck, GatorGraderCheck]],
     report: Tuple[str, str, str],
@@ -295,8 +299,8 @@
 ) -> bool:
     """Run shell and GatorGrader checks and display whether each has passed or failed.
 
-    Also, print a list of all failed checks with their diagnostics and a summary message that
-    shows the overall fraction of passed checks.
+    Also, print a list of all failed checks with their diagnostics and a summary message that
+    shows the overall fraction of passed checks.
 
     Args:
         checks: The list of shell and GatorGrader checks to run.
@@ -345,6 +349,7 @@ def run_checks(
                     results.append(result)
             else:
                 results.append(result)
+
     # determine if there are failures and then display them
     # print failures list if there are failures to print
     # and print what ShellCheck command that Gatorgrade ran
@@ -368,57 +373,52 @@ def run_checks(
                         f"[blue]   → Run this command: [green]{result.run_command}\n"
                     )
         else:
-            for result in results:  # Print all results if show_failures is False
+            print("No failures detected!")
+            for result in results:
                 result.print()
-            # Check for included and excluded checks
-            if check_include or check_exclude:
-                filtered_results = results
-
-                if check_include:
-                    filtered_results = [
-                        r for r in results if check_include in r.description
-                    ]
-
-                if check_exclude:
-                    filtered_results = [
-                        r for r in filtered_results if check_exclude not in r.description
-                    ]
-
-                if len(filtered_results) > 0:
-                    print("\n-~- INCLUDED / EXCLUDED CHECKS -~-\n")
-                    for result in filtered_results:
-                        if not result.passed:
-                            result.print(
-                                show_diagnostic=True
-                            )  # Show diagnostics for failing included/excluded checks
-                        else:
-                            result.print()  # Print normally for passing checks
-                        # Append results (from the other branch)
-                        results.append(result)
-
-    # Determine if there are failures and then display them
-    failed_results = list(filter(lambda result: not result.passed, results))
+    else:
+        for result in results:  # Print all results if show_failures is False
+            result.print()
+
+    # Check for included and excluded checks
+    if check_include or check_exclude:
+        filtered_results = results
+
+        if check_include:
+            filtered_results = [r for r in results if check_include in r.description]
+
+        if check_exclude:
+            filtered_results = [
+                r for r in filtered_results if check_exclude not in r.description
+            ]
+
+        if len(filtered_results) > 0:
+            print("\n-~- INCLUDED / EXCLUDED CHECKS -~-\n")
+            for result in filtered_results:
+                if not result.passed:
+                    result.print(
+                        show_diagnostic=True
+                    )  # Show diagnostics for failing included/excluded checks
+                else:
+                    result.print()  # Print normally for passing checks
+
+    # compute summary results and display them in the console
     # determine how many of the checks passed and then
     # compute the total percentage of checks passed
-    passed_count = len(results) - len(failed_results)
-    # prevent division by zero if no results
-    if len(results) == 0:
-        percent = 0
-    else:
-        percent = round(passed_count / len(results) * 100)
-    # if the report is wanted, create output in line with their specifications
-    if all(report):
-        report_output_data = create_report_json(passed_count, results, percent)
-        configure_report(report, report_output_data)
-    # compute summary results and display them in the console
-    summary = f"Passed {passed_count}/{len(results)} ({percent}%) of checks for {Path.cwd().name}!"
-    summary_color = "green" if passed_count == len(results) else "bright white"
+    failed_count = len([result for result in results if not result.passed])
+    passed_count = len(results) - failed_count
+    total = len(results)
+    percent = round((passed_count / total) * 100) if total > 0 else 0
+    summary = (
+        f"Passed {passed_count}/{total} ({percent}%) of checks for {Path.cwd().name}!"
+    )
+    summary_color = "green" if passed_count == total else "bright white"
     print_with_border(summary, summary_color)
+
     # determine whether or not the run was a success or not:
     # if all of the tests pass then the function returns True;
     # otherwise the function must return False
-    summary_status = True if passed_count == len(results) else False
+    summary_status = passed_count == total
     return summary_status
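
PATCH 2/7 also rewrites the summary arithmetic at the end of `run_checks`. The snippet below replays that arithmetic with hypothetical pass/fail values (simple stand-ins, not real CheckResult objects) to show the rounding and the division-by-zero guard.

```python
# Standalone sketch of the summary arithmetic from PATCH 2/7, replayed with
# hypothetical pass/fail values instead of real CheckResult objects.
results = [True, True, False, True]  # stand-ins for each result.passed value

failed_count = len([passed for passed in results if not passed])
passed_count = len(results) - failed_count
total = len(results)
# the conditional expression guards against an empty list of results
percent = round((passed_count / total) * 100) if total > 0 else 0

print(f"Passed {passed_count}/{total} ({percent}%) of checks")
# prints: Passed 3/4 (75%) of checks
```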

From e5f5723cd1393e6a9cf956aef869061ad1f4233a Mon Sep 17 00:00:00 2001
From: Chezka Quinola
Date: Wed, 30 Oct 2024 11:59:16 -0400
Subject: [PATCH 3/7] fix: changed the parser to only accept one param

---
 gatorgrade/main.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gatorgrade/main.py b/gatorgrade/main.py
index e7a90317..dc627cba 100644
--- a/gatorgrade/main.py
+++ b/gatorgrade/main.py
@@ -81,7 +81,7 @@ def gatorgrade(
     # that, by default, gatorgrade should run in checking mode
     if ctx.invoked_subcommand is None:
         # parse the provided configuration file
-        (checks, match) = parse_config(filename, check_include, check_exclude)
+        (checks, match) = parse_config(filename, check_include=None, check_exclude=None)
         # there are valid checks and thus the
         # tool should run them with run_checks
         if len(checks) > 0:

From 98224819501cc9e8245d3ebc26457ea449b7bddb Mon Sep 17 00:00:00 2001
From: Chezka Quinola
Date: Wed, 30 Oct 2024 12:01:10 -0400
Subject: [PATCH 4/7] fix(main.py): changed parse_config to handle filename only

---
 gatorgrade/main.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gatorgrade/main.py b/gatorgrade/main.py
index dc627cba..5349cdc7 100644
--- a/gatorgrade/main.py
+++ b/gatorgrade/main.py
@@ -81,7 +81,7 @@ def gatorgrade(
     # that, by default, gatorgrade should run in checking mode
     if ctx.invoked_subcommand is None:
         # parse the provided configuration file
-        (checks, match) = parse_config(filename, check_include=None, check_exclude=None)
+        (checks, match) = parse_config(filename)
         # there are valid checks and thus the
         # tool should run them with run_checks
         if len(checks) > 0:

From 79a18ece892e781de8f242ae140b0e0fe1917396 Mon Sep 17 00:00:00 2001
From: Chezka Quinola
Date: Wed, 30 Oct 2024 12:05:53 -0400
Subject: [PATCH 5/7] fix: revert parse_config

---
 gatorgrade/main.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gatorgrade/main.py b/gatorgrade/main.py
index 5349cdc7..e7a90317 100644
--- a/gatorgrade/main.py
+++ b/gatorgrade/main.py
@@ -81,7 +81,7 @@ def gatorgrade(
     # that, by default, gatorgrade should run in checking mode
     if ctx.invoked_subcommand is None:
         # parse the provided configuration file
-        (checks, match) = parse_config(filename)
+        (checks, match) = parse_config(filename, check_include, check_exclude)
         # there are valid checks and thus the
         # tool should run them with run_checks
         if len(checks) > 0:

From 1a9f018511a6815383a13bebb85cbc778a7e3d9d Mon Sep 17 00:00:00 2001
From: Chezka Quinola
Date: Thu, 31 Oct 2024 13:34:02 -0400
Subject: [PATCH 6/7] fix: trying to fix parsing config

---
 .DS_Store          | Bin 0 -> 6148 bytes
 gatorgrade/main.py |   2 +-
 2 files changed, 1 insertion(+), 1 deletion(-)
 create mode 100644 .DS_Store
diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..257cafc0b8b2590b0a6f6cd8414af225b7d38fc3
GIT binary patch
literal 6148
[base85-encoded binary payload for the new .DS_Store file omitted]

diff --git a/gatorgrade/main.py b/gatorgrade/main.py
index e7a90317..9eafd45d 100644
--- a/gatorgrade/main.py
+++ b/gatorgrade/main.py
@@ -81,7 +81,7 @@ def gatorgrade(
     # that, by default, gatorgrade should run in checking mode
     if ctx.invoked_subcommand is None:
         # parse the provided configuration file
-        (checks, match) = parse_config(filename, check_include, check_exclude)
+        checks, match = parse_config(filename, check_include, check_exclude)
         # there are valid checks and thus the
         # tool should run them with run_checks
         if len(checks) > 0:

From 27130535a938857810cff761b8ad85bd517f043e Mon Sep 17 00:00:00 2001
From: Chezka Quinola
Date: Thu, 31 Oct 2024 13:41:22 -0400
Subject: [PATCH 7/7] fix: gave one param to parse config

---
 gatorgrade/main.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gatorgrade/main.py b/gatorgrade/main.py
index 9eafd45d..38d542a7 100644
--- a/gatorgrade/main.py
+++ b/gatorgrade/main.py
@@ -81,7 +81,7 @@ def gatorgrade(
     # that, by default, gatorgrade should run in checking mode
     if ctx.invoked_subcommand is None:
         # parse the provided configuration file
-        checks, match = parse_config(filename, check_include, check_exclude)
+        checks, match = parse_config(filename)
         # there are valid checks and thus the
         # tool should run them with run_checks
         if len(checks) > 0:
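
Patches 3 through 7 toggle between calling `parse_config(filename)` and `parse_config(filename, check_include, check_exclude)`, which suggests the parser's signature and its call site were changed out of step. One way to keep both call styles working is to make the filter arguments optional keywords and return a match flag alongside the checks. The sketch below only illustrates that idea under stated assumptions: the `CheckStub` class and the hard-coded check list stand in for the project's real check objects and YAML parsing, and this is not the project's actual `parse_config` implementation.

```python
# Hypothetical sketch of a parse_config signature compatible with both call
# sites seen in this series: parse_config(filename) and
# parse_config(filename, check_include, check_exclude).
from dataclasses import dataclass
from typing import List, Optional, Tuple


@dataclass
class CheckStub:
    """Placeholder for a parsed check; only the description matters here."""

    description: str


def parse_config(
    file: str,
    check_include: Optional[str] = None,
    check_exclude: Optional[str] = None,
) -> Tuple[List[CheckStub], bool]:
    """Parse the configuration file and filter checks by description."""
    # placeholder for the real logic that reads and parses gatorgrade.yml
    checks = [CheckStub("Complete all TODOs"), CheckStub("Pass all tests")]
    match = True
    if check_include:
        included = [c for c in checks if check_include in c.description]
        match = len(included) > 0
        checks = included
    if check_exclude:
        checks = [c for c in checks if check_exclude not in c.description]
    return checks, match


# both call styles seen in the patches above would work with this signature
checks, match = parse_config("gatorgrade.yml")
checks, match = parse_config("gatorgrade.yml", "Complete all TODOs", None)
```

With a signature along these lines, the call sites introduced in PATCH 1/7 and reverted to in PATCH 7/7 could coexist without further churn at the call site.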