diff --git a/gatorgrade/input/command_line_generator.py b/gatorgrade/input/command_line_generator.py
index 364fbe78..615ee5e6 100644
--- a/gatorgrade/input/command_line_generator.py
+++ b/gatorgrade/input/command_line_generator.py
@@ -11,7 +11,7 @@
 
 # pylint: disable=too-many-nested-blocks
 def generate_checks(
-    check_data_list: List[CheckData],
+    check_data_list: List[CheckData], deadline
 ) -> List[Union[ShellCheck, GatorGraderCheck]]:
     """Generate a list of checks based on check data from the configuration file.
 
@@ -64,4 +64,4 @@ def generate_checks(
                     gg_args.extend(["--directory", dirname, "--file", filename])
             checks.append(GatorGraderCheck(gg_args=gg_args, json_info=check_data.check))
 
-    return checks
+    return checks, deadline
diff --git a/gatorgrade/input/in_file_path.py b/gatorgrade/input/in_file_path.py
index a9cd048d..14729557 100644
--- a/gatorgrade/input/in_file_path.py
+++ b/gatorgrade/input/in_file_path.py
@@ -1,5 +1,6 @@
 """Generates a list of commands to be run through gatorgrader."""
 
+import datetime
 from collections import namedtuple
 from pathlib import Path
 from typing import Any
@@ -37,13 +38,24 @@ def parse_yaml_file(file_path: Path) -> List[Any]:
 
 
 def reformat_yaml_data(data):
-    """Reformat the raw data from a YAML file into a list of tuples."""
+    """Reformat the raw YAML data into a list of checks and an optional deadline.
+
+    Args:
+        data: the raw data from the YAML file.
+    """
     reformatted_data = []
-    if len(data) == 2:
-        setup_commands = data.pop(0)  # Removes the setup commands
-        run_setup(setup_commands)
-    add_checks_to_list(None, data[0], reformatted_data)
-    return reformatted_data
+    deadline = None
+    data_values = len(data)
+
+    if data_values >= 2:
+        for i in range(data_values - 1):
+            if "setup" in data[i]:
+                setup_commands = data[i]
+                run_setup(setup_commands)
+            elif "deadline" in data[i]:
+                deadline = data[i]["deadline"]
+    add_checks_to_list(None, data[-1], reformatted_data)
+    return reformatted_data, deadline
 
 
 def add_checks_to_list(path, data_list, reformatted_data):
diff --git a/gatorgrade/input/parse_config.py b/gatorgrade/input/parse_config.py
index d6ac12c7..cfb99cc5 100644
--- a/gatorgrade/input/parse_config.py
+++ b/gatorgrade/input/parse_config.py
@@ -24,8 +24,9 @@ def parse_config(file: Path):
         # use it to generate all of the checks;
         # these will be valid checks that are now
         # ready for execution with this tool
-        parse_con = generate_checks(reformat_yaml_data(parsed_yaml_file))
-        return parse_con
+        parsed_yaml_file, deadline = reformat_yaml_data(parsed_yaml_file)
+        parse_con, deadline = generate_checks(parsed_yaml_file, deadline)
+        return parse_con, deadline
     # return an empty list because of the fact that the
     # parsing process did not return a list with content;
     # allow the calling function to handle the empty list
diff --git a/gatorgrade/main.py b/gatorgrade/main.py
index 42bbbd81..9d0fd9fb 100644
--- a/gatorgrade/main.py
+++ b/gatorgrade/main.py
@@ -51,11 +51,11 @@ def gatorgrade(
     # that, by default, gatorgrade should run in checking mode
     if ctx.invoked_subcommand is None:
         # parse the provided configuration file
-        checks = parse_config(filename)
+        checks, deadline = parse_config(filename)
         # there are valid checks and thus the
         # tool should run them with run_checks
         if len(checks) > 0:
-            checks_status = run_checks(checks, report)
+            checks_status = run_checks(checks, report, deadline)
         # no checks were created and this means
        # that, most likely, the file was not
         # valid and thus the tool cannot run checks
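For orientation, a minimal sketch (not part of this diff) of how the two-value returns above could be consumed; the configuration file name and its YAML contents are hypothetical:

    from pathlib import Path

    from gatorgrade.input.in_file_path import parse_yaml_file, reformat_yaml_data

    # hypothetical gatorgrade.yml contents:
    #   - setup: echo "setting up"
    #   - deadline: 12/25/23 23:59:59
    #   - src:
    #       - hello-world.py:
    #           - description: Complete all TODOs
    #             check: MatchFileFragment
    parsed_yaml = parse_yaml_file(Path("gatorgrade.yml"))
    check_data, deadline = reformat_yaml_data(parsed_yaml)
    # deadline is the raw string from the YAML file, or None when the key is
    # absent; run_checks later parses it with the "%m/%d/%y %H:%M:%S" format
    print(check_data, deadline)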
diff --git a/gatorgrade/output/output.py b/gatorgrade/output/output.py
index 8fc94c5b..900daf66 100644
--- a/gatorgrade/output/output.py
+++ b/gatorgrade/output/output.py
@@ -1,9 +1,9 @@
 """Run checks and display whether each has passed or failed."""
 
-import datetime
+from datetime import datetime
 import json
 import os
 import subprocess
 from pathlib import Path
 from typing import List
 from typing import Tuple
@@ -66,16 +66,6 @@ def _run_gg_check(check: GatorGraderCheck) -> CheckResult:
         passed = result[1]
         description = result[0]
         diagnostic = result[2]
-        # Fetch the path from gatorgrade arguments
-        # the path pattern are 4 consistent string in the list
-        # --dir `dir_name` --file `file_name`
-        file_path = None
-        for i in range(len(check.gg_args)):
-            if check.gg_args[i] == "--directory":
-                dir_name = check.gg_args[i + 1]
-                file_name = check.gg_args[i + 3]
-                file_path = dir_name + "/" + file_name
-                break
     # If arguments are formatted incorrectly, catch the exception and
     # return it as the diagnostic message
     # Disable pylint to catch any type of exception thrown by GatorGrader
@@ -83,53 +73,55 @@ def _run_gg_check(check: GatorGraderCheck) -> CheckResult:
         passed = False
         description = f'Invalid GatorGrader check: "{" ".join(check.gg_args)}"'
         diagnostic = f'"{command_exception.__class__}" thrown by GatorGrader'
-        file_path = None
     return CheckResult(
         passed=passed,
         description=description,
         json_info=check.json_info,
         diagnostic=diagnostic,
-        path=file_path,
     )
 
 
 def create_report_json(
-    passed_count,
+    passed_count: int,
     checkResults: List[CheckResult],
-    percent_passed,
+    percent_passed: int,
+    deadline_info: str,
 ) -> dict:
     """Take checks and put them into json format in a dictionary.
 
     Args:
         passed_count: the number of checks that passed
-        check_information: the basic information about checks and their params
         checkResults: the list of check results that will be put in json
         percent_passed: the percentage of checks that passed
+        deadline_info: the time until/since the given deadline, if included
     """
-    # create list to hold the key values for the dictionary that
-    # will be converted into json
-    overall_key_list = ["amount_correct", "percentage_score", "report_time", "checks"]
+    # create a list to hold the individual check results and a
+    # dictionary to hold the overall report
     checks_list = []
     overall_dict = {}
-    report_generation_time = datetime.datetime.now()
-    formatted_time = report_generation_time.strftime("%Y-%m-%d %H:%M:%S")
+    # for each check:
     for i in range(len(checkResults)):
         # grab all of the information in it and add it to the checks list
         results_json = checkResults[i].json_info
         results_json["status"] = checkResults[i].passed
-        if checkResults[i].path:
-            results_json["path"] = checkResults[i].path
         if not checkResults[i].passed:
            results_json["diagnostic"] = checkResults[i].diagnostic
         checks_list.append(results_json)
+
+    # create lists to hold the keys and values for the dictionary
+    # that will be converted into json;
+    # if there isn't a deadline
+    if deadline_info == "N/A":
+        overall_key_list = ["amount_correct", "percentage_score", "checks"]
+        overall_value_list = [passed_count, percent_passed, checks_list]
+    # if there is a deadline, include it in the key and value lists
+    else:
+        overall_key_list = ["amount_correct", "percentage_score", "deadline", "checks"]
+        overall_value_list = [passed_count, percent_passed, deadline_info, checks_list]
+
     # create the dictionary for all of the check information
-    overall_dict = dict(
-        zip(
-            overall_key_list,
-            [passed_count, percent_passed, formatted_time, checks_list],
-        )
-    )
+    overall_dict = dict(zip(overall_key_list, overall_value_list))
     return overall_dict
 
 
@@ -144,7 +136,17 @@ def create_markdown_report_file(json: dict) -> str:
     failing_checks = []
     num_checks = len(json.get("checks"))  # type: ignore
     # write the total, amt correct and percentage score to md file
-    markdown_contents += f"# Gatorgrade Insights\n\n**Project Name:** {Path.cwd().name}\n**Amount Correct:** {(json.get('amount_correct'))}/{num_checks} ({(json.get('percentage_score'))}%)\n"
+    markdown_contents += (
+        f"# Gatorgrade Insights\n\n**Project Name:** {Path.cwd().name}\n"
+        f"**Amount Correct:** {(json.get('amount_correct'))}/{num_checks} "
+        f"({(json.get('percentage_score'))}%)"
+    )
+    # if there is a deadline, include it
+    if "deadline" in json:
+        markdown_contents += f"\n**Deadline:** {json.get('deadline')}\n"
+    # else, add a newline to prepare for the checks
+    else:
+        markdown_contents += "\n"
     # split checks into passing and not passing
     for check in json.get("checks"):  # type: ignore
         # if the check is passing
@@ -198,6 +199,21 @@ def create_markdown_report_file(json: dict) -> str:
     return markdown_contents
 
 
+def calculate_deadline_time_dif(later_time: datetime, earlier_time: datetime):
+    """Return the difference between two times in days, hours, minutes, and seconds.
+
+    Args:
+        later_time: the more recent datetime object
+        earlier_time: the earlier datetime object
+    """
+    time_difference = later_time - earlier_time
+    days = time_difference.days
+    hours, remainder = divmod(time_difference.seconds, 3600)
+    minutes, seconds = divmod(remainder, 60)
+
+    return days, hours, minutes, seconds
+
+
 def configure_report(
     report_params: Tuple[str, str, str], report_output_data_json: dict
 ):
@@ -285,7 +302,9 @@ def write_json_or_md_file(file_name, content_type, content):
 
 
 def run_checks(
-    checks: List[Union[ShellCheck, GatorGraderCheck]], report: Tuple[str, str, str]
+    checks: List[Union[ShellCheck, GatorGraderCheck]],
+    report: Tuple[str, str, str],
+    deadline,
 ) -> bool:
     """Run shell and GatorGrader checks and display whether each has passed or failed.
 
@@ -365,6 +384,30 @@
-    if all(report):
-        report_output_data = create_report_json(passed_count, results, percent)
-        configure_report(report, report_output_data)
+    # if a deadline is included:
+    deadline_difference = "N/A"
+    if deadline is not None:
+        # turn the deadline string into a datetime object (strip any whitespace)
+        deadline = datetime.strptime(deadline.strip(), "%m/%d/%y %H:%M:%S")
+        # if the deadline has passed, print out how late the assignment is
+        now = datetime.now()
+        if now > deadline:
+            days, hours, minutes, seconds = calculate_deadline_time_dif(now, deadline)
+            deadline_difference = f"Late by {days} days, {hours} hours, {minutes} minutes, and {seconds} seconds."
+            print(
+                f"\n-~- Your assignment is late. The deadline was {days} days, {hours} hours, {minutes} minutes, and {seconds} seconds ago. -~-"
+            )
+        # else, print out the remaining time until the assignment is due
+        else:
+            days, hours, minutes, seconds = calculate_deadline_time_dif(deadline, now)
+            deadline_difference = f"Due in {days} days, {hours} hours, {minutes} minutes, and {seconds} seconds."
+            print(
+                f"\n-~- Your assignment is due in {days} days, {hours} hours, {minutes} minutes, and {seconds} seconds. -~-"
+            )
+
+    # if a report was requested, create output in line with its specifications
+    if all(report):
+        report_output_data = create_report_json(
+            passed_count, results, percent, deadline_difference
+        )
+        configure_report(report, report_output_data)
     # compute summary results and display them in the console
     summary = f"Passed {passed_count}/{len(results)} ({percent}%) of checks for {Path.cwd().name}!"
     summary_color = "green" if passed_count == len(results) else "bright white"
diff --git a/pyproject.toml b/pyproject.toml
index 36a44540..bbb0ffff 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "gatorgrade"
-version = "0.7.0"
+version = "0.8.0"
 description = "GatorGrade executes GatorGrader checks!"
 authors = ["Michael Abraham", "Jacob Allebach", "Liam Black", "Katherine Burgess", "Yanqiao Chen", "Ochirsaikhan Davaajambal", "Tuguldurnemekh Gantulga", "Anthony Grant-Cook", "Dylan Holland", "Gregory M. Kapfhammer", "Peyton Kelly", "Luke Lacaria", "Lauren Nevill", "Rebekah Rudd", "Jack Turner", "Daniel Ullrich", "Garrison Vanzin", "Rian Watson"]
 readme = "README.md"
diff --git a/tests/input/test_input_gg_checks.py b/tests/input/test_input_gg_checks.py
index 6633fb8a..93f0ee84 100644
--- a/tests/input/test_input_gg_checks.py
+++ b/tests/input/test_input_gg_checks.py
@@ -12,7 +12,7 @@ def test_parse_config_gg_check_in_file_context_contains_file():
     # Given a configuration file with a GatorGrader check within a file context
     config = Path("tests/input/yml_test_files/gatorgrade_one_gg_check_in_file.yml")
     # When parse_config is run
-    output = parse_config(config)
+    output, deadline = parse_config(config)
     # Then the file path should be in the GatorGrader arguments
     assert "file.py" in output[0].gg_args
 
@@ -22,7 +22,7 @@ def test_parse_config_check_gg_matchfilefragment():
     # Given a configuration file with a GatorGrader check
     config = Path("tests/input/yml_test_files/gatorgrade_matchfilefragment.yml")
     # When parse_config is run
-    output = parse_config(config)
+    output, deadline = parse_config(config)
     # Then the description, check name, and options appear in the GatorGrader arguments
     assert output[0].gg_args == [
         "--description",
@@ -43,11 +43,11 @@ def test_parse_config_check_gg_matchfilefragment():
 def test_parse_config_gg_check_no_file_context_contains_no_file():
     """Test to make sure checks without a file context do not have a file path in GatorGrader arguments."""
     # Given a configuration file with a GatorGrader check without a file context
     config = Path(
         "tests/input/yml_test_files/gatorgrade_one_gg_check_no_file_context.yml"
     )
     # When parse_config is run
-    output = parse_config(config)
+    output, deadline = parse_config(config)
     # Then the GatorGrader arguments do not contain a file path
     assert output[0].gg_args == [
         "--description",
@@ -63,7 +63,7 @@ def test_parse_config_parses_both_shell_and_gg_checks():
     # Given a configuration file that contains a shell check and GatorGrader check
     config = Path("tests/input/yml_test_files/gatorgrader_both_checks.yml")
     # When parse_config is run
-    output = parse_config(config)
+    output, deadline = parse_config(config)
     # Then the output should contain a shell check and GatorGrader check
     assert isinstance(output[0], GatorGraderCheck)
     assert isinstance(output[1], ShellCheck)
@@ -74,7 +74,7 @@ def test_parse_config_yml_file_runs_setup_shell_checks():
     # Given a configuration file without setup commands
     config = Path("tests/input/yml_test_files/gatorgrade_no_shell_setup_check.yml")
     # When parse_config is run
-    output = parse_config(config)
+    output, deadline = parse_config(config)
     # Then the output should contain the GatorGrader check
     assert output[0].gg_args == [
         "--description",
@@ -90,6 +90,6 @@ def test_parse_config_shell_check_contains_command():
     # Given a configuration file with a shell check
     config = Path("tests/input/yml_test_files/gatorgrade_one_shell_command_check.yml")
     # When the parse_config is run
-    output = parse_config(config)
+    output, deadline = parse_config(config)
     # Then the command should be stored in the shell check
     assert output[0].command == "mdl ."
diff --git a/tests/output/test_output.py b/tests/output/test_output.py
index a5e9ca7c..5bb1c16b 100644
--- a/tests/output/test_output.py
+++ b/tests/output/test_output.py
@@ -22,6 +22,64 @@ def now(cls):
     monkeypatch.setattr(datetime, "datetime", mydatetime)
 
 
+def test_past_date_displays_late_message(capsys):
+    """Test that run_checks prints a late message when the deadline has already passed."""
+    # Given a GatorGrader check that should pass
+    check = GatorGraderCheck(
+        gg_args=[
+            "--description",
+            "Check TODOs",
+            "MatchFileFragment",
+            "--fragment",
+            "TODO",
+            "--count",
+            "0",
+            "--exact",
+            "--directory",
+            "tests/test_assignment/src",
+            "--file",
+            "hello-world.py",
+        ],
+        json_info="test",
+    )
+    report = (None, None, None)
+    deadline = "01/01/23 12:00:00\n"
+    # When run_checks is called
+    output.run_checks([check], report, deadline)
+    # Then the output shows that the assignment is late
+    out, _ = capsys.readouterr()
+    assert "Your assignment is late. The deadline was" in out
+
+
+def test_future_date_shows_upcoming_deadline(capsys):
+    """Test that run_checks prints the remaining time when the deadline is upcoming."""
+    # Given a GatorGrader check that should pass
+    check = GatorGraderCheck(
+        gg_args=[
+            "--description",
+            "Check TODOs",
+            "MatchFileFragment",
+            "--fragment",
+            "TODO",
+            "--count",
+            "0",
+            "--exact",
+            "--directory",
+            "tests/test_assignment/src",
+            "--file",
+            "hello-world.py",
+        ],
+        json_info="test",
+    )
+    # When run_checks is called
+    report = (None, None, None)
+    deadline = "01/01/50 12:00:00\n"
+    output.run_checks([check], report, deadline)
+    # Then the output shows the time remaining until the deadline
+    out, _ = capsys.readouterr()
+    assert "Your assignment is due in" in out
+
+
 def test_run_checks_invalid_gg_args_prints_exception(capsys):
     """Test that run_checks prints an exception when given an invalid GatorGrader argument."""
     # Given a GatorGrader check with invalid arguments
@@ -40,7 +98,8 @@ def test_run_checks_invalid_gg_args_prints_exception(capsys):
     )
     report = (None, None, None)
     # When run_checks is called
-    output.run_checks([check], report)  # type: ignore
+    deadline = None
+    output.run_checks([check], report, deadline)  # type: ignore
     # Then the output contains a declaration
     # about the use of an Invalid GatorGrader check
     out, _ = capsys.readouterr()
@@ -90,7 +149,8 @@ def test_run_checks_some_failed_prints_correct_summary(capsys):
     ]
     report = (None, None, None)
     # When run_checks is called
-    output.run_checks(checks, report)  # type: ignore
+    deadline = None
+    output.run_checks(checks, report, deadline)  # type: ignore
     # the output shows the correct fraction
     # and percentage of passed checks
     out, _ = capsys.readouterr()
@@ -138,7 +198,8 @@ def test_run_checks_all_passed_prints_correct_summary(capsys):
     ]
     report = (None, None, None)
     # When run_checks is called
-    output.run_checks(checks, report)  # type: ignore
+    deadline = None
+    output.run_checks(checks, report, deadline)  # type: ignore
     # Then the output shows the correct fraction and percentage of passed checks
     out, _ = capsys.readouterr()
     assert "Passed 3/3 (100%) of checks" in out
@@ -207,7 +268,7 @@ def test_md_report_file_created_correctly():
     ]
     # run them with the wanted report config
     report = ("file", "md", "insights.md")
-    output.run_checks(checks, report)
+    output.run_checks(checks, report, None)
     # check to make sure the created file matches the expected output
     expected_file_contents = """# Gatorgrade Insights\n\n**Project Name:** gatorgrade\n**Amount Correct:** 1/3 (33%)\n\n## Passing Checks"""
 
@@ -278,7 +339,7 @@ def test_print_error_with_invalid_report_path():
     ]
     report = ("file", "md", "invalid_path/insight.md")
     with pytest.raises(ValueError):
-        output.run_checks(checks, report)
+        output.run_checks(checks, report, None)
 
 
 def test_throw_errors_if_report_type_not_md_nor_json():
@@ -339,7 +400,7 @@ def test_throw_errors_if_report_type_not_md_nor_json():
     ]
     report = ("file", "not_md_nor_json", "invalid_path")
     with pytest.raises(ValueError):
-        output.run_checks(checks, report)
+        output.run_checks(checks, report, None)
 
 
 def test_write_md_and_json_correctly(tmp_path):
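For reference, a sketch of the JSON report shape that create_report_json now produces when a deadline is present. FakeResult is a hypothetical stand-in for gatorgrade's CheckResult, reduced to just the fields the function reads; the check contents and timing string are invented:

    from collections import namedtuple

    from gatorgrade.output.output import create_report_json

    # stand-in with only the fields create_report_json accesses
    FakeResult = namedtuple("FakeResult", ["passed", "diagnostic", "json_info"])

    results = [
        FakeResult(True, "", {"description": "Check TODOs"}),
        FakeResult(False, "Found 2 TODOs", {"description": "Remove TODOs"}),
    ]
    report = create_report_json(
        1, results, 50, "Due in 2 days, 4 hours, 10 minutes, and 5 seconds."
    )
    # report == {
    #     "amount_correct": 1,
    #     "percentage_score": 50,
    #     "deadline": "Due in 2 days, 4 hours, 10 minutes, and 5 seconds.",
    #     "checks": [
    #         {"description": "Check TODOs", "status": True},
    #         {"description": "Remove TODOs", "status": False, "diagnostic": "Found 2 TODOs"},
    #     ],
    # }
    print(report)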