From b694aaed18398ba236ed4d10251e399afb5f6851 Mon Sep 17 00:00:00 2001 From: PhilipOlwoc Date: Wed, 6 Nov 2024 21:38:35 -0500 Subject: [PATCH 1/9] feat: added Motivator to gatorgrade --- gatorgrade/main.py | 11 +- gatorgrade/output/output.py | 246 ++++++++++++++++++++++++++---------- 2 files changed, 190 insertions(+), 67 deletions(-) diff --git a/gatorgrade/main.py b/gatorgrade/main.py index 42bbbd81..d6de48ee 100644 --- a/gatorgrade/main.py +++ b/gatorgrade/main.py @@ -45,6 +45,15 @@ def gatorgrade( 3. the name of the file or environment variable\ 4. use 'env md GITHUB_STEP_SUMMARY' to create GitHub job summary in GitHub Action", ), + run_status_bar: bool = typer.Option( + False, + "--run_status_bar", + help="Enable a progress bar for checks running/not running.", + ), + no_status_bar: bool = typer.Option( + False, "--no_status_bar", help="Disable the progress bar entirely." + ), + run_motivation: bool = typer.Option( False, "--motivation", help="Enable a motivational message" ), ): """Run the GatorGrader checks in the specified gatorgrade.yml file.""" # if ctx.subcommand is None then this means @@ -55,7 +64,7 @@ def gatorgrade( # there are valid checks and thus the # tool should run them with run_checks if len(checks) > 0: - checks_status = run_checks(checks, report) + checks_status = run_checks(checks, report, run_status_bar, no_status_bar, run_motivation) # no checks were created and this means # that, most likely, the file was not # valid and thus the tool cannot run checks diff --git a/gatorgrade/output/output.py b/gatorgrade/output/output.py index 420aa9e6..af9c8667 100644 --- a/gatorgrade/output/output.py +++ b/gatorgrade/output/output.py @@ -1,15 +1,22 @@ """Run checks and display whether each has passed or failed.""" + import datetime import json import os import subprocess from pathlib import Path -from typing import List +from typing import List, Dict from typing import Tuple from typing import Union import gator +import random import rich +from 
rich.progress import BarColumn +from rich.progress import Progress +from rich.progress import TextColumn +from rich.panel import Panel +import yaml from gatorgrade.input.checks import GatorGraderCheck from gatorgrade.input.checks import ShellCheck @@ -39,7 +46,6 @@ def _run_shell_check(check: ShellCheck) -> CheckResult: stderr=subprocess.STDOUT, ) passed = result.returncode == 0 - # Add spaces after each newline to indent all lines of diagnostic diagnostic = ( "" if passed else result.stdout.decode().strip().replace("\n", "\n ") @@ -66,7 +72,6 @@ def _run_gg_check(check: GatorGraderCheck) -> CheckResult: passed = result[1] description = result[0] diagnostic = result[2] - # Fetch the path from gatorgrade arguments # the path pattern are 4 consistent string in the list # --dir `dir_name` --file `file_name` @@ -110,10 +115,8 @@ def create_report_json( # create list to hold the key values for the dictionary that # will be converted into json overall_key_list = ["amount_correct", "percentage_score", "report_time", "checks"] - checks_list = [] overall_dict = {} - report_generation_time = datetime.datetime.now() formatted_time = report_generation_time.strftime("%Y-%m-%d %H:%M:%S") # for each check: @@ -126,7 +129,6 @@ def create_report_json( if not checkResults[i].passed: results_json["diagnostic"] = checkResults[i].diagnostic checks_list.append(results_json) - # create the dictionary for all of the check information overall_dict = dict( zip( @@ -146,21 +148,17 @@ def create_markdown_report_file(json: dict) -> str: markdown_contents = "" passing_checks = [] failing_checks = [] - - num_checks = len(json.get("checks")) - + num_checks = len(json.get("checks")) # type: ignore # write the total, amt correct and percentage score to md file markdown_contents += f"# Gatorgrade Insights\n\n**Project Name:** {Path.cwd().name}\n**Amount Correct:** {(json.get('amount_correct'))}/{num_checks} ({(json.get('percentage_score'))}%)\n" - # split checks into passing and not passing - for 
check in json.get("checks"): + for check in json.get("checks"): # type: ignore # if the check is passing - if check["status"] == True: + if check["status"]: passing_checks.append(check) # if the check is failing else: failing_checks.append(check) - # give short info about passing checks markdown_contents += "\n## Passing Checks\n" for check in passing_checks: @@ -168,7 +166,6 @@ def create_markdown_report_file(json: dict) -> str: markdown_contents += f"\n- [x] {check['description']}" else: markdown_contents += f"\n- [x] {check['check']}" - # give extended information about failing checks markdown_contents += "\n\n## Failing Checks\n" # for each failing check, print out all related information @@ -178,7 +175,6 @@ def create_markdown_report_file(json: dict) -> str: markdown_contents += f"\n- [ ] {check['description']}" else: markdown_contents += f"\n- [ ] {check['check']}" - if "options" in check: for i in check.get("options"): if "command" == i: @@ -205,7 +201,6 @@ def create_markdown_report_file(json: dict) -> str: if "diagnostic" in check: markdown_contents += f"\n\t- **diagnostic:** {check['diagnostic']}" markdown_contents += "\n" - return markdown_contents @@ -228,31 +223,50 @@ def configure_report( raise ValueError( "\n[red]The second argument of report has to be 'md' or 'json' " ) - # if the user wants markdown, get markdown content based on json if report_type == "md": report_output_data_md = create_markdown_report_file(report_output_data_json) - - # if the user wants the data stored in a file: + # if the user wants the data stored in a file if report_format == "file": if report_type == "md": - write_json_or_md_file(report_name, report_type, report_output_data_md) + write_json_or_md_file(report_name, report_type, report_output_data_md) # type: ignore else: write_json_or_md_file(report_name, report_type, report_output_data_json) - + # the user wants the data stored in an environment variable; do not attempt + # to save to the environment variable if it does 
not exist in the environment elif report_format == "env": if report_name == "GITHUB_STEP_SUMMARY": - env_file = os.getenv("GITHUB_STEP_SUMMARY") - if report_type == "md": - write_json_or_md_file(env_file, report_type, report_output_data_md) - else: - write_json_or_md_file(env_file, report_type, report_output_data_json) - - # Add json report into the GITHUB_ENV environment variable for data collection purpose - env_file = os.getenv("GITHUB_ENV") - with open(env_file, "a") as myfile: - myfile.write(f"JSON_REPORT={json.dumps(report_output_data_json)}") - # Add env + env_file = os.getenv("GITHUB_STEP_SUMMARY", None) + if env_file is not None: + if report_type == "md": + write_json_or_md_file(env_file, report_type, report_output_data_md) # type: ignore + else: + write_json_or_md_file( + env_file, report_type, report_output_data_json + ) + # Add json report into the GITHUB_ENV environment variable for data collection purpose; + # note that this is an undocumented side-effect of running gatorgrade with command-line + # arguments that save data to the GITHUB_STEP_SUMMARY environment variable. 
The current + # implementation of this approach should not cause the setting to fail when GatorGrade + # is run with the same command-line for which it is normally run in a GitHub Actions + # convert the data to a JSON string so that it can potentially be saved + json_string = json.dumps(report_output_data_json) + # check to see if the GITHUB_ENV environment variable is set + env_file = os.getenv("GITHUB_ENV", None) + # the environment variable is defined and thus it is acceptable + # to write a key-value pair to the GITHUB_ENV environment file + # (note that the comment on the previous line is correct; this + # environment variable is a pointer to a file that allows for + # key-value pairs in one step to be passed to the next step + # inside of GitHub Actions and it is done through a file) + if env_file is not None: + # if it is, append the JSON string to the GITHUB_ENV file; + # note that this step is specifically helpful when running + # GatorGrade inside of a GitHub Actions workflow because + # this variable called GITHUB_ENV is used to store environment + # variables that are available to all of the subsequent steps + with open(os.environ["GITHUB_ENV"], "a") as env_file: # type: ignore + env_file.write(f"JSON_REPORT={json_string}\n") # type: ignore else: raise ValueError( "\n[red]The first argument of report has to be 'env' or 'file' " @@ -264,7 +278,6 @@ def write_json_or_md_file(file_name, content_type, content): # try to store content in a file with user chosen format try: # Second argument has to be json or md - with open(file_name, "w", encoding="utf-8") as file: if content_type == "json": json.dump(content, file, indent=4) @@ -276,9 +289,27 @@ def write_json_or_md_file(file_name, content_type, content): "\n[red]Can't open or write the target file, check if you provide a valid path" ) from e +def load_quotes(file_path: str) -> Dict[str, str]: + """Loads the yml file and reads the quotes in the file""" + with open(file_path, "r", encoding="utf-8") as file: 
+ data = yaml.safe_load(file) + return data['quotes'] + +def motivation(quotes_list, message_title="Motivational Message"): + """Display a single motivational message with a title.""" + quote = random.choice(quotes_list) # Pick one quote from the list + return f"{quote}" + +file_path = "quotes.yml" +quotes = load_quotes(file_path) +assert isinstance(quotes, dict) def run_checks( - checks: List[Union[ShellCheck, GatorGraderCheck]], report: Tuple[str, str, str] + checks: List[Union[ShellCheck, GatorGraderCheck]], + report: Tuple[str, str, str], + running_mode=False, + no_status_bar=False, + run_motivation=False, ) -> bool: """Run shell and GatorGrader checks and display whether each has passed or failed. @@ -287,40 +318,122 @@ def run_checks( Args: checks: The list of shell and GatorGrader checks to run. + running_mode: Convert the Progress Bar to update based on checks ran/not ran. + no_status_bar: Option to completely disable all Progress Bar options. """ results = [] # run each of the checks - for check in checks: - result = None - # run a shell check; this means - # that it is going to run a command - # in the shell as a part of a check - # store the command that ran - command_output = None - - if isinstance(check, ShellCheck): - result = _run_shell_check(check) - command_output = check.command - # run a check that GatorGrader implements - elif isinstance(check, GatorGraderCheck): - result = _run_gg_check(check) - - # there were results from running checks - # and thus they must be displayed - if result is not None: - result.print() - results.append((result, command_output)) - + # check how many tests are being ran + total_checks = len(checks) + # run checks with no progress bar + if no_status_bar: + for check in checks: + result = None + command_ran = None + # run a shell check; this means + # that it is going to run a command + # in the shell as a part of a check; + # store the command that ran in the + # field called run_command that is + # inside of a 
CheckResult object but + # not initialized in the constructor + if isinstance(check, ShellCheck): + result = _run_shell_check(check) + command_ran = check.command + result.run_command = command_ran + # run a check that GatorGrader implements + elif isinstance(check, GatorGraderCheck): + result = _run_gg_check(check) + # check to see if there was a command in the + # GatorGraderCheck. This code finds the index of the + # word "--command" in the check.gg_args list if it + # is available (it is not available for all of + # the various types of GatorGraderCheck instances), + # and then it adds 1 to that index to get the actual + # command run and then stores that command in the + # result.run_command field that is initialized to + # an empty string in the constructor for CheckResult + if "--command" in check.gg_args: + index_of_command = check.gg_args.index("--command") + index_of_new_command = int(index_of_command) + 1 + result.run_command = check.gg_args[index_of_new_command] + # there were results from running checks + # and thus they must be displayed + if result is not None: + result.print() + results.append(result) + else: + with Progress( + TextColumn("[progress.description]{task.description}"), + BarColumn( + bar_width=40, + style="red", + complete_style="green", + finished_style="green", + ), + TextColumn("[progress.percentage]{task.percentage:>3.0f}%"), + ) as progress: + # add a progress task for tracking + task = progress.add_task("[green]Running checks...", total=total_checks) + + # run each of the checks + for check in checks: + result = None + command_ran = None + + if isinstance(check, ShellCheck): + result = _run_shell_check(check) + command_ran = check.command + result.run_command = command_ran + # run a check that GatorGrader implements + elif isinstance(check, GatorGraderCheck): + result = _run_gg_check(check) + # check to see if there was a command in the + # GatorGraderCheck. 
This code finds the index of the + # word "--command" in the check.gg_args list if it + # is available (it is not available for all of + # the various types of GatorGraderCheck instances), + # and then it adds 1 to that index to get the actual + # command run and then stores that command in the + # result.run_command field that is initialized to + # an empty string in the constructor for CheckResult + if "--command" in check.gg_args: + index_of_command = check.gg_args.index("--command") + index_of_new_command = int(index_of_command) + 1 + result.run_command = check.gg_args[index_of_new_command] + # there were results from running checks + # and thus they must be displayed + if result is not None: + result.print() + results.append(result) + + # update progress based on running_mode + if running_mode: + progress.update(task, advance=1) + else: + if result and result.passed: + progress.update(task, advance=1) # determine if there are failures and then display them - failed_results = list(filter(lambda result: not result[0].passed, results)) - # print failures list if there are failures to print + failed_results = list(filter(lambda result: not result.passed, results)) + # print failures list if there are failures to print # and print what ShellCheck command that Gatorgrade ran if len(failed_results) > 0: print("\n-~- FAILURES -~-\n") for result in failed_results: - result[0].print(show_diagnostic=True) - if result[1] is not None: - rich.print(f"[blue] → Command that failed: [green]{result[1]}") + # main.console.print("This is a result") + # main.console.print(result) + result.print(show_diagnostic=True) + # this result is an instance of CheckResult + # that has a run_command field that is some + # value that is not the default of an empty + # string and thus it should be displayed; + # the idea is that displaying this run_command + # will give the person using Gatorgrade a way + # to quickly run the command that failed + if result.run_command != "": + rich.print( + 
f"[blue] → Run this command: [green]{result.run_command}\n" + ) # determine how many of the checks passed and then # compute the total percentage of checks passed passed_count = len(results) - len(failed_results) @@ -329,23 +442,25 @@ def run_checks( percent = 0 else: percent = round(passed_count / len(results) * 100) - # if the report is wanted, create output in line with their specifications if all(report): report_output_data = create_report_json(passed_count, results, percent) configure_report(report, report_output_data) - # compute summary results and display them in the console summary = f"Passed {passed_count}/{len(results)} ({percent}%) of checks for {Path.cwd().name}!" summary_color = "green" if passed_count == len(results) else "bright white" print_with_border(summary, summary_color) + if run_motivation: + if 0.25 <= percent < 0.75: + rich.print(Panel(motivation(quotes["low_motivation"], "You're just getting warmed up!"), expand=False, title="Motivation", border_style="bright_cyan")) + elif percent >= 0.75: + rich.print(Panel(motivation(quotes["high_motivation"], "Finish Line Insight"), expand=False, title="Motivation", border_style="bright_cyan")) # determine whether or not the run was a success or not: # if all of the tests pass then the function returns True; # otherwise the function must return False summary_status = True if passed_count == len(results) else False return summary_status - def print_with_border(text: str, rich_color: str): """Print text with a border. 
@@ -359,14 +474,13 @@ def print_with_border(text: str, rich_color: str): # Upper right corner downleft = "\u2517" # Lower left corner - downright = "\u251B" + downright = "\u251b" # Lower right corner vert = "\u2503" # Vertical line horz = "\u2501" # Horizontal line - line = horz * (len(text) + 2) rich.print(f"[{rich_color}]\n\t{upleft}{line}{upright}") rich.print(f"[{rich_color}]\t{vert} {text} {vert}") - rich.print(f"[{rich_color}]\t{downleft}{line}{downright}\n") + rich.print(f"[{rich_color}]\t{downleft}{line}{downright}\n") \ No newline at end of file From 88159f6a7432bf99f3d291129fd785dfd001bae5 Mon Sep 17 00:00:00 2001 From: PhilipOlwoc Date: Mon, 11 Nov 2024 09:19:04 -0500 Subject: [PATCH 2/9] fix: fixed mergeing conflicts --- gatorgrade/output/output.py | 24 +++++++++++++++++++----- gatorgrade/output/quotes.yml | 15 +++++++++++++++ 2 files changed, 34 insertions(+), 5 deletions(-) create mode 100644 gatorgrade/output/quotes.yml diff --git a/gatorgrade/output/output.py b/gatorgrade/output/output.py index af9c8667..e82ccaa3 100644 --- a/gatorgrade/output/output.py +++ b/gatorgrade/output/output.py @@ -1,6 +1,7 @@ """Run checks and display whether each has passed or failed.""" import datetime +import time import json import os import subprocess @@ -16,6 +17,8 @@ from rich.progress import Progress from rich.progress import TextColumn from rich.panel import Panel +from rich.live import Live +from rich import print as rprint import yaml from gatorgrade.input.checks import GatorGraderCheck @@ -294,11 +297,22 @@ def load_quotes(file_path: str) -> Dict[str, str]: with open(file_path, "r", encoding="utf-8") as file: data = yaml.safe_load(file) return data['quotes'] - -def motivation(quotes_list, message_title="Motivational Message"): - """Display a single motivational message with a title.""" - quote = random.choice(quotes_list) # Pick one quote from the list - return f"{quote}" + +def animated_message(quote: str, title: str = "Motivational Message", animation_time: 
float = 2.0): + """Display a motivational message with a brief animated effect.""" + with Live(refresh_per_second=10) as live: + for i in range(10): + panel = Panel(f"[white]{quote[:i * len(quote) // 10]}_", + expand=False, border_style="bright_cyan") + live.update(panel) + time.sleep(animation_time / 10) + # Final display without the underscore + live.update(Panel(f"[white]{quote}", expand=False, border_style="bright_cyan")) + +def motivation(quotes_list: List[str], message_title="Motivational Message"): + """Display a single motivational message with animation.""" + quote = random.choice(quotes_list) + animated_message(quote, message_title) file_path = "quotes.yml" quotes = load_quotes(file_path) diff --git a/gatorgrade/output/quotes.yml b/gatorgrade/output/quotes.yml new file mode 100644 index 00000000..e95e9371 --- /dev/null +++ b/gatorgrade/output/quotes.yml @@ -0,0 +1,15 @@ +quotes: + low_motivation: + - "The journey of 1000 miles begins with a single step" + - "Keep pushing, you're just getting started!" + - "Hang in there; the best is yet to come!" + + mid_motivation: + - "50% there" + - "Halfway there, keep going strong!" + - "You're making great progress!" + + high_motivation: + - "Congrats, you're done!" + - "Almost there, don't give up now!" + - "Finish line is in sight!" From 1a8a3c4b3da7b06097b6797f36b3ed08de2431d3 Mon Sep 17 00:00:00 2001 From: PhilipOlwoc Date: Wed, 20 Nov 2024 18:29:12 -0500 Subject: [PATCH 3/9] fix: fixing merge conflicts and add quotes as json file instead of yaml file. 
--- gatorgrade/output/output.py | 60 +++++++++++++++++++++-------------- gatorgrade/output/quotes.json | 17 ++++++++++ gatorgrade/output/quotes.yml | 15 --------- 3 files changed, 54 insertions(+), 38 deletions(-) create mode 100644 gatorgrade/output/quotes.json delete mode 100644 gatorgrade/output/quotes.yml diff --git a/gatorgrade/output/output.py b/gatorgrade/output/output.py index e82ccaa3..2018dac7 100644 --- a/gatorgrade/output/output.py +++ b/gatorgrade/output/output.py @@ -292,32 +292,34 @@ def write_json_or_md_file(file_name, content_type, content): "\n[red]Can't open or write the target file, check if you provide a valid path" ) from e -def load_quotes(file_path: str) -> Dict[str, str]: - """Loads the yml file and reads the quotes in the file""" +def load_quotes(file_path: str) -> Dict[str, List[str]]: + """Loads the JSON file and reads the quotes in the file.""" with open(file_path, "r", encoding="utf-8") as file: - data = yaml.safe_load(file) - return data['quotes'] + data = json.load(file) + return data -def animated_message(quote: str, title: str = "Motivational Message", animation_time: float = 2.0): - """Display a motivational message with a brief animated effect.""" - with Live(refresh_per_second=10) as live: - for i in range(10): - panel = Panel(f"[white]{quote[:i * len(quote) // 10]}_", - expand=False, border_style="bright_cyan") - live.update(panel) - time.sleep(animation_time / 10) - # Final display without the underscore - live.update(Panel(f"[white]{quote}", expand=False, border_style="bright_cyan")) - -def motivation(quotes_list: List[str], message_title="Motivational Message"): - """Display a single motivational message with animation.""" +def motivation(quotes_list: List[str], message_title="Motivational Message") -> str: + """Display a single motivational message.""" + if not quotes_list: + return "No motivational messages available." 
quote = random.choice(quotes_list) - animated_message(quote, message_title) + return f"{message_title}: {quote}" -file_path = "quotes.yml" +file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'quotes.json')) quotes = load_quotes(file_path) assert isinstance(quotes, dict) +def error_fix(): + try: + quotes = load_quotes(file_path) + assert isinstance(quotes, dict) + except FileNotFoundError: + print(f"Error: The file {file_path} was not found.") + quotes = {} + except json.JSONDecodeError as exc: + print(f"Error parsing JSON file: {exc}") + quotes = {} + def run_checks( checks: List[Union[ShellCheck, GatorGraderCheck]], report: Tuple[str, str, str], @@ -389,12 +391,10 @@ def run_checks( ) as progress: # add a progress task for tracking task = progress.add_task("[green]Running checks...", total=total_checks) - # run each of the checks for check in checks: result = None command_ran = None - if isinstance(check, ShellCheck): result = _run_shell_check(check) command_ran = check.command @@ -466,9 +466,23 @@ def run_checks( print_with_border(summary, summary_color) if run_motivation: if 0.25 <= percent < 0.75: - rich.print(Panel(motivation(quotes["low_motivation"], "You're just getting warmed up!"), expand=False, title="Motivation", border_style="bright_cyan")) + print( + Panel( + motivation(quotes["low_motivation"], "You're just getting warmed up!"), + expand=False, + title="Motivation", + border_style="bright_cyan" + ) + ) elif percent >= 0.75: - rich.print(Panel(motivation(quotes["high_motivation"], "Finish Line Insight"), expand=False, title="Motivation", border_style="bright_cyan")) + print( + Panel( + motivation(quotes["high_motivation"], "Finish Line Insight"), + expand=False, + title="Motivation", + border_style="bright_cyan" + ) + ) # determine whether or not the run was a success or not: # if all of the tests pass then the function returns True; # otherwise the function must return False diff --git a/gatorgrade/output/quotes.json 
b/gatorgrade/output/quotes.json new file mode 100644 index 00000000..219954ac --- /dev/null +++ b/gatorgrade/output/quotes.json @@ -0,0 +1,17 @@ +{ + "low_motivation": [ + "The journey of 1000 miles begins with a single step", + "Keep pushing, you're just getting started!", + "Hang in there; the best is yet to come!" + ], + "mid_motivation": [ + "50% there", + "Halfway there, keep going strong!", + "You're making great progress!" + ], + "high_motivation": [ + "Congrats, you're done!", + "Almost there, don't give up now!", + "Finish line is in sight!" + ] +} diff --git a/gatorgrade/output/quotes.yml b/gatorgrade/output/quotes.yml deleted file mode 100644 index e95e9371..00000000 --- a/gatorgrade/output/quotes.yml +++ /dev/null @@ -1,15 +0,0 @@ -quotes: - low_motivation: - - "The journey of 1000 miles begins with a single step" - - "Keep pushing, you're just getting started!" - - "Hang in there; the best is yet to come!" - - mid_motivation: - - "50% there" - - "Halfway there, keep going strong!" - - "You're making great progress!" - - high_motivation: - - "Congrats, you're done!" - - "Almost there, don't give up now!" - - "Finish line is in sight!" From 3b434f6f0e2b2b46b9f8c65b7b2226991d5c3fc3 Mon Sep 17 00:00:00 2001 From: PhilipOlwoc Date: Sun, 24 Nov 2024 22:49:02 +0000 Subject: [PATCH 4/9] fix: fixed output issue. 
--- gatorgrade/output/output.py | 41 ++++--------------------------------- 1 file changed, 4 insertions(+), 37 deletions(-) diff --git a/gatorgrade/output/output.py b/gatorgrade/output/output.py index 8f1b3752..15be361f 100644 --- a/gatorgrade/output/output.py +++ b/gatorgrade/output/output.py @@ -13,6 +13,7 @@ import gator import random import rich +from rich.console import Console from rich.progress import BarColumn from rich.progress import Progress from rich.progress import TextColumn @@ -307,6 +308,7 @@ def motivation(quotes_list: List[str], message_title="Motivational Message") -> file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'quotes.json')) quotes = load_quotes(file_path) assert isinstance(quotes, dict) +console = Console() def error_fix(): try: @@ -387,41 +389,6 @@ def run_checks( if result and result.passed: progress.update(task, advance=1) - """for check in checks: - result = None - command_ran = None - # run a shell check; this means - # that it is going to run a command - # in the shell as a part of a check; - # store the command that ran in the - # field called run_command that is - # inside of a CheckResult object but - # not initialized in the constructor - if isinstance(check, ShellCheck): - result = _run_shell_check(check) - command_ran = check.command - result.run_command = command_ran - # run a check that GatorGrader implements - elif isinstance(check, GatorGraderCheck): - result = _run_gg_check(check) - # check to see if there was a command in the - # GatorGraderCheck. 
This code finds the index of the - # word "--command" in the check.gg_args list if it - # is available (it is not available for all of - # the various types of GatorGraderCheck instances), - # and then it adds 1 to that index to get the actual - # command run and then stores that command in the - # result.run_command field that is initialized to - # an empty string in the constructor for CheckResult - if "--command" in check.gg_args: - index_of_command = check.gg_args.index("--command") - index_of_new_command = int(index_of_command) + 1 - result.run_command = check.gg_args[index_of_new_command] - # there were results from running checks - # and thus they must be displayed - if result is not None: - result.print() - results.append(result)""" # determine if there are failures and then display them failed_results = list(filter(lambda result: not result.passed, results)) # print failures list if there are failures to print @@ -461,7 +428,7 @@ def run_checks( print_with_border(summary, summary_color) if run_motivation: if 0.25 <= percent < 0.75: - print( + console.print( Panel( motivation(quotes["low_motivation"], "You're just getting warmed up!"), expand=False, @@ -470,7 +437,7 @@ def run_checks( ) ) elif percent >= 0.75: - print( + console.print( Panel( motivation(quotes["high_motivation"], "Finish Line Insight"), expand=False, From fe88a4916b4d1023c16787e4d97a886940242583 Mon Sep 17 00:00:00 2001 From: PhilipOlwoc Date: Thu, 5 Dec 2024 12:33:59 -0500 Subject: [PATCH 5/9] feat: added aiden hint feature and test for output. 
--- gatorgrade/output/check_result.py | 3 ++- gatorgrade/output/output.py | 22 +++++++++++++++------- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/gatorgrade/output/check_result.py b/gatorgrade/output/check_result.py index f6e88c29..22d19700 100644 --- a/gatorgrade/output/check_result.py +++ b/gatorgrade/output/check_result.py @@ -30,6 +30,7 @@ def __init__( self.diagnostic = diagnostic self.path = path self.run_command = "" + self.motivation = "" def display_result(self, show_diagnostic: bool = False) -> str: """Print check's passed or failed status, description, and, optionally, diagnostic message. @@ -48,7 +49,7 @@ def display_result(self, show_diagnostic: bool = False) -> str: return message def __repr__(self): - return f"CheckResult(passed={self.passed}, description='{self.description}', json_info={self.json_info}, path='{self.path}', diagnostic='{self.diagnostic}', run_command='{self.run_command}')" + return f"CheckResult(passed={self.passed}, description='{self.description}', json_info={self.json_info}, path='{self.path}', diagnostic='{self.diagnostic}', run_command='{self.run_command}', motivation={self.motivation}')" def __str__(self, show_diagnostic: bool = False) -> str: """Print check's passed or failed status, description, and, optionally, diagnostic message. 
diff --git a/gatorgrade/output/output.py b/gatorgrade/output/output.py index 15be361f..4ac77272 100644 --- a/gatorgrade/output/output.py +++ b/gatorgrade/output/output.py @@ -198,6 +198,9 @@ def create_markdown_report_file(json: dict) -> str: if "file" == i: val = check["options"]["file"] markdown_contents += f"\n\t- **file:** {val}" + if "motivation" == i: + val = check["options"]["motivation"] + markdown_contents += f"\n\t- **motivation:** {val}" elif "command" in check: val = check["command"] markdown_contents += f"\n\t- **command:** {val}" @@ -291,15 +294,15 @@ def write_json_or_md_file(file_name, content_type, content): raise ValueError( "\n[red]Can't open or write the target file, check if you provide a valid path" ) from e - +""" def load_quotes(file_path: str) -> Dict[str, List[str]]: - """Loads the JSON file and reads the quotes in the file.""" + Loads the JSON file and reads the quotes in the file. with open(file_path, "r", encoding="utf-8") as file: data = json.load(file) return data def motivation(quotes_list: List[str], message_title="Motivational Message") -> str: - """Display a single motivational message.""" + Display a single motivational message. if not quotes_list: return "No motivational messages available." 
quote = random.choice(quotes_list) @@ -308,7 +311,7 @@ def motivation(quotes_list: List[str], message_title="Motivational Message") -> file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'quotes.json')) quotes = load_quotes(file_path) assert isinstance(quotes, dict) -console = Console() + def error_fix(): try: @@ -320,7 +323,8 @@ def error_fix(): except json.JSONDecodeError as exc: print(f"Error parsing JSON file: {exc}") quotes = {} - +""" +console = Console() def run_check(check): """Runs the given check, returning the result and the command run (if any).""" result = None @@ -337,6 +341,10 @@ def run_check(check): index_of_command = check.gg_args.index("--command") + 1 result.run_command = check.gg_args[index_of_command] + if "--motivation" in check.gg_args: + index_of_motivation = check.gg_args.index("--motivation") + 1 + result.motivation = check.gg_args[index_of_motivation] + return result def run_checks( @@ -426,7 +434,7 @@ def run_checks( summary = f"Passed {passed_count}/{len(results)} ({percent}%) of checks for {Path.cwd().name}!" 
summary_color = "green" if passed_count == len(results) else "bright white" print_with_border(summary, summary_color) - if run_motivation: + """if run_motivation: if 0.25 <= percent < 0.75: console.print( Panel( @@ -444,7 +452,7 @@ def run_checks( title="Motivation", border_style="bright_cyan" ) - ) + )""" # determine whether or not the run was a success or not: # if all of the tests pass then the function returns True; # otherwise the function must return False From 1ff9505b7d8d80dcef08b6bbc2ba92c787be672f Mon Sep 17 00:00:00 2001 From: PhilipOlwoc Date: Thu, 5 Dec 2024 15:49:51 -0500 Subject: [PATCH 6/9] fix: testing output --- gatorgrade/output/check_result.py | 1 + gatorgrade/output/output.py | 171 ++++++++---------------------- 2 files changed, 48 insertions(+), 124 deletions(-) diff --git a/gatorgrade/output/check_result.py b/gatorgrade/output/check_result.py index 22d19700..9ad573c8 100644 --- a/gatorgrade/output/check_result.py +++ b/gatorgrade/output/check_result.py @@ -14,6 +14,7 @@ def __init__( json_info, path: Union[str, None] = None, diagnostic: str = "No diagnostic message available", + motivation: str = "" ): """Construct a CheckResult. 
diff --git a/gatorgrade/output/output.py b/gatorgrade/output/output.py index 4ac77272..3c447e5f 100644 --- a/gatorgrade/output/output.py +++ b/gatorgrade/output/output.py @@ -1,26 +1,17 @@ """Run checks and display whether each has passed or failed.""" import datetime -import time import json import os import subprocess from pathlib import Path -from typing import List, Dict +from typing import List from typing import Tuple from typing import Union import gator -import random import rich -from rich.console import Console -from rich.progress import BarColumn -from rich.progress import Progress -from rich.progress import TextColumn -from rich.panel import Panel -from rich.live import Live -from rich import print as rprint -import yaml + from gatorgrade.input.checks import GatorGraderCheck from gatorgrade.input.checks import ShellCheck from gatorgrade.output.check_result import CheckResult @@ -198,9 +189,6 @@ def create_markdown_report_file(json: dict) -> str: if "file" == i: val = check["options"]["file"] markdown_contents += f"\n\t- **file:** {val}" - if "motivation" == i: - val = check["options"]["motivation"] - markdown_contents += f"\n\t- **motivation:** {val}" elif "command" in check: val = check["command"] markdown_contents += f"\n\t- **command:** {val}" @@ -294,65 +282,10 @@ def write_json_or_md_file(file_name, content_type, content): raise ValueError( "\n[red]Can't open or write the target file, check if you provide a valid path" ) from e -""" -def load_quotes(file_path: str) -> Dict[str, List[str]]: - Loads the JSON file and reads the quotes in the file. - with open(file_path, "r", encoding="utf-8") as file: - data = json.load(file) - return data - -def motivation(quotes_list: List[str], message_title="Motivational Message") -> str: - Display a single motivational message. - if not quotes_list: - return "No motivational messages available." 
- quote = random.choice(quotes_list) - return f"{message_title}: {quote}" - -file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'quotes.json')) -quotes = load_quotes(file_path) -assert isinstance(quotes, dict) - - -def error_fix(): - try: - quotes = load_quotes(file_path) - assert isinstance(quotes, dict) - except FileNotFoundError: - print(f"Error: The file {file_path} was not found.") - quotes = {} - except json.JSONDecodeError as exc: - print(f"Error parsing JSON file: {exc}") - quotes = {} -""" -console = Console() -def run_check(check): - """Runs the given check, returning the result and the command run (if any).""" - result = None - command_ran = None - if isinstance(check, ShellCheck): - result = _run_shell_check(check) - command_ran = check.command - result.run_command = command_ran - - elif isinstance(check, GatorGraderCheck): - result = _run_gg_check(check) - if "--command" in check.gg_args: - index_of_command = check.gg_args.index("--command") + 1 - result.run_command = check.gg_args[index_of_command] - - if "--motivation" in check.gg_args: - index_of_motivation = check.gg_args.index("--motivation") + 1 - result.motivation = check.gg_args[index_of_motivation] - - return result def run_checks( - checks: List[Union[ShellCheck, GatorGraderCheck]], - report: Tuple[str, str, str], - running_mode=False, - no_status_bar=False, - run_motivation=False, + checks: List[Union[ShellCheck, GatorGraderCheck]], report: Tuple[str, str, str] ) -> bool: """Run shell and GatorGrader checks and display whether each has passed or failed. @@ -361,42 +294,50 @@ def run_checks( Args: checks: The list of shell and GatorGrader checks to run. - running_mode: Convert the Progress Bar to update based on checks ran/not ran. - no_status_bar: Option to completely disable all Progress Bar options. 
""" results = [] - # run each of the checks - total_checks = len(checks) - if no_status_bar: - for check in checks: - result = run_check(check) - if result is not None: - result.print() - results.append(result) - else: - with Progress( - TextColumn("[progress.description]{task.description}"), - BarColumn( - bar_width=40, - style="red", - complete_style="green", - finished_style="green", - ), - TextColumn("[progress.percentage]{task.percentage:>3.0f}%"), - ) as progress: - task = progress.add_task("[green]Running checks...", total=total_checks) - for check in checks: - result = run_check(check) - if result is not None: - result.print() - results.append(result) - - if running_mode: - progress.update(task, advance=1) - else: - if result and result.passed: - progress.update(task, advance=1) - + # run each of the checks + for check in checks: + result = None + command_ran = None + motivation = "" + # run a shell check; this means + # that it is going to run a command + # in the shell as a part of a check; + # store the command that ran in the + # field called run_command that is + # inside of a CheckResult object but + # not initialized in the constructor + if isinstance(check, ShellCheck): + result = _run_shell_check(check) + command_ran = check.command + result.run_command = command_ran + result.motivation = motivation + # run a check that GatorGrader implements + elif isinstance(check, GatorGraderCheck): + result = _run_gg_check(check) + # check to see if there was a command in the + # GatorGraderCheck. 
This code finds the index of the + # word "--command" in the check.gg_args list if it + # is available (it is not available for all of + # the various types of GatorGraderCheck instances), + # and then it adds 1 to that index to get the actual + # command run and then stores that command in the + # result.run_command field that is initialized to + # an empty string in the constructor for CheckResult + if "--command" in check.gg_args: + index_of_command = check.gg_args.index("--command") + index_of_new_command = int(index_of_command) + 1 + result.run_command = check.gg_args[index_of_new_command] + if "--motivation" in check.gg_args: + index_of_hint = check.gg_args.index("--motivation") + index_of_new_hint = int(index_of_hint) + 1 + result.hint = check.gg_args[index_of_new_hint] + # there were results from running checks + # and thus they must be displayed + if result is not None: + result.print() + results.append(result) # determine if there are failures and then display them failed_results = list(filter(lambda result: not result.passed, results)) # print failures list if there are failures to print @@ -434,31 +375,13 @@ def run_checks( summary = f"Passed {passed_count}/{len(results)} ({percent}%) of checks for {Path.cwd().name}!" 
summary_color = "green" if passed_count == len(results) else "bright white" print_with_border(summary, summary_color) - """if run_motivation: - if 0.25 <= percent < 0.75: - console.print( - Panel( - motivation(quotes["low_motivation"], "You're just getting warmed up!"), - expand=False, - title="Motivation", - border_style="bright_cyan" - ) - ) - elif percent >= 0.75: - console.print( - Panel( - motivation(quotes["high_motivation"], "Finish Line Insight"), - expand=False, - title="Motivation", - border_style="bright_cyan" - ) - )""" # determine whether or not the run was a success or not: # if all of the tests pass then the function returns True; # otherwise the function must return False summary_status = True if passed_count == len(results) else False return summary_status + def print_with_border(text: str, rich_color: str): """Print text with a border. @@ -481,4 +404,4 @@ def print_with_border(text: str, rich_color: str): line = horz * (len(text) + 2) rich.print(f"[{rich_color}]\n\t{upleft}{line}{upright}") rich.print(f"[{rich_color}]\t{vert} {text} {vert}") - rich.print(f"[{rich_color}]\t{downleft}{line}{downright}\n") \ No newline at end of file + rich.print(f"[{rich_color}]\t{downleft}{line}{downright}\n") From 46e3858596b28af653551b37523eb60b88356a3a Mon Sep 17 00:00:00 2001 From: PhilipOlwoc Date: Mon, 9 Dec 2024 16:35:51 -0500 Subject: [PATCH 7/9] fix: fixed traceback error --- gatorgrade/generate/generate.py | 1 + gatorgrade/main.py | 10 +--------- gatorgrade/output/output.py | 11 +++++++++++ gatorgrade/output/quotes.json | 17 ----------------- 4 files changed, 13 insertions(+), 26 deletions(-) delete mode 100644 gatorgrade/output/quotes.json diff --git a/gatorgrade/generate/generate.py b/gatorgrade/generate/generate.py index 8dd91164..4dafe19e 100644 --- a/gatorgrade/generate/generate.py +++ b/gatorgrade/generate/generate.py @@ -119,6 +119,7 @@ def write_yaml_of_paths_list( "description": f"Complete all TODOs in {file_path_fixed}", "check": 
"MatchFileFragment", "options": {"fragment": "TODO", "count": 0, "exact": True}, + "motivation": "You're doint Great" } ] } diff --git a/gatorgrade/main.py b/gatorgrade/main.py index d6de48ee..bb215b2e 100644 --- a/gatorgrade/main.py +++ b/gatorgrade/main.py @@ -45,14 +45,6 @@ def gatorgrade( 3. the name of the file or environment variable\ 4. use 'env md GITHUB_STEP_SUMMARY' to create GitHub job summary in GitHub Action", ), - run_status_bar: bool = typer.Option( - False, - "--run_status_bar", - help="Enable a progress bar for checks running/not running.", - ), - no_status_bar: bool = typer.Option( - False, "--no_status_bar", help="Disable the progress bar entirely." - ), run_motivation: bool = typer.Option( False, "--motivation", help="Enable a motivational message" ), ): """Run the GatorGrader checks in the specified gatorgrade.yml file.""" @@ -64,7 +56,7 @@ def gatorgrade( # there are valid checks and thus the # tool should run them with run_checks if len(checks) > 0: - checks_status = run_checks(checks, report, run_status_bar, no_status_bar, run_motivation) + checks_status = run_checks(checks, report, )#run_status_bar, #no_status_bar, run_motivation) # no checks were created and this means # that, most likely, the file was not # valid and thus the tool cannot run checks diff --git a/gatorgrade/output/output.py b/gatorgrade/output/output.py index 3c447e5f..4cbca8c4 100644 --- a/gatorgrade/output/output.py +++ b/gatorgrade/output/output.py @@ -76,6 +76,10 @@ def _run_gg_check(check: GatorGraderCheck) -> CheckResult: file_name = check.gg_args[i + 3] file_path = dir_name + "/" + file_name break + motivation = "" + if "--motivation" in check.gg_args: + index_of_motivation = check.gg_args.index("--hint") + motivation = check.gg_args[index_of_motivation + 1] # If arguments are formatted incorrectly, catch the exception and # return it as the diagnostic message # Disable pylint to catch any type of exception thrown by GatorGrader @@ -90,6 +94,7 @@ def 
_run_gg_check(check: GatorGraderCheck) -> CheckResult:
         json_info=check.json_info,
         diagnostic=diagnostic,
         path=file_path,
+        motivation=motivation
     )
 
 
@@ -189,6 +194,9 @@ def create_markdown_report_file(json: dict) -> str:
             if "file" == i:
                 val = check["options"]["file"]
                 markdown_contents += f"\n\t- **file:** {val}"
+            if "motivation" == i:
+                val = check["options"]["motivation"]
+                markdown_contents += f"\n\t- **motivation:** {val}"
             elif "command" in check:
                 val = check["command"]
                 markdown_contents += f"\n\t- **command:** {val}"
@@ -344,6 +352,7 @@ def run_checks(
     # and print what ShellCheck command that Gatorgrade ran
     if len(failed_results) > 0:
         print("\n-~- FAILURES -~-\n")
+        print("Rebekah Test")
         for result in failed_results:
             # main.console.print("This is a result")
             # main.console.print(result)
@@ -359,6 +368,8 @@ def run_checks(
             rich.print(
                 f"[blue] → Run this command: [green]{result.run_command}\n"
             )
+            #if result.motivation != "":
+
     # determine how many of the checks passed and then
     # compute the total percentage of checks passed
     passed_count = len(results) - len(failed_results)
diff --git a/gatorgrade/output/quotes.json b/gatorgrade/output/quotes.json
deleted file mode 100644
index 219954ac..00000000
--- a/gatorgrade/output/quotes.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
-    "low_motivation": [
-        "The journey of 1000 miles begins with a single step",
-        "Keep pushing, you're just getting started!",
-        "Hang in there; the best is yet to come!"
-    ],
-    "mid_motivation": [
-        "50% there",
-        "Halfway there, keep going strong!",
-        "You're making great progress!"
-    ],
-    "high_motivation": [
-        "Congrats, you're done!",
-        "Almost there, don't give up now!",
-        "Finish line is in sight!" 
- ] -} From ee0c60c3cc4eadc8a801d2b92805668f2b04b3d2 Mon Sep 17 00:00:00 2001 From: PhilipOlwoc Date: Tue, 10 Dec 2024 00:57:34 -0500 Subject: [PATCH 8/9] fix(output.py): added Motivational class --- gatorgrade/input/in_file_path.py | 46 ++++++++++++++++++++++++++- gatorgrade/main.py | 2 +- gatorgrade/output/output.py | 53 ++++++++++++++++++++++++++++---- 3 files changed, 93 insertions(+), 8 deletions(-) diff --git a/gatorgrade/input/in_file_path.py b/gatorgrade/input/in_file_path.py index a9cd048d..f6743acc 100644 --- a/gatorgrade/input/in_file_path.py +++ b/gatorgrade/input/in_file_path.py @@ -1,5 +1,6 @@ """Generates a list of commands to be run through gatorgrader.""" +import random from collections import namedtuple from pathlib import Path from typing import Any @@ -35,7 +36,6 @@ def parse_yaml_file(file_path: Path) -> List[Any]: # return a blank list that calling function handles return [] - def reformat_yaml_data(data): """Reformat the raw data from a YAML file into a list of tuples.""" reformatted_data = [] @@ -65,3 +65,47 @@ def add_checks_to_list(path, data_list, reformatted_data): else: # Adds the current check to the reformatted data list reformatted_data.append(CheckData(file_context=path, check=ddict)) break + + +def add_quotes_to_yaml(file_path: Path, new_quotes: List[str]): + """Add motivational quotes to the YAML file under the key 'motivational_quotes'.""" + if not file_path.exists(): + print(f"Error: File not found at {file_path}") + return + + try: + with open(file_path, encoding=DEFAULT_ENCODING) as file: + data = yaml.safe_load(file) or {} # Load existing data or start fresh + + # Ensure 'motivational_quotes' exists in the YAML structure + if "motivational_quotes" not in data: + data["motivational_quotes"] = [] + + # Append new quotes, avoiding duplicates + existing_quotes = set(data["motivational_quotes"]) + for quote in new_quotes: + if quote not in existing_quotes: + data["motivational_quotes"].append(quote) + + # Write the updated data 
back to the file
+        with open(file_path, 'w', encoding=DEFAULT_ENCODING) as file:
+            yaml.safe_dump(data, file)
+
+        print("Motivational quotes added successfully!")
+
+    except yaml.YAMLError as e:
+        print(f"Error processing YAML file: {e}")
+
+
+def get_random_quote(file_path: Path) -> str:
+    """Retrieve a random motivational quote from the YAML file."""
+    if not file_path.exists():
+        return "No motivational quotes available. Add some to the YAML file!"
+
+    try:
+        with open(file_path, encoding=DEFAULT_ENCODING) as file:
+            data = yaml.safe_load(file)
+            quotes = data.get("motivational_quotes", [])
+            return random.choice(quotes) if quotes else "No motivational quotes available."
+    except Exception as e:
+        return f"Error retrieving quotes: {e}"
\ No newline at end of file
diff --git a/gatorgrade/main.py b/gatorgrade/main.py
index bb215b2e..d1f60df8 100644
--- a/gatorgrade/main.py
+++ b/gatorgrade/main.py
@@ -56,7 +56,7 @@ def gatorgrade(
     # there are valid checks and thus the
     # tool should run them with run_checks
     if len(checks) > 0:
-        checks_status = run_checks(checks, report, )#run_status_bar, #no_status_bar, run_motivation)
+        checks_status = run_checks(checks, report, run_motivation)#run_status_bar, #no_status_bar, )
     # no checks were created and this means
     # that, most likely, the file was not
     # valid and thus the tool cannot run checks
diff --git a/gatorgrade/output/output.py b/gatorgrade/output/output.py
index 4cbca8c4..98f9faf1 100644
--- a/gatorgrade/output/output.py
+++ b/gatorgrade/output/output.py
@@ -8,6 +8,8 @@
 from typing import List
 from typing import Tuple
 from typing import Union
+from rich.console import Console
+from rich.panel import Panel
 
 import gator
 import rich
@@ -291,9 +293,28 @@ def write_json_or_md_file(file_name, content_type, content):
         "\n[red]Can't open or write the target file, check if you provide a valid path"
     ) from e
 
+console = Console()
+
+class Motivation:
+    def __init__(self, quotes: dict, context: str = None): # type: ignore
+        """Construct a 
Motivation. + + Args: + quotes: A dictionary of motivational quotes with keys like "low_motivation", "high_motivation". + context: Additional context or description about the quote. Optional. + """ + self.quotes = quotes + self.context = context + + def get_motivation(self, motivation_level: str): + """Retrieve a motivational quote based on the motivation level.""" + quote = self.quotes.get(motivation_level, "Keep going, you're doing great!") + return f"{quote}\nContext: {self.context if self.context else 'Keep pushing forward!'}" + def run_checks( - checks: List[Union[ShellCheck, GatorGraderCheck]], report: Tuple[str, str, str] + checks: List[Union[ShellCheck, GatorGraderCheck]], report: Tuple[str, str, str], + run_motivation=False ) -> bool: """Run shell and GatorGrader checks and display whether each has passed or failed. @@ -337,10 +358,8 @@ def run_checks( index_of_command = check.gg_args.index("--command") index_of_new_command = int(index_of_command) + 1 result.run_command = check.gg_args[index_of_new_command] - if "--motivation" in check.gg_args: - index_of_hint = check.gg_args.index("--motivation") - index_of_new_hint = int(index_of_hint) + 1 - result.hint = check.gg_args[index_of_new_hint] + if "motivation" in check.gg_args: + result.motivation = check["motivation"] # there were results from running checks # and thus they must be displayed if result is not None: @@ -368,7 +387,10 @@ def run_checks( rich.print( f"[blue] → Run this command: [green]{result.run_command}\n" ) - #if result.motivation != "": + """if result.motivation != "": + rich.print( + f"[bright cyan] {result.motivation}\n" + )""" # determine how many of the checks passed and then # compute the total percentage of checks passed @@ -389,6 +411,25 @@ def run_checks( # determine whether or not the run was a success or not: # if all of the tests pass then the function returns True; # otherwise the function must return False + motivation_quotes = {"low_motivation": "You're just getting warmed 
up!", "high_motivation": "Finish Line Insight"} + motivation = Motivation(motivation_quotes) + if run_motivation: + if 0.25 <= percent < 0.75: + console.print( + Panel( + motivation.get_motivation("low_motivation"), + expand=False, title="Motivation", + border_style="bright_cyan" + ) + ) + elif percent >= 0.75: + console.print( + Panel( + motivation.get_motivation("high_motivation"), + expand=False, title="Motivation", + border_style="bright_cyan" + ) + ) summary_status = True if passed_count == len(results) else False return summary_status From d81e0feccd1719af3860a159c769562dcbe26c48 Mon Sep 17 00:00:00 2001 From: PhilipOlwoc Date: Wed, 11 Dec 2024 19:32:59 -0500 Subject: [PATCH 9/9] fix(output.py): fixed the output --- gatorgrade/output/output.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/gatorgrade/output/output.py b/gatorgrade/output/output.py index 98f9faf1..724dc0ff 100644 --- a/gatorgrade/output/output.py +++ b/gatorgrade/output/output.py @@ -296,7 +296,7 @@ def write_json_or_md_file(file_name, content_type, content): console = Console() class Motivation: - def __init__(self, quotes: dict, context: str = None): # type: ignore + def __init__(self, quotes: dict): # type: ignore """Construct a Motivation. Args: @@ -304,12 +304,12 @@ def __init__(self, quotes: dict, context: str = None): # type: ignore context: Additional context or description about the quote. Optional. """ self.quotes = quotes - self.context = context + def get_motivation(self, motivation_level: str): """Retrieve a motivational quote based on the motivation level.""" quote = self.quotes.get(motivation_level, "Keep going, you're doing great!") - return f"{quote}\nContext: {self.context if self.context else 'Keep pushing forward!'}" + return f"{quote}" def run_checks(