Merge pull request #111 from GatorEducator/feature/json-output
Create json and md checks output
burgess01 authored Mar 3, 2023
2 parents b96ff26 + 9727e6e commit 720312b
Showing 8 changed files with 492 additions and 20 deletions.
9 changes: 7 additions & 2 deletions gatorgrade/input/checks.py
@@ -5,25 +5,30 @@
class ShellCheck: # pylint: disable=too-few-public-methods
"""Represent a shell check."""

def __init__(self, command: str, description: str = None):
def __init__(self, command: str, description: str = None, json_info=None):
"""Construct a ShellCheck.
Args:
command: The command to run in a shell.
description: The description to use in output.
If no description is given, the command is used as the description.
json_info: The complete check information to include in the JSON report.
If none is given, the command is used.
"""
self.command = command
self.description = description if description is not None else command
self.json_info = json_info


class GatorGraderCheck: # pylint: disable=too-few-public-methods
"""Represent a GatorGrader check."""

def __init__(self, gg_args: List[str]):
def __init__(self, gg_args: List[str], json_info):
"""Construct a GatorGraderCheck.
Args:
gg_args: The list of arguments to pass to GatorGrader.
json_info: The complete check information to include in the JSON report.
"""
self.gg_args = gg_args
self.json_info = json_info
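
For context, a minimal sketch (not from the diff) of how the updated constructors might be called; the dictionary values are hypothetical stand-ins for parsed gatorgrade.yml entries, and the gg_args ordering mirrors generate_checks below:

    # hypothetical parsed check data that rides along as json_info
    check_data = {"description": "Run program", "command": "python hello.py"}
    shell_check = ShellCheck(
        command="python hello.py",
        description="Run program",
        json_info=check_data,
    )
    gg_check = GatorGraderCheck(
        gg_args=["--description", "Write a README", "ConfirmFileExists",
                 "--directory", ".", "--file", "README.md"],
        json_info={"description": "Write a README", "check": "ConfirmFileExists"},
    )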
3 changes: 2 additions & 1 deletion gatorgrade/input/command_line_generator.py
@@ -30,6 +30,7 @@ def generate_checks(
ShellCheck(
command=check_data.check.get("command"),
description=check_data.check.get("description"),
json_info=check_data.check,
)
)
# Otherwise, it is a GatorGrader check
@@ -61,6 +62,6 @@ def generate_checks(
if dirname == "":
dirname = "."
gg_args.extend(["--directory", dirname, "--file", filename])
checks.append(GatorGraderCheck(gg_args=gg_args))
checks.append(GatorGraderCheck(gg_args=gg_args, json_info=check_data.check))

return checks
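
Since check_data.check is now attached verbatim as json_info, each report entry mirrors one gatorgrade.yml entry. A hypothetical sketch of that shape, assumed from the keys the markdown report below reads (description, check, options):

    {
        "description": "Complete all TODOs",
        "check": "MatchFileFragment",
        "options": {"fragment": "TODO", "count": 0, "file": "hello.py", "directory": "src"},
    }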
15 changes: 13 additions & 2 deletions gatorgrade/main.py
@@ -2,6 +2,7 @@

import sys
from pathlib import Path
from typing import Tuple

import typer
from rich.console import Console
@@ -33,7 +34,17 @@
@app.callback(invoke_without_command=True)
def gatorgrade(
ctx: typer.Context,
filename: Path = typer.Option(FILE, "--config", "-c", help="Name of the YML file."),
filename: Path = typer.Option(FILE, "--config", "-c", help="Name of the yml file."),
report: Tuple[str, str, str] = typer.Option(
(None, None, None),
"--report",
"-r",
help="A tuple containing the following REQUIRED values: \
1. The destination of the report (either file or env) \
2. The format of the report (either json or md) \
3. the name of the file or environment variable\
4. use 'env md GITHUB_STEP_SUMMARY' to create GitHub job summary in GitHub Action",
),
):
"""Run the GatorGrader checks in the specified gatorgrade.yml file."""
# if ctx.subcommand is None then this means
@@ -44,7 +55,7 @@ def gatorgrade(
# there are valid checks and thus the
# tool should run them with run_checks
if len(checks) > 0:
checks_status = run_checks(checks)
checks_status = run_checks(checks, report)
# no checks were created and this means
# that, most likely, the file was not
# valid and thus the tool cannot run checks
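With this option wired through, invocations like the following become possible (the file name insights.json is illustrative; the env md GITHUB_STEP_SUMMARY form comes from the option's help text):

    # write a JSON report to a file
    gatorgrade --config gatorgrade.yml --report file json insights.json
    # append a markdown report to a GitHub Actions job summary
    gatorgrade --report env md GITHUB_STEP_SUMMARY
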
3 changes: 3 additions & 0 deletions gatorgrade/output/check_result.py
@@ -10,6 +10,7 @@ def __init__(
self,
passed: bool,
description: str,
json_info,
diagnostic: str = "No diagnostic message available",
):
"""Construct a CheckResult.
@@ -18,10 +19,12 @@ def __init__(
passed: The passed or failed status of the check result. If true, indicates that the
check has passed.
description: The description to use in output.
json_info: The complete check information to include in the JSON report.
diagnostic: The message to use in output if the check has failed.
"""
self.passed = passed
self.description = description
self.json_info = json_info
self.diagnostic = diagnostic

def display_result(self, show_diagnostic: bool = False) -> str:
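A brief sketch (all values hypothetical) of constructing the extended CheckResult:

    result = CheckResult(
        passed=False,
        description="Complete all TODOs",
        json_info={"description": "Complete all TODOs", "check": "MatchFileFragment"},
        diagnostic="Found 2 fragment(s) in the hello.py or the output",
    )
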
183 changes: 178 additions & 5 deletions gatorgrade/output/output.py
@@ -1,8 +1,10 @@
"""Run checks and display whether each has passed or failed."""

import json
import os
import subprocess
from pathlib import Path
from typing import List
from typing import Tuple
from typing import Union

import gator
@@ -36,11 +38,16 @@ def _run_shell_check(check: ShellCheck) -> CheckResult:
stderr=subprocess.STDOUT,
)
passed = result.returncode == 0

# Add spaces after each newline to indent all lines of diagnostic
diagnostic = (
"" if passed else result.stdout.decode().strip().replace("\n", "\n ")
) # Add spaces after each newline to indent all lines of diagnostic
)
return CheckResult(
passed=passed, description=check.description, diagnostic=diagnostic
passed=passed,
description=check.description,
json_info=check.json_info,
diagnostic=diagnostic,
)


@@ -65,10 +72,169 @@ def _run_gg_check(check: GatorGraderCheck) -> CheckResult:
passed = False
description = f'Invalid GatorGrader check: "{" ".join(check.gg_args)}"'
diagnostic = f'"{command_exception.__class__}" thrown by GatorGrader'
return CheckResult(passed=passed, description=description, diagnostic=diagnostic)
return CheckResult(
passed=passed,
description=description,
json_info=check.json_info,
diagnostic=diagnostic,
)


def create_report_json(
passed_count,
checkResults: List[CheckResult],
percent_passed,
) -> dict:
"""Take checks and put them into json format in a dictionary.
Args:
passed_count: the number of checks that passed
check_information: the basic information about checks and their params
checkResults: the list of check results that will be put in json
percent_passed: the percentage of checks that passed
"""
# create list to hold the key values for the dictionary that
# will be converted into json
overall_key_list = ["amount_correct", "percentage_score", "checks"]

checks_list = []
overall_dict = {}

    # for each check, grab all of its information and add it to the checks list
    for check_result in checkResults:
        results_json = check_result.json_info
        results_json["status"] = check_result.passed
        if not check_result.passed:
            results_json["diagnostic"] = check_result.diagnostic
        checks_list.append(results_json)

# create the dictionary for all of the check information
overall_dict = dict(
zip(overall_key_list, [passed_count, percent_passed, checks_list])
)
return overall_dict
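
# Illustrative sketch (values hypothetical): with one passing and one failing
# check, create_report_json(1, results, 50) would return a dictionary like:
#   {
#       "amount_correct": 1,
#       "percentage_score": 50,
#       "checks": [
#           {"description": "Run program", "status": True},
#           {"description": "Write a README", "status": False,
#            "diagnostic": "The README.md file was not found"},
#       ],
#   }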


def create_markdown_report_file(json: dict) -> str:
"""Create a markdown file using the created json to use in github actions summary, among other places.
Args:
json: a dictionary containing the json that should be converted to markdown
"""
markdown_contents = ""
passing_checks = []
failing_checks = []

num_checks = len(json.get("checks"))

    # write the project name, amount correct, and percentage score to the markdown string
markdown_contents += f"# Gatorgrade Insights\n\n**Project Name:** {Path.cwd().name}\n**Amount Correct:** {(json.get('amount_correct'))}/{num_checks} ({(json.get('percentage_score'))}%)\n"

# split checks into passing and not passing
for check in json.get("checks"):
# if the check is passing
if check["status"] == True:
passing_checks.append(check)
# if the check is failing
else:
failing_checks.append(check)

# give short info about passing checks
markdown_contents += "\n## Passing Checks\n"
for check in passing_checks:
if "description" in check:
markdown_contents += f"\n- [x] {check['description']}"
else:
markdown_contents += f"\n- [x] {check['check']}"

# give extended information about failing checks
markdown_contents += "\n\n## Failing Checks\n"
# for each failing check, print out all related information
for check in failing_checks:
# for each key val pair in the check dictionary
if "description" in check:
markdown_contents += f"\n- [ ] {check['description']}"
else:
markdown_contents += f"\n- [ ] {check['check']}"

if "options" in check:
for i in check.get("options"):
if "command" == i:
val = check["options"]["command"]
markdown_contents += f"\n\t- **command** {val}"
if "fragment" == i:
val = check["options"]["fragment"]
markdown_contents += f"\n\t- **fragment:** {val}"
if "tag" == i:
val = check["options"]["tag"]
markdown_contents += f"\n\t- **tag:** {val}"
if "count" == i:
val = check["options"]["count"]
markdown_contents += f"\n\t- **count:** {val}"
if "directory" == i:
val = check["options"]["directory"]
markdown_contents += f"\n\t- **directory:** {val}"
if "file" == i:
val = check["options"]["file"]
markdown_contents += f"\n\t- **file:** {val}"
elif "command" in check:
val = check["command"]
markdown_contents += f"\n\t- **command:** {val}"
if "diagnostic" in check:
markdown_contents += f"\n\t- **diagnostic:** {check['diagnostic']}"
markdown_contents += "\n"

return markdown_contents
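
# Illustrative sketch (values hypothetical): for the dictionary above, the
# returned markdown string would render roughly as:
#
#   # Gatorgrade Insights
#
#   **Project Name:** demo-project
#   **Amount Correct:** 1/2 (50%)
#
#   ## Passing Checks
#   - [x] Run program
#
#   ## Failing Checks
#   - [ ] Write a README
#       - **diagnostic:** The README.md file was not found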


def configure_report(report_params: Tuple[str, str, str], report_output_data: dict):
"""Put together the contents of the report depending on the inputs of the user.
def run_checks(checks: List[Union[ShellCheck, GatorGraderCheck]]) -> bool:
Args:
report_params: The details of what the user wants the report to look like
report_params[0]: file or env
report_params[1]: json or md
report_params[2]: name of the file or env
report_output_data: the json dictionary that will be used or converted to md
"""
# if the user wants markdown, convert the json into md
if report_params[1] == "md":
report_output_data = create_markdown_report_file(report_output_data)

# if the user wants the data stored in a file:
if report_params[0] == "file":
# try to store it in that file
try:
# Second argument has to be json or md
if report_params[1] != "json" and report_params[1] != "md":
rich.print(
"\n[red]The second argument of report has to be 'md' or 'json' "
)
else:
with open(report_params[2], "w", encoding="utf-8") as file:
if report_params[1] == "json":
file.write(json.dumps(report_output_data))
else:
file.write(str(report_output_data))
        except Exception:
            rich.print(
                "\n[red]Can't open or write the target file; check that you provided a valid path"
            )
elif report_params[0] == "env":
if report_params[2] == "GITHUB_STEP_SUMMARY":
            summary_path = os.getenv("GITHUB_STEP_SUMMARY")
            with open(summary_path, "a", encoding="utf-8") as summary_file:
                summary_file.write(str(report_output_data))
else:
os.environ[report_params[2]] = str(report_output_data)
else:
rich.print("\n[red]The first argument of report has to be 'env' or 'file' ")


def run_checks(checks: List[Union[ShellCheck, GatorGraderCheck]]) -> bool:
def run_checks(
    checks: List[Union[ShellCheck, GatorGraderCheck]], report: Tuple[str, str, str]
) -> bool:
"""Run shell and GatorGrader checks and display whether each has passed or failed.
Also, print a list of all failed checks with their diagnostics and a summary message that
@@ -94,6 +260,7 @@ def run_checks(checks: List[Union[ShellCheck, GatorGraderCheck]]) -> bool:
if result is not None:
result.print()
results.append(result)

# determine if there are failures and then display them
failed_results = list(filter(lambda result: not result.passed, results))
# only print failures list if there are failures to print
@@ -109,6 +276,12 @@ def run_checks(checks: List[Union[ShellCheck, GatorGraderCheck]]) -> bool:
percent = 0
else:
percent = round(passed_count / len(results) * 100)

    # if a report is requested, create output in line with the user's specifications
if all(report):
report_output_data = create_report_json(passed_count, results, percent)
configure_report(report, report_output_data)

# compute summary results and display them in the console
summary = f"Passed {passed_count}/{len(results)} ({percent}%) of checks for {Path.cwd().name}!"
summary_color = "green" if passed_count == len(results) else "bright white"