Skip to content

Commit

Permalink
Merge branch 'master' into feature/remote-config-files
Browse files Browse the repository at this point in the history
  • Loading branch information
simojo committed Nov 11, 2023
2 parents 5ff526b + 90c4f81 commit e1b08c6
Show file tree
Hide file tree
Showing 11 changed files with 318 additions and 47 deletions.
3 changes: 3 additions & 0 deletions chasten/constants.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,15 @@
"""Define constants with dataclasses for use in chasten."""

from dataclasses import dataclass
from pathlib import Path


# chasten constant
@dataclass(frozen=True)
class Chasten:
"""Define the Chasten dataclass for constant(s)."""

Analyze_Storage: Path
Application_Name: str
Application_Author: str
Chasten_Database_View: str
Expand All @@ -26,6 +28,7 @@ class Chasten:


chasten = Chasten(
Analyze_Storage=Path("analysis.md"),
Application_Name="chasten",
Application_Author="ChastenedTeam",
Chasten_Database_View="chasten_complete",
Expand Down
43 changes: 27 additions & 16 deletions chasten/database.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@

from sqlite_utils import Database

from chasten import constants, enumerations, filesystem, output
from chasten import constants, enumerations, filesystem, output, util

CHASTEN_SQL_SELECT_QUERY = """
SELECT
Expand Down Expand Up @@ -66,7 +66,6 @@ def enable_full_text_search(chasten_database_name: str) -> None:
database["sources"].enable_fts(
[
"filename",
"filelines",
"check_id",
"check_name",
"check_description",
Expand Down Expand Up @@ -129,18 +128,6 @@ def display_datasette_details(
output.console.print()


def executable_name(OpSystem: str = "Linux") -> str:
    """Build the full path to the datasette executable for the current OS."""
    # start from the name of the datasette executable defined in constants
    base_name = constants.datasette.Datasette_Executable
    if OpSystem == "Windows":
        # Windows virtual environments place executables in /Scripts/ with .exe
        subdir = "/Scripts/"
        base_name = base_name + ".exe"
    else:
        # POSIX virtual environments keep executables under /bin/
        subdir = "/bin/"
    # anchor the path at the active virtual environment's prefix
    return sys.prefix + subdir + base_name


def start_datasette_server( # noqa: PLR0912, PLR0913
database_path: Path,
datasette_metadata: Path,
Expand All @@ -160,7 +147,9 @@ def start_datasette_server( # noqa: PLR0912, PLR0913
# chasten will exist in a bin directory. For instance, the "datasette"
# executable that is a dependency of chasten can be found by starting
# the search from this location for the virtual environment.
full_executable_name = executable_name(OpSystem)
full_executable_name = util.executable_name(
constants.datasette.Datasette_Executable, OpSystem
)
(found_executable, executable_path) = filesystem.can_find_executable(
full_executable_name
)
Expand Down Expand Up @@ -224,7 +213,7 @@ def start_datasette_server( # noqa: PLR0912, PLR0913
# datasette-publish-fly plugin) and thus need to exit and not proceed
if not found_publish_platform_executable:
output.console.print(
":person_shrugging: Was not able to find '{datasette_platform}'"
f":person_shrugging: Was not able to find '{datasette_platform}'"
)
return None
# was able to find the fly or vercel executable that will support the
Expand Down Expand Up @@ -276,3 +265,25 @@ def start_datasette_server( # noqa: PLR0912, PLR0913
# there is debugging output in the console to indicate this option.
proc = subprocess.Popen(cmd)
proc.wait()


def display_results_frog_mouth(result_file, OpSystem) -> None:
    """Run frogmouth as a subprocess of chasten to display a markdown result file.

    :param result_file: path to the markdown file with analysis results
    :param OpSystem: name of the operating system (e.g., "Linux" or "Windows")
    """
    cmd = [
        "frogmouth",
        result_file,
    ]
    # resolve the frogmouth executable inside the active virtual environment
    executable = util.executable_name("frogmouth", OpSystem)
    exec_found, executable_path = filesystem.can_find_executable(executable)
    if exec_found:
        # run frogmouth with specified path
        # NOTE: use the bullet constant directly; the module-level alias
        # small_bullet_unicode is defined in main.py, not in this module
        output.console.print("\n🐸 Frogmouth Information\n")
        output.console.print(
            f"  {constants.markers.Small_Bullet_Unicode} Venv: {sys.prefix}"
        )
        output.console.print(
            f"  {constants.markers.Small_Bullet_Unicode} Program: {executable_path}"
        )
        proc = subprocess.Popen(cmd)
        proc.wait()
    else:
        output.console.print(
            ":person_shrugging: Was not able to find frogmouth executable try installing it separately"
        )
        return None
4 changes: 2 additions & 2 deletions chasten/filesystem.py
Original file line number Diff line number Diff line change
Expand Up @@ -231,7 +231,7 @@ def write_dict_results(
# using indentation to ensure that JSON file is readable
results_path_with_file = results_path / complete_results_file_name
# use the built-in method from pathlib Path to write the JSON contents
results_path_with_file.write_text(results_json)
results_path_with_file.write_text(results_json, "utf-8")
# return the name of the file that contains the JSON dictionary contents
return complete_results_file_name

Expand Down Expand Up @@ -295,7 +295,7 @@ def get_json_results(json_paths: List[Path]) -> List[Dict[Any, Any]]:
# iterate through each of the provided paths to a JSON file
for json_path in json_paths:
# turn the contents of the current JSON file into a dictionary
json_dict = json.loads(json_path.read_text())
json_dict = json.loads(json_path.read_text("utf-8"))
# add the current dictionary to the list of dictionaries
json_dicts_list.append(json_dict)
# return the list of JSON dictionaries
Expand Down
99 changes: 90 additions & 9 deletions chasten/main.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
"""💫 Chasten checks the AST of a Python program."""

import os
import sys
import time
from pathlib import Path
Expand Down Expand Up @@ -30,6 +31,7 @@

# create a small bullet for display in the output
small_bullet_unicode = constants.markers.Small_Bullet_Unicode
ANALYSIS_FILE = constants.chasten.Analyze_Storage


# ---
Expand Down Expand Up @@ -178,7 +180,8 @@ def configure( # noqa: PLR0913
)
# write the configuration file for the chasten tool in the created directory
filesystem.create_configuration_file(
created_directory_path, constants.filesystem.Main_Configuration_File
created_directory_path,
constants.filesystem.Main_Configuration_File,
)
# write the check file for the chasten tool in the created directory
filesystem.create_configuration_file(
Expand Down Expand Up @@ -246,7 +249,19 @@ def analyze( # noqa: PLR0912, PLR0913, PLR0915
writable=True,
resolve_path=True,
),
config: str = typer.Option(
store_result: Path = typer.Option(
None,
"--markdown-storage",
"-r",
help="A directory for storing results in a markdown file",
exists=True,
file_okay=False,
dir_okay=True,
readable=True,
writable=True,
resolve_path=True,
),
config: Path = typer.Option(
None,
"--config",
"-c",
Expand All @@ -264,8 +279,10 @@ def analyze( # noqa: PLR0912, PLR0913, PLR0915
"-t",
help="Specify the destination for debugging output.",
),
display: bool = typer.Option(False, help="Display results using frogmouth"),
verbose: bool = typer.Option(False, help="Enable verbose mode output."),
save: bool = typer.Option(False, help="Enable saving of output file(s)."),
force: bool = typer.Option(False, help="Force creation of new markdown file"),
) -> None:
"""💫 Analyze the AST of Python source code."""
start_time = time.time()
Expand Down Expand Up @@ -344,6 +361,27 @@ def analyze( # noqa: PLR0912, PLR0913, PLR0915
"\n:person_shrugging: Cannot perform analysis due to invalid search directory.\n"
)
sys.exit(constants.markers.Non_Zero_Exit)
if store_result:
# creates an empty string for storing results temporarily
analysis_result = ""
analysis_file_dir = store_result / ANALYSIS_FILE
        # clears markdown file of results if it exists and new results are to be stored
if filesystem.confirm_valid_file(analysis_file_dir):
if not force:
if display:
database.display_results_frog_mouth(
analysis_file_dir, util.get_OS()
)
sys.exit(0)
else:
output.console.print(
"File already exists: use --force to recreate markdown directory."
)
sys.exit(constants.markers.Non_Zero_Exit)
else:
analysis_file_dir.write_text("")
# creates file if doesn't exist already
analysis_file_dir.touch()
# create the list of directories
valid_directories = [input_path]
# output the list of directories subject to checking
Expand All @@ -358,7 +396,9 @@ def analyze( # noqa: PLR0912, PLR0913, PLR0915
# iterate through and perform each of the checks
for current_check in check_list:
# extract the pattern for the current check
current_xpath_pattern = str(current_check[constants.checks.Check_Pattern]) # type: ignore
current_xpath_pattern = str(
current_check[constants.checks.Check_Pattern]
) # type: ignore
# extract the minimum and maximum values for the checks, if they exist
# note that this function will return None for a min or a max if
# that attribute does not exist inside of the current_check; importantly,
Expand All @@ -383,8 +423,7 @@ def analyze( # noqa: PLR0912, PLR0913, PLR0915
match_generator = pyastgrepsearch.search_python_files(
paths=valid_directories, expression=current_xpath_pattern, xpath2=True
)

# materialize a list from the generator of (potential) matches;
        # materialize a list from the generator of (potential) matches;
# note that this list will also contain an object that will
# indicate that the analysis completed for each located file
match_generator_list = list(match_generator)
Expand Down Expand Up @@ -419,6 +458,19 @@ def analyze( # noqa: PLR0912, PLR0913, PLR0915
f" {check_status_symbol} id: '{check_id}', name: '{check_name}'"
+ f", pattern: '{current_xpath_pattern_escape}', min={min_count}, max={max_count}"
)
if store_result:
# makes the check marks or x's appear as words instead for markdown
check_pass = (
"PASSED:"
if check_status_symbol == "[green]\u2713[/green]"
else "FAILED:"
)
# stores check type in a string to stored in file later
analysis_result += (
f"\n# {check_pass} **ID:** '{check_id}', **Name:** '{check_name}'"
+ f", **Pattern:** '{current_xpath_pattern_escape}', min={min_count}, max={max_count}\n\n"
)

# for each potential match, log and, if verbose model is enabled,
# display details about each of the matches
current_result_source = results.Source(
Expand Down Expand Up @@ -455,6 +507,9 @@ def analyze( # noqa: PLR0912, PLR0913, PLR0915
output.console.print(
f" {small_bullet_unicode} {file_name} - {len(matches_list)} matches"
)
if store_result:
# stores details of checks in string to be stored later
analysis_result += f" - {file_name} - {len(matches_list)} matches\n"
# extract the lines of source code for this file; note that all of
# these matches are organized for the same file and thus it is
# acceptable to extract the lines of the file from the first match
Expand Down Expand Up @@ -484,17 +539,30 @@ def analyze( # noqa: PLR0912, PLR0913, PLR0915
),
linematch_context=util.join_and_preserve(
current_match.file_lines,
max(0, position_end - constants.markers.Code_Context),
max(
0,
position_end - constants.markers.Code_Context,
),
position_end + constants.markers.Code_Context,
),
)
# save the entire current_match that is an instance of
# pyastgrepsearch.Match for verbose debugging output as needed
current_check_save._matches.append(current_match)
# add the match to the listing of matches for the current check
current_check_save.matches.append(current_match_for_current_check_save) # type: ignore
current_check_save.matches.append(
current_match_for_current_check_save
) # type: ignore
# add the current source to main object that contains a list of source
chasten_results_save.sources.append(current_result_source)
# add the amount of total matches in each check to the end of each checks output
output.console.print(f" = {len(match_generator_list)} total matches\n")
# calculate the final count of matches found
total_result = util.total_amount_passed(chasten_results_save, len(check_list))
# display checks passed, total amount of checks, and percentage of checks passed
output.console.print(
f":computer: {total_result[0]} / {total_result[1]} checks passed ({total_result[2]}%)\n"
)
# display all of the analysis results if verbose output is requested
output.print_analysis_details(chasten_results_save, verbose=verbose)
# save all of the results from this analysis
Expand All @@ -503,7 +571,7 @@ def analyze( # noqa: PLR0912, PLR0913, PLR0915
)
# output the name of the saved file if saving successfully took place
if saved_file_name:
output.console.print(f"\n:sparkles: Saved the file '{saved_file_name}'")
output.console.print(f":sparkles: Saved the file '{saved_file_name}'")
# confirm whether or not all of the checks passed
# and then display the appropriate diagnostic message
all_checks_passed = all(check_status_list)
Expand All @@ -512,10 +580,23 @@ def analyze( # noqa: PLR0912, PLR0913, PLR0915

if not all_checks_passed:
output.console.print("\n:sweat: At least one check did not pass.")
if store_result:
# writes results of analyze into a markdown file
analysis_file_dir.write_text(analysis_result, encoding="utf-8")
output.console.print(
f"\n:sparkles: Results saved in: {os.path.abspath(analysis_file_dir)}\n"
)
sys.exit(constants.markers.Non_Zero_Exit)
output.console.print(
f"\n:joy: All checks passed. Elapsed Time: {elapsed_time} seconds"
)
if store_result:
# writes results of analyze into a markdown file
result_path = os.path.abspath(analysis_file_dir)
analysis_file_dir.write_text(analysis_result, encoding="utf-8")
output.console.print(f"\n:sparkles: Results saved in: {result_path}\n")
if display:
database.display_results_frog_mouth(result_path, util.get_OS())


@cli.command()
Expand Down Expand Up @@ -582,7 +663,7 @@ def integrate( # noqa: PLR0913
if combined_json_file_name:
output.console.print(f"\n:sparkles: Saved the file '{combined_json_file_name}'")
# "flatten" (i.e., "un-nest") the now-saved combined JSON file using flatterer
# create the SQLite3 database and then configure the database for use in datasett
# create the SQLite3 database and then configure the database for use in datasette
combined_flattened_directory = filesystem.write_flattened_csv_and_database(
combined_json_file_name,
output_directory,
Expand Down
26 changes: 26 additions & 0 deletions chasten/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@

import importlib.metadata
import platform
import sys

from chasten import constants
from typing import List
Expand All @@ -27,6 +28,17 @@ def get_OS() -> str:
return OpSystem


def executable_name(executable_name: str, OpSystem: str = "Linux") -> str:
    """Return the full path of an executable inside the active virtual environment."""
    name = executable_name
    if OpSystem == "Windows":
        # Windows virtual environments place executables in /Scripts/ with .exe
        directory = "/Scripts/"
        name = name + ".exe"
    else:
        # POSIX virtual environments keep executables under /bin/
        directory = "/bin/"
    # anchor the path at the virtual environment's prefix
    return sys.prefix + directory + name


def get_symbol_boolean(answer: bool) -> str:
"""Produce a symbol-formatted version of a boolean value of True or False."""
if answer:
Expand Down Expand Up @@ -86,3 +98,17 @@ def is_url(url: str) -> bool:
url_reassembled += str(url_piece)
# determine if parsed and reconstructed url matches original
return str(parse_url(url)).lower() == url_reassembled.lower()


def total_amount_passed(analyze_result, count_total) -> tuple[int, int, float]:
    """Summarize analyze checks as (passed count, total count, percent passed)."""
    # collect the pass/fail flag recorded for every check source
    passed_flags = [source.check.passed for source in analyze_result.sources]
    passed_count = passed_flags.count(True)
    # guard against dividing by zero when no checks were configured
    if count_total == 0:
        return (0, 0, 0.0)
    percentage = (passed_count / count_total) * 100
    return (passed_count, count_total, percentage)
Loading

0 comments on commit e1b08c6

Please sign in to comment.