diff --git a/.github/workflows/linters.yml b/.github/workflows/linters.yml
index 8e314fb..ba727d8 100644
--- a/.github/workflows/linters.yml
+++ b/.github/workflows/linters.yml
@@ -52,7 +52,7 @@ jobs:
changed_files=$(git diff --diff-filter=d --name-only $(git merge-base HEAD origin/master) HEAD | grep '\.py$') || true
echo $changed_files
if [ -n "$changed_files" ]; then
- PYTHONPATH=. mypy $changed_files
+ PYTHONPATH=. mypy $changed_files --install-types --non-interactive --ignore-missing-imports
else
echo "No files changed, passing by"
exit 0
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..68bc17f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,160 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
diff --git a/.pylintrc b/.pylintrc
index ddcb06b..f300563 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -3,3 +3,9 @@ max-line-length=120
[MESSAGES CONTROL]
disable=E1101,R0913,W0718
+
+[DESIGN]
+max-attributes=10
+
+[CLASS]
+min-public-methods=1
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..18c9147
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,128 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the
+ overall community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or
+ advances of any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email
+ address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series
+of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or
+permanent ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.0, available at
+https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by [Mozilla's code of conduct
+enforcement ladder](https://github.com/mozilla/diversity).
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see the FAQ at
+https://www.contributor-covenant.org/faq. Translations are available at
+https://www.contributor-covenant.org/translations.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..2d53edd
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,51 @@
+# Contributing to Transcriptase
+We love your input! We want to make contributing to this project as easy and transparent as possible, whether it's:
+
+- Reporting a bug
+- Discussing the current state of the code
+- Submitting a fix
+- Proposing new features
+- Becoming a maintainer
+
+## We Develop with GitHub
+We use GitHub to host code, to track issues and feature requests, as well as accept pull requests.
+
+## We Use [Github Flow](https://guides.github.com/introduction/flow/index.html), So All Code Changes Happen Through Pull Requests
+Pull requests are the best way to propose changes to the codebase (we use [Github Flow](https://guides.github.com/introduction/flow/index.html)). We actively welcome your pull requests:
+
+1. Fork the repo and create your branch from `master`.
+2. If you've added code that should be tested, add tests.
+3. Always update the documentation.
+4. Ensure the test suite passes.
+5. Make sure your code passes the linters: pylint, mypy, and the black code formatter.
+6. Issue that pull request!
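+
+A typical flow might look like this (illustrative commands; replace the placeholders with your own fork and branch names):
+
+```bash
+git clone https://github.com/<your-username>/testrail_api_reporter.git
+cd testrail_api_reporter
+git checkout -b my-feature master
+# make your changes, add tests, update the documentation
+git push origin my-feature
+# then open a pull request against the upstream master branch
+```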
+
+## Any contributions you make will be under the MIT Software License
+In short, when you submit code changes, your submissions are understood to be under the same [MIT License](http://choosealicense.com/licenses/mit/) that covers the project. Feel free to contact the maintainers if that's a concern.
+
+## Report bugs using Github's [issues](https://github.com/wwakabobik/testrail_api_reporter/issues)
+We use GitHub issues to track public bugs. Report a bug by [opening a new issue](https://github.com/wwakabobik/testrail_api_reporter/issues/new); it's that easy!
+
+## Write bug reports with detail, background, and sample code
+[This is an example](http://stackoverflow.com/q/12488905/180626) of a nice bug report, and I think it's not a bad model. Here's [another example from Craig Hockenberry](http://www.openradar.me/11905408), the great app developer.
+
+**Great Bug Reports** tend to have:
+
+- A quick summary and/or background
+- Steps to reproduce
+ - Be specific!
+ - Give sample code if you can. [My stackoverflow question](http://stackoverflow.com/q/12488905/180626) includes sample code that *anyone* with a base R setup can run to reproduce what I was seeing
+- What you expected would happen
+- What actually happens
+- Notes (possibly including why you think this might be happening, or stuff you tried that didn't work)
+
+People *love* thorough bug reports. I'm not even kidding.
+
+## Use a Consistent Coding Style
+Always follow [PEP8](https://peps.python.org/pep-0008/). Check your code with mypy and pylint, and format it with the black code formatter. If possible, also apply the extended flake8 rules of the snowflake plugin.
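+
+For reference, a local lint run might look like this (illustrative commands; adjust paths and options to your environment):
+
+```bash
+pip install black pylint mypy flake8
+black --line-length 120 testrail_api_reporter
+pylint testrail_api_reporter
+PYTHONPATH=. mypy testrail_api_reporter --ignore-missing-imports
+```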
+
+## License
+By contributing, you agree that your contributions will be licensed under the project's MIT License.
+
+## References
+This document was adapted from the open-source contribution guidelines for the [Hitchhiker's Guide to Python](https://docs.python-guide.org/writing/style/).
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..ca31eb8
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,17 @@
+# Security Policy
+
+## Supported Versions
+
+The following versions of the project are currently supported with security updates:
+
+| Version | Supported |
+| ------- | ------------------ |
+| 0.0.1 | :white_check_mark: |
+
+
+## Reporting a Vulnerability
+
+In case of security issues, please report them at: https://github.com/wwakabobik/testrail_api_reporter/issues
+
+Some ignored/declined issues will be described below; please check them before creating new issues.
diff --git a/__init__.py b/__init__.py
index 90b7e87..e726325 100644
--- a/__init__.py
+++ b/__init__.py
@@ -1,12 +1,4 @@
+""" This module is used to import all the classes and functions from the package """
# Engines
-from .testrail_api_reporter.engines.at_coverage_reporter import ATCoverageReporter
-from .testrail_api_reporter.engines.results_reporter import TestRailResultsReporter
-from .testrail_api_reporter.engines.plotly_reporter import PlotlyReporter
-from .testrail_api_reporter.engines.case_backup import TCBackup
# Publishers
-from .testrail_api_reporter.publishers.confluence_sender import ConfluenceSender
-from .testrail_api_reporter.publishers.email_sender import EmailSender
-from .testrail_api_reporter.publishers.slack_sender import SlackSender
-from .testrail_api_reporter.publishers.gdrive_uploader import GoogleDriveUploader
# Utils
-from .testrail_api_reporter.utils.reporter_utils import upload_image, zip_file, delete_file
diff --git a/__main__.py b/__main__.py
index b38aefd..80aa362 100644
--- a/__main__.py
+++ b/__main__.py
@@ -1,12 +1,4 @@
+""" TestRail API Reporter """
# Engines
-from testrail_api_reporter.engines.at_coverage_reporter import ATCoverageReporter
-from testrail_api_reporter.engines.results_reporter import TestRailResultsReporter
-from testrail_api_reporter.engines.plotly_reporter import PlotlyReporter
-from testrail_api_reporter.engines.case_backup import TCBackup
# Publishers
-from testrail_api_reporter.publishers.confluence_sender import ConfluenceSender
-from testrail_api_reporter.publishers.email_sender import EmailSender
-from testrail_api_reporter.publishers.slack_sender import SlackSender
-from testrail_api_reporter.publishers.gdrive_uploader import GoogleDriveUploader
# Utils
-from testrail_api_reporter.utils.reporter_utils import upload_image, zip_file, delete_file
diff --git a/pull_request_template.md b/pull_request_template.md
new file mode 100644
index 0000000..b91d916
--- /dev/null
+++ b/pull_request_template.md
@@ -0,0 +1,11 @@
+### Code linters/formatters
+- [ ] PyCharm inspection passed on changed files (or any local IDE)
+- [ ] pylint inspection executed and passed on changed files
+- [ ] mypy inspection executed and passed on changed files
+- [ ] Black code formatter executed and passed on changed files
+
+### Documentation
+- [ ] Documentation / README.md has been updated
+
+### Other code changes
+- [ ] The soul is pure
diff --git a/pyproject.toml b/pyproject.toml
index 6ce372c..027097d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -18,6 +18,12 @@ extend-ignore = """
[tool.pylint]
max-line-length = 120
+[tool.pylint.'DESIGN']
+max-attributes = 15
+
+[tool.pylint.classes]
+min-public-methods = 1
+
[tool.mypy]
[[tool.mypy.overrides]]
diff --git a/requirements.txt b/requirements.txt
index 9275034..74f43f1 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,12 +1,12 @@
requests==2.31.0
xmltodict==0.13.0
-testrail-api==1.12.0
-plotly==5.16.1
+testrail-api==1.12.1
+plotly==5.17.0
psutil==5.9.5
atlassian-python-api==3.41.2
kaleido==0.2.1
httplib2==0.22.0
google-auth-httplib2==0.1.1
google-auth-oauthlib==1.1.0
-google-api-python-client==2.99.0
+google-api-python-client==2.100.0
oauth2client==4.1.3
diff --git a/setup.cfg b/setup.cfg
index d2dfd68..30e4824 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,3 +1,9 @@
[flake8]
max-line-length = 120
extend-ignore = Q000,WPS306,I001,I005,WPS229,D400,WPS317,S101,WPS507
+
+[pylint]
+max-attributes=15
+
+[pylint.CLASSES]
+min-public-methods=1
\ No newline at end of file
diff --git a/testrail_api_reporter/__init__.py b/testrail_api_reporter/__init__.py
index c005845..fce936b 100644
--- a/testrail_api_reporter/__init__.py
+++ b/testrail_api_reporter/__init__.py
@@ -1,10 +1,13 @@
+""" testrail_api_reporter package """
# Engines
from .engines.at_coverage_reporter import ATCoverageReporter
from .engines.plotly_reporter import PlotlyReporter
from .engines.results_reporter import TestRailResultsReporter
+
# Publishers
from .publishers.confluence_sender import ConfluenceSender
from .publishers.email_sender import EmailSender
from .publishers.slack_sender import SlackSender
+
# Utils
from .utils.reporter_utils import upload_image, delete_file, zip_file
diff --git a/testrail_api_reporter/engines/__init__.py b/testrail_api_reporter/engines/__init__.py
index 8c2f6d0..78f1681 100644
--- a/testrail_api_reporter/engines/__init__.py
+++ b/testrail_api_reporter/engines/__init__.py
@@ -1,5 +1,5 @@
-# Engines
+""" Engines package """
from .at_coverage_reporter import ATCoverageReporter
-from .results_reporter import TestRailResultsReporter
-from .plotly_reporter import PlotlyReporter
from .case_backup import TCBackup
+from .plotly_reporter import PlotlyReporter
+from .results_reporter import TestRailResultsReporter
diff --git a/testrail_api_reporter/engines/at_coverage_reporter.py b/testrail_api_reporter/engines/at_coverage_reporter.py
index f6125af..699fafb 100644
--- a/testrail_api_reporter/engines/at_coverage_reporter.py
+++ b/testrail_api_reporter/engines/at_coverage_reporter.py
@@ -4,7 +4,7 @@
from ..utils.case_stat import CaseStat
from ..utils.csv_parser import CSVParser
-from ..utils.reporter_utils import format_error
+from ..utils.reporter_utils import format_error, init_get_cases_process
class ATCoverageReporter:
@@ -135,12 +135,7 @@ def __get_all_cases(
"""
project_id = project_id if project_id else self.__project
suite_id = suite_id if suite_id else self.__suite_id
- debug = debug if debug is not None else self.__debug
- cases_list = []
- first_run = True
- criteria = None
- response = None
- retry = 0
+ debug, cases_list, first_run, criteria, response, retry = init_get_cases_process(debug, self.__debug)
while criteria is not None or first_run:
if first_run:
try:
@@ -265,9 +260,9 @@ def automation_state_report(
results[index].set_automated(results[index].get_automated() + 1)
else:
if case[platform["internal_name"]] == platform["na_code"]:
- results[index].set_na(results[index].get_na() + 1)
+ results[index].set_not_applicable(results[index].get_not_applicable() + 1)
results[index].set_not_automated(
- results[index].get_total() - results[index].get_automated() - results[index].get_na()
+ results[index].get_total() - results[index].get_automated() - results[index].get_not_applicable()
)
# save history data
filename = f"{filename_pattern}_{results[index].get_name().replace(' ', '_')}.csv"
diff --git a/testrail_api_reporter/engines/case_backup.py b/testrail_api_reporter/engines/case_backup.py
index d3ca90e..6d06f05 100644
--- a/testrail_api_reporter/engines/case_backup.py
+++ b/testrail_api_reporter/engines/case_backup.py
@@ -1,3 +1,4 @@
+""" TestRails backup module """
import os
from datetime import datetime
@@ -5,8 +6,19 @@
class TCBackup:
- def __init__(self, test_rails_url, test_rails_username, test_rails_password, test_rails_suite,
- cleanup_needed=True, backup_filename='backup.xml', cookie_name='cookie.txt', debug=True):
+ """TestRails backup class"""
+
+ def __init__(
+ self,
+ test_rails_url,
+ test_rails_username,
+ test_rails_password,
+ test_rails_suite,
+ cleanup_needed=True,
+ backup_filename="backup.xml",
+ cookie_name="cookie.txt",
+ debug=True,
+ ):
"""
General init
@@ -39,11 +51,13 @@ def __get_tr_cookie(self):
:return: None
"""
if self.__debug:
- print(f'\nGet cookie {self.__cookie_name} from {self.__url} for {self.__username}')
- os.popen(f'curl -c {self.__cookie_name} '
- f'-H "Content-Type: application/x-www-form-urlencoded" '
- f'-d "name={self.__username}&password={self.__password}" -X POST '
- f'"{self.__url}/index.php?/auth/login"').read()
+ print(f"\nGet cookie {self.__cookie_name} from {self.__url} for {self.__username}")
+ os.popen(
+ f"curl -c {self.__cookie_name} "
+ f'-H "Content-Type: application/x-www-form-urlencoded" '
+ f'-d "name={self.__username}&password={self.__password}" -X POST '
+ f'"{self.__url}/index.php?/auth/login"'
+ ).read()
def __download_tr_xml(self, filename=None, suite=None):
"""
@@ -58,10 +72,10 @@ def __download_tr_xml(self, filename=None, suite=None):
if not suite:
suite = self.__suite
if self.__debug:
- print(f'\nDownload XML {filename} from from {self.__url}')
- os.popen(f'curl -b {self.__cookie_name} '
- f'"{self.__url}/index.php?/suites/export/{suite}" '
- f'--output {filename}').read()
+ print(f"\nDownload XML {filename} from from {self.__url}")
+ os.popen(
+ f"curl -b {self.__cookie_name} " f'"{self.__url}/index.php?/suites/export/{suite}" ' f"--output {filename}"
+ ).read()
return filename
def get_backup(self, filename=None, suite=None):
diff --git a/testrail_api_reporter/engines/plotly_reporter.py b/testrail_api_reporter/engines/plotly_reporter.py
index 1421c43..67f2218 100644
--- a/testrail_api_reporter/engines/plotly_reporter.py
+++ b/testrail_api_reporter/engines/plotly_reporter.py
@@ -1,15 +1,14 @@
+""" Confluence sender module """
import plotly
from ..utils.csv_parser import CSVParser
# Set path to orca for plotly
-plotly.io.orca.config.executable = '/usr/local/bin/orca'
+plotly.io.orca.config.executable = "/usr/local/bin/orca"
class PlotlyReporter:
- """
- Class contains wrapper for generate reports (images) via plot charts
- """
+ """Class contains wrapper for generate reports (images) via plot charts"""
def __init__(self, pr_colors=None, pr_labels=None, ar_colors=None, lines=None, type_platforms=None, debug=True):
"""
@@ -26,14 +25,25 @@ def __init__(self, pr_colors=None, pr_labels=None, ar_colors=None, lines=None, t
if debug:
print("\nPlotly Reporter init")
if not type_platforms:
- raise "Platform types is not provided, Plotly Reporter cannot be initialized!"
+ raise ValueError("Platform types is not provided, Plotly Reporter cannot be initialized!")
self.__debug = debug
- self.__pr_labels = pr_labels if pr_labels else ['Low', 'Medium', 'High', 'Critical']
- self.__pr_colors = pr_colors if pr_colors else ['rgb(173,216,230)', 'rgb(34,139,34)', 'rgb(255,255,51)',
- 'rgb(255, 153, 153)']
- self.__ar_colors = ar_colors if ar_colors else ['rgb(255, 153, 153)', 'rgb(255,255,51)', 'rgb(34,139,34)',
- 'rgb(173,216,230)', 'rgb(65,105,225)', 'rgb(192, 192, 192)']
- self.__lines = lines if lines else ({'color': 'rgb(0,0,51)', 'width': 1.5})
+ self.__pr_labels = pr_labels if pr_labels else ["Low", "Medium", "High", "Critical"]
+ self.__pr_colors = (
+ pr_colors if pr_colors else ["rgb(173,216,230)", "rgb(34,139,34)", "rgb(255,255,51)", "rgb(255, 153, 153)"]
+ )
+ self.__ar_colors = (
+ ar_colors
+ if ar_colors
+ else [
+ "rgb(255, 153, 153)",
+ "rgb(255,255,51)",
+ "rgb(34,139,34)",
+ "rgb(173,216,230)",
+ "rgb(65,105,225)",
+ "rgb(192, 192, 192)",
+ ]
+ )
+ self.__lines = lines if lines else ({"color": "rgb(0,0,51)", "width": 1.5})
self.__type_platforms = type_platforms
def draw_automation_state_report(self, filename=None, reports=None, state_markers=None, debug=None):
@@ -44,10 +54,11 @@ def draw_automation_state_report(self, filename=None, reports=None, state_marker
:param reports: report with stacked distribution, usually it's output of
ATCoverageReporter().automation_state_report()
:param state_markers: list of dicts, contains settings for markers on chart like following:
- {'Automated': {'marker': dict(color='rgb(34,139,34)',
- line=dict(color='rgb(0,0,51)',
- width=1.5)),
- 'opacity': 0.6, 'textposition': 'auto'}
+ {
+ "marker": {"color": "rgb(34,139,34)", "line": {"color": "rgb(0,0,51)", "width": 1.5}},
+ "opacity": 0.6,
+ "textposition": "auto",
+ }
:param debug: debug output is enabled, may be True or False, optional
:return: none
"""
@@ -66,55 +77,70 @@ def draw_automation_state_report(self, filename=None, reports=None, state_marker
axis_x.append(report.get_name())
axis_y_automated.append(report.get_automated())
axis_y_not_automated.append(report.get_not_automated())
- axis_y_na.append(report.get_na())
+ axis_y_na.append(report.get_not_applicable())
if not state_markers:
- state_markers = {'Automated': {'marker': dict(color='rgb(34,139,34)',
- line=dict(color='rgb(0,0,51)', width=1.5)),
- 'opacity': 0.6, 'textposition': 'auto'},
- 'Not automated': {'marker': dict(color='rgb(255, 153, 153)',
- line=dict(color='rgb(0,0,51)', width=1.5)),
- 'opacity': 0.6, 'textposition': 'auto'},
- 'N/A': {'marker': dict(color='rgb(192, 192, 192)',
- line=dict(color='rgb(0,0,51)', width=1.5)),
- 'opacity': 0.6, 'textposition': 'auto'}}
+ state_markers = {
+ "Automated": {
+ "marker": {"color": "rgb(34,139,34)", "line": {"color": "rgb(0,0,51)", "width": 1.5}},
+ "opacity": 0.6,
+ "textposition": "auto",
+ },
+ "Not automated": {
+ "marker": {"color": "rgb(255, 153, 153)", "line": {"color": "rgb(0,0,51)", "width": 1.5}},
+ "opacity": 0.6,
+ "textposition": "auto",
+ },
+ "N/A": {
+ "marker": {"color": "rgb(192, 192, 192)", "line": {"color": "rgb(0,0,51)", "width": 1.5}},
+ "opacity": 0.6,
+ "textposition": "auto",
+ },
+ }
- data.append(plotly.graph_objs.Bar(x=axis_x,
- y=axis_y_automated,
- text=axis_y_automated,
- name='Automated',
- textposition=state_markers['Automated']['textposition'],
- marker=state_markers['Automated']['marker'],
- opacity=state_markers['Automated']['opacity']
- )
- )
- data.append(plotly.graph_objs.Bar(x=axis_x,
- y=axis_y_not_automated,
- text=axis_y_not_automated,
- name='Not automated',
- textposition=state_markers['Not automated']['textposition'],
- marker=state_markers['Not automated']['marker'],
- opacity=state_markers['Not automated']['opacity']
- )
- )
- data.append(plotly.graph_objs.Bar(x=axis_x,
- y=axis_y_na,
- text=axis_y_na,
- name='N/A',
- textposition=state_markers['N/A']['textposition'],
- marker=state_markers['N/A']['marker'],
- opacity=state_markers['N/A']['opacity']
- )
- )
+ data.append(
+ plotly.graph_objs.Bar(
+ x=axis_x,
+ y=axis_y_automated,
+ text=axis_y_automated,
+ name="Automated",
+ textposition=state_markers["Automated"]["textposition"],
+ marker=state_markers["Automated"]["marker"],
+ opacity=state_markers["Automated"]["opacity"],
+ )
+ )
+ data.append(
+ plotly.graph_objs.Bar(
+ x=axis_x,
+ y=axis_y_not_automated,
+ text=axis_y_not_automated,
+ name="Not automated",
+ textposition=state_markers["Not automated"]["textposition"],
+ marker=state_markers["Not automated"]["marker"],
+ opacity=state_markers["Not automated"]["opacity"],
+ )
+ )
+ data.append(
+ plotly.graph_objs.Bar(
+ x=axis_x,
+ y=axis_y_na,
+ text=axis_y_na,
+ name="N/A",
+ textposition=state_markers["N/A"]["textposition"],
+ marker=state_markers["N/A"]["marker"],
+ opacity=state_markers["N/A"]["opacity"],
+ )
+ )
- layout = plotly.graph_objs.Layout(barmode='stack')
+ layout = plotly.graph_objs.Layout(barmode="stack")
if debug:
- print(f'Drawing chart to file {filename}')
+ print(f"Drawing chart to file {filename}")
fig = plotly.graph_objs.Figure(data=data, layout=layout)
plotly.io.write_image(fig, filename)
- def draw_test_case_by_priority(self, filename=None, values=None, pr_labels=None, pr_colors=None,
- lines=None, debug=None):
+ def draw_test_case_by_priority(
+ self, filename=None, values=None, pr_labels=None, pr_colors=None, lines=None, debug=None
+ ):
"""
Generates an image file (png) with priority distribution (pie chart)
@@ -136,22 +162,21 @@ def draw_test_case_by_priority(self, filename=None, values=None, pr_labels=None,
debug = debug if debug is not None else self.__debug
lines = lines if lines else self.__lines
fig = {
- 'data': [
+ "data": [
{
- 'values': values,
- 'labels': pr_labels,
- 'domain': {'column': 0},
- 'name': 'Test cases by priority',
- 'hoverinfo': 'label+percent+name',
- 'textinfo': 'value+percent',
- 'type': 'pie',
- 'marker': {'colors': pr_colors,
- 'line': lines},
+ "values": values,
+ "labels": pr_labels,
+ "domain": {"column": 0},
+ "name": "Test cases by priority",
+ "hoverinfo": "label+percent+name",
+ "textinfo": "value+percent",
+ "type": "pie",
+ "marker": {"colors": pr_colors, "line": lines},
},
]
}
if debug:
- print(f'Drawing chart to file {filename}')
+ print(f"Drawing chart to file {filename}")
plotly.io.write_image(fig, filename)
def draw_test_case_by_area(self, filename=None, cases=None, ar_colors=None, lines=None, debug=None):
@@ -182,72 +207,83 @@ def draw_test_case_by_area(self, filename=None, cases=None, ar_colors=None, line
ar_values.append(case.get_total())
fig = {
- 'data': [
+ "data": [
{
- 'values': ar_values,
- 'labels': ar_labels,
- 'domain': {'column': 0},
- 'name': 'Test cases by area',
- 'hoverinfo': 'label+percent+name',
- 'textinfo': 'value+percent',
- 'type': 'pie',
- 'marker': {
- 'colors': ar_colors,
- 'line': lines},
+ "values": ar_values,
+ "labels": ar_labels,
+ "domain": {"column": 0},
+ "name": "Test cases by area",
+ "hoverinfo": "label+percent+name",
+ "textinfo": "value+percent",
+ "type": "pie",
+ "marker": {"colors": ar_colors, "line": lines},
},
]
}
if debug:
- print(f'Drawing chart to file {filename}')
+ print(f"Drawing chart to file {filename}")
plotly.io.write_image(fig, filename)
- def draw_history_state_chart(self, chart_name: str, history_data=None, filename=None, trace1_decor=None,
- trace2_decor=None, filename_pattern='current_automation', reverse_traces=False,
- debug=None):
+ def draw_history_state_chart(
+ self,
+ chart_name: str,
+ history_data=None,
+ filename=None,
+ trace1_decor=None,
+ trace2_decor=None,
+ filename_pattern="current_automation",
+ reverse_traces=False,
+ debug=None,
+ ):
"""
Generates image file (png) with state distribution (staked line chart)
:param chart_name: chart name, string, required
:param history_data: history data, previously stored in CSV, by default it is CSVParser().load_history_data()
:param filename: output filename for image, png expected, optional
- :param trace1_decor: decoration for distribution stack (1), dict like {'fill': 'tonexty',
- 'line': dict(width=0.5,
- color='rgb(255, 153, 153)')}
- :param trace2_decor: decoration for distribution stack (2), dict like {'fill': 'tozeroy',
- 'line': dict(width=0.5,
- color='rgb(255, 153, 153)')}
+ :param trace1_decor: decoration for distribution stack (1), dict like {"fill": "tonexty",
+ "line": {"width": 0.5,
+ "color": "rgb(255, 153, 153)"},
+ "mode": "none"}
+ :param trace2_decor: decoration for distribution stack (2), dict like {"fill": "tozeroy",
+ "line": {"width": 0.5,
+ "color": "rgb(34,139,34)"},
+ "mode": "none"}
:param filename_pattern: pattern, what is prefix will be for filename, string, optional
:param reverse_traces: reverse traces order
:param debug: debug output is enabled, may be True or False, optional
:return: none
"""
if not chart_name:
- raise "No chart name is provided, report aborted!"
+ raise ValueError("No chart name is provided, report aborted!")
debug = debug if debug is not None else self.__debug
filename = filename if filename else f"{filename_pattern}_{chart_name.replace(' ', '_')}.csv"
- trace1_decor = trace1_decor if trace1_decor else {'fill': 'tonexty',
- 'line': dict(width=0.5, color='rgb(255, 153, 153)'),
- 'mode': 'none'}
- trace2_decor = trace2_decor if trace2_decor else {'fill': 'tozeroy',
- 'line': dict(width=0.5, color='rgb(34,139,34)'),
- 'mode': 'none'}
+ trace1_decor = (
+ trace1_decor
+ if trace1_decor
+ else {"fill": "tonexty", "line": {"width": 0.5, "color": "rgb(255, 153, 153)"}, "mode": "none"}
+ )
+ trace2_decor = (
+ trace2_decor
+ if trace2_decor
+ else {"fill": "tozeroy", "line": {"width": 0.5, "color": "rgb(34,139,34)"}, "mode": "none"}
+ )
history_data = history_data if history_data else CSVParser(debug=debug, filename=filename).load_history_data()
trace1 = plotly.graph_objs.Scatter(
x=history_data[0],
y=history_data[1],
- fill=trace1_decor['fill'],
- name='Total',
- line=trace1_decor['line'],
+ fill=trace1_decor["fill"],
+ name="Total",
+ line=trace1_decor["line"],
)
trace2 = plotly.graph_objs.Scatter(
x=history_data[0],
y=history_data[2],
-
- fill=trace2_decor['fill'],
- name='Automated',
- line=trace2_decor['line'],
+ fill=trace2_decor["fill"],
+ name="Automated",
+ line=trace2_decor["line"],
)
fig = plotly.graph_objs.Figure()
@@ -257,18 +293,24 @@ def draw_history_state_chart(self, chart_name: str, history_data=None, filename=
else:
fig.add_trace(trace1)
fig.add_trace(trace2)
- fig.update_layout(yaxis=dict(nticks=30), autotypenumbers="convert types")
- fig.update_yaxes(range=[0, max([eval(i) for i in history_data[1]])])
+ fig.update_layout(yaxis={"nticks": 30}, autotypenumbers="convert types")
+ fig.update_yaxes(range=[0, max((eval(i) for i in history_data[1]))]) # pylint: disable=eval-used
- filename = f'{filename[:-3]}png'
+ filename = f"{filename[:-3]}png"
if debug:
- print(f'Drawing chart to file {filename}')
+ print(f"Drawing chart to file {filename}")
plotly.io.write_image(fig, filename)
return filename
- def draw_history_type_chart(self, filename=None, type_platforms=None,
- history_filename_pattern='current_area_distribution', ar_colors=None, lines=None,
- debug=None):
+ def draw_history_type_chart(
+ self,
+ filename=None,
+ type_platforms=None,
+ history_filename_pattern="current_area_distribution",
+ ar_colors=None,
+ lines=None,
+ debug=None,
+ ):
"""
Generates an image file (png) with state distribution (staked line chart)
@@ -290,17 +332,19 @@ def draw_history_type_chart(self, filename=None, type_platforms=None,
debug = debug if debug is not None else self.__debug
index = 0
for platform in type_platforms:
- type_name = platform['name']
+ type_name = platform["name"]
history_filename = f"{history_filename_pattern}_{type_name.replace(' ', '_')}.csv"
history_data = CSVParser(debug=debug, filename=history_filename).load_history_data()
- data.append(plotly.graph_objs.Scatter(
- x=history_data[0],
- y=history_data[1],
- name=type_name,
- marker=dict(color=ar_colors[index], line=lines)
- ))
+ data.append(
+ plotly.graph_objs.Scatter(
+ x=history_data[0],
+ y=history_data[1],
+ name=type_name,
+ marker={"color": ar_colors[index], "line": lines},
+ )
+ )
index += 1
- fig = {'data': data}
+ fig = {"data": data}
if debug:
- print(f'Drawing chart to file {filename}')
+ print(f"Drawing chart to file {filename}")
plotly.io.write_image(fig, filename)
diff --git a/testrail_api_reporter/engines/results_reporter.py b/testrail_api_reporter/engines/results_reporter.py
index a1efdcb..63e2b0b 100644
--- a/testrail_api_reporter/engines/results_reporter.py
+++ b/testrail_api_reporter/engines/results_reporter.py
@@ -6,7 +6,7 @@
from testrail_api import TestRailAPI
from xmltodict import parse
-from ..utils.reporter_utils import format_error
+from ..utils.reporter_utils import format_error, init_get_cases_process
class TestRailResultsReporter:
@@ -200,6 +200,15 @@ def __enrich_with_tc_num(self, xml_dict_list, tc_dict_list, debug=None):
print(f"{len(enriched_list)} test results were prepared for send.")
return enriched_list
+ @staticmethod
+ def ___handle_read_timeout(retry, retries, debug, error):
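+ """Retry helper: returns (retry, True) while attempts remain, otherwise raises ValueError."""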
+ if retry < retries:
+ retry += 1
+ if debug:
+ print(f"Timeout error, retrying {retry}/{retries}...")
+ return retry, True
+ raise ValueError(f"Get cases failed. Please validate your settings!nError{format_error(error)}") from error
+
# pylint: disable=R0912
def __get_all_auto_cases(self, retries=3, debug=None):
"""
@@ -209,25 +218,15 @@ def __get_all_auto_cases(self, retries=3, debug=None):
:param debug: debug output is enabled, may be True or False, optional
:return: list of dict with cases
"""
- debug = debug if debug is not None else self.__debug
- cases_list = []
- first_run = True
- criteria = None
- response = None
- retry = 0
+ debug, cases_list, first_run, criteria, response, retry = init_get_cases_process(debug, self.__debug)
while criteria is not None or first_run:
if first_run:
try:
response = self.__api.cases.get_cases(project_id=self.__project_id, suite_id=self.__suite_id)
except ReadTimeout as error:
- if retry < retries:
- retry += 1
- if debug:
- print(f"Timeout error, retrying {retry}/{retries}...")
+ retry, should_continue = self.___handle_read_timeout(retry, retries, debug, error)
+ if should_continue:
continue
- raise ValueError(
- f"Get cases failed. Please validate your settings!\nError{format_error(error)}"
- ) from error
except Exception as error:
print(f"Get cases failed. Please validate your settings!\nError{format_error(error)}")
self.__self_check()
@@ -241,14 +240,9 @@ def __get_all_auto_cases(self, retries=3, debug=None):
project_id=self.__project_id, suite_id=self.__suite_id, offset=offset
)
except ReadTimeout as error:
- if retry < retries:
- retry += 1
- if debug:
- print(f"Timeout error, retrying {retry}/{retries}...")
+ retry, should_continue = self.___handle_read_timeout(retry, retries, debug, error)
+ if should_continue:
continue
- raise ValueError(
- f"Get cases failed. Please validate your settings!\nError{format_error(error)}"
- ) from error
retry = 0
cases = response["cases"]
for item in cases:
@@ -497,6 +491,7 @@ def __search_for_run_by_name(self, title=None):
def __delete_run(self, run_id=None):
"""
Delete run
+
:param run_id: run id, integer
:return: True if deleted, False in case of error
"""
@@ -538,6 +533,7 @@ def __add_run(self, title, cases_list=None, include_all=False, debug=None):
def __add_results(self, run_id=None, results=None):
"""
Add results for test cases to TestRail
+
:param run_id: run id
:param results: payload (list of dicts)
:return: run id or False in case of error
@@ -580,6 +576,7 @@ def __prepare_runs(self, cases=None, title=None, run_id=None, run_name=None, del
def __close_run(self, title=None, run_id=None, debug=None):
"""
Closes run
+
:param title: title of test run
:param run_id: run id, integer
:param debug: debug output is enabled, may be True or False, optional
diff --git a/testrail_api_reporter/publishers/__init__.py b/testrail_api_reporter/publishers/__init__.py
index 32b98e9..3e5ca54 100644
--- a/testrail_api_reporter/publishers/__init__.py
+++ b/testrail_api_reporter/publishers/__init__.py
@@ -1,4 +1,4 @@
-# Publishers
+""" Publishers package. """
from .confluence_sender import ConfluenceSender
from .email_sender import EmailSender
from .gdrive_uploader import GoogleDriveUploader
diff --git a/testrail_api_reporter/publishers/confluence_sender.py b/testrail_api_reporter/publishers/confluence_sender.py
index 05aecbe..117b698 100644
--- a/testrail_api_reporter/publishers/confluence_sender.py
+++ b/testrail_api_reporter/publishers/confluence_sender.py
@@ -1,15 +1,23 @@
+""" Confluence sender module """
from atlassian import Confluence
from ..engines.plotly_reporter import PlotlyReporter
class ConfluenceSender:
- """
- Class contains wrapper for generate and send reports to Confluence
- """
-
- def __init__(self, url=None, username=None, password=None, confluence_page=None, automation_platforms=None,
- type_platforms=None, plotly_engine=None, debug=True):
+ """Class contains wrapper for generate and send reports to Confluence"""
+
+ def __init__(
+ self,
+ url=None,
+ username=None,
+ password=None,
+ confluence_page=None,
+ automation_platforms=None,
+ type_platforms=None,
+ plotly_engine=None,
+ debug=True,
+ ):
"""
General init
@@ -29,15 +37,14 @@ def __init__(self, url=None, username=None, password=None, confluence_page=None,
print("\nConfluence Reporter init")
if url is None or username is None or password is None:
raise ValueError("No confluence credentials are provided!")
- else:
- self.__confluence = Confluence(url=url, username=username, password=password)
+ self.__confluence = Confluence(url=url, username=username, password=password)
self.__confluence_page = confluence_page # confluence page may vary for each report if needed, None is possible
self.__debug = debug
self.__plotly = plotly_engine if plotly_engine else PlotlyReporter(type_platforms=type_platforms, debug=debug)
self.__automation_platforms = automation_platforms # should be passed with specific TestRails sections
self.__type_platforms = type_platforms
- def automation_state(self, confluence_page=None, reports=None, filename='current_automation.png', debug=None):
+ def automation_state(self, confluence_page=None, reports=None, filename="current_automation.png", debug=None):
"""
Generates and sends (attach) an image file (png) to confluence page with staked distribution (bar chart)
with automation type coverage (or similar).
@@ -55,10 +62,11 @@ def automation_state(self, confluence_page=None, reports=None, filename='current
raise ValueError("No TestRail reports are provided, report aborted!")
debug = debug if debug is not None else self.__debug
self.__plotly.draw_automation_state_report(reports=reports, filename=filename, debug=debug)
- self.__confluence.attach_file(filename, page_id=confluence_page, title='current_automation')
+ self.__confluence.attach_file(filename, page_id=confluence_page, title="current_automation")
- def test_case_priority_distribution(self, confluence_page=None, values=None,
- filename='current_priority_distribution.png', debug=None):
+ def test_case_priority_distribution(
+ self, confluence_page=None, values=None, filename="current_priority_distribution.png", debug=None
+ ):
"""
Generates and sends (attach) an image file (png) to confluence page with priority distribution (pie chart)
@@ -75,10 +83,11 @@ def test_case_priority_distribution(self, confluence_page=None, values=None,
raise ValueError("No TestRail reports are provided, report aborted!")
debug = debug if debug is not None else self.__debug
self.__plotly.draw_test_case_by_priority(values=values, filename=filename, debug=debug)
- self.__confluence.attach_file(filename, page_id=confluence_page, title='current_priority_distribution')
+ self.__confluence.attach_file(filename, page_id=confluence_page, title="current_priority_distribution")
- def test_case_area_distribution(self, confluence_page=None, cases=None, filename='current_area_distribution.png',
- debug=None):
+ def test_case_area_distribution(
+ self, confluence_page=None, cases=None, filename="current_area_distribution.png", debug=None
+ ):
"""
Generates and sends (attach) an image file (png) to confluence page with sections distribution (pie chart)
@@ -95,7 +104,7 @@ def test_case_area_distribution(self, confluence_page=None, cases=None, filename
raise ValueError("No TestRail cases are provided, report aborted!")
debug = debug if debug is not None else self.__debug
self.__plotly.draw_test_case_by_area(cases=cases, filename=filename, debug=debug)
- self.__confluence.attach_file(filename, page_id=confluence_page, title='current_area_distribution')
+ self.__confluence.attach_file(filename, page_id=confluence_page, title="current_area_distribution")
def history_state_chart(self, confluence_page=None, automation_platforms=None, debug=None):
"""
@@ -117,11 +126,12 @@ def history_state_chart(self, confluence_page=None, automation_platforms=None, d
for item in automation_platforms:
if debug:
print(f"generating chart for {item['name']}")
- filename = self.__plotly.draw_history_state_chart(debug=debug, chart_name=item['name'])
+ filename = self.__plotly.draw_history_state_chart(debug=debug, chart_name=item["name"])
self.__confluence.attach_file(filename, page_id=confluence_page, title=filename[:-4])
- def history_type_chart(self, confluence_page=None, type_platforms=None,
- filename='current_area_distribution_history.png', debug=None):
+ def history_type_chart(
+ self, confluence_page=None, type_platforms=None, filename="current_area_distribution_history.png", debug=None
+ ):
"""
Generates and sends (attach) an image file (png) to confluence page with state distribution (staked line chart)
@@ -139,10 +149,36 @@ def history_type_chart(self, confluence_page=None, type_platforms=None,
if not type_platforms:
raise ValueError("No type platforms specified, report aborted!")
self.__plotly.draw_history_type_chart(debug=debug, type_platforms=type_platforms, filename=filename)
- self.__confluence.attach_file(filename, page_id=confluence_page, title='current_area_distribution_history')
+ self.__confluence.attach_file(filename, page_id=confluence_page, title="current_area_distribution_history")
+
+ def generate_report(
+ self,
+ confluence_page=None,
+ reports=None,
+ cases=None,
+ values=None,
+ type_platforms=None,
+ automation_platforms=None,
+ debug=None,
+ ):
+ """
+ Generates all coverage reports (state, priority, area and history charts) and sends (attaches) them to the confluence page
- def generate_report(self, confluence_page=None, reports=None, cases=None, values=None, type_platforms=None,
- automation_platforms=None, debug=None):
+ :param confluence_page: confluence page short URL, string - only last part of it (it's id or str), optional
+ :param reports: report with stacked distribution, usually it's output of
+ ATCoverageReporter().automation_state_report()
+ :param cases: list of cases to draw report with area distribution, usually it's output from
+ ATCoverageReporter().test_case_by_type()
+ :param values: list of values to draw report with priority distribution, usually it's output from
+ ATCoverageReporter().test_case_by_priority()
+ :param type_platforms: list of dicts, with sections ids, where dict = {'name': 'UI',
+ 'sections': [16276]}, optional
+ :param automation_platforms: list of dicts of automation platforms, dict = {'name': 'Desktop Chrome',
+ 'internal_name': 'type_id',
+ 'sections': [16276]}, optional
+ :param debug: debug output is enabled, may be True or False, optional
+ :return: none
+ """
confluence_page = confluence_page if confluence_page else self.__confluence_page
if not confluence_page:
raise ValueError("No confluence page is provided, report aborted!")
diff --git a/testrail_api_reporter/publishers/email_sender.py b/testrail_api_reporter/publishers/email_sender.py
index 3eaddd2..002201c 100644
--- a/testrail_api_reporter/publishers/email_sender.py
+++ b/testrail_api_reporter/publishers/email_sender.py
@@ -1,3 +1,4 @@
+""" Email sender module """
import base64
import os
import smtplib
@@ -10,10 +11,12 @@
from apiclient import discovery
from oauth2client import client, tools, file
-from ..utils.reporter_utils import format_error
+from ..utils.reporter_utils import format_error, check_captions_and_files
class EmailSender:
+ """Email sender class"""
+
def __init__(self, email=None, password=None, server_smtp=None, server_port=None, gmail_token=None, debug=True):
"""
General init
@@ -30,13 +33,13 @@ def __init__(self, email=None, password=None, server_smtp=None, server_port=None
self.__debug = debug
self.__method = None
if email is not None and password is not None and server_smtp is not None and server_port is not None:
- self.__method = 'regular'
+ self.__method = "regular"
elif gmail_token and email:
- gmail_token = f'{os.getcwd()}/{gmail_token}' if not os.path.exists(gmail_token) else gmail_token
+ gmail_token = f"{os.getcwd()}/{gmail_token}" if not os.path.exists(gmail_token) else gmail_token
if os.path.exists(gmail_token):
- self.__method = 'gmail'
- self.__gmail_scopes = 'https://www.googleapis.com/auth/gmail.send'
- self.__gmail_app_name = 'Gmail API Python Send Email'
+ self.__method = "gmail"
+ self.__gmail_scopes = "https://www.googleapis.com/auth/gmail.send"
+ self.__gmail_app_name = "Gmail API Python Send Email"
if not self.__method:
raise ValueError("No email credentials are provided, aborted!")
self.__email = email
@@ -45,9 +48,19 @@ def __init__(self, email=None, password=None, server_smtp=None, server_port=None
self.__server_port = server_port
self.__gmail_token = gmail_token
- def send_message(self, files=None, captions=None, image_width="400px", title=None, timestamp=None, recipients=None,
- method=None, custom_message=None,
- custom_folder=os.path.join(os.path.expanduser('~'), '.credentials'), debug=None):
+ def send_message( # pylint: disable=too-many-branches
+ self,
+ files=None,
+ captions=None,
+ image_width="400px",
+ title=None,
+ timestamp=None,
+ recipients=None,
+ method=None,
+ custom_message=None,
+ custom_folder=os.path.join(os.path.expanduser("~"), ".credentials"),
+ debug=None,
+ ):
"""
Send email to recipients with report (with attached images)
@@ -73,31 +86,32 @@ def send_message(self, files=None, captions=None, image_width="400px", title=Non
elif not isinstance(recipients, list) and not custom_message:
raise ValueError("Wrong list of recipients is provided, aborted!")
debug = debug if debug is not None else self.__debug
- if not isinstance(captions, list) or custom_message:
- if debug:
- print("Caption list is empty, no legend will be displayed")
- captions = None
- elif len(captions) != len(files):
+ captions = check_captions_and_files(captions=captions, files=files, debug=debug)
+ if not captions or custom_message:
if debug:
- print(f"Caption and file lists are not the same length {len(captions)} != {len(files)} thus "
- f"no legend will be displayed")
- captions = None
+ print("Caption list override by custom message, no legend will be displayed")
timestamp = timestamp if timestamp else datetime.now().strftime("%Y-%m-%d")
title = title if title else f"Test development & automation coverage report for {timestamp}"
# Connect and send message
if not custom_message:
- message = self.__prepare_payload(files=files, captions=captions, image_width=image_width, title=title,
- recipients=recipients, method=method)
+ message = self.__prepare_payload(
+ files=files,
+ captions=captions,
+ image_width=image_width,
+ title=title,
+ recipients=recipients,
+ method=method,
+ )
else:
if debug:
print("Ignoring payload preparations, assuming user custom message is right")
message = custom_message
- if method == 'regular':
+ if method == "regular":
connection = self.__connect_to_server()
self.__send_to_server(connection=connection, recipients=recipients, message=message)
self.__disconnect_from_server(connection=connection)
- elif method == 'gmail':
+ elif method == "gmail":
self.__gmail_send_message(message=message, custom_folder=custom_folder)
if debug:
print("Email sent!")
@@ -109,14 +123,14 @@ def __connect_to_server(self):
:return: connection handle ( smtplib.SMTP )
"""
if self.__debug:
- print(f'Connecting to custom mail server {self.__server_smtp}:{self.__server_port} using {self.__email}')
+ print(f"Connecting to custom mail server {self.__server_smtp}:{self.__server_port} using {self.__email}")
try:
connection = smtplib.SMTP(self.__server_smtp, self.__server_port)
connection.ehlo()
connection.starttls()
connection.login(self.__email, self.__password)
- except Exception as e:
- raise ValueError(f"Can't login to mail!\nError{format_error(e)}")
+ except Exception as error:
+ raise ValueError(f"Can't login to mail!\nError{format_error(error)}") from error
return connection
def __send_to_server(self, connection, recipients, message):
@@ -129,11 +143,11 @@ def __send_to_server(self, connection, recipients, message):
:return: none
"""
if self.__debug:
- print(f'Sending mail from {self.__email} to {recipients}')
+ print(f"Sending mail from {self.__email} to {recipients}")
try:
connection.sendmail(from_addr=self.__email, to_addrs=recipients, msg=message.as_string())
- except Exception as e:
- raise ValueError(f"Can't send mail!\nError{format_error(e)}")
+ except Exception as error:
+ raise ValueError(f"Can't send mail!\nError{format_error(error)}") from error
def __disconnect_from_server(self, connection):
"""
@@ -143,11 +157,11 @@ def __disconnect_from_server(self, connection):
:return: none
"""
if self.__debug:
- print(f'Disconnecting from custom server')
+ print("Disconnecting from custom server")
try:
connection.quit()
- except Exception as e:
- raise ValueError(f"Can't close connection!\nError{format_error(e)}")
+ except Exception as error:
+ raise ValueError(f"Can't close connection!\nError{format_error(error)}") from error
def __prepare_payload(self, files, image_width, title, recipients, captions=None, method=None):
"""
@@ -161,32 +175,36 @@ def __prepare_payload(self, files, image_width, title, recipients, captions=None
:param method: specify which method is used to set proper MIMEMultipart type ('gmail' or not)
:return: formatted multipart message
"""
- message = MIMEMultipart("alternative") if method != 'gmail' else MIMEMultipart()
+ message = MIMEMultipart("alternative") if method != "gmail" else MIMEMultipart()
message["Subject"] = title
message["From"] = self.__email
message["To"] = ", ".join(recipients)
- html = '<html><body><table border="0" cellpadding="0" cellspacing="0" align="center">' \
- f'<tr><th><h1>{title}</h1></th></tr>' \
- f'<tr><th style="font-size: 14px;">{title}</th></tr>' \
- '<tr align="center">'
+ html = (
+ '<html><body><table border="0" cellpadding="0" cellspacing="0" align="center">'
+ f"<tr><th><h1>{title}</h1></th></tr>"
+ f'<tr><th style="font-size: 14px;">{title}</th></tr>'
+ '<tr align="center">'
+ )
for j, val in enumerate(files):
- with open('{}'.format(val), "rb") as attachment:
+ with open(f"{val}", "rb") as attachment:
mime_image = MIMEImage(attachment.read())
# Define the image's ID with counter as you will reference it.
- mime_image.add_header('Content-ID', f'<image{j}>')
- mime_image.add_header('Content-Disposition', f'attachment; filename= {val}')
+ mime_image.add_header("Content-ID", f"")
+ mime_image.add_header("Content-Disposition", f"attachment; filename= {val}")
message.attach(mime_image)
# add to body
if captions:
html = f'{html}<td align="center">{captions[j]}</td>'
- html = f'{html}<td align="center"><img src="cid:image{j}" width="{image_width}"></td>'
- html = f'{html}</tr><tr>'
+ html = (
+ f'{html}<td align="center"><img src="cid:image{j}" width="{image_width}"></td></tr><tr>'
+ )
+ html = f"{html}</tr></table></body></html>"
message.attach(MIMEText(html, "html"))
return message
- def __gmail_get_credential_path(self, custom_folder=os.path.join(os.path.expanduser('~'), '.credentials')):
+ def __gmail_get_credential_path(self, custom_folder=os.path.join(os.path.expanduser("~"), ".credentials")):
"""
Service function target Google OAuth credentials path to storage
@@ -199,13 +217,13 @@ def __gmail_get_credential_path(self, custom_folder=os.path.join(os.path.expandu
if self.__debug:
print(f"No credential directory found, creating new one here: {custom_folder}")
os.makedirs(custom_folder, exist_ok=True)
- except OSError as e:
+ except OSError as error:
if self.__debug:
- print(f"Original Error{format_error(e)}")
- credential_path = os.path.join(custom_folder, 'gmail-python-email-send.json')
+ print(f"Original Error{format_error(error)}")
+ credential_path = os.path.join(custom_folder, "gmail-python-email-send.json")
return credential_path
- def __gmail_get_credentials(self, custom_folder=os.path.join(os.path.expanduser('~'), '.credentials')):
+ def __gmail_get_credentials(self, custom_folder=os.path.join(os.path.expanduser("~"), ".credentials")):
"""
Service function to get and convert Google OAuth credential from client_id and client_secret
@@ -217,27 +235,31 @@ def __gmail_get_credentials(self, custom_folder=os.path.join(os.path.expanduser(
print(f"Obtaining GMail credentials from {credential_path}")
try:
store = file.Storage(credential_path)
- except Exception as e:
- raise ValueError(f"Couldn't open storage\nError{format_error(e)}")
+ except Exception as error:
+ raise ValueError(f"Couldn't open storage\nError{format_error(error)}") from error
try:
credentials = store.get()
- except Exception as e:
- raise ValueError(f"Obtaining of credentials unexpectedly failed\nError{format_error(e)}")
+ except Exception as error:
+ raise ValueError(f"Obtaining of credentials unexpectedly failed\nError{format_error(error)}") from error
if not credentials or credentials.invalid:
try:
flow = client.flow_from_clientsecrets(self.__gmail_token, self.__gmail_scopes)
- except Exception as e:
- raise ValueError(f"Couldn't obtain new client secrets from Google OAuth\nError{format_error(e)}")
+ except Exception as error:
+ raise ValueError(
+ f"Couldn't obtain new client secrets from Google OAuth\nError{format_error(error)}"
+ ) from error
flow.user_agent = self.__gmail_app_name
try:
credentials = tools.run_flow(flow, store)
- except Exception as e:
- raise ValueError(f"Couldn't obtain new credential from Google OAuth\nError{format_error(e)}")
+ except Exception as error:
+ raise ValueError(
+ f"Couldn't obtain new credential from Google OAuth\nError{format_error(error)}"
+ ) from error
if self.__debug:
- print('Credentials stored to ' + credential_path)
+ print("Credentials stored to " + credential_path)
return credentials
- def __gmail_send_message(self, message, custom_folder=os.path.join(os.path.expanduser('~'), '.credentials')):
+ def __gmail_send_message(self, message, custom_folder=os.path.join(os.path.expanduser("~"), ".credentials")):
"""
Send Email via GMail
@@ -250,17 +272,17 @@ def __gmail_send_message(self, message, custom_folder=os.path.join(os.path.expan
credentials = self.__gmail_get_credentials(custom_folder=custom_folder)
try:
http = credentials.authorize(httplib2.Http())
- except Exception as e:
- raise ValueError(f"Can't authorize via Google OAuth\nError{format_error(e)}")
+ except Exception as error:
+ raise ValueError(f"Can't authorize via Google OAuth\nError{format_error(error)}") from error
try:
- service = discovery.build('gmail', 'v1', http=http)
- except Exception as e:
- raise ValueError(f"Can't build service for Google OAuth\nError{format_error(e)}")
+ service = discovery.build("gmail", "v1", http=http)
+ except Exception as error:
+ raise ValueError(f"Can't build service for Google OAuth\nError{format_error(error)}") from error
try:
raw = base64.urlsafe_b64encode(message.as_bytes()).decode()
- except Exception as e:
- raise ValueError(f"Can't convert payload to base64\nError{format_error(e)}")
- self.__gmail_send_message_internal(service, self.__email, {'raw': raw})
+ except Exception as error:
+ raise ValueError(f"Can't convert payload to base64\nError{format_error(error)}") from error
+ self.__gmail_send_message_internal(service, self.__email, {"raw": raw})
def __gmail_send_message_internal(self, service, user_id, message):
"""
@@ -272,9 +294,9 @@ def __gmail_send_message_internal(self, service, user_id, message):
:return: message
"""
try:
- message = (service.users().messages().send(userId=user_id, body=message).execute())
+ message = service.users().messages().send(userId=user_id, body=message).execute()
if self.__debug:
print(f'Message sent with Id: "{message["id"]}"!')
return message
- except Exception as e:
- raise ValueError(f"Can't send mail via GMail!\nError{format_error(e)}")
+ except Exception as error:
+ raise ValueError(f"Can't send mail via GMail!\nError{format_error(error)}") from error
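
For reference, a minimal sketch of the send flow this file implements: build a MIME message, base64url-encode it, and post it through the Gmail API. `send_via_gmail` and its parameters are illustrative names; `creds` is assumed to be the oauth2client-style credential that __gmail_get_credentials() returns, and userId="me" simply targets the authorized mailbox.

    import base64
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    import httplib2
    from googleapiclient import discovery


    def send_via_gmail(creds, sender, recipient, subject, html):
        # Assemble a multipart message with an HTML body
        message = MIMEMultipart()
        message["From"] = sender
        message["To"] = recipient
        message["Subject"] = subject
        message.attach(MIMEText(html, "html"))
        # Gmail expects the whole RFC 2822 message as a base64url string under the "raw" key
        raw = base64.urlsafe_b64encode(message.as_bytes()).decode()
        # Authorize and build the service the same way __gmail_send_message() does
        http = creds.authorize(httplib2.Http())
        service = discovery.build("gmail", "v1", http=http)
        return service.users().messages().send(userId="me", body={"raw": raw}).execute()
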
diff --git a/testrail_api_reporter/publishers/gdrive_uploader.py b/testrail_api_reporter/publishers/gdrive_uploader.py
index 944dde2..b271bdb 100644
--- a/testrail_api_reporter/publishers/gdrive_uploader.py
+++ b/testrail_api_reporter/publishers/gdrive_uploader.py
@@ -1,3 +1,4 @@
+""" Google Drive uploader module """
import json
import os
@@ -5,12 +6,22 @@
class GoogleDriveUploader:
+ """Google Drive uploader class"""
+
# The Google token needs to be configured first; to do so, visit:
# https://console.developers.google.com/apis/credentials?pli=1
# Create Credentials => OAuth client ID => TV and limited Input Devices and get client_id and a client_secret
# Then pass it as google_id = client_id and google_secret = client_secret
- def __init__(self, google_id, google_secret, google_api_refresh_token=None, cleanup_needed=True,
- backup_filename='backup.zip', mime_type='application/zip', debug=True):
+ def __init__(
+ self,
+ google_id,
+ google_secret,
+ google_api_refresh_token=None,
+ cleanup_needed=True,
+ backup_filename="backup.zip",
+ mime_type="application/zip",
+ debug=True,
+ ):
"""
General init
@@ -34,7 +45,7 @@ def __init__(self, google_id, google_secret, google_api_refresh_token=None, clea
self.__backup_filename = backup_filename
self.__mime_type = mime_type
- if not google_api_refresh_token or google_api_refresh_token == '':
+ if not google_api_refresh_token or google_api_refresh_token == "":
self.__g_token, self.__g_refresh_token = self.__first_run()
else:
self.__g_refresh_token = google_api_refresh_token
@@ -47,12 +58,16 @@ def __get_new_device_codes(self):
"""
if self.__debug:
print("Get temporary Device ID and user code from Google Auth engine")
- response = json.loads(os.popen(f'curl '
- f'-d "client_id={self.__g_id}&scope=https://www.googleapis.com/auth/drive.file"'
- f' https://oauth2.googleapis.com/device/code').read())
+ response = json.loads(
+ os.popen(
+ f"curl "
+ f'-d "client_id={self.__g_id}&scope=https://www.googleapis.com/auth/drive.file"'
+ f" https://oauth2.googleapis.com/device/code"
+ ).read()
+ )
if self.__debug:
print(response)
- return response['device_code'], response['user_code'], response['verification_url']
+ return response["device_code"], response["user_code"], response["verification_url"]
def __get_new_oauth_token(self, device_code):
"""
@@ -63,11 +78,15 @@ def __get_new_oauth_token(self, device_code):
"""
if self.__debug:
print("Get OAuth token from google using Device ID")
- response = json.loads(os.popen(f'curl -d client_id={self.__g_id} -d client_secret={self.__g_secret} '
- f'-d device_code={device_code} '
- f'-d grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Adevice_code '
- f'https://accounts.google.com/o/oauth2/token').read())
- return response['access_token'], response['refresh_token']
+ response = json.loads(
+ os.popen(
+ f"curl -d client_id={self.__g_id} -d client_secret={self.__g_secret} "
+ f"-d device_code={device_code} "
+ f"-d grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Adevice_code "
+ f"https://accounts.google.com/o/oauth2/token"
+ ).read()
+ )
+ return response["access_token"], response["refresh_token"]
def __refresh_token(self):
"""
@@ -78,11 +97,15 @@ def __refresh_token(self):
"""
if self.__debug:
print("Google OAuth token needs to be refreshed, so, let's do this")
- response = json.loads(os.popen(f'curl -d client_id={self.__g_id} -d client_secret={self.__g_secret} '
- f'-d refresh_token={self.__g_refresh_token} '
- f'-d grant_type=refresh_token '
- f'https://accounts.google.com/o/oauth2/token').read())
- self.__g_token = response['access_token']
+ response = json.loads(
+ os.popen(
+ f"curl -d client_id={self.__g_id} -d client_secret={self.__g_secret} "
+ f"-d refresh_token={self.__g_refresh_token} "
+ f"-d grant_type=refresh_token "
+ f"https://accounts.google.com/o/oauth2/token"
+ ).read()
+ )
+ self.__g_token = response["access_token"]
return self.__g_token
def __first_run(self):
@@ -100,18 +123,22 @@ def __first_run(self):
print(f"Please fill device code {user_code} into web browser URL: {url}")
input("When your code has been submitted and your account confirmed, press enter")
- print("Now you must ensure that your access rights are granted for this device! Proceed to:\n"
- "https://console.developers.google.com/apis/api/drive.googleapis.com/overview\n"
- "and open Credentials tab, now confirm OAuth API permissions for this device.\n"
- "After submit please wait at least 5 minutes.")
+ print(
+ "Now ensure that access rights are granted for this device! Proceed to:\n"
+ "https://console.developers.google.com/apis/api/drive.googleapis.com/overview\n"
+ "open the Credentials tab and confirm OAuth API permissions for this device.\n"
+ "After submitting, please wait at least 5 minutes."
+ )
input("When 5 minutes have passed, press enter")
access_token, refresh_token = self.__get_new_oauth_token(device_code=device_code)
- print(f"Your access token is:\n{access_token}\nYour refresh token is:\n{refresh_token}\n"
- f"Please save these credentials secure!\nYour access token will be valid for an 1 hour. "
- f"If you plan use it in advance, you need to refresh it every hour or before use any time. \n"
- f"Next time init this class with your refresh token to update access token automatically.")
+ print(
+ f"Your access token is:\n{access_token}\nYour refresh token is:\n{refresh_token}\n"
+ f"Please store these credentials securely!\nYour access token will be valid for 1 hour. "
+ f"If you plan to use it later, refresh it every hour or right before each use.\n"
+ f"Next time, initialize this class with your refresh token to refresh the access token automatically."
+ )
return access_token, refresh_token
@@ -127,16 +154,20 @@ def __upload_to_gdrive(self, filename=None, mime_type=None):
if not mime_type:
mime_type = self.__mime_type
if self.__debug:
- print(f'Uploading {filename} to GoogleDrive')
- response = json.loads(os.popen(f'curl -X POST -L -H "Authorization: Bearer {self.__g_token}" '
- f'-F "metadata={{name :\'{filename.split(".")[0]}\'}};'
- f'type=application/json;charset=UTF-8" '
- f'-F "file=@{filename};type={mime_type}" '
- f'"https://www.googleapis.com/upload/drive/v3/'
- f'files?uploadType=multipart"').read())
- if response['id']:
+ print(f"Uploading {filename} to GoogleDrive")
+ response = json.loads(
+ os.popen(
+ f'curl -X POST -L -H "Authorization: Bearer {self.__g_token}" '
+ f'-F "metadata={{name :\'{filename.split(".")[0]}\'}};'
+ f'type=application/json;charset=UTF-8" '
+ f'-F "file=@{filename};type={mime_type}" '
+ f'"https://www.googleapis.com/upload/drive/v3/'
+ f'files?uploadType=multipart"'
+ ).read()
+ )
+ if response["id"]:
if self.__debug:
- print(f'Backup archive {filename} was uploaded to Google Drive')
+ print(f"Backup archive {filename} was uploaded to Google Drive")
else:
print("Something went wrong, please check the backup manually or re-run")
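
The uploader shells out to curl for every OAuth call; a rough equivalent of __refresh_token() built on the requests library might look like the sketch below. `refresh_access_token` is a hypothetical helper, and the endpoint and form fields are the same ones the curl command above passes.

    import requests


    def refresh_access_token(client_id, client_secret, refresh_token, timeout=10):
        # Exchange the long-lived refresh token for a new short-lived access token
        response = requests.post(
            "https://accounts.google.com/o/oauth2/token",
            data={
                "client_id": client_id,
                "client_secret": client_secret,
                "refresh_token": refresh_token,
                "grant_type": "refresh_token",
            },
            timeout=timeout,
        )
        response.raise_for_status()
        return response.json()["access_token"]
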
diff --git a/testrail_api_reporter/publishers/slack_sender.py b/testrail_api_reporter/publishers/slack_sender.py
index c80bfe6..d968b1b 100644
--- a/testrail_api_reporter/publishers/slack_sender.py
+++ b/testrail_api_reporter/publishers/slack_sender.py
@@ -1,12 +1,13 @@
+""" Slack sender module """
import json
import requests
-from ..utils.reporter_utils import format_error
+from ..utils.reporter_utils import format_error, check_captions_and_files
class SlackSender:
- """ See for details https://api.slack.com/messaging/webhooks """
+ """Slack sender class, see for details https://api.slack.com/messaging/webhooks"""
def __init__(self, hook_url=None, timeout=5, verify=True, debug=True):
"""
@@ -37,17 +38,25 @@ def __prepare_attachments(files, captions):
"""
legacy_attachments = []
for j, file in enumerate(files):
- legacy_attachments.append({
- 'pretext': '----',
- 'text': captions[j] if captions else '',
- 'mrkdwn_in': ['text', 'pretext'],
- "image_url": file,
- })
+ legacy_attachments.append(
+ {
+ "pretext": "----",
+ "text": captions[j] if captions else "",
+ "mrkdwn_in": ["text", "pretext"],
+ "image_url": file,
+ }
+ )
return legacy_attachments
@staticmethod
def __prepare_blocks(title):
- return [{'type': 'header', 'text': {'type': 'plain_text', 'text': title, 'emoji': True}}]
+ """
+ Prepares blocks
+
+ :param title: header title of message
+ :return: list of dict with blocks info
+ """
+ return [{"type": "header", "text": {"type": "plain_text", "text": title, "emoji": True}}]
def __prepare_payload(self, title, files, captions):
"""
@@ -58,8 +67,12 @@ def __prepare_payload(self, title, files, captions):
:param captions: list of captions for files, list of strings, if not provided, no captions will be added
:return: json with payload
"""
- return json.dumps({'attachments': self.__prepare_attachments(files=files, captions=captions),
- 'blocks': self.__prepare_blocks(title=title)})
+ return json.dumps(
+ {
+ "attachments": self.__prepare_attachments(files=files, captions=captions),
+ "blocks": self.__prepare_blocks(title=title),
+ }
+ )
@staticmethod
def __prepare_headers():
@@ -68,10 +81,11 @@ def __prepare_headers():
:return: json with headers
"""
- return {'Content-type': 'application/json', 'Accept': 'text/plain'}
+ return {"Content-type": "application/json", "Accept": "text/plain"}
- def send_message(self, files=None, captions=None, title=f"Test development & automation coverage report",
- debug=None):
+ def send_message(
+ self, files=None, captions=None, title="Test development & automation coverage report", debug=None
+ ):
"""
Send message to Slack
@@ -85,25 +99,22 @@ def send_message(self, files=None, captions=None, title=f"Test development & aut
if not isinstance(files, list):
raise ValueError("No file list for report provided, aborted!")
debug = debug if debug is not None else self.__debug
- if not isinstance(captions, list):
- if debug:
- print("Caption list is empty, no legend will be displayed")
- captions = None
- elif len(captions) != len(files):
- if debug:
- print(f"Caption and file lists are not the same length {len(captions)} != {len(files)} thus "
- f"no legend will be displayed")
- captions = None
-
+ captions = check_captions_and_files(captions=captions, files=files, debug=debug)
# Send to slack
try:
- response = requests.post(url=self.__hook_url,
- data=self.__prepare_payload(title=title, files=files, captions=captions),
- timeout=self.__timeout, verify=self.__verify, headers=self.__prepare_headers())
+ response = requests.post(
+ url=self.__hook_url,
+ data=self.__prepare_payload(title=title, files=files, captions=captions),
+ timeout=self.__timeout,
+ verify=self.__verify,
+ headers=self.__prepare_headers(),
+ )
if response.status_code != 200:
- raise ValueError(f"Message can't be sent! Error {response.status_code}: {response.text}: "
- f"{response.raise_for_status()}")
- elif debug:
+ raise ValueError(
+ f"Message can't be sent! Error {response.status_code}: {response.text}: "
+ f"{response.raise_for_status()}"
+ )
+ if debug:
print("Message sent!")
- except Exception as e:
- raise ValueError(f"Message can't be sent!\nError{format_error(e)}")
+ except Exception as error:
+ raise ValueError(f"Message can't be sent!\nError{format_error(error)}") from error
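
A minimal sketch of the webhook call that SlackSender.send_message() assembles: one header block for the title plus a legacy attachment per image URL. `post_report` is a hypothetical helper, and `hook_url` is assumed to be an existing incoming-webhook URL.

    import json

    import requests


    def post_report(hook_url, title, files, captions=None):
        # Legacy attachments carry the image URLs and optional captions
        attachments = [
            {
                "pretext": "----",
                "text": captions[i] if captions else "",
                "mrkdwn_in": ["text", "pretext"],
                "image_url": url,
            }
            for i, url in enumerate(files)
        ]
        # A single header block renders the report title
        blocks = [{"type": "header", "text": {"type": "plain_text", "text": title, "emoji": True}}]
        payload = json.dumps({"attachments": attachments, "blocks": blocks})
        headers = {"Content-type": "application/json", "Accept": "text/plain"}
        response = requests.post(url=hook_url, data=payload, headers=headers, timeout=5)
        response.raise_for_status()
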
diff --git a/testrail_api_reporter/utils/__init__.py b/testrail_api_reporter/utils/__init__.py
index 07faa24..0007adf 100644
--- a/testrail_api_reporter/utils/__init__.py
+++ b/testrail_api_reporter/utils/__init__.py
@@ -1 +1,2 @@
+""" Utils for testrail_api_reporter """
from .reporter_utils import upload_image
diff --git a/testrail_api_reporter/utils/case_stat.py b/testrail_api_reporter/utils/case_stat.py
index 00d776b..6ea9684 100644
--- a/testrail_api_reporter/utils/case_stat.py
+++ b/testrail_api_reporter/utils/case_stat.py
@@ -1,43 +1,101 @@
+""" CaseStat class """
+
+
class CaseStat:
"""
Placeholder class for automation statistics
"""
def __init__(self, name):
+ """
+ Constructor
+
+ :param name: name of the test case
+ """
self.name = name
self.total = 0
self.automated = 0
self.not_automated = 0
- self.na = 0
+ self.not_applicable = 0
# getters
def get_name(self):
+ """
+ Returns the name of the test case
+
+ :return: name of the test case
+ """
return self.name
def get_total(self):
+ """
+ Returns the total number of test cases
+
+ :return: total number of test cases
+ """
return self.total
def get_automated(self):
+ """
+ Returns the number of automated test cases
+
+ :return: number of automated test cases
+ """
return self.automated
def get_not_automated(self):
+ """
+ Returns the number of not automated test cases
+
+ :return: number of not automated test cases
+ """
return self.not_automated
- def get_na(self):
- return self.na
+ def get_not_applicable(self):
+ """
+ Returns the number of not applicable test cases
+
+ :return: number of not applicable test cases
+ """
+ return self.not_applicable
# setters
def set_name(self, name):
+ """
+ Sets the name of the test case
+
+ :param name: name of the test case
+ """
self.name = name
def set_total(self, total):
+ """
+ Sets the total number of test cases
+
+ :param total: total number of test cases
+ """
self.total = total
def set_automated(self, automated):
+ """
+ Sets the number of automated test cases
+
+ :param automated: number of automated test cases
+ """
self.automated = automated
def set_not_automated(self, not_automated):
+ """
+ Sets the number of not automated test cases
+
+ :param not_automated: number of not automated test cases
+ """
self.not_automated = not_automated
- def set_na(self, na):
- self.na = na
+ def set_not_applicable(self, not_applicable):
+ """
+ Sets the number of not applicable test cases
+
+ :param not_applicable: number of not applicable test cases
+ """
+ self.not_applicable = not_applicable
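
A quick usage sketch of the renamed accessors (get_na/set_na become get_not_applicable/set_not_applicable); the numbers here are made up.

    from testrail_api_reporter.utils.case_stat import CaseStat

    stat = CaseStat("UI tests")
    stat.set_total(120)
    stat.set_automated(80)
    stat.set_not_automated(30)
    stat.set_not_applicable(10)  # formerly set_na(10)
    print(stat.get_name(), stat.get_total(), stat.get_not_applicable())
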
diff --git a/testrail_api_reporter/utils/csv_parser.py b/testrail_api_reporter/utils/csv_parser.py
index 38af639..7c0b4ca 100644
--- a/testrail_api_reporter/utils/csv_parser.py
+++ b/testrail_api_reporter/utils/csv_parser.py
@@ -1,10 +1,11 @@
+""" CSV parser for TestRail API Reporter """
import csv
from datetime import datetime
from os.path import exists
class CSVParser:
- """ Parser for CSV files """
+ """Parser for CSV files"""
def __init__(self, filename=None, debug=True):
"""
@@ -33,33 +34,37 @@ def save_history_data(self, filename=None, report=None, debug=None):
raise ValueError("Filename for save report data is not provided, save history data aborted!")
if not report:
raise ValueError("Report couldn't be found, save history data aborted!")
- date = datetime.today().strftime('%Y-%m-%d')
- last_date = ''
- mode = 'r' if exists(filename) else 'w'
+ date = datetime.today().strftime("%Y-%m-%d")
+ last_date = ""
+ mode = "r" if exists(filename) else "w"
try:
- with open(filename, mode) as csvfile:
- if mode == 'r':
+ with open(filename, mode, encoding="utf-8") as csvfile:
+ if mode == "r":
for row in reversed(list(csv.reader(csvfile))):
- last_date = '{0}-{1}-{2}'.format(row[0], row[1], row[2])
+ last_date = f"{row[0]}-{row[1]}-{row[2]}"
break
except FileNotFoundError:
- raise ValueError("Can't open report file, save history data aborted!")
+ raise ValueError("Can't open report file, save history data aborted!") from FileNotFoundError
if last_date != date:
if debug:
- print('Saving data in {0} for {1}'.format(filename, date))
- with open(filename, 'a+', newline='') as csvfile:
- writer = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
+ print(f"Saving data in {filename} for {date}")
+ with open(filename, "a+", newline="", encoding="utf-8") as csvfile:
+ writer = csv.writer(csvfile, delimiter=",", quotechar="|", quoting=csv.QUOTE_MINIMAL)
- writer.writerow([datetime.today().strftime('%Y'),
- datetime.today().strftime('%m'),
- datetime.today().strftime('%d'),
- report.get_total(),
- report.get_automated(),
- report.get_not_automated(),
- report.get_na()])
+ writer.writerow(
+ [
+ datetime.today().strftime("%Y"),
+ datetime.today().strftime("%m"),
+ datetime.today().strftime("%d"),
+ report.get_total(),
+ report.get_automated(),
+ report.get_not_automated(),
+ report.get_not_applicable(),
+ ]
+ )
else:
if debug:
- print('Data already stored for today, skipping save')
+ print("Data already stored for today, skipping save")
def load_history_data(self, filename=None, debug=None):
"""
@@ -79,15 +84,15 @@ def load_history_data(self, filename=None, debug=None):
not_automated = []
nas = []
if debug:
- print('Loading history data from {}'.format(filename))
+ print(f"Loading history data from {filename}")
try:
- with open(filename, 'r') as csvfile:
- for row in (csv.reader(csvfile)):
+ with open(filename, "r", encoding="utf-8") as csvfile:
+ for row in csv.reader(csvfile):
timestamps.append(datetime(year=int(row[0]), month=int(row[1]), day=int(row[2])))
totals.append(row[3])
automated.append(row[4])
not_automated.append(row[5])
nas.append(row[6])
except FileNotFoundError:
- raise ValueError("Can't open report file, load history data aborted!")
+ raise ValueError("Can't open report file, load history data aborted!") from FileNotFoundError
return [timestamps, totals, automated, not_automated, nas]
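
save_history_data() appends at most one row per day in the order year, month, day, total, automated, not automated, not applicable, and load_history_data() reads those columns back positionally. A minimal sketch of reading such a file directly (the filename is only an example):

    import csv

    with open("automation_history.csv", newline="", encoding="utf-8") as csvfile:
        for year, month, day, total, automated, not_automated, not_applicable in csv.reader(csvfile):
            print(f"{year}-{month}-{day}: {automated}/{total} automated, {not_applicable} not applicable")
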
diff --git a/testrail_api_reporter/utils/reporter_utils.py b/testrail_api_reporter/utils/reporter_utils.py
index 07233a9..782109b 100644
--- a/testrail_api_reporter/utils/reporter_utils.py
+++ b/testrail_api_reporter/utils/reporter_utils.py
@@ -1,3 +1,4 @@
+""" This module contains service functions for reporter """
import os
import requests
@@ -10,10 +11,10 @@ def format_error(error):
:param error: initial error
:return: formatted string with error details
"""
- err_msg = ''
+ err_msg = ""
error = error if isinstance(error, list) else [error]
for err in error:
- err_msg = f'{err_msg} : {err}'
+ err_msg = f"{err_msg} : {err}"
return err_msg
@@ -25,29 +26,84 @@ def upload_image(filename, api_token):
:param api_token: unique API token for image upload on https://freeimage.host
:return: dict with urls with image itself and its thumbnail
"""
- payload = {
- 'type': 'file',
- 'action': 'upload',
- 'key': api_token
+ payload = {"type": "file", "action": "upload", "key": api_token}
+ with open(filename, "rb") as source_file:
+ files = {"source": source_file}
+ response = requests.post(
+ url="https://freeimage.host/api/1/upload", data=payload, timeout=5, verify=True, files=files
+ )
+ return {
+ "image": response.json()["image"]["file"]["resource"]["chain"]["image"],
+ "thumb": response.json()["image"]["file"]["resource"]["chain"]["thumb"],
}
- files = {'source': open(filename, 'rb')}
- response = requests.post(url='https://freeimage.host/api/1/upload',
- data=payload, timeout=5, verify=True, files=files)
- return {'image': response.json()['image']['file']['resource']['chain']['image'],
- 'thumb': response.json()['image']['file']['resource']['chain']['thumb']}
def delete_file(filename, debug=True):
- os.popen(f'rm {filename}').read()
+ """
+ Service function to delete file from filesystem
+
+ :param filename: filename or path to file, which should be deleted
+ :param debug: debug output is enabled, may be True or False, optional, by default is True
+ """
+ os.popen(f"rm {filename}").read()
if debug:
- print(f'Removed {filename}')
+ print(f"Removed {filename}")
def zip_file(filename, suffix=None, debug=True):
+ """
+ Service function to ZIP file
+
+ :param filename: filename or path to file, which should be zipped
+ :param suffix: suffix for zipped file, optional
+ :param debug: debug output is enabled, may be True or False, optional, by default is True
+ :return: zipped filename
+ """
if suffix is None:
- suffix = ''
+ suffix = ""
zipped_file = f'{filename.split(".")[0]}{suffix}.zip'
- os.popen(f'zip -r {zipped_file} {filename}').read()
+ os.popen(f"zip -r {zipped_file} {filename}").read()
if debug:
- print(f'ZIPped {filename} to {zipped_file}')
+ print(f"ZIPped {filename} to {zipped_file}")
return zipped_file
+
+
+def check_captions_and_files(captions, files, debug):
+ """
+ Service function to check captions and files lists
+
+ :param captions: list of captions for files, list of strings, if not provided, no captions will be added
+ :param files: list of urls of images
+ :param debug: debug output is enabled, may be True or False, optional
+ :return: captions list or None
+ """
+ return_value = captions
+ if not isinstance(captions, list):
+ if debug:
+ print("Caption list is empty, no legend will be displayed")
+ return_value = None
+ elif len(captions) != len(files):
+ if debug:
+ print(
+ f"Caption and file lists are not the same length {len(captions)} != {len(files)} thus "
+ f"no legend will be displayed"
+ )
+ return_value = None
+ return return_value
+
+
+def init_get_cases_process(debug, default_debug):
+ """
+ Service function to initialize the state of the get-cases process
+
+ :param debug: debug output is enabled, may be True or False, optional
+ :param default_debug: default debug output is enabled, may be True or False, optional
+ :return: debug, cases_list, first_run, criteria, response, retry
+ """
+ debug = debug if debug is not None else default_debug
+ cases_list = []
+ first_run = True
+ criteria = None
+ response = None
+ retry = 0
+ return debug, cases_list, first_run, criteria, response, retry
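
zip_file() and delete_file() shell out to zip and rm via os.popen; a portable standard-library sketch with the same behaviour (hypothetical helper names, not the module's API) could be:

    import os
    import zipfile


    def zip_file_stdlib(filename, suffix="", debug=True):
        # Same naming scheme as zip_file(): strip the extension, append the suffix, add .zip
        zipped_file = f'{filename.split(".")[0]}{suffix}.zip'
        with zipfile.ZipFile(zipped_file, "w", compression=zipfile.ZIP_DEFLATED) as archive:
            archive.write(filename)
        if debug:
            print(f"ZIPped {filename} to {zipped_file}")
        return zipped_file


    def delete_file_stdlib(filename, debug=True):
        # os.remove avoids spawning a shell just to run rm
        os.remove(filename)
        if debug:
            print(f"Removed {filename}")
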