Added test_engines_plotly_reporter_draw_automation_state_report.py
wwakabobik committed Aug 30, 2024
1 parent 86becd9 commit f5e64ae
Showing 9 changed files with 292 additions and 21 deletions.
1 change: 1 addition & 0 deletions .github/workflows/master_tests.yml
@@ -26,6 +26,7 @@ jobs:
pip install pytest-xdist
pip install pytest-cov
pip install faker
pip install pillow
- name: Add 'testrail_api_reporter' to PYTHONPATH
run: echo "PYTHONPATH=$PYTHONPATH:$(pwd)/testrail_api_reporter:." >> $GITHUB_ENV
- name: Execute tests
1 change: 1 addition & 0 deletions .github/workflows/tests.yml
@@ -29,6 +29,7 @@ jobs:
pip install pytest-xdist
pip install pytest-cov
pip install faker
pip install pillow
- name: Add 'testrail_api_reporter' to PYTHONPATH
run: echo "PYTHONPATH=$PYTHONPATH:$(pwd)/testrail_api_reporter:." >> $GITHUB_ENV
- name: Execute tests
2 changes: 1 addition & 1 deletion testrail_api_reporter/utils/reporter_utils.py
@@ -7,7 +7,7 @@
import requests


def format_error(error: list | str) -> str:
def format_error(error: list | str | Exception) -> str:
"""
Service function for parse errors to human-readable format
Binary file added tests/assets/expected_automation_state.png
Binary file added tests/assets/expected_automation_state_empty.png
79 changes: 76 additions & 3 deletions tests/conftest.py
@@ -6,10 +6,18 @@

import pytest
from faker import Faker
from PIL import Image, ImageChops

from testrail_api_reporter.engines.plotly_reporter import ( # pylint: disable=import-error,no-name-in-module
PlotlyReporter,
)
from testrail_api_reporter.utils.case_stat import CaseStat # pylint: disable=import-error,no-name-in-module



fake = Faker()


@pytest.fixture
def create_test_file() -> str:
"""
@@ -18,7 +26,7 @@ def create_test_file() -> str:
:return: filename
:rtype: str
"""
test_file = f"not_existing_{Faker().file_name()}"
test_file = f"not_existing_{fake.file_name()}"
with open(test_file, "w", encoding="utf-8") as file:
file.write("Test")
assert path.exists(test_file) is True
@@ -53,7 +61,7 @@ def case_stat() -> CaseStat:
:return: CaseStat
:rtype: CaseStat
"""
return CaseStat(Faker().word())
return CaseStat(fake.word())


@pytest.fixture
@@ -78,8 +86,9 @@ def csv_file() -> str:
Fixture to create random test file
:return: filename
:rtype: str
"""
test_file = f"not_existing_{Faker().file_name(extension='csv')}"
test_file = f"not_existing_{fake.file_name(extension='csv')}"
with open(test_file, "w", encoding="utf-8") as file:
file.write("")
assert path.exists(test_file) is True
@@ -89,3 +98,67 @@ def csv_file() -> str:
remove(test_file)
except FileNotFoundError:
pass


@pytest.fixture
def compare_image():
"""
Fixture to compare images using pixel threshold
:return: comparison function
:rtype: function
"""

def compare(actual: str, expected: str, threshold: int = 10) -> bool:
"""
Function to compare images using pixel threshold
:param actual: filename with path to actual image
:type actual: str
:param expected: filename with path to expected image
:type expected: str
:param threshold: maximum number of differing pixels tolerated (lower is stricter)
:type threshold: int
:return: comparison result. True if images match.
:rtype: bool
"""
# Ensure that both images exist
assert path.exists(actual)
assert path.exists(expected)

# Load the generated image and the reference image
generated_image = Image.open(actual)
reference_image = Image.open(expected)

# Compare the two images
diff = ImageChops.difference(generated_image, reference_image)

# Count the number of pixels that are different
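# Pixels from diff.getdata() are assumed to be RGBA 4-tuples; a pixel counts as different
# when the cross-channel spread of its per-channel differences exceeds 20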
diff_pixels = sum(abs(r - g) + abs(g - b) + abs(b - a) + abs(a - r) > 20 for r, g, b, a in diff.getdata())

# Check that the number of different pixels is below the threshold
return diff_pixels < threshold

return compare


@pytest.fixture
def random_type_platforms() -> list[dict]:
"""
Returns random list with type platforms dict
:return: list with type platforms dict
:rtype: list[dict]
"""
return [{"name": fake.word(), "sections": [randint(1, 10000)]} for _ in range(randint(1, 5))]


@pytest.fixture
def random_plotly_reporter(random_type_platforms) -> PlotlyReporter:
"""
Returns PlotlyReporter object with random type platforms
:return: PlotlyReporter object with random type platforms
:rtype: PlotlyReporter
"""
return PlotlyReporter(type_platforms=random_type_platforms)
97 changes: 97 additions & 0 deletions tests/engines/test_engines_plotly_reporter_draw_automation_state_report.py
@@ -0,0 +1,97 @@
# -*- coding: utf-8 -*-
"""Tests for plotly_reporter module, the PlotlyReporter clas, draw_automation_state_report method"""

from os import path, remove, getcwd
from random import choice

import pytest
from faker import Faker

from testrail_api_reporter.engines.plotly_reporter import ( # pylint: disable=import-error,no-name-in-module
PlotlyReporter,
)


fake = Faker()


@pytest.fixture
def random_expected_image(case_stat):
"""
Fixture that chooses random expected image for draw automation state
:param case_stat: fixture returns empty CaseStat object
"""
if choice((False, True)):
case_stat.set_name("Automation State")
case_stat.total = 5905
case_stat.automated = 19100
case_stat.not_automated = 27205
case_stat.not_applicable = 10092
return {"filename": f"{getcwd()}/tests/assets/expected_automation_state.png", "data": [case_stat]}
else:
case_stat.set_name("Automation State")
return {"filename": f"{getcwd()}/tests/assets/expected_automation_state_empty.png", "data": [case_stat]}


def test_draw_automation_state_report_no_reports(caplog, random_plotly_reporter):
"""
Init PlotlyReporter and call draw_automation_state_report without reports should raise ValueError
:param caplog: caplog fixture
:param random_plotly_reporter: fixture returns PlotlyReporter
"""
with pytest.raises(ValueError, match="No TestRail reports are provided, report aborted!"):
random_plotly_reporter.draw_automation_state_report(
filename=fake.file_name(extension=choice(("png", "jpg", "jpeg", "webp")))
)


def test_draw_automation_state_report_no_filename(caplog, random_plotly_reporter):
"""
Init PlotlyReporter and call draw_automation_state_report without filename should raise ValueError
:param caplog: caplog fixture
:param random_plotly_reporter: fixture returns PlotlyReporter
"""
with pytest.raises(ValueError, match="No output filename is provided, report aborted!"):
random_plotly_reporter.draw_automation_state_report(reports=[fake.pydict()])


def test_draw_automation_state_report_creates_file(caplog, case_stat, case_stat_random, random_plotly_reporter):
"""
Init PlotlyReporter and call draw_automation_state_report with valid parameters should create file
:param caplog: caplog fixture
:param case_stat: fixture returns empty CaseStat object
:param case_stat_random: fixture returns filled with random data CaseStat object
:param random_plotly_reporter: fixture returns PlotlyReporter
"""
filename = fake.file_name(extension=choice(("png", "jpg", "jpeg", "webp")))
try:
reports = [case_stat, case_stat_random]
random_plotly_reporter.draw_automation_state_report(filename=filename, reports=reports)

assert path.exists(filename)
finally:
if path.exists(filename):
remove(filename)


def test_draw_automation_state_report_creates_correct_image(caplog, random_expected_image, compare_image):
"""
Init PlotlyReporter and call draw_automation_state_report with valid parameters should create correct image
:param caplog: caplog fixture
:param random_expected_image: fixture, returns any of possible expected cases
:param compare_image: fixture, returns function to compare images
"""
type_platforms = [{"name": "Automation State", "sections": [42, 1024, 0]}]
filename = "actual_automation_state.png"
try:
plotly_reporter = PlotlyReporter(type_platforms=type_platforms)
plotly_reporter.draw_automation_state_report(filename=filename, reports=random_expected_image["data"])
assert compare_image(actual=filename, expected=random_expected_image["filename"])
finally:
if path.exists(filename):
remove(filename)
88 changes: 88 additions & 0 deletions tests/engines/test_engines_plotly_reporter_init.py
@@ -0,0 +1,88 @@
# -*- coding: utf-8 -*-
"""Tests for plotly_reporter module, the PlotlyReporter class, init method"""

from logging import getLogger, INFO, WARNING, ERROR, FATAL
from os import path, remove
from random import randint, choice

import pytest
from faker import Faker

from testrail_api_reporter.engines.plotly_reporter import ( # pylint: disable=import-error,no-name-in-module
PlotlyReporter,
)
from testrail_api_reporter.utils.logger_config import ( # pylint: disable=import-error,no-name-in-module
setup_logger,
DEFAULT_LOGGING_LEVEL,
)

fake = Faker()


def test_plotly_reporter_init_default_params(caplog):
"""Init PlotlyReporter with default parameters"""
type_platforms = [{"name": fake.word(), "sections": [randint(1, 10000)]} for _ in range(randint(1, 5))]

plotly_reporter = PlotlyReporter(type_platforms=type_platforms)

logger = getLogger("PlotlyReporter")
assert logger.level == DEFAULT_LOGGING_LEVEL
assert path.exists("PlotlyReporter.log")

attributes = vars(plotly_reporter)
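# Private attributes are name-mangled, hence the _PlotlyReporter__ prefix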
assert attributes['_PlotlyReporter__pr_labels'] == ["Low", "Medium", "High", "Critical"]
assert attributes['_PlotlyReporter__pr_colors'] == ["rgb(173,216,230)", "rgb(34,139,34)", "rgb(255,255,51)", "rgb(255, 153, 153)"]
assert attributes['_PlotlyReporter__ar_colors'] == [
"rgb(255, 153, 153)",
"rgb(255,255,51)",
"rgb(34,139,34)",
"rgb(173,216,230)",
"rgb(65,105,225)",
"rgb(192, 192, 192)",
]
assert attributes['_PlotlyReporter__lines'] == {"color": "rgb(0,0,51)", "width": 1.5}
assert attributes['_PlotlyReporter__type_platforms'] == type_platforms


def test_plotly_reporter_init_custom_params(caplog):
"""Init PlotlyReporter with custom parameters"""
logger_file = fake.file_name(extension="log")
logger_name = fake.name()
logger_level = choice((INFO, WARNING, ERROR, FATAL))
try:
logger = setup_logger(logger_name, logger_file, level=logger_level)
type_platforms = [{"name": fake.word(), "sections": [randint(1, 10000)]} for _ in range(randint(1, 5))]
pr_labels = [fake.word() for _ in range(4)]
pr_colors = [f"rgb({randint(0, 255)},{randint(0, 255)},{randint(0, 255)})" for _ in range(4)]
ar_colors = [f"rgb({randint(0, 255)},{randint(0, 255)},{randint(0, 255)})" for _ in range(6)]
lines = {"color": f"rgb({randint(0, 255)},{randint(0, 255)},{randint(0, 255)})", "width": randint(1, 3)}

plotly_reporter = PlotlyReporter(
pr_colors=pr_colors,
pr_labels=pr_labels,
ar_colors=ar_colors,
lines=lines,
type_platforms=type_platforms,
logger=logger,
log_level=INFO,
)

logger = getLogger(logger_name)
assert logger.level == logger_level
assert path.exists(logger_file)

attributes = vars(plotly_reporter)
assert attributes['_PlotlyReporter__pr_labels'] == pr_labels
assert attributes['_PlotlyReporter__pr_colors'] == pr_colors
assert attributes['_PlotlyReporter__ar_colors'] == ar_colors
assert attributes['_PlotlyReporter__lines'] == lines
assert attributes['_PlotlyReporter__type_platforms'] == type_platforms
finally:
if path.exists(logger_file):
remove(logger_file)


def test_plotly_reporter_init_no_type_platforms(caplog):
"""Init PlotlyReporter without type_platforms should raise ValueError"""
with pytest.raises(ValueError, match="Platform types is not provided, Plotly Reporter cannot be initialized!"):
PlotlyReporter()
45 changes: 28 additions & 17 deletions tests/utils/test_reporter_utils_logger_config.py
@@ -2,39 +2,50 @@
"""Tests for the logger_config module"""

from logging import DEBUG, INFO, WARNING, ERROR, FATAL, FileHandler, StreamHandler
from os import path, remove
from random import choice, randint

from faker import Faker

from testrail_api_reporter.utils.logger_config import setup_logger, DEFAULT_LOGGING_LEVEL

from testrail_api_reporter.utils.logger_config import ( # pylint: disable=import-error,no-name-in-module
setup_logger,
DEFAULT_LOGGING_LEVEL,
)

fake = Faker()


def test_setup_logger_default_level(caplog):
"""Init logger with default level"""
log_file = fake.file_name(extension="log")
logger = setup_logger(fake.name(), str(log_file))
try:
logger = setup_logger(fake.name(), str(log_file))

assert logger.level == DEFAULT_LOGGING_LEVEL
assert logger.level == DEBUG
assert logger.level == DEFAULT_LOGGING_LEVEL
assert logger.level == DEBUG

assert len(logger.handlers) == 2
assert isinstance(logger.handlers[0], FileHandler)
assert isinstance(logger.handlers[1], StreamHandler)
assert len(logger.handlers) == 2
assert isinstance(logger.handlers[0], FileHandler)
assert isinstance(logger.handlers[1], StreamHandler)

message = str(fake.random_letters(randint(1, 10))) * randint(1, 10)
logger.debug(message)
with open(log_file, "r") as f:
assert message in f.read()
assert message in caplog.text
message = str(fake.random_letters(randint(1, 10))) * randint(1, 10)
logger.debug(message)
with open(log_file, "r") as f:
assert message in f.read()
assert message in caplog.text
finally:
if path.exists(log_file):
remove(log_file)


def test_setup_logger_custom_level(tmp_path):
"""Init logger with any other level"""
log_file = fake.file_name(extension="log")
log_level = choice((INFO, WARNING, ERROR, FATAL))
logger = setup_logger(fake.name(), str(log_file), level=log_level)

assert logger.level == log_level
try:
log_level = choice((INFO, WARNING, ERROR, FATAL))
logger = setup_logger(fake.name(), str(log_file), level=log_level)

assert logger.level == log_level
finally:
if path.exists(log_file):
remove(log_file)
