Commit

AUTO docusaurus 20230814
GitHub CI committed Aug 14, 2023
1 parent 13a4600 commit 6d8a523
Showing 10 changed files with 158 additions and 67 deletions.
22 changes: 13 additions & 9 deletions Makefile
@@ -152,42 +152,46 @@ else
set -o allexport; source tests_deployment/.env && OMP_NUM_THREADS=1 TF_CPP_MIN_LOG_LEVEL=2 poetry run pytest $(INTEGRATION_TEST_FOLDER) -n $(JOBS) -m $(INTEGRATION_TEST_PYTEST_MARKERS) --dist loadgroup && set +o allexport
endif

test-cli: PYTEST_MARKER=category_cli and (not flaky)
test-cli: PYTEST_MARKER=category_cli and (not flaky) and (not acceptance)
test-cli: DD_ARGS := $(or $(DD_ARGS),)
test-cli: test-marker

test-core-featurizers: PYTEST_MARKER=category_core_featurizers and (not flaky)
test-core-featurizers: PYTEST_MARKER=category_core_featurizers and (not flaky) and (not acceptance)
test-core-featurizers: DD_ARGS := $(or $(DD_ARGS),)
test-core-featurizers: test-marker

test-policies: PYTEST_MARKER=category_policies and (not flaky)
test-policies: PYTEST_MARKER=category_policies and (not flaky) and (not acceptance)
test-policies: DD_ARGS := $(or $(DD_ARGS),)
test-policies: test-marker

test-nlu-featurizers: PYTEST_MARKER=category_nlu_featurizers and (not flaky)
test-nlu-featurizers: PYTEST_MARKER=category_nlu_featurizers and (not flaky) and (not acceptance)
test-nlu-featurizers: DD_ARGS := $(or $(DD_ARGS),)
test-nlu-featurizers: prepare-spacy prepare-mitie prepare-transformers test-marker

test-nlu-predictors: PYTEST_MARKER=category_nlu_predictors and (not flaky)
test-nlu-predictors: PYTEST_MARKER=category_nlu_predictors and (not flaky) and (not acceptance)
test-nlu-predictors: DD_ARGS := $(or $(DD_ARGS),)
test-nlu-predictors: prepare-spacy prepare-mitie test-marker

test-full-model-training: PYTEST_MARKER=category_full_model_training and (not flaky)
test-full-model-training: PYTEST_MARKER=category_full_model_training and (not flaky) and (not acceptance)
test-full-model-training: DD_ARGS := $(or $(DD_ARGS),)
test-full-model-training: prepare-spacy prepare-mitie prepare-transformers test-marker

test-other-unit-tests: PYTEST_MARKER=category_other_unit_tests and (not flaky)
test-other-unit-tests: PYTEST_MARKER=category_other_unit_tests and (not flaky) and (not acceptance)
test-other-unit-tests: DD_ARGS := $(or $(DD_ARGS),)
test-other-unit-tests: prepare-spacy prepare-mitie test-marker

test-performance: PYTEST_MARKER=category_performance and (not flaky)
test-performance: PYTEST_MARKER=category_performance and (not flaky) and (not acceptance)
test-performance: DD_ARGS := $(or $(DD_ARGS),)
test-performance: test-marker

test-flaky: PYTEST_MARKER=flaky
test-flaky: PYTEST_MARKER=flaky and (not acceptance)
test-flaky: DD_ARGS := $(or $(DD_ARGS),)
test-flaky: prepare-spacy prepare-mitie test-marker

test-acceptance: PYTEST_MARKER=acceptance and (not flaky)
test-acceptance: DD_ARGS := $(or $(DD_ARGS),)
test-acceptance: prepare-spacy prepare-mitie test-marker

test-gh-actions:
OMP_NUM_THREADS=1 TF_CPP_MIN_LOG_LEVEL=2 poetry run pytest .github/tests --cov .github/scripts
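The targets above all follow the same pattern: each sets a target-specific `PYTEST_MARKER` expression and delegates to the shared `test-marker` recipe, and this commit appends `and (not acceptance)` so the new acceptance suite is excluded from every per-category job and only runs via the new `test-acceptance` target. The snippet below is a minimal, self-contained sketch (not part of this commit) of how such a marker expression filters tests; the test functions are hypothetical, only the marker names mirror the Makefile.

```python
import pytest


@pytest.mark.category_cli
def test_cli_help():
    assert True


@pytest.mark.acceptance
@pytest.mark.category_cli
def test_cli_end_to_end():
    assert True


if __name__ == "__main__":
    # Selects test_cli_help and skips test_cli_end_to_end, mirroring
    # PYTEST_MARKER=category_cli and (not flaky) and (not acceptance)
    pytest.main(["-m", "category_cli and (not flaky) and (not acceptance)", __file__])
```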

2 changes: 1 addition & 1 deletion docs/docs/sources/rasa_interactive___help.txt
@@ -39,7 +39,7 @@ options:
--conversation-id CONVERSATION_ID
Specify the id of the conversation the messages are
in. Defaults to a UUID that will be randomly
generated. (default: a4c90ee2bc8c4230ae3be0ae1a178894)
generated. (default: 701c4627ac034cbfafdff81b424ab3ba)
--endpoints ENDPOINTS
Configuration file for the model server and the
connectors as a yml file. (default: endpoints.yml)
2 changes: 1 addition & 1 deletion docs/docs/sources/rasa_shell___help.txt
@@ -30,7 +30,7 @@ options:
-h, --help show this help message and exit
--conversation-id CONVERSATION_ID
Set the conversation ID. (default:
5a9d9a60d7564e518914e600c840526f)
3b4fd9167c0c49ca9d5f5c175d2662ee)
-m MODEL, --model MODEL
Path to a trained Rasa model. If a directory is
specified, it will use the latest model in this
6 changes: 4 additions & 2 deletions rasa/model_training.py
@@ -28,6 +28,7 @@

CODE_NEEDS_TO_BE_RETRAINED = 0b0001
CODE_FORCED_TRAINING = 0b1000
CODE_NO_NEED_TO_TRAIN = 0b0000


class TrainingResult(NamedTuple):
@@ -73,7 +74,9 @@ def _dry_run_result(
"No training of components required "
"(the responses might still need updating!)."
)
return TrainingResult(dry_run_results=fingerprint_results)
return TrainingResult(
code=CODE_NO_NEED_TO_TRAIN, dry_run_results=fingerprint_results
)


def get_unresolved_slots(domain: Domain, stories: StoryGraph) -> List[Text]:
@@ -260,7 +263,6 @@ def _train_graph(
rasa.engine.validation.validate(model_configuration)

tempdir_name = rasa.utils.common.get_temp_dir_name()

# Use `TempDirectoryPath` instead of `tempfile.TemporaryDirectory` as this
# leads to errors on Windows when the context manager tries to delete an
# already deleted temporary directory (e.g. https://bugs.python.org/issue29982)
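Taken together, the changes in this file mean a dry run now always returns an explicit code: `CODE_NO_NEED_TO_TRAIN` (0b0000) when every fingerprint is a cache hit, `CODE_NEEDS_TO_BE_RETRAINED` (0b0001) when at least one component is stale, and `CODE_FORCED_TRAINING` (0b1000) when full training is forced. Below is a minimal sketch (not part of this commit) of how a caller might interpret the bitmask; the constants mirror the ones defined above, while the helper function is hypothetical.

```python
CODE_NO_NEED_TO_TRAIN = 0b0000
CODE_NEEDS_TO_BE_RETRAINED = 0b0001
CODE_FORCED_TRAINING = 0b1000


def describe_dry_run_code(code: int) -> str:
    """Turn a dry-run TrainingResult.code bitmask into a human-readable summary."""
    if code & CODE_FORCED_TRAINING:
        return "Full training was forced; all components will be retrained."
    if code & CODE_NEEDS_TO_BE_RETRAINED:
        return "At least one component is stale and needs retraining."
    return "Nothing to retrain; the cached model is up to date."


assert describe_dry_run_code(CODE_NO_NEED_TO_TRAIN).startswith("Nothing")
```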
18 changes: 18 additions & 0 deletions tests/README.md
@@ -0,0 +1,18 @@
# Tests
This directory contains all tests for the project.
Tests are organized into several groups:
* unit tests and integration tests
* regression tests
* acceptance tests

### Unit tests and integration tests
These are executed by our CI for every Pull Request.
They are located in all directories except `tests/regression` and `tests/acceptance_tests`.

### Regression tests
These are executed by our CI before every release.
They are located in the `tests/regressions` directory.

### Acceptance tests
These are executed by our CI before every release.
They are located in the `tests/acceptance_tests` directory.
Empty file.
Empty file.
64 changes: 64 additions & 0 deletions tests/acceptance_tests/test_training.py
@@ -0,0 +1,64 @@
from pathlib import Path
import secrets

from typing import Text

import rasa
from rasa.shared.core.domain import Domain
from rasa.shared.utils.io import write_yaml


def _new_model_path_in_same_dir(old_model_path: Text) -> Text:
return str(Path(old_model_path).parent / (secrets.token_hex(8) + ".tar.gz"))


def test_models_not_retrained_if_no_new_data(
trained_e2e_model: Text,
moodbot_domain_path: Path,
e2e_bot_config_file: Path,
e2e_stories_path: Text,
nlu_data_path: Text,
trained_e2e_model_cache: Path,
):
result = rasa.train(
str(moodbot_domain_path),
str(e2e_bot_config_file),
[e2e_stories_path, nlu_data_path],
output=_new_model_path_in_same_dir(trained_e2e_model),
dry_run=True,
)

assert result.code == 0


def test_dry_run_model_will_not_be_retrained_if_only_new_responses(
trained_e2e_model: Text,
moodbot_domain_path: Path,
e2e_bot_config_file: Path,
e2e_stories_path: Text,
nlu_data_path: Text,
trained_e2e_model_cache: Path,
tmp_path: Path,
):
domain = Domain.load(moodbot_domain_path)
domain_with_extra_response = """
version: '3.1'
responses:
utter_greet:
- text: "Hi from Rasa"
"""
domain_with_extra_response = Domain.from_yaml(domain_with_extra_response)

new_domain = domain.merge(domain_with_extra_response)
new_domain_path = tmp_path / "domain.yml"
write_yaml(new_domain.as_dict(), new_domain_path)

result = rasa.train(
str(new_domain_path),
str(e2e_bot_config_file),
[e2e_stories_path, nlu_data_path],
output=str(tmp_path),
dry_run=True,
)

assert result.code == 0
1 change: 1 addition & 0 deletions tests/conftest.py
@@ -72,6 +72,7 @@

# Defines how tests are parallelized in the CI
PATH_PYTEST_MARKER_MAPPINGS = {
"acceptance": [Path("tests", "acceptance_tests").absolute()],
"category_cli": [Path("tests", "cli").absolute()],
"category_core_featurizers": [Path("tests", "core", "featurizers").absolute()],
"category_policies": [
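The new `acceptance` entry maps the `tests/acceptance_tests` directory to its own marker so the CI can parallelize (and exclude) that group like the existing categories. The hook below is not from the repository; it is a minimal sketch of one way such a path-to-marker mapping could be applied automatically at collection time, assuming a standard `pytest_collection_modifyitems` hook.

```python
# Hypothetical sketch (not part of this commit): applying a path-to-marker
# mapping like PATH_PYTEST_MARKER_MAPPINGS during test collection.
from pathlib import Path
from typing import Dict, List

import pytest

PATH_PYTEST_MARKER_MAPPINGS: Dict[str, List[Path]] = {
    "acceptance": [Path("tests", "acceptance_tests").absolute()],
    "category_cli": [Path("tests", "cli").absolute()],
}


def pytest_collection_modifyitems(items: List[pytest.Item]) -> None:
    """Attach the marker whose mapped directory contains the collected test file."""
    for item in items:
        test_path = Path(str(item.fspath)).absolute()
        for marker_name, directories in PATH_PYTEST_MARKER_MAPPINGS.items():
            if any(directory in test_path.parents for directory in directories):
                item.add_marker(getattr(pytest.mark, marker_name))
```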
110 changes: 56 additions & 54 deletions tests/test_model_training.py
@@ -6,7 +6,7 @@
import os
import textwrap
from pathlib import Path
from typing import Text
from typing import Text, Dict, Union, Any
from unittest.mock import Mock

import pytest
@@ -27,10 +27,18 @@
from rasa.engine.storage.local_model_storage import LocalModelStorage
from rasa.engine.recipes.default_recipe import DefaultV1Recipe
from rasa.engine.graph import GraphModelConfiguration
from rasa.engine.training.components import FingerprintStatus
from rasa.engine.training.graph_trainer import GraphTrainer
from rasa.shared.data import TrainingType
from rasa.model_training import (
CODE_FORCED_TRAINING,
CODE_NEEDS_TO_BE_RETRAINED,
CODE_NO_NEED_TO_TRAIN,
_dry_run_result,
)
from rasa.shared.core.events import ActionExecuted, SlotSet
from rasa.shared.core.training_data.structures import RuleStep, StoryGraph, StoryStep
from rasa.shared.data import TrainingType

from rasa.nlu.classifiers.diet_classifier import DIETClassifier
from rasa.shared.constants import LATEST_TRAINING_DATA_FORMAT_VERSION
import rasa.shared.utils.io
@@ -284,25 +292,6 @@ def test_e2e_gives_experimental_warning(
]
)

def test_models_not_retrained_if_no_new_data(
self,
trained_e2e_model: Text,
moodbot_domain_path: Path,
e2e_bot_config_file: Path,
e2e_stories_path: Text,
nlu_data_path: Text,
trained_e2e_model_cache: Path,
):
result = rasa.train(
str(moodbot_domain_path),
str(e2e_bot_config_file),
[e2e_stories_path, nlu_data_path],
output=new_model_path_in_same_dir(trained_e2e_model),
dry_run=True,
)

assert result.code == 0

def test_retrains_nlu_and_core_if_new_e2e_example(
self,
trained_e2e_model: Text,
@@ -894,39 +883,6 @@ def test_model_finetuning_with_invalid_model_nlu(
assert "No model for finetuning found" in capsys.readouterr().out


def test_models_not_retrained_if_only_new_responses(
trained_e2e_model: Text,
moodbot_domain_path: Path,
e2e_bot_config_file: Path,
e2e_stories_path: Text,
nlu_data_path: Text,
trained_e2e_model_cache: Path,
tmp_path: Path,
):
domain = Domain.load(moodbot_domain_path)
domain_with_extra_response = """
version: '2.0'
responses:
utter_greet:
- text: "Hi from Rasa"
"""
domain_with_extra_response = Domain.from_yaml(domain_with_extra_response)

new_domain = domain.merge(domain_with_extra_response)
new_domain_path = tmp_path / "domain.yml"
rasa.shared.utils.io.write_yaml(new_domain.as_dict(), new_domain_path)

result = rasa.train(
str(new_domain_path),
str(e2e_bot_config_file),
[e2e_stories_path, nlu_data_path],
output=str(tmp_path),
dry_run=True,
)

assert result.code == 0


def test_models_not_retrained_if_only_new_action(
trained_e2e_model: Text,
moodbot_domain_path: Path,
@@ -1088,3 +1044,49 @@ def test_check_unresolved_slots(capsys: CaptureFixture):
]
)
assert rasa.model_training._check_unresolved_slots(domain, stories) is None


@pytest.mark.parametrize(
"fingerprint_results, expected_code",
[
(
{
"key 1": FingerprintStatus(
is_hit=True, output_fingerprint="fingerprint 1"
),
"key 2": FingerprintStatus(
is_hit=True, output_fingerprint="fingerprint 2"
),
"key 3": FingerprintStatus(
is_hit=True, output_fingerprint="fingerprint 3"
),
},
CODE_NO_NEED_TO_TRAIN,
),
(
{
"key 1": FingerprintStatus(
is_hit=False, output_fingerprint="fingerprint 1"
),
"key 2": FingerprintStatus(
is_hit=True, output_fingerprint="fingerprint 2"
),
"key 3": FingerprintStatus(
is_hit=True, output_fingerprint="fingerprint 3"
),
},
CODE_NEEDS_TO_BE_RETRAINED,
),
],
)
def test_dry_run_result_no_force_retraining(
fingerprint_results: Dict[Text, Union[FingerprintStatus, Any]],
expected_code: int,
):
result = _dry_run_result(fingerprint_results, force_full_training=False)
assert result.code == expected_code


def test_dry_run_result_force_retraining():
result = _dry_run_result({}, force_full_training=True)
assert result.code == CODE_FORCED_TRAINING
