From 053b068fb02f6fffdf5174191ee4fba21f13477e Mon Sep 17 00:00:00 2001
From: anish-mudaraddi
Date: Fri, 3 Jan 2025 12:13:37 +0000
Subject: [PATCH] WIP: add rabbit-consumer source code

move rabbit-consumer source code out of SCD-Openstack-Utils
---
 .github/workflows/build_images.yaml           |  14 +-
 .github/workflows/{codeql.yml => codeql.yaml} |   0
 .github/workflows/rabbit_consumer.yaml        | 125 ++++
 .../hartree-notebook}/Dockerfile              |   0
 .../hartree-notebook}/requirements.txt        |   0
 .../jupyter-cil-notebook}/Dockerfile          |   0
 .../jupyter-opengl}/Dockerfile                |   0
 .../Dockerfile                                |   0
 .../jupyter-openstack-notebook}/Dockerfile    |   0
 .../jupyter-pytorch-notebook}/Dockerfile      |   0
 .../jupyter-tensorflow-notebook}/Dockerfile   |   0
 openstack-rabbit-consumer/.pylintrc           | 616 ++++++++++++++++++
 openstack-rabbit-consumer/Dockerfile          |  33 +
 openstack-rabbit-consumer/README.md           |  51 ++
 openstack-rabbit-consumer/entrypoint.py       |  27 +
 .../rabbit_consumer/__init__.py               |   0
 .../rabbit_consumer/aq_api.py                 | 301 +++++++++
 .../rabbit_consumer/aq_metadata.py            |  55 ++
 .../rabbit_consumer/consumer_config.py        |  66 ++
 .../rabbit_consumer/message_consumer.py       | 292 +++++++++
 .../rabbit_consumer/openstack_address.py      |  70 ++
 .../rabbit_consumer/openstack_api.py          | 112 ++++
 .../rabbit_consumer/rabbit_message.py         |  60 ++
 .../rabbit_consumer/vm_data.py                |  28 +
 .../requirements-test.txt                     |   3 +
 openstack-rabbit-consumer/requirements.txt    |   8 +
 openstack-rabbit-consumer/tests/__init__.py   |   0
 openstack-rabbit-consumer/tests/conftest.py   |  87 +++
 .../tests/test_aq_api.py                      | 454 +++++++++++++
 .../tests/test_aq_metadata.py                 |  82 +++
 .../tests/test_consumer_config.py             |  40 ++
 .../tests/test_message_consumer.py            | 445 +++++++++++++
 .../tests/test_openstack_address.py           | 161 +++++
 .../tests/test_openstack_api.py               | 161 +++++
 .../tests/test_rabbit_message.py              |  76 +++
 openstack-rabbit-consumer/version.txt         |   1 +
 36 files changed, 3362 insertions(+), 6 deletions(-)
 rename .github/workflows/{codeql.yml => codeql.yaml} (100%)
 create mode 100644 .github/workflows/rabbit_consumer.yaml
 rename {hartree-notebook => notebooks/hartree-notebook}/Dockerfile (100%)
 rename {hartree-notebook => notebooks/hartree-notebook}/requirements.txt (100%)
 rename {jupyter-cil-notebook => notebooks/jupyter-cil-notebook}/Dockerfile (100%)
 rename {jupyter-opengl => notebooks/jupyter-opengl}/Dockerfile (100%)
 rename {jupyter-openstack-notebook-centos => notebooks/jupyter-openstack-notebook-centos}/Dockerfile (100%)
 rename {jupyter-openstack-notebook => notebooks/jupyter-openstack-notebook}/Dockerfile (100%)
 rename {jupyter-pytorch-notebook => notebooks/jupyter-pytorch-notebook}/Dockerfile (100%)
 rename {jupyter-tensorflow-notebook => notebooks/jupyter-tensorflow-notebook}/Dockerfile (100%)
 create mode 100644 openstack-rabbit-consumer/.pylintrc
 create mode 100644 openstack-rabbit-consumer/Dockerfile
 create mode 100644 openstack-rabbit-consumer/README.md
 create mode 100644 openstack-rabbit-consumer/entrypoint.py
 create mode 100644 openstack-rabbit-consumer/rabbit_consumer/__init__.py
 create mode 100644 openstack-rabbit-consumer/rabbit_consumer/aq_api.py
 create mode 100644 openstack-rabbit-consumer/rabbit_consumer/aq_metadata.py
 create mode 100644 openstack-rabbit-consumer/rabbit_consumer/consumer_config.py
 create mode 100644 openstack-rabbit-consumer/rabbit_consumer/message_consumer.py
 create mode 100644 openstack-rabbit-consumer/rabbit_consumer/openstack_address.py
 create mode 100644 openstack-rabbit-consumer/rabbit_consumer/openstack_api.py
 create mode 100644 openstack-rabbit-consumer/rabbit_consumer/rabbit_message.py
 create mode 100644 openstack-rabbit-consumer/rabbit_consumer/vm_data.py
 create mode 100644 openstack-rabbit-consumer/requirements-test.txt
 create mode 100644 openstack-rabbit-consumer/requirements.txt
 create mode 100644 openstack-rabbit-consumer/tests/__init__.py
 create mode 100644 openstack-rabbit-consumer/tests/conftest.py
 create mode 100644 openstack-rabbit-consumer/tests/test_aq_api.py
 create mode 100644 openstack-rabbit-consumer/tests/test_aq_metadata.py
 create mode 100644 openstack-rabbit-consumer/tests/test_consumer_config.py
 create mode 100644 openstack-rabbit-consumer/tests/test_message_consumer.py
 create mode 100644 openstack-rabbit-consumer/tests/test_openstack_address.py
 create mode 100644 openstack-rabbit-consumer/tests/test_openstack_api.py
 create mode 100644 openstack-rabbit-consumer/tests/test_rabbit_message.py
 create mode 100644 openstack-rabbit-consumer/version.txt

diff --git a/.github/workflows/build_images.yaml b/.github/workflows/build_images.yaml
index f72e649..b4aeb8a 100644
--- a/.github/workflows/build_images.yaml
+++ b/.github/workflows/build_images.yaml
@@ -1,13 +1,15 @@
 name: docker_images
 on:
   push:
-    paths-ignore:
-      - 'cloud-chatops/**'
-      - '.github/workflows/cloud_chatops.yaml'
+    paths:
+      - 'notebooks/**'
+      - '.github/workflows/codeql.yaml'
+      - '.github/workflows/build_images.yaml'
   pull_request:
-    paths-ignore:
-      - 'cloud-chatops/**'
-      - '.github/workflows/cloud_chatops.yaml'
+    paths:
+      - 'notebooks/**'
+      - '.github/workflows/codeql.yaml'
+      - '.github/workflows/build_images.yaml'
   schedule: [{cron: "14 14 * * TUE"}] # Every Tuesday at 14:14
 jobs:
   setup:
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yaml
similarity index 100%
rename from .github/workflows/codeql.yml
rename to .github/workflows/codeql.yaml
diff --git a/.github/workflows/rabbit_consumer.yaml b/.github/workflows/rabbit_consumer.yaml
new file mode 100644
index 0000000..9ec8680
--- /dev/null
+++ b/.github/workflows/rabbit_consumer.yaml
@@ -0,0 +1,125 @@
+name: Rabbit Consumer
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    paths:
+      - ".github/workflows/rabbit_consumer.yaml"
+      - "openstack-rabbit-consumer/**"
+
+jobs:
+  test_and_lint:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ["3.10"]
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v3
+        with:
+          python-version: ${{ matrix.python-version }}
+          cache: "pip"
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          # Required for requests-kerberos
+          sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install libkrb5-dev
+          pip install -r openstack-rabbit-consumer/requirements.txt
+          pip install -r openstack-rabbit-consumer/requirements-test.txt
+
+      - name: Run tests
+        # Using Python3 to launch the module sets up the Python path for us
+        run: cd openstack-rabbit-consumer && python3 -m coverage run -m pytest .
+
+      - name: Analyse with pylint
+        run: |
+          cd openstack-rabbit-consumer && pylint $(git ls-files '*.py')
+
+      - name: Prepare coverage
+        run: |
+          cd openstack-rabbit-consumer && python -m coverage xml
+
+      - name: Upload coverage to codecov
+        uses: codecov/codecov-action@v3
+        with:
+          files: openstack-rabbit-consumer/coverage.xml
+          fail_ci_if_error: true
+          flags: rabbit_consumer
+
+  push_dev_image_harbor:
+    runs-on: ubuntu-latest
+    needs: test_and_lint
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Login to Harbor
+        uses: docker/login-action@v2
+        with:
+          registry: harbor.stfc.ac.uk
+          username: ${{ secrets.HARBOR_USERNAME }}
+          password: ${{ secrets.HARBOR_TOKEN }}
+
+      - name: Set commit SHA for later
+        id: commit_sha
+        run: echo "sha_short=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
+
+      - name: Build and push to staging project
+        uses: docker/build-push-action@v3
+        with:
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+          push: true
+          context: "{{defaultContext}}:openstack-rabbit-consumer"
+          tags: "harbor.stfc.ac.uk/stfc-cloud-staging/openstack-rabbit-consumer:${{ steps.commit_sha.outputs.sha_short }}"
+
+      - name: Inform of tagged name
+        run: echo "Image published to harbor.stfc.ac.uk/stfc-cloud-staging/openstack-rabbit-consumer:${{ steps.commit_sha.outputs.sha_short }}"
+
+  push_release_image_harbor:
+    runs-on: ubuntu-latest
+    needs: test_and_lint
+    if: github.ref == 'refs/heads/master'
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Login to Harbor
+        uses: docker/login-action@v2
+        with:
+          registry: harbor.stfc.ac.uk
+          username: ${{ secrets.HARBOR_USERNAME }}
+          password: ${{ secrets.HARBOR_TOKEN }}
+
+      - name: Get release tag for later
+        id: release_tag
+        run: echo "version=$(cat openstack-rabbit-consumer/version.txt)" >> $GITHUB_OUTPUT
+
+      - name: Check if release file has updated
+        uses: dorny/paths-filter@v2
+        id: release_updated
+        with:
+          filters: |
+            version:
+              - 'openstack-rabbit-consumer/version.txt'
+
+      - name: Build and push on version change
+        uses: docker/build-push-action@v3
+        if: steps.release_updated.outputs.version == 'true'
+        with:
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+          push: true
+          context: "{{defaultContext}}:openstack-rabbit-consumer"
+          tags: "harbor.stfc.ac.uk/stfc-cloud/openstack-rabbit-consumer:v${{ steps.release_tag.outputs.version }}"
+
+      - name: Inform of tagged name
+        if: steps.release_updated.outputs.version == 'true'
+        run: echo "Image published to harbor.stfc.ac.uk/stfc-cloud/openstack-rabbit-consumer:v${{ steps.release_tag.outputs.version }}"
diff --git a/hartree-notebook/Dockerfile b/notebooks/hartree-notebook/Dockerfile
similarity index 100%
rename from hartree-notebook/Dockerfile
rename to
notebooks/hartree-notebook/Dockerfile diff --git a/hartree-notebook/requirements.txt b/notebooks/hartree-notebook/requirements.txt similarity index 100% rename from hartree-notebook/requirements.txt rename to notebooks/hartree-notebook/requirements.txt diff --git a/jupyter-cil-notebook/Dockerfile b/notebooks/jupyter-cil-notebook/Dockerfile similarity index 100% rename from jupyter-cil-notebook/Dockerfile rename to notebooks/jupyter-cil-notebook/Dockerfile diff --git a/jupyter-opengl/Dockerfile b/notebooks/jupyter-opengl/Dockerfile similarity index 100% rename from jupyter-opengl/Dockerfile rename to notebooks/jupyter-opengl/Dockerfile diff --git a/jupyter-openstack-notebook-centos/Dockerfile b/notebooks/jupyter-openstack-notebook-centos/Dockerfile similarity index 100% rename from jupyter-openstack-notebook-centos/Dockerfile rename to notebooks/jupyter-openstack-notebook-centos/Dockerfile diff --git a/jupyter-openstack-notebook/Dockerfile b/notebooks/jupyter-openstack-notebook/Dockerfile similarity index 100% rename from jupyter-openstack-notebook/Dockerfile rename to notebooks/jupyter-openstack-notebook/Dockerfile diff --git a/jupyter-pytorch-notebook/Dockerfile b/notebooks/jupyter-pytorch-notebook/Dockerfile similarity index 100% rename from jupyter-pytorch-notebook/Dockerfile rename to notebooks/jupyter-pytorch-notebook/Dockerfile diff --git a/jupyter-tensorflow-notebook/Dockerfile b/notebooks/jupyter-tensorflow-notebook/Dockerfile similarity index 100% rename from jupyter-tensorflow-notebook/Dockerfile rename to notebooks/jupyter-tensorflow-notebook/Dockerfile diff --git a/openstack-rabbit-consumer/.pylintrc b/openstack-rabbit-consumer/.pylintrc new file mode 100644 index 0000000..d4cfd47 --- /dev/null +++ b/openstack-rabbit-consumer/.pylintrc @@ -0,0 +1,616 @@ +[MAIN] + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. 
+fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore= + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\' represents the directory delimiter on Windows systems, it +# can't be used as an escape character. +ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis). It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=0 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.8 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. 
+#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=e, + i, + j, + k, + ex, + Run, + os, + _ + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. 
+defining-attr-methods=__init__, + __new__, + setUp, + asyncSetUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit + +# List of valid names for the first argument in a class method. +valid-metaclass-classmethod-first-arg=mcs + +[DESIGN] + +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= + +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). +ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. 
+preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-symbolic-message-instead, + line-too-long + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable=c-extension-no-member + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio). You can also give a reporter class, e.g. 
+# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work.. +spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. 
+missing-member-hint=yes
+
+# The minimum edit distance a name should have in order to be considered a
+# similar match for a missing member name.
+missing-member-hint-distance=1
+
+# The total number of similar names that should be taken in consideration when
+# showing a hint for a missing member.
+missing-member-max-choices=1
+
+# Regex pattern to define which classes are considered mixins.
+mixin-class-rgx=.*[Mm]ixin
+
+# List of decorators that change the signature of a decorated function.
+signature-mutators=
+
+
+[VARIABLES]
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+# Tells whether unused global variables should be treated as a violation.
+allow-global-unused-variables=yes
+
+# List of names allowed to shadow builtins
+allowed-redefined-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,
+          _cb
+
+# A regular expression matching the name of dummy variables (i.e. expected to
+# not be used).
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
+
+# Argument names that match this expression will be ignored.
+ignored-argument-names=_.*|^ignored_|^unused_
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# List of qualified module names which can have objects that can redefine
+# builtins.
+redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
diff --git a/openstack-rabbit-consumer/Dockerfile b/openstack-rabbit-consumer/Dockerfile
new file mode 100644
index 0000000..37986f5
--- /dev/null
+++ b/openstack-rabbit-consumer/Dockerfile
@@ -0,0 +1,33 @@
+FROM python:3.10
+
+WORKDIR /usr/src/app
+
+RUN apt-get update \
+    && DEBIAN_FRONTEND=noninteractive \
+    apt-get install -y --no-install-recommends \
+    krb5-user \
+    && rm -rf /var/lib/apt/lists/*
+
+COPY requirements.txt ./
+RUN pip install --no-cache-dir -r requirements.txt
+
+COPY . .
+
+ENV AQ_PREFIX=NOT_SET \
+    AQ_URL=NOT_SET \
+    #
+    KRB5CCNAME=NOT_SET \
+    #
+    RABBIT_HOST=NOT_SET \
+    RABBIT_PORT=NOT_SET \
+    RABBIT_USERNAME=NOT_SET \
+    RABBIT_PASSWORD=NOT_SET \
+    #
+    OPENSTACK_AUTH_URL=NOT_SET \
+    OPENSTACK_COMPUTE_URL=NOT_SET \
+    OPENSTACK_USERNAME=NOT_SET \
+    OPENSTACK_PASSWORD=NOT_SET
+
+ENV LOG_LEVEL=INFO
+
+CMD [ "python", "./entrypoint.py" ]
diff --git a/openstack-rabbit-consumer/README.md b/openstack-rabbit-consumer/README.md
new file mode 100644
index 0000000..4e7a28b
--- /dev/null
+++ b/openstack-rabbit-consumer/README.md
@@ -0,0 +1,51 @@
+Openstack Rabbit Consumers
+---------------------------
+
+The consumer monitors the RabbitMQ queue and automatically registers new
+machines with the configuration management tool (Aquilon).
+
+This container assumes that a sidecar container is running to handle krb5 machine authentication.
+
+Release
+-------
+
+Pull requests will push a tagged image (with the commit SHA) to
+harbor.stfc.ac.uk/stfc-cloud-staging/openstack-rabbit-consumer:sha
+
+(Where the SHA can be found in the GH actions build logs)
+
+To release a new version, update version.txt with the new version number.
+When the PR is merged, a new image will be pushed to harbor.stfc.ac.uk/stfc-cloud/openstack-rabbit-consumer
+
+You may need to update the version in the helm chart to match the new version.
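+
+As a quick check, you can pull a PR image from the staging registry by its
+short commit SHA (the tag below is illustrative only):
+
+`docker pull harbor.stfc.ac.uk/stfc-cloud-staging/openstack-rabbit-consumer:abc1234`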
+
+Testing Locally
+===============
+
+Initial setup
+-------------
+
+- Spin up minikube locally
+- Install the secrets, as per the instructions in the chart
+- Make docker use the minikube docker daemon in your current shell:
+`eval $(minikube docker-env)`
+
+Testing
+-------
+
+- Build the docker image locally:
+`docker build -t rabbit-consumer:1 .`
+- cd to the chart directory:
+`cd ../charts/rabbit-consumer`
+- Install/Upgrade the chart with your changes:
+`helm install rabbit-consumers . -f values.yaml -f dev-values.yaml -n rabbit-consumers`
+- To deploy a new image, rebuild it and upgrade the release:
+`docker build -t rabbit-consumer:1 . && helm upgrade rabbit-consumers . -f values.yaml -f dev-values.yaml -n rabbit-consumers`
+- Logs can be found with:
+`kubectl logs deploy/rabbit-consumers -n rabbit-consumers`
+
diff --git a/openstack-rabbit-consumer/entrypoint.py b/openstack-rabbit-consumer/entrypoint.py
new file mode 100644
index 0000000..e15fc2d
--- /dev/null
+++ b/openstack-rabbit-consumer/entrypoint.py
@@ -0,0 +1,27 @@
+#!/usr/bin/python3
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2023 United Kingdom Research and Innovation
+"""
+Prepares the logging and initiates the consumers.
+"""
+import logging
+import logging.handlers
+import os
+import sys
+
+
+def _prep_logging():
+    logger = logging.getLogger("rabbit_consumer")
+    logger.setLevel(os.getenv("LOG_LEVEL", "INFO").upper())
+    logger.addHandler(logging.StreamHandler(sys.stdout))
+
+    logging.getLogger("requests").setLevel(logging.WARNING)
+    logging.getLogger("urllib3").setLevel(logging.WARNING)
+
+
+if __name__ == "__main__":
+    _prep_logging()
+
+    from rabbit_consumer.message_consumer import initiate_consumer
+
+    initiate_consumer()
diff --git a/openstack-rabbit-consumer/rabbit_consumer/__init__.py b/openstack-rabbit-consumer/rabbit_consumer/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/openstack-rabbit-consumer/rabbit_consumer/aq_api.py b/openstack-rabbit-consumer/rabbit_consumer/aq_api.py
new file mode 100644
index 0000000..dd396c7
--- /dev/null
+++ b/openstack-rabbit-consumer/rabbit_consumer/aq_api.py
@@ -0,0 +1,301 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2023 United Kingdom Research and Innovation
+"""
+This file defines methods to be used to interact with the
+Aquilon API
+"""
+import logging
+import subprocess
+from typing import Optional, List
+
+import requests
+from requests.adapters import HTTPAdapter
+from requests_kerberos import HTTPKerberosAuth
+from urllib3.util.retry import Retry
+
+from rabbit_consumer.consumer_config import ConsumerConfig
+from rabbit_consumer.aq_metadata import AqMetadata
+from rabbit_consumer.openstack_address import OpenstackAddress
+from rabbit_consumer.rabbit_message import RabbitMessage
+from rabbit_consumer.vm_data import VmData
+
+HOST_CHECK_SUFFIX = "/host/{0}"
+
+UPDATE_INTERFACE_SUFFIX = "/machine/{0}/interface/{1}?boot&default_route"
+
+DELETE_HOST_SUFFIX = "/host/{0}"
+DELETE_MACHINE_SUFFIX = "/machine/{0}"
+
+logger = logging.getLogger(__name__)
+
+
+class AquilonError(Exception):
+    """
+    Base class for Aquilon errors
+    """
+
+
+def verify_kerberos_ticket() -> bool:
+    """
+    Check for a valid Kerberos ticket from a sidecar, or on the host.
+    Raises a RuntimeError if no ticket is found
+    """
+    logger.debug("Checking for valid Kerberos Ticket")
+
+    if subprocess.call(["klist", "-s"]) != 0:
+        raise RuntimeError("No shared Kerberos ticket found.")
+
+    logger.debug("Kerberos ticket success")
+    return True
+
+
+def setup_requests(
+    url: str, method: str, desc: str, params: Optional[dict] = None
+) -> str:
+    """
+    Passes a request to the Aquilon API
+    """
+    verify_kerberos_ticket()
+    logger.debug("%s: %s - params: %s", method, url, params)
+
+    session = requests.Session()
+    session.verify = "/etc/grid-security/certificates/aquilon-gridpp-rl-ac-uk-chain.pem"
+    retries = Retry(total=5, backoff_factor=0.1, status_forcelist=[503])
+    session.mount("https://", HTTPAdapter(max_retries=retries))
+    if method == "post":
+        response = session.post(url, auth=HTTPKerberosAuth(), params=params)
+    elif method == "put":
+        response = session.put(url, auth=HTTPKerberosAuth(), params=params)
+    elif method == "delete":
+        response = session.delete(url, auth=HTTPKerberosAuth(), params=params)
+    else:
+        response = session.get(url, auth=HTTPKerberosAuth(), params=params)
+
+    if response.status_code == 400:
+        # This might be an expected error, so don't log it
+        logger.debug("AQ Error Response: %s", response.text)
+        raise AquilonError(response.text)
+
+    if response.status_code != 200:
+        logger.error("%s: Failed: %s", desc, response.text)
+        logger.error(url)
+        raise ConnectionError(
+            f"Failed {desc}: {response.status_code} - {response.text}"
+        )
+
+    logger.debug("Success: %s ", desc)
+    logger.debug("AQ Response: %s", response.text)
+    return response.text
+
+
+def aq_make(addresses: List[OpenstackAddress]) -> None:
+    """
+    Runs AQ make against a list of addresses passed to refresh
+    the given host
+    """
+    # Manage and make these back to default domain and personality
+    address = addresses[0]
+    hostname = address.hostname
+    logger.debug("Attempting to make templates for %s", hostname)
+
+    if not hostname or not hostname.strip():
+        raise ValueError("Hostname cannot be empty")
+
+    url = ConsumerConfig().aq_url + f"/host/{hostname}/command/make"
+    setup_requests(url, "post", "Make Template")
+
+
+def aq_manage(addresses: List[OpenstackAddress], image_meta: AqMetadata) -> None:
+    """
+    Manages the list of Aquilon addresses passed to it back to the production domain
+    """
+    address = addresses[0]
+    hostname = address.hostname
+    logger.debug("Attempting to manage %s", hostname)
+
+    params = {
+        "hostname": hostname,
+        "force": True,
+    }
+    if image_meta.aq_sandbox:
+        params["sandbox"] = image_meta.aq_sandbox
+    else:
+        params["domain"] = image_meta.aq_domain
+
+    url = ConsumerConfig().aq_url + f"/host/{hostname}/command/manage"
+    setup_requests(url, "post", "Manage Host", params=params)
+
+
+def create_machine(message: RabbitMessage, vm_data: VmData) -> str:
+    """
+    Creates a machine in Aquilon. Returns the machine name
+    """
+    logger.debug("Attempting to create machine for %s ", vm_data.virtual_machine_id)
+
+    params = {
+        "model": "vm-openstack",
+        "serial": vm_data.virtual_machine_id,
+        "vmhost": message.payload.vm_host,
+        "cpucount": message.payload.vcpus,
+        "memory": message.payload.memory_mb,
+    }
+
+    url = ConsumerConfig().aq_url + f"/next_machine/{ConsumerConfig().aq_prefix}"
+    response = setup_requests(url, "put", "Create Machine", params=params)
+    return response
+
+
+def delete_machine(machine_name: str) -> None:
+    """
+    Deletes a machine in Aquilon
+    """
+    logger.debug("Attempting to delete machine for %s", machine_name)
+
+    url = ConsumerConfig().aq_url + DELETE_MACHINE_SUFFIX.format(machine_name)
+
+    setup_requests(url, "delete", "Delete Machine")
+
+
+def create_host(
+    image_meta: AqMetadata, addresses: List[OpenstackAddress], machine_name: str
+) -> None:
+    """
+    Creates a host in Aquilon
+    """
+    config = ConsumerConfig()
+
+    address = addresses[0]
+    params = {
+        "machine": machine_name,
+        "ip": address.addr,
+        "archetype": image_meta.aq_archetype,
+        "personality": image_meta.aq_personality,
+        "osname": image_meta.aq_os,
+        "osversion": image_meta.aq_os_version,
+    }
+
+    if image_meta.aq_sandbox:
+        params["sandbox"] = image_meta.aq_sandbox
+    else:
+        params["domain"] = image_meta.aq_domain
+
+    logger.debug("Attempting to create host for %s ", address.hostname)
+    url = config.aq_url + f"/host/{address.hostname}"
+    setup_requests(url, "put", "Host Create", params=params)
+
+
+def delete_host(hostname: str) -> None:
+    """
+    Deletes a host in Aquilon
+    """
+    logger.debug("Attempting to delete host for %s ", hostname)
+    url = ConsumerConfig().aq_url + DELETE_HOST_SUFFIX.format(hostname)
+    setup_requests(url, "delete", "Host Delete")
+
+
+def delete_address(address: str, machine_name: str) -> None:
+    """
+    Deletes an address in Aquilon
+    """
+    logger.debug("Attempting to delete address for %s ", address)
+    url = ConsumerConfig().aq_url + "/interface_address"
+    params = {"ip": address, "machine": machine_name, "interface": "eth0"}
+    setup_requests(url, "delete", "Address Delete", params=params)
+
+
+def delete_interface(machine_name: str) -> None:
+    """
+    Deletes a host interface in Aquilon
+    """
+    logger.debug("Attempting to delete interface for %s ", machine_name)
+    url = ConsumerConfig().aq_url + "/interface/command/del"
+    params = {"interface": "eth0", "machine": machine_name}
+    setup_requests(url, "post", "Interface Delete", params=params)
+
+
+def add_machine_nics(machine_name: str, addresses: List[OpenstackAddress]) -> None:
+    """
+    Adds NICs to a given machine in Aquilon based on the VM addresses
+    """
+    # We only add the first host interface for now
+    # this avoids having to do a lot of work to figure out
+    # which interface names we have to use to clean-up
+    address = addresses[0]
+    interface_name = "eth0"
+
+    logger.debug(
+        "Attempting to add interface %s to machine %s ",
+        interface_name,
+        machine_name,
+    )
+    url = (
+        ConsumerConfig().aq_url + f"/machine/{machine_name}/interface/{interface_name}"
+    )
+    setup_requests(
+        url, "put", "Add Machine Interface", params={"mac": address.mac_addr}
+    )
+
+
+def set_interface_bootable(machine_name: str, interface_name: str) -> None:
+    """
+    Sets a given interface on a machine to be bootable
+    """
+    logger.debug("Attempting to set %s as bootable", machine_name)
+
+    url = ConsumerConfig().aq_url + UPDATE_INTERFACE_SUFFIX.format(
+        machine_name, interface_name
+    )
+
+    setup_requests(url, "post", "Update Machine Interface")
+
+
+def search_machine_by_serial(vm_data: VmData) -> Optional[str]:
+    """
+    Searches for a machine in Aquilon based on a serial number
+    """
+    logger.debug("Searching for machine with serial %s", vm_data.virtual_machine_id)
+    url = ConsumerConfig().aq_url + "/find/machine"
+    params = {"serial": vm_data.virtual_machine_id}
+    response = setup_requests(url, "get", "Search Host", params=params).strip()
+
+    if response:
+        return response
+    return None
+
+
+def search_host_by_machine(machine_name: str) -> Optional[str]:
+    """
+    Searches for a host in Aquilon based on a machine name
+    """
+    logger.debug("Searching for host with machine name %s", machine_name)
+    url = ConsumerConfig().aq_url + "/find/host"
+    params = {"machine": machine_name}
+    response = setup_requests(url, "get", "Search Host", params=params).strip()
+
+    if response:
+        return response
+    return None
+
+
+def get_machine_details(machine_name: str) -> str:
+    """
+    Gets a machine's details as a string
+    """
+    logger.debug("Getting machine details for %s", machine_name)
+    url = ConsumerConfig().aq_url + f"/machine/{machine_name}"
+    return setup_requests(url, "get", "Get machine details").strip()
+
+
+def check_host_exists(hostname: str) -> bool:
+    """
+    Checks if a host exists in Aquilon
+    """
+    logger.debug("Checking if hostname exists: %s", hostname)
+    url = ConsumerConfig().aq_url + HOST_CHECK_SUFFIX.format(hostname)
+    try:
+        setup_requests(url, "get", "Check Host")
+    except AquilonError as err:
+        if f"Host {hostname} not found." in str(err):
+            return False
+        raise
+    return True
diff --git a/openstack-rabbit-consumer/rabbit_consumer/aq_metadata.py b/openstack-rabbit-consumer/rabbit_consumer/aq_metadata.py
new file mode 100644
index 0000000..830cd64
--- /dev/null
+++ b/openstack-rabbit-consumer/rabbit_consumer/aq_metadata.py
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2023 United Kingdom Research and Innovation
+"""
+This file defines the class to handle deserialised metadata for
+Aquilon
+"""
+import logging
+from dataclasses import dataclass
+from typing import Dict, Optional
+
+from mashumaro import DataClassDictMixin
+from mashumaro.config import BaseConfig
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class AqMetadata(DataClassDictMixin):
+    """
+    Deserialised metadata that is set either on an Openstack image
+    or a VM's metadata
+    """
+
+    aq_archetype: str
+    aq_domain: str
+
+    aq_personality: str
+    aq_os_version: str
+    aq_os: str
+
+    aq_sandbox: Optional[str] = None
+
+    # pylint: disable=too-few-public-methods
+    class Config(BaseConfig):
+        """
+        Sets the aliases for the metadata keys
+        """
+
+        aliases = {
+            "aq_archetype": "AQ_ARCHETYPE",
+            "aq_domain": "AQ_DOMAIN",
+            "aq_sandbox": "AQ_SANDBOX",
+            "aq_personality": "AQ_PERSONALITY",
+            "aq_os_version": "AQ_OSVERSION",
+            "aq_os": "AQ_OS",
+        }
+
+    def override_from_vm_meta(self, vm_meta: Dict[str, str]):
+        """
+        Overrides the values in the metadata with the values from the VM's
+        metadata
+        """
+        for attr, alias in self.Config.aliases.items():
+            if alias in vm_meta:
+                setattr(self, attr, vm_meta[alias])
diff --git a/openstack-rabbit-consumer/rabbit_consumer/consumer_config.py b/openstack-rabbit-consumer/rabbit_consumer/consumer_config.py
new file mode 100644
index 0000000..8e35d3c
--- /dev/null
+++ b/openstack-rabbit-consumer/rabbit_consumer/consumer_config.py
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2023 United Kingdom Research and Innovation
+"""
+This file reads configuration from environment variables so that
+credentials are not hard-coded
+"""
+
+import os
+from dataclasses import dataclass, field
+from functools import partial
+
+
+@dataclass
+class _AqFields:
+    """
+    Dataclass for all Aquilon config elements. These are pulled from
+    environment variables.
+    """
+
+    aq_prefix: str = field(default_factory=partial(os.getenv, "AQ_PREFIX"))
+    aq_url: str = field(default_factory=partial(os.getenv, "AQ_URL"))
+
+
+@dataclass
+class _OpenstackFields:
+    """
+    Dataclass for all Openstack config elements. These are pulled from
+    environment variables.
+    """
+
+    openstack_auth_url: str = field(
+        default_factory=partial(os.getenv, "OPENSTACK_AUTH_URL")
+    )
+    openstack_compute_url: str = field(
+        default_factory=partial(os.getenv, "OPENSTACK_COMPUTE_URL")
+    )
+    openstack_username: str = field(
+        default_factory=partial(os.getenv, "OPENSTACK_USERNAME")
+    )
+    openstack_password: str = field(
+        default_factory=partial(os.getenv, "OPENSTACK_PASSWORD")
+    )
+
+
+@dataclass
+class _RabbitFields:
+    """
+    Dataclass for all RabbitMQ config elements. These are pulled from
+    environment variables.
+    """
+
+    rabbit_host: str = field(default_factory=partial(os.getenv, "RABBIT_HOST", None))
+    rabbit_port: str = field(default_factory=partial(os.getenv, "RABBIT_PORT", None))
+    rabbit_username: str = field(
+        default_factory=partial(os.getenv, "RABBIT_USERNAME", None)
+    )
+    rabbit_password: str = field(
+        default_factory=partial(os.getenv, "RABBIT_PASSWORD", None)
+    )
+
+
+@dataclass
+class ConsumerConfig(_AqFields, _OpenstackFields, _RabbitFields):
+    """
+    Mix-in class for all known config elements
+    """
diff --git a/openstack-rabbit-consumer/rabbit_consumer/message_consumer.py b/openstack-rabbit-consumer/rabbit_consumer/message_consumer.py
new file mode 100644
index 0000000..1f8af1d
--- /dev/null
+++ b/openstack-rabbit-consumer/rabbit_consumer/message_consumer.py
@@ -0,0 +1,292 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2023 United Kingdom Research and Innovation
+"""
+This file manages how rabbit messages describing AQ VM creation and deletion
+should be handled and processed between the consumer and Aquilon
+"""
+import json
+import logging
+import socket
+from typing import Optional, List
+
+import rabbitpy
+
+from rabbit_consumer import aq_api
+from rabbit_consumer import openstack_api
+from rabbit_consumer.aq_api import verify_kerberos_ticket
+from rabbit_consumer.consumer_config import ConsumerConfig
+from rabbit_consumer.aq_metadata import AqMetadata
+from rabbit_consumer.openstack_address import OpenstackAddress
+from rabbit_consumer.rabbit_message import RabbitMessage, MessageEventType
+from rabbit_consumer.vm_data import VmData
+
+logger = logging.getLogger(__name__)
+SUPPORTED_MESSAGE_TYPES = {
+    "create": "compute.instance.create.end",
+    "delete": "compute.instance.delete.start",
+}
+
+
+def is_aq_managed_image(vm_data: VmData) -> bool:
+    """
+    Check to see if the metadata in the message contains entries that suggest it
+    is for an Aquilon VM.
+    """
+    image = openstack_api.get_image(vm_data)
+    if not image:
+        logger.info("No image found for %s", vm_data.virtual_machine_id)
+        return False
+
+    if "AQ_OS" not in image.metadata:
+        logger.debug("Skipping non-Aquilon image: %s", image.name)
+        return False
+    return True
+
+
+def get_aq_build_metadata(vm_data: VmData) -> AqMetadata:
+    """
+    Gets the Aq Metadata from either the image or VM (where
+    VM metadata takes precedence) to determine the AQ params
+    """
+    image = openstack_api.get_image(vm_data)
+    image_meta = AqMetadata.from_dict(image.metadata)
+
+    vm_metadata = openstack_api.get_server_metadata(vm_data)
+    image_meta.override_from_vm_meta(vm_metadata)
+    return image_meta
+
+
+def consume(message: RabbitMessage) -> None:
+    """
+    Consumes a message from the rabbit queue and calls the appropriate
+    handler based on the event type.
+    """
+    if message.event_type == SUPPORTED_MESSAGE_TYPES["create"]:
+        handle_create_machine(message)
+
+    elif message.event_type == SUPPORTED_MESSAGE_TYPES["delete"]:
+        handle_machine_delete(message)
+
+    else:
+        raise ValueError(f"Unsupported message type: {message.event_type}")
+
+
+def delete_machine(
+    vm_data: VmData, network_details: Optional[OpenstackAddress] = None
+) -> None:
+    """
+    Deletes a machine in Aquilon and all associated addresses based on
+    the serial, MAC and hostname provided. This is a best-effort attempt
+    to clean up, since we can have partial or incorrect information.
+    """
+    # First handle hostnames
+    if network_details and aq_api.check_host_exists(network_details.hostname):
+        logger.info("Deleting host %s", network_details.hostname)
+        aq_api.delete_host(network_details.hostname)
+
+    machine_name = aq_api.search_machine_by_serial(vm_data)
+    if not machine_name:
+        logger.info("No existing record found for %s", vm_data.virtual_machine_id)
+        return
+
+    # We have to do this manually because AQ has neither a:
+    # - Just delete the machine please
+    # - Delete this if it exists
+    # So alas we have to do everything by hand, whilst adhering to random rules
+    # of deletion orders which it enforces...
+
+    hostname = aq_api.search_host_by_machine(machine_name)
+    machine_details = aq_api.get_machine_details(machine_name)
+
+    # We have to clean up all the interfaces and addresses first, since
+    # we could have a machine which points to a different hostname
+    if hostname:
+        if aq_api.check_host_exists(hostname):
+            # This is a different hostname to the one we have in the message,
+            # so we need to delete it
+            logger.info("Host exists for %s. Deleting old", hostname)
+            aq_api.delete_host(hostname)
+        else:
+            # Delete the interfaces
+            ipv4_address = socket.gethostbyname(hostname)
+            if ipv4_address in machine_details:
+                aq_api.delete_address(ipv4_address, machine_name)
+
+            if "eth0" in machine_details:
+                aq_api.delete_interface(machine_name)
+
+    logger.info("Machine exists for %s. Deleting old", vm_data.virtual_machine_id)
+
+    # Then delete the machine
+    aq_api.delete_machine(machine_name)
+
+
+def check_machine_valid(rabbit_message: RabbitMessage) -> bool:
+    """
+    Checks whether the machine is valid for creation in Aquilon.
+    """
+    vm_data = VmData.from_message(rabbit_message)
+    if not openstack_api.check_machine_exists(vm_data):
+        # User has likely deleted the machine since we got here
+        logger.warning(
+            "Machine %s does not exist, skipping creation", vm_data.virtual_machine_id
+        )
+        return False
+
+    if not is_aq_managed_image(vm_data):
+        logger.debug("Ignoring non AQ Image: %s", rabbit_message)
+        return False
+
+    return True
+
+
+def handle_create_machine(rabbit_message: RabbitMessage) -> None:
+    """
+    Handles the creation of a machine in Aquilon. This includes
+    creating the machine, adding the NICs, and managing the host.
+    """
+    logger.info("=== Received Aquilon VM create message ===")
+    _print_debug_logging(rabbit_message)
+
+    if not check_machine_valid(rabbit_message):
+        return
+
+    vm_data = VmData.from_message(rabbit_message)
+
+    image_meta = get_aq_build_metadata(vm_data)
+    network_details = openstack_api.get_server_networks(vm_data)
+
+    if not network_details or not network_details[0].hostname:
+        vm_name = rabbit_message.payload.vm_name
+        logger.info("Skipping novalocal only host: %s", vm_name)
+        return
+
+    logger.info("Clearing any existing records from Aquilon")
+    delete_machine(vm_data, network_details[0])
+
+    # Configure networking
+    machine_name = aq_api.create_machine(rabbit_message, vm_data)
+    aq_api.add_machine_nics(machine_name, network_details)
+    aq_api.set_interface_bootable(machine_name, "eth0")
+
+    # Manage host in Aquilon
+    aq_api.create_host(image_meta, network_details, machine_name)
+    aq_api.aq_make(network_details)
+
+    add_aq_details_to_metadata(vm_data, network_details)
+
+    logger.info(
+        "=== Finished Aquilon creation hook for VM %s ===", vm_data.virtual_machine_id
+    )
+
+
+def _print_debug_logging(rabbit_message: RabbitMessage) -> None:
+    """
+    Prints debug logging for the Aquilon message.
+ """ + vm_data = VmData.from_message(rabbit_message) + logger.debug( + "Project Name: %s (%s)", rabbit_message.project_name, vm_data.project_id + ) + logger.info( + "VM Name: %s (%s) ", rabbit_message.payload.vm_name, vm_data.virtual_machine_id + ) + logger.debug("Username: %s", rabbit_message.user_name) + + +def handle_machine_delete(rabbit_message: RabbitMessage) -> None: + """ + Handles the deletion of a machine in Aquilon. This includes + deleting the machine and the host. + """ + logger.info("=== Received Aquilon VM delete message ===") + _print_debug_logging(rabbit_message) + + vm_data = VmData.from_message(rabbit_message) + delete_machine(vm_data=vm_data) + + logger.info( + "=== Finished Aquilon deletion hook for VM %s ===", vm_data.virtual_machine_id + ) + + +def add_aq_details_to_metadata( + vm_data: VmData, network_details: List[OpenstackAddress] +) -> None: + """ + Adds the hostname to the metadata of the VM. + """ + if not openstack_api.check_machine_exists(vm_data): + # User has likely deleted the machine since we got here + logger.warning( + "Machine %s does not exist, skipping metadata update", + vm_data.virtual_machine_id, + ) + return + + hostnames = [i.hostname for i in network_details] + metadata = { + "HOSTNAMES": ",".join(hostnames), + "AQ_STATUS": "SUCCESS", + "AQ_MACHINE": aq_api.search_machine_by_serial(vm_data), + } + openstack_api.update_metadata(vm_data, metadata) + + +def on_message(message: rabbitpy.Message) -> None: + """ + Deserializes the message and calls the consume function on message. + """ + raw_body = message.body + logger.debug("New message: %s", raw_body) + + body = json.loads(raw_body.decode("utf-8"))["oslo.message"] + parsed_event = MessageEventType.from_json(body) + if parsed_event.event_type not in SUPPORTED_MESSAGE_TYPES.values(): + logger.info("Ignoring event_type: %s", parsed_event.event_type) + message.ack() + return + + decoded = RabbitMessage.from_json(body) + logger.debug("Decoded message: %s", decoded) + + consume(decoded) + message.ack() + + +def initiate_consumer() -> None: + """ + Initiates the message consumer and starts consuming messages in a loop. + This includes setting up the rabbit connection and channel. 
+ """ + logger.debug("Initiating message consumer") + # Ensure we have valid creds before trying to contact rabbit + verify_kerberos_ticket() + + config = ConsumerConfig() + + host = config.rabbit_host + port = config.rabbit_port + login_user = config.rabbit_username + login_pass = config.rabbit_password + logger.debug( + "Connecting to rabbit with: amqp://%s:@%s:%s/", login_user, host, port + ) + exchanges = ["nova"] + + login_str = f"amqp://{login_user}:{login_pass}@{host}:{port}/" + with rabbitpy.Connection(login_str) as conn: + with conn.channel() as channel: + logger.debug("Connected to RabbitMQ") + + # Durable indicates that the queue will survive a broker restart + queue = rabbitpy.Queue(channel, name="ral.info", durable=True) + for exchange in exchanges: + logger.debug("Binding to exchange: %s", exchange) + queue.bind(exchange, routing_key="ral.info") + + # Consume the messages from generator + message: rabbitpy.Message + logger.debug("Starting to consume messages") + for message in queue: + on_message(message) diff --git a/openstack-rabbit-consumer/rabbit_consumer/openstack_address.py b/openstack-rabbit-consumer/rabbit_consumer/openstack_address.py new file mode 100644 index 0000000..c784c4e --- /dev/null +++ b/openstack-rabbit-consumer/rabbit_consumer/openstack_address.py @@ -0,0 +1,70 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2023 United Kingdom Research and Innovation +""" +This file deserializes a server's network address from an +OpenStack API response +""" +import logging +import socket +from dataclasses import dataclass, field +from typing import Dict, Optional + +from mashumaro import DataClassDictMixin, field_options + +logger = logging.getLogger(__name__) + + +@dataclass +class OpenstackAddress(DataClassDictMixin): + """ + Deserializes the Openstack API response for a server's + network addresses. This is expected to be called from the + OpenstackAPI. To get an actual list use the Openstack API. + """ + + version: int + addr: str + mac_addr: str = field(metadata=field_options(alias="OS-EXT-IPS-MAC:mac_addr")) + hostname: Optional[str] = None + + @staticmethod + def get_internal_networks(addresses: Dict) -> list["OpenstackAddress"]: + """ + Returns a list of internal network addresses. This + is expected to be called from the OpenstackAPI. To get an actual + list use the Openstack API wrapper directly. + """ + internal_networks = [] + for address in addresses["Internal"]: + found = OpenstackAddress.from_dict(address) + found.hostname = OpenstackAddress.convert_hostnames(found.addr) + internal_networks.append(found) + return internal_networks + + @staticmethod + def get_services_networks(addresses: Dict) -> list["OpenstackAddress"]: + """ + Returns a list of network addresses on the services subnet. This + is expected to be called from the OpenstackAPI. To get an actual + list use the Openstack API wrapper directly. + """ + services_networks = [] + for address in addresses["Services"]: + found = OpenstackAddress.from_dict(address) + found.hostname = OpenstackAddress.convert_hostnames(found.addr) + services_networks.append(found) + return services_networks + + @staticmethod + def convert_hostnames(ip_addr: str) -> str: + """ + Converts an ip address to a hostname using DNS lookup. 
+ """ + try: + return socket.gethostbyaddr(ip_addr)[0] + except socket.herror: + logger.info("No hostname found for ip %s", ip_addr) + raise + except Exception: + logger.error("Problem converting ip to hostname") + raise diff --git a/openstack-rabbit-consumer/rabbit_consumer/openstack_api.py b/openstack-rabbit-consumer/rabbit_consumer/openstack_api.py new file mode 100644 index 0000000..3c6119c --- /dev/null +++ b/openstack-rabbit-consumer/rabbit_consumer/openstack_api.py @@ -0,0 +1,112 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2023 United Kingdom Research and Innovation +""" +This file defines methods for connecting and interacting with the +OpenStack API +""" +import logging +from typing import List, Optional + +import openstack +from openstack.compute.v2.image import Image +from openstack.compute.v2.server import Server + +from rabbit_consumer.consumer_config import ConsumerConfig +from rabbit_consumer.openstack_address import OpenstackAddress +from rabbit_consumer.vm_data import VmData + +logger = logging.getLogger(__name__) + + +class OpenstackConnection: + """ + Wrapper for Openstack connection, to reduce boilerplate code + in subsequent functions. + """ + + def __init__(self): + self.conn = None + + def __enter__(self): + self.conn = openstack.connect( + auth_url=ConsumerConfig().openstack_auth_url, + username=ConsumerConfig().openstack_username, + password=ConsumerConfig().openstack_password, + project_name="admin", + user_domain_name="Default", + project_domain_name="default", + ) + return self.conn + + def __exit__(self, exc_type, exc_val, exc_tb): + self.conn.close() + + +def check_machine_exists(vm_data: VmData) -> bool: + """ + Checks to see if the machine exists in Openstack. + """ + with OpenstackConnection() as conn: + return bool(conn.compute.find_server(vm_data.virtual_machine_id)) + + +def get_server_details(vm_data: VmData) -> Server: + """ + Gets the server details from Openstack with details included + """ + with OpenstackConnection() as conn: + # Workaround for details missing from find_server + # on the current version of openstacksdk + found = list( + conn.compute.servers(uuid=vm_data.virtual_machine_id, all_projects=True) + ) + if not found: + raise ValueError(f"Server not found for id: {vm_data.virtual_machine_id}") + return found[0] + + +def get_server_networks(vm_data: VmData) -> List[OpenstackAddress]: + """ + Gets the networks from Openstack for the virtual machine as a list + of deserialized OpenstackAddresses. + """ + server = get_server_details(vm_data) + if "Internal" in server.addresses: + return OpenstackAddress.get_internal_networks(server.addresses) + if "Services" in server.addresses: + return OpenstackAddress.get_services_networks(server.addresses) + logger.warning("No internal or services network found for server %s", server.name) + return [] + + +def get_server_metadata(vm_data: VmData) -> dict: + """ + Gets the metadata from Openstack for the virtual machine. + """ + server = get_server_details(vm_data) + return server.metadata + + +def get_image(vm_data: VmData) -> Optional[Image]: + """ + Gets the image name from Openstack for the virtual machine. + """ + server = get_server_details(vm_data) + uuid = server.image.id + if not uuid: + return None + + with OpenstackConnection() as conn: + image = conn.compute.find_image(uuid) + return image + + +def update_metadata(vm_data: VmData, metadata) -> None: + """ + Updates the metadata for the virtual machine. 
+ """ + server = get_server_details(vm_data) + with OpenstackConnection() as conn: + conn.compute.set_server_metadata(server, **metadata) + + logger.debug("Setting metadata successful") diff --git a/openstack-rabbit-consumer/rabbit_consumer/rabbit_message.py b/openstack-rabbit-consumer/rabbit_consumer/rabbit_message.py new file mode 100644 index 0000000..384fbbc --- /dev/null +++ b/openstack-rabbit-consumer/rabbit_consumer/rabbit_message.py @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2023 United Kingdom Research and Innovation +""" +This file handles how messages from Rabbit are processed and the +message extracted +""" +from dataclasses import dataclass, field +from typing import Optional + +from mashumaro import field_options +from mashumaro.mixins.json import DataClassJSONMixin + + +@dataclass +class MessageEventType(DataClassJSONMixin): + """ + Parses a raw message from RabbitMQ to determine the event_type + """ + + event_type: str + + +@dataclass +class RabbitMeta(DataClassJSONMixin): + """ + Deserialised custom VM metadata + """ + + machine_name: Optional[str] = field( + metadata=field_options(alias="AQ_MACHINENAME"), default=None + ) + + +@dataclass +# pylint: disable=too-many-instance-attributes +class RabbitPayload(DataClassJSONMixin): + """ + Deserialises the payload of a RabbitMQ message + """ + + instance_id: str + vm_name: str = field(metadata=field_options(alias="display_name")) + vcpus: int + memory_mb: int + vm_host: str = field(metadata=field_options(alias="host")) + + metadata: RabbitMeta + + +@dataclass +class RabbitMessage(DataClassJSONMixin): + """ + Deserialised RabbitMQ message + """ + + event_type: str + project_name: str = field(metadata=field_options(alias="_context_project_name")) + project_id: str = field(metadata=field_options(alias="_context_project_id")) + user_name: str = field(metadata=field_options(alias="_context_user_name")) + payload: RabbitPayload diff --git a/openstack-rabbit-consumer/rabbit_consumer/vm_data.py b/openstack-rabbit-consumer/rabbit_consumer/vm_data.py new file mode 100644 index 0000000..b4e6820 --- /dev/null +++ b/openstack-rabbit-consumer/rabbit_consumer/vm_data.py @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2023 United Kingdom Research and Innovation +""" +This file has a dataclass for creating VM data objects from messages +""" +from dataclasses import dataclass + +from rabbit_consumer.rabbit_message import RabbitMessage + + +@dataclass +class VmData: + """ + Holds fields that change between different virtual machines + """ + + project_id: str + virtual_machine_id: str + + @staticmethod + def from_message(message: RabbitMessage) -> "VmData": + """ + Creates a VmData object from a RabbitMessage + """ + return VmData( + project_id=message.project_id, + virtual_machine_id=message.payload.instance_id, + ) diff --git a/openstack-rabbit-consumer/requirements-test.txt b/openstack-rabbit-consumer/requirements-test.txt new file mode 100644 index 0000000..c74b26f --- /dev/null +++ b/openstack-rabbit-consumer/requirements-test.txt @@ -0,0 +1,3 @@ +coverage +pylint +pytest \ No newline at end of file diff --git a/openstack-rabbit-consumer/requirements.txt b/openstack-rabbit-consumer/requirements.txt new file mode 100644 index 0000000..7ed9bc5 --- /dev/null +++ b/openstack-rabbit-consumer/requirements.txt @@ -0,0 +1,8 @@ +rabbitpy +requests +requests_kerberos +pika +urllib3 +mashumaro +openstacksdk +six # for openstacksdk diff --git a/openstack-rabbit-consumer/tests/__init__.py 
b/openstack-rabbit-consumer/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/openstack-rabbit-consumer/tests/__pycache__/__init__.cpython-310.pyc b/openstack-rabbit-consumer/tests/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e9978e2c37673f83f125912d6254be449bcaccd4
GIT binary patch literal 204 (binary contents omitted)
diff --git a/openstack-rabbit-consumer/tests/__pycache__/conftest.cpython-310-pytest-7.4.0.pyc b/openstack-rabbit-consumer/tests/__pycache__/conftest.cpython-310-pytest-7.4.0.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..90cee8f45b81e84c74871944f5c1110ab0d9336a
GIT binary patch literal 2579 (binary contents omitted)
diff --git a/openstack-rabbit-consumer/tests/__pycache__/conftest.cpython-38-pytest-7.4.0.pyc b/openstack-rabbit-consumer/tests/__pycache__/conftest.cpython-38-pytest-7.4.0.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..78e06d1442ac2d78873cb6fd38ecfd2789bcce9c
GIT binary patch literal 2583 (binary contents omitted)
Dict[str, str]:
+ """
+ Creates a dictionary with mock data
+ which represents an example OpenStack image's metadata
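+ (the AQ_* keys mirror those read by AqMetadata.from_dict in the tests below)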
+ """ + return { + "AQ_ARCHETYPE": "archetype_mock", + "AQ_DOMAIN": "domain_mock", + "AQ_PERSONALITY": "personality_mock", + "AQ_OS": "os_mock", + "AQ_OSVERSION": "osversion_mock", + } + + +def test_aq_metadata_from_initial_dict(image_metadata): + """ + Tests creating an AQ metadata object from an initial dictionary + """ + returned = AqMetadata.from_dict(image_metadata) + + assert returned.aq_archetype == "archetype_mock" + assert returned.aq_domain == "domain_mock" + assert returned.aq_personality == "personality_mock" + assert returned.aq_os == "os_mock" + assert returned.aq_os_version == "osversion_mock" + + +def test_aq_metadata_override_all(image_metadata): + """ + Tests overriding all values in an AQ metadata object + """ + returned = AqMetadata.from_dict(image_metadata) + returned.override_from_vm_meta( + { + "AQ_ARCHETYPE": "archetype_mock_override", + "AQ_DOMAIN": "domain_mock_override", + "AQ_PERSONALITY": "personality_mock_override", + } + ) + + assert returned.aq_archetype == "archetype_mock_override" + assert returned.aq_domain == "domain_mock_override" + assert returned.aq_personality == "personality_mock_override" + + # Check the original values are still there + assert returned.aq_os == "os_mock" + assert returned.aq_os_version == "osversion_mock" + + +def test_aq_metadata_sandbox(image_metadata): + """ + Tests the sandbox value in an AQ metadata object + maps correctly onto the sandbox value + """ + returned = AqMetadata.from_dict(image_metadata) + returned.override_from_vm_meta( + { + "AQ_SANDBOX": "sandbox_mock", + } + ) + # This should be the only value that has changed + assert returned.aq_sandbox == "sandbox_mock" + + assert returned.aq_archetype == "archetype_mock" + assert returned.aq_personality == "personality_mock" + assert returned.aq_os == "os_mock" + assert returned.aq_os_version == "osversion_mock" diff --git a/openstack-rabbit-consumer/tests/test_consumer_config.py b/openstack-rabbit-consumer/tests/test_consumer_config.py new file mode 100644 index 0000000..287b1c4 --- /dev/null +++ b/openstack-rabbit-consumer/tests/test_consumer_config.py @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2023 United Kingdom Research and Innovation +""" +Test the consumer config class, this handles the environment variables +that are used to configure the consumer. +""" +import pytest + +from rabbit_consumer.consumer_config import ConsumerConfig + +AQ_FIELDS = [ + ("aq_prefix", "AQ_PREFIX"), + ("aq_url", "AQ_URL"), +] + +OPENSTACK_FIELDS = [ + ("openstack_auth_url", "OPENSTACK_AUTH_URL"), + ("openstack_compute_url", "OPENSTACK_COMPUTE_URL"), + ("openstack_username", "OPENSTACK_USERNAME"), + ("openstack_password", "OPENSTACK_PASSWORD"), +] + +RABBIT_FIELDS = [ + ("rabbit_host", "RABBIT_HOST"), + ("rabbit_port", "RABBIT_PORT"), + ("rabbit_username", "RABBIT_USERNAME"), + ("rabbit_password", "RABBIT_PASSWORD"), +] + + +@pytest.mark.parametrize( + "config_name,env_var", AQ_FIELDS + OPENSTACK_FIELDS + RABBIT_FIELDS +) +def test_config_gets_os_env_vars(monkeypatch, config_name, env_var): + """ + Test that the config class pulls the correct values from the environment. 
+ """ + expected = "MOCK_ENV" + monkeypatch.setenv(env_var, expected) + assert getattr(ConsumerConfig(), config_name) == expected diff --git a/openstack-rabbit-consumer/tests/test_message_consumer.py b/openstack-rabbit-consumer/tests/test_message_consumer.py new file mode 100644 index 0000000..3ae08f7 --- /dev/null +++ b/openstack-rabbit-consumer/tests/test_message_consumer.py @@ -0,0 +1,445 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2023 United Kingdom Research and Innovation +""" +Tests the message consumption flow +for the consumer +""" +from unittest.mock import Mock, NonCallableMock, patch, call, MagicMock + +import pytest + +# noinspection PyUnresolvedReferences +from rabbit_consumer.consumer_config import ConsumerConfig +from rabbit_consumer.message_consumer import ( + on_message, + initiate_consumer, + add_aq_details_to_metadata, + handle_create_machine, + handle_machine_delete, + SUPPORTED_MESSAGE_TYPES, + check_machine_valid, + is_aq_managed_image, + get_aq_build_metadata, + delete_machine, +) +from rabbit_consumer.vm_data import VmData + + +@pytest.fixture(name="valid_event_type") +def fixture_valid_event_type(): + """ + Fixture for a valid event type + """ + mock = NonCallableMock() + mock.event_type = SUPPORTED_MESSAGE_TYPES["create"] + return mock + + +@patch("rabbit_consumer.message_consumer.consume") +@patch("rabbit_consumer.message_consumer.MessageEventType") +@patch("rabbit_consumer.message_consumer.RabbitMessage") +def test_on_message_parses_json( + message_parser, message_event_type, consume, valid_event_type +): + """ + Test that the function parses the message body as JSON + """ + message_event_type.from_json.return_value = valid_event_type + + with ( + patch("rabbit_consumer.message_consumer.json") as json, + patch("rabbit_consumer.message_consumer.is_aq_managed_image"), + ): + message = Mock() + on_message(message) + + decoded_body = json.loads.return_value + message_parser.from_json.assert_called_once_with(decoded_body["oslo.message"]) + consume.assert_called_once_with(message_parser.from_json.return_value) + message.ack.assert_called_once() + + +@patch("rabbit_consumer.message_consumer.consume") +@patch("rabbit_consumer.message_consumer.is_aq_managed_image") +@patch("rabbit_consumer.message_consumer.MessageEventType") +def test_on_message_ignores_wrong_message_type(message_event_type, is_managed, consume): + """ + Test that the function ignores messages with the wrong message type + """ + message_event = NonCallableMock() + message_event.event_type = "wrong" + message_event_type.from_json.return_value = message_event + + with patch("rabbit_consumer.message_consumer.json"): + message = Mock() + on_message(message) + + is_managed.assert_not_called() + consume.assert_not_called() + message.ack.assert_called_once() + + +@pytest.mark.parametrize("event_type", SUPPORTED_MESSAGE_TYPES.values()) +@patch("rabbit_consumer.message_consumer.consume") +@patch("rabbit_consumer.message_consumer.MessageEventType") +def test_on_message_accepts_event_types(message_event_type, consume, event_type): + """ + Test that the function accepts the correct event types + """ + message_event = NonCallableMock() + message_event.event_type = event_type + message_event_type.from_json.return_value = message_event + + with ( + patch("rabbit_consumer.message_consumer.RabbitMessage"), + patch("rabbit_consumer.message_consumer.json"), + ): + message = Mock() + on_message(message) + + consume.assert_called_once() + message.ack.assert_called_once() + + +# pylint: 
disable=too-few-public-methods +class MockedConfig(ConsumerConfig): + """ + Provides a mocked input config for the consumer + """ + + rabbit_host = "rabbit_host" + rabbit_port = 1234 + rabbit_username = "rabbit_username" + rabbit_password = "rabbit_password" + + +@patch("rabbit_consumer.message_consumer.verify_kerberos_ticket") +@patch("rabbit_consumer.message_consumer.rabbitpy") +def test_initiate_consumer_channel_setup(rabbitpy, _): + """ + Test that the function sets up the channel and queue correctly + """ + mocked_config = MockedConfig() + + with patch("rabbit_consumer.message_consumer.ConsumerConfig") as config: + config.return_value = mocked_config + initiate_consumer() + + rabbitpy.Connection.assert_called_once_with( + f"amqp://{mocked_config.rabbit_username}:{mocked_config.rabbit_password}@{mocked_config.rabbit_host}:{mocked_config.rabbit_port}/" + ) + + connection = rabbitpy.Connection.return_value.__enter__.return_value + connection.channel.assert_called_once() + channel = connection.channel.return_value.__enter__.return_value + + rabbitpy.Queue.assert_called_once_with(channel, name="ral.info", durable=True) + queue = rabbitpy.Queue.return_value + queue.bind.assert_called_once_with("nova", routing_key="ral.info") + + +@patch("rabbit_consumer.message_consumer.verify_kerberos_ticket") +@patch("rabbit_consumer.message_consumer.on_message") +@patch("rabbit_consumer.message_consumer.rabbitpy") +def test_initiate_consumer_actual_consumption(rabbitpy, message_mock, _): + """ + Test that the function actually consumes messages + """ + queue_messages = [NonCallableMock(), NonCallableMock()] + # We need our mocked queue to act like a generator + rabbitpy.Queue.return_value.__iter__.return_value = queue_messages + + initiate_consumer() + + message_mock.assert_has_calls([call(message) for message in queue_messages]) + + +@patch("rabbit_consumer.message_consumer.openstack_api") +@patch("rabbit_consumer.message_consumer.aq_api") +def test_add_aq_details_to_metadata( + aq_api, openstack_api, vm_data, openstack_address_list +): + """ + Test that the function adds the hostname to the metadata when the machine exists + """ + openstack_api.check_machine_exists.return_value = True + add_aq_details_to_metadata(vm_data, openstack_address_list) + + hostnames = [i.hostname for i in openstack_address_list] + expected = { + "HOSTNAMES": ",".join(hostnames), + "AQ_STATUS": "SUCCESS", + "AQ_MACHINE": aq_api.search_machine_by_serial.return_value, + } + + openstack_api.check_machine_exists.assert_called_once_with(vm_data) + aq_api.search_machine_by_serial.assert_called_once_with(vm_data) + openstack_api.update_metadata.assert_called_with(vm_data, expected) + + +@patch("rabbit_consumer.message_consumer.openstack_api") +def test_add_hostname_to_metadata_machine_does_not_exist(openstack_api, vm_data): + """ + Test that the function does not add the hostname to the metadata when the machine does not exist + """ + openstack_api.check_machine_exists.return_value = False + add_aq_details_to_metadata(vm_data, []) + + openstack_api.check_machine_exists.assert_called_once_with(vm_data) + openstack_api.update_metadata.assert_not_called() + + +@patch("rabbit_consumer.message_consumer.check_machine_valid") +@patch("rabbit_consumer.message_consumer.openstack_api") +def test_handle_create_machine_skips_invalid(openstack_api, machine_valid): + """ + Test that the function skips invalid machines + """ + machine_valid.return_value = False + vm_data = Mock() + + handle_create_machine(vm_data) + + 
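+ # the handler should bail out before any Openstack network lookups happen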
machine_valid.assert_called_once_with(vm_data) + openstack_api.get_server_networks.assert_not_called() + + +@patch("rabbit_consumer.message_consumer.openstack_api") +@patch("rabbit_consumer.message_consumer.aq_api") +@patch("rabbit_consumer.message_consumer.add_aq_details_to_metadata") +# pylint: disable=too-many-arguments +def test_consume_create_machine_hostnames_good_path( + metadata, aq_api, openstack, rabbit_message, image_metadata +): + """ + Test that the function calls the correct functions in the correct order to register a new machine + """ + with ( + patch("rabbit_consumer.message_consumer.VmData") as data_patch, + patch("rabbit_consumer.message_consumer.check_machine_valid") as check_machine, + patch( + "rabbit_consumer.message_consumer.get_aq_build_metadata" + ) as get_image_meta, + patch("rabbit_consumer.message_consumer.delete_machine") as delete_machine_mock, + ): + check_machine.return_value = True + get_image_meta.return_value = image_metadata + + handle_create_machine(rabbit_message) + + vm_data = data_patch.from_message.return_value + network_details = openstack.get_server_networks.return_value + + data_patch.from_message.assert_called_with(rabbit_message) + openstack.get_server_networks.assert_called_with(vm_data) + + # Check main Aq Flow + delete_machine_mock.assert_called_once_with(vm_data, network_details[0]) + aq_api.create_machine.assert_called_once_with(rabbit_message, vm_data) + machine_name = aq_api.create_machine.return_value + + # Networking + aq_api.add_machine_nics.assert_called_once_with(machine_name, network_details) + + aq_api.set_interface_bootable.assert_called_once_with(machine_name, "eth0") + + aq_api.create_host.assert_called_once_with( + image_metadata, network_details, machine_name + ) + aq_api.aq_make.assert_called_once_with(network_details) + + # Metadata + metadata.assert_called_once_with(vm_data, network_details) + + +@patch("rabbit_consumer.message_consumer.delete_machine") +def test_consume_delete_machine_good_path(delete_machine_mock, rabbit_message): + """ + Test that the function calls the correct functions in the correct order to delete a machine + """ + rabbit_message.payload.metadata.machine_name = "AQ-HOST1" + + with patch("rabbit_consumer.message_consumer.VmData") as data_patch: + handle_machine_delete(rabbit_message) + + delete_machine_mock.assert_called_once_with( + vm_data=data_patch.from_message.return_value + ) + + +@patch("rabbit_consumer.message_consumer.is_aq_managed_image") +@patch("rabbit_consumer.message_consumer.openstack_api") +def test_check_machine_valid(openstack_api, is_aq_managed): + """ + Test that the function returns True when the machine is valid + """ + mock_message = NonCallableMock() + is_aq_managed.return_value = True + + vm_data = VmData.from_message(mock_message) + + openstack_api.check_machine_exists.return_value = True + + assert check_machine_valid(mock_message) + is_aq_managed.assert_called_once_with(vm_data) + openstack_api.check_machine_exists.assert_called_once_with(vm_data) + + +@patch("rabbit_consumer.message_consumer.is_aq_managed_image") +@patch("rabbit_consumer.message_consumer.openstack_api") +def test_check_machine_invalid_image(openstack_api, is_aq_managed): + """ + Test that the function returns False when the image is not AQ managed + """ + mock_message = NonCallableMock() + is_aq_managed.return_value = False + openstack_api.check_machine_exists.return_value = True + vm_data = VmData.from_message(mock_message) + + assert not check_machine_valid(mock_message) + + 
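+ # the machine exists, but its non-AQ image should still be rejected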
openstack_api.check_machine_exists.assert_called_once_with(vm_data) + is_aq_managed.assert_called_once_with(vm_data) + + +@patch("rabbit_consumer.message_consumer.is_aq_managed_image") +@patch("rabbit_consumer.message_consumer.openstack_api") +def test_check_machine_invalid_machine(openstack_api, is_aq_managed): + """ + Test that the function returns False when the machine does not exist + """ + mock_message = NonCallableMock() + openstack_api.check_machine_exists.return_value = False + + assert not check_machine_valid(mock_message) + + is_aq_managed.assert_not_called() + openstack_api.check_machine_exists.assert_called_once_with( + VmData.from_message(mock_message) + ) + + +@patch("rabbit_consumer.message_consumer.openstack_api") +def test_is_aq_managed_image(openstack_api, vm_data): + """ + Test that the function returns True when the image is AQ managed + """ + openstack_api.get_image.return_value.metadata = {"AQ_OS": "True"} + + assert is_aq_managed_image(vm_data) + openstack_api.get_image.assert_called_once_with(vm_data) + + +@patch("rabbit_consumer.message_consumer.openstack_api") +def test_is_aq_managed_image_missing_image(openstack_api, vm_data): + """ + Test that the function returns False when the image is not AQ managed + """ + openstack_api.get_image.return_value = None + + assert not is_aq_managed_image(vm_data) + openstack_api.get_image.assert_called_once_with(vm_data) + + +@patch("rabbit_consumer.message_consumer.VmData") +@patch("rabbit_consumer.message_consumer.openstack_api") +def test_is_aq_managed_image_missing_key(openstack_api, vm_data): + """ + Test that the function returns False when the image is not AQ managed + """ + openstack_api.get_image.return_value.metadata = {} + + assert not is_aq_managed_image(vm_data) + openstack_api.get_image.assert_called_once_with(vm_data) + + +@patch("rabbit_consumer.message_consumer.AqMetadata") +@patch("rabbit_consumer.message_consumer.openstack_api") +def test_get_aq_build_metadata(openstack_api, aq_metadata_class, vm_data): + """ + Test that the function returns the correct metadata + """ + aq_metadata_obj: MagicMock = get_aq_build_metadata(vm_data) + + # We should first construct from an image + assert aq_metadata_obj == aq_metadata_class.from_dict.return_value + aq_metadata_class.from_dict.assert_called_once_with( + openstack_api.get_image.return_value.metadata + ) + + # Then override with an object + openstack_api.get_server_metadata.assert_called_once_with(vm_data) + aq_metadata_obj.override_from_vm_meta.assert_called_once_with( + openstack_api.get_server_metadata.return_value + ) + + +@patch("rabbit_consumer.message_consumer.aq_api") +def test_delete_machine_hostname_only(aq_api, vm_data, openstack_address): + """ + Tests that the function deletes a host then exits if no machine is found + """ + aq_api.check_host_exists.return_value = True + aq_api.search_machine_by_serial.return_value = None + + delete_machine(vm_data, openstack_address) + aq_api.delete_host.assert_called_once_with(openstack_address.hostname) + aq_api.delete_machine.assert_not_called() + + +@patch("rabbit_consumer.message_consumer.aq_api") +def test_delete_machine_by_serial(aq_api, vm_data, openstack_address): + """ + Tests that the function deletes a host then a machine + assuming both were found + """ + # Assume our host address doesn't match the machine record + # but the machine does have a hostname which is valid... 
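+ # first call checks the hostname from the message; the second checks
+ # the hostname recorded against the machine in Aquilon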
+ aq_api.check_host_exists.side_effect = [False, True]
+
+ aq_api.search_host_by_machine.return_value = "host.example.com"
+ aq_api.get_machine_details.return_value = ""
+
+ delete_machine(vm_data, openstack_address)
+
+ aq_api.check_host_exists.assert_has_calls(
+ [call(openstack_address.hostname), call("host.example.com")]
+ )
+ aq_api.delete_host.assert_called_once_with("host.example.com")
+
+
+@patch("rabbit_consumer.message_consumer.aq_api")
+@patch("rabbit_consumer.message_consumer.socket")
+def test_delete_machine_no_hostname(socket_api, aq_api, vm_data):
+ """
+ Tests that the function deletes the machine's address and interface
+ when no host record exists for its hostname
+ """
+ aq_api.check_host_exists.return_value = False
+
+ ip_address = "127.0.0.1"
+ socket_api.gethostbyname.return_value = ip_address
+
+ machine_name = aq_api.search_machine_by_serial.return_value
+ aq_api.get_machine_details.return_value = f"eth0: {ip_address}"
+
+ delete_machine(vm_data, NonCallableMock())
+ aq_api.delete_address.assert_called_once_with(ip_address, machine_name)
+ aq_api.delete_interface.assert_called_once_with(machine_name)
+
+
+@patch("rabbit_consumer.message_consumer.aq_api")
+@patch("rabbit_consumer.message_consumer.socket")
+def test_delete_machine_always_called(socket_api, aq_api, vm_data):
+ """
+ Tests that the function always calls the delete machine function
+ """
+ aq_api.check_host_exists.return_value = False
+ socket_api.gethostbyname.return_value = "123123"
+
+ aq_api.get_machine_details.return_value = "Machine Details"
+
+ machine_name = "machine_name"
+ aq_api.search_machine_by_serial.return_value = machine_name
+
+ delete_machine(vm_data, NonCallableMock())
+ aq_api.delete_machine.assert_called_once_with(machine_name)
diff --git a/openstack-rabbit-consumer/tests/test_openstack_address.py b/openstack-rabbit-consumer/tests/test_openstack_address.py
new file mode 100644
index 0000000..631ee1e
--- /dev/null
+++ b/openstack-rabbit-consumer/tests/test_openstack_address.py
@@ -0,0 +1,161 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2023 United Kingdom Research and Innovation
+"""
+Tests the dataclass representing OpenStack network addresses
+"""
+import copy
+from unittest.mock import patch
+
+import pytest
+
+from rabbit_consumer.openstack_address import OpenstackAddress
+
+
+@pytest.fixture(name="example_dict_internal")
+def fixture_example_dict_internal():
+ """
+ Creates a dictionary with mock data representing the network addresses of an internal VM
+ """
+ # Adapted from real response from OpenStack API
+ return {
+ "Internal": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "fa:ca:aa:aa:aa:aa",
+ "version": 4,
+ "addr": "127.0.0.63",
+ "OS-EXT-IPS:type": "fixed",
+ }
+ ]
+ }
+
+
+@pytest.fixture(name="example_dict_two_entries_internal")
+def fixture_example_dict_two_entries_internal(example_dict_internal):
+ """
+ Creates a dictionary with mock data representing the network addresses of an internal VM with two entries
+ """
+ second = copy.deepcopy(example_dict_internal["Internal"][0])
+ second["addr"] = "127.0.0.64"
+ example_dict_internal["Internal"].append(second)
+ return example_dict_internal
+
+
+@patch("rabbit_consumer.openstack_address.socket.gethostbyaddr")
+def test_openstack_address_single_case_internal(mock_socket, example_dict_internal):
+ """
+ Tests the OpenstackAddress class with a single internal network address
+ """
+ result = OpenstackAddress.get_internal_networks(example_dict_internal)
+ assert len(result) == 1
+ assert result[0].version == 4
+ assert result[0].addr == "127.0.0.63"
+ assert result[0].mac_addr == "fa:ca:aa:aa:aa:aa"
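+ # a single address should trigger exactly one reverse-DNS lookup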
+ mock_socket.assert_called_once()
+
+
+@patch("rabbit_consumer.openstack_address.socket.gethostbyaddr")
+def test_openstack_address_multiple_networks_internal(
+ mock_socket, example_dict_two_entries_internal
+):
+ """
+ Tests the OpenstackAddress class with multiple internal network addresses
+ """
+ result = OpenstackAddress.get_internal_networks(example_dict_two_entries_internal)
+ assert len(result) == 2
+ assert result[0].version == 4
+ assert result[0].addr == "127.0.0.63"
+ assert result[1].addr == "127.0.0.64"
+ mock_socket.assert_called()
+
+
+@patch("rabbit_consumer.openstack_address.socket.gethostbyaddr")
+def test_openstack_address_populate_internal(
+ mock_socket, example_dict_two_entries_internal
+):
+ """
+ Tests that hostnames are populated for each internal network address
+ """
+ mock_socket.side_effect = [("hostname", None, None), ("hostname2", None, None)]
+ result = OpenstackAddress.get_internal_networks(example_dict_two_entries_internal)
+
+ assert result[0].hostname == "hostname"
+ assert result[1].hostname == "hostname2"
+
+ assert mock_socket.call_count == 2
+ assert mock_socket.call_args_list[0][0][0] == "127.0.0.63"
+ assert mock_socket.call_args_list[1][0][0] == "127.0.0.64"
+
+
+@pytest.fixture(name="example_dict_services")
+def fixture_example_dict_services():
+ """
+ Creates a dictionary with mock data representing the services network addresses of a VM
+ """
+ # Adapted from real response from OpenStack API
+ return {
+ "Services": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "fa:ca:aa:aa:aa:aa",
+ "version": 4,
+ "addr": "127.0.0.63",
+ "OS-EXT-IPS:type": "fixed",
+ }
+ ]
+ }
+
+
+@pytest.fixture(name="example_dict_two_entries_services")
+def fixture_example_dict_two_entries_services(example_dict_services):
+ """
+ Creates a dictionary with mock data representing the services network addresses of a VM with two entries
+ """
+ second = copy.deepcopy(example_dict_services["Services"][0])
+ second["addr"] = "127.0.0.64"
+ example_dict_services["Services"].append(second)
+ return example_dict_services
+
+
+@patch("rabbit_consumer.openstack_address.socket.gethostbyaddr")
+def test_openstack_address_single_case_services(mock_socket, example_dict_services):
+ """
+ Tests the OpenstackAddress class with a single services network address
+ """
+ result = OpenstackAddress.get_services_networks(example_dict_services)
+ assert len(result) == 1
+ assert result[0].version == 4
+ assert result[0].addr == "127.0.0.63"
+ assert result[0].mac_addr == "fa:ca:aa:aa:aa:aa"
+ mock_socket.assert_called_once()
+
+
+@patch("rabbit_consumer.openstack_address.socket.gethostbyaddr")
+def test_openstack_address_multiple_networks_services(
+ mock_socket, example_dict_two_entries_services
+):
+ """
+ Tests the OpenstackAddress class with multiple services network addresses
+ """
+ result = OpenstackAddress.get_services_networks(example_dict_two_entries_services)
+ assert len(result) == 2
+ assert result[0].version == 4
+ assert result[0].addr == "127.0.0.63"
+ assert result[1].addr == "127.0.0.64"
+ mock_socket.assert_called()
+
+
+@patch("rabbit_consumer.openstack_address.socket.gethostbyaddr")
+def test_openstack_address_populate_services(
+ mock_socket, example_dict_two_entries_services
+):
+ """
+ Tests that hostnames are populated for each services network address
+ """
+ mock_socket.side_effect = [("hostname", None, None), ("hostname2", None, None)]
+ result = OpenstackAddress.get_services_networks(example_dict_two_entries_services)
+
+ assert result[0].hostname == "hostname"
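+ # hostnames are populated in the same order as the addresses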
assert result[1].hostname == "hostname2" + + assert mock_socket.call_count == 2 + assert mock_socket.call_args_list[0][0][0] == "127.0.0.63" + assert mock_socket.call_args_list[1][0][0] == "127.0.0.64" diff --git a/openstack-rabbit-consumer/tests/test_openstack_api.py b/openstack-rabbit-consumer/tests/test_openstack_api.py new file mode 100644 index 0000000..0a20b47 --- /dev/null +++ b/openstack-rabbit-consumer/tests/test_openstack_api.py @@ -0,0 +1,161 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2023 United Kingdom Research and Innovation +""" +Tests that the Openstack API functions are invoked +as expected with the correct params +""" +from unittest.mock import NonCallableMock, patch + +# noinspection PyUnresolvedReferences +from rabbit_consumer.openstack_api import ( + update_metadata, + OpenstackConnection, + check_machine_exists, + get_server_details, + get_server_networks, + get_image, +) + + +@patch("rabbit_consumer.openstack_api.ConsumerConfig") +@patch("rabbit_consumer.openstack_api.openstack.connect") +def test_openstack_connection(mock_connect, mock_config): + """ + Test that the OpenstackConnection context manager calls the correct functions + """ + with OpenstackConnection() as conn: + mock_connect.assert_called_once_with( + auth_url=mock_config.return_value.openstack_auth_url, + username=mock_config.return_value.openstack_username, + password=mock_config.return_value.openstack_password, + project_name="admin", + user_domain_name="Default", + project_domain_name="default", + ) + + # Pylint is unable to see that openstack.connect returns a mock + # pylint: disable=no-member + assert conn == mock_connect.return_value + # pylint: disable=no-member + assert conn.close.call_count == 0 + + # Check close is called when the context manager exits + # pylint: disable=no-member + assert conn.close.call_count == 1 + + +@patch("rabbit_consumer.openstack_api.OpenstackConnection") +def test_check_machine_exists_existing_machine(conn, vm_data): + """ + Test that the function returns True when the machine exists + """ + context = conn.return_value.__enter__.return_value + context.compute.find_server.return_value = NonCallableMock() + found = check_machine_exists(vm_data) + + conn.assert_called_once_with() + context.compute.find_server.assert_called_with(vm_data.virtual_machine_id) + assert isinstance(found, bool) and found + + +@patch("rabbit_consumer.openstack_api.OpenstackConnection") +def test_check_machine_exists_deleted_machine(conn, vm_data): + """ + Test that the function returns False when the machine does not exist + """ + context = conn.return_value.__enter__.return_value + context.compute.find_server.return_value = None + found = check_machine_exists(vm_data) + + conn.assert_called_once_with() + context = conn.return_value.__enter__.return_value + context.compute.find_server.assert_called_with(vm_data.virtual_machine_id) + assert isinstance(found, bool) and not found + + +@patch("rabbit_consumer.openstack_api.OpenstackConnection") +@patch("rabbit_consumer.openstack_api.get_server_details") +def test_update_metadata(server_details, conn, vm_data): + """ + Test that the function calls the correct functions to update the metadata on a VM + """ + server_details.return_value = NonCallableMock() + update_metadata(vm_data, {"key": "value"}) + + server_details.assert_called_once_with(vm_data) + + conn.assert_called_once_with() + context = conn.return_value.__enter__.return_value + context.compute.set_server_metadata.assert_called_once_with( + server_details.return_value, 
**{"key": "value"} + ) + + +@patch("rabbit_consumer.openstack_api.OpenstackConnection") +def test_get_server_details(conn, vm_data): + """ + Test that the function calls the correct functions to get the details of a VM + """ + context = conn.return_value.__enter__.return_value + context.compute.servers.return_value = [NonCallableMock()] + + result = get_server_details(vm_data) + + context.compute.servers.assert_called_once_with( + uuid=vm_data.virtual_machine_id, all_projects=True + ) + + assert result == context.compute.servers.return_value[0] + + +@patch("rabbit_consumer.openstack_api.get_server_details") +@patch("rabbit_consumer.openstack_api.OpenstackAddress") +def test_get_server_networks_internal(address, server_details, vm_data): + """ + Test that the function calls the correct functions to get the networks of a VM + """ + server_details.return_value.addresses = {"Internal": []} + + get_server_networks(vm_data) + address.get_internal_networks.assert_called_once_with( + server_details.return_value.addresses + ) + + +@patch("rabbit_consumer.openstack_api.get_server_details") +@patch("rabbit_consumer.openstack_api.OpenstackAddress") +def test_get_server_networks_services(address, server_details, vm_data): + """ + Test that the function calls the correct functions to get the networks of a VM + """ + server_details.return_value.addresses = {"Services": []} + + get_server_networks(vm_data) + address.get_services_networks.assert_called_once_with( + server_details.return_value.addresses + ) + + +@patch("rabbit_consumer.openstack_api.get_server_details") +def test_get_server_networks_no_network(server_details, vm_data): + """ + Tests that an empty list is returned when there are no networks + """ + server_details.return_value = NonCallableMock() + server_details.return_value.addresses = {} + + result = get_server_networks(vm_data) + assert not result + + +@patch("rabbit_consumer.openstack_api.get_server_details") +def test_get_image_no_image_id(server_details, vm_data): + """ + Tests that get image handles an empty image UUID + usually when a volume was used instead of an image + """ + server_details.return_value = NonCallableMock() + server_details.return_value.image.id = None + + result = get_image(vm_data) + assert not result diff --git a/openstack-rabbit-consumer/tests/test_rabbit_message.py b/openstack-rabbit-consumer/tests/test_rabbit_message.py new file mode 100644 index 0000000..15a060a --- /dev/null +++ b/openstack-rabbit-consumer/tests/test_rabbit_message.py @@ -0,0 +1,76 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright (c) 2023 United Kingdom Research and Innovation +""" +Tests rabbit messages are consumed correctly from the queue +""" +import json +from typing import Dict + +import pytest + +from rabbit_consumer.rabbit_message import RabbitMessage + + +def _example_dict(with_metadata: bool) -> Dict: + """ + Returns an example dictionary for testing, based on real data from the RabbitMQ queue + """ + example_dict = { + "event_type": "compute.instance.create.end", + "_context_project_name": "project_name", + "_context_project_id": "project_id", + "_context_user_name": "user_name", + "payload": { + "instance_id": "instance_id", + "display_name": "vm_name", + "vcpus": 1, + "memory_mb": 1024, + "host": "vm_host", + "metadata": {}, + }, + } + + if with_metadata: + example_dict["payload"]["metadata"] = {"AQ_MACHINENAME": "machine_name"} + + return example_dict + + +@pytest.fixture(name="example_json") +def fixture_example_json(): + """ + Returns an example JSON string for testing, 
based on real data from the RabbitMQ queue + """ + return json.dumps(_example_dict(with_metadata=False)) + + +@pytest.fixture(name="example_json_with_metadata") +def fixture_example_json_with_metadata(): + """ + Returns an example JSON string for testing, with metadata included + """ + return json.dumps(_example_dict(with_metadata=True)) + + +def test_rabbit_json_load(example_json): + """ + Tests that RabbitMessage.from_json() can load a JSON string and deserialise it into dataclasses + """ + deserialized = RabbitMessage.from_json(example_json) + assert deserialized.event_type == "compute.instance.create.end" + assert deserialized.project_name == "project_name" + assert deserialized.project_id == "project_id" + assert deserialized.user_name == "user_name" + assert deserialized.payload.instance_id == "instance_id" + assert deserialized.payload.vm_name == "vm_name" + assert deserialized.payload.vcpus == 1 + assert deserialized.payload.memory_mb == 1024 + assert deserialized.payload.vm_host == "vm_host" + + +def test_with_metadata(example_json_with_metadata): + """ + Tests that RabbitMessage.from_json() can load a JSON string and deserialise it into dataclasses + """ + deserialized = RabbitMessage.from_json(example_json_with_metadata) + assert deserialized.payload.metadata.machine_name == "machine_name" diff --git a/openstack-rabbit-consumer/version.txt b/openstack-rabbit-consumer/version.txt new file mode 100644 index 0000000..00355e2 --- /dev/null +++ b/openstack-rabbit-consumer/version.txt @@ -0,0 +1 @@ +2.3.7