diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 6ea02bfba..234be6fbd 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1 @@ -# This should match the owning team set up in https://github.com/orgs/opensearch-project/teams -* @opensearch-project/alerting-plugin \ No newline at end of file +* @lezzago @AWSHurneyt @sbcd90 @eirsep @getsaurabh02 @praveensameneni @qreshi @bowenlan-amzn @rishabhmaurya @engechas \ No newline at end of file diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 05f144533..206f8a630 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -3,7 +3,7 @@ *Description of changes:* *CheckList:* -[ ] Commits are signed per the DCO using --signoff +- [ ] Commits are signed per the DCO using --signoff By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. For more information on following Developer Certificate of Origin and signing off your commits, please check [here](https://github.com/opensearch-project/alerting/blob/main/CONTRIBUTING.md#developer-certificate-of-origin). \ No newline at end of file diff --git a/.github/workflows/add-untriaged.yml b/.github/workflows/add-untriaged.yml new file mode 100644 index 000000000..9dcc7020d --- /dev/null +++ b/.github/workflows/add-untriaged.yml @@ -0,0 +1,19 @@ +name: Apply 'untriaged' label during issue lifecycle + +on: + issues: + types: [opened, reopened, transferred] + +jobs: + apply-label: + runs-on: ubuntu-latest + steps: + - uses: actions/github-script@v6 + with: + script: | + github.rest.issues.addLabels({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + labels: ['untriaged'] + }) diff --git a/.github/workflows/auto-release.yml b/.github/workflows/auto-release.yml new file mode 100644 index 000000000..24eeb2730 --- /dev/null +++ b/.github/workflows/auto-release.yml @@ -0,0 +1,29 @@ +name: Releases + +on: + push: + tags: + - '*' + +jobs: + + build: + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - name: GitHub App token + id: github_app_token + uses: tibdex/github-app-token@v1.5.0 + with: + app_id: ${{ secrets.APP_ID }} + private_key: ${{ secrets.APP_PRIVATE_KEY }} + installation_id: 22958780 + - name: Get tag + id: tag + uses: dawidd6/action-get-tag@v1 + - uses: actions/checkout@v2 + - uses: ncipollo/release-action@v1 + with: + github_token: ${{ steps.github_app_token.outputs.token }} + bodyFile: release-notes/opensearch.release-notes-${{steps.tag.outputs.tag}}.md \ No newline at end of file diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index e3f96a44f..56fef5073 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -23,7 +23,9 @@ jobs: installation_id: 22958780 - name: Backport - uses: VachaShah/backport@v1.1.4 + uses: VachaShah/backport@v2.2.0 with: github_token: ${{ steps.github_app_token.outputs.token }} branch_name: backport/backport-${{ github.event.number }} + labels_template: "<%= JSON.stringify([...labels, 'autocut']) %>" + failure_labels: "failed backport" diff --git a/.github/workflows/bwc-test-workflow.yml b/.github/workflows/bwc-test-workflow.yml index 66174c697..bf8e8ff7c 100644 --- a/.github/workflows/bwc-test-workflow.yml +++ b/.github/workflows/bwc-test-workflow.yml @@ -9,7 +9,14 @@ on: - "*" jobs: - build: + Get-CI-Image-Tag: + uses: opensearch-project/opensearch-build/.github/workflows/get-ci-image-tag.yml@main + with: + product: opensearch + +
build-linux: + needs: Get-CI-Image-Tag strategy: matrix: java: [ 11 ] # Job name name: Build and test Alerting # This job runs on Linux runs-on: ubuntu-latest + container: + # using the same image which is used by opensearch-build team to build the OpenSearch Distribution + # this image tag is subject to change as more dependencies and updates will arrive over time + image: ${{ needs.Get-CI-Image-Tag.outputs.ci-image-version-linux }} + # need to switch to root so that github actions can install runner binary on container without permission issues. + options: --user root + steps: - # This step uses the setup-java Github action: https://github.com/actions/setup-java - - name: Set Up JDK ${{ matrix.java }} - uses: actions/setup-java@v1 - with: - java-version: ${{ matrix.java }} # This step uses the checkout Github action: https://github.com/actions/checkout - name: Checkout Branch uses: actions/checkout@v2 # This step uses the setup-java Github action: https://github.com/actions/setup-java - - name: Set Up JDK 11 + - name: Set Up JDK ${{ matrix.java }} uses: actions/setup-java@v1 with: - java-version: 11 + java-version: ${{ matrix.java }} - name: Run Alerting Backwards Compatibility Tests run: | echo "Running backwards compatibility tests..." - ./gradlew bwcTestSuite + chown -R opensearch.opensearch `pwd` + su opensearch -c "whoami && java -version && ./gradlew bwcTestSuite" diff --git a/.github/workflows/dco.yml b/.github/workflows/dco.yml deleted file mode 100644 index 53ed5304c..000000000 --- a/.github/workflows/dco.yml +++ /dev/null @@ -1,18 +0,0 @@ -name: Developer Certificate of Origin Check - -on: [pull_request] - -jobs: - check: - runs-on: ubuntu-latest - - steps: - - name: Get PR Commits - id: 'get-pr-commits' - uses: tim-actions/get-pr-commits@v1.1.0 - with: - token: ${{ secrets.GITHUB_TOKEN }} - - name: DCO Check - uses: tim-actions/dco@v1.1.0 - with: - commits: ${{ steps.get-pr-commits.outputs.commits }} \ No newline at end of file diff --git a/.github/workflows/maven-publish.yml b/.github/workflows/maven-publish.yml new file mode 100644 index 000000000..6d8793c10 --- /dev/null +++ b/.github/workflows/maven-publish.yml @@ -0,0 +1,42 @@ +name: Publish snapshots to maven + +on: + workflow_dispatch: + push: + branches: [ + main, + 1.*, + 2.* + ] + +jobs: + build-and-publish-snapshots: + strategy: + fail-fast: false + matrix: + jdk: [17] + platform: ["ubuntu-latest"] + if: github.repository == 'opensearch-project/alerting' + runs-on: ${{ matrix.platform }} + + permissions: + id-token: write + contents: write + + steps: + - uses: actions/setup-java@v3 + with: + distribution: temurin # Temurin is a distribution of adoptium + java-version: ${{ matrix.jdk }} + - uses: actions/checkout@v3 + - uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.PUBLISH_SNAPSHOTS_ROLE }} + aws-region: us-east-1 + - name: publish snapshots to maven + run: | + export SONATYPE_USERNAME=$(aws secretsmanager get-secret-value --secret-id maven-snapshots-username --query SecretString --output text) + export SONATYPE_PASSWORD=$(aws secretsmanager get-secret-value --secret-id maven-snapshots-password --query SecretString --output text) + echo "::add-mask::$SONATYPE_USERNAME" + echo "::add-mask::$SONATYPE_PASSWORD" + ./gradlew publishPluginZipPublicationToSnapshotsRepository diff --git a/.github/workflows/multi-node-test-workflow.yml b/.github/workflows/multi-node-test-workflow.yml index b6377ed5d..ab50db83b 100644 --- a/.github/workflows/multi-node-test-workflow.yml +++
b/.github/workflows/multi-node-test-workflow.yml @@ -9,7 +13,13 @@ on: - "*" jobs: - build: + Get-CI-Image-Tag: + uses: opensearch-project/opensearch-build/.github/workflows/get-ci-image-tag.yml@main + with: + product: opensearch + + build-linux: + needs: Get-CI-Image-Tag strategy: matrix: java: [ 11, 17 ] @@ -17,6 +23,13 @@ jobs: name: Build and test Alerting # This job runs on Linux runs-on: ubuntu-latest + container: + # using the same image which is used by opensearch-build team to build the OpenSearch Distribution + # this image tag is subject to change as more dependencies and updates will arrive over time + image: ${{ needs.Get-CI-Image-Tag.outputs.ci-image-version-linux }} + # need to switch to root so that github actions can install runner binary on container without permission issues. + options: --user root + steps: # This step uses the setup-java Github action: https://github.com/actions/setup-java - name: Set Up JDK ${{ matrix.java }} @@ -27,4 +40,6 @@ jobs: - name: Checkout Branch uses: actions/checkout@v2 - name: Run integration tests with multi node config - run: ./gradlew integTest -PnumNodes=3 + run: | + chown -R 1000:1000 `pwd` + su `id -un 1000` -c "whoami && java -version && ./gradlew integTest -PnumNodes=3" diff --git a/.github/workflows/security-test-workflow.yml b/.github/workflows/security-test-workflow.yml index 7cb66242c..a096f26a0 100644 --- a/.github/workflows/security-test-workflow.yml +++ b/.github/workflows/security-test-workflow.yml @@ -12,7 +12,7 @@ jobs: build: strategy: matrix: - java: [ 11, 17 ] + java: [ 11, 17, 21 ] # Job name name: Build and test Alerting # This job runs on Linux @@ -43,7 +43,12 @@ plugin_version=`echo $plugin|awk -F- '{print $3}'| cut -d. -f 1-4` qualifier=`echo $plugin|awk -F- '{print $4}'| cut -d. -f 1-1` candidate_version=`echo $plugin|awk -F- '{print $5}'| cut -d. -f 1-1` - docker_version=$version-$qualifier + if [ -n "$qualifier" ] + then + docker_version=$version-$qualifier + else + docker_version=$version + fi [[ -z $candidate_version ]] && candidate_version=$qualifier && qualifier="" @@ -68,20 +73,20 @@ if: env.imagePresent == 'true' run: | cd .. - docker run -p 9200:9200 -d -p 9600:9600 -e "discovery.type=single-node" opensearch-alerting:test + docker run -p 9200:9200 -d -p 9600:9600 -e "OPENSEARCH_INITIAL_ADMIN_PASSWORD=myStrongPassword123!" -e "discovery.type=single-node" opensearch-alerting:test sleep 120 - name: Run Alerting Test for security enabled test cases if: env.imagePresent == 'true' run: | - cluster_running=`curl -XGET https://localhost:9200/_cat/plugins -u admin:admin --insecure` + cluster_running=`curl -XGET https://localhost:9200/_cat/plugins -u admin:myStrongPassword123! --insecure` echo $cluster_running - security=`curl -XGET https://localhost:9200/_cat/plugins -u admin:admin --insecure |grep opensearch-security|wc -l` + security=`curl -XGET https://localhost:9200/_cat/plugins -u admin:myStrongPassword123! --insecure |grep opensearch-security|wc -l` echo $security if [ $security -gt 0 ] then echo "Security plugin is available" - ./gradlew :alerting:integTest -Dtests.rest.cluster=localhost:9200 -Dtests.cluster=localhost:9200 -Dtests.clustername=docker-cluster -Dsecurity=true -Dhttps=true -Duser=admin -Dpassword=admin + ./gradlew :alerting:integTest -Dtests.rest.cluster=localhost:9200 -Dtests.cluster=localhost:9200 -Dtests.clustername=docker-cluster -Dsecurity=true -Dhttps=true -Duser=admin -Dpassword=myStrongPassword123!
else echo "Security plugin is NOT available skipping this run as tests without security have already been run" fi diff --git a/.github/workflows/test-workflow.yml b/.github/workflows/test-workflow.yml index c83a25fd5..ebc4c2cec 100644 --- a/.github/workflows/test-workflow.yml +++ b/.github/workflows/test-workflow.yml @@ -9,14 +9,30 @@ on: - "*" jobs: - build: + Get-CI-Image-Tag: + uses: opensearch-project/opensearch-build/.github/workflows/get-ci-image-tag.yml@main + with: + product: opensearch + + build-linux: + needs: Get-CI-Image-Tag + env: + BUILD_ARGS: ${{ matrix.os_build_args }} + WORKING_DIR: ${{ matrix.working_directory }}. strategy: matrix: - java: [11, 17] + java: [11, 17, 21] # Job name - name: Build Alerting with JDK ${{ matrix.java }} + name: Build Alerting with JDK ${{ matrix.java }} on Linux # This job runs on Linux runs-on: ubuntu-latest + container: + # using the same image which is used by opensearch-build team to build the OpenSearch Distribution + # this image tag is subject to change as more dependencies and updates will arrive over time + image: ${{ needs.Get-CI-Image-Tag.outputs.ci-image-version-linux }} + # need to switch to root so that github actions can install runner binary on container without permission issues. + options: --user root + steps: # This step uses the checkout Github action: https://github.com/actions/checkout - name: Checkout Branch @@ -27,7 +43,9 @@ jobs: with: java-version: ${{ matrix.java }} - name: Build and run with Gradle - run: ./gradlew build + run: | + chown -R 1000:1000 `pwd` + su `id -un 1000` -c "whoami && java -version && ./gradlew assemble integTest" - name: Create Artifact Path run: | mkdir -p alerting-artifacts @@ -41,5 +59,53 @@ jobs: - name: Upload Artifacts uses: actions/upload-artifact@v1 with: - name: alerting-plugin + name: alerting-plugin-${{ matrix.os }} path: alerting-artifacts + + build: + needs: Get-CI-Image-Tag + env: + BUILD_ARGS: ${{ matrix.os_build_args }} + WORKING_DIR: ${{ matrix.working_directory }}. + strategy: + matrix: + java: [11, 17, 21] + os: [ windows-latest, macos-latest ] + include: + - os: windows-latest + os_build_args: -x integTest + working_directory: X:\ + os_java_options: -Xmx4096M + # Job name + name: Build Alerting with JDK ${{ matrix.java }} on ${{ matrix.os }} + # This job runs on Windows and macOS + runs-on: ${{ matrix.os }} + steps: + # This step uses the checkout Github action: https://github.com/actions/checkout + - name: Checkout Branch + uses: actions/checkout@v2 + # This is a hack, but this step creates a link to the X: mounted drive, which makes the path + # short enough to work on Windows + - name: Shorten Path + if: ${{ matrix.os == 'windows-latest' }} + run: subst 'X:' .
+ # This step uses the setup-java Github action: https://github.com/actions/setup-java + - name: Set Up JDK ${{ matrix.java }} + uses: actions/setup-java@v1 + with: + java-version: ${{ matrix.java }} + - name: Build and run with Gradle + working-directory: ${{ env.WORKING_DIR }} + run: ./gradlew assemble integTest ${{ env.BUILD_ARGS }} + env: + _JAVA_OPTIONS: ${{ matrix.os_java_options }} + - name: Create Artifact Path + run: | + mkdir -p alerting-artifacts + cp ./alerting/build/distributions/*.zip alerting-artifacts + # This step uses the upload-artifact Github action: https://github.com/actions/upload-artifact + - name: Upload Artifacts + uses: actions/upload-artifact@v1 + with: + name: alerting-plugin-${{ matrix.os }} path: alerting-artifacts diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md index ec1a3404b..38bb99af3 100644 --- a/DEVELOPER_GUIDE.md +++ b/DEVELOPER_GUIDE.md @@ -76,9 +76,9 @@ When launching a cluster using one of the above commands, logs are placed in `al 1. Setup a local opensearch cluster with security plugin. - - `./gradlew :alerting:integTest -Dtests.rest.cluster=localhost:9200 -Dtests.cluster=localhost:9200 -Dtests.clustername=opensearch -Dhttps=true -Dsecurity=true -Duser=admin -Dpassword=admin` + - `./gradlew :alerting:integTest -Dtests.rest.cluster=localhost:9200 -Dtests.cluster=localhost:9200 -Dtests.clustername=opensearch -Dhttps=true -Dsecurity=true -Duser=admin -Dpassword=<admin-password>` - - `./gradlew :alerting:integTest -Dtests.rest.cluster=localhost:9200 -Dtests.cluster=localhost:9200 -Dtests.clustername=opensearch -Dhttps=true -Dsecurity=true -Duser=admin -Dpassword=admin --tests "org.opensearch.alerting.MonitorRunnerIT.test execute monitor returns search result"` + - `./gradlew :alerting:integTest -Dtests.rest.cluster=localhost:9200 -Dtests.cluster=localhost:9200 -Dtests.clustername=opensearch -Dhttps=true -Dsecurity=true -Duser=admin -Dpassword=<admin-password> --tests "org.opensearch.alerting.MonitorRunnerIT.test execute monitor returns search result"` #### Building from the IDE @@ -128,6 +128,21 @@ You can also run the integration tests against a multi-node cluster by running ` You can also debug a multi-node cluster, by using a combination of above multi-node and debug steps. But, you must set up debugger configurations to listen on each port starting from `5005` and increasing by 1 for each node. +### Running with custom OpenSearch distribution + +If you're trying to execute run/integTest on ARM macOS, or the current opensearch-min distribution artifact is not available for any reason, you can use your own locally built distribution. +Pass `-PcustomDistributionUrl=/path/to/distro` to run/integTest to execute them with a local OpenSearch distribution. + +1. Build the OpenSearch min distribution: + 1. Clone the [OpenSearch repo](https://github.com/opensearch-project/OpenSearch) + 2. Execute `./gradlew assemble` (you will need docker installed) + 3. Built distributions are in `distribution/archives/<platform>/build/distributions/` +2.
`./gradlew run -PcustomDistributionUrl=/path/to/distro` + +Example running on ARM macOS with a local artifact: + +`./gradlew integTest -PnumNodes=3 -PcustomDistributionUrl=/Users/macos-user/OpenSearch/distribution/archives/darwin-arm64-tar/build/distributions/opensearch-min-2.5.0-SNAPSHOT-darwin-arm64.tar.gz` + ### Backport - [Link to backport documentation](https://github.com/opensearch-project/opensearch-plugins/blob/main/BACKPORT.md) diff --git a/MAINTAINERS.md b/MAINTAINERS.md index ad7ce1602..89c7bd145 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -1,12 +1,25 @@ -## Maintainers -| Maintainer | GitHub ID | Affiliation | -| --------------- | --------- | ----------- | -| Ashish Agrawal | [lezzago](https://github.com/lezzago) | Amazon | -| Mohammad Qureshi | [qreshi](https://github.com/qreshi) | Amazon | -| Sriram Kosuri | [skkosuri-amzn](https://github.com/skkosuri-amzn) | Amazon | -| Bowen Lan | [bowenlan-amzn](https://github.com/bowenlan-amzn) | Amazon | -| Rishabh Maurya | [rishabhmaurya](https://github.com/rishabhmaurya) | Amazon | -| Tianli Feng | [tlfeng](https://github.com/tlfeng) | Amazon | -| Annie Lee | [leeyun-amzn](https://github.com/leeyun-amzn) | Amazon | - -[This document](https://github.com/opensearch-project/.github/blob/main/MAINTAINERS.md) explains what maintainers do in this repo, and how they should be doing it. If you're interested in contributing, see [CONTRIBUTING](CONTRIBUTING.md). \ No newline at end of file +## Overview + +This document contains a list of maintainers in this repo. See [opensearch-project/.github/RESPONSIBILITIES.md](https://github.com/opensearch-project/.github/blob/main/RESPONSIBILITIES.md#maintainer-responsibilities), which explains what the role of maintainer means, what maintainers do in this and other repos, and how they should be doing it. If you're interested in contributing, and becoming a maintainer, see [CONTRIBUTING](CONTRIBUTING.md).
+ +## Current Maintainers + +| Maintainer | GitHub ID | Affiliation | +|----------------------| ------------------------------------------------- |-------------| +| Ashish Agrawal | [lezzago](https://github.com/lezzago) | Amazon | +| Mohammad Qureshi | [qreshi](https://github.com/qreshi) | Amazon | +| Bowen Lan | [bowenlan-amzn](https://github.com/bowenlan-amzn) | Amazon | +| Saurabh Singh | [getsaurabh02](https://github.com/getsaurabh02) | Amazon | +| Rishabh Maurya | [rishabhmaurya](https://github.com/rishabhmaurya) | Amazon | +| Subhobrata Dey | [sbcd90](https://github.com/sbcd90) | Amazon | +| Surya Sashank Nistala | [eirsep](https://github.com/eirsep) | Amazon | +| Thomas Hurney | [AWSHurneyt](https://github.com/AWSHurneyt) | Amazon | +| Praveen Sameneni | [praveensameneni](https://github.com/praveensameneni) | Amazon | +| Chase Engelbrecht | [engechas](https://github.com/engechas) | Amazon | + +## Emeritus + +| Maintainer | GitHub ID | Affiliation | +|-------------------------|---------------------------------------------| ----------- | +| Sriram Kosuri | [skkosuri-amzn](https://github.com/skkosuri-amzn) | Amazon | +| Tianli Feng | [tlfeng](https://github.com/tlfeng) | Amazon | diff --git a/README.md b/README.md index 5616272a6..a485a5a46 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ [![Test Workflow](https://github.com/opensearch-project/alerting/workflows/Test%20Workflow/badge.svg)](https://github.com/opensearch-project/alerting/actions) [![codecov](https://codecov.io/gh/opensearch-project/alerting/branch/main/graph/badge.svg)](https://codecov.io/gh/opensearch-project/alerting) [![Documentation](https://img.shields.io/badge/api-reference-blue.svg)](https://opensearch.org/docs/latest/monitoring-plugins/alerting/api/) -[![Chat](https://img.shields.io/badge/chat-on%20forums-blue)](https://discuss.opendistrocommunity.dev/c/alerting/) +[![Chat](https://img.shields.io/badge/chat-on%20forums-blue)](https://forum.opensearch.org/c/plugins/alerting/7) ![PRs welcome!](https://img.shields.io/badge/PRs-welcome!-success) diff --git a/alerting/build.gradle b/alerting/build.gradle index df76479ff..0e920fee6 100644 --- a/alerting/build.gradle +++ b/alerting/build.gradle @@ -16,6 +16,7 @@ apply plugin: 'jacoco' def usingRemoteCluster = System.properties.containsKey('tests.rest.cluster') || System.properties.containsKey('tests.cluster') def usingMultiNode = project.properties.containsKey('numNodes') +String bwcVersion = "2.12.0.0" ext { projectSubstitutions = [:] @@ -36,6 +37,7 @@ publishing { pom { name = "opensearch-alerting" description = "OpenSearch Alerting plugin" + groupId = "org.opensearch.plugin" licenses { license { name = "The Apache License, Version 2.0" @@ -51,6 +53,17 @@ publishing { } } } + + repositories { + maven { + name = "Snapshots" + url = "https://aws.oss.sonatype.org/content/repositories/snapshots" + credentials { + username "$System.env.SONATYPE_USERNAME" + password "$System.env.SONATYPE_PASSWORD" + } + } + } } // Prefer elastic bundled versions for conflicts (primarily with AWS SDK).
We need to specify these manually because @@ -64,9 +77,10 @@ configurations.all { force "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" force "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" force "commons-logging:commons-logging:${versions.commonslogging}" - force "org.apache.httpcomponents:httpcore:${versions.httpcore}" // force the version until OpenSearch upgrade to an invulnerable one, https://www.whitesourcesoftware.com/vulnerability-database/WS-2019-0379 force "commons-codec:commons-codec:1.13" + + force "org.slf4j:slf4j-api:${versions.slf4j}" //Needed for http5 // This is required because kotlin-coroutines-core 1.1.1 still requires kotlin stdlib 1.3.20 and we're using a higher kotlin version force "org.jetbrains.kotlin:kotlin-stdlib:${kotlin_version}" @@ -78,7 +92,18 @@ configurations.testImplementation { exclude module: "securemock" } +configurations { + zipArchive +} + dependencies { + // Needed for integ tests + zipArchive group: 'org.opensearch.plugin', name:'opensearch-notifications-core', version: "${opensearch_build}" + zipArchive group: 'org.opensearch.plugin', name:'notifications', version: "${opensearch_build}" + + // Needed for BWC tests + zipArchive group: 'org.opensearch.plugin', name:'alerting', version: "${bwcVersion}-SNAPSHOT" + compileOnly "org.opensearch.plugin:opensearch-scripting-painless-spi:${versions.opensearch}" api "org.opensearch.plugin:percolator-client:${opensearch_version}" @@ -88,10 +113,15 @@ dependencies { implementation "org.jetbrains:annotations:13.0" api project(":alerting-core") - implementation "com.github.seancfoley:ipaddress:5.3.3" + implementation "com.github.seancfoley:ipaddress:5.4.1" + testImplementation "org.antlr:antlr4-runtime:${versions.antlr4}" testImplementation "org.jetbrains.kotlin:kotlin-test:${kotlin_version}" - testImplementation "org.mockito:mockito-core:4.3.1" + testImplementation "org.mockito:mockito-core:${versions.mockito}" + testImplementation "org.opensearch.plugin:reindex-client:${opensearch_version}" + testImplementation "org.opensearch.plugin:parent-join-client:${opensearch_version}" + testImplementation "org.opensearch.plugin:lang-painless:${opensearch_version}" + testImplementation "org.opensearch.plugin:lang-mustache-client:${opensearch_version}" } javadoc.enabled = false // turn off javadoc as it barfs on Kotlin code @@ -122,12 +152,6 @@ integTest.getClusters().forEach{c -> c.plugin(project.getObjects().fileProperty( def _numNodes = findProperty('numNodes') as Integer ?: 1 -String notificationsFilePath = "src/test/resources/notifications" -String notificationsCoreFilePath = "src/test/resources/notifications-core" -String notificationsPlugin = "opensearch-notifications-" + plugin_no_snapshot + ".zip" -String notificationsCorePlugin = "opensearch-notifications-core-" + plugin_no_snapshot + ".zip" -String notificationsRemoteFile = "https://ci.opensearch.org/ci/dbc/distribution-build-opensearch/" + opensearch_no_snapshot + "/latest/linux/x64/tar/builds/opensearch/plugins/" + notificationsPlugin -String notificationsCoreRemoteFile = "https://ci.opensearch.org/ci/dbc/distribution-build-opensearch/" + opensearch_no_snapshot + "/latest/linux/x64/tar/builds/opensearch/plugins/" + notificationsCorePlugin testClusters.integTest { testDistribution = "ARCHIVE" // Cluster shrink exception thrown if we try to set numberOfNodes to 1, so only apply if > 1 @@ -147,17 +171,9 @@ testClusters.integTest { new RegularFile() { @Override File getAsFile() { - File dir = new File(rootDir.path + 
"/alerting/" + notificationsCoreFilePath) - - if (!dir.exists()) { - dir.mkdirs() - } - - File f = new File(dir, notificationsCorePlugin) - if (!f.exists()) { - new URL(notificationsCoreRemoteFile).withInputStream{ ins -> f.withOutputStream{ it << ins }} - } - fileTree(notificationsCoreFilePath).getSingleFile() + return configurations.zipArchive.asFileTree.matching { + include '**/opensearch-notifications-core*' + }.singleFile } } })) @@ -166,24 +182,16 @@ testClusters.integTest { new RegularFile() { @Override File getAsFile() { - File dir = new File(rootDir.path + "/alerting/" + notificationsFilePath) - - if (!dir.exists()) { - dir.mkdirs() - } - - File f = new File(dir, notificationsPlugin) - if (!f.exists()) { - new URL(notificationsRemoteFile).withInputStream{ ins -> f.withOutputStream{ it << ins }} - } - fileTree(notificationsFilePath).getSingleFile() + return configurations.zipArchive.asFileTree.matching { + include '**/notifications*' + }.singleFile } } })) } testClusters.integTest.nodes.each { node -> - node.setting("opendistro.destination.host.deny_list", "[\"10.0.0.0/8\", \"127.0.0.1\"]") + node.setting("plugins.destination.host.deny_list", "[\"10.0.0.0/8\", \"127.0.0.1\"]") } integTest { @@ -247,17 +255,12 @@ task integTestRemote(type: RestIntegTestTask) { } integTestRemote.enabled = System.getProperty("tests.rest.cluster") != null -String bwcVersion = "1.13.1.0" String baseName = "alertingBwcCluster" -String bwcFilePath = "src/test/resources/bwc" -String bwcOpenDistroPlugin = "opendistro-alerting-" + bwcVersion + ".zip" -String bwcRemoteFile = 'https://d3g5vo6xdbdb9a.cloudfront.net/downloads/elasticsearch-plugins/opendistro-alerting/' + bwcOpenDistroPlugin - 2.times {i -> testClusters { "${baseName}$i" { testDistribution = "ARCHIVE" - versions = ["7.10.2","2.1.0-SNAPSHOT"] + versions = ["2.12.0-SNAPSHOT", "3.0.0-SNAPSHOT"] numberOfNodes = 3 plugin(provider(new Callable(){ @Override @@ -265,16 +268,9 @@ String bwcRemoteFile = 'https://d3g5vo6xdbdb9a.cloudfront.net/downloads/elastics return new RegularFile() { @Override File getAsFile() { - File dir = new File(rootDir.path + "/alerting/" + bwcFilePath + "/alerting/" + bwcVersion) - - if (!dir.exists()) { - dir.mkdirs() - } - File f = new File(dir, bwcOpenDistroPlugin) - if (!f.exists()) { - new URL(bwcRemoteFile).withInputStream{ ins -> f.withOutputStream{ it << ins }} - } - return fileTree(bwcFilePath + "/alerting/" + bwcVersion).getSingleFile() + return configurations.zipArchive.asFileTree.matching { + include '**/alerting*' + }.singleFile } } } @@ -297,17 +293,9 @@ task prepareBwcTests { new RegularFile() { @Override File getAsFile() { - File dir = new File(rootDir.path + "/alerting/" + notificationsCoreFilePath) - - if (!dir.exists()) { - dir.mkdirs() - } - - File f = new File(dir, notificationsCorePlugin) - if (!f.exists()) { - new URL(notificationsCoreRemoteFile).withInputStream{ ins -> f.withOutputStream{ it << ins }} - } - fileTree(notificationsCoreFilePath).getSingleFile() + return configurations.zipArchive.asFileTree.matching { + include '**/opensearch-notifications-core*' + }.singleFile } } }), @@ -315,17 +303,9 @@ task prepareBwcTests { new RegularFile() { @Override File getAsFile() { - File dir = new File(rootDir.path + "/alerting/" + notificationsFilePath) - - if (!dir.exists()) { - dir.mkdirs() - } - - File f = new File(dir, notificationsPlugin) - if (!f.exists()) { - new URL(notificationsRemoteFile).withInputStream{ ins -> f.withOutputStream{ it << ins }} - } - fileTree(notificationsFilePath).getSingleFile() + 
return configurations.zipArchive.asFileTree.matching { + include '**/notifications*' + }.singleFile } } }) diff --git a/alerting/src/main/java/org/opensearch/percolator/PercolateQueryBuilderExt.java b/alerting/src/main/java/org/opensearch/percolator/PercolateQueryBuilderExt.java index 33f67641e..09e279ec7 100644 --- a/alerting/src/main/java/org/opensearch/percolator/PercolateQueryBuilderExt.java +++ b/alerting/src/main/java/org/opensearch/percolator/PercolateQueryBuilderExt.java @@ -54,27 +54,31 @@ import org.apache.lucene.util.BitDocIdSet; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.SetOnce; import org.opensearch.OpenSearchException; import org.opensearch.ResourceNotFoundException; import org.opensearch.Version; -import org.opensearch.action.ActionListener; import org.opensearch.action.get.GetRequest; -import org.opensearch.common.ParseField; -import org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.io.stream.InputStreamStreamInput; -import org.opensearch.common.io.stream.NamedWriteableAwareStreamInput; -import org.opensearch.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.ConstructingObjectParser; +import org.opensearch.common.SetOnce; import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.common.xcontent.NamedXContentRegistry; -import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; -import org.opensearch.common.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.ParseField; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.InputStreamStreamInput; +import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.core.xcontent.ConstructingObjectParser; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.analysis.FieldNameAnalyzer; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldDataCache; @@ -90,8 +94,6 @@ import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.QueryShardException; import org.opensearch.index.query.Rewriteable; -import org.opensearch.indices.breaker.CircuitBreakerService; -import org.opensearch.indices.breaker.NoneCircuitBreakerService; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -103,8 +105,8 @@ import java.util.Objects; import java.util.function.Supplier; -import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; -import static org.opensearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static 
org.opensearch.core.xcontent.ConstructingObjectParser.constructorArg; +import static org.opensearch.core.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.opensearch.search.SearchService.ALLOW_EXPENSIVE_QUERIES; /** @@ -127,7 +129,7 @@ public class PercolateQueryBuilderExt extends AbstractQueryBuilder<PercolateQueryBuilderExt> { private final List<BytesReference> documents; - private final XContentType documentXContentType; + private final MediaType documentXContentType; private final String indexedDocumentIndex; private final String indexedDocumentId; @@ -154,7 +156,7 @@ public PercolateQueryBuilderExt(String field, BytesReference document, XContentT * @param documents The binary blob containing document to percolate * @param documentXContentType The content type of the binary blob containing the document to percolate */ - public PercolateQueryBuilderExt(String field, List<BytesReference> documents, XContentType documentXContentType) { + public PercolateQueryBuilderExt(String field, List<BytesReference> documents, MediaType documentXContentType) { if (field == null) { throw new IllegalArgumentException("[field] is a required argument"); } @@ -256,7 +258,11 @@ protected PercolateQueryBuilderExt(String field, Supplier<BytesReference> docume } documents = in.readList(StreamInput::readBytesReference); if (documents.isEmpty() == false) { - documentXContentType = in.readEnum(XContentType.class); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + documentXContentType = in.readMediaType(); + } else { + documentXContentType = in.readEnum(XContentType.class); + } } else { documentXContentType = null; } @@ -302,7 +308,11 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeBytesReference(document); } if (documents.isEmpty() == false) { - out.writeEnum(documentXContentType); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + documentXContentType.writeTo(out); + } else { + out.writeEnum((XContentType) documentXContentType); + } } } @@ -436,7 +446,7 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) { PercolateQueryBuilderExt rewritten = new PercolateQueryBuilderExt( field, Collections.singletonList(source), - XContentHelper.xContentType(source) + MediaTypeRegistry.xContentType(source) ); if (name != null) { rewritten.setName(name); @@ -562,7 +572,7 @@ public List<BytesReference> getDocuments() { } // pkg-private for testing - XContentType getXContentType() { + MediaType getXContentType() { return documentXContentType; } diff --git a/alerting/src/main/java/org/opensearch/percolator/PercolatorFieldMapperExt.java b/alerting/src/main/java/org/opensearch/percolator/PercolatorFieldMapperExt.java index 3a8fc8a22..8ed81e12b 100644 --- a/alerting/src/main/java/org/opensearch/percolator/PercolatorFieldMapperExt.java +++ b/alerting/src/main/java/org/opensearch/percolator/PercolatorFieldMapperExt.java @@ -56,16 +56,16 @@ import org.apache.lucene.util.BytesRefBuilder; import org.opensearch.Version; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.common.ParsingException; -import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.hash.MurmurHash3; -import org.opensearch.common.io.stream.OutputStreamStreamOutput; import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentLocation; -import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.core.common.ParsingException; +import
org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.OutputStreamStreamOutput; +import org.opensearch.core.xcontent.XContentLocation; +import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.BinaryFieldMapper; import org.opensearch.index.mapper.FieldMapper; import org.opensearch.index.mapper.KeywordFieldMapper; diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt b/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt index 14656e81a..6b820cf36 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/AlertService.kt @@ -13,40 +13,61 @@ import org.opensearch.action.bulk.BulkRequest import org.opensearch.action.bulk.BulkResponse import org.opensearch.action.delete.DeleteRequest import org.opensearch.action.index.IndexRequest +import org.opensearch.action.index.IndexResponse import org.opensearch.action.search.SearchRequest import org.opensearch.action.search.SearchResponse -import org.opensearch.alerting.alerts.AlertError +import org.opensearch.action.support.WriteRequest import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.model.ActionExecutionResult import org.opensearch.alerting.model.ActionRunResult -import org.opensearch.alerting.model.AggregationResultBucket -import org.opensearch.alerting.model.Alert -import org.opensearch.alerting.model.BucketLevelTrigger -import org.opensearch.alerting.model.Monitor +import org.opensearch.alerting.model.ChainedAlertTriggerRunResult +import org.opensearch.alerting.model.ClusterMetricsTriggerRunResult import org.opensearch.alerting.model.QueryLevelTriggerRunResult -import org.opensearch.alerting.model.Trigger -import org.opensearch.alerting.model.action.AlertCategory import org.opensearch.alerting.opensearchapi.firstFailureOrNull import org.opensearch.alerting.opensearchapi.retry import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.script.ChainedAlertTriggerExecutionContext import org.opensearch.alerting.script.DocumentLevelTriggerExecutionContext import org.opensearch.alerting.script.QueryLevelTriggerExecutionContext import org.opensearch.alerting.util.IndexUtils +import org.opensearch.alerting.util.MAX_SEARCH_SIZE import org.opensearch.alerting.util.getBucketKeysHash +import org.opensearch.alerting.workflow.WorkflowRunContext import org.opensearch.client.Client -import org.opensearch.common.bytes.BytesReference +import org.opensearch.common.unit.TimeValue import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.XContentFactory import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.alerts.AlertError +import org.opensearch.commons.alerting.model.ActionExecutionResult +import org.opensearch.commons.alerting.model.AggregationResultBucket +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.BucketLevelTrigger +import org.opensearch.commons.alerting.model.DataSources +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.NoOpTrigger +import org.opensearch.commons.alerting.model.Trigger +import 
org.opensearch.commons.alerting.model.Workflow +import org.opensearch.commons.alerting.model.action.AlertCategory +import org.opensearch.core.action.ActionListener +import org.opensearch.core.common.bytes.BytesReference +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import org.opensearch.index.VersionType import org.opensearch.index.query.QueryBuilders -import org.opensearch.rest.RestStatus +import org.opensearch.index.reindex.BulkByScrollResponse +import org.opensearch.index.reindex.DeleteByQueryAction +import org.opensearch.index.reindex.DeleteByQueryRequestBuilder import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.search.sort.SortOrder import java.time.Instant import java.util.UUID +import java.util.concurrent.TimeUnit +import kotlin.coroutines.resume +import kotlin.coroutines.resumeWithException +import kotlin.coroutines.suspendCoroutine /** Service that handles CRUD operations for alerts */ class AlertService( @@ -57,14 +78,38 @@ class AlertService( companion object { const val MAX_BUCKET_LEVEL_MONITOR_ALERT_SEARCH_COUNT = 500 + const val ERROR_ALERT_ID_PREFIX = "error-alert" + + val ALERTS_SEARCH_TIMEOUT = TimeValue(5, TimeUnit.MINUTES) } private val logger = LogManager.getLogger(AlertService::class.java) - suspend fun loadCurrentAlertsForQueryLevelMonitor(monitor: Monitor): Map<Trigger, Alert?> { + suspend fun loadCurrentAlertsForWorkflow(workflow: Workflow, dataSources: DataSources): Map<Trigger, Alert?> { val searchAlertsResponse: SearchResponse = searchAlerts( - monitorId = monitor.id, - size = monitor.triggers.size * 2 // We expect there to be only a single in-progress alert so fetch 2 to check + workflow = workflow, + size = workflow.triggers.size * 2, // We expect there to be only a single in-progress alert so fetch 2 to check + dataSources = dataSources + ) + + val foundAlerts = searchAlertsResponse.hits.map { Alert.parse(contentParser(it.sourceRef), it.id, it.version) } + .groupBy { it.triggerId } + foundAlerts.values.forEach { alerts -> + if (alerts.size > 1) { + logger.warn("Found multiple alerts for same trigger: $alerts") + } + } + + return workflow.triggers.associateWith { trigger -> + foundAlerts[trigger.id]?.firstOrNull() + } + } + + suspend fun loadCurrentAlertsForQueryLevelMonitor(monitor: Monitor, workflowRunContext: WorkflowRunContext?): Map<Trigger, Alert?> { + val searchAlertsResponse: SearchResponse = searchAlerts( + monitor = monitor, + size = monitor.triggers.size * 2, // We expect there to be only a single in-progress alert so fetch 2 to check + workflowRunContext ) val foundAlerts = searchAlertsResponse.hits.map { Alert.parse(contentParser(it.sourceRef), it.id, it.version) } @@ -80,11 +125,15 @@ class AlertService( } } - suspend fun loadCurrentAlertsForBucketLevelMonitor(monitor: Monitor): Map<Trigger, MutableMap<String, Alert>> { + suspend fun loadCurrentAlertsForBucketLevelMonitor( + monitor: Monitor, + workflowRunContext: WorkflowRunContext?, + ): Map<Trigger, MutableMap<String, Alert>> { val searchAlertsResponse: SearchResponse = searchAlerts( - monitorId = monitor.id, + monitor = monitor, // TODO: This should be limited based on a circuit breaker that limits Alerts - size = MAX_BUCKET_LEVEL_MONITOR_ALERT_SEARCH_COUNT + size = MAX_BUCKET_LEVEL_MONITOR_ALERT_SEARCH_COUNT, + workflowRunContext = workflowRunContext ) val foundAlerts = searchAlertsResponse.hits.map { Alert.parse(contentParser(it.sourceRef), it.id, it.version) } @@ -103,7 +152,9 @@ class AlertService( fun
composeQueryLevelAlert( ctx: QueryLevelTriggerExecutionContext, result: QueryLevelTriggerRunResult, - alertError: AlertError? + alertError: AlertError?, + executionId: String, + workflowRunContext: WorkflowRunContext? ): Alert? { val currentTime = Instant.now() val currentAlert = ctx.alert @@ -140,30 +191,56 @@ class AlertService( ) } + // Including a list of triggered clusters for cluster metrics monitors + var triggeredClusters: MutableList<String>? = null + if (result is ClusterMetricsTriggerRunResult) + result.clusterTriggerResults.forEach { + if (it.triggered) { + // Add an empty list if one isn't already present + if (triggeredClusters.isNullOrEmpty()) triggeredClusters = mutableListOf() + + // Add the cluster to the list of triggered clusters + triggeredClusters!!.add(it.cluster) + } + } + // Merge the alert's error message to the current alert's history val updatedHistory = currentAlert?.errorHistory.update(alertError) return if (alertError == null && !result.triggered) { currentAlert?.copy( - state = Alert.State.COMPLETED, endTime = currentTime, errorMessage = null, - errorHistory = updatedHistory, actionExecutionResults = updatedActionExecutionResults, - schemaVersion = IndexUtils.alertIndexSchemaVersion + state = Alert.State.COMPLETED, + endTime = currentTime, + errorMessage = null, + errorHistory = updatedHistory, + actionExecutionResults = updatedActionExecutionResults, + schemaVersion = IndexUtils.alertIndexSchemaVersion, + clusters = triggeredClusters ) } else if (alertError == null && currentAlert?.isAcknowledged() == true) { null } else if (currentAlert != null) { val alertState = if (alertError == null) Alert.State.ACTIVE else Alert.State.ERROR currentAlert.copy( - state = alertState, lastNotificationTime = currentTime, errorMessage = alertError?.message, - errorHistory = updatedHistory, actionExecutionResults = updatedActionExecutionResults, - schemaVersion = IndexUtils.alertIndexSchemaVersion + state = alertState, + lastNotificationTime = currentTime, + errorMessage = alertError?.message, + errorHistory = updatedHistory, + actionExecutionResults = updatedActionExecutionResults, + schemaVersion = IndexUtils.alertIndexSchemaVersion, + clusters = triggeredClusters ) } else { - val alertState = if (alertError == null) Alert.State.ACTIVE else Alert.State.ERROR + val alertState = if (workflowRunContext?.auditDelegateMonitorAlerts == true) { + Alert.State.AUDIT + } else if (alertError == null) Alert.State.ACTIVE + else Alert.State.ERROR Alert( monitor = ctx.monitor, trigger = ctx.trigger, startTime = currentTime, lastNotificationTime = currentTime, state = alertState, errorMessage = alertError?.message, errorHistory = updatedHistory, actionExecutionResults = updatedActionExecutionResults, - schemaVersion = IndexUtils.alertIndexSchemaVersion + schemaVersion = IndexUtils.alertIndexSchemaVersion, executionId = executionId, + workflowId = workflowRunContext?.workflowId ?: "", + clusters = triggeredClusters ) } } @@ -173,18 +250,131 @@ class AlertService( findings: List<String>, relatedDocIds: List<String>, ctx: DocumentLevelTriggerExecutionContext, - alertError: AlertError? + alertError: AlertError?, + executionId: String, + workflowRunContext: WorkflowRunContext?
): Alert { val currentTime = Instant.now() - val alertState = if (alertError == null) Alert.State.ACTIVE else Alert.State.ERROR + val alertState = if (workflowRunContext?.auditDelegateMonitorAlerts == true) { + Alert.State.AUDIT + } else if (alertError == null) { + Alert.State.ACTIVE + } else { + Alert.State.ERROR + } return Alert( id = UUID.randomUUID().toString(), monitor = ctx.monitor, trigger = ctx.trigger, startTime = currentTime, lastNotificationTime = currentTime, state = alertState, errorMessage = alertError?.message, - schemaVersion = IndexUtils.alertIndexSchemaVersion, findingIds = findings, relatedDocIds = relatedDocIds + schemaVersion = IndexUtils.alertIndexSchemaVersion, findingIds = findings, relatedDocIds = relatedDocIds, + executionId = executionId, workflowId = workflowRunContext?.workflowId ?: "" ) } + fun composeMonitorErrorAlert( + id: String, + monitor: Monitor, + alertError: AlertError, + executionId: String?, + workflowRunContext: WorkflowRunContext? + ): Alert { + val currentTime = Instant.now() + val alertState = if (workflowRunContext?.auditDelegateMonitorAlerts == true) { + Alert.State.AUDIT + } else { + Alert.State.ERROR + } + return Alert( + id = id, monitor = monitor, trigger = NoOpTrigger(), startTime = currentTime, + lastNotificationTime = currentTime, state = alertState, errorMessage = alertError.message, + schemaVersion = IndexUtils.alertIndexSchemaVersion, executionId = executionId, workflowId = workflowRunContext?.workflowId ?: "" + ) + } + + fun composeChainedAlert( + ctx: ChainedAlertTriggerExecutionContext, + executionId: String, + workflow: Workflow, + associatedAlertIds: List<String>, + result: ChainedAlertTriggerRunResult, + alertError: AlertError? = null, + ): Alert? { + + val currentTime = Instant.now() + val currentAlert = ctx.alert + + val updatedActionExecutionResults = mutableListOf<ActionExecutionResult>() + val currentActionIds = mutableSetOf<String>() + if (currentAlert != null) { + // update current alert's action execution results + for (actionExecutionResult in currentAlert.actionExecutionResults) { + val actionId = actionExecutionResult.actionId + currentActionIds.add(actionId) + val actionRunResult = result.actionResults[actionId] + when { + actionRunResult == null -> updatedActionExecutionResults.add(actionExecutionResult) + actionRunResult.throttled -> + updatedActionExecutionResults.add( + actionExecutionResult.copy( + throttledCount = actionExecutionResult.throttledCount + 1 + ) + ) + + else -> updatedActionExecutionResults.add(actionExecutionResult.copy(lastExecutionTime = actionRunResult.executionTime)) + } + } + // add action execution results which do not exist in current alert + updatedActionExecutionResults.addAll( + result.actionResults.filter { !currentActionIds.contains(it.key) } + .map { ActionExecutionResult(it.key, it.value.executionTime, if (it.value.throttled) 1 else 0) } + ) + } else { + updatedActionExecutionResults.addAll( + result.actionResults.map { + ActionExecutionResult(it.key, it.value.executionTime, if (it.value.throttled) 1 else 0) + } + ) + } + + // Merge the alert's error message to the current alert's history + val updatedHistory = currentAlert?.errorHistory.update(alertError) + return if (alertError == null && !result.triggered) { + currentAlert?.copy( + state = Alert.State.COMPLETED, + endTime = currentTime, + errorMessage = null, + errorHistory = updatedHistory, + actionExecutionResults = updatedActionExecutionResults, + schemaVersion = IndexUtils.alertIndexSchemaVersion + ) + } else if (alertError == null && currentAlert?.isAcknowledged()
== true) { + null + } else if (currentAlert != null) { + val alertState = Alert.State.ACTIVE + currentAlert.copy( + state = alertState, + lastNotificationTime = currentTime, + errorMessage = alertError?.message, + errorHistory = updatedHistory, + actionExecutionResults = updatedActionExecutionResults, + schemaVersion = IndexUtils.alertIndexSchemaVersion, + ) + } else { + if (alertError == null) Alert.State.ACTIVE + else Alert.State.ERROR + Alert( + startTime = Instant.now(), + lastNotificationTime = currentTime, + state = Alert.State.ACTIVE, + errorMessage = null, schemaVersion = IndexUtils.alertIndexSchemaVersion, + chainedAlertTrigger = ctx.trigger, + executionId = executionId, + workflow = workflow, + associatedAlertIds = associatedAlertIds + ) + } + } + fun updateActionResultsForBucketLevelAlert( currentAlert: Alert, actionResults: Map<String, ActionRunResult>, @@ -235,7 +425,10 @@ monitor: Monitor, trigger: BucketLevelTrigger, currentAlerts: MutableMap<String, Alert>, - aggResultBuckets: List<AggregationResultBucket> + aggResultBuckets: List<AggregationResultBucket>, + findings: List<String>, + executionId: String, + workflowRunContext: WorkflowRunContext? ): Map<String, List<Alert>> { val dedupedAlerts = mutableListOf<Alert>() val newAlerts = mutableListOf<Alert>() @@ -251,11 +444,15 @@ class AlertService( currentAlerts.remove(aggAlertBucket.getBucketKeysHash()) } else { // New Alert + val alertState = if (workflowRunContext?.auditDelegateMonitorAlerts == true) { + Alert.State.AUDIT + } else Alert.State.ACTIVE val newAlert = Alert( monitor = monitor, trigger = trigger, startTime = currentTime, - lastNotificationTime = null, state = Alert.State.ACTIVE, errorMessage = null, + lastNotificationTime = currentTime, state = alertState, errorMessage = null, errorHistory = mutableListOf(), actionExecutionResults = mutableListOf(), - schemaVersion = IndexUtils.alertIndexSchemaVersion, aggregationResultBucket = aggAlertBucket + schemaVersion = IndexUtils.alertIndexSchemaVersion, aggregationResultBucket = aggAlertBucket, + findingIds = findings, executionId = executionId, workflowId = workflowRunContext?.workflowId ?: "" ) newAlerts.add(newAlert) } @@ -277,7 +474,216 @@ class AlertService( } ?: listOf() } - suspend fun saveAlerts(alerts: List<Alert>, retryPolicy: BackoffPolicy, allowUpdatingAcknowledgedAlert: Boolean = false) { + suspend fun upsertMonitorErrorAlert( + monitor: Monitor, + errorMessage: String, + executionId: String?, + workflowRunContext: WorkflowRunContext?, + ) { + val newErrorAlertId = "$ERROR_ALERT_ID_PREFIX-${monitor.id}-${UUID.randomUUID()}" + + val searchRequest = SearchRequest(monitor.dataSources.alertsIndex) + .source( + SearchSourceBuilder() + .sort(Alert.START_TIME_FIELD, SortOrder.DESC) + .query( + QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitor.id)) + .must(QueryBuilders.termQuery(Alert.STATE_FIELD, Alert.State.ERROR.name)) + ) + ) + val searchResponse: SearchResponse = client.suspendUntil { search(searchRequest, it) } + + var alert = + composeMonitorErrorAlert(newErrorAlertId, monitor, AlertError(Instant.now(), errorMessage), executionId, workflowRunContext) + + if (searchResponse.hits.totalHits.value > 0L) { + if (searchResponse.hits.totalHits.value > 1L) { + logger.warn("There are [${searchResponse.hits.totalHits.value}] error alerts for monitor [${monitor.id}]") + } + // Deserialize first/latest Alert + val hit = searchResponse.hits.hits[0] + val xcp = contentParser(hit.sourceRef) + val existingErrorAlert = Alert.parse(xcp, hit.id, hit.version) + + val currentTime = Instant.now() + alert = if (alert.errorMessage !=
existingErrorAlert.errorMessage) { + var newErrorHistory = existingErrorAlert.errorHistory.update( + AlertError(existingErrorAlert.startTime, existingErrorAlert.errorMessage!!) + ) + alert.copy( + id = existingErrorAlert.id, + errorHistory = newErrorHistory, + startTime = currentTime, + lastNotificationTime = currentTime + ) + } else { + existingErrorAlert.copy(lastNotificationTime = currentTime) + } + } + + val alertIndexRequest = IndexRequest(monitor.dataSources.alertsIndex) + .routing(alert.monitorId) + .source(alert.toXContentWithUser(XContentFactory.jsonBuilder())) + .opType(DocWriteRequest.OpType.INDEX) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .id(alert.id) + + val indexResponse: IndexResponse = client.suspendUntil { index(alertIndexRequest, it) } + logger.debug("Monitor error Alert successfully upserted. Op result: ${indexResponse.result}") + } + + suspend fun clearMonitorErrorAlert(monitor: Monitor) { + val currentTime = Instant.now() + try { + val searchRequest = SearchRequest("${monitor.dataSources.alertsIndex}") + .source( + SearchSourceBuilder() + .size(MAX_SEARCH_SIZE) + .sort(Alert.START_TIME_FIELD, SortOrder.DESC) + .query( + QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitor.id)) + .must(QueryBuilders.termQuery(Alert.STATE_FIELD, Alert.State.ERROR.name)) + ) + + ) + searchRequest.cancelAfterTimeInterval = ALERTS_SEARCH_TIMEOUT + val searchResponse: SearchResponse = client.suspendUntil { search(searchRequest, it) } + // If there's no error alert present, there's nothing to clear. We can stop here. + if (searchResponse.hits.totalHits.value == 0L) { + return + } + + val indexRequests = mutableListOf<IndexRequest>() + searchResponse.hits.hits.forEach { hit -> + if (searchResponse.hits.totalHits.value > 1L) { + logger.warn("Found [${searchResponse.hits.totalHits.value}] error alerts for monitor [${monitor.id}] while clearing") + } + // Deserialize first/latest Alert + val xcp = contentParser(hit.sourceRef) + val existingErrorAlert = Alert.parse(xcp, hit.id, hit.version) + + val updatedAlert = existingErrorAlert.copy( + endTime = currentTime + ) + + indexRequests += IndexRequest(monitor.dataSources.alertsIndex) + .routing(monitor.id) + .id(updatedAlert.id) + .source(updatedAlert.toXContentWithUser(XContentFactory.jsonBuilder())) + .opType(DocWriteRequest.OpType.INDEX) + } + + val bulkResponse: BulkResponse = client.suspendUntil { + bulk(BulkRequest().add(indexRequests).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), it) + } + if (bulkResponse.hasFailures()) { + bulkResponse.items.forEach { item -> + if (item.isFailed) { + logger.debug("Failed clearing error alert ${item.id} of monitor [${monitor.id}]") + } + } + } else { + logger.debug("[${bulkResponse.items.size}] Error Alerts successfully cleared. End time set to: $currentTime") + } + } catch (e: Exception) { + logger.error("Error clearing monitor error alerts for monitor [${monitor.id}]: ${ExceptionsHelper.detailedMessage(e)}") + } + } + + /** + * Moves already cleared "error alerts" to history index.
+ * Error Alert is cleared when endTime timestamp is set, on first successful run after failed run + * */ + suspend fun moveClearedErrorAlertsToHistory(monitorId: String, alertIndex: String, alertHistoryIndex: String) { + try { + val searchRequest = SearchRequest(alertIndex) + .source( + SearchSourceBuilder() + .size(MAX_SEARCH_SIZE) + .query( + QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitorId)) + .must(QueryBuilders.termQuery(Alert.STATE_FIELD, Alert.State.ERROR.name)) + .must(QueryBuilders.existsQuery(Alert.END_TIME_FIELD)) + ) + .version(true) // Do we need this? + ) + searchRequest.cancelAfterTimeInterval = ALERTS_SEARCH_TIMEOUT + val searchResponse: SearchResponse = client.suspendUntil { search(searchRequest, it) } + + if (searchResponse.hits.totalHits.value == 0L) { + return + } + + // Copy to history index + + val copyRequests = mutableListOf<IndexRequest>() + + searchResponse.hits.hits.forEach { hit -> + + val xcp = contentParser(hit.sourceRef) + val alert = Alert.parse(xcp, hit.id, hit.version) + + copyRequests.add( + IndexRequest(alertHistoryIndex) + .routing(alert.monitorId) + .source(hit.sourceRef, XContentType.JSON) + .version(hit.version) + .versionType(VersionType.EXTERNAL_GTE) + .id(hit.id) + .timeout(MonitorRunnerService.monitorCtx.indexTimeout) + ) + } + + val bulkResponse: BulkResponse = client.suspendUntil { + bulk(BulkRequest().add(copyRequests).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), it) + } + if (bulkResponse.hasFailures()) { + bulkResponse.items.forEach { item -> + if (item.isFailed) { + logger.error("Failed copying error alert [${item.id}] to history index [$alertHistoryIndex]") + } + } + return + } + + // Delete from alertIndex + + val alertIds = searchResponse.hits.hits.map { it.id } + + val deleteResponse: BulkByScrollResponse = suspendCoroutine { cont -> + DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE) + .source(alertIndex) + .filter(QueryBuilders.termsQuery("_id", alertIds)) + .refresh(true) + .timeout(ALERTS_SEARCH_TIMEOUT) + .execute( + object : ActionListener<BulkByScrollResponse> { + override fun onResponse(response: BulkByScrollResponse) = cont.resume(response) + override fun onFailure(t: Exception) = cont.resumeWithException(t) + } + ) + } + deleteResponse.bulkFailures.forEach { + logger.error("Failed deleting alert while moving cleared alerts: [${it.id}] cause: [${it.cause}] ") + } + } catch (e: Exception) { + logger.error("Failed moving cleared error alerts to history index: ${ExceptionsHelper.detailedMessage(e)}") + } + } + + suspend fun saveAlerts( + dataSources: DataSources, + alerts: List<Alert>, + retryPolicy: BackoffPolicy, + allowUpdatingAcknowledgedAlert: Boolean = false, + routingId: String // routing is mandatory and set as monitor id. for workflow chained alerts we pass workflow id as routing + ) { + val alertsIndex = dataSources.alertsIndex + val alertsHistoryIndex = dataSources.alertsHistoryIndex + var requestsToRetry = alerts.flatMap { alert -> // We don't want to set the version when saving alerts because the MonitorRunner has first priority when writing alerts.
// In the rare event that a user acknowledges an alert between when it's read and when it's written @@ -286,8 +692,8 @@ class AlertService( when (alert.state) { Alert.State.ACTIVE, Alert.State.ERROR -> { listOf>( - IndexRequest(AlertIndices.ALERT_INDEX) - .routing(alert.monitorId) + IndexRequest(alertsIndex) + .routing(routingId) .source(alert.toXContentWithUser(XContentFactory.jsonBuilder())) .id(if (alert.id != Alert.NO_ID) alert.id else null) ) @@ -297,8 +703,8 @@ class AlertService( // and updated by the MonitorRunner if (allowUpdatingAcknowledgedAlert) { listOf>( - IndexRequest(AlertIndices.ALERT_INDEX) - .routing(alert.monitorId) + IndexRequest(alertsIndex) + .routing(routingId) .source(alert.toXContentWithUser(XContentFactory.jsonBuilder())) .id(if (alert.id != Alert.NO_ID) alert.id else null) ) @@ -306,17 +712,28 @@ class AlertService( throw IllegalStateException("Unexpected attempt to save ${alert.state} alert: $alert") } } + Alert.State.AUDIT -> { + val index = if (alertIndices.isAlertHistoryEnabled()) { + dataSources.alertsHistoryIndex + } else dataSources.alertsIndex + listOf>( + IndexRequest(index) + .routing(routingId) + .source(alert.toXContentWithUser(XContentFactory.jsonBuilder())) + .id(if (alert.id != Alert.NO_ID) alert.id else null) + ) + } Alert.State.DELETED -> { throw IllegalStateException("Unexpected attempt to save ${alert.state} alert: $alert") } Alert.State.COMPLETED -> { listOfNotNull>( - DeleteRequest(AlertIndices.ALERT_INDEX, alert.id) - .routing(alert.monitorId), + DeleteRequest(alertsIndex, alert.id) + .routing(routingId), // Only add completed alert to history index if history is enabled if (alertIndices.isAlertHistoryEnabled()) { - IndexRequest(AlertIndices.ALERT_HISTORY_WRITE_INDEX) - .routing(alert.monitorId) + IndexRequest(alertsHistoryIndex) + .routing(routingId) .source(alert.toXContentWithUser(XContentFactory.jsonBuilder())) .id(alert.id) } else null @@ -328,7 +745,7 @@ class AlertService( if (requestsToRetry.isEmpty()) return // Retry Bulk requests if there was any 429 response retryPolicy.retry(logger, listOf(RestStatus.TOO_MANY_REQUESTS)) { - val bulkRequest = BulkRequest().add(requestsToRetry) + val bulkRequest = BulkRequest().add(requestsToRetry).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) val bulkResponse: BulkResponse = client.suspendUntil { client.bulk(bulkRequest, it) } val failedResponses = (bulkResponse.items ?: arrayOf()).filter { it.isFailed } requestsToRetry = failedResponses.filter { it.status() == RestStatus.TOO_MANY_REQUESTS } @@ -344,22 +761,25 @@ class AlertService( /** * This is a separate method created specifically for saving new Alerts during the Bucket-Level Monitor run. * Alerts are saved in two batches during the execution of an Bucket-Level Monitor, once before the Actions are executed - * and once afterwards. This method saves Alerts to the [AlertIndices.ALERT_INDEX] but returns the same Alerts with their document IDs. + * and once afterwards. This method saves Alerts to the monitor's alertIndex but returns the same Alerts with their document IDs. * * The Alerts are required with their indexed ID so that when the new Alerts are updated after the Action execution, * the ID is available for the index request so that the existing Alert can be updated, instead of creating a duplicate Alert document. 
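
The retry loop above is the pattern used for all alert writes in this file: re-issue only the bulk items that came back throttled, and let the BackoffPolicy decide the delays. A sketch of that contract, assuming the retry extension (imported from org.opensearch.alerting.opensearchapi elsewhere in this patch) re-runs its block with backoff for as long as the block throws an exception mapped to one of the allowed statuses:

    import org.apache.logging.log4j.LogManager
    import org.opensearch.ExceptionsHelper
    import org.opensearch.action.DocWriteRequest
    import org.opensearch.action.bulk.BackoffPolicy
    import org.opensearch.action.bulk.BulkRequest
    import org.opensearch.action.bulk.BulkResponse
    import org.opensearch.alerting.opensearchapi.retry
    import org.opensearch.alerting.opensearchapi.suspendUntil
    import org.opensearch.client.Client
    import org.opensearch.core.rest.RestStatus

    private val logger = LogManager.getLogger("BulkRetry")

    // Re-sends only the 429-throttled items of a bulk request. Items failing
    // with any other status are dropped here for brevity; the real saveAlerts
    // surfaces those failures to the caller instead.
    suspend fun retryThrottled(client: Client, retryPolicy: BackoffPolicy, requests: List<DocWriteRequest<*>>) {
        var requestsToRetry = requests
        retryPolicy.retry(logger, listOf(RestStatus.TOO_MANY_REQUESTS)) {
            val bulkRequest = BulkRequest().add(requestsToRetry)
            val bulkResponse: BulkResponse = client.suspendUntil { client.bulk(bulkRequest, it) }
            val throttled = (bulkResponse.items ?: arrayOf())
                .filter { it.isFailed && it.status() == RestStatus.TOO_MANY_REQUESTS }
            requestsToRetry = throttled.map { bulkRequest.requests()[it.itemId] }
            if (requestsToRetry.isNotEmpty()) {
                // Throwing the 429-derived cause is what makes retry() back off and re-run the block.
                throw ExceptionsHelper.convertToOpenSearch(throttled.first().failure.cause)
            }
        }
    }
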
*/ - suspend fun saveNewAlerts(alerts: List, retryPolicy: BackoffPolicy): List { + suspend fun saveNewAlerts(dataSources: DataSources, alerts: List, retryPolicy: BackoffPolicy): List { val savedAlerts = mutableListOf() var alertsBeingIndexed = alerts var requestsToRetry: MutableList = alerts.map { alert -> - if (alert.state != Alert.State.ACTIVE) { + if (alert.state != Alert.State.ACTIVE && alert.state != Alert.State.AUDIT) { throw IllegalStateException("Unexpected attempt to save new alert [$alert] with state [${alert.state}]") } if (alert.id != Alert.NO_ID) { throw IllegalStateException("Unexpected attempt to save new alert [$alert] with an existing alert ID [${alert.id}]") } - IndexRequest(AlertIndices.ALERT_INDEX) + val alertIndex = if (alert.state == Alert.State.AUDIT && alertIndices.isAlertHistoryEnabled()) { + dataSources.alertsHistoryIndex + } else dataSources.alertsIndex + IndexRequest(alertIndex) .routing(alert.monitorId) .source(alert.toXContentWithUser(XContentFactory.jsonBuilder())) }.toMutableList() @@ -372,7 +792,7 @@ class AlertService( // If the index request is to be retried, the Alert is saved separately as well so that its relative ordering is maintained in // relation to index request in the retried bulk request for when it eventually succeeds. retryPolicy.retry(logger, listOf(RestStatus.TOO_MANY_REQUESTS)) { - val bulkRequest = BulkRequest().add(requestsToRetry) + val bulkRequest = BulkRequest().add(requestsToRetry).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) val bulkResponse: BulkResponse = client.suspendUntil { client.bulk(bulkRequest, it) } // TODO: This is only used to retrieve the retryCause, could instead fetch it from the bulkResponse iteration below val failedResponses = (bulkResponse.items ?: arrayOf()).filter { it.isFailed } @@ -413,20 +833,25 @@ class AlertService( } /** - * Searches for Alerts in the [AlertIndices.ALERT_INDEX]. + * Searches for Alerts in the monitor's alertIndex. * * @param monitorId The Monitor to get Alerts for * @param size The number of search hits (Alerts) to return */ - private suspend fun searchAlerts(monitorId: String, size: Int): SearchResponse { - val queryBuilder = QueryBuilders.boolQuery() - .filter(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitorId)) + private suspend fun searchAlerts(monitor: Monitor, size: Int, workflowRunContext: WorkflowRunContext?): SearchResponse { + val monitorId = monitor.id + val alertIndex = monitor.dataSources.alertsIndex + val queryBuilder = QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitorId)) + if (workflowRunContext != null) { + queryBuilder.must(QueryBuilders.termQuery(Alert.WORKFLOW_ID_FIELD, workflowRunContext.workflowId)) + } val searchSourceBuilder = SearchSourceBuilder() .size(size) .query(queryBuilder) - val searchRequest = SearchRequest(AlertIndices.ALERT_INDEX) + val searchRequest = SearchRequest(alertIndex) .routing(monitorId) .source(searchSourceBuilder) val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } @@ -437,6 +862,37 @@ class AlertService( return searchResponse } + /** + * Searches for ACTIVE/ACKNOWLEDGED chained alerts in the workflow's alertIndex. 
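
The saveNewAlerts change above gives AUDIT alerts special treatment: they are write-once records emitted when a monitor runs as a workflow delegate, so they go straight to the history index whenever history is enabled instead of the live alerts index. A small hypothetical helper capturing that routing decision (chooseAlertIndex is illustrative, not plugin API):

    import org.opensearch.commons.alerting.model.Alert
    import org.opensearch.commons.alerting.model.DataSources

    // AUDIT alerts never need acknowledging, so they bypass the live index
    // when a history index is available; all other states stay live.
    fun chooseAlertIndex(alert: Alert, dataSources: DataSources, historyEnabled: Boolean): String =
        if (alert.state == Alert.State.AUDIT && historyEnabled) {
            dataSources.alertsHistoryIndex ?: dataSources.alertsIndex
        } else {
            dataSources.alertsIndex
        }

The same branch appears in the AUDIT arm of saveAlerts earlier in this file, which keeps workflow audit trails out of the set of open alerts users can acknowledge.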
+ * + * @param monitorId The Monitor to get Alerts for + * @param size The number of search hits (Alerts) to return + */ + private suspend fun searchAlerts( + workflow: Workflow, + size: Int, + dataSources: DataSources, + ): SearchResponse { + val workflowId = workflow.id + val alertIndex = dataSources.alertsIndex + + val queryBuilder = QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery(Alert.WORKFLOW_ID_FIELD, workflowId)) + .must(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, "")) + val searchSourceBuilder = SearchSourceBuilder() + .size(size) + .query(queryBuilder) + + val searchRequest = SearchRequest(alertIndex) + .routing(workflowId) + .source(searchSourceBuilder) + val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } + if (searchResponse.status() != RestStatus.OK) { + throw (searchResponse.firstFailureOrNull()?.cause ?: RuntimeException("Unknown error loading alerts")) + } + return searchResponse + } + private fun List?.update(alertError: AlertError?): List { return when { this == null && alertError == null -> emptyList() diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/AlertingPlugin.kt b/alerting/src/main/kotlin/org/opensearch/alerting/AlertingPlugin.kt index 89ed19c23..ea01b2524 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/AlertingPlugin.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/AlertingPlugin.kt @@ -6,66 +6,66 @@ package org.opensearch.alerting import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionResponse -import org.opensearch.alerting.action.AcknowledgeAlertAction -import org.opensearch.alerting.action.DeleteMonitorAction import org.opensearch.alerting.action.ExecuteMonitorAction -import org.opensearch.alerting.action.GetAlertsAction +import org.opensearch.alerting.action.ExecuteWorkflowAction import org.opensearch.alerting.action.GetDestinationsAction import org.opensearch.alerting.action.GetEmailAccountAction import org.opensearch.alerting.action.GetEmailGroupAction -import org.opensearch.alerting.action.GetFindingsAction -import org.opensearch.alerting.action.GetMonitorAction -import org.opensearch.alerting.action.IndexMonitorAction +import org.opensearch.alerting.action.GetRemoteIndexesAction import org.opensearch.alerting.action.SearchEmailAccountAction import org.opensearch.alerting.action.SearchEmailGroupAction -import org.opensearch.alerting.action.SearchMonitorAction -import org.opensearch.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder import org.opensearch.alerting.alerts.AlertIndices import org.opensearch.alerting.core.JobSweeper import org.opensearch.alerting.core.ScheduledJobIndices import org.opensearch.alerting.core.action.node.ScheduledJobsStatsAction import org.opensearch.alerting.core.action.node.ScheduledJobsStatsTransportAction -import org.opensearch.alerting.core.model.ClusterMetricsInput -import org.opensearch.alerting.core.model.DocLevelMonitorInput -import org.opensearch.alerting.core.model.ScheduledJob -import org.opensearch.alerting.core.model.SearchInput import org.opensearch.alerting.core.resthandler.RestScheduledJobStatsHandler import org.opensearch.alerting.core.schedule.JobScheduler import org.opensearch.alerting.core.settings.LegacyOpenDistroScheduledJobSettings import org.opensearch.alerting.core.settings.ScheduledJobSettings -import org.opensearch.alerting.model.BucketLevelTrigger -import org.opensearch.alerting.model.DocumentLevelTrigger -import org.opensearch.alerting.model.Monitor -import 
org.opensearch.alerting.model.QueryLevelTrigger import org.opensearch.alerting.resthandler.RestAcknowledgeAlertAction +import org.opensearch.alerting.resthandler.RestAcknowledgeChainedAlertAction import org.opensearch.alerting.resthandler.RestDeleteMonitorAction +import org.opensearch.alerting.resthandler.RestDeleteWorkflowAction import org.opensearch.alerting.resthandler.RestExecuteMonitorAction +import org.opensearch.alerting.resthandler.RestExecuteWorkflowAction import org.opensearch.alerting.resthandler.RestGetAlertsAction import org.opensearch.alerting.resthandler.RestGetDestinationsAction import org.opensearch.alerting.resthandler.RestGetEmailAccountAction import org.opensearch.alerting.resthandler.RestGetEmailGroupAction import org.opensearch.alerting.resthandler.RestGetFindingsAction import org.opensearch.alerting.resthandler.RestGetMonitorAction +import org.opensearch.alerting.resthandler.RestGetRemoteIndexesAction +import org.opensearch.alerting.resthandler.RestGetWorkflowAction +import org.opensearch.alerting.resthandler.RestGetWorkflowAlertsAction import org.opensearch.alerting.resthandler.RestIndexMonitorAction +import org.opensearch.alerting.resthandler.RestIndexWorkflowAction import org.opensearch.alerting.resthandler.RestSearchEmailAccountAction import org.opensearch.alerting.resthandler.RestSearchEmailGroupAction import org.opensearch.alerting.resthandler.RestSearchMonitorAction import org.opensearch.alerting.script.TriggerScript +import org.opensearch.alerting.service.DeleteMonitorService import org.opensearch.alerting.settings.AlertingSettings import org.opensearch.alerting.settings.DestinationSettings import org.opensearch.alerting.settings.LegacyOpenDistroAlertingSettings import org.opensearch.alerting.settings.LegacyOpenDistroDestinationSettings import org.opensearch.alerting.transport.TransportAcknowledgeAlertAction +import org.opensearch.alerting.transport.TransportAcknowledgeChainedAlertAction import org.opensearch.alerting.transport.TransportDeleteMonitorAction +import org.opensearch.alerting.transport.TransportDeleteWorkflowAction import org.opensearch.alerting.transport.TransportExecuteMonitorAction +import org.opensearch.alerting.transport.TransportExecuteWorkflowAction import org.opensearch.alerting.transport.TransportGetAlertsAction import org.opensearch.alerting.transport.TransportGetDestinationsAction import org.opensearch.alerting.transport.TransportGetEmailAccountAction import org.opensearch.alerting.transport.TransportGetEmailGroupAction import org.opensearch.alerting.transport.TransportGetFindingsSearchAction import org.opensearch.alerting.transport.TransportGetMonitorAction +import org.opensearch.alerting.transport.TransportGetRemoteIndexesAction +import org.opensearch.alerting.transport.TransportGetWorkflowAction +import org.opensearch.alerting.transport.TransportGetWorkflowAlertsAction import org.opensearch.alerting.transport.TransportIndexMonitorAction +import org.opensearch.alerting.transport.TransportIndexWorkflowAction import org.opensearch.alerting.transport.TransportSearchEmailAccountAction import org.opensearch.alerting.transport.TransportSearchEmailGroupAction import org.opensearch.alerting.transport.TransportSearchMonitorAction @@ -75,21 +75,34 @@ import org.opensearch.client.Client import org.opensearch.cluster.metadata.IndexNameExpressionResolver import org.opensearch.cluster.node.DiscoveryNodes import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.io.stream.NamedWriteableRegistry -import 
org.opensearch.common.io.stream.StreamInput import org.opensearch.common.settings.ClusterSettings import org.opensearch.common.settings.IndexScopedSettings import org.opensearch.common.settings.Setting import org.opensearch.common.settings.Settings import org.opensearch.common.settings.SettingsFilter -import org.opensearch.common.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.XContentParser +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder +import org.opensearch.commons.alerting.model.BucketLevelTrigger +import org.opensearch.commons.alerting.model.ChainedAlertTrigger +import org.opensearch.commons.alerting.model.ClusterMetricsInput +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocumentLevelTrigger +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.QueryLevelTrigger +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.core.action.ActionResponse +import org.opensearch.core.common.io.stream.NamedWriteableRegistry +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser import org.opensearch.env.Environment import org.opensearch.env.NodeEnvironment import org.opensearch.index.IndexModule +import org.opensearch.painless.spi.Allowlist +import org.opensearch.painless.spi.AllowlistLoader import org.opensearch.painless.spi.PainlessExtension -import org.opensearch.painless.spi.Whitelist -import org.opensearch.painless.spi.WhitelistLoader import org.opensearch.percolator.PercolatorPluginExt import org.opensearch.plugins.ActionPlugin import org.opensearch.plugins.ReloadablePlugin @@ -112,8 +125,8 @@ import java.util.function.Supplier */ internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, ReloadablePlugin, SearchPlugin, PercolatorPluginExt() { - override fun getContextWhitelists(): Map, List> { - val whitelist = WhitelistLoader.loadFromResourceFiles(javaClass, "org.opensearch.alerting.txt") + override fun getContextAllowlists(): Map, List> { + val whitelist = AllowlistLoader.loadFromResourceFiles(javaClass, "org.opensearch.alerting.txt") return mapOf(TriggerScript.CONTEXT to listOf(whitelist)) } @@ -121,6 +134,8 @@ internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, R @JvmField val OPEN_SEARCH_DASHBOARDS_USER_AGENT = "OpenSearch-Dashboards" @JvmField val UI_METADATA_EXCLUDE = arrayOf("monitor.${Monitor.UI_METADATA_FIELD}") @JvmField val MONITOR_BASE_URI = "/_plugins/_alerting/monitors" + @JvmField val WORKFLOW_BASE_URI = "/_plugins/_alerting/workflows" + @JvmField val REMOTE_BASE_URI = "/_plugins/_alerting/remote" @JvmField val DESTINATION_BASE_URI = "/_plugins/_alerting/destinations" @JvmField val LEGACY_OPENDISTRO_MONITOR_BASE_URI = "/_opendistro/_alerting/monitors" @JvmField val LEGACY_OPENDISTRO_DESTINATION_BASE_URI = "/_opendistro/_alerting/destinations" @@ -129,7 +144,8 @@ internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, R @JvmField val LEGACY_OPENDISTRO_EMAIL_ACCOUNT_BASE_URI = "$LEGACY_OPENDISTRO_DESTINATION_BASE_URI/email_accounts" @JvmField val LEGACY_OPENDISTRO_EMAIL_GROUP_BASE_URI = 
"$LEGACY_OPENDISTRO_DESTINATION_BASE_URI/email_groups" @JvmField val FINDING_BASE_URI = "/_plugins/_alerting/findings" - @JvmField val ALERTING_JOB_TYPES = listOf("monitor") + + @JvmField val ALERTING_JOB_TYPES = listOf("monitor", "workflow") } lateinit var runner: MonitorRunnerService @@ -155,9 +171,12 @@ internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, R RestGetMonitorAction(), RestDeleteMonitorAction(), RestIndexMonitorAction(), + RestIndexWorkflowAction(), RestSearchMonitorAction(settings, clusterService), RestExecuteMonitorAction(), + RestExecuteWorkflowAction(), RestAcknowledgeAlertAction(), + RestAcknowledgeChainedAlertAction(), RestScheduledJobStatsHandler("_alerting"), RestSearchEmailAccountAction(), RestGetEmailAccountAction(), @@ -165,27 +184,39 @@ internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, R RestGetEmailGroupAction(), RestGetDestinationsAction(), RestGetAlertsAction(), - RestGetFindingsAction() + RestGetWorkflowAlertsAction(), + RestGetFindingsAction(), + RestGetWorkflowAction(), + RestDeleteWorkflowAction(), + RestGetRemoteIndexesAction(), ) } override fun getActions(): List> { return listOf( ActionPlugin.ActionHandler(ScheduledJobsStatsAction.INSTANCE, ScheduledJobsStatsTransportAction::class.java), - ActionPlugin.ActionHandler(IndexMonitorAction.INSTANCE, TransportIndexMonitorAction::class.java), - ActionPlugin.ActionHandler(GetMonitorAction.INSTANCE, TransportGetMonitorAction::class.java), + ActionPlugin.ActionHandler(AlertingActions.INDEX_MONITOR_ACTION_TYPE, TransportIndexMonitorAction::class.java), + ActionPlugin.ActionHandler(AlertingActions.GET_MONITOR_ACTION_TYPE, TransportGetMonitorAction::class.java), ActionPlugin.ActionHandler(ExecuteMonitorAction.INSTANCE, TransportExecuteMonitorAction::class.java), - ActionPlugin.ActionHandler(SearchMonitorAction.INSTANCE, TransportSearchMonitorAction::class.java), - ActionPlugin.ActionHandler(DeleteMonitorAction.INSTANCE, TransportDeleteMonitorAction::class.java), - ActionPlugin.ActionHandler(AcknowledgeAlertAction.INSTANCE, TransportAcknowledgeAlertAction::class.java), + ActionPlugin.ActionHandler(AlertingActions.SEARCH_MONITORS_ACTION_TYPE, TransportSearchMonitorAction::class.java), + ActionPlugin.ActionHandler(AlertingActions.DELETE_MONITOR_ACTION_TYPE, TransportDeleteMonitorAction::class.java), + ActionPlugin.ActionHandler(AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE, TransportAcknowledgeAlertAction::class.java), + ActionPlugin.ActionHandler( + AlertingActions.ACKNOWLEDGE_CHAINED_ALERTS_ACTION_TYPE, TransportAcknowledgeChainedAlertAction::class.java + ), ActionPlugin.ActionHandler(GetEmailAccountAction.INSTANCE, TransportGetEmailAccountAction::class.java), ActionPlugin.ActionHandler(SearchEmailAccountAction.INSTANCE, TransportSearchEmailAccountAction::class.java), ActionPlugin.ActionHandler(GetEmailGroupAction.INSTANCE, TransportGetEmailGroupAction::class.java), ActionPlugin.ActionHandler(SearchEmailGroupAction.INSTANCE, TransportSearchEmailGroupAction::class.java), ActionPlugin.ActionHandler(GetDestinationsAction.INSTANCE, TransportGetDestinationsAction::class.java), - ActionPlugin.ActionHandler(GetAlertsAction.INSTANCE, TransportGetAlertsAction::class.java), - ActionPlugin.ActionHandler(GetFindingsAction.INSTANCE, TransportGetFindingsSearchAction::class.java) - + ActionPlugin.ActionHandler(AlertingActions.GET_ALERTS_ACTION_TYPE, TransportGetAlertsAction::class.java), + ActionPlugin.ActionHandler(AlertingActions.GET_WORKFLOW_ALERTS_ACTION_TYPE, 
TransportGetWorkflowAlertsAction::class.java), + ActionPlugin.ActionHandler(AlertingActions.GET_FINDINGS_ACTION_TYPE, TransportGetFindingsSearchAction::class.java), + ActionPlugin.ActionHandler(AlertingActions.INDEX_WORKFLOW_ACTION_TYPE, TransportIndexWorkflowAction::class.java), + ActionPlugin.ActionHandler(AlertingActions.GET_WORKFLOW_ACTION_TYPE, TransportGetWorkflowAction::class.java), + ActionPlugin.ActionHandler(AlertingActions.DELETE_WORKFLOW_ACTION_TYPE, TransportDeleteWorkflowAction::class.java), + ActionPlugin.ActionHandler(ExecuteWorkflowAction.INSTANCE, TransportExecuteWorkflowAction::class.java), + ActionPlugin.ActionHandler(GetRemoteIndexesAction.INSTANCE, TransportGetRemoteIndexesAction::class.java), ) } @@ -197,7 +228,9 @@ internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, R QueryLevelTrigger.XCONTENT_REGISTRY, BucketLevelTrigger.XCONTENT_REGISTRY, ClusterMetricsInput.XCONTENT_REGISTRY, - DocumentLevelTrigger.XCONTENT_REGISTRY + DocumentLevelTrigger.XCONTENT_REGISTRY, + ChainedAlertTrigger.XCONTENT_REGISTRY, + Workflow.XCONTENT_REGISTRY ) } @@ -221,14 +254,16 @@ internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, R .registerClusterService(clusterService) .registerClient(client) .registerNamedXContentRegistry(xContentRegistry) + .registerindexNameExpressionResolver(indexNameExpressionResolver) .registerScriptService(scriptService) .registerSettings(settings) .registerThreadPool(threadPool) .registerAlertIndices(alertIndices) - .registerInputService(InputService(client, scriptService, namedWriteableRegistry, xContentRegistry)) + .registerInputService(InputService(client, scriptService, namedWriteableRegistry, xContentRegistry, clusterService, settings)) .registerTriggerService(TriggerService(scriptService)) .registerAlertService(AlertService(client, xContentRegistry, alertIndices)) .registerDocLevelMonitorQueries(DocLevelMonitorQueries(client, clusterService)) + .registerWorkflowService(WorkflowService(client, xContentRegistry)) .registerConsumers() .registerDestinationSettings() scheduledJobIndices = ScheduledJobIndices(client.admin(), clusterService) @@ -238,6 +273,23 @@ internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, R destinationMigrationCoordinator = DestinationMigrationCoordinator(client, clusterService, threadPool, scheduledJobIndices) this.threadPool = threadPool this.clusterService = clusterService + + MonitorMetadataService.initialize( + client, + clusterService, + xContentRegistry, + settings + ) + + WorkflowMetadataService.initialize( + client, + clusterService, + xContentRegistry, + settings + ) + + DeleteMonitorService.initialize(client) + return listOf(sweeper, scheduler, runner, scheduledJobIndices, docLevelMonitorQueries, destinationMigrationCoordinator) } @@ -300,7 +352,9 @@ internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, R AlertingSettings.FINDING_HISTORY_MAX_DOCS, AlertingSettings.FINDING_HISTORY_INDEX_MAX_AGE, AlertingSettings.FINDING_HISTORY_ROLLOVER_PERIOD, - AlertingSettings.FINDING_HISTORY_RETENTION_PERIOD + AlertingSettings.FINDING_HISTORY_RETENTION_PERIOD, + AlertingSettings.FINDINGS_INDEXING_BATCH_SIZE, + AlertingSettings.REMOTE_MONITORING_ENABLED ) } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/BucketLevelMonitorRunner.kt b/alerting/src/main/kotlin/org/opensearch/alerting/BucketLevelMonitorRunner.kt index 113f77a0c..e960b9da5 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/BucketLevelMonitorRunner.kt 
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/BucketLevelMonitorRunner.kt @@ -6,24 +6,51 @@ package org.opensearch.alerting import org.apache.logging.log4j.LogManager +import org.opensearch.action.bulk.BulkRequest +import org.opensearch.action.bulk.BulkResponse +import org.opensearch.action.index.IndexRequest +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.WriteRequest import org.opensearch.alerting.model.ActionRunResult -import org.opensearch.alerting.model.Alert -import org.opensearch.alerting.model.BucketLevelTrigger import org.opensearch.alerting.model.BucketLevelTriggerRunResult import org.opensearch.alerting.model.InputRunResults -import org.opensearch.alerting.model.Monitor import org.opensearch.alerting.model.MonitorRunResult -import org.opensearch.alerting.model.action.AlertCategory -import org.opensearch.alerting.model.action.PerAlertActionScope -import org.opensearch.alerting.model.action.PerExecutionActionScope import org.opensearch.alerting.opensearchapi.InjectorContextElement +import org.opensearch.alerting.opensearchapi.retry +import org.opensearch.alerting.opensearchapi.suspendUntil import org.opensearch.alerting.opensearchapi.withClosableContext import org.opensearch.alerting.script.BucketLevelTriggerExecutionContext import org.opensearch.alerting.util.defaultToPerExecutionAction import org.opensearch.alerting.util.getActionExecutionPolicy import org.opensearch.alerting.util.getBucketKeysHash import org.opensearch.alerting.util.getCombinedTriggerRunResult +import org.opensearch.alerting.workflow.WorkflowRunContext +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.BucketLevelTrigger +import org.opensearch.commons.alerting.model.Finding +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.alerting.model.action.AlertCategory +import org.opensearch.commons.alerting.model.action.PerAlertActionScope +import org.opensearch.commons.alerting.model.action.PerExecutionActionScope +import org.opensearch.commons.alerting.util.string +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.index.query.BoolQueryBuilder +import org.opensearch.index.query.QueryBuilders +import org.opensearch.script.Script +import org.opensearch.script.ScriptType +import org.opensearch.script.TemplateScript +import org.opensearch.search.aggregations.AggregatorFactories +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder +import org.opensearch.search.builder.SearchSourceBuilder import java.time.Instant +import java.util.UUID object BucketLevelMonitorRunner : MonitorRunner() { private val logger = LogManager.getLogger(javaClass) @@ -33,7 +60,9 @@ object BucketLevelMonitorRunner : MonitorRunner() { monitorCtx: MonitorRunnerExecutionContext, periodStart: Instant, periodEnd: Instant, - dryrun: Boolean + dryrun: Boolean, + workflowRunContext: WorkflowRunContext?, + executionId: String ): MonitorRunResult { val roles = MonitorRunnerService.getRolesForMonitor(monitor) logger.debug("Running monitor: ${monitor.name} with roles: $roles Thread: 
${Thread.currentThread().name}") @@ -44,9 +73,12 @@ object BucketLevelMonitorRunner : MonitorRunner() { var monitorResult = MonitorRunResult(monitor.name, periodStart, periodEnd) val currentAlerts = try { - monitorCtx.alertIndices!!.createOrUpdateAlertIndex() - monitorCtx.alertIndices!!.createOrUpdateInitialAlertHistoryIndex() - monitorCtx.alertService!!.loadCurrentAlertsForBucketLevelMonitor(monitor) + monitorCtx.alertIndices!!.createOrUpdateAlertIndex(monitor.dataSources) + monitorCtx.alertIndices!!.createOrUpdateInitialAlertHistoryIndex(monitor.dataSources) + if (monitor.dataSources.findingsEnabled == true) { + monitorCtx.alertIndices!!.createOrUpdateInitialFindingHistoryIndex(monitor.dataSources) + } + monitorCtx.alertService!!.loadCurrentAlertsForBucketLevelMonitor(monitor, workflowRunContext) } catch (e: Exception) { // We can't save ERROR alerts to the index here as we don't know if there are existing ACTIVE alerts val id = if (monitor.id.trim().isEmpty()) "_na_" else monitor.id @@ -89,7 +121,8 @@ object BucketLevelMonitorRunner : MonitorRunner() { monitor, periodStart, periodEnd, - monitorResult.inputResults + monitorResult.inputResults, + workflowRunContext ) if (firstIteration) { firstPageOfInputResults = inputResults @@ -116,11 +149,31 @@ object BucketLevelMonitorRunner : MonitorRunner() { * existing Alerts in a way the user can easily view them since they will have all been moved to the history index. */ if (triggerResults[trigger.id]?.error != null) continue - + val findings = + if (monitor.triggers.size == 1 && monitor.dataSources.findingsEnabled == true) { + logger.debug("Creating bucket level findings") + createFindings( + triggerResult, + monitor, + monitorCtx, + periodStart, + periodEnd, + !dryrun && monitor.id != Monitor.NO_ID, + executionId + ) + } else { + emptyList() + } // TODO: Should triggerResult's aggregationResultBucket be a list? If not, getCategorizedAlertsForBucketLevelMonitor can // be refactored to use a map instead val categorizedAlerts = monitorCtx.alertService!!.getCategorizedAlertsForBucketLevelMonitor( - monitor, trigger, currentAlertsForTrigger, triggerResult.aggregationResultBuckets.values.toList() + monitor, + trigger, + currentAlertsForTrigger, + triggerResult.aggregationResultBuckets.values.toList(), + findings, + executionId, + workflowRunContext ).toMutableMap() val dedupedAlerts = categorizedAlerts.getOrDefault(AlertCategory.DEDUPED, emptyList()) var newAlerts = categorizedAlerts.getOrDefault(AlertCategory.NEW, emptyList()) @@ -135,8 +188,14 @@ object BucketLevelMonitorRunner : MonitorRunner() { * will still execute with the Alert information in the ctx but the Alerts may not be visible. */ if (!dryrun && monitor.id != Monitor.NO_ID) { - monitorCtx.alertService!!.saveAlerts(dedupedAlerts, monitorCtx.retryPolicy!!, allowUpdatingAcknowledgedAlert = true) - newAlerts = monitorCtx.alertService!!.saveNewAlerts(newAlerts, monitorCtx.retryPolicy!!) + monitorCtx.alertService!!.saveAlerts( + monitor.dataSources, + dedupedAlerts, + monitorCtx.retryPolicy!!, + allowUpdatingAcknowledgedAlert = true, + monitor.id + ) + newAlerts = monitorCtx.alertService!!.saveNewAlerts(monitor.dataSources, newAlerts, monitorCtx.retryPolicy!!) } // Store deduped and new Alerts to accumulate across pages @@ -269,12 +328,17 @@ object BucketLevelMonitorRunner : MonitorRunner() { // Update Alerts with action execution results (if it's not a test Monitor). // ACKNOWLEDGED Alerts should not be saved here since actions are not executed for them. 
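
Page-to-page dedup in the paging loop above hinges on getBucketKeysHash, imported from org.opensearch.alerting.util: the same aggregation bucket must map to the same key on every page of composite results. A sketch of the idea, with the joining separator an assumption rather than the plugin's actual choice:

    // Two buckets from different result pages describe the same group iff their
    // composite key values match; a joined string of those values is a cheap,
    // stable identity for set-membership checks.
    fun bucketKeysHash(bucketKeys: List<String>): String =
        bucketKeys.joinToString(separator = "#")

That stable identity is what lets dedupedAlerts and newAlerts accumulate across pages without double-counting a bucket already seen earlier in the same run.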
            if (!dryrun && monitor.id != Monitor.NO_ID) {
-                monitorCtx.alertService!!.saveAlerts(updatedAlerts, monitorCtx.retryPolicy!!, allowUpdatingAcknowledgedAlert = false)
+                monitorCtx.alertService!!.saveAlerts(
+                    monitor.dataSources, updatedAlerts, monitorCtx.retryPolicy!!, allowUpdatingAcknowledgedAlert = false,
+                    routingId = monitor.id
+                )
                 // Save any COMPLETED Alerts that were not covered in updatedAlerts
                 monitorCtx.alertService!!.saveAlerts(
+                    monitor.dataSources,
                     completedAlertsToUpdate.toList(),
                     monitorCtx.retryPolicy!!,
-                    allowUpdatingAcknowledgedAlert = false
+                    allowUpdatingAcknowledgedAlert = false,
+                    monitor.id
                 )
             }
         }
@@ -282,6 +346,138 @@
         return monitorResult.copy(inputResults = firstPageOfInputResults, triggerResults = triggerResults)
     }

+    private suspend fun createFindings(
+        triggerResult: BucketLevelTriggerRunResult,
+        monitor: Monitor,
+        monitorCtx: MonitorRunnerExecutionContext,
+        periodStart: Instant,
+        periodEnd: Instant,
+        shouldCreateFinding: Boolean,
+        executionId: String,
+    ): List<String> {
+        monitor.inputs.forEach { input ->
+            if (input is SearchInput) {
+                val bucketValues: Set<String> = triggerResult.aggregationResultBuckets.keys
+                val query = input.query
+                var fieldName = ""
+
+                for (aggFactory in (query.aggregations() as AggregatorFactories.Builder).aggregatorFactories) {
+                    when (aggFactory) {
+                        is CompositeAggregationBuilder -> {
+                            var groupByFields = 0 // if number of fields used to group by > 1 we won't calculate findings
+                            val sources = aggFactory.sources()
+                            for (source in sources) {
+                                if (groupByFields > 0) {
+                                    logger.error("groupByFields > 0. not generating findings for bucket level monitor ${monitor.id}")
+                                    return listOf()
+                                }
+                                groupByFields++
+                                fieldName = source.field()
+                            }
+                        }
+                        is TermsAggregationBuilder -> {
+                            fieldName = aggFactory.field()
+                        }
+                        else -> {
+                            logger.error(
+                                "Bucket level monitor findings supported only for composite and term aggs. Found [${aggFactory.type}]"
+                            )
+                            return listOf()
+                        }
+                    }
+                }
+                if (fieldName != "") {
+                    val searchParams = mapOf(
+                        "period_start" to periodStart.toEpochMilli(),
+                        "period_end" to periodEnd.toEpochMilli()
+                    )
+                    val searchSource = monitorCtx.scriptService!!.compile(
+                        Script(
+                            ScriptType.INLINE, Script.DEFAULT_TEMPLATE_LANG,
+                            query.toString(), searchParams
+                        ),
+                        TemplateScript.CONTEXT
+                    )
+                        .newInstance(searchParams)
+                        .execute()
+                    val sr = SearchRequest(*input.indices.toTypedArray())
+                    XContentType.JSON.xContent().createParser(monitorCtx.xContentRegistry, LoggingDeprecationHandler.INSTANCE, searchSource)
+                        .use {
+                            val source = SearchSourceBuilder.fromXContent(it)
+                            val queryBuilder = if (input.query.query() == null) BoolQueryBuilder()
+                            else QueryBuilders.boolQuery().must(source.query())
+                            queryBuilder.filter(QueryBuilders.termsQuery(fieldName, bucketValues))
+                            sr.source().query(queryBuilder)
+                        }
+                    val searchResponse: SearchResponse = monitorCtx.client!!.suspendUntil { monitorCtx.client!!.search(sr, it) }
+                    return createFindingPerIndex(searchResponse, monitor, monitorCtx, shouldCreateFinding, executionId)
+                } else {
+                    logger.error("Couldn't resolve groupBy field. Not generating bucket level monitor findings for monitor ${monitor.id}")
+                }
+            }
+        }
+        return listOf()
+    }
+
+    private suspend fun createFindingPerIndex(
+        searchResponse: SearchResponse,
+        monitor: Monitor,
+        monitorCtx: MonitorRunnerExecutionContext,
+        shouldCreateFinding: Boolean,
+        workflowExecutionId: String?
= null + ): List { + val docIdsByIndexName: MutableMap> = mutableMapOf() + for (hit in searchResponse.hits.hits) { + val ids = docIdsByIndexName.getOrDefault(hit.index, mutableListOf()) + ids.add(hit.id) + docIdsByIndexName[hit.index] = ids + } + val findings = mutableListOf() + var requestsToRetry: MutableList = mutableListOf() + docIdsByIndexName.entries.forEach { it -> + run { + val finding = Finding( + id = UUID.randomUUID().toString(), + relatedDocIds = it.value, + monitorId = monitor.id, + monitorName = monitor.name, + index = it.key, + timestamp = Instant.now(), + docLevelQueries = listOf(), + executionId = workflowExecutionId + ) + + val findingStr = finding.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS).string() + logger.debug("Bucket level monitor ${monitor.id} Findings: $findingStr") + if (shouldCreateFinding) { + logger.debug("Saving bucket level monitor findings for monitor ${monitor.id}") + val indexRequest = IndexRequest(monitor.dataSources.findingsIndex) + .source(findingStr, XContentType.JSON) + .id(finding.id) + .routing(finding.id) + requestsToRetry.add(indexRequest) + } + findings.add(finding.id) + } + } + if (requestsToRetry.isEmpty()) return listOf() + monitorCtx.retryPolicy!!.retry(logger, listOf(RestStatus.TOO_MANY_REQUESTS)) { + val bulkRequest = BulkRequest().add(requestsToRetry).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + val bulkResponse: BulkResponse = monitorCtx.client!!.suspendUntil { monitorCtx.client!!.bulk(bulkRequest, it) } + requestsToRetry = mutableListOf() + val findingsBeingRetried = mutableListOf() + bulkResponse.items.forEach { item -> + if (item.isFailed) { + if (item.status() == RestStatus.TOO_MANY_REQUESTS) { + requestsToRetry.add(bulkRequest.requests()[item.itemId] as IndexRequest) + findingsBeingRetried.add(findingsBeingRetried[item.itemId]) + } + } + } + } + return findings + } + private fun getActionContextForAlertCategory( alertCategory: AlertCategory, alert: Alert, diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/DocumentLevelMonitorRunner.kt b/alerting/src/main/kotlin/org/opensearch/alerting/DocumentLevelMonitorRunner.kt index 66f2461cd..0e88b5cb3 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/DocumentLevelMonitorRunner.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/DocumentLevelMonitorRunner.kt @@ -6,55 +6,66 @@ package org.opensearch.alerting import org.apache.logging.log4j.LogManager -import org.opensearch.action.admin.indices.get.GetIndexRequest -import org.opensearch.action.admin.indices.get.GetIndexResponse +import org.opensearch.ExceptionsHelper +import org.opensearch.OpenSearchStatusException +import org.opensearch.action.DocWriteRequest +import org.opensearch.action.admin.indices.refresh.RefreshAction +import org.opensearch.action.admin.indices.refresh.RefreshRequest +import org.opensearch.action.bulk.BulkRequest +import org.opensearch.action.bulk.BulkResponse import org.opensearch.action.index.IndexRequest -import org.opensearch.action.index.IndexResponse import org.opensearch.action.search.SearchAction import org.opensearch.action.search.SearchRequest import org.opensearch.action.search.SearchResponse -import org.opensearch.action.support.WriteRequest -import org.opensearch.alerting.alerts.AlertIndices.Companion.FINDING_HISTORY_WRITE_INDEX -import org.opensearch.alerting.core.model.DocLevelMonitorInput -import org.opensearch.alerting.core.model.DocLevelQuery -import org.opensearch.alerting.core.model.ScheduledJob -import 
org.opensearch.alerting.model.ActionExecutionResult -import org.opensearch.alerting.model.Alert -import org.opensearch.alerting.model.AlertingConfigAccessor.Companion.getMonitorMetadata import org.opensearch.alerting.model.DocumentExecutionContext -import org.opensearch.alerting.model.DocumentLevelTrigger import org.opensearch.alerting.model.DocumentLevelTriggerRunResult -import org.opensearch.alerting.model.Finding import org.opensearch.alerting.model.InputRunResults -import org.opensearch.alerting.model.Monitor +import org.opensearch.alerting.model.MonitorMetadata import org.opensearch.alerting.model.MonitorRunResult -import org.opensearch.alerting.model.action.PerAlertActionScope -import org.opensearch.alerting.opensearchapi.string +import org.opensearch.alerting.model.userErrorMessage import org.opensearch.alerting.opensearchapi.suspendUntil import org.opensearch.alerting.script.DocumentLevelTriggerExecutionContext import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.IndexUtils import org.opensearch.alerting.util.defaultToPerExecutionAction import org.opensearch.alerting.util.getActionExecutionPolicy -import org.opensearch.alerting.util.updateMonitorMetadata +import org.opensearch.alerting.workflow.WorkflowRunContext import org.opensearch.client.Client +import org.opensearch.client.node.NodeClient +import org.opensearch.cluster.metadata.IndexMetadata +import org.opensearch.cluster.routing.Preference import org.opensearch.cluster.routing.ShardRouting import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.bytes.BytesReference -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder import org.opensearch.common.xcontent.XContentFactory import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.AlertingPluginInterface +import org.opensearch.commons.alerting.action.PublishFindingsRequest +import org.opensearch.commons.alerting.action.SubscribeFindingsResponse +import org.opensearch.commons.alerting.model.ActionExecutionResult +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.DocumentLevelTrigger +import org.opensearch.commons.alerting.model.Finding +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.action.PerAlertActionScope +import org.opensearch.commons.alerting.util.string +import org.opensearch.core.action.ActionListener +import org.opensearch.core.common.bytes.BytesReference +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.index.IndexNotFoundException import org.opensearch.index.query.BoolQueryBuilder +import org.opensearch.index.query.Operator import org.opensearch.index.query.QueryBuilders import org.opensearch.percolator.PercolateQueryBuilderExt -import org.opensearch.rest.RestStatus import org.opensearch.search.SearchHits import org.opensearch.search.builder.SearchSourceBuilder import org.opensearch.search.sort.SortOrder import java.io.IOException import java.time.Instant import java.util.UUID -import kotlin.collections.HashMap import kotlin.math.max object DocumentLevelMonitorRunner : MonitorRunner() { @@ -65,45 +76,42 @@ object DocumentLevelMonitorRunner : MonitorRunner() { monitorCtx: 
MonitorRunnerExecutionContext, periodStart: Instant, periodEnd: Instant, - dryrun: Boolean + dryrun: Boolean, + workflowRunContext: WorkflowRunContext?, + executionId: String ): MonitorRunResult { logger.debug("Document-level-monitor is running ...") + val isTempMonitor = dryrun || monitor.id == Monitor.NO_ID var monitorResult = MonitorRunResult(monitor.name, periodStart, periodEnd) try { - monitorCtx.alertIndices!!.createOrUpdateAlertIndex() - monitorCtx.alertIndices!!.createOrUpdateInitialAlertHistoryIndex() - monitorCtx.alertIndices!!.createOrUpdateInitialFindingHistoryIndex() + monitorCtx.alertIndices!!.createOrUpdateAlertIndex(monitor.dataSources) + monitorCtx.alertIndices!!.createOrUpdateInitialAlertHistoryIndex(monitor.dataSources) + monitorCtx.alertIndices!!.createOrUpdateInitialFindingHistoryIndex(monitor.dataSources) } catch (e: Exception) { val id = if (monitor.id.trim().isEmpty()) "_na_" else monitor.id logger.error("Error setting up alerts and findings indices for monitor: $id", e) - return monitorResult.copy(error = AlertingException.wrap(e)) + monitorResult = monitorResult.copy(error = AlertingException.wrap(e)) } try { validate(monitor) } catch (e: Exception) { logger.error("Failed to start Document-level-monitor. Error: ${e.message}") - return monitorResult.copy(error = AlertingException.wrap(e)) + monitorResult = monitorResult.copy(error = AlertingException.wrap(e)) } - monitorCtx.docLevelMonitorQueries!!.initDocLevelQueryIndex() - monitorCtx.docLevelMonitorQueries!!.indexDocLevelQueries( + var (monitorMetadata, _) = MonitorMetadataService.getOrCreateMetadata( monitor = monitor, - monitorId = monitor.id, - indexTimeout = monitorCtx.indexTimeout!! + createWithRunContext = false, + skipIndex = isTempMonitor, + workflowRunContext?.workflowMetadataId ) val docLevelMonitorInput = monitor.inputs[0] as DocLevelMonitorInput - val index = docLevelMonitorInput.indices[0] - val queries: List = docLevelMonitorInput.queries - var monitorMetadata = getMonitorMetadata(monitorCtx.client!!, monitorCtx.xContentRegistry!!, "${monitor.id}-metadata") - if (monitorMetadata == null) { - monitorMetadata = createMonitorMetadata(monitor.id) - } + val queries: List = docLevelMonitorInput.queries - val isTempMonitor = dryrun || monitor.id == Monitor.NO_ID val lastRunContext = if (monitorMetadata.lastRunContext.isNullOrEmpty()) mutableMapOf() else monitorMetadata.lastRunContext.toMutableMap() as MutableMap> @@ -114,102 +122,241 @@ object DocumentLevelMonitorRunner : MonitorRunner() { val docsToQueries = mutableMapOf>() try { - val getIndexRequest = GetIndexRequest().indices(index) - val getIndexResponse: GetIndexResponse = monitorCtx.client!!.suspendUntil { - monitorCtx.client!!.admin().indices().getIndex(getIndexRequest, it) + // Resolve all passed indices to concrete indices + val allConcreteIndices = IndexUtils.resolveAllIndices( + docLevelMonitorInput.indices, + monitorCtx.clusterService!!, + monitorCtx.indexNameExpressionResolver!! + ) + if (allConcreteIndices.isEmpty()) { + logger.error("indices not found-${docLevelMonitorInput.indices.joinToString(",")}") + throw IndexNotFoundException(docLevelMonitorInput.indices.joinToString(",")) } - val indices = getIndexResponse.indices() + + monitorCtx.docLevelMonitorQueries!!.initDocLevelQueryIndex(monitor.dataSources) + monitorCtx.docLevelMonitorQueries!!.indexDocLevelQueries( + monitor = monitor, + monitorId = monitor.id, + monitorMetadata, + indexTimeout = monitorCtx.indexTimeout!! 
+ ) // cleanup old indices that are not monitored anymore from the same monitor - for (ind in updatedLastRunContext.keys) { - if (!indices.contains(ind)) { + val runContextKeys = updatedLastRunContext.keys.toMutableSet() + for (ind in runContextKeys) { + if (!allConcreteIndices.contains(ind)) { updatedLastRunContext.remove(ind) } } - indices.forEach { indexName -> - // Prepare lastRunContext for each index - val indexLastRunContext = lastRunContext.getOrPut(indexName) { - val indexCreatedRecently = createdRecently(monitor, indexName, periodStart, periodEnd, getIndexResponse) - createRunContext(monitorCtx.clusterService!!, monitorCtx.client!!, indexName, indexCreatedRecently) - } + // Map of document ids per index when monitor is workflow delegate and has chained findings + val matchingDocIdsPerIndex = workflowRunContext?.matchingDocIdsPerIndex - // Prepare updatedLastRunContext for each index - val indexUpdatedRunContext = updateLastRunContext( - indexLastRunContext.toMutableMap(), - monitorCtx, - indexName - ) as MutableMap - updatedLastRunContext[indexName] = indexUpdatedRunContext - - val count: Int = indexLastRunContext["shards_count"] as Int - for (i: Int in 0 until count) { - val shard = i.toString() - - // update lastRunContext if its a temp monitor as we only want to view the last bit of data then - // TODO: If dryrun, we should make it so we limit the search as this could still potentially give us lots of data - if (isTempMonitor) { - indexLastRunContext[shard] = max(-1, (indexUpdatedRunContext[shard] as String).toInt() - 10) + docLevelMonitorInput.indices.forEach { indexName -> + var concreteIndices = IndexUtils.resolveAllIndices( + listOf(indexName), + monitorCtx.clusterService!!, + monitorCtx.indexNameExpressionResolver!! + ) + var lastWriteIndex: String? 
= null + if (IndexUtils.isAlias(indexName, monitorCtx.clusterService!!.state()) || + IndexUtils.isDataStream(indexName, monitorCtx.clusterService!!.state()) + ) { + lastWriteIndex = concreteIndices.find { lastRunContext.containsKey(it) } + if (lastWriteIndex != null) { + val lastWriteIndexCreationDate = + IndexUtils.getCreationDateForIndex(lastWriteIndex, monitorCtx.clusterService!!.state()) + concreteIndices = IndexUtils.getNewestIndicesByCreationDate( + concreteIndices, + monitorCtx.clusterService!!.state(), + lastWriteIndexCreationDate + ) } } + val updatedIndexName = indexName.replace("*", "_") + val conflictingFields = monitorCtx.docLevelMonitorQueries!!.getAllConflictingFields( + monitorCtx.clusterService!!.state(), + concreteIndices + ) - // Prepare DocumentExecutionContext for each index - val docExecutionContext = DocumentExecutionContext(queries, indexLastRunContext, indexUpdatedRunContext) + concreteIndices.forEach { concreteIndexName -> + // Prepare lastRunContext for each index + val indexLastRunContext = lastRunContext.getOrPut(concreteIndexName) { + val isIndexCreatedRecently = createdRecently( + monitor, + periodStart, + periodEnd, + monitorCtx.clusterService!!.state().metadata.index(concreteIndexName) + ) + MonitorMetadataService.createRunContextForIndex(concreteIndexName, isIndexCreatedRecently) + } - val matchingDocs = getMatchingDocs(monitor, monitorCtx, docExecutionContext, indexName) + // Prepare updatedLastRunContext for each index + val indexUpdatedRunContext = updateLastRunContext( + indexLastRunContext.toMutableMap(), + monitorCtx, + concreteIndexName + ) as MutableMap + if (IndexUtils.isAlias(indexName, monitorCtx.clusterService!!.state()) || + IndexUtils.isDataStream(indexName, monitorCtx.clusterService!!.state()) + ) { + if (concreteIndexName == IndexUtils.getWriteIndex(indexName, monitorCtx.clusterService!!.state())) { + updatedLastRunContext.remove(lastWriteIndex) + updatedLastRunContext[concreteIndexName] = indexUpdatedRunContext + } + } else { + updatedLastRunContext[concreteIndexName] = indexUpdatedRunContext + } - if (matchingDocs.isNotEmpty()) { - val matchedQueriesForDocs = getMatchedQueries(monitorCtx, matchingDocs.map { it.second }, monitor, indexName) + val count: Int = indexLastRunContext["shards_count"] as Int + for (i: Int in 0 until count) { + val shard = i.toString() - matchedQueriesForDocs.forEach { hit -> - val (id, query) = Pair( - hit.id.replace("_${indexName}_${monitor.id}", ""), - ((hit.sourceAsMap["query"] as HashMap<*, *>)["query_string"] as HashMap<*, *>)["query"].toString() - .replace("_${indexName}_${monitor.id}", "") + // update lastRunContext if its a temp monitor as we only want to view the last bit of data then + // TODO: If dryrun, we should make it so we limit the search as this could still potentially give us lots of data + if (isTempMonitor) { + indexLastRunContext[shard] = max(-1, (indexUpdatedRunContext[shard] as String).toInt() - 10) + } + } + + // Prepare DocumentExecutionContext for each index + val docExecutionContext = DocumentExecutionContext(queries, indexLastRunContext, indexUpdatedRunContext) + + val matchingDocs = getMatchingDocs( + monitor, + monitorCtx, + docExecutionContext, + updatedIndexName, + concreteIndexName, + conflictingFields.toList(), + matchingDocIdsPerIndex?.get(concreteIndexName) + ) + + if (matchingDocs.isNotEmpty()) { + val matchedQueriesForDocs = getMatchedQueries( + monitorCtx, + matchingDocs.map { it.second }, + monitor, + monitorMetadata, + updatedIndexName, + concreteIndexName ) - val 
docLevelQuery = DocLevelQuery(id, id, query) - - val docIndices = hit.field("_percolator_document_slot").values.map { it.toString().toInt() } - docIndices.forEach { idx -> - val docIndex = "${matchingDocs[idx].first}|$indexName" - queryToDocIds.getOrPut(docLevelQuery) { mutableSetOf() }.add(docIndex) - inputRunResults.getOrPut(docLevelQuery.id) { mutableSetOf() }.add(docIndex) - docsToQueries.getOrPut(docIndex) { mutableListOf() }.add(id) + + matchedQueriesForDocs.forEach { hit -> + val id = hit.id + .replace("_${updatedIndexName}_${monitor.id}", "") + .replace("_${concreteIndexName}_${monitor.id}", "") + + val docIndices = hit.field("_percolator_document_slot").values.map { it.toString().toInt() } + docIndices.forEach { idx -> + val docIndex = "${matchingDocs[idx].first}|$concreteIndexName" + inputRunResults.getOrPut(id) { mutableSetOf() }.add(docIndex) + docsToQueries.getOrPut(docIndex) { mutableListOf() }.add(id) + } } } } } + monitorResult = monitorResult.copy(inputResults = InputRunResults(listOf(inputRunResults))) + + /* + populate the map queryToDocIds with pairs of + this fixes the issue of passing id, name, tags fields of DocLevelQuery object correctly to TriggerExpressionParser + */ + queries.forEach { + if (inputRunResults.containsKey(it.id)) { + queryToDocIds[it] = inputRunResults[it.id]!! + } + } + + val idQueryMap: Map = queries.associateBy { it.id } + + val triggerResults = mutableMapOf() + // If there are no triggers defined, we still want to generate findings + if (monitor.triggers.isEmpty()) { + if (dryrun == false && monitor.id != Monitor.NO_ID) { + createFindings(monitor, monitorCtx, docsToQueries, idQueryMap, true) + } + } else { + monitor.triggers.forEach { + triggerResults[it.id] = runForEachDocTrigger( + monitorCtx, + monitorResult, + it as DocumentLevelTrigger, + monitor, + idQueryMap, + docsToQueries, + queryToDocIds, + dryrun, + executionId = executionId, + workflowRunContext = workflowRunContext + ) + } + } + // Don't update monitor if this is a test monitor + if (!isTempMonitor) { + // If any error happened during trigger execution, upsert monitor error alert + val errorMessage = constructErrorMessageFromTriggerResults(triggerResults = triggerResults) + if (errorMessage.isNotEmpty()) { + monitorCtx.alertService!!.upsertMonitorErrorAlert( + monitor = monitor, + errorMessage = errorMessage, + executionId = executionId, + workflowRunContext + ) + } else { + onSuccessfulMonitorRun(monitorCtx, monitor) + } + + MonitorMetadataService.upsertMetadata( + monitorMetadata.copy(lastRunContext = updatedLastRunContext), + true + ) + } + + // TODO: Update the Document as part of the Trigger and return back the trigger action result + return monitorResult.copy(triggerResults = triggerResults) } catch (e: Exception) { - logger.error("Failed to start Document-level-monitor $index. 
Error: ${e.message}", e) - val alertingException = AlertingException.wrap(e) + val errorMessage = ExceptionsHelper.detailedMessage(e) + monitorCtx.alertService!!.upsertMonitorErrorAlert(monitor, errorMessage, executionId, workflowRunContext) + logger.error("Failed running Document-level-monitor ${monitor.name}", e) + val alertingException = AlertingException( + errorMessage, + RestStatus.INTERNAL_SERVER_ERROR, + e + ) return monitorResult.copy(error = alertingException, inputResults = InputRunResults(emptyList(), alertingException)) } + } - monitorResult = monitorResult.copy(inputResults = InputRunResults(listOf(inputRunResults))) - - val idQueryMap: Map = queries.associateBy { it.id } - - val triggerResults = mutableMapOf() - monitor.triggers.forEach { - triggerResults[it.id] = runForEachDocTrigger( - monitorCtx, - monitorResult, - it as DocumentLevelTrigger, - monitor, - idQueryMap, - docsToQueries, - queryToDocIds, - dryrun + private suspend fun onSuccessfulMonitorRun(monitorCtx: MonitorRunnerExecutionContext, monitor: Monitor) { + monitorCtx.alertService!!.clearMonitorErrorAlert(monitor) + if (monitor.dataSources.alertsHistoryIndex != null) { + monitorCtx.alertService!!.moveClearedErrorAlertsToHistory( + monitor.id, + monitor.dataSources.alertsIndex, + monitor.dataSources.alertsHistoryIndex!! ) } + } - // Don't update monitor if this is a test monitor - if (!isTempMonitor) { - updateMonitorMetadata(monitorCtx.client!!, monitorCtx.settings!!, monitorMetadata.copy(lastRunContext = updatedLastRunContext)) + private fun constructErrorMessageFromTriggerResults( + triggerResults: MutableMap? = null + ): String { + var errorMessage = "" + if (triggerResults != null) { + val triggersErrorBuilder = StringBuilder() + triggerResults.forEach { + if (it.value.error != null) { + triggersErrorBuilder.append("[${it.key}]: [${it.value.error!!.userErrorMessage()}]").append(" | ") + } + } + if (triggersErrorBuilder.isNotEmpty()) { + errorMessage = "Trigger errors: $triggersErrorBuilder" + } } - - // TODO: Update the Document as part of the Trigger and return back the trigger action result - return monitorResult.copy(triggerResults = triggerResults) + return errorMessage } private suspend fun runForEachDocTrigger( @@ -218,40 +365,49 @@ object DocumentLevelMonitorRunner : MonitorRunner() { trigger: DocumentLevelTrigger, monitor: Monitor, idQueryMap: Map, - docsToQueries: Map>, + docsToQueries: MutableMap>, queryToDocIds: Map>, - dryrun: Boolean + dryrun: Boolean, + workflowRunContext: WorkflowRunContext?, + executionId: String ): DocumentLevelTriggerRunResult { val triggerCtx = DocumentLevelTriggerExecutionContext(monitor, trigger) val triggerResult = monitorCtx.triggerService!!.runDocLevelTrigger(monitor, trigger, queryToDocIds) - val findings = mutableListOf() - val findingDocPairs = mutableListOf>() + val triggerFindingDocPairs = mutableListOf>() // TODO: Implement throttling for findings - docsToQueries.forEach { - val triggeredQueries = it.value.map { queryId -> idQueryMap[queryId]!! 
} - val findingId = createFindings(monitor, monitorCtx, triggeredQueries, it.key, !dryrun && monitor.id != Monitor.NO_ID) - findings.add(findingId) + val findingToDocPairs = createFindings( + monitor, + monitorCtx, + docsToQueries, + idQueryMap, + !dryrun && monitor.id != Monitor.NO_ID, + executionId + ) - if (triggerResult.triggeredDocs.contains(it.key)) { - findingDocPairs.add(Pair(findingId, it.key)) + findingToDocPairs.forEach { + // Only pick those entries whose docs have triggers associated with them + if (triggerResult.triggeredDocs.contains(it.second)) { + triggerFindingDocPairs.add(Pair(it.first, it.second)) } } val actionCtx = triggerCtx.copy( triggeredDocs = triggerResult.triggeredDocs, - relatedFindings = findings, + relatedFindings = findingToDocPairs.map { it.first }, error = monitorResult.error ?: triggerResult.error ) val alerts = mutableListOf() - findingDocPairs.forEach { + triggerFindingDocPairs.forEach { val alert = monitorCtx.alertService!!.composeDocLevelAlert( listOf(it.first), listOf(it.second), triggerCtx, - monitorResult.alertError() ?: triggerResult.alertError() + monitorResult.alertError() ?: triggerResult.alertError(), + executionId = executionId, + workflorwRunContext = workflowRunContext ) alerts.add(alert) } @@ -291,46 +447,121 @@ object DocumentLevelMonitorRunner : MonitorRunner() { alert.copy(actionExecutionResults = actionExecutionResults) } - monitorCtx.retryPolicy?.let { monitorCtx.alertService!!.saveAlerts(updatedAlerts, it) } + monitorCtx.retryPolicy?.let { + monitorCtx.alertService!!.saveAlerts( + monitor.dataSources, + updatedAlerts, + it, + routingId = monitor.id + ) + } } return triggerResult } + /** + * 1. Bulk index all findings based on shouldCreateFinding flag + * 2. invoke publishFinding() to kickstart auto-correlations + * 3. Returns a list of pairs for finding id to doc id + */ private suspend fun createFindings( monitor: Monitor, monitorCtx: MonitorRunnerExecutionContext, - docLevelQueries: List, - matchingDocId: String, - shouldCreateFinding: Boolean - ): String { - // Before the "|" is the doc id and after the "|" is the index - val docIndex = matchingDocId.split("|") + docsToQueries: MutableMap>, + idQueryMap: Map, + shouldCreateFinding: Boolean, + workflowExecutionId: String? = null, + ): List> { - val finding = Finding( - id = UUID.randomUUID().toString(), - relatedDocIds = listOf(docIndex[0]), - monitorId = monitor.id, - monitorName = monitor.name, - index = docIndex[1], - docLevelQueries = docLevelQueries, - timestamp = Instant.now() - ) + val findingDocPairs = mutableListOf>() + val findings = mutableListOf() + val indexRequests = mutableListOf() + + docsToQueries.forEach { + val triggeredQueries = it.value.map { queryId -> idQueryMap[queryId]!! 
 
+    /**
+     * 1. Bulk index all findings based on the shouldCreateFinding flag
+     * 2. Invoke publishFinding() to kickstart auto-correlations
+     * 3. Returns a list of pairs of finding id to doc id
+     */
     private suspend fun createFindings(
         monitor: Monitor,
         monitorCtx: MonitorRunnerExecutionContext,
-        docLevelQueries: List<DocLevelQuery>,
-        matchingDocId: String,
-        shouldCreateFinding: Boolean
-    ): String {
-        // Before the "|" is the doc id and after the "|" is the index
-        val docIndex = matchingDocId.split("|")
+        docsToQueries: MutableMap<String, MutableList<String>>,
+        idQueryMap: Map<String, DocLevelQuery>,
+        shouldCreateFinding: Boolean,
+        workflowExecutionId: String? = null,
+    ): List<Pair<String, String>> {
 
-        val finding = Finding(
-            id = UUID.randomUUID().toString(),
-            relatedDocIds = listOf(docIndex[0]),
-            monitorId = monitor.id,
-            monitorName = monitor.name,
-            index = docIndex[1],
-            docLevelQueries = docLevelQueries,
-            timestamp = Instant.now()
-        )
+        val findingDocPairs = mutableListOf<Pair<String, String>>()
+        val findings = mutableListOf<Finding>()
+        val indexRequests = mutableListOf<IndexRequest>()
+
+        docsToQueries.forEach {
+            val triggeredQueries = it.value.map { queryId -> idQueryMap[queryId]!! }
 
-        val findingStr = finding.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS).string()
-        logger.debug("Findings: $findingStr")
+            // Before the "|" is the doc id and after the "|" is the index
+            val docIndex = it.key.split("|")
+
+            val finding = Finding(
+                id = UUID.randomUUID().toString(),
+                relatedDocIds = listOf(docIndex[0]),
+                correlatedDocIds = listOf(docIndex[0]),
+                monitorId = monitor.id,
+                monitorName = monitor.name,
+                index = docIndex[1],
+                docLevelQueries = triggeredQueries,
+                timestamp = Instant.now(),
+                executionId = workflowExecutionId
+            )
+            findingDocPairs.add(Pair(finding.id, it.key))
+            findings.add(finding)
+
+            val findingStr =
+                finding.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS)
+                    .string()
+            logger.debug("Findings: $findingStr")
+
+            if (shouldCreateFinding) {
+                indexRequests += IndexRequest(monitor.dataSources.findingsIndex)
+                    .source(findingStr, XContentType.JSON)
+                    .id(finding.id)
+                    .opType(DocWriteRequest.OpType.CREATE)
+            }
+        }
 
-        if (shouldCreateFinding) {
-            val indexRequest = IndexRequest(FINDING_HISTORY_WRITE_INDEX)
-                .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
-                .source(findingStr, XContentType.JSON)
-                .id(finding.id)
-                .routing(finding.id)
+        if (indexRequests.isNotEmpty()) {
+            bulkIndexFindings(monitor, monitorCtx, indexRequests)
+        }
 
-            monitorCtx.client!!.suspendUntil {
-                monitorCtx.client!!.index(indexRequest, it)
+        try {
+            findings.forEach { finding ->
+                publishFinding(monitor, monitorCtx, finding)
+            }
+        } catch (e: Exception) {
+            // suppress exception
+            logger.error("Optional finding callback failed", e)
+        }
+        return findingDocPairs
+    }
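// A minimal sketch of the composite key convention used by createFindings() above: each entry in
// docsToQueries is keyed "docId|index", so a single split("|") recovers both parts. Values here
// are made up for illustration.
fun main() {
    val compositeKey = "42|my_log_index" // hypothetical doc id 42 in index my_log_index
    val (docId, index) = compositeKey.split("|")
    println("doc id = $docId, source index = $index") // doc id = 42, source index = my_log_index
}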
+    private suspend fun bulkIndexFindings(
+        monitor: Monitor,
+        monitorCtx: MonitorRunnerExecutionContext,
+        indexRequests: List<IndexRequest>
+    ) {
+        indexRequests.chunked(monitorCtx.findingsIndexBatchSize).forEach { batch ->
+            val bulkResponse: BulkResponse = monitorCtx.client!!.suspendUntil {
+                bulk(BulkRequest().add(batch), it)
+            }
+            if (bulkResponse.hasFailures()) {
+                bulkResponse.items.forEach { item ->
+                    if (item.isFailed) {
+                        logger.error("Failed indexing the finding ${item.id} of monitor [${monitor.id}]")
+                    }
+                }
+            } else {
+                logger.debug("[${bulkResponse.items.size}] All findings successfully indexed.")
            }
         }
-        return finding.id
+        monitorCtx.client!!.execute(RefreshAction.INSTANCE, RefreshRequest(monitor.dataSources.findingsIndex))
+    }
+
+    private fun publishFinding(
+        monitor: Monitor,
+        monitorCtx: MonitorRunnerExecutionContext,
+        finding: Finding
+    ) {
+        val publishFindingsRequest = PublishFindingsRequest(monitor.id, finding)
+        AlertingPluginInterface.publishFinding(
+            monitorCtx.client!! as NodeClient,
+            publishFindingsRequest,
+            object : ActionListener<SubscribeFindingsResponse> {
+                override fun onResponse(response: SubscribeFindingsResponse) {}
+
+                override fun onFailure(e: Exception) {}
+            }
+        )
     }
 
     private suspend fun updateLastRunContext(
@@ -357,42 +588,22 @@ object DocumentLevelMonitorRunner : MonitorRunner() {
             throw IOException("Invalid input with document-level-monitor.")
         }
 
-        val docLevelMonitorInput = monitor.inputs[0] as DocLevelMonitorInput
-        if (docLevelMonitorInput.indices.size > 1) {
-            throw IOException("Only one index is supported with document-level-monitor.")
-        }
-    }
-
-    suspend fun createRunContext(
-        clusterService: ClusterService,
-        client: Client,
-        index: String,
-        createdRecently: Boolean = false
-    ): HashMap<String, Any> {
-        val lastRunContext = HashMap<String, Any>()
-        lastRunContext["index"] = index
-        val count = getShardsCount(clusterService, index)
-        lastRunContext["shards_count"] = count
-
-        for (i: Int in 0 until count) {
-            val shard = i.toString()
-            val maxSeqNo: Long = if (createdRecently) -1L else getMaxSeqNo(client, index, shard)
-            lastRunContext[shard] = maxSeqNo
+        if ((monitor.inputs[0] as DocLevelMonitorInput).indices.isEmpty()) {
+            throw IllegalArgumentException("DocLevelMonitorInput has no indices")
         }
-        return lastRunContext
     }
 
     // Checks if the index was created from the last execution run or when the monitor was last updated to ensure that
     // new index is monitored from the beginning of that index
     private fun createdRecently(
         monitor: Monitor,
-        index: String,
         periodStart: Instant,
         periodEnd: Instant,
-        getIndexResponse: GetIndexResponse
+        indexMetadata: IndexMetadata
     ): Boolean {
         val lastExecutionTime = if (periodStart == periodEnd) monitor.lastUpdateTime else periodStart
-        return getIndexResponse.settings.get(index).getAsLong("index.creation_date", 0L) > lastExecutionTime.toEpochMilli()
+        val indexCreationDate = indexMetadata.settings.get("index.creation_date")?.toLong() ?: 0L
+        return indexCreationDate > lastExecutionTime.toEpochMilli()
     }
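// A runnable sketch of the createdRecently() check above: an index counts as "new" when its
// creation_date (epoch millis, as stored in index settings) is later than the effective last
// execution time. Inputs here are plain values rather than IndexMetadata, for illustration.
import java.time.Instant

fun createdRecently(indexCreationDate: Long, periodStart: Instant, periodEnd: Instant, lastUpdateTime: Instant): Boolean {
    val lastExecutionTime = if (periodStart == periodEnd) lastUpdateTime else periodStart
    return indexCreationDate > lastExecutionTime.toEpochMilli()
}

fun main() {
    val now = Instant.now()
    // An index created one second ago vs. a run window that started a minute ago -> treated as new.
    println(createdRecently(now.minusSeconds(1).toEpochMilli(), now.minusSeconds(60), now, now.minusSeconds(3600)))
}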
 
     /**
@@ -430,7 +641,10 @@ object DocumentLevelMonitorRunner : MonitorRunner() {
         monitor: Monitor,
         monitorCtx: MonitorRunnerExecutionContext,
         docExecutionCtx: DocumentExecutionContext,
-        index: String
+        index: String,
+        concreteIndex: String,
+        conflictingFields: List<String>,
+        docIds: List<String>? = null
     ): List<Pair<String, BytesReference>> {
         val count: Int = docExecutionCtx.updatedLastRunContext["shards_count"] as Int
         val matchingDocs = mutableListOf<Pair<String, BytesReference>>()
@@ -442,18 +656,19 @@
                 val hits: SearchHits = searchShard(
                     monitorCtx,
-                    index,
+                    concreteIndex,
                     shard,
                     prevSeqNo,
                     maxSeqNo,
-                    null
+                    null,
+                    docIds
                 )
 
                 if (hits.hits.isNotEmpty()) {
-                    matchingDocs.addAll(getAllDocs(hits, index, monitor.id))
+                    matchingDocs.addAll(getAllDocs(hits, index, concreteIndex, monitor.id, conflictingFields))
                 }
             } catch (e: Exception) {
-                logger.warn("Failed to run for shard $shard. Error: ${e.message}")
+                logger.error("Failed to run for shard $shard. Error: ${e.message}")
             }
         }
         return matchingDocs
@@ -465,7 +680,8 @@
         shard: String,
         prevSeqNo: Long?,
         maxSeqNo: Long,
-        query: String?
+        query: String?,
+        docIds: List<String>? = null
     ): SearchHits {
         if (prevSeqNo?.equals(maxSeqNo) == true && maxSeqNo != 0L) {
             return SearchHits.empty()
         }
@@ -477,6 +693,10 @@
             boolQueryBuilder.must(QueryBuilders.queryStringQuery(query))
         }
 
+        if (!docIds.isNullOrEmpty()) {
+            boolQueryBuilder.filter(QueryBuilders.termsQuery("_id", docIds))
+        }
+
         val request: SearchRequest = SearchRequest()
             .indices(index)
             .preference("_shards:$shard")
@@ -486,6 +706,7 @@
                     .query(boolQueryBuilder)
                     .size(10000) // fixme: make this configurable.
             )
+            .preference(Preference.PRIMARY_FIRST.type())
         val response: SearchResponse = monitorCtx.client!!.suspendUntil { monitorCtx.client!!.search(request, it) }
         if (response.status() !== RestStatus.OK) {
             throw IOException("Failed to search shard: $shard")
         }
+ + " sourceIndex:$concreteIndex queryIndex:${monitor.dataSources.queryIndex}" + logger.error(message) + throw AlertingException.wrap( + OpenSearchStatusException(message, RestStatus.INTERNAL_SERVER_ERROR) + ) + } + val searchRequest = SearchRequest(queryIndex).preference(Preference.PRIMARY_FIRST.type()) val searchSourceBuilder = SearchSourceBuilder() searchSourceBuilder.query(boolQueryBuilder) searchRequest.source(searchSourceBuilder) - val response: SearchResponse = monitorCtx.client!!.suspendUntil { - monitorCtx.client!!.execute(SearchAction.INSTANCE, searchRequest, it) + var response: SearchResponse + try { + response = monitorCtx.client!!.suspendUntil { + monitorCtx.client!!.execute(SearchAction.INSTANCE, searchRequest, it) + } + } catch (e: Exception) { + throw IllegalStateException( + "Failed to run percolate search for sourceIndex [$index] and queryIndex [$queryIndex] for ${docs.size} document(s)", e + ) } if (response.status() !== RestStatus.OK) { - throw IOException("Failed to search percolate index: ${ScheduledJob.DOC_LEVEL_QUERIES_INDEX}") + throw IOException("Failed to search percolate index: $queryIndex") } return response.hits } - private fun getAllDocs(hits: SearchHits, index: String, monitorId: String): List> { + private fun getAllDocs( + hits: SearchHits, + index: String, + concreteIndex: String, + monitorId: String, + conflictingFields: List + ): List> { return hits.map { hit -> val sourceMap = hit.sourceAsMap - var xContentBuilder = XContentFactory.jsonBuilder().startObject() - sourceMap.forEach { (k, v) -> - xContentBuilder = xContentBuilder.field("${k}_${index}_$monitorId", v) - } - xContentBuilder = xContentBuilder.endObject() + transformDocumentFieldNames( + sourceMap, + conflictingFields, + "_${index}_$monitorId", + "_${concreteIndex}_$monitorId", + "" + ) + + var xContentBuilder = XContentFactory.jsonBuilder().map(sourceMap) val sourceRef = BytesReference.bytes(xContentBuilder) + logger.debug("Document [${hit.id}] payload after transform: ", sourceRef.utf8ToString()) + Pair(hit.id, sourceRef) } } + + /** + * Traverses document fields in leaves recursively and appends [fieldNameSuffixIndex] to field names with same names + * but different mappings & [fieldNameSuffixPattern] to field names which have unique names. 
+
+    /**
+     * Recursively traverses the document's leaf fields and appends [fieldNameSuffixIndex] to field names that share a
+     * name with another field but have different mappings, and [fieldNameSuffixPattern] to field names that are unique.
+     *
+     * Example, for index name my_log_index and monitor ID TReewWdsf2gdJFV:
+     *   Before:                      After:
+     *   {                            {
+     *     "a": {                       "a": {
+     *       "b": 1234                    "b_my_log_index_TReewWdsf2gdJFV": 1234
+     *     }                            }
+     *   }                            }
+     *
+     * @param jsonAsMap Input JSON (as a mutable map)
+     * @param conflictingFields Field names that collide across indices and therefore get the concrete-index suffix
+     * @param fieldNameSuffixPattern Suffix appended to uniquely named fields
+     * @param fieldNameSuffixIndex Suffix appended to conflicting fields
+     * @param fieldNamePrefix Dotted path of the ancestors of the current nesting level
+     */
+    private fun transformDocumentFieldNames(
+        jsonAsMap: MutableMap<String, Any>,
+        conflictingFields: List<String>,
+        fieldNameSuffixPattern: String,
+        fieldNameSuffixIndex: String,
+        fieldNamePrefix: String
+    ) {
+        val tempMap = mutableMapOf<String, Any>()
+        val it: MutableIterator<MutableMap.MutableEntry<String, Any>> = jsonAsMap.entries.iterator()
+        while (it.hasNext()) {
+            val entry = it.next()
+            if (entry.value is Map<*, *>) {
+                transformDocumentFieldNames(
+                    entry.value as MutableMap<String, Any>,
+                    conflictingFields,
+                    fieldNameSuffixPattern,
+                    fieldNameSuffixIndex,
+                    if (fieldNamePrefix == "") entry.key else "$fieldNamePrefix.${entry.key}"
+                )
+            } else if (!entry.key.endsWith(fieldNameSuffixPattern) && !entry.key.endsWith(fieldNameSuffixIndex)) {
+                var alreadyReplaced = false
+                conflictingFields.forEach { conflictingField ->
+                    if (conflictingField == "$fieldNamePrefix.${entry.key}" || (fieldNamePrefix == "" && conflictingField == entry.key)) {
+                        tempMap["${entry.key}$fieldNameSuffixIndex"] = entry.value
+                        it.remove()
+                        alreadyReplaced = true
+                    }
+                }
+                if (!alreadyReplaced) {
+                    tempMap["${entry.key}$fieldNameSuffixPattern"] = entry.value
+                    it.remove()
+                }
+            }
+        }
+        jsonAsMap.putAll(tempMap)
+    }
 }
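// A trimmed, runnable sketch of transformDocumentFieldNames() above: leaf keys get an
// "_<index>_<monitorId>" suffix so that same-named fields from different monitors/indices cannot
// collide in the percolation document. The conflicting-field branch is omitted here for brevity.
fun suffixLeafFields(doc: MutableMap<String, Any>, suffix: String) {
    val renamed = mutableMapOf<String, Any>()
    val iter = doc.entries.iterator()
    while (iter.hasNext()) {
        val entry = iter.next()
        val value = entry.value
        if (value is MutableMap<*, *>) {
            @Suppress("UNCHECKED_CAST")
            suffixLeafFields(value as MutableMap<String, Any>, suffix) // recurse into nested objects
        } else {
            renamed["${entry.key}$suffix"] = value
            iter.remove()
        }
    }
    doc.putAll(renamed)
}

fun main() {
    val doc = mutableMapOf<String, Any>("a" to mutableMapOf<String, Any>("b" to 1234))
    suffixLeafFields(doc, "_my_log_index_TReewWdsf2gdJFV")
    println(doc) // {a={b_my_log_index_TReewWdsf2gdJFV=1234}}
}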
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/InputService.kt b/alerting/src/main/kotlin/org/opensearch/alerting/InputService.kt
index a5916a73c..e0e06606c 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/InputService.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/InputService.kt
@@ -5,27 +5,43 @@
 package org.opensearch.alerting
 
+import kotlinx.coroutines.CoroutineScope
+import kotlinx.coroutines.Dispatchers
+import kotlinx.coroutines.launch
 import org.apache.logging.log4j.LogManager
 import org.opensearch.action.search.SearchRequest
 import org.opensearch.action.search.SearchResponse
-import org.opensearch.alerting.core.model.ClusterMetricsInput
-import org.opensearch.alerting.core.model.SearchInput
 import org.opensearch.alerting.model.InputRunResults
-import org.opensearch.alerting.model.Monitor
 import org.opensearch.alerting.model.TriggerAfterKey
 import org.opensearch.alerting.opensearchapi.convertToMap
 import org.opensearch.alerting.opensearchapi.suspendUntil
+import org.opensearch.alerting.settings.AlertingSettings
 import org.opensearch.alerting.util.AggregationQueryRewriter
+import org.opensearch.alerting.util.CrossClusterMonitorUtils
 import org.opensearch.alerting.util.addUserBackendRolesFilter
-import org.opensearch.alerting.util.executeTransportAction
-import org.opensearch.alerting.util.toMap
+import org.opensearch.alerting.util.clusterMetricsMonitorHelpers.executeTransportAction
+import org.opensearch.alerting.util.clusterMetricsMonitorHelpers.toMap
+import org.opensearch.alerting.util.getRoleFilterEnabled
+import org.opensearch.alerting.util.use
+import org.opensearch.alerting.workflow.WorkflowRunContext
 import org.opensearch.client.Client
+import org.opensearch.cluster.routing.Preference
+import org.opensearch.cluster.service.ClusterService
 import org.opensearch.common.io.stream.BytesStreamOutput
-import org.opensearch.common.io.stream.NamedWriteableAwareStreamInput
-import org.opensearch.common.io.stream.NamedWriteableRegistry
+import org.opensearch.common.settings.Settings
 import org.opensearch.common.xcontent.LoggingDeprecationHandler
-import org.opensearch.common.xcontent.NamedXContentRegistry
 import org.opensearch.common.xcontent.XContentType
+import org.opensearch.commons.alerting.model.ClusterMetricsInput
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.commons.alerting.model.SearchInput
+import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput
+import org.opensearch.core.common.io.stream.NamedWriteableRegistry
+import org.opensearch.core.xcontent.NamedXContentRegistry
+import org.opensearch.index.query.BoolQueryBuilder
+import org.opensearch.index.query.MatchQueryBuilder
+import org.opensearch.index.query.QueryBuilder
+import org.opensearch.index.query.QueryBuilders
+import org.opensearch.index.query.TermsQueryBuilder
 import org.opensearch.script.Script
 import org.opensearch.script.ScriptService
 import org.opensearch.script.ScriptType
@@ -33,12 +49,16 @@ import org.opensearch.script.TemplateScript
 import org.opensearch.search.builder.SearchSourceBuilder
 import java.time.Instant
 
+private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO)
+
 /** Service that handles the collection of input results for Monitor executions */
 class InputService(
     val client: Client,
     val scriptService: ScriptService,
     val namedWriteableRegistry: NamedWriteableRegistry,
-    val xContentRegistry: NamedXContentRegistry
+    val xContentRegistry: NamedXContentRegistry,
+    val clusterService: ClusterService,
+    val settings: Settings
 ) {
 
     private val logger = LogManager.getLogger(InputService::class.java)
@@ -47,12 +67,16 @@ class InputService(
         monitor: Monitor,
         periodStart: Instant,
         periodEnd: Instant,
-        prevResult: InputRunResults? = null
+        prevResult: InputRunResults? = null,
+        workflowRunContext: WorkflowRunContext? = null
     ): InputRunResults {
         return try {
            val results = mutableListOf<Map<String, Any>>()
            val aggTriggerAfterKey: MutableMap<String, TriggerAfterKey> = mutableMapOf()
 
+            // If monitor execution is triggered from a workflow
+            val matchingDocIdsPerIndex = workflowRunContext?.matchingDocIdsPerIndex
+
             // TODO: If/when multiple input queries are supported for Bucket-Level Monitor execution, aggTriggerAfterKeys will
             // need to be updated to account for it
             monitor.inputs.forEach { input ->
@@ -63,9 +87,17 @@ class InputService(
                             "period_start" to periodStart.toEpochMilli(),
                             "period_end" to periodEnd.toEpochMilli()
                         )
+
                         // Deep copying query before passing it to rewriteQuery since otherwise, the monitor.input is modified directly
                         // which causes a strange bug where the rewritten query persists on the Monitor across executions
                         val rewrittenQuery = AggregationQueryRewriter.rewriteQuery(deepCopyQuery(input.query), prevResult, monitor.triggers)
+
+                        // Rewrite query to consider the doc ids per given index
+                        if (chainedFindingExist(matchingDocIdsPerIndex) && rewrittenQuery.query() != null) {
+                            val updatedSourceQuery = updateInputQueryWithFindingDocIds(rewrittenQuery.query(), matchingDocIdsPerIndex!!)
+                            rewrittenQuery.query(updatedSourceQuery)
+                        }
+
                         val searchSource = scriptService.compile(
                             Script(
                                 ScriptType.INLINE, Script.DEFAULT_TEMPLATE_LANG,
@@ -76,7 +108,10 @@ class InputService(
                             .newInstance(searchParams)
                             .execute()
 
-                        val searchRequest = SearchRequest().indices(*input.indices.toTypedArray())
+                        val indexes = CrossClusterMonitorUtils.parseIndexesForRemoteSearch(input.indices, clusterService)
+                        val searchRequest = SearchRequest()
+                            .indices(*indexes.toTypedArray())
+                            .preference(Preference.PRIMARY_FIRST.type())
                         XContentType.JSON.xContent().createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, searchSource).use {
                             searchRequest.source(SearchSourceBuilder.fromXContent(it))
                         }
@@ -89,9 +124,36 @@ class InputService(
                         results += searchResponse.convertToMap()
                     }
                     is ClusterMetricsInput -> {
-                        logger.debug("ClusterMetricsInput clusterMetricType: ${input.clusterMetricType}")
-                        val response = executeTransportAction(input, client)
-                        results += response.toMap()
+                        logger.debug("ClusterMetricsInput clusterMetricType: {}", input.clusterMetricType)
+
+                        val remoteMonitoringEnabled = clusterService.clusterSettings.get(AlertingSettings.REMOTE_MONITORING_ENABLED)
+                        logger.debug("Remote monitoring enabled: {}", remoteMonitoringEnabled)
+
+                        val responseMap = mutableMapOf<String, Map<String, Any>>()
+                        if (remoteMonitoringEnabled && input.clusters.isNotEmpty()) {
+                            client.threadPool().threadContext.stashContext().use {
+                                scope.launch {
+                                    input.clusters.forEach { cluster ->
+                                        val targetClient = CrossClusterMonitorUtils.getClientForCluster(cluster, client, clusterService)
+                                        val response = executeTransportAction(input, targetClient)
+                                        // Not all supported APIs reference the cluster name in their response.
+                                        // Mapping each response to the cluster name before adding to results.
+                                        // Not adding this same logic for local-only monitors to avoid breaking existing monitors.
+                                        responseMap[cluster] = response.toMap()
+                                    }
+                                }
+                            }
+                            val inputTimeout = clusterService.clusterSettings.get(AlertingSettings.INPUT_TIMEOUT)
+                            val startTime = Instant.now().toEpochMilli()
+                            while (
+                                (Instant.now().toEpochMilli() - startTime < inputTimeout.millis) &&
+                                (responseMap.size < input.clusters.size)
+                            ) { /* Wait for responses */ }
+                            results += responseMap
+                        } else {
+                            val response = executeTransportAction(input, client)
+                            results += response.toMap()
+                        }
                     }
                     else -> {
                         throw IllegalArgumentException("Unsupported input type: ${input.name()}.")
@@ -105,6 +167,35 @@ class InputService(
         }
     }
 
+    /**
+     * Extends the given query builder with a query that filters the given indices to the given doc ids per index.
+     * Used whenever we want to select only the documents that were found in the chained delegate execution of the current workflow run.
+     *
+     * @param query Original bucket monitor query
+     * @param matchingDocIdsPerIndex Map of finding doc ids grouped by index
+     */
+    private fun updateInputQueryWithFindingDocIds(
+        query: QueryBuilder,
+        matchingDocIdsPerIndex: Map<String, List<String>>,
+    ): QueryBuilder {
+        val queryBuilder = QueryBuilders.boolQuery().must(query)
+        val shouldQuery = QueryBuilders.boolQuery()
+
+        matchingDocIdsPerIndex.forEach { entry ->
+            shouldQuery
+                .should()
+                .add(
+                    BoolQueryBuilder()
+                        .must(MatchQueryBuilder("_index", entry.key))
+                        .must(TermsQueryBuilder("_id", entry.value))
+                )
+        }
+        return queryBuilder.must(shouldQuery)
+    }
+
+    private fun chainedFindingExist(indexToDocIds: Map<String, List<String>>?) =
+        !indexToDocIds.isNullOrEmpty()
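// A sketch of the chained-findings rewrite above, assuming the OpenSearch server artifact on the
// classpath: the original query is AND-ed with an OR of per-index (_index, _id) filters, so a
// delegate monitor only sees documents surfaced by the previous monitor in the workflow.
import org.opensearch.index.query.QueryBuilder
import org.opensearch.index.query.QueryBuilders

fun restrictToFindingDocs(original: QueryBuilder, docIdsPerIndex: Map<String, List<String>>): QueryBuilder {
    val shouldQuery = QueryBuilders.boolQuery()
    docIdsPerIndex.forEach { (index, docIds) ->
        shouldQuery.should(
            QueryBuilders.boolQuery()
                .must(QueryBuilders.matchQuery("_index", index))
                .must(QueryBuilders.termsQuery("_id", docIds))
        )
    }
    return QueryBuilders.boolQuery().must(original).must(shouldQuery)
}

fun main() {
    val rewritten = restrictToFindingDocs(QueryBuilders.matchAllQuery(), mapOf("logs-1" to listOf("a", "b")))
    println(rewritten) // renders the wrapped bool query as JSON
}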
 
     private fun deepCopyQuery(query: SearchSourceBuilder): SearchSourceBuilder {
         val out = BytesStreamOutput()
         query.writeTo(out)
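// The likely shape of deepCopyQuery() (its tail is elided by the next hunk): serialize the builder
// to a byte stream and read it back through a NamedWriteableRegistry-aware input. This is a
// reconstruction for illustration, not the verbatim plugin code.
import org.opensearch.common.io.stream.BytesStreamOutput
import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput
import org.opensearch.core.common.io.stream.NamedWriteableRegistry
import org.opensearch.search.builder.SearchSourceBuilder

fun deepCopy(query: SearchSourceBuilder, registry: NamedWriteableRegistry): SearchSourceBuilder {
    val out = BytesStreamOutput()
    query.writeTo(out) // write the builder's wire representation
    NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry).use { sin ->
        return SearchSourceBuilder(sin) // reconstruct an independent copy from the stream
    }
}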
@@ -139,20 +230,15 @@ class InputService(
                 .newInstance(searchParams)
                 .execute()
 
-            val searchRequest = SearchRequest().indices(*input.indices.toTypedArray())
+            val searchRequest = SearchRequest()
+                .indices(*input.indices.toTypedArray())
+                .preference(Preference.PRIMARY_FIRST.type())
             XContentType.JSON.xContent().createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, searchSource).use {
                 searchRequest.source(SearchSourceBuilder.fromXContent(it))
             }
 
             // Add user role filter for AD result
             client.threadPool().threadContext.stashContext().use {
-                // Currently we have no way to verify if user has AD read permission or not. So we always add user
-                // role filter here no matter AD backend role filter enabled or not. If we don't add user role filter
-                // when AD backend filter disabled, user can run monitor on any detector and get anomaly data even
-                // they have no AD read permission. So if domain disabled AD backend role filter, monitor runner
-                // still can't get AD result with different user backend role, even the monitor user has permission
-                // to read AD result. This is a short term solution to trade off between user experience and security.
-                //
                 // Possible long term solution:
                 // 1.Use secure rest client to send request to AD search result API. If no permission exception,
                 // that mean user has read access on AD result. Then don't need to add user role filter when query
                 // 2.Security provide some transport action to verify if user has permission to search AD result.
                 // Monitor runner will send transport request to check permission first. If security plugin response
                 // is yes, user has permission to query AD result. If AD role filter enabled, we will add user role
                 // filter to protect data at user role level; otherwise, user can query any AD result.
-                addUserBackendRolesFilter(monitor.user, searchRequest.source())
+                if (getRoleFilterEnabled(clusterService, settings, "plugins.anomaly_detection.filter_by_backend_roles")) {
+                    addUserBackendRolesFilter(monitor.user, searchRequest.source())
+                }
                 val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) }
                 results += searchResponse.convertToMap()
             }
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/MonitorMetadataService.kt b/alerting/src/main/kotlin/org/opensearch/alerting/MonitorMetadataService.kt
new file mode 100644
index 000000000..8c7e28734
--- /dev/null
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/MonitorMetadataService.kt
@@ -0,0 +1,289 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting
+
+import kotlinx.coroutines.CoroutineName
+import kotlinx.coroutines.CoroutineScope
+import kotlinx.coroutines.Dispatchers
+import kotlinx.coroutines.SupervisorJob
+import org.apache.logging.log4j.LogManager
+import org.opensearch.ExceptionsHelper
+import org.opensearch.OpenSearchSecurityException
+import org.opensearch.OpenSearchStatusException
+import org.opensearch.action.DocWriteRequest
+import org.opensearch.action.DocWriteResponse
+import org.opensearch.action.admin.indices.get.GetIndexRequest
+import org.opensearch.action.admin.indices.get.GetIndexResponse
+import org.opensearch.action.admin.indices.stats.IndicesStatsAction
+import org.opensearch.action.admin.indices.stats.IndicesStatsRequest
+import org.opensearch.action.admin.indices.stats.IndicesStatsResponse
+import org.opensearch.action.get.GetRequest
+import org.opensearch.action.get.GetResponse
+import org.opensearch.action.index.IndexRequest
+import org.opensearch.action.index.IndexResponse
+import org.opensearch.action.support.WriteRequest
+import org.opensearch.alerting.model.MonitorMetadata
+import org.opensearch.alerting.opensearchapi.suspendUntil
+import org.opensearch.alerting.settings.AlertingSettings
+import org.opensearch.alerting.util.AlertingException
+import org.opensearch.alerting.util.IndexUtils
+import org.opensearch.client.Client
+import org.opensearch.cluster.service.ClusterService
+import org.opensearch.common.settings.Settings
+import org.opensearch.common.unit.TimeValue
+import org.opensearch.common.xcontent.LoggingDeprecationHandler
+import org.opensearch.common.xcontent.XContentFactory
+import org.opensearch.common.xcontent.XContentHelper
+import org.opensearch.common.xcontent.XContentType
+import org.opensearch.commons.alerting.model.DocLevelMonitorInput
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.commons.alerting.model.ScheduledJob
+import org.opensearch.core.rest.RestStatus
+import org.opensearch.core.xcontent.NamedXContentRegistry
+import org.opensearch.core.xcontent.ToXContent
+import org.opensearch.core.xcontent.XContentParser
+import org.opensearch.core.xcontent.XContentParserUtils
+import org.opensearch.index.seqno.SequenceNumbers
+import org.opensearch.transport.RemoteTransportException
+
+private val log = LogManager.getLogger(MonitorMetadataService::class.java)
+
+object MonitorMetadataService :
+    CoroutineScope by CoroutineScope(SupervisorJob() + Dispatchers.Default + CoroutineName("MonitorMetadataService")) {
+
+    private lateinit var client: Client
+    private lateinit var xContentRegistry: NamedXContentRegistry
+    private lateinit var clusterService: ClusterService
+    private lateinit var settings: Settings
+
+    @Volatile
+    private lateinit var indexTimeout: TimeValue
+
+    fun initialize(
+        client: Client,
+        clusterService: ClusterService,
+        xContentRegistry: NamedXContentRegistry,
+        settings: Settings,
+    ) {
+        this.clusterService = clusterService
+        this.client = client
+        this.xContentRegistry = xContentRegistry
+        this.settings = settings
+        this.indexTimeout = AlertingSettings.INDEX_TIMEOUT.get(settings)
+        this.clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.INDEX_TIMEOUT) { indexTimeout = it }
+    }
+
+    @Suppress("ComplexMethod", "ReturnCount")
+    suspend fun upsertMetadata(metadata: MonitorMetadata, updating: Boolean): MonitorMetadata {
+        try {
+            if (clusterService.state().routingTable.hasIndex(ScheduledJob.SCHEDULED_JOBS_INDEX)) {
+                val indexRequest = IndexRequest(ScheduledJob.SCHEDULED_JOBS_INDEX)
+                    .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+                    .source(
+                        metadata.toXContent(
+                            XContentFactory.jsonBuilder(),
+                            ToXContent.MapParams(mapOf("with_type" to "true"))
+                        )
+                    )
+                    .id(metadata.id)
+                    .routing(metadata.monitorId)
+                    .setIfSeqNo(metadata.seqNo)
+                    .setIfPrimaryTerm(metadata.primaryTerm)
+                    .timeout(indexTimeout)
+
+                if (updating) {
+                    indexRequest.id(metadata.id).setIfSeqNo(metadata.seqNo).setIfPrimaryTerm(metadata.primaryTerm)
+                } else {
+                    indexRequest.opType(DocWriteRequest.OpType.CREATE)
+                }
+                val response: IndexResponse = client.suspendUntil { index(indexRequest, it) }
+                when (response.result) {
+                    DocWriteResponse.Result.DELETED, DocWriteResponse.Result.NOOP, DocWriteResponse.Result.NOT_FOUND, null -> {
+                        val failureReason =
+                            "The upsert metadata call failed with a ${response.result?.lowercase} result"
+                        log.error(failureReason)
+                        throw AlertingException(
+                            failureReason,
+                            RestStatus.INTERNAL_SERVER_ERROR,
+                            IllegalStateException(failureReason)
+                        )
+                    }
+
+                    DocWriteResponse.Result.CREATED, DocWriteResponse.Result.UPDATED -> {
+                        log.debug("Successfully upserted MonitorMetadata:${metadata.id}")
+                    }
+                }
+                return metadata.copy(
+                    seqNo = response.seqNo,
+                    primaryTerm = response.primaryTerm
+                )
+            } else {
+                val failureReason = "Job index ${ScheduledJob.SCHEDULED_JOBS_INDEX} does not exist to update monitor metadata"
+                throw OpenSearchStatusException(failureReason, RestStatus.INTERNAL_SERVER_ERROR)
+            }
+        } catch (e: Exception) {
+            throw AlertingException.wrap(e)
+        }
+    }
+
+    /**
+     * Document-level monitors keep the context of their last run. Since one monitor can be part of
+     * multiple workflows, we need to be sure that execution of the current workflow doesn't
+     * interfere with other workflows that depend on the given monitor.
+     */
+    suspend fun getOrCreateMetadata(
+        monitor: Monitor,
+        createWithRunContext: Boolean = true,
+        skipIndex: Boolean = false,
+        workflowMetadataId: String? = null
+    ): Pair<MonitorMetadata, Boolean> {
+        try {
+            val created = true
+            val metadata = getMetadata(monitor, workflowMetadataId)
+            return if (metadata != null) {
+                metadata to !created
+            } else {
+                val newMetadata = createNewMetadata(monitor, createWithRunContext = createWithRunContext, workflowMetadataId)
+                if (skipIndex) {
+                    newMetadata to created
+                } else {
+                    upsertMetadata(newMetadata, updating = false) to created
+                }
+            }
+        } catch (e: Exception) {
+            throw AlertingException.wrap(e)
+        }
+    }
+
+    suspend fun getMetadata(monitor: Monitor, workflowMetadataId: String? = null): MonitorMetadata? {
+        try {
+            val metadataId = MonitorMetadata.getId(monitor, workflowMetadataId)
+            val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, metadataId).routing(monitor.id)
+
+            val getResponse: GetResponse = client.suspendUntil { get(getRequest, it) }
+            return if (getResponse.isExists) {
+                val xcp = XContentHelper.createParser(
+                    xContentRegistry, LoggingDeprecationHandler.INSTANCE,
+                    getResponse.sourceAsBytesRef, XContentType.JSON
+                )
+                XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp)
+                MonitorMetadata.parse(xcp, getResponse.id, getResponse.seqNo, getResponse.primaryTerm)
+            } else {
+                null
+            }
+        } catch (e: Exception) {
+            if (e.message?.contains("no such index") == true) {
+                return null
+            } else {
+                throw AlertingException.wrap(e)
+            }
+        }
+    }
+
+    suspend fun recreateRunContext(metadata: MonitorMetadata, monitor: Monitor): MonitorMetadata {
+        try {
+            val monitorIndex = if (monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR)
+                (monitor.inputs[0] as DocLevelMonitorInput).indices[0]
+            else null
+            val runContext = if (monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR)
+                createFullRunContext(monitorIndex, metadata.lastRunContext as MutableMap<String, MutableMap<String, Any>>)
+            else null
+            return if (runContext != null) {
+                metadata.copy(
+                    lastRunContext = runContext
+                )
+            } else {
+                metadata
+            }
+        } catch (e: Exception) {
+            throw AlertingException.wrap(e)
+        }
+    }
+
+    private suspend fun createNewMetadata(
+        monitor: Monitor,
+        createWithRunContext: Boolean,
+        workflowMetadataId: String? = null,
+    ): MonitorMetadata {
+        val monitorIndex = if (monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR)
+            (monitor.inputs[0] as DocLevelMonitorInput).indices[0]
+        else null
+        val runContext = if (monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR && createWithRunContext)
+            createFullRunContext(monitorIndex)
+        else emptyMap()
+        return MonitorMetadata(
+            id = MonitorMetadata.getId(monitor, workflowMetadataId),
+            seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO,
+            primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM,
+            monitorId = monitor.id,
+            lastActionExecutionTimes = emptyList(),
+            lastRunContext = runContext,
+            sourceToQueryIndexMapping = mutableMapOf()
+        )
+    }
+
+    suspend fun createFullRunContext(
+        index: String?,
+        existingRunContext: MutableMap<String, MutableMap<String, Any>>? = null,
+    ): MutableMap<String, MutableMap<String, Any>> {
+        val lastRunContext = existingRunContext?.toMutableMap() ?: mutableMapOf()
+        try {
+            if (index == null) return mutableMapOf()
+
+            val indices = mutableListOf<String>()
+            if (IndexUtils.isAlias(index, clusterService.state()) ||
+                IndexUtils.isDataStream(index, clusterService.state())
+            ) {
+                IndexUtils.getWriteIndex(index, clusterService.state())?.let { indices.add(it) }
+            } else {
+                val getIndexRequest = GetIndexRequest().indices(index)
+                val getIndexResponse: GetIndexResponse = client.suspendUntil {
+                    client.admin().indices().getIndex(getIndexRequest, it)
+                }
+                indices.addAll(getIndexResponse.indices())
+            }
+
+            indices.forEach { indexName ->
+                if (!lastRunContext.containsKey(indexName)) {
+                    lastRunContext[indexName] = createRunContextForIndex(indexName)
+                }
+            }
+        } catch (e: RemoteTransportException) {
+            val unwrappedException = ExceptionsHelper.unwrapCause(e) as Exception
+            throw AlertingException("Failed fetching index stats", RestStatus.INTERNAL_SERVER_ERROR, unwrappedException)
+        } catch (e: OpenSearchSecurityException) {
+            throw AlertingException(
+                "Failed fetching index stats - missing required index permissions: ${e.localizedMessage}",
+                RestStatus.INTERNAL_SERVER_ERROR,
+                e
+            )
+        } catch (e: Exception) {
+            throw AlertingException("Failed fetching index stats", RestStatus.INTERNAL_SERVER_ERROR, e)
+        }
+        return lastRunContext
+    }
+
+    suspend fun createRunContextForIndex(index: String, createdRecently: Boolean = false): MutableMap<String, Any> {
+        val request = IndicesStatsRequest().indices(index).clear()
+        val response: IndicesStatsResponse = client.suspendUntil { execute(IndicesStatsAction.INSTANCE, request, it) }
+        if (response.status != RestStatus.OK) {
+            val errorMessage = "Failed fetching index stats for index:$index"
+            throw AlertingException(errorMessage, RestStatus.INTERNAL_SERVER_ERROR, IllegalStateException(errorMessage))
+        }
+        val shards = response.shards.filter { it.shardRouting.primary() && it.shardRouting.active() }
+        val lastRunContext = HashMap<String, Any>()
+        lastRunContext["index"] = index
+        val count = shards.size
+        lastRunContext["shards_count"] = count
+
+        for (shard in shards) {
+            lastRunContext[shard.shardRouting.id.toString()] =
+                if (createdRecently) -1L
+                else shard.seqNoStats?.globalCheckpoint ?: SequenceNumbers.UNASSIGNED_SEQ_NO
+        }
+        return lastRunContext
+    }
+}
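// A toy version of the per-shard run context built by createRunContextForIndex() above: a map
// holding the index name, the primary shard count, and one sequence number per shard id (-1
// meaning "start from the beginning" for a newly created index). Values are illustrative.
fun runContextFor(index: String, shardSeqNos: Map<Int, Long>, createdRecently: Boolean): Map<String, Any> {
    val ctx = hashMapOf<String, Any>("index" to index, "shards_count" to shardSeqNos.size)
    shardSeqNos.forEach { (shardId, globalCheckpoint) ->
        ctx[shardId.toString()] = if (createdRecently) -1L else globalCheckpoint
    }
    return ctx
}

fun main() {
    // e.g. {index=my_log_index, shards_count=2, 0=41, 1=17}
    println(runContextFor("my_log_index", mapOf(0 to 41L, 1 to 17L), createdRecently = false))
}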
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunner.kt b/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunner.kt
index fd89acf8f..f8d5fe686 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunner.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunner.kt
@@ -10,11 +10,7 @@ import org.opensearch.alerting.action.GetDestinationsAction
 import org.opensearch.alerting.action.GetDestinationsRequest
 import org.opensearch.alerting.action.GetDestinationsResponse
 import org.opensearch.alerting.model.ActionRunResult
-import org.opensearch.alerting.model.Monitor
-import org.opensearch.alerting.model.MonitorMetadata
 import org.opensearch.alerting.model.MonitorRunResult
-import org.opensearch.alerting.model.Table
-import org.opensearch.alerting.model.action.Action
 import org.opensearch.alerting.model.destination.Destination
 import org.opensearch.alerting.opensearchapi.InjectorContextElement
 import org.opensearch.alerting.opensearchapi.suspendUntil
@@ -23,15 +19,19 @@ import org.opensearch.alerting.script.QueryLevelTriggerExecutionContext
 import org.opensearch.alerting.script.TriggerExecutionContext
 import org.opensearch.alerting.util.destinationmigration.NotificationActionConfigs
 import org.opensearch.alerting.util.destinationmigration.NotificationApiUtils.Companion.getNotificationConfigInfo
-import org.opensearch.alerting.util.destinationmigration.createMessageContent
 import org.opensearch.alerting.util.destinationmigration.getTitle
 import org.opensearch.alerting.util.destinationmigration.publishLegacyNotification
 import org.opensearch.alerting.util.destinationmigration.sendNotification
 import org.opensearch.alerting.util.isAllowed
 import org.opensearch.alerting.util.isTestAction
+import org.opensearch.alerting.util.use
+import org.opensearch.alerting.workflow.WorkflowRunContext
 import org.opensearch.client.node.NodeClient
-import org.opensearch.common.Strings
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.commons.alerting.model.Table
+import org.opensearch.commons.alerting.model.action.Action
 import org.opensearch.commons.notifications.model.NotificationConfigInfo
+import org.opensearch.core.common.Strings
 import java.time.Instant
 
 abstract class MonitorRunner {
@@ -41,7 +41,9 @@ abstract class MonitorRunner {
         monitorCtx: MonitorRunnerExecutionContext,
         periodStart: Instant,
         periodEnd: Instant,
-        dryRun: Boolean
+        dryRun: Boolean,
+        workflowRunContext: WorkflowRunContext? = null,
+        executionId: String
     ): MonitorRunResult<*>
 
     suspend fun runAction(
@@ -57,23 +59,31 @@ abstract class MonitorRunner {
         }
         val actionOutput = mutableMapOf<String, String>()
         actionOutput[Action.SUBJECT] = if (action.subjectTemplate != null)
-            MonitorRunnerService.compileTemplate(action.subjectTemplate, ctx)
+            MonitorRunnerService.compileTemplate(action.subjectTemplate!!, ctx)
         else ""
         actionOutput[Action.MESSAGE] = MonitorRunnerService.compileTemplate(action.messageTemplate, ctx)
         if (Strings.isNullOrEmpty(actionOutput[Action.MESSAGE])) {
             throw IllegalStateException("Message content missing in the Destination with id: ${action.destinationId}")
         }
         if (!dryrun) {
-            val roles = MonitorRunnerService.getRolesForMonitor(monitor)
-            withClosableContext(
-                InjectorContextElement(monitor.id, monitorCtx.settings!!, monitorCtx.threadPool!!.threadContext, roles)
-            ) {
-                actionOutput[Action.MESSAGE_ID] = getConfigAndSendNotification(
-                    action,
-                    monitorCtx,
-                    actionOutput[Action.SUBJECT],
-                    actionOutput[Action.MESSAGE]!!
-                )
+            val client = monitorCtx.client
+            client!!.threadPool().threadContext.stashContext().use {
+                withClosableContext(
+                    InjectorContextElement(
+                        monitor.id,
+                        monitorCtx.settings!!,
+                        monitorCtx.threadPool!!.threadContext,
+                        monitor.user?.roles,
+                        monitor.user
+                    )
+                ) {
+                    actionOutput[Action.MESSAGE_ID] = getConfigAndSendNotification(
+                        action,
+                        monitorCtx,
+                        actionOutput[Action.SUBJECT],
+                        actionOutput[Action.MESSAGE]!!
+                    )
+                }
             }
         }
         ActionRunResult(action.id, action.name, actionOutput, false, MonitorRunnerService.currentTime(), null)
@@ -90,7 +100,7 @@ abstract class MonitorRunner {
     ): String {
         val config = getConfigForNotificationAction(action, monitorCtx)
         if (config.destination == null && config.channel == null) {
-            throw IllegalStateException("Unable to find a Notification Channel or Destination config with id [${action.id}]")
+            throw IllegalStateException("Unable to find a Notification Channel or Destination config with id [${action.destinationId}]")
         }
 
         // Adding a check on TEST_ACTION Destination type here to avoid supporting it as a LegacyBaseMessage type
@@ -110,7 +120,7 @@ abstract class MonitorRunner {
             ?.sendNotification(
                 monitorCtx.client!!,
                 config.channel.getTitle(subject),
-                config.channel.createMessageContent(subject, message)
+                message
             ) ?: actionResponseContent
 
         actionResponseContent = config.destination
@@ -180,8 +190,4 @@ abstract class MonitorRunner {
 
         return NotificationActionConfigs(destination, channel)
     }
-
-    protected fun createMonitorMetadata(monitorId: String): MonitorMetadata {
-        return MonitorMetadata("$monitorId-metadata", monitorId, emptyList(), emptyMap())
-    }
 }
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerExecutionContext.kt b/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerExecutionContext.kt
index 55624d66e..2c98495de 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerExecutionContext.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerExecutionContext.kt
@@ -13,10 +13,11 @@ import org.opensearch.alerting.settings.DestinationSettings
 import org.opensearch.alerting.settings.LegacyOpenDistroDestinationSettings
 import org.opensearch.alerting.util.DocLevelMonitorQueries
 import org.opensearch.client.Client
+import org.opensearch.cluster.metadata.IndexNameExpressionResolver
 import org.opensearch.cluster.service.ClusterService
 import org.opensearch.common.settings.Settings
 import org.opensearch.common.unit.TimeValue
-import org.opensearch.common.xcontent.NamedXContentRegistry
+import org.opensearch.core.xcontent.NamedXContentRegistry
 import org.opensearch.script.ScriptService
 import org.opensearch.threadpool.ThreadPool
 
@@ -25,6 +26,7 @@ data class MonitorRunnerExecutionContext(
     var clusterService: ClusterService? = null,
     var client: Client? = null,
     var xContentRegistry: NamedXContentRegistry? = null,
+    var indexNameExpressionResolver: IndexNameExpressionResolver? = null,
     var scriptService: ScriptService? = null,
     var settings: Settings? = null,
     var threadPool: ThreadPool? = null,
@@ -33,6 +35,7 @@ data class MonitorRunnerExecutionContext(
     var triggerService: TriggerService? = null,
     var alertService: AlertService? = null,
     var docLevelMonitorQueries: DocLevelMonitorQueries? = null,
+    var workflowService: WorkflowService? = null,
 
     @Volatile var retryPolicy: BackoffPolicy? = null,
     @Volatile var moveAlertsRetryPolicy: BackoffPolicy? = null,
@@ -44,5 +47,6 @@ data class MonitorRunnerExecutionContext(
     @Volatile var destinationContextFactory: DestinationContextFactory? = null,
 
     @Volatile var maxActionableAlertCount: Long = AlertingSettings.DEFAULT_MAX_ACTIONABLE_ALERT_COUNT,
-    @Volatile var indexTimeout: TimeValue? = null
+    @Volatile var indexTimeout: TimeValue? = null,
+    @Volatile var findingsIndexBatchSize: Int = AlertingSettings.DEFAULT_FINDINGS_INDEXING_BATCH_SIZE
 )
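// The register-a-consumer pattern used for fields like findingsIndexBatchSize above, in
// miniature: a @Volatile field is seeded from the current setting and then kept fresh by an
// update consumer. The holder and consumer types are simplified stand-ins for OpenSearch's
// ClusterSettings API.
class BatchSizeHolder(initial: Int) {
    @Volatile
    var findingsIndexBatchSize: Int = initial
        private set

    // In the plugin, a lambda like this is handed to clusterSettings.addSettingsUpdateConsumer(...)
    val consumer: (Int) -> Unit = { findingsIndexBatchSize = it }
}

fun main() {
    val holder = BatchSizeHolder(initial = 1000)
    holder.consumer(500) // simulate a dynamic cluster-settings update
    println(holder.findingsIndexBatchSize) // 500
}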
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerService.kt b/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerService.kt
index 7b2ec5116..103da2230 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerService.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/MonitorRunnerService.kt
@@ -12,19 +12,20 @@ import kotlinx.coroutines.SupervisorJob
 import kotlinx.coroutines.launch
 import org.apache.logging.log4j.LogManager
 import org.opensearch.action.bulk.BackoffPolicy
+import org.opensearch.action.support.master.AcknowledgedResponse
 import org.opensearch.alerting.alerts.AlertIndices
-import org.opensearch.alerting.alerts.moveAlerts
+import org.opensearch.alerting.alerts.AlertMover.Companion.moveAlerts
 import org.opensearch.alerting.core.JobRunner
-import org.opensearch.alerting.core.model.ScheduledJob
-import org.opensearch.alerting.model.Alert
-import org.opensearch.alerting.model.Monitor
+import org.opensearch.alerting.core.ScheduledJobIndices
 import org.opensearch.alerting.model.MonitorRunResult
-import org.opensearch.alerting.model.action.Action
+import org.opensearch.alerting.model.WorkflowRunResult
 import org.opensearch.alerting.model.destination.DestinationContextFactory
 import org.opensearch.alerting.opensearchapi.retry
 import org.opensearch.alerting.script.TriggerExecutionContext
+import org.opensearch.alerting.settings.AlertingSettings
 import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_BACKOFF_COUNT
 import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_BACKOFF_MILLIS
+import org.opensearch.alerting.settings.AlertingSettings.Companion.FINDINGS_INDEXING_BATCH_SIZE
 import org.opensearch.alerting.settings.AlertingSettings.Companion.INDEX_TIMEOUT
 import org.opensearch.alerting.settings.AlertingSettings.Companion.MAX_ACTIONABLE_ALERT_COUNT
 import org.opensearch.alerting.settings.AlertingSettings.Companion.MOVE_ALERTS_BACKOFF_COUNT
@@ -33,18 +34,30 @@ import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST
 import org.opensearch.alerting.settings.DestinationSettings.Companion.HOST_DENY_LIST
 import org.opensearch.alerting.settings.DestinationSettings.Companion.loadDestinationSettings
 import org.opensearch.alerting.util.DocLevelMonitorQueries
-import org.opensearch.alerting.util.isBucketLevelMonitor
+import org.opensearch.alerting.util.IndexUtils
 import org.opensearch.alerting.util.isDocLevelMonitor
+import org.opensearch.alerting.workflow.CompositeWorkflowRunner
 import org.opensearch.client.Client
+import org.opensearch.cluster.metadata.IndexNameExpressionResolver
 import org.opensearch.cluster.service.ClusterService
-import org.opensearch.common.component.AbstractLifecycleComponent
+import org.opensearch.common.lifecycle.AbstractLifecycleComponent
 import org.opensearch.common.settings.Settings
-import org.opensearch.common.xcontent.NamedXContentRegistry
+import org.opensearch.commons.alerting.model.Alert
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.commons.alerting.model.ScheduledJob
+import org.opensearch.commons.alerting.model.Workflow
+import org.opensearch.commons.alerting.model.action.Action
+import org.opensearch.commons.alerting.util.isBucketLevelMonitor
+import org.opensearch.core.action.ActionListener
+import org.opensearch.core.xcontent.NamedXContentRegistry
 import org.opensearch.script.Script
 import org.opensearch.script.ScriptService
 import org.opensearch.script.TemplateScript
 import org.opensearch.threadpool.ThreadPool
 import java.time.Instant
+import java.time.LocalDateTime
+import java.time.ZoneOffset
+import java.util.UUID
 import kotlin.coroutines.CoroutineContext
 
 object MonitorRunnerService : JobRunner, CoroutineScope, AbstractLifecycleComponent() {
@@ -52,7 +65,6 @@ object MonitorRunnerService : JobRunner, CoroutineScope, AbstractLifecycleCompon
     private val logger = LogManager.getLogger(javaClass)
 
     var monitorCtx: MonitorRunnerExecutionContext = MonitorRunnerExecutionContext()
-
     private lateinit var runnerSupervisor: Job
     override val coroutineContext: CoroutineContext
         get() = Dispatchers.Default + runnerSupervisor
@@ -72,6 +84,11 @@ object MonitorRunnerService : JobRunner, CoroutineScope, AbstractLifecycleCompon
         return this
     }
 
+    fun registerIndexNameExpressionResolver(indexNameExpressionResolver: IndexNameExpressionResolver): MonitorRunnerService {
+        this.monitorCtx.indexNameExpressionResolver = indexNameExpressionResolver
+        return this
+    }
+
     fun registerScriptService(scriptService: ScriptService): MonitorRunnerService {
         this.monitorCtx.scriptService = scriptService
         return this
@@ -112,6 +129,11 @@ object MonitorRunnerService : JobRunner, CoroutineScope, AbstractLifecycleCompon
         return this
     }
 
+    fun registerWorkflowService(workflowService: WorkflowService): MonitorRunnerService {
+        this.monitorCtx.workflowService = workflowService
+        return this
+    }
+
     // Must be called after registerClusterService and registerSettings in AlertingPlugin
     fun registerConsumers(): MonitorRunnerService {
         monitorCtx.retryPolicy = BackoffPolicy.constantBackoff(
@@ -127,8 +149,10 @@ object MonitorRunnerService : JobRunner, CoroutineScope, AbstractLifecycleCompon
             MOVE_ALERTS_BACKOFF_MILLIS.get(monitorCtx.settings),
             MOVE_ALERTS_BACKOFF_COUNT.get(monitorCtx.settings)
         )
-        monitorCtx.clusterService!!.clusterSettings.addSettingsUpdateConsumer(MOVE_ALERTS_BACKOFF_MILLIS, MOVE_ALERTS_BACKOFF_COUNT) {
-            millis, count ->
+        monitorCtx.clusterService!!.clusterSettings.addSettingsUpdateConsumer(
+            MOVE_ALERTS_BACKOFF_MILLIS,
+            MOVE_ALERTS_BACKOFF_COUNT
+        ) { millis, count ->
             monitorCtx.moveAlertsRetryPolicy = BackoffPolicy.exponentialBackoff(millis, count)
         }
 
@@ -147,6 +171,11 @@ object MonitorRunnerService : JobRunner, CoroutineScope, AbstractLifecycleCompon
 
         monitorCtx.indexTimeout = INDEX_TIMEOUT.get(monitorCtx.settings)
 
+        monitorCtx.findingsIndexBatchSize = FINDINGS_INDEXING_BATCH_SIZE.get(monitorCtx.settings)
+        monitorCtx.clusterService!!.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.FINDINGS_INDEXING_BATCH_SIZE) {
+            monitorCtx.findingsIndexBatchSize = it
+        }
+
         return this
     }
 
@@ -174,28 +203,45 @@ object MonitorRunnerService : JobRunner, CoroutineScope, AbstractLifecycleCompon
         runnerSupervisor.cancel()
     }
 
-    override fun doClose() { }
+    override fun doClose() {}
 
     override fun postIndex(job: ScheduledJob) {
-        if (job !is Monitor) {
+        if (job is Monitor) {
+            launch {
+                try {
+                    monitorCtx.moveAlertsRetryPolicy!!.retry(logger) {
+                        if (monitorCtx.alertIndices!!.isAlertInitialized(job.dataSources)) {
+                            moveAlerts(monitorCtx.client!!, job.id, job)
+                        }
+                    }
+                } catch (e: Exception) {
+                    logger.error("Failed to move active alerts for monitor [${job.id}].", e)
+                }
+            }
+        } else if (job is Workflow) {
+            launch {
+                try {
+                    monitorCtx.moveAlertsRetryPolicy!!.retry(logger) {
+                        moveAlerts(monitorCtx.client!!, job.id, job, monitorCtx)
+                    }
+                } catch (e: Exception) {
+                    logger.error("Failed to move active alerts for workflow [${job.id}].", e)
+                }
+            }
+        } else {
             throw IllegalArgumentException("Invalid job type")
         }
+    }
 
+    override fun postDelete(jobId: String) {
         launch {
             try {
                 monitorCtx.moveAlertsRetryPolicy!!.retry(logger) {
-                    if (monitorCtx.alertIndices!!.isAlertInitialized()) {
-                        moveAlerts(monitorCtx.client!!, job.id, job)
-                    }
+                    moveAlerts(monitorCtx.client!!, jobId, null, monitorCtx)
                 }
             } catch (e: Exception) {
-                logger.error("Failed to move active alerts for monitor [${job.id}].", e)
+                logger.error("Failed to move active alerts for workflow [$jobId]. Could be a monitor", e)
             }
-        }
-    }
-
-    override fun postDelete(jobId: String) {
-        launch {
             try {
                 monitorCtx.moveAlertsRetryPolicy!!.retry(logger) {
                     if (monitorCtx.alertIndices!!.isAlertInitialized()) {
@@ -209,22 +255,61 @@
         }
     }
 
     override fun runJob(job: ScheduledJob, periodStart: Instant, periodEnd: Instant) {
-        if (job !is Monitor) {
-            throw IllegalArgumentException("Invalid job type")
-        }
-        launch {
-            runJob(job, periodStart, periodEnd, false)
+        when (job) {
+            is Workflow -> {
+                launch {
+                    runJob(job, periodStart, periodEnd, false)
+                }
+            }
+            is Monitor -> {
+                launch {
+                    runJob(job, periodStart, periodEnd, false)
+                }
+            }
+            else -> {
+                throw IllegalArgumentException("Invalid job type")
+            }
         }
     }
 
+    suspend fun runJob(workflow: Workflow, periodStart: Instant, periodEnd: Instant, dryrun: Boolean): WorkflowRunResult {
+        return CompositeWorkflowRunner.runWorkflow(workflow, monitorCtx, periodStart, periodEnd, dryrun)
+    }
+
     suspend fun runJob(job: ScheduledJob, periodStart: Instant, periodEnd: Instant, dryrun: Boolean): MonitorRunResult<*> {
+        // Updating the scheduled job index at the start of monitor execution runs, for the case when there has been
+        // an upgrade and the schema mapping has not been updated yet.
+        if (!IndexUtils.scheduledJobIndexUpdated && monitorCtx.clusterService != null && monitorCtx.client != null) {
+            IndexUtils.updateIndexMapping(
+                ScheduledJob.SCHEDULED_JOBS_INDEX,
+                ScheduledJobIndices.scheduledJobMappings(), monitorCtx.clusterService!!.state(), monitorCtx.client!!.admin().indices(),
+                object : ActionListener<AcknowledgedResponse> {
+                    override fun onResponse(response: AcknowledgedResponse) {
+                    }
+
+                    override fun onFailure(t: Exception) {
+                        logger.error("Failed to update config index schema", t)
+                    }
+                }
+            )
+        }
+
+        if (job is Workflow) {
+            logger.info("Executing scheduled workflow - id: ${job.id}, periodStart: $periodStart, periodEnd: $periodEnd, dryrun: $dryrun")
+            CompositeWorkflowRunner.runWorkflow(workflow = job, monitorCtx, periodStart, periodEnd, dryrun)
+        }
         val monitor = job as Monitor
+        val executionId = "${monitor.id}_${LocalDateTime.now(ZoneOffset.UTC)}_${UUID.randomUUID()}"
+        logger.info(
+            "Executing scheduled monitor - id: ${monitor.id}, type: ${monitor.monitorType.name}, periodStart: $periodStart, " +
+                "periodEnd: $periodEnd, dryrun: $dryrun, executionId: $executionId"
+        )
         val runResult = if (monitor.isBucketLevelMonitor()) {
-            BucketLevelMonitorRunner.runMonitor(monitor, monitorCtx, periodStart, periodEnd, dryrun)
+            BucketLevelMonitorRunner.runMonitor(monitor, monitorCtx, periodStart, periodEnd, dryrun, executionId = executionId)
         } else if (monitor.isDocLevelMonitor()) {
-            DocumentLevelMonitorRunner.runMonitor(monitor, monitorCtx, periodStart, periodEnd, dryrun)
+            DocumentLevelMonitorRunner.runMonitor(monitor, monitorCtx, periodStart, periodEnd, dryrun, executionId = executionId)
         } else {
-            QueryLevelMonitorRunner.runMonitor(monitor, monitorCtx, periodStart, periodEnd, dryrun)
+            QueryLevelMonitorRunner.runMonitor(monitor, monitorCtx, periodStart, periodEnd, dryrun, executionId = executionId)
         }
         return runResult
     }
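// How the executionId used above is assembled: monitor id + UTC timestamp + a random UUID, which
// keeps ids unique across overlapping runs of the same monitor. Runnable on its own.
import java.time.LocalDateTime
import java.time.ZoneOffset
import java.util.UUID

fun newExecutionId(monitorId: String): String =
    "${monitorId}_${LocalDateTime.now(ZoneOffset.UTC)}_${UUID.randomUUID()}"

fun main() {
    println(newExecutionId("my-monitor")) // e.g. my-monitor_2023-08-01T12:00:00.000_6f1e...
}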
 
@@ -244,7 +329,7 @@ object MonitorRunnerService : JobRunner, CoroutineScope, AbstractLifecycleCompon
             // TODO: Remove "AmazonES_all_access" role?
             monitorCtx.settings!!.getAsList("", listOf("all_access", "AmazonES_all_access"))
         } else {
-            monitor.user.roles
+            monitor.user!!.roles
         }
     }
 
@@ -253,13 +338,15 @@ object MonitorRunnerService : JobRunner, CoroutineScope, AbstractLifecycleCompon
     internal fun currentTime() = Instant.ofEpochMilli(monitorCtx.threadPool!!.absoluteTimeInMillis())
 
     internal fun isActionActionable(action: Action, alert: Alert?): Boolean {
+        if (alert != null && alert.state == Alert.State.AUDIT)
+            return false
         if (alert == null || action.throttle == null) {
             return true
         }
         if (action.throttleEnabled) {
             val result = alert.actionExecutionResults.firstOrNull { r -> r.actionId == action.id }
             val lastExecutionTime: Instant? = result?.lastExecutionTime
-            val throttledTimeBound = currentTime().minus(action.throttle.value.toLong(), action.throttle.unit)
+            val throttledTimeBound = currentTime().minus(action.throttle!!.value.toLong(), action.throttle!!.unit)
             return (lastExecutionTime == null || lastExecutionTime.isBefore(throttledTimeBound))
         }
         return true
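// A runnable sketch of the throttle check in isActionActionable() above: an action fires only if
// it has never run, or if its last run is older than the throttle window.
import java.time.Instant
import java.time.temporal.ChronoUnit

fun isThrottled(lastExecutionTime: Instant?, throttleValue: Long, unit: ChronoUnit, now: Instant): Boolean {
    val throttledTimeBound = now.minus(throttleValue, unit)
    return !(lastExecutionTime == null || lastExecutionTime.isBefore(throttledTimeBound))
}

fun main() {
    val now = Instant.now()
    println(isThrottled(now.minusSeconds(30), 10, ChronoUnit.MINUTES, now)) // true: ran 30s ago, inside a 10m window
    println(isThrottled(null, 10, ChronoUnit.MINUTES, now)) // false: never ran, so not throttled
}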
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/QueryLevelMonitorRunner.kt b/alerting/src/main/kotlin/org/opensearch/alerting/QueryLevelMonitorRunner.kt
index 833807348..a77121069 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/QueryLevelMonitorRunner.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/QueryLevelMonitorRunner.kt
@@ -6,15 +6,17 @@ package org.opensearch.alerting
 
 import org.apache.logging.log4j.LogManager
-import org.opensearch.alerting.model.Alert
-import org.opensearch.alerting.model.Monitor
 import org.opensearch.alerting.model.MonitorRunResult
-import org.opensearch.alerting.model.QueryLevelTrigger
 import org.opensearch.alerting.model.QueryLevelTriggerRunResult
 import org.opensearch.alerting.opensearchapi.InjectorContextElement
 import org.opensearch.alerting.opensearchapi.withClosableContext
 import org.opensearch.alerting.script.QueryLevelTriggerExecutionContext
+import org.opensearch.alerting.settings.AlertingSettings
 import org.opensearch.alerting.util.isADMonitor
+import org.opensearch.alerting.workflow.WorkflowRunContext
+import org.opensearch.commons.alerting.model.Alert
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.commons.alerting.model.QueryLevelTrigger
 import java.time.Instant
 
 object QueryLevelMonitorRunner : MonitorRunner() {
@@ -25,7 +27,9 @@ object QueryLevelMonitorRunner : MonitorRunner() {
         monitorCtx: MonitorRunnerExecutionContext,
         periodStart: Instant,
         periodEnd: Instant,
-        dryrun: Boolean
+        dryrun: Boolean,
+        workflowRunContext: WorkflowRunContext?,
+        executionId: String
     ): MonitorRunResult<QueryLevelTriggerRunResult> {
         val roles = MonitorRunnerService.getRolesForMonitor(monitor)
         logger.debug("Running monitor: ${monitor.name} with roles: $roles Thread: ${Thread.currentThread().name}")
@@ -36,9 +40,9 @@ object QueryLevelMonitorRunner : MonitorRunner() {
         var monitorResult = MonitorRunResult<QueryLevelTriggerRunResult>(monitor.name, periodStart, periodEnd)
         val currentAlerts = try {
-            monitorCtx.alertIndices!!.createOrUpdateAlertIndex()
-            monitorCtx.alertIndices!!.createOrUpdateInitialAlertHistoryIndex()
-            monitorCtx.alertService!!.loadCurrentAlertsForQueryLevelMonitor(monitor)
+            monitorCtx.alertIndices!!.createOrUpdateAlertIndex(monitor.dataSources)
+            monitorCtx.alertIndices!!.createOrUpdateInitialAlertHistoryIndex(monitor.dataSources)
+            monitorCtx.alertService!!.loadCurrentAlertsForQueryLevelMonitor(monitor, workflowRunContext)
         } catch (e: Exception) {
             // We can't save ERROR alerts to the index here as we don't know if there are existing ACTIVE alerts
             val id = if (monitor.id.trim().isEmpty()) "_na_" else monitor.id
@@ -48,7 +52,7 @@ object QueryLevelMonitorRunner : MonitorRunner() {
         if (!isADMonitor(monitor)) {
             withClosableContext(InjectorContextElement(monitor.id, monitorCtx.settings!!, monitorCtx.threadPool!!.threadContext, roles)) {
                 monitorResult = monitorResult.copy(
-                    inputResults = monitorCtx.inputService!!.collectInputResults(monitor, periodStart, periodEnd)
+                    inputResults = monitorCtx.inputService!!.collectInputResults(monitor, periodStart, periodEnd, null, workflowRunContext)
                 )
             }
         } else {
@@ -62,10 +66,24 @@ object QueryLevelMonitorRunner : MonitorRunner() {
         for (trigger in monitor.triggers) {
             val currentAlert = currentAlerts[trigger]
             val triggerCtx = QueryLevelTriggerExecutionContext(monitor, trigger as QueryLevelTrigger, monitorResult, currentAlert)
-            val triggerResult = monitorCtx.triggerService!!.runQueryLevelTrigger(monitor, trigger, triggerCtx)
+            val triggerResult = when (monitor.monitorType) {
+                Monitor.MonitorType.QUERY_LEVEL_MONITOR ->
+                    monitorCtx.triggerService!!.runQueryLevelTrigger(monitor, trigger, triggerCtx)
+                Monitor.MonitorType.CLUSTER_METRICS_MONITOR -> {
+                    val remoteMonitoringEnabled =
+                        monitorCtx.clusterService!!.clusterSettings.get(AlertingSettings.REMOTE_MONITORING_ENABLED)
+                    logger.debug("Remote monitoring enabled: {}", remoteMonitoringEnabled)
+                    if (remoteMonitoringEnabled)
+                        monitorCtx.triggerService!!.runClusterMetricsTrigger(monitor, trigger, triggerCtx, monitorCtx.clusterService!!)
+                    else monitorCtx.triggerService!!.runQueryLevelTrigger(monitor, trigger, triggerCtx)
+                }
+                else ->
+                    throw IllegalArgumentException("Unsupported monitor type: ${monitor.monitorType.name}.")
+            }
+
             triggerResults[trigger.id] = triggerResult
 
-            if (monitorCtx.triggerService!!.isQueryLevelTriggerActionable(triggerCtx, triggerResult)) {
+            if (monitorCtx.triggerService!!.isQueryLevelTriggerActionable(triggerCtx, triggerResult, workflowRunContext)) {
                 val actionCtx = triggerCtx.copy(error = monitorResult.error ?: triggerResult.error)
                 for (action in trigger.actions) {
                     triggerResult.actionResults[action.id] = this.runAction(action, actionCtx, monitorCtx, monitor, dryrun)
@@ -73,15 +91,25 @@ object QueryLevelMonitorRunner : MonitorRunner() {
             }
 
             val updatedAlert = monitorCtx.alertService!!.composeQueryLevelAlert(
-                triggerCtx, triggerResult,
-                monitorResult.alertError() ?: triggerResult.alertError()
+                triggerCtx,
+                triggerResult,
+                monitorResult.alertError() ?: triggerResult.alertError(),
+                executionId,
+                workflowRunContext
             )
             if (updatedAlert != null) updatedAlerts += updatedAlert
         }
 
         // Don't save alerts if this is a test monitor
         if (!dryrun && monitor.id != Monitor.NO_ID) {
-            monitorCtx.retryPolicy?.let { monitorCtx.alertService!!.saveAlerts(updatedAlerts, it) }
+            monitorCtx.retryPolicy?.let {
+                monitorCtx.alertService!!.saveAlerts(
+                    monitor.dataSources,
+                    updatedAlerts,
+                    it,
+                    routingId = monitor.id
+                )
+            }
         }
         return monitorResult.copy(triggerResults = triggerResults)
     }
 }
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/TriggerService.kt b/alerting/src/main/kotlin/org/opensearch/alerting/TriggerService.kt
index 282ebda59..21ba32475 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/TriggerService.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/TriggerService.kt
@@ -6,23 +6,33 @@ package org.opensearch.alerting
 
 import org.apache.logging.log4j.LogManager
-import org.opensearch.alerting.aggregation.bucketselectorext.BucketSelectorIndices.Fields.BUCKET_INDICES
-import org.opensearch.alerting.aggregation.bucketselectorext.BucketSelectorIndices.Fields.PARENT_BUCKET_PATH
-import org.opensearch.alerting.core.model.DocLevelQuery
-import org.opensearch.alerting.model.AggregationResultBucket
-import org.opensearch.alerting.model.Alert
-import org.opensearch.alerting.model.BucketLevelTrigger
+import org.opensearch.alerting.chainedAlertCondition.parsers.ChainedAlertExpressionParser
 import org.opensearch.alerting.model.BucketLevelTriggerRunResult
-import org.opensearch.alerting.model.DocumentLevelTrigger
+import org.opensearch.alerting.model.ChainedAlertTriggerRunResult
+import org.opensearch.alerting.model.ClusterMetricsTriggerRunResult
org.opensearch.alerting.model.ClusterMetricsTriggerRunResult.ClusterTriggerResult import org.opensearch.alerting.model.DocumentLevelTriggerRunResult -import org.opensearch.alerting.model.Monitor -import org.opensearch.alerting.model.QueryLevelTrigger import org.opensearch.alerting.model.QueryLevelTriggerRunResult import org.opensearch.alerting.script.BucketLevelTriggerExecutionContext +import org.opensearch.alerting.script.ChainedAlertTriggerExecutionContext import org.opensearch.alerting.script.QueryLevelTriggerExecutionContext import org.opensearch.alerting.script.TriggerScript import org.opensearch.alerting.triggercondition.parsers.TriggerExpressionParser +import org.opensearch.alerting.util.CrossClusterMonitorUtils import org.opensearch.alerting.util.getBucketKeysHash +import org.opensearch.alerting.workflow.WorkflowRunContext +import org.opensearch.cluster.service.ClusterService +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorIndices.Fields.BUCKET_INDICES +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorIndices.Fields.PARENT_BUCKET_PATH +import org.opensearch.commons.alerting.model.AggregationResultBucket +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.BucketLevelTrigger +import org.opensearch.commons.alerting.model.ChainedAlertTrigger +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.DocumentLevelTrigger +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.QueryLevelTrigger +import org.opensearch.commons.alerting.model.Workflow import org.opensearch.script.Script import org.opensearch.script.ScriptService import org.opensearch.search.aggregations.Aggregation @@ -36,7 +46,21 @@ class TriggerService(val scriptService: ScriptService) { private val ALWAYS_RUN = Script("return true") private val NEVER_RUN = Script("return false") - fun isQueryLevelTriggerActionable(ctx: QueryLevelTriggerExecutionContext, result: QueryLevelTriggerRunResult): Boolean { + fun isQueryLevelTriggerActionable( + ctx: QueryLevelTriggerExecutionContext, + result: QueryLevelTriggerRunResult, + workflowRunContext: WorkflowRunContext?, + ): Boolean { + if (workflowRunContext?.auditDelegateMonitorAlerts == true) return false + // Suppress actions if the current alert is acknowledged and there are no errors. + val suppress = ctx.alert?.state == Alert.State.ACKNOWLEDGED && result.error == null && ctx.error == null + return result.triggered && !suppress + } + + fun isChainedAlertTriggerActionable( + ctx: ChainedAlertTriggerExecutionContext, + result: ChainedAlertTriggerRunResult, + ): Boolean { // Suppress actions if the current alert is acknowledged and there are no errors. val suppress = ctx.alert?.state == Alert.State.ACKNOWLEDGED && result.error == null && ctx.error == null return result.triggered && !suppress @@ -59,6 +83,52 @@ class TriggerService(val scriptService: ScriptService) { } } + fun runClusterMetricsTrigger( + monitor: Monitor, + trigger: QueryLevelTrigger, + ctx: QueryLevelTriggerExecutionContext, + clusterService: ClusterService + ): ClusterMetricsTriggerRunResult { + var runResult: ClusterMetricsTriggerRunResult? 
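+ // For cross-cluster monitors the condition script is evaluated once per cluster and each cluster's outcome is recorded separately; otherwise it is evaluated once against the full result set.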
+ try { + val inputResults = ctx.results.getOrElse(0) { mapOf() } + var triggered = false + val clusterTriggerResults = mutableListOf<ClusterTriggerResult>() + if (CrossClusterMonitorUtils.isRemoteMonitor(monitor, clusterService)) { + inputResults.forEach { clusterResult -> + // Reducing the inputResults to only include results from 1 cluster at a time + val clusterTriggerCtx = ctx.copy(results = listOf(mapOf(clusterResult.toPair()))) + + val clusterTriggered = scriptService.compile(trigger.condition, TriggerScript.CONTEXT) + .newInstance(trigger.condition.params) + .execute(clusterTriggerCtx) + + if (clusterTriggered) { + triggered = clusterTriggered + clusterTriggerResults.add(ClusterTriggerResult(cluster = clusterResult.key, triggered = clusterTriggered)) + } + } + } else { + triggered = scriptService.compile(trigger.condition, TriggerScript.CONTEXT) + .newInstance(trigger.condition.params) + .execute(ctx) + if (triggered) clusterTriggerResults + .add(ClusterTriggerResult(cluster = clusterService.clusterName.value(), triggered = triggered)) + } + runResult = ClusterMetricsTriggerRunResult( + triggerName = trigger.name, + triggered = triggered, + error = null, + clusterTriggerResults = clusterTriggerResults + ) + } catch (e: Exception) { + logger.info("Error running script for monitor ${monitor.id}, trigger: ${trigger.id}", e) + // if the script fails we need to send an alert so set triggered = true + runResult = ClusterMetricsTriggerRunResult(trigger.name, true, e) + } + return runResult!! + } + // TODO: improve performance and support match all and match any fun runDocLevelTrigger( monitor: Monitor, @@ -85,6 +155,32 @@ } } + fun runChainedAlertTrigger( + workflow: Workflow, + trigger: ChainedAlertTrigger, + alertGeneratingMonitors: Set<String>, + monitorIdToAlertIdsMap: Map<String, Set<String>>, + ): ChainedAlertTriggerRunResult { + val associatedAlertIds = mutableSetOf<String>() + return try { + val parsedTriggerCondition = ChainedAlertExpressionParser(trigger.condition.idOrCode).parse() + val evaluate = parsedTriggerCondition.evaluate(alertGeneratingMonitors) + if (evaluate) { + val monitorIdsInTriggerCondition = parsedTriggerCondition.getMonitorIds(parsedTriggerCondition) + monitorIdsInTriggerCondition.forEach { associatedAlertIds.addAll(monitorIdToAlertIdsMap.getOrDefault(it, emptySet())) } + } + ChainedAlertTriggerRunResult(trigger.name, triggered = evaluate, null, associatedAlertIds = associatedAlertIds) + } catch (e: Exception) { + logger.error("Error running chained alert trigger script for workflow ${workflow.id}, trigger: ${trigger.id}", e) + ChainedAlertTriggerRunResult( + triggerName = trigger.name, + triggered = false, + error = e, + associatedAlertIds = emptySet() + ) + } + } + @Suppress("UNCHECKED_CAST") fun runBucketLevelTrigger( monitor: Monitor, @@ -124,6 +220,8 @@ val keyField = Aggregation.CommonFields.KEY.preferredName val keyValuesList = mutableListOf<String>() when { + bucket[keyField] is List<*> && bucket.containsKey(Aggregation.CommonFields.KEY_AS_STRING.preferredName) -> + keyValuesList.add(bucket[Aggregation.CommonFields.KEY_AS_STRING.preferredName] as String) bucket[keyField] is String -> keyValuesList.add(bucket[keyField] as String) // In the case where the key field is an Int bucket[keyField] is Int -> keyValuesList.add(bucket[keyField].toString()) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/WorkflowMetadataService.kt
b/alerting/src/main/kotlin/org/opensearch/alerting/WorkflowMetadataService.kt new file mode 100644 index 000000000..9dc4fbcdd --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/WorkflowMetadataService.kt @@ -0,0 +1,174 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import kotlinx.coroutines.CoroutineName +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.SupervisorJob +import org.apache.logging.log4j.LogManager +import org.opensearch.OpenSearchException +import org.opensearch.action.DocWriteRequest +import org.opensearch.action.DocWriteResponse +import org.opensearch.action.get.GetRequest +import org.opensearch.action.get.GetResponse +import org.opensearch.action.index.IndexRequest +import org.opensearch.action.index.IndexResponse +import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.model.WorkflowMetadata +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.util.AlertingException +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.settings.Settings +import org.opensearch.common.unit.TimeValue +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentFactory +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.CompositeInput +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import java.time.Instant +import java.time.LocalDateTime +import java.time.ZoneOffset +import java.util.UUID + +object WorkflowMetadataService : + CoroutineScope by CoroutineScope(SupervisorJob() + Dispatchers.Default + CoroutineName("WorkflowMetadataService")) { + private val log = LogManager.getLogger(this::class.java) + + private lateinit var client: Client + private lateinit var xContentRegistry: NamedXContentRegistry + private lateinit var clusterService: ClusterService + private lateinit var settings: Settings + + @Volatile private lateinit var indexTimeout: TimeValue + + fun initialize( + client: Client, + clusterService: ClusterService, + xContentRegistry: NamedXContentRegistry, + settings: Settings + ) { + this.clusterService = clusterService + this.client = client + this.xContentRegistry = xContentRegistry + this.settings = settings + this.indexTimeout = AlertingSettings.INDEX_TIMEOUT.get(settings) + this.clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.INDEX_TIMEOUT) { indexTimeout = it } + } + + @Suppress("ComplexMethod", "ReturnCount") + suspend fun upsertWorkflowMetadata(metadata: WorkflowMetadata, updating: Boolean): WorkflowMetadata { + try { + val indexRequest = IndexRequest(ScheduledJob.SCHEDULED_JOBS_INDEX) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .source(metadata.toXContent(XContentFactory.jsonBuilder(), ToXContent.MapParams(mapOf("with_type" to "true")))) + .id(metadata.id) + .routing(metadata.workflowId) + .timeout(indexTimeout) + + if 
(updating) { + indexRequest.id(metadata.id) + } else { + indexRequest.opType(DocWriteRequest.OpType.CREATE) + } + val response: IndexResponse = client.suspendUntil { index(indexRequest, it) } + when (response.result) { + DocWriteResponse.Result.DELETED, DocWriteResponse.Result.NOOP, DocWriteResponse.Result.NOT_FOUND, null -> { + val failureReason = "The upsert metadata call failed with a ${response.result?.lowercase} result" + log.error(failureReason) + throw AlertingException(failureReason, RestStatus.INTERNAL_SERVER_ERROR, IllegalStateException(failureReason)) + } + DocWriteResponse.Result.CREATED, DocWriteResponse.Result.UPDATED -> { + log.debug("Successfully upserted WorkflowMetadata:${metadata.id}") + } + } + return metadata + } catch (e: Exception) { + // If updating is set to false and the id is already set, a conflict exception will be thrown + if (e is OpenSearchException && e.status() == RestStatus.CONFLICT && !updating) { + log.debug( + "Metadata with ${metadata.id} for workflow ${metadata.workflowId} already exists." + + " The existing metadata will be updated instead of creating a new one" + ) + return upsertWorkflowMetadata(metadata, true) + } + log.error("Error saving metadata", e) + throw AlertingException.wrap(e) + } + } + + suspend fun getOrCreateWorkflowMetadata( + workflow: Workflow, + skipIndex: Boolean = false, + executionId: String + ): Pair<WorkflowMetadata, Boolean> { + try { + val created = true + val metadata = getWorkflowMetadata(workflow) + return if (metadata != null) { + metadata to !created + } else { + val newMetadata = createNewWorkflowMetadata(workflow, executionId, skipIndex) + if (skipIndex) { + newMetadata to created + } else { + upsertWorkflowMetadata(newMetadata, updating = false) to created + } + } + } catch (e: Exception) { + throw AlertingException.wrap(e) + } + } + + private suspend fun getWorkflowMetadata(workflow: Workflow): WorkflowMetadata? { + try { + val metadataId = WorkflowMetadata.getId(workflow.id) + val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, metadataId).routing(workflow.id) + + val getResponse: GetResponse = client.suspendUntil { get(getRequest, it) } + return if (getResponse.isExists) { + val xcp = XContentHelper.createParser( + xContentRegistry, + LoggingDeprecationHandler.INSTANCE, + getResponse.sourceAsBytesRef, + XContentType.JSON + ) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + WorkflowMetadata.parse(xcp) + } else { + null + } + } catch (e: Exception) { + if (e.message?.contains("no such index") == true) { + return null + } else { + throw AlertingException.wrap(e) + } + } + } + + private fun createNewWorkflowMetadata(workflow: Workflow, executionId: String, isTempWorkflow: Boolean): WorkflowMetadata { + // In the case of a temp workflow (i.e.
the workflow is in a dry-run), use the timestampWithUUID-metadata format + // In the case of regular workflow execution, use the workflowId-metadata format + val id = if (isTempWorkflow) "${LocalDateTime.now(ZoneOffset.UTC)}${UUID.randomUUID()}" else workflow.id + return WorkflowMetadata( + id = WorkflowMetadata.getId(id), + workflowId = workflow.id, + monitorIds = (workflow.inputs[0] as CompositeInput).getMonitorIds(), + latestRunTime = Instant.now(), + latestExecutionId = executionId + ) + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/WorkflowService.kt b/alerting/src/main/kotlin/org/opensearch/alerting/WorkflowService.kt new file mode 100644 index 000000000..04bd64b8d --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/WorkflowService.kt @@ -0,0 +1,144 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import org.apache.logging.log4j.LogManager +import org.opensearch.OpenSearchException +import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequest +import org.opensearch.action.admin.indices.exists.indices.IndicesExistsResponse +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.util.AlertingException +import org.opensearch.client.Client +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.Finding +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import org.opensearch.index.query.QueryBuilders +import org.opensearch.search.builder.SearchSourceBuilder + +private val log = LogManager.getLogger(WorkflowService::class.java) + +/** + * Contains util methods used in workflow execution + */ +class WorkflowService( + val client: Client, + val xContentRegistry: NamedXContentRegistry, +) { + /** + * Returns finding doc ids per index for the given workflow execution + * Used for pre-filtering the dataset in the case of creating a workflow with chained findings + * + * @param chainedMonitors Monitors that have previously executed + * @param workflowExecutionId Execution id of the current workflow + */ + suspend fun getFindingDocIdsByExecutionId(chainedMonitors: List<Monitor>, workflowExecutionId: String): Map<String, List<String>> { + if (chainedMonitors.isEmpty()) + return emptyMap() + val dataSources = chainedMonitors[0].dataSources + try { + val existsResponse: IndicesExistsResponse = client.admin().indices().suspendUntil { + exists(IndicesExistsRequest(dataSources.findingsIndex).local(true), it) + } + if (existsResponse.isExists == false) return emptyMap() + // Search findings index to match id of monitors and workflow execution id + val bqb = QueryBuilders.boolQuery() + .filter( + QueryBuilders.termsQuery( + Finding.MONITOR_ID_FIELD, + chainedMonitors.map { it.id } + ) + ) + .filter(QueryBuilders.termQuery(Finding.EXECUTION_ID_FIELD, workflowExecutionId)) + val searchRequest = SearchRequest() + .source( + SearchSourceBuilder() + .query(bqb) + .version(true) + .seqNoAndPrimaryTerm(true) + ) + .indices(dataSources.findingsIndex) + val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } + + + //
Get the findings docs + val findings = mutableListOf<Finding>() + for (hit in searchResponse.hits) { + val xcp = XContentType.JSON.xContent() + .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, hit.sourceAsString) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + val finding = Finding.parse(xcp) + findings.add(finding) + } + // Based on the findings get the document ids + val indexToRelatedDocIdsMap = mutableMapOf<String, MutableList<String>>() + for (finding in findings) { + indexToRelatedDocIdsMap.getOrPut(finding.index) { mutableListOf() }.addAll(finding.relatedDocIds) + } + return indexToRelatedDocIdsMap + } catch (t: Exception) { + log.error("Error getting finding doc ids: ${t.message}", t) + throw AlertingException.wrap(t) + } + } + + /** + * Returns the list of monitors for the given ids + * Used in workflow execution in order to figure out the monitor type + * + * @param monitors List of monitor ids + * @param size Expected number of monitors + */ + suspend fun getMonitorsById(monitors: List<String>, size: Int): List<Monitor> { + try { + val bqb = QueryBuilders.boolQuery().filter(QueryBuilders.termsQuery("_id", monitors)) + + val searchRequest = SearchRequest() + .source( + SearchSourceBuilder() + .query(bqb) + .version(true) + .seqNoAndPrimaryTerm(true) + .size(size) + ) + .indices(ScheduledJob.SCHEDULED_JOBS_INDEX) + + val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } + return parseMonitors(searchResponse) + } catch (e: Exception) { + log.error("Error getting monitors: ${e.message}", e) + throw AlertingException.wrap(e) + } + } + + private fun parseMonitors(response: SearchResponse): List<Monitor> { + if (response.isTimedOut) { + log.error("Request for getting monitors timed out") + throw OpenSearchException("Cannot determine that the ${ScheduledJob.SCHEDULED_JOBS_INDEX} index is healthy") + } + val monitors = mutableListOf<Monitor>() + try { + for (hit in response.hits) { + XContentType.JSON.xContent().createParser( + xContentRegistry, + LoggingDeprecationHandler.INSTANCE, hit.sourceAsString + ).use { hitsParser -> + val monitor = ScheduledJob.parse(hitsParser, hit.id, hit.version) as Monitor + monitors.add(monitor) + } + } + } catch (e: Exception) { + log.error("Error parsing monitors: ${e.message}", e) + throw AlertingException.wrap(e) + } + return monitors + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/AcknowledgeAlertAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/AcknowledgeAlertAction.kt deleted file mode 100644 index d1d968f71..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/AcknowledgeAlertAction.kt +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionType - -class AcknowledgeAlertAction private constructor() : ActionType<AcknowledgeAlertResponse>(NAME, ::AcknowledgeAlertResponse) { - companion object { - val INSTANCE = AcknowledgeAlertAction() - const val NAME = "cluster:admin/opendistro/alerting/alerts/ack" - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/AcknowledgeAlertRequest.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/AcknowledgeAlertRequest.kt deleted file mode 100644 index 59b1cd027..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/AcknowledgeAlertRequest.kt +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 -
*/ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionRequestValidationException -import org.opensearch.action.support.WriteRequest -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import java.io.IOException -import java.util.Collections - -class AcknowledgeAlertRequest : ActionRequest { - val monitorId: String - val alertIds: List<String> - val refreshPolicy: WriteRequest.RefreshPolicy - - constructor( - monitorId: String, - alertIds: List<String>, - refreshPolicy: WriteRequest.RefreshPolicy - ) : super() { - this.monitorId = monitorId - this.alertIds = alertIds - this.refreshPolicy = refreshPolicy - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString(), // monitorId - Collections.unmodifiableList(sin.readStringList()), // alertIds - WriteRequest.RefreshPolicy.readFrom(sin) // refreshPolicy - ) - - override fun validate(): ActionRequestValidationException? { - return null - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(monitorId) - out.writeStringCollection(alertIds) - refreshPolicy.writeTo(out) - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/AcknowledgeAlertResponse.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/AcknowledgeAlertResponse.kt deleted file mode 100644 index ce8c3a2f0..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/AcknowledgeAlertResponse.kt +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionResponse -import org.opensearch.alerting.model.Alert -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import java.io.IOException -import java.util.Collections - -class AcknowledgeAlertResponse : ActionResponse, ToXContentObject { - - val acknowledged: List<Alert> - val failed: List<Alert> - val missing: List<String> - - constructor( - acknowledged: List<Alert>, - failed: List<Alert>, - missing: List<String> - ) : super() { - this.acknowledged = acknowledged - this.failed = failed - this.missing = missing - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - Collections.unmodifiableList(sin.readList(::Alert)), // acknowledged - Collections.unmodifiableList(sin.readList(::Alert)), // failed - Collections.unmodifiableList(sin.readStringList()) // missing - ) - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeCollection(acknowledged) - out.writeCollection(failed) - out.writeStringCollection(missing) - } - - @Throws(IOException::class) - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - - builder.startObject().startArray("success") - acknowledged.forEach { builder.value(it.id) } - builder.endArray().startArray("failed") - failed.forEach { buildFailedAlertAcknowledgeObject(builder, it) } - missing.forEach { buildMissingAlertAcknowledgeObject(builder, it) } - return builder.endArray().endObject() - } - - private fun buildFailedAlertAcknowledgeObject(builder: XContentBuilder, failedAlert: Alert) { - builder.startObject() - .startObject(failedAlert.id) - val reason = when (failedAlert.state) {
Alert.State.ERROR -> "Alert is in an error state and can not be acknowledged." - Alert.State.COMPLETED -> "Alert has already completed and can not be acknowledged." - Alert.State.ACKNOWLEDGED -> "Alert has already been acknowledged." - else -> "Alert state unknown and can not be acknowledged" - } - builder.field("failed_reason", reason) - .endObject() - .endObject() - } - - private fun buildMissingAlertAcknowledgeObject(builder: XContentBuilder, alertID: String) { - builder.startObject() - .startObject(alertID) - .field("failed_reason", "Alert: $alertID does not exist (it may have already completed).") - .endObject() - .endObject() - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/DeleteMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/DeleteMonitorAction.kt deleted file mode 100644 index 402b95f41..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/DeleteMonitorAction.kt +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionType -import org.opensearch.action.delete.DeleteResponse - -class DeleteMonitorAction private constructor() : ActionType<DeleteResponse>(NAME, ::DeleteResponse) { - companion object { - val INSTANCE = DeleteMonitorAction() - const val NAME = "cluster:admin/opendistro/alerting/monitor/delete" - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/DeleteMonitorRequest.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/DeleteMonitorRequest.kt deleted file mode 100644 index 6ea08bee2..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/DeleteMonitorRequest.kt +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionRequestValidationException -import org.opensearch.action.support.WriteRequest -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import java.io.IOException - -class DeleteMonitorRequest : ActionRequest { - - val monitorId: String - val refreshPolicy: WriteRequest.RefreshPolicy - - constructor(monitorId: String, refreshPolicy: WriteRequest.RefreshPolicy) : super() { - this.monitorId = monitorId - this.refreshPolicy = refreshPolicy - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : super() { - monitorId = sin.readString() - refreshPolicy = WriteRequest.RefreshPolicy.readFrom(sin) - } - - override fun validate(): ActionRequestValidationException?
{ - return null - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(monitorId) - refreshPolicy.writeTo(out) - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/ExecuteMonitorRequest.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/ExecuteMonitorRequest.kt index d5c077cb0..ecc504677 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/ExecuteMonitorRequest.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/action/ExecuteMonitorRequest.kt @@ -7,10 +7,10 @@ package org.opensearch.alerting.action import org.opensearch.action.ActionRequest import org.opensearch.action.ActionRequestValidationException -import org.opensearch.alerting.model.Monitor -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput import org.opensearch.common.unit.TimeValue +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import java.io.IOException class ExecuteMonitorRequest : ActionRequest { diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/ExecuteMonitorResponse.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/ExecuteMonitorResponse.kt index 132af6f85..8d7a7c25a 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/ExecuteMonitorResponse.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/action/ExecuteMonitorResponse.kt @@ -5,13 +5,13 @@ package org.opensearch.alerting.action -import org.opensearch.action.ActionResponse import org.opensearch.alerting.model.MonitorRunResult -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.action.ActionResponse +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder import java.io.IOException class ExecuteMonitorResponse : ActionResponse, ToXContentObject { diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/ExecuteWorkflowAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/ExecuteWorkflowAction.kt new file mode 100644 index 000000000..efed1087d --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/action/ExecuteWorkflowAction.kt @@ -0,0 +1,15 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.action.ActionType + +class ExecuteWorkflowAction private constructor() : ActionType<ExecuteWorkflowResponse>(NAME, ::ExecuteWorkflowResponse) { + companion object { + val INSTANCE = ExecuteWorkflowAction() + const val NAME = "cluster:admin/opensearch/alerting/workflow/execute" + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/ExecuteWorkflowRequest.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/ExecuteWorkflowRequest.kt new file mode 100644 index 000000000..3b3d48ed2 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/action/ExecuteWorkflowRequest.kt @@ -0,0 +1,70 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0
+ */ + +package org.opensearch.alerting.action + +import org.opensearch.action.ActionRequest +import org.opensearch.action.ActionRequestValidationException +import org.opensearch.action.ValidateActions +import org.opensearch.common.unit.TimeValue +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import java.io.IOException + +/** + * A class containing workflow details. + */ +class ExecuteWorkflowRequest : ActionRequest { + val dryrun: Boolean + val requestEnd: TimeValue + val workflowId: String? + val workflow: Workflow? + + constructor( + dryrun: Boolean, + requestEnd: TimeValue, + workflowId: String?, + workflow: Workflow?, + ) : super() { + this.dryrun = dryrun + this.requestEnd = requestEnd + this.workflowId = workflowId + this.workflow = workflow + } + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + sin.readBoolean(), + sin.readTimeValue(), + sin.readOptionalString(), + if (sin.readBoolean()) { + Workflow.readFrom(sin) + } else null + ) + + override fun validate(): ActionRequestValidationException? { + var validationException: ActionRequestValidationException? = null + if (workflowId == null && workflow == null) { + validationException = ValidateActions.addValidationError( + "Both workflow and workflow id are missing", validationException + ) + } + return validationException + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeBoolean(dryrun) + out.writeTimeValue(requestEnd) + out.writeOptionalString(workflowId) + if (workflow != null) { + out.writeBoolean(true) + workflow.writeTo(out) + } else { + out.writeBoolean(false) + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/ExecuteWorkflowResponse.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/ExecuteWorkflowResponse.kt new file mode 100644 index 000000000..7312a9470 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/action/ExecuteWorkflowResponse.kt @@ -0,0 +1,39 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.alerting.model.WorkflowRunResult +import org.opensearch.core.action.ActionResponse +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder +import java.io.IOException + +class ExecuteWorkflowResponse : ActionResponse, ToXContentObject { + val workflowRunResult: WorkflowRunResult + constructor( + workflowRunResult: WorkflowRunResult + ) : super() { + this.workflowRunResult = workflowRunResult + } + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + WorkflowRunResult(sin) + ) + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + workflowRunResult.writeTo(out) + } + + @Throws(IOException::class) + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return workflowRunResult.toXContent(builder, params) + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetAlertsAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetAlertsAction.kt deleted file mode 100644 index 649993565..000000000 --- 
a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetAlertsAction.kt +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionType - -class GetAlertsAction private constructor() : ActionType<GetAlertsResponse>(NAME, ::GetAlertsResponse) { - companion object { - val INSTANCE = GetAlertsAction() - const val NAME = "cluster:admin/opendistro/alerting/alerts/get" - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetAlertsRequest.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetAlertsRequest.kt deleted file mode 100644 index 967942353..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetAlertsRequest.kt +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionRequestValidationException -import org.opensearch.alerting.model.Table -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import java.io.IOException - -class GetAlertsRequest : ActionRequest { - val table: Table - val severityLevel: String - val alertState: String - val monitorId: String? - - constructor( - table: Table, - severityLevel: String, - alertState: String, - monitorId: String? - ) : super() { - this.table = table - this.severityLevel = severityLevel - this.alertState = alertState - this.monitorId = monitorId - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - table = Table.readFrom(sin), - severityLevel = sin.readString(), - alertState = sin.readString(), - monitorId = sin.readOptionalString() - ) - - override fun validate(): ActionRequestValidationException? { - return null - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - table.writeTo(out) - out.writeString(severityLevel) - out.writeString(alertState) - out.writeOptionalString(monitorId) - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetAlertsResponse.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetAlertsResponse.kt deleted file mode 100644 index 71dc6565e..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetAlertsResponse.kt +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionResponse -import org.opensearch.alerting.model.Alert -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import java.io.IOException -import java.util.Collections - -class GetAlertsResponse : ActionResponse, ToXContentObject { - val alerts: List<Alert> - // totalAlerts is not the same as the size of alerts because there can be 30 alerts from the request, but - the request only asked for 5 alerts, so totalAlerts will be 30, but alerts will only contain 5 alerts - val totalAlerts: Int? - - constructor( - alerts: List<Alert>, - totalAlerts: Int?
- ) : super() { - this.alerts = alerts - this.totalAlerts = totalAlerts - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - alerts = Collections.unmodifiableList(sin.readList(::Alert)), - totalAlerts = sin.readOptionalInt() - ) - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeCollection(alerts) - out.writeOptionalInt(totalAlerts) - } - - @Throws(IOException::class) - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .field("alerts", alerts) - .field("totalAlerts", totalAlerts) - - return builder.endObject() - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetDestinationsRequest.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetDestinationsRequest.kt index 63c3ebd9d..92fae8247 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetDestinationsRequest.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetDestinationsRequest.kt @@ -7,9 +7,9 @@ package org.opensearch.alerting.action import org.opensearch.action.ActionRequest import org.opensearch.action.ActionRequestValidationException -import org.opensearch.alerting.model.Table -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.commons.alerting.model.Table +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.search.fetch.subphase.FetchSourceContext import java.io.IOException diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetDestinationsResponse.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetDestinationsResponse.kt index cd1abe808..5cf7d7dec 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetDestinationsResponse.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetDestinationsResponse.kt @@ -5,14 +5,14 @@ package org.opensearch.alerting.action -import org.opensearch.action.ActionResponse import org.opensearch.alerting.model.destination.Destination -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.rest.RestStatus +import org.opensearch.core.action.ActionResponse +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder import java.io.IOException class GetDestinationsResponse : ActionResponse, ToXContentObject { diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetEmailAccountRequest.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetEmailAccountRequest.kt index 877855a01..94b79726e 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetEmailAccountRequest.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetEmailAccountRequest.kt @@ -7,8 +7,8 @@ package org.opensearch.alerting.action import org.opensearch.action.ActionRequest import org.opensearch.action.ActionRequestValidationException -import org.opensearch.common.io.stream.StreamInput -import 
org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.rest.RestRequest import org.opensearch.search.fetch.subphase.FetchSourceContext import java.io.IOException diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetEmailAccountResponse.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetEmailAccountResponse.kt index 0c14b8964..a83cdbba1 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetEmailAccountResponse.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetEmailAccountResponse.kt @@ -5,18 +5,18 @@ package org.opensearch.alerting.action -import org.opensearch.action.ActionResponse import org.opensearch.alerting.model.destination.email.EmailAccount -import org.opensearch.alerting.util._ID -import org.opensearch.alerting.util._PRIMARY_TERM -import org.opensearch.alerting.util._SEQ_NO -import org.opensearch.alerting.util._VERSION -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.rest.RestStatus +import org.opensearch.commons.alerting.util.IndexUtils.Companion._ID +import org.opensearch.commons.alerting.util.IndexUtils.Companion._PRIMARY_TERM +import org.opensearch.commons.alerting.util.IndexUtils.Companion._SEQ_NO +import org.opensearch.commons.alerting.util.IndexUtils.Companion._VERSION +import org.opensearch.core.action.ActionResponse +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder import java.io.IOException class GetEmailAccountResponse : ActionResponse, ToXContentObject { diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetEmailGroupRequest.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetEmailGroupRequest.kt index 9708b0336..bb245b075 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetEmailGroupRequest.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetEmailGroupRequest.kt @@ -7,8 +7,8 @@ package org.opensearch.alerting.action import org.opensearch.action.ActionRequest import org.opensearch.action.ActionRequestValidationException -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.rest.RestRequest import org.opensearch.search.fetch.subphase.FetchSourceContext import java.io.IOException diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetEmailGroupResponse.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetEmailGroupResponse.kt index b41394a39..d83941ffd 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetEmailGroupResponse.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetEmailGroupResponse.kt @@ -5,18 +5,18 @@ package org.opensearch.alerting.action -import org.opensearch.action.ActionResponse import org.opensearch.alerting.model.destination.email.EmailGroup -import 
org.opensearch.alerting.util._ID -import org.opensearch.alerting.util._PRIMARY_TERM -import org.opensearch.alerting.util._SEQ_NO -import org.opensearch.alerting.util._VERSION -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.rest.RestStatus +import org.opensearch.commons.alerting.util.IndexUtils.Companion._ID +import org.opensearch.commons.alerting.util.IndexUtils.Companion._PRIMARY_TERM +import org.opensearch.commons.alerting.util.IndexUtils.Companion._SEQ_NO +import org.opensearch.commons.alerting.util.IndexUtils.Companion._VERSION +import org.opensearch.core.action.ActionResponse +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder import java.io.IOException class GetEmailGroupResponse : ActionResponse, ToXContentObject { diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsAction.kt deleted file mode 100644 index 03d2be9c9..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsAction.kt +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionType - -class GetFindingsAction private constructor() : ActionType<GetFindingsResponse>(NAME, ::GetFindingsResponse) { - companion object { - val INSTANCE = GetFindingsAction() - const val NAME = "cluster:admin/opensearch/alerting/findings/get" - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsRequest.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsRequest.kt deleted file mode 100644 index 15f9a0d41..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsRequest.kt +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionRequestValidationException -import org.opensearch.alerting.model.Table -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import java.io.IOException - -class GetFindingsRequest : ActionRequest { - val findingId: String? - val table: Table - - constructor( - findingId: String?, - table: Table - ) : super() { - this.findingId = findingId - this.table = table - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - findingId = sin.readOptionalString(), - table = Table.readFrom(sin) - ) - - override fun validate(): ActionRequestValidationException?
{ - return null - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeOptionalString(findingId) - table.writeTo(out) - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsResponse.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsResponse.kt deleted file mode 100644 index 66943e318..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetFindingsResponse.kt +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionResponse -import org.opensearch.alerting.model.FindingWithDocs -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.rest.RestStatus -import java.io.IOException - -class GetFindingsResponse : ActionResponse, ToXContentObject { - var status: RestStatus - var totalFindings: Int? - var findings: List<FindingWithDocs> - - constructor( - status: RestStatus, - totalFindings: Int?, - findings: List<FindingWithDocs> - ) : super() { - this.status = status - this.totalFindings = totalFindings - this.findings = findings - } - - @Throws(IOException::class) - constructor(sin: StreamInput) { - this.status = sin.readEnum(RestStatus::class.java) - val findings = mutableListOf<FindingWithDocs>() - this.totalFindings = sin.readOptionalInt() - var currentSize = sin.readInt() - for (i in 0 until currentSize) { - findings.add(FindingWithDocs.readFrom(sin)) - } - this.findings = findings - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeEnum(status) - out.writeOptionalInt(totalFindings) - out.writeInt(findings.size) - for (finding in findings) { - finding.writeTo(out) - } - } - - @Throws(IOException::class) - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .field("total_findings", totalFindings) - .field("findings", findings) - - return builder.endObject() - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetMonitorAction.kt deleted file mode 100644 index da209b983..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetMonitorAction.kt +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionType - -class GetMonitorAction private constructor() : ActionType<GetMonitorResponse>(NAME, ::GetMonitorResponse) { - companion object { - val INSTANCE = GetMonitorAction() - const val NAME = "cluster:admin/opendistro/alerting/monitor/get" - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetMonitorRequest.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetMonitorRequest.kt deleted file mode 100644 index 31cc68fc5..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetMonitorRequest.kt +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionRequestValidationException -import
org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.rest.RestRequest -import org.opensearch.search.fetch.subphase.FetchSourceContext -import java.io.IOException - -class GetMonitorRequest : ActionRequest { - val monitorId: String - val version: Long - val method: RestRequest.Method - val srcContext: FetchSourceContext? - - constructor( - monitorId: String, - version: Long, - method: RestRequest.Method, - srcContext: FetchSourceContext? - ) : super() { - this.monitorId = monitorId - this.version = version - this.method = method - this.srcContext = srcContext - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString(), // monitorId - sin.readLong(), // version - sin.readEnum(RestRequest.Method::class.java), // method - if (sin.readBoolean()) { - FetchSourceContext(sin) // srcContext - } else null - ) - - override fun validate(): ActionRequestValidationException? { - return null - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(monitorId) - out.writeLong(version) - out.writeEnum(method) - out.writeBoolean(srcContext != null) - srcContext?.writeTo(out) - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetMonitorResponse.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetMonitorResponse.kt deleted file mode 100644 index 819168812..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetMonitorResponse.kt +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionResponse -import org.opensearch.alerting.model.Monitor -import org.opensearch.alerting.util._ID -import org.opensearch.alerting.util._PRIMARY_TERM -import org.opensearch.alerting.util._SEQ_NO -import org.opensearch.alerting.util._VERSION -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.rest.RestStatus -import java.io.IOException - -class GetMonitorResponse : ActionResponse, ToXContentObject { - var id: String - var version: Long - var seqNo: Long - var primaryTerm: Long - var status: RestStatus - var monitor: Monitor? - - constructor( - id: String, - version: Long, - seqNo: Long, - primaryTerm: Long, - status: RestStatus, - monitor: Monitor? 
- ) : super() { - this.id = id - this.version = version - this.seqNo = seqNo - this.primaryTerm = primaryTerm - this.status = status - this.monitor = monitor - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString(), // id - sin.readLong(), // version - sin.readLong(), // seqNo - sin.readLong(), // primaryTerm - sin.readEnum(RestStatus::class.java), // RestStatus - if (sin.readBoolean()) { - Monitor.readFrom(sin) // monitor - } else null - ) - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(id) - out.writeLong(version) - out.writeLong(seqNo) - out.writeLong(primaryTerm) - out.writeEnum(status) - if (monitor != null) { - out.writeBoolean(true) - monitor?.writeTo(out) - } else { - out.writeBoolean(false) - } - } - - @Throws(IOException::class) - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .field(_ID, id) - .field(_VERSION, version) - .field(_SEQ_NO, seqNo) - .field(_PRIMARY_TERM, primaryTerm) - if (monitor != null) - builder.field("monitor", monitor) - - return builder.endObject() - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetRemoteIndexesAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetRemoteIndexesAction.kt new file mode 100644 index 000000000..059110af4 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetRemoteIndexesAction.kt @@ -0,0 +1,15 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.action.ActionType + +class GetRemoteIndexesAction private constructor() : ActionType<GetRemoteIndexesResponse>(NAME, ::GetRemoteIndexesResponse) { + companion object { + val INSTANCE = GetRemoteIndexesAction() + const val NAME = "cluster:admin/opensearch/alerting/remote/indexes/get" + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetRemoteIndexesRequest.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetRemoteIndexesRequest.kt new file mode 100644 index 000000000..733bc3a04 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetRemoteIndexesRequest.kt @@ -0,0 +1,43 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.action.ActionRequest +import org.opensearch.action.ActionRequestValidationException +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import java.io.IOException + +class GetRemoteIndexesRequest : ActionRequest { + var indexes: List<String> = listOf() + var includeMappings: Boolean + + constructor(indexes: List<String>, includeMappings: Boolean) : super() { + this.indexes = indexes + this.includeMappings = includeMappings + } + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + sin.readStringList(), + sin.readBoolean() + ) + + override fun validate(): ActionRequestValidationException?
{ + return null + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeStringArray(indexes.toTypedArray()) + out.writeBoolean(includeMappings) + } + + companion object { + const val INDEXES_FIELD = "indexes" + const val INCLUDE_MAPPINGS_FIELD = "include_mappings" + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetRemoteIndexesResponse.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetRemoteIndexesResponse.kt new file mode 100644 index 000000000..1572b4228 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetRemoteIndexesResponse.kt @@ -0,0 +1,135 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.cluster.health.ClusterHealthStatus +import org.opensearch.cluster.metadata.MappingMetadata +import org.opensearch.core.action.ActionResponse +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder +import java.io.IOException + +class GetRemoteIndexesResponse : ActionResponse, ToXContentObject { + var clusterIndexes: List<ClusterIndexes> = emptyList() + + constructor(clusterIndexes: List<ClusterIndexes>) : super() { + this.clusterIndexes = clusterIndexes + } + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + clusterIndexes = sin.readList((ClusterIndexes.Companion)::readFrom) + ) + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + clusterIndexes.forEach { + it.toXContent(builder, params) + } + return builder.endObject() + } + + override fun writeTo(out: StreamOutput) { + out.writeCollection(clusterIndexes) + } + + data class ClusterIndexes( + val clusterName: String, + val clusterHealth: ClusterHealthStatus, + val hubCluster: Boolean, + val indexes: List<ClusterIndex> = listOf(), + val latency: Long + ) : ToXContentObject, Writeable { + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + clusterName = sin.readString(), + clusterHealth = sin.readEnum(ClusterHealthStatus::class.java), + hubCluster = sin.readBoolean(), + indexes = sin.readList((ClusterIndex.Companion)::readFrom), + latency = sin.readLong() + ) + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject(clusterName) + builder.field(CLUSTER_NAME_FIELD, clusterName) + builder.field(CLUSTER_HEALTH_FIELD, clusterHealth) + builder.field(HUB_CLUSTER_FIELD, hubCluster) + builder.field(INDEX_LATENCY_FIELD, latency) + builder.startObject(INDEXES_FIELD) + indexes.forEach { + it.toXContent(builder, params) + } + return builder.endObject().endObject() + } + + override fun writeTo(out: StreamOutput) { + out.writeString(clusterName) + out.writeEnum(clusterHealth) + out.writeBoolean(hubCluster) + out.writeCollection(indexes) + out.writeLong(latency) + } + + companion object { + const val CLUSTER_NAME_FIELD = "cluster" + const val CLUSTER_HEALTH_FIELD = "health" + const val HUB_CLUSTER_FIELD = "hub_cluster" + const val INDEXES_FIELD = "indexes" + const val INDEX_LATENCY_FIELD = "latency" + + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): ClusterIndexes { + return ClusterIndexes(sin) + } + } + + data class ClusterIndex( + val indexName:
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/GetRemoteIndexesResponse.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetRemoteIndexesResponse.kt
new file mode 100644
index 000000000..1572b4228
--- /dev/null
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/action/GetRemoteIndexesResponse.kt
@@ -0,0 +1,135 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.action
+
+import org.opensearch.cluster.health.ClusterHealthStatus
+import org.opensearch.cluster.metadata.MappingMetadata
+import org.opensearch.core.action.ActionResponse
+import org.opensearch.core.common.io.stream.StreamInput
+import org.opensearch.core.common.io.stream.StreamOutput
+import org.opensearch.core.common.io.stream.Writeable
+import org.opensearch.core.xcontent.ToXContent
+import org.opensearch.core.xcontent.ToXContentObject
+import org.opensearch.core.xcontent.XContentBuilder
+import java.io.IOException
+
+class GetRemoteIndexesResponse : ActionResponse, ToXContentObject {
+    var clusterIndexes: List<ClusterIndexes> = emptyList()
+
+    constructor(clusterIndexes: List<ClusterIndexes>) : super() {
+        this.clusterIndexes = clusterIndexes
+    }
+
+    @Throws(IOException::class)
+    constructor(sin: StreamInput) : this(
+        clusterIndexes = sin.readList((ClusterIndexes.Companion)::readFrom)
+    )
+
+    override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder {
+        builder.startObject()
+        clusterIndexes.forEach {
+            it.toXContent(builder, params)
+        }
+        return builder.endObject()
+    }
+
+    override fun writeTo(out: StreamOutput) {
+        // Mirror the readList call in the stream constructor (size-prefixed collection).
+        out.writeCollection(clusterIndexes)
+    }
+
+    data class ClusterIndexes(
+        val clusterName: String,
+        val clusterHealth: ClusterHealthStatus,
+        val hubCluster: Boolean,
+        val indexes: List<ClusterIndex> = listOf(),
+        val latency: Long
+    ) : ToXContentObject, Writeable {
+
+        @Throws(IOException::class)
+        constructor(sin: StreamInput) : this(
+            clusterName = sin.readString(),
+            clusterHealth = sin.readEnum(ClusterHealthStatus::class.java),
+            hubCluster = sin.readBoolean(),
+            indexes = sin.readList((ClusterIndex.Companion)::readFrom),
+            latency = sin.readLong()
+        )
+
+        override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder {
+            builder.startObject(clusterName)
+            builder.field(CLUSTER_NAME_FIELD, clusterName)
+            builder.field(CLUSTER_HEALTH_FIELD, clusterHealth)
+            builder.field(HUB_CLUSTER_FIELD, hubCluster)
+            builder.field(INDEX_LATENCY_FIELD, latency)
+            builder.startObject(INDEXES_FIELD)
+            indexes.forEach {
+                it.toXContent(builder, params)
+            }
+            return builder.endObject().endObject()
+        }
+
+        override fun writeTo(out: StreamOutput) {
+            // Field order must match the stream constructor above.
+            out.writeString(clusterName)
+            out.writeEnum(clusterHealth)
+            out.writeBoolean(hubCluster)
+            out.writeCollection(indexes)
+            out.writeLong(latency)
+        }
+
+        companion object {
+            const val CLUSTER_NAME_FIELD = "cluster"
+            const val CLUSTER_HEALTH_FIELD = "health"
+            const val HUB_CLUSTER_FIELD = "hub_cluster"
+            const val INDEXES_FIELD = "indexes"
+            const val INDEX_LATENCY_FIELD = "latency"
+
+            @JvmStatic
+            @Throws(IOException::class)
+            fun readFrom(sin: StreamInput): ClusterIndexes {
+                return ClusterIndexes(sin)
+            }
+        }
+
+        data class ClusterIndex(
+            val indexName: String,
+            val indexHealth: ClusterHealthStatus?,
+            val mappings: MappingMetadata?
+        ) : ToXContentObject, Writeable {
+
+            @Throws(IOException::class)
+            constructor(sin: StreamInput) : this(
+                indexName = sin.readString(),
+                indexHealth = sin.readEnum(ClusterHealthStatus::class.java),
+                mappings = sin.readOptionalWriteable(::MappingMetadata)
+            )
+
+            override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder {
+                builder.startObject(indexName)
+                builder.field(INDEX_NAME_FIELD, indexName)
+                builder.field(INDEX_HEALTH_FIELD, indexHealth)
+                if (mappings == null) builder.startObject(MAPPINGS_FIELD).endObject()
+                else builder.field(MAPPINGS_FIELD, mappings.sourceAsMap())
+                return builder.endObject()
+            }
+
+            override fun writeTo(out: StreamOutput) {
+                out.writeString(indexName)
+                out.writeEnum(indexHealth)
+                // Mirror readOptionalWriteable(::MappingMetadata) in the stream constructor.
+                out.writeOptionalWriteable(mappings)
+            }
+
+            companion object {
+                const val INDEX_NAME_FIELD = "name"
+                const val INDEX_HEALTH_FIELD = "health"
+                const val MAPPINGS_FIELD = "mappings"
+
+                @JvmStatic
+                @Throws(IOException::class)
+                fun readFrom(sin: StreamInput): ClusterIndex {
+                    return ClusterIndex(sin)
+                }
+            }
+        }
+    }
+}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/IndexMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/IndexMonitorAction.kt
deleted file mode 100644
index 4c3c8dacb..000000000
--- a/alerting/src/main/kotlin/org/opensearch/alerting/action/IndexMonitorAction.kt
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.action
-
-import org.opensearch.action.ActionType
-
-class IndexMonitorAction private constructor() : ActionType<IndexMonitorResponse>(NAME, ::IndexMonitorResponse) {
-    companion object {
-        val INSTANCE = IndexMonitorAction()
-        const val NAME = "cluster:admin/opendistro/alerting/monitor/write"
-    }
-}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/IndexMonitorRequest.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/IndexMonitorRequest.kt
deleted file mode 100644
index 1e5b24551..000000000
--- a/alerting/src/main/kotlin/org/opensearch/alerting/action/IndexMonitorRequest.kt
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.action
-
-import org.opensearch.action.ActionRequest
-import org.opensearch.action.ActionRequestValidationException
-import org.opensearch.action.support.WriteRequest
-import org.opensearch.alerting.model.Monitor
-import org.opensearch.common.io.stream.StreamInput
-import org.opensearch.common.io.stream.StreamOutput
-import org.opensearch.rest.RestRequest
-import java.io.IOException
-
-class IndexMonitorRequest : ActionRequest {
-    val monitorId: String
-    val seqNo: Long
-    val primaryTerm: Long
-    val refreshPolicy: WriteRequest.RefreshPolicy
-    val method: RestRequest.Method
-    var monitor: Monitor
-
-    constructor(
-        monitorId: String,
-        seqNo: Long,
-        primaryTerm: Long,
-        refreshPolicy: WriteRequest.RefreshPolicy,
-        method: RestRequest.Method,
-        monitor: Monitor
-    ) : super() {
-        this.monitorId = monitorId
-        this.seqNo = seqNo
-        this.primaryTerm = primaryTerm
-        this.refreshPolicy = refreshPolicy
-        this.method = method
-        this.monitor = monitor
-    }
-
-    @Throws(IOException::class)
-    constructor(sin: StreamInput) : this(
-        monitorId = sin.readString(),
-        seqNo = sin.readLong(),
-        primaryTerm = sin.readLong(),
-        refreshPolicy =
WriteRequest.RefreshPolicy.readFrom(sin), - method = sin.readEnum(RestRequest.Method::class.java), - monitor = Monitor.readFrom(sin) as Monitor - ) - - override fun validate(): ActionRequestValidationException? { - return null - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(monitorId) - out.writeLong(seqNo) - out.writeLong(primaryTerm) - refreshPolicy.writeTo(out) - out.writeEnum(method) - monitor.writeTo(out) - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/IndexMonitorResponse.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/IndexMonitorResponse.kt deleted file mode 100644 index 5990bd680..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/IndexMonitorResponse.kt +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionResponse -import org.opensearch.alerting.model.Monitor -import org.opensearch.alerting.util._ID -import org.opensearch.alerting.util._PRIMARY_TERM -import org.opensearch.alerting.util._SEQ_NO -import org.opensearch.alerting.util._VERSION -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.rest.RestStatus -import java.io.IOException - -class IndexMonitorResponse : ActionResponse, ToXContentObject { - var id: String - var version: Long - var seqNo: Long - var primaryTerm: Long - var status: RestStatus - var monitor: Monitor - - constructor( - id: String, - version: Long, - seqNo: Long, - primaryTerm: Long, - status: RestStatus, - monitor: Monitor - ) : super() { - this.id = id - this.version = version - this.seqNo = seqNo - this.primaryTerm = primaryTerm - this.status = status - this.monitor = monitor - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString(), // id - sin.readLong(), // version - sin.readLong(), // seqNo - sin.readLong(), // primaryTerm - sin.readEnum(RestStatus::class.java), // status - Monitor.readFrom(sin) as Monitor // monitor - ) - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(id) - out.writeLong(version) - out.writeLong(seqNo) - out.writeLong(primaryTerm) - out.writeEnum(status) - monitor.writeTo(out) - } - - @Throws(IOException::class) - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return builder.startObject() - .field(_ID, id) - .field(_VERSION, version) - .field(_SEQ_NO, seqNo) - .field(_PRIMARY_TERM, primaryTerm) - .field("monitor", monitor) - .endObject() - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/SearchMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/SearchMonitorAction.kt deleted file mode 100644 index 16725fc39..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/SearchMonitorAction.kt +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionType -import org.opensearch.action.search.SearchResponse - -class SearchMonitorAction private constructor() : ActionType(NAME, ::SearchResponse) { - companion object { - val 
INSTANCE = SearchMonitorAction() - const val NAME = "cluster:admin/opendistro/alerting/monitor/search" - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/action/SearchMonitorRequest.kt b/alerting/src/main/kotlin/org/opensearch/alerting/action/SearchMonitorRequest.kt deleted file mode 100644 index 9068e7b2b..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/action/SearchMonitorRequest.kt +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionRequestValidationException -import org.opensearch.action.search.SearchRequest -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import java.io.IOException - -class SearchMonitorRequest : ActionRequest { - - val searchRequest: SearchRequest - - constructor( - searchRequest: SearchRequest - ) : super() { - this.searchRequest = searchRequest - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - searchRequest = SearchRequest(sin) - ) - - override fun validate(): ActionRequestValidationException? { - return null - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - searchRequest.writeTo(out) - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregationBuilder.kt b/alerting/src/main/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregationBuilder.kt deleted file mode 100644 index bb356a4a3..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregationBuilder.kt +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.aggregation.bucketselectorext - -import org.opensearch.alerting.aggregation.bucketselectorext.BucketSelectorExtFilter.Companion.BUCKET_SELECTOR_COMPOSITE_AGG_FILTER -import org.opensearch.alerting.aggregation.bucketselectorext.BucketSelectorExtFilter.Companion.BUCKET_SELECTOR_FILTER -import org.opensearch.common.ParseField -import org.opensearch.common.ParsingException -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent.Params -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.script.Script -import org.opensearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder -import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy -import org.opensearch.search.aggregations.pipeline.PipelineAggregator -import java.io.IOException -import java.util.Objects - -class BucketSelectorExtAggregationBuilder : - AbstractPipelineAggregationBuilder { - private val bucketsPathsMap: Map - val parentBucketPath: String - val script: Script - val filter: BucketSelectorExtFilter? - private var gapPolicy = GapPolicy.SKIP - - constructor( - name: String, - bucketsPathsMap: Map, - script: Script, - parentBucketPath: String, - filter: BucketSelectorExtFilter? 
- ) : super(name, NAME.preferredName, listOf(parentBucketPath).toTypedArray()) { - this.bucketsPathsMap = bucketsPathsMap - this.script = script - this.parentBucketPath = parentBucketPath - this.filter = filter - } - - @Throws(IOException::class) - @Suppress("UNCHECKED_CAST") - constructor(sin: StreamInput) : super(sin, NAME.preferredName) { - bucketsPathsMap = sin.readMap() as MutableMap - script = Script(sin) - gapPolicy = GapPolicy.readFrom(sin) - parentBucketPath = sin.readString() - filter = if (sin.readBoolean()) { - BucketSelectorExtFilter(sin) - } else { - null - } - } - - @Throws(IOException::class) - override fun doWriteTo(out: StreamOutput) { - out.writeMap(bucketsPathsMap) - script.writeTo(out) - gapPolicy.writeTo(out) - out.writeString(parentBucketPath) - if (filter != null) { - out.writeBoolean(true) - filter.writeTo(out) - } else { - out.writeBoolean(false) - } - } - - /** - * Sets the gap policy to use for this aggregation. - */ - fun gapPolicy(gapPolicy: GapPolicy?): BucketSelectorExtAggregationBuilder { - requireNotNull(gapPolicy) { "[gapPolicy] must not be null: [$name]" } - this.gapPolicy = gapPolicy - return this - } - - override fun createInternal(metaData: Map?): PipelineAggregator { - return BucketSelectorExtAggregator(name, bucketsPathsMap, parentBucketPath, script, gapPolicy, filter, metaData) - } - - @Throws(IOException::class) - public override fun internalXContent(builder: XContentBuilder, params: Params): XContentBuilder { - builder.field(PipelineAggregator.Parser.BUCKETS_PATH.preferredName, bucketsPathsMap as Map?) - .field(PARENT_BUCKET_PATH.preferredName, parentBucketPath) - .field(Script.SCRIPT_PARSE_FIELD.preferredName, script) - .field(PipelineAggregator.Parser.GAP_POLICY.preferredName, gapPolicy.getName()) - if (filter != null) { - if (filter.isCompositeAggregation) { - builder.startObject(BUCKET_SELECTOR_COMPOSITE_AGG_FILTER.preferredName) - .value(filter) - .endObject() - } else { - builder.startObject(BUCKET_SELECTOR_FILTER.preferredName) - .value(filter) - .endObject() - } - } - return builder - } - - override fun overrideBucketsPath(): Boolean { - return true - } - - override fun validate(context: ValidationContext) { - // Nothing to check - } - - override fun hashCode(): Int { - return Objects.hash(super.hashCode(), bucketsPathsMap, script, gapPolicy) - } - - override fun equals(other: Any?): Boolean { - if (this === other) return true - if (other == null || javaClass != other.javaClass) return false - if (!super.equals(other)) return false - val otherCast = other as BucketSelectorExtAggregationBuilder - return ( - bucketsPathsMap == otherCast.bucketsPathsMap && - script == otherCast.script && - gapPolicy == otherCast.gapPolicy - ) - } - - override fun getWriteableName(): String { - return NAME.preferredName - } - - companion object { - val NAME = ParseField("bucket_selector_ext") - val PARENT_BUCKET_PATH = ParseField("parent_bucket_path") - - @Throws(IOException::class) - fun parse(reducerName: String, parser: XContentParser): BucketSelectorExtAggregationBuilder { - var token: XContentParser.Token - var script: Script? = null - var currentFieldName: String? = null - var bucketsPathsMap: MutableMap? = null - var gapPolicy: GapPolicy? = null - var parentBucketPath: String? = null - var filter: BucketSelectorExtFilter? 
= null - while (parser.nextToken().also { token = it } !== XContentParser.Token.END_OBJECT) { - if (token === XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName() - } else if (token === XContentParser.Token.VALUE_STRING) { - when { - PipelineAggregator.Parser.BUCKETS_PATH.match(currentFieldName, parser.deprecationHandler) -> { - bucketsPathsMap = HashMap() - bucketsPathsMap["_value"] = parser.text() - } - PipelineAggregator.Parser.GAP_POLICY.match(currentFieldName, parser.deprecationHandler) -> { - gapPolicy = GapPolicy.parse(parser.text(), parser.tokenLocation) - } - Script.SCRIPT_PARSE_FIELD.match(currentFieldName, parser.deprecationHandler) -> { - script = Script.parse(parser) - } - PARENT_BUCKET_PATH.match(currentFieldName, parser.deprecationHandler) -> { - parentBucketPath = parser.text() - } - else -> { - throw ParsingException( - parser.tokenLocation, - "Unknown key for a $token in [$reducerName]: [$currentFieldName]." - ) - } - } - } else if (token === XContentParser.Token.START_ARRAY) { - if (PipelineAggregator.Parser.BUCKETS_PATH.match(currentFieldName, parser.deprecationHandler)) { - val paths: MutableList = ArrayList() - while (parser.nextToken().also { token = it } !== XContentParser.Token.END_ARRAY) { - val path = parser.text() - paths.add(path) - } - bucketsPathsMap = HashMap() - for (i in paths.indices) { - bucketsPathsMap["_value$i"] = paths[i] - } - } else { - throw ParsingException( - parser.tokenLocation, - "Unknown key for a $token in [$reducerName]: [$currentFieldName]." - ) - } - } else if (token === XContentParser.Token.START_OBJECT) { - when { - Script.SCRIPT_PARSE_FIELD.match(currentFieldName, parser.deprecationHandler) -> { - script = Script.parse(parser) - } - PipelineAggregator.Parser.BUCKETS_PATH.match(currentFieldName, parser.deprecationHandler) -> { - val map = parser.map() - bucketsPathsMap = HashMap() - for ((key, value) in map) { - bucketsPathsMap[key] = value.toString() - } - } - BUCKET_SELECTOR_FILTER.match(currentFieldName, parser.deprecationHandler) -> { - filter = BucketSelectorExtFilter.parse(reducerName, false, parser) - } - BUCKET_SELECTOR_COMPOSITE_AGG_FILTER.match( - currentFieldName, - parser.deprecationHandler - ) -> { - filter = BucketSelectorExtFilter.parse(reducerName, true, parser) - } - else -> { - throw ParsingException( - parser.tokenLocation, - "Unknown key for a $token in [$reducerName]: [$currentFieldName]." 
- ) - } - } - } else { - throw ParsingException(parser.tokenLocation, "Unexpected token $token in [$reducerName].") - } - } - if (bucketsPathsMap == null) { - throw ParsingException( - parser.tokenLocation, - "Missing required field [" + PipelineAggregator.Parser.BUCKETS_PATH.preferredName + - "] for bucket_selector aggregation [" + reducerName + "]" - ) - } - if (script == null) { - throw ParsingException( - parser.tokenLocation, - "Missing required field [" + Script.SCRIPT_PARSE_FIELD.preferredName + - "] for bucket_selector aggregation [" + reducerName + "]" - ) - } - - if (parentBucketPath == null) { - throw ParsingException( - parser.tokenLocation, - "Missing required field [" + PARENT_BUCKET_PATH + - "] for bucket_selector aggregation [" + reducerName + "]" - ) - } - val factory = BucketSelectorExtAggregationBuilder(reducerName, bucketsPathsMap, script, parentBucketPath, filter) - if (gapPolicy != null) { - factory.gapPolicy(gapPolicy) - } - return factory - } - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregator.kt b/alerting/src/main/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregator.kt deleted file mode 100644 index f121138b7..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregator.kt +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.aggregation.bucketselectorext - -import org.apache.lucene.util.BytesRef -import org.opensearch.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder.Companion.NAME -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.script.BucketAggregationSelectorScript -import org.opensearch.script.Script -import org.opensearch.search.DocValueFormat -import org.opensearch.search.aggregations.Aggregations -import org.opensearch.search.aggregations.InternalAggregation -import org.opensearch.search.aggregations.InternalAggregation.ReduceContext -import org.opensearch.search.aggregations.InternalMultiBucketAggregation -import org.opensearch.search.aggregations.bucket.SingleBucketAggregation -import org.opensearch.search.aggregations.bucket.composite.InternalComposite -import org.opensearch.search.aggregations.bucket.terms.IncludeExclude -import org.opensearch.search.aggregations.pipeline.BucketHelpers -import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy -import org.opensearch.search.aggregations.pipeline.SiblingPipelineAggregator -import org.opensearch.search.aggregations.support.AggregationPath -import java.io.IOException - -class BucketSelectorExtAggregator : SiblingPipelineAggregator { - private var name: String? = null - private var bucketsPathsMap: Map - private var parentBucketPath: String - private var script: Script - private var gapPolicy: GapPolicy - private var bucketSelectorExtFilter: BucketSelectorExtFilter? = null - - constructor( - name: String?, - bucketsPathsMap: Map, - parentBucketPath: String, - script: Script, - gapPolicy: GapPolicy, - filter: BucketSelectorExtFilter?, - metadata: Map? 
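Before the deletion above, a caller would have assembled this pipeline aggregation programmatically along these lines (a sketch; the name, paths, and threshold are illustrative):

```kotlin
import org.opensearch.script.Script

// Keep only buckets whose doc count exceeds 10. "count" is the script variable
// bound to the "_count" buckets_path, and "composite_agg" is the parent
// multi-bucket aggregation whose buckets are being filtered.
val selector = BucketSelectorExtAggregationBuilder(
    "errors_above_threshold",
    mapOf("count" to "_count"),
    Script("params.count > 10"),
    "composite_agg",
    null // optional include/exclude filter on bucket keys
)
```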
- ) : super(name, bucketsPathsMap.values.toTypedArray(), metadata) { - this.bucketsPathsMap = bucketsPathsMap - this.parentBucketPath = parentBucketPath - this.script = script - this.gapPolicy = gapPolicy - this.bucketSelectorExtFilter = filter - } - - /** - * Read from a stream. - */ - @Suppress("UNCHECKED_CAST") - @Throws(IOException::class) - constructor(sin: StreamInput) : super(sin.readString(), null, null) { - script = Script(sin) - gapPolicy = GapPolicy.readFrom(sin) - bucketsPathsMap = sin.readMap() as Map - parentBucketPath = sin.readString() - if (sin.readBoolean()) { - bucketSelectorExtFilter = BucketSelectorExtFilter(sin) - } else { - bucketSelectorExtFilter = null - } - } - - @Throws(IOException::class) - override fun doWriteTo(out: StreamOutput) { - out.writeString(name) - script.writeTo(out) - gapPolicy.writeTo(out) - out.writeGenericValue(bucketsPathsMap) - out.writeString(parentBucketPath) - if (bucketSelectorExtFilter != null) { - out.writeBoolean(true) - bucketSelectorExtFilter!!.writeTo(out) - } else { - out.writeBoolean(false) - } - } - - override fun getWriteableName(): String { - return NAME.preferredName - } - - override fun doReduce(aggregations: Aggregations, reduceContext: ReduceContext): InternalAggregation { - val parentBucketPathList = AggregationPath.parse(parentBucketPath).pathElementsAsStringList - var subAggregations: Aggregations = aggregations - for (i in 0 until parentBucketPathList.size - 1) { - subAggregations = subAggregations.get(parentBucketPathList[0]).aggregations - } - val originalAgg = subAggregations.get(parentBucketPathList.last()) as InternalMultiBucketAggregation<*, *> - val buckets = originalAgg.buckets - val factory = reduceContext.scriptService().compile(script, BucketAggregationSelectorScript.CONTEXT) - val selectedBucketsIndex: MutableList = ArrayList() - for (i in buckets.indices) { - val bucket = buckets[i] - if (bucketSelectorExtFilter != null) { - var accepted = true - if (bucketSelectorExtFilter!!.isCompositeAggregation) { - val compBucketKeyObj = (bucket as InternalComposite.InternalBucket).key - val filtersMap: HashMap? = bucketSelectorExtFilter!!.filtersMap - for (sourceKey in compBucketKeyObj.keys) { - if (filtersMap != null) { - if (filtersMap.containsKey(sourceKey)) { - val obj = compBucketKeyObj[sourceKey] - accepted = isAccepted(obj!!, filtersMap[sourceKey]) - if (!accepted) break - } else { - accepted = false - break - } - } - } - } else { - accepted = isAccepted(bucket.key, bucketSelectorExtFilter!!.filters) - } - if (!accepted) continue - } - - val vars: MutableMap = HashMap() - if (script.params != null) { - vars.putAll(script.params) - } - for ((varName, bucketsPath) in bucketsPathsMap) { - val value = BucketHelpers.resolveBucketValue(originalAgg, bucket, bucketsPath, gapPolicy) - vars[varName] = value - } - val executableScript = factory.newInstance(vars) - // TODO: can we use one instance of the script for all buckets? it should be stateless? 
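On the TODO above: the expensive step, `compile`, already happens once per reduce; `newInstance` only binds per-bucket variables to the compiled factory. Condensed, the selection loop has this shape (a sketch using names from the surrounding code):

```kotlin
// Compile once per reduce, bind per-bucket variables, keep indices that pass.
val factory = reduceContext.scriptService().compile(script, BucketAggregationSelectorScript.CONTEXT)
buckets.forEachIndexed { i, bucket ->
    val vars = HashMap<String, Any?>(script.params ?: emptyMap())
    bucketsPathsMap.forEach { (name, path) ->
        vars[name] = BucketHelpers.resolveBucketValue(originalAgg, bucket, path, gapPolicy)
    }
    if (factory.newInstance(vars).execute()) selectedBucketsIndex.add(i)
}
```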
- if (executableScript.execute()) { - selectedBucketsIndex.add(i) - } - } - - return BucketSelectorIndices( - name(), parentBucketPath, selectedBucketsIndex, originalAgg.metadata - ) - } - - private fun isAccepted(obj: Any, filter: IncludeExclude?): Boolean { - return when (obj.javaClass) { - String::class.java -> { - val stringFilter = filter!!.convertToStringFilter(DocValueFormat.RAW) - stringFilter.accept(BytesRef(obj as String)) - } - java.lang.Long::class.java, Long::class.java -> { - val longFilter = filter!!.convertToLongFilter(DocValueFormat.RAW) - longFilter.accept(obj as Long) - } - java.lang.Double::class.java, Double::class.java -> { - val doubleFilter = filter!!.convertToDoubleFilter() - doubleFilter.accept(obj as Long) - } - else -> { - throw IllegalStateException("Object is not comparable. Please use one of String, Long or Double type.") - } - } - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtFilter.kt b/alerting/src/main/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtFilter.kt deleted file mode 100644 index c1f7d159d..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtFilter.kt +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.aggregation.bucketselectorext - -import org.opensearch.common.ParseField -import org.opensearch.common.ParsingException -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent.Params -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.search.aggregations.bucket.terms.IncludeExclude -import java.io.IOException - -class BucketSelectorExtFilter : ToXContentObject, Writeable { - // used for composite aggregations - val filtersMap: HashMap? - // used for filtering string term aggregation - val filters: IncludeExclude? - - constructor(filters: IncludeExclude?) { - filtersMap = null - this.filters = filters - } - - constructor(filtersMap: HashMap?) { - this.filtersMap = filtersMap - filters = null - } - - constructor(sin: StreamInput) { - if (sin.readBoolean()) { - val size: Int = sin.readVInt() - filtersMap = java.util.HashMap() - for (i in 0 until size) { - filtersMap[sin.readString()] = IncludeExclude(sin) - } - filters = null - } else { - filters = IncludeExclude(sin) - filtersMap = null - } - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - val isCompAgg = isCompositeAggregation - out.writeBoolean(isCompAgg) - if (isCompAgg) { - out.writeVInt(filtersMap!!.size) - for ((key, value) in filtersMap) { - out.writeString(key) - value.writeTo(out) - } - } else { - filters!!.writeTo(out) - } - } - - @Throws(IOException::class) - override fun toXContent(builder: XContentBuilder, params: Params): XContentBuilder { - if (isCompositeAggregation) { - for ((key, filter) in filtersMap!!) 
{ - builder.startObject(key) - filter.toXContent(builder, params) - builder.endObject() - } - } else { - filters!!.toXContent(builder, params) - } - return builder - } - - val isCompositeAggregation: Boolean - get() = if (filtersMap != null && filters == null) { - true - } else if (filtersMap == null && filters != null) { - false - } else { - throw IllegalStateException("Type of selector cannot be determined") - } - - companion object { - const val NAME = "filter" - var BUCKET_SELECTOR_FILTER = ParseField("filter") - var BUCKET_SELECTOR_COMPOSITE_AGG_FILTER = ParseField("composite_agg_filter") - - @Throws(IOException::class) - fun parse(reducerName: String, isCompositeAggregation: Boolean, parser: XContentParser): BucketSelectorExtFilter { - var token: XContentParser.Token - return if (isCompositeAggregation) { - val filtersMap = HashMap() - while (parser.nextToken().also { token = it } !== XContentParser.Token.END_OBJECT) { - if (token === XContentParser.Token.FIELD_NAME) { - val sourceKey = parser.currentName() - token = parser.nextToken() - filtersMap[sourceKey] = parseIncludeExclude(reducerName, parser) - } else { - throw ParsingException( - parser.tokenLocation, - "Unknown key for a " + token + " in [" + reducerName + "]: [" + parser.currentName() + "]." - ) - } - } - BucketSelectorExtFilter(filtersMap) - } else { - BucketSelectorExtFilter(parseIncludeExclude(reducerName, parser)) - } - } - - @Throws(IOException::class) - private fun parseIncludeExclude(reducerName: String, parser: XContentParser): IncludeExclude { - var token: XContentParser.Token - var include: IncludeExclude? = null - var exclude: IncludeExclude? = null - while (parser.nextToken().also { token = it } !== XContentParser.Token.END_OBJECT) { - val fieldName = parser.currentName() - when { - IncludeExclude.INCLUDE_FIELD.match(fieldName, parser.deprecationHandler) -> { - parser.nextToken() - include = IncludeExclude.parseInclude(parser) - } - IncludeExclude.EXCLUDE_FIELD.match(fieldName, parser.deprecationHandler) -> { - parser.nextToken() - exclude = IncludeExclude.parseExclude(parser) - } - else -> { - throw ParsingException( - parser.tokenLocation, - "Unknown key for a $token in [$reducerName]: [$fieldName]." - ) - } - } - } - return IncludeExclude.merge(include, exclude) - } - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorIndices.kt b/alerting/src/main/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorIndices.kt deleted file mode 100644 index b0ca6ab15..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorIndices.kt +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.aggregation.bucketselectorext - -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent.Params -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.search.aggregations.InternalAggregation -import java.io.IOException -import java.util.Objects - -open class BucketSelectorIndices( - name: String?, - private var parentBucketPath: String, - var bucketIndices: List, - metaData: Map? -) : InternalAggregation(name, metaData) { - - @Throws(IOException::class) - override fun doWriteTo(out: StreamOutput) { - out.writeString(parentBucketPath) - out.writeIntArray(bucketIndices.stream().mapToInt { i: Int? -> i!! 
}.toArray()) - } - - override fun getWriteableName(): String { - return name - } - - override fun reduce(aggregations: List, reduceContext: ReduceContext): BucketSelectorIndices { - throw UnsupportedOperationException("Not supported") - } - - override fun mustReduceOnSingleInternalAgg(): Boolean { - return false - } - - override fun getProperty(path: MutableList?): Any { - throw UnsupportedOperationException("Not supported") - } - - internal object Fields { - const val PARENT_BUCKET_PATH = "parent_bucket_path" - const val BUCKET_INDICES = "bucket_indices" - } - - @Throws(IOException::class) - override fun doXContentBody(builder: XContentBuilder, params: Params): XContentBuilder { - builder.field(Fields.PARENT_BUCKET_PATH, parentBucketPath) - builder.field(Fields.BUCKET_INDICES, bucketIndices) - otherStatsToXContent(builder) - return builder - } - - @Throws(IOException::class) - protected fun otherStatsToXContent(builder: XContentBuilder): XContentBuilder { - return builder - } - - override fun hashCode(): Int { - return Objects.hash(super.hashCode(), parentBucketPath) - } - - override fun equals(other: Any?): Boolean { - if (this === other) return true - if (other == null || javaClass != other.javaClass) return false - if (!super.equals(other)) return false - val otherCast = other as BucketSelectorIndices - return name == otherCast.name && parentBucketPath == otherCast.parentBucketPath - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertError.kt b/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertError.kt deleted file mode 100644 index 72d788684..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertError.kt +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.alerts - -import org.opensearch.alerting.opensearchapi.instant -import org.opensearch.alerting.opensearchapi.optionalTimeField -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken -import java.io.IOException -import java.time.Instant - -data class AlertError(val timestamp: Instant, val message: String) : Writeable, ToXContent { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readInstant(), // timestamp - sin.readString() // message - ) - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeInstant(timestamp) - out.writeString(message) - } - companion object { - - const val TIMESTAMP_FIELD = "timestamp" - const val MESSAGE_FIELD = "message" - - @JvmStatic - @Throws(IOException::class) - fun parse(xcp: XContentParser): AlertError { - - lateinit var timestamp: Instant - lateinit var message: String - - ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - - when (fieldName) { - TIMESTAMP_FIELD -> timestamp = requireNotNull(xcp.instant()) - MESSAGE_FIELD -> message = xcp.text() - } - } - return AlertError(timestamp = timestamp, message = message) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): AlertError { 
- return AlertError(sin) - } - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return builder.startObject() - .optionalTimeField(TIMESTAMP_FIELD, timestamp) - .field(MESSAGE_FIELD, message) - .endObject() - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt b/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt index b72f6e437..c9b730f1f 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertIndices.kt @@ -6,8 +6,8 @@ package org.opensearch.alerting.alerts import org.apache.logging.log4j.LogManager +import org.opensearch.ExceptionsHelper import org.opensearch.ResourceAlreadyExistsException -import org.opensearch.action.ActionListener import org.opensearch.action.admin.cluster.state.ClusterStateRequest import org.opensearch.action.admin.cluster.state.ClusterStateResponse import org.opensearch.action.admin.indices.alias.Alias @@ -36,6 +36,7 @@ import org.opensearch.alerting.settings.AlertingSettings.Companion.FINDING_HISTO import org.opensearch.alerting.settings.AlertingSettings.Companion.FINDING_HISTORY_RETENTION_PERIOD import org.opensearch.alerting.settings.AlertingSettings.Companion.FINDING_HISTORY_ROLLOVER_PERIOD import org.opensearch.alerting.settings.AlertingSettings.Companion.REQUEST_TIMEOUT +import org.opensearch.alerting.util.AlertingException import org.opensearch.alerting.util.IndexUtils import org.opensearch.client.Client import org.opensearch.cluster.ClusterChangedEvent @@ -45,6 +46,8 @@ import org.opensearch.cluster.service.ClusterService import org.opensearch.common.settings.Settings import org.opensearch.common.unit.TimeValue import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.DataSources +import org.opensearch.core.action.ActionListener import org.opensearch.threadpool.Scheduler.Cancellable import org.opensearch.threadpool.ThreadPool import java.time.Instant @@ -134,23 +137,28 @@ class AlertIndices( } @Volatile private var alertHistoryEnabled = AlertingSettings.ALERT_HISTORY_ENABLED.get(settings) + @Volatile private var findingHistoryEnabled = AlertingSettings.FINDING_HISTORY_ENABLED.get(settings) @Volatile private var alertHistoryMaxDocs = AlertingSettings.ALERT_HISTORY_MAX_DOCS.get(settings) + @Volatile private var findingHistoryMaxDocs = AlertingSettings.FINDING_HISTORY_MAX_DOCS.get(settings) @Volatile private var alertHistoryMaxAge = AlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE.get(settings) + @Volatile private var findingHistoryMaxAge = AlertingSettings.FINDING_HISTORY_INDEX_MAX_AGE.get(settings) @Volatile private var alertHistoryRolloverPeriod = AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.get(settings) + @Volatile private var findingHistoryRolloverPeriod = AlertingSettings.FINDING_HISTORY_ROLLOVER_PERIOD.get(settings) @Volatile private var alertHistoryRetentionPeriod = AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD.get(settings) + @Volatile private var findingHistoryRetentionPeriod = AlertingSettings.FINDING_HISTORY_RETENTION_PERIOD.get(settings) @Volatile private var requestTimeout = AlertingSettings.REQUEST_TIMEOUT.get(settings) - @Volatile private var isMaster = false + @Volatile private var isClusterManager = false // for JobsMonitor to report var lastRolloverTime: TimeValue? 
= null @@ -161,7 +169,9 @@ class AlertIndices( private var alertIndexInitialized: Boolean = false - private var scheduledRollover: Cancellable? = null + private var scheduledAlertRollover: Cancellable? = null + + private var scheduledFindingRollover: Cancellable? = null fun onMaster() { try { @@ -169,9 +179,9 @@ class AlertIndices( rolloverAlertHistoryIndex() rolloverFindingHistoryIndex() // schedule the next rollover for approx MAX_AGE later - scheduledRollover = threadPool + scheduledAlertRollover = threadPool .scheduleWithFixedDelay({ rolloverAndDeleteAlertHistoryIndices() }, alertHistoryRolloverPeriod, executorName()) - scheduledRollover = threadPool + scheduledFindingRollover = threadPool .scheduleWithFixedDelay({ rolloverAndDeleteFindingHistoryIndices() }, findingHistoryRolloverPeriod, executorName()) } catch (e: Exception) { // This should be run on cluster startup @@ -184,7 +194,8 @@ class AlertIndices( } fun offMaster() { - scheduledRollover?.cancel() + scheduledAlertRollover?.cancel() + scheduledFindingRollover?.cancel() } private fun executorName(): String { @@ -192,12 +203,12 @@ class AlertIndices( } override fun clusterChanged(event: ClusterChangedEvent) { - // Instead of using a LocalNodeMasterListener to track master changes, this service will + // Instead of using a LocalNodeClusterManagerListener to track master changes, this service will // track them here to avoid conditions where master listener events run after other // listeners that depend on what happened in the master listener - if (this.isMaster != event.localNodeMaster()) { - this.isMaster = event.localNodeMaster() - if (this.isMaster) { + if (this.isClusterManager != event.localNodeClusterManager()) { + this.isClusterManager = event.localNodeClusterManager() + if (this.isClusterManager) { onMaster() } else { offMaster() @@ -212,16 +223,16 @@ class AlertIndices( private fun rescheduleAlertRollover() { if (clusterService.state().nodes.isLocalNodeElectedMaster) { - scheduledRollover?.cancel() - scheduledRollover = threadPool + scheduledAlertRollover?.cancel() + scheduledAlertRollover = threadPool .scheduleWithFixedDelay({ rolloverAndDeleteAlertHistoryIndices() }, alertHistoryRolloverPeriod, executorName()) } } private fun rescheduleFindingRollover() { if (clusterService.state().nodes.isLocalNodeElectedMaster) { - scheduledRollover?.cancel() - scheduledRollover = threadPool + scheduledFindingRollover?.cancel() + scheduledFindingRollover = threadPool .scheduleWithFixedDelay({ rolloverAndDeleteFindingHistoryIndices() }, findingHistoryRolloverPeriod, executorName()) } } @@ -230,7 +241,24 @@ class AlertIndices( return alertIndexInitialized && alertHistoryIndexInitialized } - fun isAlertHistoryEnabled(): Boolean = alertHistoryEnabled + fun isAlertInitialized(dataSources: DataSources): Boolean { + val alertsIndex = dataSources.alertsIndex + val alertsHistoryIndex = dataSources.alertsHistoryIndex + if (alertsIndex == ALERT_INDEX && alertsHistoryIndex == ALERT_HISTORY_WRITE_INDEX) { + return alertIndexInitialized && alertHistoryIndexInitialized + } + if ( + clusterService.state().metadata.indices.containsKey(alertsIndex) && + clusterService.state().metadata.hasAlias(alertsHistoryIndex) + ) { + return true + } + return false + } + + fun isAlertHistoryEnabled(): Boolean { + return alertHistoryEnabled + } fun isFindingHistoryEnabled(): Boolean = findingHistoryEnabled @@ -243,7 +271,36 @@ class AlertIndices( } alertIndexInitialized } + suspend fun createOrUpdateAlertIndex(dataSources: DataSources) { + if 
(dataSources.alertsIndex == ALERT_INDEX) { + return createOrUpdateAlertIndex() + } + val alertsIndex = dataSources.alertsIndex + if (!clusterService.state().routingTable().hasIndex(alertsIndex)) { + alertIndexInitialized = createIndex(alertsIndex!!, alertMapping()) + } else { + updateIndexMapping(alertsIndex!!, alertMapping()) + } + } + suspend fun createOrUpdateInitialAlertHistoryIndex(dataSources: DataSources) { + if (dataSources.alertsIndex == ALERT_INDEX) { + return createOrUpdateInitialAlertHistoryIndex() + } + if (!clusterService.state().metadata.hasAlias(dataSources.alertsHistoryIndex)) { + createIndex( + dataSources.alertsHistoryIndexPattern ?: ALERT_HISTORY_INDEX_PATTERN, + alertMapping(), + dataSources.alertsHistoryIndex + ) + } else { + updateIndexMapping( + dataSources.alertsHistoryIndex ?: ALERT_HISTORY_WRITE_INDEX, + alertMapping(), + true + ) + } + } suspend fun createOrUpdateInitialAlertHistoryIndex() { if (!alertHistoryIndexInitialized) { alertHistoryIndexInitialized = createIndex(ALERT_HISTORY_INDEX_PATTERN, alertMapping(), ALERT_HISTORY_WRITE_INDEX) @@ -273,6 +330,23 @@ class AlertIndices( findingHistoryIndexInitialized } + suspend fun createOrUpdateInitialFindingHistoryIndex(dataSources: DataSources) { + if (dataSources.findingsIndex == FINDING_HISTORY_WRITE_INDEX) { + return createOrUpdateInitialFindingHistoryIndex() + } + val findingsIndex = dataSources.findingsIndex + val findingsIndexPattern = dataSources.findingsIndexPattern ?: FINDING_HISTORY_INDEX_PATTERN + if (!clusterService.state().metadata().hasAlias(findingsIndex)) { + createIndex( + findingsIndexPattern, + findingMapping(), + findingsIndex + ) + } else { + updateIndexMapping(findingsIndex, findingMapping(), true) + } + } + private suspend fun createIndex(index: String, schemaMapping: String, alias: String? = null): Boolean { // This should be a fast check of local cluster state. Should be exceedingly rare that the local cluster // state does not contain the index and multiple nodes concurrently try to create the index. 
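These `DataSources` overloads are what let an individual monitor route alerts and findings into custom indices while stock monitors keep the default alias-managed indices. A hedged usage sketch follows; the index names are illustrative, and `DataSources().copy(...)` mirrors how this diff builds one further down in `AlertMover`:

```kotlin
// Called from a coroutine, since the createOrUpdate* functions are suspend functions.
val dataSources = DataSources().copy(
    alertsIndex = ".custom-alerts",
    alertsHistoryIndex = ".custom-alerts-history-write",
    alertsHistoryIndexPattern = "<.custom-alerts-history-{now/d}-1>"
)
alertIndices.createOrUpdateAlertIndex(dataSources)
alertIndices.createOrUpdateInitialAlertHistoryIndex(dataSources)
```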
@@ -282,6 +356,7 @@ class AlertIndices( } if (existsResponse.isExists) return true + logger.debug("index: [$index] schema mappings: [$schemaMapping]") val request = CreateIndexRequest(index) .mapping(schemaMapping) .settings(Settings.builder().put("index.hidden", true).build()) @@ -290,8 +365,12 @@ class AlertIndices( return try { val createIndexResponse: CreateIndexResponse = client.admin().indices().suspendUntil { create(request, it) } createIndexResponse.isAcknowledged - } catch (e: ResourceAlreadyExistsException) { - true + } catch (t: Exception) { + if (ExceptionsHelper.unwrapCause(t) is ResourceAlreadyExistsException) { + true + } else { + throw AlertingException.wrap(t) + } } } @@ -302,11 +381,12 @@ class AlertIndices( targetIndex = IndexUtils.getIndexNameWithAlias(clusterState, index) } + // TODO call getMapping and compare actual mappings here instead of this if (targetIndex == IndexUtils.lastUpdatedAlertHistoryIndex || targetIndex == IndexUtils.lastUpdatedFindingHistoryIndex) { return } - var putMappingRequest: PutMappingRequest = PutMappingRequest(targetIndex) + val putMappingRequest: PutMappingRequest = PutMappingRequest(targetIndex) .source(mapping, XContentType.JSON) val updateResponse: AcknowledgedResponse = client.admin().indices().suspendUntil { putMapping(putMappingRequest, it) } if (updateResponse.isAcknowledged) { @@ -374,22 +454,30 @@ class AlertIndices( private fun rolloverAlertHistoryIndex() { rolloverIndex( - alertHistoryIndexInitialized, ALERT_HISTORY_WRITE_INDEX, - ALERT_HISTORY_INDEX_PATTERN, alertMapping(), - alertHistoryMaxDocs, alertHistoryMaxAge, ALERT_HISTORY_WRITE_INDEX + alertHistoryIndexInitialized, + ALERT_HISTORY_WRITE_INDEX, + ALERT_HISTORY_INDEX_PATTERN, + alertMapping(), + alertHistoryMaxDocs, + alertHistoryMaxAge, + ALERT_HISTORY_WRITE_INDEX ) } private fun rolloverFindingHistoryIndex() { rolloverIndex( - findingHistoryIndexInitialized, FINDING_HISTORY_WRITE_INDEX, - FINDING_HISTORY_INDEX_PATTERN, findingMapping(), - findingHistoryMaxDocs, findingHistoryMaxAge, FINDING_HISTORY_WRITE_INDEX + findingHistoryIndexInitialized, + FINDING_HISTORY_WRITE_INDEX, + FINDING_HISTORY_INDEX_PATTERN, + findingMapping(), + findingHistoryMaxDocs, + findingHistoryMaxAge, + FINDING_HISTORY_WRITE_INDEX ) } private fun deleteOldIndices(tag: String, indices: String) { - logger.error("info deleteOldIndices") + logger.info("info deleteOldIndices") val clusterStateRequest = ClusterStateRequest() .clear() .indices(indices) @@ -400,7 +488,7 @@ class AlertIndices( clusterStateRequest, object : ActionListener { override fun onResponse(clusterStateResponse: ClusterStateResponse) { - if (!clusterStateResponse.state.metadata.indices.isEmpty) { + if (clusterStateResponse.state.metadata.indices.isNotEmpty()) { val indicesToDelete = getIndicesToDelete(clusterStateResponse) logger.info("Deleting old $tag indices viz $indicesToDelete") deleteAllOldHistoryIndices(indicesToDelete) @@ -435,7 +523,7 @@ class AlertIndices( ): String? 
{ val creationTime = indexMetadata.creationDate if ((Instant.now().toEpochMilli() - creationTime) > retentionPeriodMillis) { - val alias = indexMetadata.aliases.firstOrNull { writeIndex == it.value.alias } + val alias = indexMetadata.aliases.entries.firstOrNull { writeIndex == it.value.alias } if (alias != null) { if (historyEnabled) { // If the index has the write alias and history is enabled, don't delete the index diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertMover.kt b/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertMover.kt index a9c704958..07b1a3a91 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertMover.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/alerts/AlertMover.kt @@ -5,111 +5,246 @@ package org.opensearch.alerting.alerts +import org.apache.logging.log4j.LogManager import org.opensearch.action.bulk.BulkRequest import org.opensearch.action.bulk.BulkResponse import org.opensearch.action.delete.DeleteRequest +import org.opensearch.action.get.GetRequest +import org.opensearch.action.get.GetResponse import org.opensearch.action.index.IndexRequest import org.opensearch.action.search.SearchRequest import org.opensearch.action.search.SearchResponse +import org.opensearch.alerting.MonitorRunnerExecutionContext import org.opensearch.alerting.alerts.AlertIndices.Companion.ALERT_HISTORY_WRITE_INDEX import org.opensearch.alerting.alerts.AlertIndices.Companion.ALERT_INDEX -import org.opensearch.alerting.model.Alert -import org.opensearch.alerting.model.Monitor import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.util.ScheduledJobUtils import org.opensearch.client.Client -import org.opensearch.common.bytes.BytesReference import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.XContentFactory import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.CompositeInput +import org.opensearch.commons.alerting.model.DataSources +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.core.common.bytes.BytesReference +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils import org.opensearch.index.VersionType import org.opensearch.index.query.QueryBuilders -import org.opensearch.rest.RestStatus import org.opensearch.search.builder.SearchSourceBuilder -/** - * Moves defunct active alerts to the alert history index when the corresponding monitor or trigger is deleted. - * - * The logic for moving alerts consists of: - * 1. Find active alerts: - * a. matching monitorId if no monitor is provided (postDelete) - * b. matching monitorId and no triggerIds if monitor is provided (postIndex) - * 2. Move alerts over to [ALERT_HISTORY_WRITE_INDEX] as DELETED - * 3. Delete alerts from [ALERT_INDEX] - * 4. Schedule a retry if there were any failures - */ -suspend fun moveAlerts(client: Client, monitorId: String, monitor: Monitor? 
= null) { - val boolQuery = QueryBuilders.boolQuery() - .filter(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitorId)) +private val log = LogManager.getLogger(AlertMover::class.java) - if (monitor != null) { - boolQuery.mustNot(QueryBuilders.termsQuery(Alert.TRIGGER_ID_FIELD, monitor.triggers.map { it.id })) - } +class AlertMover { + companion object { + /** + * Moves defunct active alerts to the alert history index when the corresponding monitor or trigger is deleted. + * + * The logic for moving alerts consists of: + * 1. Find active alerts: + * a. matching monitorId if no monitor is provided (postDelete) + * b. matching monitorId and no triggerIds if monitor is provided (postIndex) + * 2. Move alerts over to DataSources.alertsHistoryIndex as DELETED + * 3. Delete alerts from monitor's DataSources.alertsIndex + * 4. Schedule a retry if there were any failures + */ + suspend fun moveAlerts(client: Client, monitorId: String, monitor: Monitor?) { + var alertIndex = monitor?.dataSources?.alertsIndex ?: ALERT_INDEX + var alertHistoryIndex = monitor?.dataSources?.alertsHistoryIndex ?: ALERT_HISTORY_WRITE_INDEX + + val boolQuery = QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitorId)) + + if (monitor != null) { + boolQuery.mustNot(QueryBuilders.termsQuery(Alert.TRIGGER_ID_FIELD, monitor.triggers.map { it.id })) + } + + val activeAlertsQuery = SearchSourceBuilder.searchSource() + .query(boolQuery) + .version(true) + + val activeAlertsRequest = SearchRequest(alertIndex) + .routing(monitorId) + .source(activeAlertsQuery) + val response: SearchResponse = client.suspendUntil { search(activeAlertsRequest, it) } - val activeAlertsQuery = SearchSourceBuilder.searchSource() - .query(boolQuery) - .version(true) - - val activeAlertsRequest = SearchRequest(AlertIndices.ALERT_INDEX) - .routing(monitorId) - .source(activeAlertsQuery) - val response: SearchResponse = client.suspendUntil { search(activeAlertsRequest, it) } - - // If no alerts are found, simply return - if (response.hits.totalHits?.value == 0L) return - val indexRequests = response.hits.map { hit -> - IndexRequest(AlertIndices.ALERT_HISTORY_WRITE_INDEX) - .routing(monitorId) - .source( - Alert.parse(alertContentParser(hit.sourceRef), hit.id, hit.version) - .copy(state = Alert.State.DELETED) - .toXContentWithUser(XContentFactory.jsonBuilder()) + // If no alerts are found, simply return + if (response.hits.totalHits?.value == 0L) return + val indexRequests = response.hits.map { hit -> + IndexRequest(alertHistoryIndex) + .routing(monitorId) + .source( + Alert.parse(alertContentParser(hit.sourceRef), hit.id, hit.version) + .copy(state = Alert.State.DELETED) + .toXContentWithUser(XContentFactory.jsonBuilder()) + ) + .version(hit.version) + .versionType(VersionType.EXTERNAL_GTE) + .id(hit.id) + } + val copyRequest = BulkRequest().add(indexRequests) + val copyResponse: BulkResponse = client.suspendUntil { bulk(copyRequest, it) } + + val deleteRequests = copyResponse.items.filterNot { it.isFailed }.map { + DeleteRequest(alertIndex, it.id) + .routing(monitorId) + .version(it.version) + .versionType(VersionType.EXTERNAL_GTE) + } + val deleteResponse: BulkResponse = client.suspendUntil { bulk(BulkRequest().add(deleteRequests), it) } + + if (copyResponse.hasFailures()) { + val retryCause = copyResponse.items.filter { it.isFailed } + .firstOrNull { it.status() == RestStatus.TOO_MANY_REQUESTS } + ?.failure?.cause + throw RuntimeException( + "Failed to copy alerts for [$monitorId, ${monitor?.triggers?.map { it.id 
}}]: " + + copyResponse.buildFailureMessage(), + retryCause + ) + } + if (deleteResponse.hasFailures()) { + val retryCause = deleteResponse.items.filter { it.isFailed } + .firstOrNull { it.status() == RestStatus.TOO_MANY_REQUESTS } + ?.failure?.cause + throw RuntimeException( + "Failed to delete alerts for [$monitorId, ${monitor?.triggers?.map { it.id }}]: " + + deleteResponse.buildFailureMessage(), + retryCause + ) + } + } + + private fun alertContentParser(bytesReference: BytesReference): XContentParser { + val xcp = XContentHelper.createParser( + NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, + bytesReference, XContentType.JSON ) - .version(hit.version) - .versionType(VersionType.EXTERNAL_GTE) - .id(hit.id) - } - val copyRequest = BulkRequest().add(indexRequests) - val copyResponse: BulkResponse = client.suspendUntil { bulk(copyRequest, it) } - - val deleteRequests = copyResponse.items.filterNot { it.isFailed }.map { - DeleteRequest(AlertIndices.ALERT_INDEX, it.id) - .routing(monitorId) - .version(it.version) - .versionType(VersionType.EXTERNAL_GTE) - } - val deleteResponse: BulkResponse = client.suspendUntil { bulk(BulkRequest().add(deleteRequests), it) } - - if (copyResponse.hasFailures()) { - val retryCause = copyResponse.items.filter { it.isFailed } - .firstOrNull { it.status() == RestStatus.TOO_MANY_REQUESTS } - ?.failure?.cause - throw RuntimeException( - "Failed to copy alerts for [$monitorId, ${monitor?.triggers?.map { it.id }}]: " + - copyResponse.buildFailureMessage(), - retryCause - ) - } - if (deleteResponse.hasFailures()) { - val retryCause = deleteResponse.items.filter { it.isFailed } - .firstOrNull { it.status() == RestStatus.TOO_MANY_REQUESTS } - ?.failure?.cause - throw RuntimeException( - "Failed to delete alerts for [$monitorId, ${monitor?.triggers?.map { it.id }}]: " + - deleteResponse.buildFailureMessage(), - retryCause - ) - } -} + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + return xcp + } + + /** + * Moves defunct active alerts to the alert history index when the corresponding workflow or trigger is deleted. + * + * The logic for moving alerts consists of: + * 1. Find active alerts: + * a. matching workflowId if no workflow is provided (postDelete) + * b. matching workflowid and chained alert triggerIds if monitor is provided (postIndex) + * 2. Move alerts over to DataSources.alertsHistoryIndex as DELETED + * 3. Delete alerts from monitor's DataSources.alertsIndex + * 4. Schedule a retry if there were any failures + */ + suspend fun moveAlerts(client: Client, workflowId: String, workflow: Workflow?, monitorCtx: MonitorRunnerExecutionContext) { + var alertIndex = ALERT_INDEX + var alertHistoryIndex = ALERT_HISTORY_WRITE_INDEX + if (workflow != null) { + if ( + workflow.inputs.isNotEmpty() && workflow.inputs[0] is CompositeInput && + (workflow.inputs[0] as CompositeInput).sequence.delegates.isNotEmpty() + ) { + var i = 0 + val delegates = (workflow.inputs[0] as CompositeInput).sequence.delegates + try { + var getResponse: GetResponse? 
= null + while (i < delegates.size && (getResponse == null || getResponse.isExists == false)) { + getResponse = + client.suspendUntil { + client.get( + GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, delegates[i].monitorId), + it + ) + } + if (getResponse!!.isExists) { + val monitor = + ScheduledJobUtils.parseMonitorFromScheduledJobDocSource( + monitorCtx.xContentRegistry!!, + response = getResponse + ) + + alertIndex = monitor.dataSources.alertsIndex + alertHistoryIndex = + if (monitor.dataSources.alertsHistoryIndex == null) alertHistoryIndex + else monitor.dataSources.alertsHistoryIndex!! + } + i++ + } + } catch (e: Exception) { + log.error("Failed to get delegate monitor for workflow $workflowId. Assuming default alert indices", e) + } + } + } + val dataSources = DataSources().copy(alertsHistoryIndex = alertHistoryIndex, alertsIndex = alertIndex) + /** check if alert index is initialized **/ + if (monitorCtx.alertIndices!!.isAlertInitialized(dataSources) == false) + return + val boolQuery = QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery(Alert.WORKFLOW_ID_FIELD, workflowId)) + + if (workflow != null) { + boolQuery.mustNot(QueryBuilders.termsQuery(Alert.TRIGGER_ID_FIELD, workflow.triggers.map { it.id })) + } -private fun alertContentParser(bytesReference: BytesReference): XContentParser { - val xcp = XContentHelper.createParser( - NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, - bytesReference, XContentType.JSON - ) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - return xcp + val activeAlertsQuery = SearchSourceBuilder.searchSource() + .query(boolQuery) + .version(true) + + val activeAlertsRequest = SearchRequest(alertIndex) + .routing(workflowId) + .source(activeAlertsQuery) + val response: SearchResponse = client.suspendUntil { search(activeAlertsRequest, it) } + + // If no alerts are found, simply return + if (response.hits.totalHits?.value == 0L) return + val indexRequests = response.hits.map { hit -> + IndexRequest(alertHistoryIndex) + .routing(workflowId) + .source( + Alert.parse(alertContentParser(hit.sourceRef), hit.id, hit.version) + .copy(state = Alert.State.DELETED) + .toXContentWithUser(XContentFactory.jsonBuilder()) + ) + .version(hit.version) + .versionType(VersionType.EXTERNAL_GTE) + .id(hit.id) + } + val copyRequest = BulkRequest().add(indexRequests) + val copyResponse: BulkResponse = client.suspendUntil { bulk(copyRequest, it) } + + val deleteRequests = copyResponse.items.filterNot { it.isFailed }.map { + DeleteRequest(alertIndex, it.id) + .routing(workflowId) + .version(it.version) + .versionType(VersionType.EXTERNAL_GTE) + } + val deleteResponse: BulkResponse = client.suspendUntil { bulk(BulkRequest().add(deleteRequests), it) } + + if (copyResponse.hasFailures()) { + val retryCause = copyResponse.items.filter { it.isFailed } + .firstOrNull { it.status() == RestStatus.TOO_MANY_REQUESTS } + ?.failure?.cause + throw RuntimeException( + "Failed to copy alerts for [$workflowId, ${workflow?.triggers?.map { it.id }}]: " + + copyResponse.buildFailureMessage(), + retryCause + ) + } + if (deleteResponse.hasFailures()) { + val retryCause = deleteResponse.items.filter { it.isFailed } + .firstOrNull { it.status() == RestStatus.TOO_MANY_REQUESTS } + ?.failure?.cause + throw RuntimeException( + "Failed to delete alerts for [$workflowId, ${workflow?.triggers?.map { it.id }}]: " + + deleteResponse.buildFailureMessage(), + retryCause + ) + } + } + } } diff --git 
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionParser.kt b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionParser.kt new file mode 100644 index 000000000..999b9a977 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionParser.kt @@ -0,0 +1,53 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.chainedAlertCondition.parsers + +import org.opensearch.alerting.chainedAlertCondition.resolvers.ChainedAlertRPNResolver +import org.opensearch.alerting.chainedAlertCondition.tokens.CAExpressionOperator + +/** + * The postfix (Reverse Polish Notation) parser. + * Uses the Shunting-yard algorithm to parse the trigger expression + * @param triggerExpression String containing the trigger expression for the monitor + */ +class ChainedAlertExpressionParser( + triggerExpression: String +) : ChainedAlertExpressionRPNBaseParser(triggerExpression) { + + override fun parse(): ChainedAlertRPNResolver { + val expression = expressionToParse.replace(" ", "") + + val splitters = ArrayList<String>() + CAExpressionOperator.values().forEach { splitters.add(it.value) } + + val breaks = ArrayList<String>().apply { add(expression) } + for (s in splitters) { + val a = ArrayList<String>() + for (ind in 0 until breaks.size) { + breaks[ind].let { + if (it.length > 1) { + a.addAll(breakString(breaks[ind], s)) + } else a.add(it) + } + } + breaks.clear() + breaks.addAll(a) + } + + return ChainedAlertRPNResolver(convertInfixToPostfix(breaks)) + } + + private fun breakString(input: String, delimiter: String): ArrayList<String> { + val tokens = input.split(delimiter) + val array = ArrayList<String>() + for (t in tokens) { + array.add(t) + array.add(delimiter) + } + array.removeAt(array.size - 1) + return array + } +}
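Taken together with the RPN resolver it returns, the parser above can be exercised directly. A short usage sketch (the monitor ids m1 and m2 are hypothetical):

import org.opensearch.alerting.chainedAlertCondition.parsers.ChainedAlertExpressionParser

// "Fire only when both delegate monitors produced an alert."
val resolver = ChainedAlertExpressionParser("monitor[id=m1] && monitor[id=m2]").parse()
check(resolver.evaluate(setOf("m1", "m2"))) // both monitors alerted: condition holds
check(!resolver.evaluate(setOf("m1")))      // m2 stayed silent: condition fails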
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionRPNBaseParser.kt b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionRPNBaseParser.kt new file mode 100644 index 000000000..ff3c29db7 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionRPNBaseParser.kt @@ -0,0 +1,114 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.chainedAlertCondition.parsers + +import org.opensearch.alerting.chainedAlertCondition.tokens.CAExpressionOperator +import org.opensearch.alerting.chainedAlertCondition.tokens.CAExpressionToken +import org.opensearch.alerting.chainedAlertCondition.tokens.ChainedAlertExpressionConstant +import org.opensearch.alerting.chainedAlertCondition.tokens.ExpressionToken +import java.util.Stack + +/** + * This is the abstract base class which holds the trigger expression parsing logic; + * using the Infix to Postfix a.k.a. Reverse Polish Notation (RPN) parser. + * It also uses the Shunting-Yard algorithm to parse the given trigger expression. + * + * @param expressionToParse Complete string containing the trigger expression + */ +abstract class ChainedAlertExpressionRPNBaseParser( + protected val expressionToParse: String +) : ExpressionParser { + /** + * To perform the Infix-to-postfix conversion of the trigger expression + */ + protected fun convertInfixToPostfix(expTokens: List<String>): ArrayList<ExpressionToken> { + val expTokenStack = Stack<ExpressionToken>() + val outputExpTokens = ArrayList<ExpressionToken>() + + for (tokenString in expTokens) { + if (tokenString.isEmpty()) continue + when (val expToken = assignToken(tokenString)) { + is CAExpressionToken -> outputExpTokens.add(expToken) + is CAExpressionOperator -> { + when (expToken) { + CAExpressionOperator.PAR_LEFT -> expTokenStack.push(expToken) + CAExpressionOperator.PAR_RIGHT -> { + var topExpToken = expTokenStack.popExpTokenOrNull<CAExpressionOperator>() + while (topExpToken != null && topExpToken != CAExpressionOperator.PAR_LEFT) { + outputExpTokens.add(topExpToken) + topExpToken = expTokenStack.popExpTokenOrNull<CAExpressionOperator>() + } + if (topExpToken != CAExpressionOperator.PAR_LEFT) + throw java.lang.IllegalArgumentException("No matching left parenthesis.") + } + else -> { + var op2 = expTokenStack.peekExpTokenOrNull<CAExpressionOperator>() + while (op2 != null) { + val c = expToken.precedence.compareTo(op2.precedence) + if (c < 0 || !expToken.rightAssociative && c <= 0) { + outputExpTokens.add(expTokenStack.pop()) + } else { + break + } + op2 = expTokenStack.peekExpTokenOrNull<CAExpressionOperator>() + } + expTokenStack.push(expToken) + } + } + } + } + } + + while (!expTokenStack.isEmpty()) { + expTokenStack.peekExpTokenOrNull<CAExpressionOperator>()?.let { + if (it == CAExpressionOperator.PAR_LEFT) + throw java.lang.IllegalArgumentException("No matching right parenthesis.") + } + val top = expTokenStack.pop() + outputExpTokens.add(top) + } + + return outputExpTokens + } + + /** + * Looks up and maps the expression token that matches the string version of that expression unit + */ + private fun assignToken(tokenString: String): ExpressionToken { + + // Check "monitor" string in trigger expression such as in 'monitor[id="id1"]' + if (tokenString.startsWith(ChainedAlertExpressionConstant.ConstantType.MONITOR.ident)) + return CAExpressionToken(tokenString) + + // Check operators in trigger expression such as in [&&, ||, !] + for (op in CAExpressionOperator.values()) { + if (op.value == tokenString) return op + } + + // Check any constants in trigger expression such as in ["id", "[", "]", "="] + for (con in ChainedAlertExpressionConstant.ConstantType.values()) { + if (tokenString == con.ident) return ChainedAlertExpressionConstant(con) + } + + throw IllegalArgumentException("Error while processing the trigger expression '$tokenString'") + } + + private inline fun <reified T> Stack<ExpressionToken>.popExpTokenOrNull(): T? { + return try { + pop() as T + } catch (e: java.lang.Exception) { + null + } + } + + private inline fun <reified T> Stack<ExpressionToken>.peekExpTokenOrNull(): T? { + return try { + peek() as T + } catch (e: java.lang.Exception) { + null + } + } +}
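Concretely, using hypothetical monitor ids, the conversion plays out like this:

// infix:   monitor[id=m1] && (monitor[id=m2] || !monitor[id=m3])
// postfix: monitor[id=m1] monitor[id=m2] monitor[id=m3] ! || &&
// The parentheses are consumed, '!' (precedence 3) is emitted before '||' (precedence 2),
// and the equal-precedence, left-associative '&&' is emitted last.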
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/parsers/ExpressionParser.kt b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/parsers/ExpressionParser.kt new file mode 100644 index 000000000..e2ece9d40 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/parsers/ExpressionParser.kt @@ -0,0 +1,12 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.chainedAlertCondition.parsers + +import org.opensearch.alerting.chainedAlertCondition.resolvers.ChainedAlertTriggerResolver + +interface ExpressionParser { + fun parse(): ChainedAlertTriggerResolver +}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertRPNResolver.kt b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertRPNResolver.kt new file mode 100644 index 000000000..dfec9614f --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertRPNResolver.kt @@ -0,0 +1,110 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.chainedAlertCondition.resolvers + +import org.opensearch.alerting.chainedAlertCondition.tokens.CAExpressionOperator +import org.opensearch.alerting.chainedAlertCondition.tokens.CAExpressionToken +import org.opensearch.alerting.chainedAlertCondition.tokens.ChainedAlertExpressionConstant +import org.opensearch.alerting.chainedAlertCondition.tokens.ExpressionToken +import java.util.Stack + +/** + * Solves the Trigger Expression using the Reverse Polish Notation (RPN) based solver + * @param polishNotation an array of expression tokens organized in the RPN order + */ +class ChainedAlertRPNResolver( + private val polishNotation: ArrayList<ExpressionToken>, +) : ChainedAlertTriggerResolver { + + private val eqString by lazy { + val stringBuilder = StringBuilder() + for (expToken in polishNotation) { + when (expToken) { + is CAExpressionToken -> stringBuilder.append(expToken.value) + is CAExpressionOperator -> stringBuilder.append(expToken.value) + is ChainedAlertExpressionConstant -> stringBuilder.append(expToken.type.ident) + else -> throw Exception() + } + stringBuilder.append(" ") + } + stringBuilder.toString() + } + + override fun toString(): String = eqString + + /** + * Evaluates the trigger expression provided in the form of the RPN token array. + * @param alertGeneratingMonitors set of monitor ids that generated alerts during this execution + * @return the boolean result of evaluating the trigger expression + */ + override fun evaluate(alertGeneratingMonitors: Set<String>): Boolean { + val tokenStack = Stack<Boolean>() + val res = true + for (expToken in polishNotation) { + when (expToken) { + is CAExpressionToken -> tokenStack.push(resolveMonitorExpression(expToken.value, alertGeneratingMonitors)) + is CAExpressionOperator -> { + val right = tokenStack.pop() + val expr = when (expToken) { + CAExpressionOperator.AND -> ChainedAlertTriggerExpression.And(tokenStack.pop(), right) + CAExpressionOperator.OR -> ChainedAlertTriggerExpression.Or(tokenStack.pop(), right) + CAExpressionOperator.NOT -> ChainedAlertTriggerExpression.Not(res, right) + else -> throw IllegalArgumentException("No matching operator.") + } + tokenStack.push(expr.resolve()) + } + } + } + return tokenStack.pop() + } + + override fun getMonitorIds(parsedTriggerCondition: ChainedAlertRPNResolver): Set<String> { + val monitorIds = mutableSetOf<String>() + for (expToken in polishNotation) { + when (expToken) { + is CAExpressionToken -> { + val monitorExpString = expToken.value + if (!monitorExpString.startsWith(ChainedAlertExpressionConstant.ConstantType.MONITOR.ident)) + continue + val token = monitorExpString.substringAfter(ChainedAlertExpressionConstant.ConstantType.BRACKET_LEFT.ident) + .substringBefore(ChainedAlertExpressionConstant.ConstantType.BRACKET_RIGHT.ident) + if (token.isEmpty()) continue + val tokens = token.split(ChainedAlertExpressionConstant.ConstantType.EQUALS.ident) + if (tokens.isEmpty() || tokens.size != 2) continue + val identifier = tokens[0] + val value = tokens[1] + when (identifier) { + ChainedAlertExpressionConstant.ConstantType.ID.ident -> { + monitorIds.add(value) + } + } + } + is CAExpressionOperator -> { + continue + } + } + } + return monitorIds + } + + private fun resolveMonitorExpression(monitorExpString: String, alertGeneratingMonitors: Set<String>): Boolean { + if (!monitorExpString.startsWith(ChainedAlertExpressionConstant.ConstantType.MONITOR.ident)) return false + val token = monitorExpString.substringAfter(ChainedAlertExpressionConstant.ConstantType.BRACKET_LEFT.ident) + .substringBefore(ChainedAlertExpressionConstant.ConstantType.BRACKET_RIGHT.ident) + if (token.isEmpty()) return false + + val tokens = token.split(ChainedAlertExpressionConstant.ConstantType.EQUALS.ident) + if (tokens.isEmpty() || tokens.size != 2) return false + + val identifier = tokens[0] + val value = tokens[1] + + return when (identifier) { + ChainedAlertExpressionConstant.ConstantType.ID.ident -> alertGeneratingMonitors.contains(value) + else -> false + } + }
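Note that the NOT branch above is unary: it pops a single operand and evaluates Not(res, right) with res fixed to true, which reduces to plain negation of the operand. A sketch of how calling code might use the resolver (monitor ids hypothetical):

import org.opensearch.alerting.chainedAlertCondition.parsers.ChainedAlertExpressionParser

// "m1 alerted and m2 did not."
val condition = ChainedAlertExpressionParser("monitor[id=m1] && !monitor[id=m2]").parse()
val referencedMonitors = condition.getMonitorIds(condition) // setOf("m1", "m2")
val fires = condition.evaluate(setOf("m1"))                 // true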
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerExpression.kt b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerExpression.kt new file mode 100644 index 000000000..4b373d853 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerExpression.kt @@ -0,0 +1,32 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.chainedAlertCondition.resolvers + +sealed class ChainedAlertTriggerExpression { + + fun resolve(): Boolean = when (this) { + is And -> resolveAnd(boolean1, boolean2) + is Or -> resolveOr(boolean1, boolean2) + is Not -> resolveNot(result, boolean2) + } + + private fun resolveAnd(boolean1: Boolean, boolean2: Boolean): Boolean { + return boolean1 && boolean2 + } + + private fun resolveOr(boolean1: Boolean, boolean2: Boolean): Boolean { + return boolean1 || boolean2 + } + + private fun resolveNot(result: Boolean, boolean2: Boolean): Boolean { + return result && !boolean2 + } + + // Operators implemented as nested expression classes + class And(val boolean1: Boolean, val boolean2: Boolean) : ChainedAlertTriggerExpression() + class Or(val boolean1: Boolean, val boolean2: Boolean) : ChainedAlertTriggerExpression() + class Not(val result: Boolean, val boolean2: Boolean) : ChainedAlertTriggerExpression() +}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerResolver.kt b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerResolver.kt new file mode 100644 index 000000000..6f2ff2de0 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerResolver.kt @@ -0,0 +1,11 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.chainedAlertCondition.resolvers + +interface ChainedAlertTriggerResolver { + fun getMonitorIds(parsedTriggerCondition: ChainedAlertRPNResolver): Set<String> + fun evaluate(alertGeneratingMonitors: Set<String>): Boolean +}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionOperator.kt b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionOperator.kt new file mode 100644 index 000000000..084b6aa70 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionOperator.kt @@ -0,0 +1,20 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.chainedAlertCondition.tokens + +/** + * To define all the operators used in the trigger expression + */ +enum class CAExpressionOperator(val value: String, val precedence: Int, val rightAssociative: Boolean) : ExpressionToken { + + AND("&&", 2, false), + OR("||", 2, false), + + NOT("!", 3, true), + + PAR_LEFT("(", 1, false), + PAR_RIGHT(")", 1, false) +}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionToken.kt b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionToken.kt new file mode 100644 index 000000000..ddf439d3f --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionToken.kt @@ -0,0 +1,11 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.chainedAlertCondition.tokens + +/** + * To define the tokens in Trigger expression such as monitor[id="id1"] or monitor[id="id2"] and monitor[id="id3"] + */ +internal data class CAExpressionToken(val value: String) : ExpressionToken
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/tokens/ChainedAlertExpressionConstant.kt b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/tokens/ChainedAlertExpressionConstant.kt new file mode 100644 index 000000000..4b35bc4a8 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/tokens/ChainedAlertExpressionConstant.kt @@ -0,0 +1,24 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.chainedAlertCondition.tokens + +/** + * To define all the tokens which could be part of an expression constant such as monitor[id=new_id] + */ +class ChainedAlertExpressionConstant(val type: ConstantType) : ExpressionToken { + + enum class ConstantType(val ident: String) { + MONITOR("monitor"), + + ID("id"), + + BRACKET_LEFT("["), + BRACKET_RIGHT("]"), + + EQUALS("=") + } +}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/tokens/ExpressionToken.kt b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/tokens/ExpressionToken.kt new file mode 100644 index 000000000..38efed313 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/chainedAlertCondition/tokens/ExpressionToken.kt @@ -0,0 +1,8 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.chainedAlertCondition.tokens + +interface ExpressionToken
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/ActionExecutionResult.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/ActionExecutionResult.kt deleted file mode 100644 index ecdbd8ea4..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/ActionExecutionResult.kt +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.alerting.opensearchapi.instant -import org.opensearch.alerting.opensearchapi.optionalTimeField -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils -import java.io.IOException -import java.time.Instant - -/** - * When an alert is triggered, the trigger's actions will be executed. - * Action execution result records action throttle result and is a part of Alert. - */ -data class ActionExecutionResult( - val actionId: String, - val lastExecutionTime: Instant?, - val throttledCount: Int = 0 -) : Writeable, ToXContentObject { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString(), // actionId - sin.readOptionalInstant(), // lastExecutionTime - sin.readInt() // throttledCount - ) - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return builder.startObject() - .field(ACTION_ID_FIELD, actionId) - .optionalTimeField(LAST_EXECUTION_TIME_FIELD, lastExecutionTime) - .field(THROTTLED_COUNT_FIELD, throttledCount) - .endObject() - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(actionId) - out.writeOptionalInstant(lastExecutionTime) - out.writeInt(throttledCount) - } - - companion object { - const val ACTION_ID_FIELD = "action_id" - const val LAST_EXECUTION_TIME_FIELD = "last_execution_time" - const val THROTTLED_COUNT_FIELD = "throttled_count" - - @JvmStatic - @Throws(IOException::class) - fun parse(xcp: XContentParser): ActionExecutionResult { - lateinit var actionId: String - var throttledCount: Int = 0 - var lastExecutionTime: Instant?
= null - - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - when (fieldName) { - ACTION_ID_FIELD -> actionId = xcp.text() - THROTTLED_COUNT_FIELD -> throttledCount = xcp.intValue() - LAST_EXECUTION_TIME_FIELD -> lastExecutionTime = xcp.instant() - - else -> { - throw IllegalStateException("Unexpected field: $fieldName, while parsing action") - } - } - } - - requireNotNull(actionId) { "Must set action id" } - return ActionExecutionResult(actionId, lastExecutionTime, throttledCount) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): ActionExecutionResult { - return ActionExecutionResult(sin) - } - } -}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/AggregationResultBucket.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/AggregationResultBucket.kt deleted file mode 100644 index 9ba7f0291..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/AggregationResultBucket.kt +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.common.ParsingException -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParser.Token -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken -import java.io.IOException -import java.util.Locale - -data class AggregationResultBucket( - val parentBucketPath: String?, - val bucketKeys: List<String>, - val bucket: Map<String, Any>? // TODO: Should reduce contents to only top-level to not include sub-aggs here -) : Writeable, ToXContentObject { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this(sin.readString(), sin.readStringList(), sin.readMap()) - - override fun writeTo(out: StreamOutput) { - out.writeString(parentBucketPath) - out.writeStringCollection(bucketKeys) - out.writeMap(bucket) - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - innerXContent(builder) - return builder.endObject() - } - - fun innerXContent(builder: XContentBuilder): XContentBuilder { - builder.startObject(CONFIG_NAME) - .field(PARENTS_BUCKET_PATH, parentBucketPath) - .field(BUCKET_KEYS, bucketKeys.toTypedArray()) - .field(BUCKET, bucket) - .endObject() - return builder - } - - companion object { - const val CONFIG_NAME = "agg_alert_content" - const val PARENTS_BUCKET_PATH = "parent_bucket_path" - const val BUCKET_KEYS = "bucket_keys" - private const val BUCKET = "bucket" - - fun parse(xcp: XContentParser): AggregationResultBucket { - var parentBucketPath: String? = null - var bucketKeys = mutableListOf<String>() - var bucket: MutableMap<String, Any>? = null - ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - - if (CONFIG_NAME != xcp.currentName()) { - throw ParsingException( - xcp.tokenLocation, - String.format( - Locale.ROOT, "Failed to parse object: expecting token with name [%s] but found [%s]", - CONFIG_NAME, xcp.currentName() - ) - ) - } - while (xcp.nextToken() != Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - when (fieldName) { - PARENTS_BUCKET_PATH -> parentBucketPath = xcp.text() - BUCKET_KEYS -> { - ensureExpectedToken(Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != Token.END_ARRAY) { - bucketKeys.add(xcp.text()) - } - } - BUCKET -> bucket = xcp.map() - } - } - return AggregationResultBucket(parentBucketPath, bucketKeys, bucket) - } - } -}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/Alert.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/Alert.kt deleted file mode 100644 index 51e617f44..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/Alert.kt +++ /dev/null @@ -1,378 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.alerting.alerts.AlertError - import org.opensearch.alerting.opensearchapi.instant -import org.opensearch.alerting.opensearchapi.optionalTimeField -import org.opensearch.alerting.opensearchapi.optionalUserField -import org.opensearch.alerting.util.IndexUtils.Companion.NO_SCHEMA_VERSION -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.lucene.uid.Versions -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken -import org.opensearch.commons.authuser.User -import java.io.IOException -import java.time.Instant - -data class Alert( - val id: String = NO_ID, - val version: Long = NO_VERSION, - val schemaVersion: Int = NO_SCHEMA_VERSION, - val monitorId: String, - val monitorName: String, - val monitorVersion: Long, - val monitorUser: User?, - val triggerId: String, - val triggerName: String, - val findingIds: List<String>, - val relatedDocIds: List<String>, - val state: State, - val startTime: Instant, - val endTime: Instant? = null, - val lastNotificationTime: Instant? = null, - val acknowledgedTime: Instant? = null, - val errorMessage: String? = null, - val errorHistory: List<AlertError>, - val severity: String, - val actionExecutionResults: List<ActionExecutionResult>, - val aggregationResultBucket: AggregationResultBucket? = null ) : Writeable, ToXContent { - - init { - if (errorMessage != null) require(state == State.DELETED || state == State.ERROR) { - "Attempt to create an alert with an error in state: $state" - } - } - - constructor( - monitor: Monitor, - trigger: QueryLevelTrigger, - startTime: Instant, - lastNotificationTime: Instant?, - state: State = State.ACTIVE, - errorMessage: String? = null, - errorHistory: List<AlertError> = mutableListOf(), - actionExecutionResults: List<ActionExecutionResult> = mutableListOf(), - schemaVersion: Int = NO_SCHEMA_VERSION - ) : this( - monitorId = monitor.id, monitorName = monitor.name, monitorVersion = monitor.version, monitorUser = monitor.user, - triggerId = trigger.id, triggerName = trigger.name, state = state, startTime = startTime, - lastNotificationTime = lastNotificationTime, errorMessage = errorMessage, errorHistory = errorHistory, - severity = trigger.severity, actionExecutionResults = actionExecutionResults, schemaVersion = schemaVersion, - aggregationResultBucket = null, findingIds = emptyList(), relatedDocIds = emptyList() - ) - - constructor( - monitor: Monitor, - trigger: BucketLevelTrigger, - startTime: Instant, - lastNotificationTime: Instant?, - state: State = State.ACTIVE, - errorMessage: String? = null, - errorHistory: List<AlertError> = mutableListOf(), - actionExecutionResults: List<ActionExecutionResult> = mutableListOf(), - schemaVersion: Int = NO_SCHEMA_VERSION - ) : this( - monitorId = monitor.id, monitorName = monitor.name, monitorVersion = monitor.version, monitorUser = monitor.user, - triggerId = trigger.id, triggerName = trigger.name, state = state, startTime = startTime, - lastNotificationTime = lastNotificationTime, errorMessage = errorMessage, errorHistory = errorHistory, - severity = trigger.severity, actionExecutionResults = actionExecutionResults, schemaVersion = schemaVersion, - aggregationResultBucket = null, findingIds = emptyList(), relatedDocIds = emptyList() - ) - - constructor( - monitor: Monitor, - trigger: BucketLevelTrigger, - startTime: Instant, - lastNotificationTime: Instant?, - state: State = State.ACTIVE, - errorMessage: String? = null, - errorHistory: List<AlertError> = mutableListOf(), - actionExecutionResults: List<ActionExecutionResult> = mutableListOf(), - schemaVersion: Int = NO_SCHEMA_VERSION, - aggregationResultBucket: AggregationResultBucket - ) : this( - monitorId = monitor.id, monitorName = monitor.name, monitorVersion = monitor.version, monitorUser = monitor.user, - triggerId = trigger.id, triggerName = trigger.name, state = state, startTime = startTime, - lastNotificationTime = lastNotificationTime, errorMessage = errorMessage, errorHistory = errorHistory, - severity = trigger.severity, actionExecutionResults = actionExecutionResults, schemaVersion = schemaVersion, - aggregationResultBucket = aggregationResultBucket, findingIds = emptyList(), relatedDocIds = emptyList() - ) - - constructor( - id: String = NO_ID, - monitor: Monitor, - trigger: DocumentLevelTrigger, - findingIds: List<String>, - relatedDocIds: List<String>, - startTime: Instant, - lastNotificationTime: Instant?, - state: State = State.ACTIVE, - errorMessage: String? = null, - errorHistory: List<AlertError> = mutableListOf(), - actionExecutionResults: List<ActionExecutionResult> = mutableListOf(), - schemaVersion: Int = NO_SCHEMA_VERSION - ) : this( - id = id, monitorId = monitor.id, monitorName = monitor.name, monitorVersion = monitor.version, monitorUser = monitor.user, - triggerId = trigger.id, triggerName = trigger.name, state = state, startTime = startTime, - lastNotificationTime = lastNotificationTime, errorMessage = errorMessage, errorHistory = errorHistory, - severity = trigger.severity, actionExecutionResults = actionExecutionResults, schemaVersion = schemaVersion, - aggregationResultBucket = null, findingIds = findingIds, relatedDocIds = relatedDocIds - ) - - enum class State { - ACTIVE, ACKNOWLEDGED, COMPLETED, ERROR, DELETED - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - id = sin.readString(), - version = sin.readLong(), - schemaVersion = sin.readInt(), - monitorId = sin.readString(), - monitorName = sin.readString(), - monitorVersion = sin.readLong(), - monitorUser = if (sin.readBoolean()) { - User(sin) - } else null, - triggerId = sin.readString(), - triggerName = sin.readString(), - findingIds = sin.readStringList(), - relatedDocIds = sin.readStringList(), - state = sin.readEnum(State::class.java), - startTime = sin.readInstant(), - endTime = sin.readOptionalInstant(), - lastNotificationTime = sin.readOptionalInstant(), - acknowledgedTime = sin.readOptionalInstant(), - errorMessage = sin.readOptionalString(), - errorHistory = sin.readList(::AlertError), - severity = sin.readString(), - actionExecutionResults = sin.readList(::ActionExecutionResult), - aggregationResultBucket = if (sin.readBoolean()) AggregationResultBucket(sin) else null - ) - - fun isAcknowledged(): Boolean = (state == State.ACKNOWLEDGED) - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(id) - out.writeLong(version) - out.writeInt(schemaVersion) - out.writeString(monitorId) - out.writeString(monitorName) - out.writeLong(monitorVersion) - out.writeBoolean(monitorUser != null) - monitorUser?.writeTo(out) - out.writeString(triggerId) - out.writeString(triggerName) - out.writeStringCollection(findingIds) - out.writeStringCollection(relatedDocIds) - out.writeEnum(state) - out.writeInstant(startTime) - out.writeOptionalInstant(endTime) - out.writeOptionalInstant(lastNotificationTime) - out.writeOptionalInstant(acknowledgedTime) - out.writeOptionalString(errorMessage) - out.writeCollection(errorHistory) - out.writeString(severity) - out.writeCollection(actionExecutionResults) - if (aggregationResultBucket != null) { - out.writeBoolean(true) - aggregationResultBucket.writeTo(out) - } else { - out.writeBoolean(false) - } - } - - companion object { - - const val ALERT_ID_FIELD = "id" - const val SCHEMA_VERSION_FIELD = "schema_version" - const val ALERT_VERSION_FIELD = "version" - const val MONITOR_ID_FIELD = "monitor_id" - const val MONITOR_VERSION_FIELD = "monitor_version" - const val MONITOR_NAME_FIELD = "monitor_name" - const val MONITOR_USER_FIELD = "monitor_user" - const val TRIGGER_ID_FIELD = "trigger_id" - const val TRIGGER_NAME_FIELD = "trigger_name" - const val FINDING_IDS = "finding_ids" - const val RELATED_DOC_IDS = "related_doc_ids" - const val STATE_FIELD = "state" - const val START_TIME_FIELD = "start_time" - const val LAST_NOTIFICATION_TIME_FIELD = "last_notification_time" - const val END_TIME_FIELD = "end_time" - const val ACKNOWLEDGED_TIME_FIELD = "acknowledged_time" - const val ERROR_MESSAGE_FIELD = "error_message" - const val ALERT_HISTORY_FIELD = "alert_history" - const val SEVERITY_FIELD = "severity" - const val ACTION_EXECUTION_RESULTS_FIELD = "action_execution_results" - const val BUCKET_KEYS = AggregationResultBucket.BUCKET_KEYS - const val PARENTS_BUCKET_PATH = AggregationResultBucket.PARENTS_BUCKET_PATH - const val NO_ID = "" - const val NO_VERSION = Versions.NOT_FOUND - - @JvmStatic @JvmOverloads - @Throws(IOException::class) - fun parse(xcp: XContentParser, id: String = NO_ID, version: Long = NO_VERSION): Alert { - - lateinit var monitorId: String - var schemaVersion = NO_SCHEMA_VERSION - lateinit var monitorName: String - var monitorVersion: Long = Versions.NOT_FOUND - var monitorUser: User? = null - lateinit var triggerId: String - lateinit var triggerName: String - val findingIds = mutableListOf<String>() - val relatedDocIds = mutableListOf<String>() - lateinit var state: State - lateinit var startTime: Instant - lateinit var severity: String - var endTime: Instant? = null - var lastNotificationTime: Instant? = null - var acknowledgedTime: Instant? = null - var errorMessage: String? = null - val errorHistory: MutableList<AlertError> = mutableListOf() - val actionExecutionResults: MutableList<ActionExecutionResult> = mutableListOf() - var aggAlertBucket: AggregationResultBucket? = null - ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - - when (fieldName) { - MONITOR_ID_FIELD -> monitorId = xcp.text() - SCHEMA_VERSION_FIELD -> schemaVersion = xcp.intValue() - MONITOR_NAME_FIELD -> monitorName = xcp.text() - MONITOR_VERSION_FIELD -> monitorVersion = xcp.longValue() - MONITOR_USER_FIELD -> monitorUser = if (xcp.currentToken() == XContentParser.Token.VALUE_NULL) null else User.parse(xcp) - TRIGGER_ID_FIELD -> triggerId = xcp.text() - FINDING_IDS -> { - ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { - findingIds.add(xcp.text()) - } - } - RELATED_DOC_IDS -> { - ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { - relatedDocIds.add(xcp.text()) - } - } - STATE_FIELD -> state = State.valueOf(xcp.text()) - TRIGGER_NAME_FIELD -> triggerName = xcp.text() - START_TIME_FIELD -> startTime = requireNotNull(xcp.instant()) - END_TIME_FIELD -> endTime = xcp.instant() - LAST_NOTIFICATION_TIME_FIELD -> lastNotificationTime = xcp.instant() - ACKNOWLEDGED_TIME_FIELD -> acknowledgedTime = xcp.instant() - ERROR_MESSAGE_FIELD -> errorMessage = xcp.textOrNull() - ALERT_HISTORY_FIELD -> { - ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { - errorHistory.add(AlertError.parse(xcp)) - } - } - SEVERITY_FIELD -> severity = xcp.text() - ACTION_EXECUTION_RESULTS_FIELD -> { - ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { - actionExecutionResults.add(ActionExecutionResult.parse(xcp)) - } - } - AggregationResultBucket.CONFIG_NAME -> { - // If an Alert with aggAlertBucket contents is indexed into the alerts index first, then - // that field will be added to the mappings. - // In this case, that field will default to null when it isn't present for Alerts created by Query-Level Monitors - // (even though the toXContent doesn't output the field) so null is being accounted for here. - aggAlertBucket = if (xcp.currentToken() == XContentParser.Token.VALUE_NULL) { - null - } else { - AggregationResultBucket.parse(xcp) - } - } - } - } - - return Alert( - id = id, version = version, schemaVersion = schemaVersion, monitorId = requireNotNull(monitorId), - monitorName = requireNotNull(monitorName), monitorVersion = monitorVersion, monitorUser = monitorUser, - triggerId = requireNotNull(triggerId), triggerName = requireNotNull(triggerName), - state = requireNotNull(state), startTime = requireNotNull(startTime), endTime = endTime, - lastNotificationTime = lastNotificationTime, acknowledgedTime = acknowledgedTime, - errorMessage = errorMessage, errorHistory = errorHistory, severity = severity, - actionExecutionResults = actionExecutionResults, aggregationResultBucket = aggAlertBucket, findingIds = findingIds, - relatedDocIds = relatedDocIds - ) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): Alert { - return Alert(sin) - } - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return createXContentBuilder(builder, true) - } - - fun toXContentWithUser(builder: XContentBuilder): XContentBuilder { - return createXContentBuilder(builder, false) - } - private fun createXContentBuilder(builder: XContentBuilder, secure: Boolean): XContentBuilder { - builder.startObject() - .field(ALERT_ID_FIELD, id) - .field(ALERT_VERSION_FIELD, version) - .field(MONITOR_ID_FIELD, monitorId) - .field(SCHEMA_VERSION_FIELD, schemaVersion) - .field(MONITOR_VERSION_FIELD, monitorVersion) - .field(MONITOR_NAME_FIELD, monitorName) - - if (!secure) { - builder.optionalUserField(MONITOR_USER_FIELD, monitorUser) - } - - builder.field(TRIGGER_ID_FIELD, triggerId) - .field(TRIGGER_NAME_FIELD, triggerName) - .field(FINDING_IDS, findingIds.toTypedArray()) - .field(RELATED_DOC_IDS, relatedDocIds.toTypedArray()) - .field(STATE_FIELD, state) - .field(ERROR_MESSAGE_FIELD, errorMessage) - .field(ALERT_HISTORY_FIELD, errorHistory.toTypedArray()) - .field(SEVERITY_FIELD, severity) - .field(ACTION_EXECUTION_RESULTS_FIELD, actionExecutionResults.toTypedArray()) - .optionalTimeField(START_TIME_FIELD, startTime) - .optionalTimeField(LAST_NOTIFICATION_TIME_FIELD, lastNotificationTime) - .optionalTimeField(END_TIME_FIELD, endTime) - .optionalTimeField(ACKNOWLEDGED_TIME_FIELD, acknowledgedTime) - aggregationResultBucket?.innerXContent(builder) - builder.endObject() - return builder - } - - fun asTemplateArg(): Map<String, Any?> { - return mapOf( - ACKNOWLEDGED_TIME_FIELD to acknowledgedTime?.toEpochMilli(), - ALERT_ID_FIELD to id, - ALERT_VERSION_FIELD to version, - END_TIME_FIELD to endTime?.toEpochMilli(), - ERROR_MESSAGE_FIELD to errorMessage, - LAST_NOTIFICATION_TIME_FIELD to lastNotificationTime?.toEpochMilli(), - SEVERITY_FIELD to severity, - START_TIME_FIELD to startTime.toEpochMilli(), - STATE_FIELD to state.toString(), - // Converting bucket keys to comma separated String to avoid manipulation in Action mustache templates - BUCKET_KEYS to aggregationResultBucket?.bucketKeys?.joinToString(","), - PARENTS_BUCKET_PATH to aggregationResultBucket?.parentBucketPath, - FINDING_IDS to findingIds.joinToString(","), - RELATED_DOC_IDS to relatedDocIds.joinToString(",") - ) - } -} diff --git
a/alerting/src/main/kotlin/org/opensearch/alerting/model/AlertingConfigAccessor.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/AlertingConfigAccessor.kt index d1d5411f6..2e2b24b19 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/AlertingConfigAccessor.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/AlertingConfigAccessor.kt @@ -9,19 +9,16 @@ import kotlinx.coroutines.Dispatchers import kotlinx.coroutines.withContext import org.opensearch.action.get.GetRequest import org.opensearch.action.get.GetResponse -import org.opensearch.alerting.core.model.ScheduledJob import org.opensearch.alerting.model.destination.email.EmailAccount import org.opensearch.alerting.model.destination.email.EmailGroup import org.opensearch.alerting.opensearchapi.suspendUntil import org.opensearch.client.Client -import org.opensearch.common.bytes.BytesReference import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils import org.opensearch.common.xcontent.XContentType -import org.opensearch.index.IndexNotFoundException +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.core.common.bytes.BytesReference +import org.opensearch.core.xcontent.NamedXContentRegistry /** * This is an accessor class to retrieve documents/information from the Alerting config index. @@ -29,28 +26,6 @@ import org.opensearch.index.IndexNotFoundException class AlertingConfigAccessor { companion object { - suspend fun getMonitorMetadata(client: Client, xContentRegistry: NamedXContentRegistry, metadataId: String): MonitorMetadata? 
{ - return try { - val jobSource = getAlertingConfigDocumentSource(client, "Monitor Metadata", metadataId) - withContext(Dispatchers.IO) { - val xcp = XContentHelper.createParser( - xContentRegistry, LoggingDeprecationHandler.INSTANCE, - jobSource, XContentType.JSON - ) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - MonitorMetadata.parse(xcp) - } - } catch (e: IllegalStateException) { - if (e.message?.equals("Monitor Metadata document with id $metadataId not found or source is empty") == true) { - return null - } else throw e - } catch (e: IndexNotFoundException) { - if (e.message?.equals("no such index [.opendistro-alerting-config]") == true) { - return null - } else throw e - } - } - - suspend fun getEmailAccountInfo(client: Client, xContentRegistry: NamedXContentRegistry, emailAccountId: String): EmailAccount { - val source = getAlertingConfigDocumentSource(client, "Email account", emailAccountId) - return withContext(Dispatchers.IO) {
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/BucketLevelTrigger.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/BucketLevelTrigger.kt deleted file mode 100644 index a77944a11..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/BucketLevelTrigger.kt +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder -import org.opensearch.alerting.model.Trigger.Companion.ACTIONS_FIELD -import org.opensearch.alerting.model.Trigger.Companion.ID_FIELD -import org.opensearch.alerting.model.Trigger.Companion.NAME_FIELD -import org.opensearch.alerting.model.Trigger.Companion.SEVERITY_FIELD -import org.opensearch.alerting.model.action.Action -import org.opensearch.common.CheckedFunction -import org.opensearch.common.ParseField -import org.opensearch.common.UUIDs -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParser.Token -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken -import java.io.IOException - -/** - * A multi-alert Trigger available with Bucket-Level Monitors that filters aggregation buckets via a pipeline - * aggregator. - */ -data class BucketLevelTrigger( - override val id: String = UUIDs.base64UUID(), - override val name: String, - override val severity: String, - val bucketSelector: BucketSelectorExtAggregationBuilder, - override val actions: List<Action> -) : Trigger { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString(), // id - sin.readString(), // name - sin.readString(), // severity - BucketSelectorExtAggregationBuilder(sin), // condition - sin.readList(::Action) // actions - ) - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .startObject(BUCKET_LEVEL_TRIGGER_FIELD) - .field(ID_FIELD, id) - .field(NAME_FIELD, name) - .field(SEVERITY_FIELD, severity) - .startObject(CONDITION_FIELD) - bucketSelector.internalXContent(builder, params) - builder.endObject() - .field(ACTIONS_FIELD, actions.toTypedArray()) - .endObject() - .endObject() - return builder - } - - override fun name(): String { - return BUCKET_LEVEL_TRIGGER_FIELD - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(id) - out.writeString(name) - out.writeString(severity) - bucketSelector.writeTo(out) - out.writeCollection(actions) - } - - fun asTemplateArg(): Map<String, Any> { - return mapOf( - ID_FIELD to id, - NAME_FIELD to name, - SEVERITY_FIELD to severity, - ACTIONS_FIELD to actions.map { it.asTemplateArg() }, - PARENT_BUCKET_PATH to getParentBucketPath() - ) - } - - fun getParentBucketPath(): String { - return bucketSelector.parentBucketPath - } - - companion object { - const val BUCKET_LEVEL_TRIGGER_FIELD = "bucket_level_trigger" - const val CONDITION_FIELD = "condition" - const val PARENT_BUCKET_PATH = "parentBucketPath" - - val XCONTENT_REGISTRY = NamedXContentRegistry.Entry( - Trigger::class.java, ParseField(BUCKET_LEVEL_TRIGGER_FIELD), - CheckedFunction { parseInner(it) } - ) - - @JvmStatic - @Throws(IOException::class) - fun parseInner(xcp: XContentParser): BucketLevelTrigger { - var id = UUIDs.base64UUID() // assign a default triggerId if one is not specified - lateinit var name: String - lateinit var severity: String - val actions: MutableList<Action> = mutableListOf() - ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp) - lateinit var bucketSelector: BucketSelectorExtAggregationBuilder - - while (xcp.nextToken() != Token.END_OBJECT) { - val fieldName = xcp.currentName() - - xcp.nextToken() - when (fieldName) { - ID_FIELD -> id = xcp.text() - NAME_FIELD -> name = xcp.text() - SEVERITY_FIELD -> severity = xcp.text() - CONDITION_FIELD -> { - // Using the trigger id as the name in the bucket selector since it is validated for uniqueness within Monitors. - // The contents of the trigger definition are round-tripped through parse and toXContent during Monitor creation - // ensuring that the id is available here in the version of the Monitor object that will be executed, even if the - // user submitted a custom trigger id after the condition definition. - bucketSelector = BucketSelectorExtAggregationBuilder.parse(id, xcp) - } - ACTIONS_FIELD -> { - ensureExpectedToken(Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != Token.END_ARRAY) { - actions.add(Action.parse(xcp)) - } - } - } - } - - return BucketLevelTrigger( - id = requireNotNull(id) { "Trigger id is null." }, - name = requireNotNull(name) { "Trigger name is null" }, - severity = requireNotNull(severity) { "Trigger severity is null" }, - bucketSelector = requireNotNull(bucketSelector) { "Trigger condition is null" }, - actions = requireNotNull(actions) { "Trigger actions are null" } - ) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): BucketLevelTrigger { - return BucketLevelTrigger(sin) - } - } -}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/BucketLevelTriggerRunResult.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/BucketLevelTriggerRunResult.kt index 63131d835..ffc302d98 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/BucketLevelTriggerRunResult.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/BucketLevelTriggerRunResult.kt @@ -5,10 +5,11 @@ package org.opensearch.alerting.model -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.commons.alerting.model.AggregationResultBucket +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder import java.io.IOException data class BucketLevelTriggerRunResult(
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/ChainedAlertTriggerRunResult.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/ChainedAlertTriggerRunResult.kt new file mode 100644 index 000000000..b95e533e9 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/ChainedAlertTriggerRunResult.kt @@ -0,0 +1,69 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.commons.alerting.alerts.AlertError +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.script.ScriptException +import java.io.IOException +import java.time.Instant + +data class ChainedAlertTriggerRunResult( + override var triggerName: String, + var triggered: Boolean, + override var error: Exception?, + var actionResults: MutableMap<String, ActionRunResult> = mutableMapOf(), + val associatedAlertIds: Set<String>, +) : TriggerRunResult(triggerName, error) { + + @Throws(IOException::class) + @Suppress("UNCHECKED_CAST") + constructor(sin: StreamInput) : this( + triggerName = sin.readString(), + error = sin.readException(), + triggered = sin.readBoolean(), + actionResults = sin.readMap() as MutableMap<String, ActionRunResult>, + associatedAlertIds = sin.readStringList().toSet() + ) + + override fun alertError(): AlertError? { + if (error != null) { + return AlertError(Instant.now(), "Failed evaluating trigger:\n${error!!.userErrorMessage()}") + } + for (actionResult in actionResults.values) { + if (actionResult.error != null) { + return AlertError(Instant.now(), "Failed running action:\n${actionResult.error.userErrorMessage()}") + } + } + return null + } + + override fun internalXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + if (error is ScriptException) error = Exception((error as ScriptException).toJsonString(), error) + return builder + .field("triggered", triggered) + .field("action_results", actionResults as Map<String, ActionRunResult>) + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + super.writeTo(out) + out.writeBoolean(triggered) + out.writeMap(actionResults as Map<String, ActionRunResult>) + out.writeStringCollection(associatedAlertIds) + } + + companion object { + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): TriggerRunResult { + return ChainedAlertTriggerRunResult(sin) + } + } +}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/ClusterMetricsTriggerRunResult.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/ClusterMetricsTriggerRunResult.kt new file mode 100644 index 000000000..a19de0637 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/ClusterMetricsTriggerRunResult.kt @@ -0,0 +1,110 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.commons.alerting.alerts.AlertError +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.script.ScriptException +import java.io.IOException +import java.time.Instant +
+data class ClusterMetricsTriggerRunResult( + override var triggerName: String, + override var triggered: Boolean, + override var error: Exception?, + override var actionResults: MutableMap<String, ActionRunResult> = mutableMapOf(), + var clusterTriggerResults: List<ClusterTriggerResult> = listOf() +) : QueryLevelTriggerRunResult( + triggerName = triggerName, + error = error, + triggered = triggered, + actionResults = actionResults +) { + + @Throws(IOException::class) + @Suppress("UNCHECKED_CAST") + constructor(sin: StreamInput) : this( + triggerName = sin.readString(), + error = sin.readException(), + triggered = sin.readBoolean(), + actionResults = sin.readMap() as MutableMap<String, ActionRunResult>, + clusterTriggerResults = sin.readList((ClusterTriggerResult.Companion)::readFrom) + ) + + override fun alertError(): AlertError?
{ + if (error != null) { + return AlertError(Instant.now(), "Failed evaluating trigger:\n${error!!.userErrorMessage()}") + } + for (actionResult in actionResults.values) { + if (actionResult.error != null) { + return AlertError(Instant.now(), "Failed running action:\n${actionResult.error.userErrorMessage()}") + } + } + return null + } + + override fun internalXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + if (error is ScriptException) error = Exception((error as ScriptException).toJsonString(), error) + builder + .field(TRIGGERED_FIELD, triggered) + .field(ACTION_RESULTS_FIELD, actionResults as Map<String, ActionRunResult>) + .startArray(CLUSTER_RESULTS_FIELD) + clusterTriggerResults.forEach { it.toXContent(builder, params) } + return builder.endArray() + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + super.writeTo(out) + out.writeBoolean(triggered) + out.writeMap(actionResults as Map<String, ActionRunResult>) + clusterTriggerResults.forEach { it.writeTo(out) } + } + + companion object { + const val TRIGGERED_FIELD = "triggered" + const val ACTION_RESULTS_FIELD = "action_results" + const val CLUSTER_RESULTS_FIELD = "cluster_results" + } + + data class ClusterTriggerResult( + val cluster: String, + val triggered: Boolean, + ) : ToXContentObject, Writeable { + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + cluster = sin.readString(), + triggered = sin.readBoolean() + ) + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject() + .startObject(cluster) + .field(TRIGGERED_FIELD, triggered) + .endObject() + .endObject() + } + + override fun writeTo(out: StreamOutput) { + out.writeString(cluster) + out.writeBoolean(triggered) + } + + companion object { + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): ClusterTriggerResult { + return ClusterTriggerResult(sin) + } + } + } +}
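A ClusterMetricsTriggerRunResult carries one ClusterTriggerResult per remote cluster, each rendered as a single-key object. A construction sketch (cluster names and trigger name are hypothetical):

// Per-cluster run result for a hypothetical cross-cluster health-check monitor.
val runResult = ClusterMetricsTriggerRunResult(
    triggerName = "cluster-health-trigger",
    triggered = true,
    error = null,
    clusterTriggerResults = listOf(
        ClusterMetricsTriggerRunResult.ClusterTriggerResult(cluster = "cluster-1", triggered = true),
        ClusterMetricsTriggerRunResult.ClusterTriggerResult(cluster = "cluster-2", triggered = false)
    )
)
// Based on internalXContent above, this should render roughly as:
// "triggered": true, "action_results": {},
// "cluster_results": [{"cluster-1": {"triggered": true}}, {"cluster-2": {"triggered": false}}]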
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentExecutionContext.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentExecutionContext.kt index a6acd027a..0caad1f4a 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentExecutionContext.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentExecutionContext.kt @@ -5,7 +5,7 @@ package org.opensearch.alerting.model -import org.opensearch.alerting.core.model.DocLevelQuery +import org.opensearch.commons.alerting.model.DocLevelQuery data class DocumentExecutionContext( val queries: List<DocLevelQuery>,
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentLevelTrigger.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentLevelTrigger.kt deleted file mode 100644 index 003d738fa..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentLevelTrigger.kt +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.alerting.model.Trigger.Companion.ACTIONS_FIELD -import org.opensearch.alerting.model.Trigger.Companion.ID_FIELD -import org.opensearch.alerting.model.Trigger.Companion.NAME_FIELD -import org.opensearch.alerting.model.Trigger.Companion.SEVERITY_FIELD -import org.opensearch.alerting.model.action.Action -import org.opensearch.common.CheckedFunction -import org.opensearch.common.ParseField -import org.opensearch.common.UUIDs -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParser.Token -import org.opensearch.common.xcontent.XContentParserUtils -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken -import org.opensearch.script.Script -import java.io.IOException - -/** - * A single-alert Trigger that uses Painless scripts which execute on the response of the Monitor input query to define - * alerting conditions. - */ -data class DocumentLevelTrigger( - override val id: String = UUIDs.base64UUID(), - override val name: String, - override val severity: String, - override val actions: List<Action>, - val condition: Script -) : Trigger { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString(), // id - sin.readString(), // name - sin.readString(), // severity - sin.readList(::Action), // actions - Script(sin) - ) - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .startObject(DOCUMENT_LEVEL_TRIGGER_FIELD) - .field(ID_FIELD, id) - .field(NAME_FIELD, name) - .field(SEVERITY_FIELD, severity) - .startObject(CONDITION_FIELD) - .field(SCRIPT_FIELD, condition) - .endObject() - .field(ACTIONS_FIELD, actions.toTypedArray()) - .endObject() - .endObject() - return builder - } - - override fun name(): String { - return DOCUMENT_LEVEL_TRIGGER_FIELD - } - - /** Returns a representation of the trigger suitable for passing into painless and mustache scripts. */ - fun asTemplateArg(): Map<String, Any> { - return mapOf( - ID_FIELD to id, - NAME_FIELD to name, - SEVERITY_FIELD to severity, - ACTIONS_FIELD to actions.map { it.asTemplateArg() } - ) - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(id) - out.writeString(name) - out.writeString(severity) - out.writeCollection(actions) - condition.writeTo(out) - } - - companion object { - const val DOCUMENT_LEVEL_TRIGGER_FIELD = "document_level_trigger" - const val CONDITION_FIELD = "condition" - const val SCRIPT_FIELD = "script" - const val QUERY_IDS_FIELD = "query_ids" - - val XCONTENT_REGISTRY = NamedXContentRegistry.Entry( - Trigger::class.java, ParseField(DOCUMENT_LEVEL_TRIGGER_FIELD), - CheckedFunction { parseInner(it) } - ) - - @JvmStatic @Throws(IOException::class) - fun parseInner(xcp: XContentParser): DocumentLevelTrigger { - var id = UUIDs.base64UUID() // assign a default triggerId if one is not specified - lateinit var name: String - lateinit var severity: String - lateinit var condition: Script - val queryIds: MutableList<String> = mutableListOf() - val actions: MutableList<Action> = mutableListOf() - - if (xcp.currentToken() != Token.START_OBJECT && xcp.currentToken() != Token.FIELD_NAME) { - XContentParserUtils.throwUnknownToken(xcp.currentToken(), xcp.tokenLocation) - } - - // If the parser began on START_OBJECT, move to the next token so that the while loop enters on - // the fieldName (or END_OBJECT if it's empty). - if (xcp.currentToken() == Token.START_OBJECT) xcp.nextToken() - - while (xcp.currentToken() != Token.END_OBJECT) { - val fieldName = xcp.currentName() - - xcp.nextToken() - when (fieldName) { - ID_FIELD -> id = xcp.text() - NAME_FIELD -> name = xcp.text() - SEVERITY_FIELD -> severity = xcp.text() - CONDITION_FIELD -> { - xcp.nextToken() - condition = Script.parse(xcp) - require(condition.lang == Script.DEFAULT_SCRIPT_LANG) { - "Invalid script language. Allowed languages are [${Script.DEFAULT_SCRIPT_LANG}]" - } - xcp.nextToken() - } - QUERY_IDS_FIELD -> { - ensureExpectedToken(Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != Token.END_ARRAY) { - queryIds.add(xcp.text()) - } - } - ACTIONS_FIELD -> { - ensureExpectedToken(Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != Token.END_ARRAY) { - actions.add(Action.parse(xcp)) - } - } - } - xcp.nextToken() - } - - return DocumentLevelTrigger( - name = requireNotNull(name) { "Trigger name is null" }, - severity = requireNotNull(severity) { "Trigger severity is null" }, - condition = requireNotNull(condition) { "Trigger condition is null" }, - actions = requireNotNull(actions) { "Trigger actions are null" }, - id = requireNotNull(id) { "Trigger id is null." } - ) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): DocumentLevelTrigger { - return DocumentLevelTrigger(sin) - } - } -}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentLevelTriggerRunResult.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentLevelTriggerRunResult.kt index 4bf9a8317..9d98aab42 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentLevelTriggerRunResult.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/DocumentLevelTriggerRunResult.kt @@ -5,10 +5,10 @@ package org.opensearch.alerting.model -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.script.ScriptException import java.io.IOException
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/Finding.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/Finding.kt deleted file mode 100644 index 1e8a186ff..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/Finding.kt +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.alerting.core.model.DocLevelQuery -import org.opensearch.alerting.opensearchapi.instant -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken -import java.io.IOException -import java.time.Instant - -/** - * A wrapper of the log event that enriches the event by also including information about the monitor it triggered. - */ -class Finding( - val id: String = NO_ID, - val relatedDocIds: List<String>, - val monitorId: String, - val monitorName: String, - val index: String, - val docLevelQueries: List<DocLevelQuery>, - val timestamp: Instant -) : Writeable, ToXContent { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - id = sin.readString(), - relatedDocIds = sin.readStringList(), - monitorId = sin.readString(), - monitorName = sin.readString(), - index = sin.readString(), - docLevelQueries = sin.readList((DocLevelQuery)::readFrom), - timestamp = sin.readInstant() - ) - - fun asTemplateArg(): Map<String, Any> { - return mapOf( - FINDING_ID_FIELD to id, - RELATED_DOC_IDS_FIELD to relatedDocIds, - MONITOR_ID_FIELD to monitorId, - MONITOR_NAME_FIELD to monitorName, - INDEX_FIELD to index, - QUERIES_FIELD to docLevelQueries, - TIMESTAMP_FIELD to timestamp.toEpochMilli() - ) - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .field(FINDING_ID_FIELD, id) - .field(RELATED_DOC_IDS_FIELD, relatedDocIds) - .field(MONITOR_ID_FIELD, monitorId) - .field(MONITOR_NAME_FIELD, monitorName) - .field(INDEX_FIELD, index) - .field(QUERIES_FIELD, docLevelQueries.toTypedArray()) - .field(TIMESTAMP_FIELD, timestamp.toEpochMilli()) - builder.endObject() - return builder - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(id) - out.writeStringCollection(relatedDocIds) - out.writeString(monitorId) - out.writeString(monitorName) - out.writeString(index) - out.writeCollection(docLevelQueries) - out.writeInstant(timestamp) - } - - companion object { - const val FINDING_ID_FIELD = "id" - const val RELATED_DOC_IDS_FIELD = "related_doc_ids" - const val MONITOR_ID_FIELD = "monitor_id" - const val MONITOR_NAME_FIELD = "monitor_name" - const val INDEX_FIELD = "index" - const val QUERIES_FIELD = "queries" - const val TIMESTAMP_FIELD = "timestamp" - const val NO_ID = "" - - @JvmStatic @JvmOverloads - @Throws(IOException::class) - fun parse(xcp: XContentParser): Finding { - var id: String = NO_ID - val relatedDocIds: MutableList<String> = mutableListOf() - lateinit var monitorId: String - lateinit var monitorName: String - lateinit var index: String - val queries: MutableList<DocLevelQuery> = mutableListOf() - lateinit var timestamp: Instant - - ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - - when (fieldName) { - FINDING_ID_FIELD -> id = xcp.text() - RELATED_DOC_IDS_FIELD -> { - ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { - relatedDocIds.add(xcp.text()) - } - } - MONITOR_ID_FIELD -> monitorId = xcp.text() - MONITOR_NAME_FIELD -> monitorName = xcp.text() - INDEX_FIELD -> index = xcp.text() - QUERIES_FIELD -> { - ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { - queries.add(DocLevelQuery.parse(xcp)) - } - } - TIMESTAMP_FIELD -> { - timestamp = requireNotNull(xcp.instant()) - } - } - } - - return Finding( - id = id, - relatedDocIds = relatedDocIds, - monitorId = monitorId, - monitorName = monitorName, - index = index, - docLevelQueries = queries, - timestamp = timestamp - ) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): Finding { - return Finding(sin) - } - } -} diff --git
a/alerting/src/main/kotlin/org/opensearch/alerting/model/FindingDocument.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/FindingDocument.kt deleted file mode 100644 index bb6728b35..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/FindingDocument.kt +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.apache.logging.log4j.LogManager -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils -import java.io.IOException - -private val log = LogManager.getLogger(FindingDocument::class.java) - -class FindingDocument( - val index: String, - val id: String, - val found: Boolean, - val document: String -) : Writeable, ToXContent { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - index = sin.readString(), - id = sin.readString(), - found = sin.readBoolean(), - document = sin.readString() - ) - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return builder.startObject() - .field(INDEX_FIELD, index) - .field(FINDING_DOCUMENT_ID_FIELD, id) - .field(FOUND_FIELD, found) - .field(DOCUMENT_FIELD, document) - .endObject() - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(index) - out.writeString(id) - out.writeBoolean(found) - out.writeString(document) - } - - companion object { - const val INDEX_FIELD = "index" - const val FINDING_DOCUMENT_ID_FIELD = "id" - const val FOUND_FIELD = "found" - const val DOCUMENT_FIELD = "document" - const val NO_ID = "" - const val NO_INDEX = "" - - @JvmStatic @JvmOverloads - @Throws(IOException::class) - fun parse(xcp: XContentParser, id: String = NO_ID, index: String = NO_INDEX): FindingDocument { - var found = false - var document: String = "" - - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - - when (fieldName) { - FOUND_FIELD -> found = xcp.booleanValue() - DOCUMENT_FIELD -> document = xcp.text() - } - } - - return FindingDocument( - index = index, - id = id, - found = found, - document = document - ) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): FindingDocument { - return FindingDocument(sin) - } - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/FindingWithDocs.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/FindingWithDocs.kt deleted file mode 100644 index 5fbcb98ff..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/FindingWithDocs.kt +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.apache.logging.log4j.LogManager -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import 
org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils -import java.io.IOException - -private val log = LogManager.getLogger(Finding::class.java) - -class FindingWithDocs( - val finding: Finding, - val documents: List -) : Writeable, ToXContent { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - finding = Finding.readFrom(sin), - documents = sin.readList((FindingDocument)::readFrom) - ) - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - finding.writeTo(out) - documents.forEach { - it.writeTo(out) - } - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .field(FINDING_FIELD, finding) - .field(DOCUMENTS_FIELD, documents) - builder.endObject() - return builder - } - - companion object { - const val FINDING_FIELD = "finding" - const val DOCUMENTS_FIELD = "document_list" - - @JvmStatic - @Throws(IOException::class) - fun parse(xcp: XContentParser): FindingWithDocs { - lateinit var finding: Finding - val documents: MutableList = mutableListOf() - - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - - when (fieldName) { - FINDING_FIELD -> finding = Finding.parse(xcp) - DOCUMENTS_FIELD -> { - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { - documents.add(FindingDocument.parse(xcp)) - } - } - } - } - - return FindingWithDocs( - finding = finding, - documents = documents - ) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): FindingWithDocs { - return FindingWithDocs(sin) - } - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/Monitor.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/Monitor.kt deleted file mode 100644 index 295174230..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/Monitor.kt +++ /dev/null @@ -1,324 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.alerting.core.model.ClusterMetricsInput -import org.opensearch.alerting.core.model.CronSchedule -import org.opensearch.alerting.core.model.Input -import org.opensearch.alerting.core.model.Schedule -import org.opensearch.alerting.core.model.ScheduledJob -import org.opensearch.alerting.core.model.SearchInput -import org.opensearch.alerting.opensearchapi.instant -import org.opensearch.alerting.opensearchapi.optionalTimeField -import org.opensearch.alerting.opensearchapi.optionalUserField -import org.opensearch.alerting.settings.AlertingSettings.Companion.MONITOR_MAX_INPUTS -import org.opensearch.alerting.settings.AlertingSettings.Companion.MONITOR_MAX_TRIGGERS -import org.opensearch.alerting.settings.SupportedClusterMetricsSettings -import org.opensearch.alerting.util.IndexUtils.Companion.NO_SCHEMA_VERSION -import org.opensearch.alerting.util._ID -import org.opensearch.alerting.util._VERSION -import org.opensearch.alerting.util.isBucketLevelMonitor -import org.opensearch.common.CheckedFunction -import org.opensearch.common.ParseField -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import 
org.opensearch.common.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParser.Token -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken -import org.opensearch.commons.authuser.User -import java.io.IOException -import java.time.Instant -import java.util.Locale - -/** - * A value object that represents a Monitor. Monitors are used to periodically execute a source query and check the - * results. - */ -data class Monitor( - override val id: String = NO_ID, - override val version: Long = NO_VERSION, - override val name: String, - override val enabled: Boolean, - override val schedule: Schedule, - override val lastUpdateTime: Instant, - override val enabledTime: Instant?, - // TODO: Check how this behaves during rolling upgrade/multi-version cluster - // Can read/write and parsing break if it's done from an old -> new version of the plugin? - val monitorType: MonitorType, - val user: User?, - val schemaVersion: Int = NO_SCHEMA_VERSION, - val inputs: List, - val triggers: List, - val uiMetadata: Map -) : ScheduledJob { - - override val type = MONITOR_TYPE - - init { - // Ensure that trigger ids are unique within a monitor - val triggerIds = mutableSetOf() - triggers.forEach { trigger -> - require(triggerIds.add(trigger.id)) { "Duplicate trigger id: ${trigger.id}. Trigger ids must be unique." } - // Verify Trigger type based on Monitor type - when (monitorType) { - MonitorType.QUERY_LEVEL_MONITOR -> - require(trigger is QueryLevelTrigger) { "Incompatible trigger [${trigger.id}] for monitor type [$monitorType]" } - MonitorType.BUCKET_LEVEL_MONITOR -> - require(trigger is BucketLevelTrigger) { "Incompatible trigger [${trigger.id}] for monitor type [$monitorType]" } - MonitorType.CLUSTER_METRICS_MONITOR -> - require(trigger is QueryLevelTrigger) { "Incompatible trigger [${trigger.id}] for monitor type [$monitorType]" } - MonitorType.DOC_LEVEL_MONITOR -> - require(trigger is DocumentLevelTrigger) { "Incompatible trigger [${trigger.id}] for monitor type [$monitorType]" } - } - } - if (enabled) { - requireNotNull(enabledTime) - } else { - require(enabledTime == null) - } - require(inputs.size <= MONITOR_MAX_INPUTS) { "Monitors can only have $MONITOR_MAX_INPUTS search input." } - require(triggers.size <= MONITOR_MAX_TRIGGERS) { "Monitors can only support up to $MONITOR_MAX_TRIGGERS triggers." } - if (this.isBucketLevelMonitor()) { - inputs.forEach { input -> - require(input is SearchInput) { "Unsupported input [$input] for Monitor" } - // TODO: Keeping query validation simple for now, only term aggregations have full support for the "group by" on the - // initial release. Should either add tests for other aggregation types or add validation to prevent using them. 
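As a concrete illustration of an input that satisfies this aggregation requirement, a bucket-level monitor's SearchInput can carry a composite terms aggregation. A hedged sketch; the index, field, and aggregation names are illustrative, not from this diff:

import org.opensearch.alerting.core.model.SearchInput
import org.opensearch.index.query.QueryBuilders
import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder
import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder
import org.opensearch.search.builder.SearchSourceBuilder

val input = SearchInput(
    indices = listOf("example-logs"),
    query = SearchSourceBuilder()
        .query(QueryBuilders.matchAllQuery())
        .aggregation(
            CompositeAggregationBuilder(
                "composite_agg",
                listOf(TermsValuesSourceBuilder("service").field("service.keyword"))
            )
        )
)
// input.query.aggregations() is now non-null and non-empty, so the require(...) check below passes.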
- require(input.query.aggregations() != null && !input.query.aggregations().aggregatorFactories.isEmpty()) { - "At least one aggregation is required for the input [$input]" - } - } - } - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - id = sin.readString(), - version = sin.readLong(), - name = sin.readString(), - enabled = sin.readBoolean(), - schedule = Schedule.readFrom(sin), - lastUpdateTime = sin.readInstant(), - enabledTime = sin.readOptionalInstant(), - monitorType = sin.readEnum(MonitorType::class.java), - user = if (sin.readBoolean()) { - User(sin) - } else null, - schemaVersion = sin.readInt(), - inputs = sin.readList((Input)::readFrom), - triggers = sin.readList((Trigger)::readFrom), - uiMetadata = suppressWarning(sin.readMap()) - ) - - // This enum classifies different Monitors - // This is different from 'type' which denotes the Scheduled Job type - enum class MonitorType(val value: String) { - QUERY_LEVEL_MONITOR("query_level_monitor"), - BUCKET_LEVEL_MONITOR("bucket_level_monitor"), - CLUSTER_METRICS_MONITOR("cluster_metrics_monitor"), - DOC_LEVEL_MONITOR("doc_level_monitor"); - - override fun toString(): String { - return value - } - } - - /** Returns a representation of the monitor suitable for passing into painless and mustache scripts. */ - fun asTemplateArg(): Map { - return mapOf(_ID to id, _VERSION to version, NAME_FIELD to name, ENABLED_FIELD to enabled) - } - - fun toXContentWithUser(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return createXContentBuilder(builder, params, false) - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return createXContentBuilder(builder, params, true) - } - - private fun createXContentBuilder(builder: XContentBuilder, params: ToXContent.Params, secure: Boolean): XContentBuilder { - builder.startObject() - if (params.paramAsBoolean("with_type", false)) builder.startObject(type) - builder.field(TYPE_FIELD, type) - .field(SCHEMA_VERSION_FIELD, schemaVersion) - .field(NAME_FIELD, name) - .field(MONITOR_TYPE_FIELD, monitorType) - - if (!secure) { - builder.optionalUserField(USER_FIELD, user) - } - - builder.field(ENABLED_FIELD, enabled) - .optionalTimeField(ENABLED_TIME_FIELD, enabledTime) - .field(SCHEDULE_FIELD, schedule) - .field(INPUTS_FIELD, inputs.toTypedArray()) - .field(TRIGGERS_FIELD, triggers.toTypedArray()) - .optionalTimeField(LAST_UPDATE_TIME_FIELD, lastUpdateTime) - if (uiMetadata.isNotEmpty()) builder.field(UI_METADATA_FIELD, uiMetadata) - if (params.paramAsBoolean("with_type", false)) builder.endObject() - return builder.endObject() - } - - override fun fromDocument(id: String, version: Long): Monitor = copy(id = id, version = version) - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(id) - out.writeLong(version) - out.writeString(name) - out.writeBoolean(enabled) - if (schedule is CronSchedule) { - out.writeEnum(Schedule.TYPE.CRON) - } else { - out.writeEnum(Schedule.TYPE.INTERVAL) - } - schedule.writeTo(out) - out.writeInstant(lastUpdateTime) - out.writeOptionalInstant(enabledTime) - out.writeEnum(monitorType) - out.writeBoolean(user != null) - user?.writeTo(out) - out.writeInt(schemaVersion) - // Outputting type with each Input so that the generic Input.readFrom() can read it - out.writeVInt(inputs.size) - inputs.forEach { - if (it is SearchInput) out.writeEnum(Input.Type.SEARCH_INPUT) - else out.writeEnum(Input.Type.DOCUMENT_LEVEL_INPUT) - it.writeTo(out) - } - // 
Outputting type with each Trigger so that the generic Trigger.readFrom() can read it - out.writeVInt(triggers.size) - triggers.forEach { - when (it) { - is BucketLevelTrigger -> out.writeEnum(Trigger.Type.BUCKET_LEVEL_TRIGGER) - is DocumentLevelTrigger -> out.writeEnum(Trigger.Type.DOCUMENT_LEVEL_TRIGGER) - else -> out.writeEnum(Trigger.Type.QUERY_LEVEL_TRIGGER) - } - it.writeTo(out) - } - out.writeMap(uiMetadata) - } - - companion object { - const val MONITOR_TYPE = "monitor" - const val TYPE_FIELD = "type" - const val MONITOR_TYPE_FIELD = "monitor_type" - const val SCHEMA_VERSION_FIELD = "schema_version" - const val NAME_FIELD = "name" - const val USER_FIELD = "user" - const val ENABLED_FIELD = "enabled" - const val SCHEDULE_FIELD = "schedule" - const val TRIGGERS_FIELD = "triggers" - const val NO_ID = "" - const val NO_VERSION = 1L - const val INPUTS_FIELD = "inputs" - const val LAST_UPDATE_TIME_FIELD = "last_update_time" - const val UI_METADATA_FIELD = "ui_metadata" - const val ENABLED_TIME_FIELD = "enabled_time" - - // This is defined here instead of in ScheduledJob to avoid having the ScheduledJob class know about all - // the different subclasses and creating circular dependencies - val XCONTENT_REGISTRY = NamedXContentRegistry.Entry( - ScheduledJob::class.java, - ParseField(MONITOR_TYPE), - CheckedFunction { parse(it) } - ) - - @JvmStatic - @JvmOverloads - @Throws(IOException::class) - fun parse(xcp: XContentParser, id: String = NO_ID, version: Long = NO_VERSION): Monitor { - var name: String? = null - // Default to QUERY_LEVEL_MONITOR to cover Monitors that existed before the addition of MonitorType - var monitorType: String = MonitorType.QUERY_LEVEL_MONITOR.toString() - var user: User? = null - var schedule: Schedule? = null - var lastUpdateTime: Instant? = null - var enabledTime: Instant? 
= null - var uiMetadata: Map = mapOf() - var enabled = true - var schemaVersion = NO_SCHEMA_VERSION - val triggers: MutableList = mutableListOf() - val inputs: MutableList = mutableListOf() - - ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - - when (fieldName) { - SCHEMA_VERSION_FIELD -> schemaVersion = xcp.intValue() - NAME_FIELD -> name = xcp.text() - MONITOR_TYPE_FIELD -> { - monitorType = xcp.text() - val allowedTypes = MonitorType.values().map { it.value } - if (!allowedTypes.contains(monitorType)) { - throw IllegalStateException("Monitor type should be one of $allowedTypes") - } - } - USER_FIELD -> user = if (xcp.currentToken() == Token.VALUE_NULL) null else User.parse(xcp) - ENABLED_FIELD -> enabled = xcp.booleanValue() - SCHEDULE_FIELD -> schedule = Schedule.parse(xcp) - INPUTS_FIELD -> { - ensureExpectedToken(Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != Token.END_ARRAY) { - val input = Input.parse(xcp) - if (input is ClusterMetricsInput) - SupportedClusterMetricsSettings.validateApiType(input) - inputs.add(input) - } - } - TRIGGERS_FIELD -> { - ensureExpectedToken(Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != Token.END_ARRAY) { - triggers.add(Trigger.parse(xcp)) - } - } - ENABLED_TIME_FIELD -> enabledTime = xcp.instant() - LAST_UPDATE_TIME_FIELD -> lastUpdateTime = xcp.instant() - UI_METADATA_FIELD -> uiMetadata = xcp.map() - else -> { - xcp.skipChildren() - } - } - } - - if (enabled && enabledTime == null) { - enabledTime = Instant.now() - } else if (!enabled) { - enabledTime = null - } - return Monitor( - id, - version, - requireNotNull(name) { "Monitor name is null" }, - enabled, - requireNotNull(schedule) { "Monitor schedule is null" }, - lastUpdateTime ?: Instant.now(), - enabledTime, - MonitorType.valueOf(monitorType.uppercase(Locale.ROOT)), - user, - schemaVersion, - inputs.toList(), - triggers.toList(), - uiMetadata - ) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): Monitor? 
{ - return Monitor(sin) - } - - @Suppress("UNCHECKED_CAST") - fun suppressWarning(map: MutableMap<String?, Any?>?): MutableMap<String, Any> { - return map as MutableMap<String, Any> - } - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/MonitorMetadata.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/MonitorMetadata.kt index a78972a33..17534a30e 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/MonitorMetadata.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/MonitorMetadata.kt @@ -5,37 +5,50 @@ package org.opensearch.alerting.model -import org.opensearch.alerting.opensearchapi.instant -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils +import org.opensearch.alerting.model.destination.Destination.Companion.NO_ID +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.util.instant +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import org.opensearch.index.seqno.SequenceNumbers import java.io.IOException import java.time.Instant data class MonitorMetadata( val id: String, + val seqNo: Long = SequenceNumbers.UNASSIGNED_SEQ_NO, + val primaryTerm: Long = SequenceNumbers.UNASSIGNED_PRIMARY_TERM, val monitorId: String, val lastActionExecutionTimes: List<ActionExecutionTime>, - val lastRunContext: Map<String, Any> + val lastRunContext: Map<String, Any>, + // Maps (sourceIndex + monitorId) --> concreteQueryIndex + val sourceToQueryIndexMapping: MutableMap<String, String> = mutableMapOf() ) : Writeable, ToXContent { @Throws(IOException::class) constructor(sin: StreamInput) : this( id = sin.readString(), + seqNo = sin.readLong(), + primaryTerm = sin.readLong(), monitorId = sin.readString(), lastActionExecutionTimes = sin.readList(ActionExecutionTime::readFrom), - lastRunContext = Monitor.suppressWarning(sin.readMap()) + lastRunContext = Monitor.suppressWarning(sin.readMap()), + sourceToQueryIndexMapping = sin.readMap() as MutableMap<String, String> ) override fun writeTo(out: StreamOutput) { out.writeString(id) + out.writeLong(seqNo) + out.writeLong(primaryTerm) out.writeString(monitorId) out.writeCollection(lastActionExecutionTimes) out.writeMap(lastRunContext) + out.writeMap(sourceToQueryIndexMapping as MutableMap<String, Any>) } override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { @@ -44,6 +57,9 @@ data class MonitorMetadata( builder.field(MONITOR_ID_FIELD, monitorId) .field(LAST_ACTION_EXECUTION_FIELD, lastActionExecutionTimes.toTypedArray()) if (lastRunContext.isNotEmpty()) builder.field(LAST_RUN_CONTEXT_FIELD, lastRunContext) + if (sourceToQueryIndexMapping.isNotEmpty()) { + builder.field(SOURCE_TO_QUERY_INDEX_MAP_FIELD, sourceToQueryIndexMapping as MutableMap<String, Any>) + } if (params.paramAsBoolean("with_type", false)) builder.endObject() return builder.endObject() } @@ -53,13 +69,20 @@ data class MonitorMetadata( const val MONITOR_ID_FIELD = "monitor_id" const val LAST_ACTION_EXECUTION_FIELD = "last_action_execution_times" const val 
LAST_RUN_CONTEXT_FIELD = "last_run_context" + const val SOURCE_TO_QUERY_INDEX_MAP_FIELD = "source_to_query_index_mapping" @JvmStatic @JvmOverloads @Throws(IOException::class) - fun parse(xcp: XContentParser): MonitorMetadata { + fun parse( + xcp: XContentParser, + id: String = NO_ID, + seqNo: Long = SequenceNumbers.UNASSIGNED_SEQ_NO, + primaryTerm: Long = SequenceNumbers.UNASSIGNED_PRIMARY_TERM + ): MonitorMetadata { lateinit var monitorId: String val lastActionExecutionTimes = mutableListOf() var lastRunContext: Map = mapOf() + var sourceToQueryIndexMapping: MutableMap = mutableMapOf() XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { @@ -75,14 +98,18 @@ data class MonitorMetadata( } } LAST_RUN_CONTEXT_FIELD -> lastRunContext = xcp.map() + SOURCE_TO_QUERY_INDEX_MAP_FIELD -> sourceToQueryIndexMapping = xcp.map() as MutableMap } } return MonitorMetadata( - "$monitorId-metadata", + if (id != NO_ID) id else "$monitorId-metadata", + seqNo = seqNo, + primaryTerm = primaryTerm, monitorId = monitorId, lastActionExecutionTimes = lastActionExecutionTimes, - lastRunContext = lastRunContext + lastRunContext = lastRunContext, + sourceToQueryIndexMapping = sourceToQueryIndexMapping ) } @@ -91,6 +118,19 @@ data class MonitorMetadata( fun readFrom(sin: StreamInput): MonitorMetadata { return MonitorMetadata(sin) } + + /** workflowMetadataId is used as key for monitor metadata in the case when the workflow execution happens + so the monitor lastRunContext (in the case of doc level monitor) is not interfering with the monitor execution + WorkflowMetadataId will be either workflowId-metadata (when executing the workflow as it is scheduled) + or timestampWithUUID-metadata (when a workflow is executed in a dry-run mode) + In the case of temp workflow, doc level monitors must have lastRunContext created from scratch + That's why we are using workflowMetadataId - in order to ensure that the doc level monitor metadata is created from scratch + **/ + fun getId(monitor: Monitor, workflowMetadataId: String? 
= null): String { + return if (workflowMetadataId.isNullOrEmpty()) "${monitor.id}-metadata" + // WorkflowMetadataId already contains -metadata suffix + else "$workflowMetadataId-${monitor.id}-metadata" + } } } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/MonitorRunResult.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/MonitorRunResult.kt index 7e13f9281..18a433848 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/MonitorRunResult.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/MonitorRunResult.kt @@ -7,13 +7,14 @@ package org.opensearch.alerting.model import org.apache.logging.log4j.LogManager import org.opensearch.OpenSearchException -import org.opensearch.alerting.alerts.AlertError -import org.opensearch.alerting.opensearchapi.optionalTimeField -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.commons.alerting.alerts.AlertError +import org.opensearch.commons.alerting.model.Trigger +import org.opensearch.commons.alerting.util.optionalTimeField +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.script.ScriptException import java.io.IOException import java.time.Instant diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/QueryLevelTrigger.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/QueryLevelTrigger.kt deleted file mode 100644 index d8b00442d..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/QueryLevelTrigger.kt +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.alerting.model.Trigger.Companion.ACTIONS_FIELD -import org.opensearch.alerting.model.Trigger.Companion.ID_FIELD -import org.opensearch.alerting.model.Trigger.Companion.NAME_FIELD -import org.opensearch.alerting.model.Trigger.Companion.SEVERITY_FIELD -import org.opensearch.alerting.model.action.Action -import org.opensearch.common.CheckedFunction -import org.opensearch.common.ParseField -import org.opensearch.common.UUIDs -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParser.Token -import org.opensearch.common.xcontent.XContentParserUtils -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken -import org.opensearch.script.Script -import java.io.IOException - -/** - * A single-alert Trigger that uses Painless scripts which execute on the response of the Monitor input query to define - * alerting conditions. 
- */ -data class QueryLevelTrigger( - override val id: String = UUIDs.base64UUID(), - override val name: String, - override val severity: String, - override val actions: List, - val condition: Script -) : Trigger { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString(), // id - sin.readString(), // name - sin.readString(), // severity - sin.readList(::Action), // actions - Script(sin) // condition - ) - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .startObject(QUERY_LEVEL_TRIGGER_FIELD) - .field(ID_FIELD, id) - .field(NAME_FIELD, name) - .field(SEVERITY_FIELD, severity) - .startObject(CONDITION_FIELD) - .field(SCRIPT_FIELD, condition) - .endObject() - .field(ACTIONS_FIELD, actions.toTypedArray()) - .endObject() - .endObject() - return builder - } - - override fun name(): String { - return QUERY_LEVEL_TRIGGER_FIELD - } - - /** Returns a representation of the trigger suitable for passing into painless and mustache scripts. */ - fun asTemplateArg(): Map { - return mapOf( - ID_FIELD to id, NAME_FIELD to name, SEVERITY_FIELD to severity, - ACTIONS_FIELD to actions.map { it.asTemplateArg() } - ) - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(id) - out.writeString(name) - out.writeString(severity) - out.writeCollection(actions) - condition.writeTo(out) - } - - companion object { - const val QUERY_LEVEL_TRIGGER_FIELD = "query_level_trigger" - const val CONDITION_FIELD = "condition" - const val SCRIPT_FIELD = "script" - - val XCONTENT_REGISTRY = NamedXContentRegistry.Entry( - Trigger::class.java, ParseField(QUERY_LEVEL_TRIGGER_FIELD), - CheckedFunction { parseInner(it) } - ) - - /** - * This parse method needs to account for both the old and new Trigger format. - * In the old format, only one Trigger existed (which is now QueryLevelTrigger) and it was - * not a named object. - * - * The parse() method in the Trigger interface needs to consume the outer START_OBJECT to be able - * to infer whether it is dealing with the old or new Trigger format. This means that the currentToken at - * the time this parseInner method is called could differ based on which format is being dealt with. - * - * Old Format - * ---------- - * { - * "id": ..., - * ^ - * Current token starts here - * "name" ..., - * ... - * } - * - * New Format - * ---------- - * { - * "query_level_trigger": { - * "id": ..., ^ Current token starts here - * "name": ..., - * ... - * } - * } - * - * It isn't typically conventional but this parse method will account for both START_OBJECT - * and FIELD_NAME as the starting token to cover both cases. - */ - @JvmStatic @Throws(IOException::class) - fun parseInner(xcp: XContentParser): QueryLevelTrigger { - var id = UUIDs.base64UUID() // assign a default triggerId if one is not specified - lateinit var name: String - lateinit var severity: String - lateinit var condition: Script - val actions: MutableList = mutableListOf() - - if (xcp.currentToken() != Token.START_OBJECT && xcp.currentToken() != Token.FIELD_NAME) { - XContentParserUtils.throwUnknownToken(xcp.currentToken(), xcp.tokenLocation) - } - - // If the parser began on START_OBJECT, move to the next token so that the while loop enters on - // the fieldName (or END_OBJECT if it's empty). 
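Both accepted shapes side by side, as a minimal sketch: the name and severity values are illustrative, while ctx.results[0].hits.total.value > 0 is the stock example condition for query-level monitors:

// Old format: fields at the top level; parseInner enters directly on a field name.
val oldFormatTrigger = """{"name": "errors-seen", "severity": "2", "condition": {"script": {"source": "ctx.results[0].hits.total.value > 0", "lang": "painless"}}, "actions": []}"""

// New format: the same fields nested under the registered "query_level_trigger" name.
val newFormatTrigger = """{"query_level_trigger": {"name": "errors-seen", "severity": "2", "condition": {"script": {"source": "ctx.results[0].hits.total.value > 0", "lang": "painless"}}, "actions": []}}"""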
- if (xcp.currentToken() == Token.START_OBJECT) xcp.nextToken() - - while (xcp.currentToken() != Token.END_OBJECT) { - val fieldName = xcp.currentName() - - xcp.nextToken() - when (fieldName) { - ID_FIELD -> id = xcp.text() - NAME_FIELD -> name = xcp.text() - SEVERITY_FIELD -> severity = xcp.text() - CONDITION_FIELD -> { - xcp.nextToken() - condition = Script.parse(xcp) - require(condition.lang == Script.DEFAULT_SCRIPT_LANG) { - "Invalid script language. Allowed languages are [${Script.DEFAULT_SCRIPT_LANG}]" - } - xcp.nextToken() - } - ACTIONS_FIELD -> { - ensureExpectedToken(Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != Token.END_ARRAY) { - actions.add(Action.parse(xcp)) - } - } - } - xcp.nextToken() - } - - return QueryLevelTrigger( - name = requireNotNull(name) { "Trigger name is null" }, - severity = requireNotNull(severity) { "Trigger severity is null" }, - condition = requireNotNull(condition) { "Trigger condition is null" }, - actions = requireNotNull(actions) { "Trigger actions are null" }, - id = requireNotNull(id) { "Trigger id is null." } - ) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): QueryLevelTrigger { - return QueryLevelTrigger(sin) - } - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/QueryLevelTriggerRunResult.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/QueryLevelTriggerRunResult.kt index 190df4f3b..5917c1ecf 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/QueryLevelTriggerRunResult.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/QueryLevelTriggerRunResult.kt @@ -5,20 +5,20 @@ package org.opensearch.alerting.model -import org.opensearch.alerting.alerts.AlertError -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.commons.alerting.alerts.AlertError +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.script.ScriptException import java.io.IOException import java.time.Instant -data class QueryLevelTriggerRunResult( +open class QueryLevelTriggerRunResult( override var triggerName: String, - var triggered: Boolean, + open var triggered: Boolean, override var error: Exception?, - var actionResults: MutableMap = mutableMapOf() + open var actionResults: MutableMap = mutableMapOf() ) : TriggerRunResult(triggerName, error) { @Throws(IOException::class) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/Table.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/Table.kt deleted file mode 100644 index 0a9ff0e4f..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/Table.kt +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import java.io.IOException - -data class Table( - val sortOrder: String, - val sortString: String, - val missing: String?, - val size: Int, - val startIndex: Int, - val searchString: String? 
-) : Writeable { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sortOrder = sin.readString(), - sortString = sin.readString(), - missing = sin.readOptionalString(), - size = sin.readInt(), - startIndex = sin.readInt(), - searchString = sin.readOptionalString() - ) - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(sortOrder) - out.writeString(sortString) - out.writeOptionalString(missing) - out.writeInt(size) - out.writeInt(startIndex) - out.writeOptionalString(searchString) - } - - companion object { - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): Table { - return Table(sin) - } - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/Trigger.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/Trigger.kt deleted file mode 100644 index e3a9b12ab..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/Trigger.kt +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.alerting.core.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX -import org.opensearch.alerting.model.action.Action -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParser.Token -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken -import java.io.IOException - -interface Trigger : Writeable, ToXContentObject { - - enum class Type(val value: String) { - DOCUMENT_LEVEL_TRIGGER(DocumentLevelTrigger.DOCUMENT_LEVEL_TRIGGER_FIELD), - QUERY_LEVEL_TRIGGER(QueryLevelTrigger.QUERY_LEVEL_TRIGGER_FIELD), - BUCKET_LEVEL_TRIGGER(BucketLevelTrigger.BUCKET_LEVEL_TRIGGER_FIELD); - - override fun toString(): String { - return value - } - } - - companion object { - const val ID_FIELD = "id" - const val NAME_FIELD = "name" - const val SEVERITY_FIELD = "severity" - const val ACTIONS_FIELD = "actions" - - @Throws(IOException::class) - fun parse(xcp: XContentParser): Trigger { - val trigger: Trigger - - ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp) - ensureExpectedToken(Token.FIELD_NAME, xcp.nextToken(), xcp) - val triggerTypeNames = Type.values().map { it.toString() } - if (triggerTypeNames.contains(xcp.currentName())) { - ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp) - trigger = xcp.namedObject(Trigger::class.java, xcp.currentName(), null) - ensureExpectedToken(Token.END_OBJECT, xcp.nextToken(), xcp) - } else { - // Infer the old Trigger (now called QueryLevelTrigger) when it is not defined as a named - // object to remain backwards compatible when parsing the old format - trigger = QueryLevelTrigger.parseInner(xcp) - ensureExpectedToken(Token.END_OBJECT, xcp.currentToken(), xcp) - } - return trigger - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): Trigger { - return when (val type = sin.readEnum(Trigger.Type::class.java)) { - Type.QUERY_LEVEL_TRIGGER -> QueryLevelTrigger(sin) - Type.BUCKET_LEVEL_TRIGGER -> BucketLevelTrigger(sin) - Type.DOCUMENT_LEVEL_TRIGGER -> DocumentLevelTrigger(sin) - // This shouldn't be reachable but ensuring exhaustiveness as Kotlin warns - // enum can be null in Java - else -> throw IllegalStateException("Unexpected input [$type] when reading Trigger") - } - 
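The matching write side lives in Monitor.writeTo (shown earlier in this diff), which writes the Type enum tag before each trigger's payload; that tag is what this readFrom switches on. A minimal round-trip sketch, assuming an existing trigger instance and using the in-memory BytesStreamOutput from OpenSearch common:

import org.opensearch.common.io.stream.BytesStreamOutput

val out = BytesStreamOutput()
out.writeEnum(Trigger.Type.QUERY_LEVEL_TRIGGER) // tag first, as Monitor.writeTo does
trigger.writeTo(out)                            // then the trigger payload
val restored = Trigger.readFrom(out.bytes().streamInput())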
} - } - - /** The id of the Trigger in the [SCHEDULED_JOBS_INDEX] */ - val id: String - - /** The name of the Trigger */ - val name: String - - /** The severity of the Trigger, used to classify the subsequent Alert */ - val severity: String - - /** The actions executed if the Trigger condition evaluates to true */ - val actions: List<Action> - - fun name(): String -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/TriggerRunResult.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/TriggerRunResult.kt index 0522076f9..c3aec89f2 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/TriggerRunResult.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/TriggerRunResult.kt @@ -5,11 +5,11 @@ package org.opensearch.alerting.model -import org.opensearch.alerting.alerts.AlertError -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.commons.alerting.alerts.AlertError +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder import java.io.IOException import java.time.Instant diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/WorkflowMetadata.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/WorkflowMetadata.kt new file mode 100644 index 000000000..9ab7b43f8 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/WorkflowMetadata.kt @@ -0,0 +1,105 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.commons.alerting.util.instant +import org.opensearch.commons.alerting.util.optionalTimeField +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import java.io.IOException +import java.time.Instant + +data class WorkflowMetadata( + val id: String, + val workflowId: String, + val monitorIds: List<String>, + val latestRunTime: Instant, + val latestExecutionId: String +) : Writeable, ToXContent { + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + id = sin.readString(), + workflowId = sin.readString(), + monitorIds = sin.readStringList(), + latestRunTime = sin.readInstant(), + latestExecutionId = sin.readString() + ) + + override fun writeTo(out: StreamOutput) { + out.writeString(id) + out.writeString(workflowId) + out.writeStringCollection(monitorIds) + out.writeInstant(latestRunTime) + out.writeString(latestExecutionId) + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + if (params.paramAsBoolean("with_type", false)) builder.startObject(METADATA) + builder.field(WORKFLOW_ID_FIELD, workflowId) + .field(MONITOR_IDS_FIELD, monitorIds) + .optionalTimeField(LATEST_RUN_TIME, latestRunTime) + .field(LATEST_EXECUTION_ID, latestExecutionId) + if (params.paramAsBoolean("with_type", false)) builder.endObject() + return builder.endObject() + } + + companion object { + const val 
METADATA = "workflow_metadata" + const val WORKFLOW_ID_FIELD = "workflow_id" + const val MONITOR_IDS_FIELD = "monitor_ids" + const val LATEST_RUN_TIME = "latest_run_time" + const val LATEST_EXECUTION_ID = "latest_execution_id" + + @JvmStatic @JvmOverloads + @Throws(IOException::class) + fun parse(xcp: XContentParser): WorkflowMetadata { + lateinit var workflowId: String + var monitorIds = mutableListOf() + lateinit var latestRunTime: Instant + lateinit var latestExecutionId: String + + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + + when (fieldName) { + WORKFLOW_ID_FIELD -> workflowId = xcp.text() + MONITOR_IDS_FIELD -> { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { + monitorIds.add(xcp.text()) + } + } + LATEST_RUN_TIME -> latestRunTime = xcp.instant()!! + LATEST_EXECUTION_ID -> latestExecutionId = xcp.text() + } + } + return WorkflowMetadata( + id = "$workflowId-metadata", + workflowId = workflowId, + monitorIds = monitorIds, + latestRunTime = latestRunTime, + latestExecutionId = latestExecutionId + ) + } + + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): WorkflowMetadata { + return WorkflowMetadata(sin) + } + + fun getId(workflowId: String? = null) = "$workflowId-metadata" + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/WorkflowRunResult.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/WorkflowRunResult.kt new file mode 100644 index 000000000..cabdc6330 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/WorkflowRunResult.kt @@ -0,0 +1,82 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import java.io.IOException +import java.lang.Exception +import java.time.Instant + +data class WorkflowRunResult( + val workflowId: String, + val workflowName: String, + val monitorRunResults: List> = mutableListOf(), + val executionStartTime: Instant, + var executionEndTime: Instant? = null, + val executionId: String, + val error: Exception? 
= null, + val triggerResults: Map<String, ChainedAlertTriggerRunResult> = mapOf(), +) : Writeable, ToXContent { + + @Throws(IOException::class) + @Suppress("UNCHECKED_CAST") + constructor(sin: StreamInput) : this( + workflowId = sin.readString(), + workflowName = sin.readString(), + monitorRunResults = sin.readList<MonitorRunResult<*>> { s: StreamInput -> MonitorRunResult.readFrom(s) }, + executionStartTime = sin.readInstant(), + executionEndTime = sin.readOptionalInstant(), + executionId = sin.readString(), + error = sin.readException(), + triggerResults = suppressWarning(sin.readMap()) as Map<String, ChainedAlertTriggerRunResult> + ) + + override fun writeTo(out: StreamOutput) { + out.writeString(workflowId) + out.writeString(workflowName) + out.writeList(monitorRunResults) + out.writeInstant(executionStartTime) + out.writeOptionalInstant(executionEndTime) + out.writeString(executionId) + out.writeException(error) + out.writeMap(triggerResults) + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + builder.field("execution_id", executionId) + builder.field("workflow_name", workflowName) + builder.field("workflow_id", workflowId) + builder.field("trigger_results", triggerResults) + builder.startArray("monitor_run_results") + for (monitorResult in monitorRunResults) { + monitorResult.toXContent(builder, ToXContent.EMPTY_PARAMS) + } + builder.endArray() + .field("execution_start_time", executionStartTime) + .field("execution_end_time", executionEndTime) + .field("error", error?.message) + .endObject() + return builder + } + + companion object { + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): WorkflowRunResult { + return WorkflowRunResult(sin) + } + + @Suppress("UNCHECKED_CAST") + fun suppressWarning(map: MutableMap<String?, Any?>?): Map<String, ChainedAlertTriggerRunResult> { + return map as Map<String, ChainedAlertTriggerRunResult> + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/action/Action.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/action/Action.kt deleted file mode 100644 index 6e38a2b31..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/action/Action.kt +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model.action - -import org.opensearch.common.UUIDs -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils -import org.opensearch.script.Script -import java.io.IOException - -/** - * This class holds the data and parser logic for Action which is part of a trigger - */ -data class Action( - val name: String, - val destinationId: String, - val subjectTemplate: Script?, - val messageTemplate: Script, - val throttleEnabled: Boolean, - val throttle: Throttle?, - val id: String = UUIDs.base64UUID(), - val actionExecutionPolicy: ActionExecutionPolicy? 
= null -) : Writeable, ToXContentObject { - - init { - if (subjectTemplate != null) { - require(subjectTemplate.lang == MUSTACHE) { "subject_template must be a mustache script" } - } - require(messageTemplate.lang == MUSTACHE) { "message_template must be a mustache script" } - - if (actionExecutionPolicy?.actionExecutionScope is PerExecutionActionScope) { - require(throttle == null) { "Throttle is currently not supported for per execution action scope" } - } - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString(), // name - sin.readString(), // destinationId - sin.readOptionalWriteable(::Script), // subjectTemplate - Script(sin), // messageTemplate - sin.readBoolean(), // throttleEnabled - sin.readOptionalWriteable(::Throttle), // throttle - sin.readString(), // id - sin.readOptionalWriteable(::ActionExecutionPolicy) // actionExecutionPolicy - ) - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - val xContentBuilder = builder.startObject() - .field(ID_FIELD, id) - .field(NAME_FIELD, name) - .field(DESTINATION_ID_FIELD, destinationId) - .field(MESSAGE_TEMPLATE_FIELD, messageTemplate) - .field(THROTTLE_ENABLED_FIELD, throttleEnabled) - if (subjectTemplate != null) { - xContentBuilder.field(SUBJECT_TEMPLATE_FIELD, subjectTemplate) - } - if (throttle != null) { - xContentBuilder.field(THROTTLE_FIELD, throttle) - } - if (actionExecutionPolicy != null) { - xContentBuilder.field(ACTION_EXECUTION_POLICY_FIELD, actionExecutionPolicy) - } - return xContentBuilder.endObject() - } - - fun asTemplateArg(): Map { - return mapOf(NAME_FIELD to name) - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(name) - out.writeString(destinationId) - if (subjectTemplate != null) { - out.writeBoolean(true) - subjectTemplate.writeTo(out) - } else { - out.writeBoolean(false) - } - messageTemplate.writeTo(out) - out.writeBoolean(throttleEnabled) - if (throttle != null) { - out.writeBoolean(true) - throttle.writeTo(out) - } else { - out.writeBoolean(false) - } - out.writeString(id) - if (actionExecutionPolicy != null) { - out.writeBoolean(true) - actionExecutionPolicy.writeTo(out) - } else { - out.writeBoolean(false) - } - } - - companion object { - const val ID_FIELD = "id" - const val NAME_FIELD = "name" - const val DESTINATION_ID_FIELD = "destination_id" - const val SUBJECT_TEMPLATE_FIELD = "subject_template" - const val MESSAGE_TEMPLATE_FIELD = "message_template" - const val THROTTLE_ENABLED_FIELD = "throttle_enabled" - const val THROTTLE_FIELD = "throttle" - const val ACTION_EXECUTION_POLICY_FIELD = "action_execution_policy" - const val MUSTACHE = "mustache" - const val SUBJECT = "subject" - const val MESSAGE = "message" - const val MESSAGE_ID = "messageId" - - @JvmStatic - @Throws(IOException::class) - fun parse(xcp: XContentParser): Action { - var id = UUIDs.base64UUID() // assign a default action id if one is not specified - lateinit var name: String - lateinit var destinationId: String - var subjectTemplate: Script? = null // subject template could be null for some destinations - lateinit var messageTemplate: Script - var throttleEnabled = false - var throttle: Throttle? = null - var actionExecutionPolicy: ActionExecutionPolicy? 
= null - - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - when (fieldName) { - ID_FIELD -> id = xcp.text() - NAME_FIELD -> name = xcp.textOrNull() - DESTINATION_ID_FIELD -> destinationId = xcp.textOrNull() - SUBJECT_TEMPLATE_FIELD -> { - subjectTemplate = if (xcp.currentToken() == XContentParser.Token.VALUE_NULL) null else - Script.parse(xcp, Script.DEFAULT_TEMPLATE_LANG) - } - MESSAGE_TEMPLATE_FIELD -> messageTemplate = Script.parse(xcp, Script.DEFAULT_TEMPLATE_LANG) - THROTTLE_FIELD -> { - throttle = if (xcp.currentToken() == XContentParser.Token.VALUE_NULL) null else Throttle.parse(xcp) - } - THROTTLE_ENABLED_FIELD -> { - throttleEnabled = xcp.booleanValue() - } - ACTION_EXECUTION_POLICY_FIELD -> { - actionExecutionPolicy = if (xcp.currentToken() == XContentParser.Token.VALUE_NULL) { - null - } else { - ActionExecutionPolicy.parse(xcp) - } - } - else -> { - throw IllegalStateException("Unexpected field: $fieldName, while parsing action") - } - } - } - - if (throttleEnabled) { - requireNotNull(throttle, { "Action throttle enabled but not set throttle value" }) - } - - return Action( - requireNotNull(name) { "Action name is null" }, - requireNotNull(destinationId) { "Destination id is null" }, - subjectTemplate, - requireNotNull(messageTemplate) { "Action message template is null" }, - throttleEnabled, - throttle, - id = requireNotNull(id), - actionExecutionPolicy = actionExecutionPolicy - ) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): Action { - return Action(sin) - } - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/action/ActionExecutionPolicy.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/action/ActionExecutionPolicy.kt deleted file mode 100644 index 809f06624..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/action/ActionExecutionPolicy.kt +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model.action - -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParser.Token -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken -import java.io.IOException - -/** - * This class represents the container for various configurations which control Action behavior. 
- */ -data class ActionExecutionPolicy( - val actionExecutionScope: ActionExecutionScope -) : Writeable, ToXContentObject { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this ( - ActionExecutionScope.readFrom(sin) // actionExecutionScope - ) - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .field(ACTION_EXECUTION_SCOPE, actionExecutionScope) - return builder.endObject() - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - if (actionExecutionScope is PerAlertActionScope) { - out.writeEnum(ActionExecutionScope.Type.PER_ALERT) - } else { - out.writeEnum(ActionExecutionScope.Type.PER_EXECUTION) - } - actionExecutionScope.writeTo(out) - } - - companion object { - const val ACTION_EXECUTION_SCOPE = "action_execution_scope" - - @JvmStatic - @Throws(IOException::class) - fun parse(xcp: XContentParser): ActionExecutionPolicy { - lateinit var actionExecutionScope: ActionExecutionScope - - ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - - when (fieldName) { - ACTION_EXECUTION_SCOPE -> actionExecutionScope = ActionExecutionScope.parse(xcp) - } - } - - return ActionExecutionPolicy( - requireNotNull(actionExecutionScope) { "Action execution scope is null" } - ) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): ActionExecutionPolicy { - return ActionExecutionPolicy(sin) - } - - /** - * The default [ActionExecutionPolicy] configuration for Bucket-Level Monitors. - * - * If Query-Level Monitors integrate the use of [ActionExecutionPolicy] then a separate default configuration - * will need to be made depending on the desired behavior. - */ - fun getDefaultConfigurationForBucketLevelMonitor(): ActionExecutionPolicy { - val defaultActionExecutionScope = PerAlertActionScope( - actionableAlerts = setOf(AlertCategory.DEDUPED, AlertCategory.NEW) - ) - return ActionExecutionPolicy(actionExecutionScope = defaultActionExecutionScope) - } - - /** - * The default [ActionExecutionPolicy] configuration for Document-Level Monitors. - * - * If Query-Level Monitors integrate the use of [ActionExecutionPolicy] then a separate default configuration - * will need to be made depending on the desired behavior. 
- */ - fun getDefaultConfigurationForDocumentLevelMonitor(): ActionExecutionPolicy { - val defaultActionExecutionScope = PerAlertActionScope( - actionableAlerts = setOf(AlertCategory.DEDUPED, AlertCategory.NEW) - ) - return ActionExecutionPolicy(actionExecutionScope = defaultActionExecutionScope) - } - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/action/ActionExecutionScope.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/action/ActionExecutionScope.kt deleted file mode 100644 index 2e0256520..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/action/ActionExecutionScope.kt +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model.action - -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParser.Token -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken -import java.io.IOException -import java.lang.IllegalArgumentException - -/** - * This class represents configurations used to control the scope of Action executions when Alerts are created. - */ -sealed class ActionExecutionScope : Writeable, ToXContentObject { - - enum class Type { PER_ALERT, PER_EXECUTION } - - companion object { - const val PER_ALERT_FIELD = "per_alert" - const val PER_EXECUTION_FIELD = "per_execution" - const val ACTIONABLE_ALERTS_FIELD = "actionable_alerts" - - @JvmStatic - @Throws(IOException::class) - fun parse(xcp: XContentParser): ActionExecutionScope { - var type: Type? = null - var actionExecutionScope: ActionExecutionScope? = null - val alertFilter = mutableSetOf() - - ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - - // If the type field has already been set, the user has provided more than one type of schedule - if (type != null) { - throw IllegalArgumentException("You can only specify one type of action execution scope.") - } - - when (fieldName) { - PER_ALERT_FIELD -> { - type = Type.PER_ALERT - while (xcp.nextToken() != Token.END_OBJECT) { - val perAlertFieldName = xcp.currentName() - xcp.nextToken() - when (perAlertFieldName) { - ACTIONABLE_ALERTS_FIELD -> { - ensureExpectedToken(Token.START_ARRAY, xcp.currentToken(), xcp) - val allowedCategories = AlertCategory.values().map { it.toString() } - while (xcp.nextToken() != Token.END_ARRAY) { - val alertCategory = xcp.text() - if (!allowedCategories.contains(alertCategory)) { - throw IllegalStateException("Actionable alerts should be one of $allowedCategories") - } - alertFilter.add(AlertCategory.valueOf(alertCategory)) - } - } - else -> throw IllegalArgumentException( - "Invalid field [$perAlertFieldName] found in per alert action execution scope." 
- ) - } - } - } - PER_EXECUTION_FIELD -> { - type = Type.PER_EXECUTION - while (xcp.nextToken() != Token.END_OBJECT) {} - } - else -> throw IllegalArgumentException("Invalid field [$fieldName] found in action execution scope.") - } - } - - if (type == Type.PER_ALERT) { - actionExecutionScope = PerAlertActionScope(alertFilter) - } else if (type == Type.PER_EXECUTION) { - actionExecutionScope = PerExecutionActionScope() - } - - return requireNotNull(actionExecutionScope) { "Action execution scope is null." } - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): ActionExecutionScope { - val type = sin.readEnum(ActionExecutionScope.Type::class.java) - return if (type == Type.PER_ALERT) { - PerAlertActionScope(sin) - } else { - PerExecutionActionScope(sin) - } - } - } - - abstract fun getExecutionScope(): Type -} - -data class PerAlertActionScope( - val actionableAlerts: Set -) : ActionExecutionScope() { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readSet { si -> si.readEnum(AlertCategory::class.java) } // alertFilter - ) - - override fun getExecutionScope(): Type = Type.PER_ALERT - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .startObject(PER_ALERT_FIELD) - .field(ACTIONABLE_ALERTS_FIELD, actionableAlerts.toTypedArray()) - .endObject() - return builder.endObject() - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeCollection(actionableAlerts) { o, v -> o.writeEnum(v) } - } - - companion object { - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): PerAlertActionScope { - return PerAlertActionScope(sin) - } - } -} - -class PerExecutionActionScope() : ActionExecutionScope() { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this() - - override fun hashCode(): Int { - return javaClass.hashCode() - } - - // Creating an equals method that just checks class type rather than reference since this is currently stateless. - // Otherwise, it would have been a dataclass which would have handled this. 
- override fun equals(other: Any?): Boolean { - if (this === other) return true - if (other?.javaClass != javaClass) return false - return true - } - - override fun getExecutionScope(): Type = Type.PER_EXECUTION - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .startObject(PER_EXECUTION_FIELD) - .endObject() - return builder.endObject() - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) {} - - companion object { - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): PerExecutionActionScope { - return PerExecutionActionScope(sin) - } - } -} - -enum class AlertCategory { DEDUPED, NEW, COMPLETED } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/action/Throttle.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/action/Throttle.kt deleted file mode 100644 index 177345b44..000000000 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/action/Throttle.kt +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model.action - -import org.apache.commons.codec.binary.StringUtils -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils -import java.io.IOException -import java.time.temporal.ChronoUnit -import java.util.Locale - -data class Throttle( - val value: Int, - val unit: ChronoUnit -) : Writeable, ToXContentObject { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this ( - sin.readInt(), // value - sin.readEnum(ChronoUnit::class.java) // unit - ) - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return builder.startObject() - .field(VALUE_FIELD, value) - .field(UNIT_FIELD, unit.name) - .endObject() - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeInt(value) - out.writeEnum(unit) - } - - companion object { - const val VALUE_FIELD = "value" - const val UNIT_FIELD = "unit" - - @JvmStatic - @Throws(IOException::class) - fun parse(xcp: XContentParser): Throttle { - var value: Int = 0 - var unit: ChronoUnit = ChronoUnit.MINUTES // only support MINUTES throttle unit currently - - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - when (fieldName) { - UNIT_FIELD -> { - val unitString = xcp.text().uppercase(Locale.ROOT) - require(StringUtils.equals(unitString, ChronoUnit.MINUTES.name), { "Only support MINUTES throttle unit currently" }) - unit = ChronoUnit.valueOf(unitString) - } - VALUE_FIELD -> { - val currentToken = xcp.currentToken() - require(currentToken != XContentParser.Token.VALUE_NULL, { "Throttle value can't be null" }) - when { - currentToken.isValue -> { - value = xcp.intValue() - require(value > 0, { "Can only set positive throttle period" }) - } - else -> { - XContentParserUtils.throwUnknownToken(currentToken, xcp.tokenLocation) - } - } - } - - else -> { - throw IllegalStateException("Unexpected 
field: $fieldName, while parsing action") - } - } - } - return Throttle(value = value, unit = requireNotNull(unit)) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): Throttle { - return Throttle(sin) - } - } -} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/Chime.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/Chime.kt index 33840ce9f..06d066ded 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/Chime.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/Chime.kt @@ -5,15 +5,14 @@ package org.opensearch.alerting.model.destination -import org.opensearch.common.Strings -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import org.opensearch.core.common.Strings +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken import java.io.IOException -import java.lang.IllegalStateException /** * A value object that represents a Chime message. Chime message will be diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/CustomWebhook.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/CustomWebhook.kt index 9bc0d8224..5758576d8 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/CustomWebhook.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/CustomWebhook.kt @@ -5,15 +5,14 @@ package org.opensearch.alerting.model.destination -import org.opensearch.common.Strings -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import org.opensearch.core.common.Strings +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken import java.io.IOException -import java.lang.IllegalStateException /** * A value object that represents a Custom webhook message. 
Webhook message will be diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/Destination.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/Destination.kt index ce0987514..a5043b3cf 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/Destination.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/Destination.kt @@ -8,24 +8,24 @@ package org.opensearch.alerting.model.destination import org.apache.logging.log4j.LogManager import org.opensearch.alerting.model.destination.email.Email import org.opensearch.alerting.opensearchapi.convertToMap -import org.opensearch.alerting.opensearchapi.instant -import org.opensearch.alerting.opensearchapi.optionalTimeField -import org.opensearch.alerting.opensearchapi.optionalUserField import org.opensearch.alerting.util.DestinationType -import org.opensearch.alerting.util.IndexUtils.Companion.NO_SCHEMA_VERSION import org.opensearch.alerting.util.destinationmigration.DestinationConversionUtils.Companion.convertAlertingToNotificationMethodType -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import org.opensearch.commons.alerting.util.IndexUtils.Companion.NO_SCHEMA_VERSION +import org.opensearch.commons.alerting.util.instant +import org.opensearch.commons.alerting.util.optionalTimeField +import org.opensearch.commons.alerting.util.optionalUserField import org.opensearch.commons.authuser.User import org.opensearch.commons.destination.message.LegacyBaseMessage import org.opensearch.commons.destination.message.LegacyChimeMessage import org.opensearch.commons.destination.message.LegacyCustomWebhookMessage import org.opensearch.commons.destination.message.LegacyEmailMessage import org.opensearch.commons.destination.message.LegacySlackMessage +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken import java.io.IOException import java.time.Instant import java.util.Locale @@ -260,7 +260,7 @@ data class Destination( } DestinationType.CUSTOM_WEBHOOK -> { destinationMessage = LegacyCustomWebhookMessage.Builder(name) - .withUrl(getLegacyCustomWebhookMessageURL(customWebhook)) + .withUrl(getLegacyCustomWebhookMessageURL(customWebhook, compiledMessage)) .withHeaderParams(customWebhook?.headerParams) .withMessage(compiledMessage).build() } @@ -296,7 +296,7 @@ data class Destination( return content } - private fun getLegacyCustomWebhookMessageURL(customWebhook: CustomWebhook?): String { + private fun getLegacyCustomWebhookMessageURL(customWebhook: CustomWebhook?, message: String): String { return LegacyCustomWebhookMessage.Builder(name) .withUrl(customWebhook?.url) .withScheme(customWebhook?.scheme) @@ -304,6 +304,7 @@ data class Destination( .withPort(customWebhook?.port) .withPath(customWebhook?.path) .withQueryParams(customWebhook?.queryParams) + .withMessage(message) .build().uri.toString() } } diff --git 
a/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/DestinationContextFactory.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/DestinationContextFactory.kt index fb3a2074b..263962ac7 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/DestinationContextFactory.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/DestinationContextFactory.kt @@ -12,8 +12,8 @@ import org.opensearch.alerting.model.destination.email.Recipient import org.opensearch.alerting.settings.DestinationSettings.Companion.SecureDestinationSettings import org.opensearch.alerting.util.DestinationType import org.opensearch.client.Client -import org.opensearch.common.settings.SecureString -import org.opensearch.common.xcontent.NamedXContentRegistry +import org.opensearch.core.common.settings.SecureString +import org.opensearch.core.xcontent.NamedXContentRegistry /** * This class is responsible for generating [DestinationContext]. diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/SNS.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/SNS.kt index 04a6adb56..f9c6ec59f 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/SNS.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/SNS.kt @@ -5,10 +5,10 @@ package org.opensearch.alerting.model.destination -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils import java.io.IOException import java.lang.IllegalStateException import java.util.regex.Pattern diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/Slack.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/Slack.kt index f27222159..14f623616 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/Slack.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/Slack.kt @@ -5,15 +5,14 @@ package org.opensearch.alerting.model.destination -import org.opensearch.common.Strings -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import org.opensearch.core.common.Strings +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken import java.io.IOException -import java.lang.IllegalStateException /** * A value object that represents a Slack message. 
Slack message will be diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/email/Email.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/email/Email.kt index 418172984..75635ab38 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/email/Email.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/email/Email.kt @@ -6,14 +6,14 @@ package org.opensearch.alerting.model.destination.email import org.opensearch.alerting.util.isValidEmail -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParser.Token -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParser.Token +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken import java.io.IOException import java.lang.IllegalStateException import java.util.Locale diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/email/EmailAccount.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/email/EmailAccount.kt index 47d6509e9..9d0bb7f00 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/email/EmailAccount.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/email/EmailAccount.kt @@ -5,17 +5,17 @@ package org.opensearch.alerting.model.destination.email -import org.opensearch.alerting.util.IndexUtils.Companion.NO_SCHEMA_VERSION import org.opensearch.alerting.util.isValidEmail -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.settings.SecureString -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParser.Token -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import org.opensearch.commons.alerting.util.IndexUtils.Companion.NO_SCHEMA_VERSION +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.common.settings.SecureString +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParser.Token +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken import java.io.IOException /** diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/email/EmailGroup.kt b/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/email/EmailGroup.kt index f0a547842..a960da5f5 100644 --- 
a/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/email/EmailGroup.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/model/destination/email/EmailGroup.kt @@ -5,17 +5,17 @@ package org.opensearch.alerting.model.destination.email -import org.opensearch.alerting.util.IndexUtils.Companion.NO_SCHEMA_VERSION import org.opensearch.alerting.util.isValidEmail -import org.opensearch.common.Strings -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParser.Token -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import org.opensearch.commons.alerting.util.IndexUtils.Companion.NO_SCHEMA_VERSION +import org.opensearch.core.common.Strings +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParser.Token +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken import java.io.IOException /** diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestAcknowledgeAlertAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestAcknowledgeAlertAction.kt index 5cdde7424..f953876a4 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestAcknowledgeAlertAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestAcknowledgeAlertAction.kt @@ -9,12 +9,12 @@ import org.apache.logging.log4j.LogManager import org.apache.logging.log4j.Logger import org.opensearch.action.support.WriteRequest.RefreshPolicy import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.action.AcknowledgeAlertAction -import org.opensearch.alerting.action.AcknowledgeAlertRequest import org.opensearch.alerting.util.REFRESH import org.opensearch.client.node.NodeClient -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import org.opensearch.commons.alerting.action.AcknowledgeAlertRequest +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken import org.opensearch.rest.BaseRestHandler import org.opensearch.rest.BaseRestHandler.RestChannelConsumer import org.opensearch.rest.RestHandler.ReplacedRoute @@ -65,7 +65,7 @@ class RestAcknowledgeAlertAction : BaseRestHandler() { val acknowledgeAlertRequest = AcknowledgeAlertRequest(monitorId, alertIds, refreshPolicy) return RestChannelConsumer { channel -> - client.execute(AcknowledgeAlertAction.INSTANCE, acknowledgeAlertRequest, RestToXContentListener(channel)) + client.execute(AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE, acknowledgeAlertRequest, RestToXContentListener(channel)) } } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestAcknowledgeChainedAlertsAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestAcknowledgeChainedAlertsAction.kt new file 
mode 100644
index 000000000..968856a48
--- /dev/null
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestAcknowledgeChainedAlertsAction.kt
@@ -0,0 +1,82 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.resthandler
+
+import org.apache.logging.log4j.LogManager
+import org.apache.logging.log4j.Logger
+import org.opensearch.alerting.AlertingPlugin
+import org.opensearch.client.node.NodeClient
+import org.opensearch.commons.alerting.action.AcknowledgeChainedAlertRequest
+import org.opensearch.commons.alerting.action.AlertingActions
+import org.opensearch.core.xcontent.XContentParser
+import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken
+import org.opensearch.rest.BaseRestHandler
+import org.opensearch.rest.BaseRestHandler.RestChannelConsumer
+import org.opensearch.rest.RestHandler.Route
+import org.opensearch.rest.RestRequest
+import org.opensearch.rest.RestRequest.Method.POST
+import org.opensearch.rest.action.RestToXContentListener
+import java.io.IOException
+
+private val log: Logger = LogManager.getLogger(RestAcknowledgeChainedAlertAction::class.java)
+
+/**
+ * This class consists of the REST handler to acknowledge chained alerts.
+ * The user provides the workflowID to which these alerts pertain, and in the content of the request provides
+ * the ids of the chained alerts the user would like to acknowledge.
+ */
+class RestAcknowledgeChainedAlertAction : BaseRestHandler() {
+
+    override fun getName(): String {
+        return "acknowledge_chained_alert_action"
+    }
+
+    override fun routes(): List<Route> {
+        // Acknowledge alerts
+        return mutableListOf(
+            Route(
+                POST,
+                "${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}/_acknowledge/alerts"
+            )
+        )
+    }
+
+    @Throws(IOException::class)
+    override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer {
+        log.debug("${request.method()} ${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}/_acknowledge/alerts")
+
+        val workflowId = request.param("workflowID")
+        require(!workflowId.isNullOrEmpty()) { "Missing workflow id." }
+        val alertIds = getAlertIds(request.contentParser())
+        require(alertIds.isNotEmpty()) { "You must provide at least one alert id." }
+
+        val acknowledgeAlertRequest = AcknowledgeChainedAlertRequest(workflowId, alertIds)
+        return RestChannelConsumer { channel ->
+            client.execute(AlertingActions.ACKNOWLEDGE_CHAINED_ALERTS_ACTION_TYPE, acknowledgeAlertRequest, RestToXContentListener(channel))
+        }
+    }
+
+    /**
+     * Parse the request content and return a list of the alert ids to acknowledge.
+     */
+    private fun getAlertIds(xcp: XContentParser): List<String> {
+        val ids = mutableListOf<String>()
+        ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp)
+        while (xcp.nextToken() != XContentParser.Token.END_OBJECT) {
+            val fieldName = xcp.currentName()
+            xcp.nextToken()
+            when (fieldName) {
+                "alerts" -> {
+                    ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp)
+                    while (xcp.nextToken() != XContentParser.Token.END_ARRAY) {
+                        ids.add(xcp.text())
+                    }
+                }
+            }
+        }
+        return ids
+    }
+}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestDeleteMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestDeleteMonitorAction.kt
index 70c81f121..bf6e63f68 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestDeleteMonitorAction.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestDeleteMonitorAction.kt
@@ -8,11 +8,10 @@ import org.apache.logging.log4j.LogManager
 import org.apache.logging.log4j.Logger
 import org.opensearch.action.support.WriteRequest.RefreshPolicy
 import org.opensearch.alerting.AlertingPlugin
-import org.opensearch.alerting.action.DeleteMonitorAction
-import org.opensearch.alerting.action.DeleteMonitorRequest
-import org.opensearch.alerting.model.Alert
 import org.opensearch.alerting.util.REFRESH
 import org.opensearch.client.node.NodeClient
+import org.opensearch.commons.alerting.action.AlertingActions
+import org.opensearch.commons.alerting.action.DeleteMonitorRequest
 import org.opensearch.rest.BaseRestHandler
 import org.opensearch.rest.BaseRestHandler.RestChannelConsumer
 import org.opensearch.rest.RestHandler.ReplacedRoute
@@ -60,7 +59,7 @@ class RestDeleteMonitorAction : BaseRestHandler() {
         val deleteMonitorRequest = DeleteMonitorRequest(monitorId, refreshPolicy)
         return RestChannelConsumer { channel ->
-            client.execute(DeleteMonitorAction.INSTANCE, deleteMonitorRequest, RestToXContentListener(channel))
+            client.execute(AlertingActions.DELETE_MONITOR_ACTION_TYPE, deleteMonitorRequest, RestToXContentListener(channel))
         }
     }
 }
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestDeleteWorkflowAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestDeleteWorkflowAction.kt
new file mode 100644
index 000000000..a61a9b51c
--- /dev/null
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestDeleteWorkflowAction.kt
@@ -0,0 +1,60 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.resthandler
+
+import org.apache.logging.log4j.LogManager
+import org.opensearch.action.support.WriteRequest
+import org.opensearch.alerting.AlertingPlugin
+import org.opensearch.alerting.util.REFRESH
+import org.opensearch.client.node.NodeClient
+import org.opensearch.commons.alerting.action.AlertingActions
+import org.opensearch.commons.alerting.action.DeleteWorkflowRequest
+import org.opensearch.rest.BaseRestHandler
+import org.opensearch.rest.RestHandler
+import org.opensearch.rest.RestRequest
+import org.opensearch.rest.action.RestToXContentListener
+import java.io.IOException
+
+/**
+ * This class
consists of the REST handler to delete workflows. + */ +class RestDeleteWorkflowAction : BaseRestHandler() { + + private val log = LogManager.getLogger(javaClass) + + override fun getName(): String { + return "delete_workflow_action" + } + + override fun routes(): List { + return listOf( + RestHandler.Route( + RestRequest.Method.DELETE, + "${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}" + ) + ) + } + + @Throws(IOException::class) + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + log.debug("${request.method()} ${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}") + + val workflowId = request.param("workflowID") + val deleteDelegateMonitors = request.paramAsBoolean("deleteDelegateMonitors", false) + log.debug("${request.method()} ${request.uri()}") + + val refreshPolicy = + WriteRequest.RefreshPolicy.parse(request.param(REFRESH, WriteRequest.RefreshPolicy.IMMEDIATE.value)) + val deleteWorkflowRequest = DeleteWorkflowRequest(workflowId, deleteDelegateMonitors) + + return RestChannelConsumer { channel -> + client.execute( + AlertingActions.DELETE_WORKFLOW_ACTION_TYPE, deleteWorkflowRequest, + RestToXContentListener(channel) + ) + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestExecuteMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestExecuteMonitorAction.kt index ee90a5cdf..740dcb2d6 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestExecuteMonitorAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestExecuteMonitorAction.kt @@ -9,11 +9,11 @@ import org.apache.logging.log4j.LogManager import org.opensearch.alerting.AlertingPlugin import org.opensearch.alerting.action.ExecuteMonitorAction import org.opensearch.alerting.action.ExecuteMonitorRequest -import org.opensearch.alerting.model.Monitor import org.opensearch.client.node.NodeClient import org.opensearch.common.unit.TimeValue -import org.opensearch.common.xcontent.XContentParser.Token.START_OBJECT -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.core.xcontent.XContentParser.Token.START_OBJECT +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken import org.opensearch.rest.BaseRestHandler import org.opensearch.rest.BaseRestHandler.RestChannelConsumer import org.opensearch.rest.RestHandler.ReplacedRoute diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestExecuteWorkflowAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestExecuteWorkflowAction.kt new file mode 100644 index 000000000..de8da1bac --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestExecuteWorkflowAction.kt @@ -0,0 +1,59 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.logging.log4j.LogManager +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.action.ExecuteWorkflowAction +import org.opensearch.alerting.action.ExecuteWorkflowRequest +import org.opensearch.client.node.NodeClient +import org.opensearch.common.unit.TimeValue +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.RestHandler 
+import org.opensearch.rest.RestRequest +import org.opensearch.rest.action.RestToXContentListener +import java.time.Instant + +private val log = LogManager.getLogger(RestExecuteWorkflowAction::class.java) + +class RestExecuteWorkflowAction : BaseRestHandler() { + + override fun getName(): String = "execute_workflow_action" + + override fun routes(): List { + return listOf( + RestHandler.Route(RestRequest.Method.POST, "${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}/_execute") + ) + } + + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + log.debug("${request.method()} ${AlertingPlugin.WORKFLOW_BASE_URI}/_execute") + + return RestChannelConsumer { channel -> + val dryrun = request.paramAsBoolean("dryrun", false) + val requestEnd = request.paramAsTime("period_end", TimeValue(Instant.now().toEpochMilli())) + + if (request.hasParam("workflowID")) { + val workflowId = request.param("workflowID") + val execWorkflowRequest = ExecuteWorkflowRequest(dryrun, requestEnd, workflowId, null) + client.execute(ExecuteWorkflowAction.INSTANCE, execWorkflowRequest, RestToXContentListener(channel)) + } else { + val xcp = request.contentParser() + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + val workflow = Workflow.parse(xcp, Workflow.NO_ID, Workflow.NO_VERSION) + val execWorkflowRequest = ExecuteWorkflowRequest(dryrun, requestEnd, null, workflow) + client.execute(ExecuteWorkflowAction.INSTANCE, execWorkflowRequest, RestToXContentListener(channel)) + } + } + } + + override fun responseParams(): Set { + return setOf("dryrun", "period_end", "workflowID") + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetAlertsAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetAlertsAction.kt index a189c3f52..aabcf8d6c 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetAlertsAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetAlertsAction.kt @@ -7,10 +7,10 @@ package org.opensearch.alerting.resthandler import org.apache.logging.log4j.LogManager import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.action.GetAlertsAction -import org.opensearch.alerting.action.GetAlertsRequest -import org.opensearch.alerting.model.Table import org.opensearch.client.node.NodeClient +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.GetAlertsRequest +import org.opensearch.commons.alerting.model.Table import org.opensearch.rest.BaseRestHandler import org.opensearch.rest.BaseRestHandler.RestChannelConsumer import org.opensearch.rest.RestHandler.ReplacedRoute @@ -57,6 +57,13 @@ class RestGetAlertsAction : BaseRestHandler() { val severityLevel = request.param("severityLevel", "ALL") val alertState = request.param("alertState", "ALL") val monitorId: String? = request.param("monitorId") + val workflowId: String? 
= request.param("workflowIds") + val workflowIds = mutableListOf() + if (workflowId.isNullOrEmpty() == false) { + workflowIds.add(workflowId) + } else { + workflowIds.add("") + } val table = Table( sortOrder, sortString, @@ -66,10 +73,10 @@ class RestGetAlertsAction : BaseRestHandler() { searchString ) - val getAlertsRequest = GetAlertsRequest(table, severityLevel, alertState, monitorId) + val getAlertsRequest = GetAlertsRequest(table, severityLevel, alertState, monitorId, null, workflowIds = workflowIds) return RestChannelConsumer { channel -> - client.execute(GetAlertsAction.INSTANCE, getAlertsRequest, RestToXContentListener(channel)) + client.execute(AlertingActions.GET_ALERTS_ACTION_TYPE, getAlertsRequest, RestToXContentListener(channel)) } } } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetDestinationsAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetDestinationsAction.kt index d6ee82f71..7e5e1530f 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetDestinationsAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetDestinationsAction.kt @@ -9,9 +9,9 @@ import org.apache.logging.log4j.LogManager import org.opensearch.alerting.AlertingPlugin import org.opensearch.alerting.action.GetDestinationsAction import org.opensearch.alerting.action.GetDestinationsRequest -import org.opensearch.alerting.model.Table import org.opensearch.alerting.util.context import org.opensearch.client.node.NodeClient +import org.opensearch.commons.alerting.model.Table import org.opensearch.rest.BaseRestHandler import org.opensearch.rest.BaseRestHandler.RestChannelConsumer import org.opensearch.rest.RestHandler.ReplacedRoute diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetFindingsAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetFindingsAction.kt index 69b65d142..75607a701 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetFindingsAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetFindingsAction.kt @@ -7,10 +7,10 @@ package org.opensearch.alerting.resthandler import org.apache.logging.log4j.LogManager import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.action.GetFindingsAction -import org.opensearch.alerting.action.GetFindingsRequest -import org.opensearch.alerting.model.Table import org.opensearch.client.node.NodeClient +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.GetFindingsRequest +import org.opensearch.commons.alerting.model.Table import org.opensearch.rest.BaseRestHandler import org.opensearch.rest.BaseRestHandler.RestChannelConsumer import org.opensearch.rest.RestHandler.Route @@ -61,7 +61,7 @@ class RestGetFindingsAction : BaseRestHandler() { ) return RestChannelConsumer { channel -> - client.execute(GetFindingsAction.INSTANCE, getFindingsSearchRequest, RestToXContentListener(channel)) + client.execute(AlertingActions.GET_FINDINGS_ACTION_TYPE, getFindingsSearchRequest, RestToXContentListener(channel)) } } } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetMonitorAction.kt index 4d9bac033..54270b717 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetMonitorAction.kt +++ 
b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetMonitorAction.kt @@ -6,10 +6,10 @@ package org.opensearch.alerting.resthandler import org.apache.logging.log4j.LogManager import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.action.GetMonitorAction -import org.opensearch.alerting.action.GetMonitorRequest import org.opensearch.alerting.util.context import org.opensearch.client.node.NodeClient +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.GetMonitorRequest import org.opensearch.rest.BaseRestHandler import org.opensearch.rest.BaseRestHandler.RestChannelConsumer import org.opensearch.rest.RestHandler.ReplacedRoute @@ -69,7 +69,7 @@ class RestGetMonitorAction : BaseRestHandler() { val getMonitorRequest = GetMonitorRequest(monitorId, RestActions.parseVersion(request), request.method(), srcContext) return RestChannelConsumer { channel -> - client.execute(GetMonitorAction.INSTANCE, getMonitorRequest, RestToXContentListener(channel)) + client.execute(AlertingActions.GET_MONITOR_ACTION_TYPE, getMonitorRequest, RestToXContentListener(channel)) } } } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetRemoteIndexesAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetRemoteIndexesAction.kt new file mode 100644 index 000000000..591ab2c3e --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetRemoteIndexesAction.kt @@ -0,0 +1,50 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.logging.log4j.LogManager +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.action.GetRemoteIndexesAction +import org.opensearch.alerting.action.GetRemoteIndexesRequest +import org.opensearch.client.node.NodeClient +import org.opensearch.core.common.Strings +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.RestHandler +import org.opensearch.rest.RestRequest +import org.opensearch.rest.action.RestToXContentListener + +private val log = LogManager.getLogger(RestGetRemoteIndexesAction::class.java) + +class RestGetRemoteIndexesAction : BaseRestHandler() { + val ROUTE = "${AlertingPlugin.REMOTE_BASE_URI}/indexes" + + override fun getName(): String { + return "get_remote_indexes_action" + } + + override fun routes(): List { + return mutableListOf( + RestHandler.Route(RestRequest.Method.GET, ROUTE) + ) + } + + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + log.debug("${request.method()} $ROUTE") + val indexes = Strings.splitStringByCommaToArray(request.param(GetRemoteIndexesRequest.INDEXES_FIELD, "")) + val includeMappings = request.paramAsBoolean(GetRemoteIndexesRequest.INCLUDE_MAPPINGS_FIELD, false) + return RestChannelConsumer { + channel -> + client.execute( + GetRemoteIndexesAction.INSTANCE, + GetRemoteIndexesRequest( + indexes = indexes.toList(), + includeMappings = includeMappings + ), + RestToXContentListener(channel) + ) + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetWorkflowAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetWorkflowAction.kt new file mode 100644 index 000000000..1a2ca4426 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetWorkflowAction.kt @@ -0,0 +1,59 @@ +/* + * Copyright OpenSearch Contributors + * 
SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.resthandler
+
+import org.apache.logging.log4j.LogManager
+import org.opensearch.alerting.AlertingPlugin
+import org.opensearch.alerting.util.context
+import org.opensearch.client.node.NodeClient
+import org.opensearch.commons.alerting.action.AlertingActions
+import org.opensearch.commons.alerting.action.GetWorkflowRequest
+import org.opensearch.rest.BaseRestHandler
+import org.opensearch.rest.RestHandler
+import org.opensearch.rest.RestRequest
+import org.opensearch.rest.action.RestToXContentListener
+import org.opensearch.search.fetch.subphase.FetchSourceContext
+
+/**
+ * This class consists of the REST handler to retrieve a workflow.
+ */
+class RestGetWorkflowAction : BaseRestHandler() {
+
+    private val log = LogManager.getLogger(javaClass)
+
+    override fun getName(): String {
+        return "get_workflow_action"
+    }
+
+    override fun routes(): List<RestHandler.Route> {
+        return listOf(
+            RestHandler.Route(
+                RestRequest.Method.GET,
+                "${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}"
+            )
+        )
+    }
+
+    override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer {
+        log.debug("${request.method()} ${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}")
+
+        val workflowId = request.param("workflowID")
+        if (workflowId == null || workflowId.isEmpty()) {
+            throw IllegalArgumentException("missing id")
+        }
+
+        var srcContext = context(request)
+        if (request.method() == RestRequest.Method.HEAD) {
+            srcContext = FetchSourceContext.DO_NOT_FETCH_SOURCE
+        }
+        val getWorkflowRequest =
+            GetWorkflowRequest(workflowId, request.method())
+        return RestChannelConsumer {
+            channel ->
+            client.execute(AlertingActions.GET_WORKFLOW_ACTION_TYPE, getWorkflowRequest, RestToXContentListener(channel))
+        }
+    }
+}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetWorkflowAlertsAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetWorkflowAlertsAction.kt
new file mode 100644
index 000000000..474c32d4a
--- /dev/null
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestGetWorkflowAlertsAction.kt
@@ -0,0 +1,92 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.resthandler
+
+import org.apache.logging.log4j.LogManager
+import org.opensearch.alerting.AlertingPlugin
+import org.opensearch.client.node.NodeClient
+import org.opensearch.commons.alerting.action.AlertingActions
+import org.opensearch.commons.alerting.action.GetWorkflowAlertsRequest
+import org.opensearch.commons.alerting.model.Table
+import org.opensearch.rest.BaseRestHandler
+import org.opensearch.rest.BaseRestHandler.RestChannelConsumer
+import org.opensearch.rest.RestHandler.ReplacedRoute
+import org.opensearch.rest.RestHandler.Route
+import org.opensearch.rest.RestRequest
+import org.opensearch.rest.RestRequest.Method.GET
+import org.opensearch.rest.action.RestToXContentListener
+
+/**
+ * This class consists of the REST handler to retrieve chained alerts by workflow id.
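+ *
+ * A sketch of a request this handler serves, assuming AlertingPlugin.WORKFLOW_BASE_URI
+ * resolves to /_plugins/_alerting/workflows (the constant is defined outside this diff),
+ * with <workflow-id> as a placeholder value:
+ *
+ *   GET /_plugins/_alerting/workflows/alerts?workflowIds=<workflow-id>&getAssociatedAlerts=true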
+ */
+class RestGetWorkflowAlertsAction : BaseRestHandler() {
+
+    private val log = LogManager.getLogger(RestGetWorkflowAlertsAction::class.java)
+
+    override fun getName(): String {
+        return "get_workflow_alerts_action"
+    }
+
+    override fun routes(): List<Route> {
+        return mutableListOf(
+            Route(
+                GET,
+                "${AlertingPlugin.WORKFLOW_BASE_URI}/alerts"
+            )
+        )
+    }
+
+    override fun replacedRoutes(): MutableList<ReplacedRoute> {
+        return mutableListOf()
+    }
+
+    override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer {
+        log.debug("${request.method()} ${AlertingPlugin.WORKFLOW_BASE_URI}/alerts")
+
+        val sortString = request.param("sortString", "monitor_name.keyword")
+        val sortOrder = request.param("sortOrder", "asc")
+        val missing: String? = request.param("missing")
+        val size = request.paramAsInt("size", 20)
+        val startIndex = request.paramAsInt("startIndex", 0)
+        val searchString = request.param("searchString", "")
+        val severityLevel = request.param("severityLevel", "ALL")
+        val alertState = request.param("alertState", "ALL")
+        val workflowId: String? = request.param("workflowIds")
+        val alertId: String? = request.param("alertIds")
+        val getAssociatedAlerts: Boolean = request.param("getAssociatedAlerts", "false").toBoolean()
+        val workflowIds = mutableListOf<String>()
+        if (workflowId.isNullOrEmpty() == false) {
+            workflowIds.add(workflowId)
+        }
+        val alertIds = mutableListOf<String>()
+        if (alertId.isNullOrEmpty() == false) {
+            alertIds.add(alertId)
+        }
+        val table = Table(
+            sortOrder,
+            sortString,
+            missing,
+            size,
+            startIndex,
+            searchString
+        )
+
+        val getWorkflowAlertsRequest = GetWorkflowAlertsRequest(
+            table,
+            severityLevel,
+            alertState,
+            alertIndex = null,
+            associatedAlertsIndex = null,
+            workflowIds = workflowIds,
+            monitorIds = emptyList(),
+            getAssociatedAlerts = getAssociatedAlerts,
+            alertIds = alertIds
+        )
+        return RestChannelConsumer { channel ->
+            client.execute(AlertingActions.GET_WORKFLOW_ALERTS_ACTION_TYPE, getWorkflowAlertsRequest, RestToXContentListener(channel))
+        }
+    }
+}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt
index 65283826c..5e4a8c155 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt
@@ -7,20 +7,23 @@ import org.apache.logging.log4j.LogManager
 import org.opensearch.action.support.WriteRequest
 import org.opensearch.alerting.AlertingPlugin
-import org.opensearch.alerting.action.IndexMonitorAction
-import org.opensearch.alerting.action.IndexMonitorRequest
-import org.opensearch.alerting.action.IndexMonitorResponse
-import org.opensearch.alerting.model.BucketLevelTrigger
-import org.opensearch.alerting.model.DocumentLevelTrigger
-import org.opensearch.alerting.model.Monitor
-import org.opensearch.alerting.model.QueryLevelTrigger
+import org.opensearch.alerting.alerts.AlertIndices
 import org.opensearch.alerting.util.IF_PRIMARY_TERM
 import org.opensearch.alerting.util.IF_SEQ_NO
 import org.opensearch.alerting.util.REFRESH
 import org.opensearch.client.node.NodeClient
-import org.opensearch.common.xcontent.ToXContent
-import org.opensearch.common.xcontent.XContentParser.Token
-import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken
+import org.opensearch.commons.alerting.action.AlertingActions
+import
org.opensearch.commons.alerting.action.IndexMonitorRequest +import org.opensearch.commons.alerting.action.IndexMonitorResponse +import org.opensearch.commons.alerting.model.BucketLevelTrigger +import org.opensearch.commons.alerting.model.DocumentLevelTrigger +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.QueryLevelTrigger +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentParser.Token +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken import org.opensearch.index.seqno.SequenceNumbers import org.opensearch.rest.BaseRestHandler import org.opensearch.rest.BaseRestHandler.RestChannelConsumer @@ -32,7 +35,6 @@ import org.opensearch.rest.RestRequest import org.opensearch.rest.RestRequest.Method.POST import org.opensearch.rest.RestRequest.Method.PUT import org.opensearch.rest.RestResponse -import org.opensearch.rest.RestStatus import org.opensearch.rest.action.RestResponseListener import java.io.IOException import java.time.Instant @@ -82,6 +84,9 @@ class RestIndexMonitorAction : BaseRestHandler() { val xcp = request.contentParser() ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp) val monitor = Monitor.parse(xcp, id).copy(lastUpdateTime = Instant.now()) + val rbacRoles = request.contentParser().map()["rbac_roles"] as List? + + validateDataSources(monitor) val monitorType = monitor.monitorType val triggers = monitor.triggers when (monitorType) { @@ -99,6 +104,13 @@ class RestIndexMonitorAction : BaseRestHandler() { } } } + Monitor.MonitorType.CLUSTER_METRICS_MONITOR -> { + triggers.forEach { + if (it !is QueryLevelTrigger) { + throw IllegalArgumentException("Illegal trigger type, ${it.javaClass.name}, for cluster metrics monitor") + } + } + } Monitor.MonitorType.DOC_LEVEL_MONITOR -> { triggers.forEach { if (it !is DocumentLevelTrigger) { @@ -114,10 +126,22 @@ class RestIndexMonitorAction : BaseRestHandler() { } else { WriteRequest.RefreshPolicy.IMMEDIATE } - val indexMonitorRequest = IndexMonitorRequest(id, seqNo, primaryTerm, refreshPolicy, request.method(), monitor) + val indexMonitorRequest = IndexMonitorRequest(id, seqNo, primaryTerm, refreshPolicy, request.method(), monitor, rbacRoles) return RestChannelConsumer { channel -> - client.execute(IndexMonitorAction.INSTANCE, indexMonitorRequest, indexMonitorResponse(channel, request.method())) + client.execute(AlertingActions.INDEX_MONITOR_ACTION_TYPE, indexMonitorRequest, indexMonitorResponse(channel, request.method())) + } + } + + private fun validateDataSources(monitor: Monitor) { // Data Sources will currently be supported only at transport layer. 
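+        // Sketch of the intent (the JSON shape below is illustrative, not taken from this diff):
+        // a REST payload that sets e.g. "data_sources": { "alerts_index": "my-custom-alerts" }
+        // is rejected by the check below, because only the default queryIndex, findingsIndex,
+        // and alertsIndex values are accepted over REST; custom values stay transport-only.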
+        if (monitor.dataSources != null) {
+            if (
+                monitor.dataSources.queryIndex != ScheduledJob.DOC_LEVEL_QUERIES_INDEX ||
+                monitor.dataSources.findingsIndex != AlertIndices.FINDING_HISTORY_WRITE_INDEX ||
+                monitor.dataSources.alertsIndex != AlertIndices.ALERT_INDEX
+            ) {
+                throw IllegalArgumentException("Custom Data Sources are not allowed.")
+            }
         }
     }
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestIndexWorkflowAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestIndexWorkflowAction.kt
new file mode 100644
index 000000000..d631ed710
--- /dev/null
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestIndexWorkflowAction.kt
@@ -0,0 +1,99 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+package org.opensearch.alerting.resthandler
+
+import org.opensearch.action.support.WriteRequest
+import org.opensearch.alerting.AlertingPlugin
+import org.opensearch.alerting.util.AlertingException
+import org.opensearch.alerting.util.IF_PRIMARY_TERM
+import org.opensearch.alerting.util.IF_SEQ_NO
+import org.opensearch.alerting.util.REFRESH
+import org.opensearch.client.node.NodeClient
+import org.opensearch.commons.alerting.action.AlertingActions
+import org.opensearch.commons.alerting.action.IndexWorkflowRequest
+import org.opensearch.commons.alerting.action.IndexWorkflowResponse
+import org.opensearch.commons.alerting.model.Workflow
+import org.opensearch.core.rest.RestStatus
+import org.opensearch.core.xcontent.ToXContent
+import org.opensearch.core.xcontent.XContentParser
+import org.opensearch.core.xcontent.XContentParserUtils
+import org.opensearch.index.seqno.SequenceNumbers
+import org.opensearch.rest.BaseRestHandler
+import org.opensearch.rest.BaseRestHandler.RestChannelConsumer
+import org.opensearch.rest.BytesRestResponse
+import org.opensearch.rest.RestChannel
+import org.opensearch.rest.RestHandler
+import org.opensearch.rest.RestRequest
+import org.opensearch.rest.RestResponse
+import org.opensearch.rest.action.RestResponseListener
+import java.io.IOException
+import java.time.Instant
+
+/**
+ * Rest handlers to create and update workflows.
+ */
+class RestIndexWorkflowAction : BaseRestHandler() {
+
+    override fun getName(): String {
+        return "index_workflow_action"
+    }
+
+    override fun routes(): List<RestHandler.Route> {
+        return listOf(
+            RestHandler.Route(RestRequest.Method.POST, AlertingPlugin.WORKFLOW_BASE_URI),
+            RestHandler.Route(
+                RestRequest.Method.PUT,
+                "${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}"
+            )
+        )
+    }
+
+    @Throws(IOException::class)
+    override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer {
+        val id = request.param("workflowID", Workflow.NO_ID)
+        if (request.method() == RestRequest.Method.PUT && Workflow.NO_ID == id) {
+            throw AlertingException.wrap(IllegalArgumentException("Missing workflow ID"))
+        }
+
+        // Validate the request by parsing the JSON content to a Workflow
+        val xcp = request.contentParser()
+        XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp)
+        val workflow = Workflow.parse(xcp, id).copy(lastUpdateTime = Instant.now())
+        val rbacRoles = request.contentParser().map()["rbac_roles"] as List<String>?
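+        // Note: request.contentParser() is called a second time above because Workflow.parse
+        // has already consumed the first parser. rbac_roles is an optional top-level array in
+        // the same request body; an illustrative value would be "rbac_roles": ["finance_team"].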
+ + val seqNo = request.paramAsLong(IF_SEQ_NO, SequenceNumbers.UNASSIGNED_SEQ_NO) + val primaryTerm = request.paramAsLong(IF_PRIMARY_TERM, SequenceNumbers.UNASSIGNED_PRIMARY_TERM) + val refreshPolicy = if (request.hasParam(REFRESH)) { + WriteRequest.RefreshPolicy.parse(request.param(REFRESH)) + } else { + WriteRequest.RefreshPolicy.IMMEDIATE + } + val workflowRequest = + IndexWorkflowRequest(id, seqNo, primaryTerm, refreshPolicy, request.method(), workflow, rbacRoles) + + return RestChannelConsumer { channel -> + client.execute(AlertingActions.INDEX_WORKFLOW_ACTION_TYPE, workflowRequest, indexMonitorResponse(channel, request.method())) + } + } + + private fun indexMonitorResponse(channel: RestChannel, restMethod: RestRequest.Method): RestResponseListener<IndexWorkflowResponse> { + return object : RestResponseListener<IndexWorkflowResponse>(channel) { + @Throws(Exception::class) + override fun buildResponse(response: IndexWorkflowResponse): RestResponse { + var returnStatus = RestStatus.CREATED + if (restMethod == RestRequest.Method.PUT) + returnStatus = RestStatus.OK + + val restResponse = + BytesRestResponse(returnStatus, response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS)) + if (returnStatus == RestStatus.CREATED) { + val location = "${AlertingPlugin.WORKFLOW_BASE_URI}/${response.id}" + restResponse.addHeader("Location", location) + } + return restResponse + } + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchEmailAccountAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchEmailAccountAction.kt index f979c4c50..66281942a 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchEmailAccountAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchEmailAccountAction.kt @@ -9,15 +9,16 @@ import org.opensearch.action.search.SearchRequest import org.opensearch.action.search.SearchResponse import org.opensearch.alerting.AlertingPlugin import org.opensearch.alerting.action.SearchEmailAccountAction -import org.opensearch.alerting.core.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX import org.opensearch.alerting.model.destination.email.EmailAccount import org.opensearch.alerting.util.context import org.opensearch.client.node.NodeClient -import org.opensearch.common.bytes.BytesReference import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.ToXContent.EMPTY_PARAMS import org.opensearch.common.xcontent.XContentFactory.jsonBuilder import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import org.opensearch.core.common.bytes.BytesReference +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS import org.opensearch.index.query.QueryBuilders import org.opensearch.rest.BaseRestHandler import org.opensearch.rest.BytesRestResponse @@ -26,7 +27,6 @@ import org.opensearch.rest.RestHandler.ReplacedRoute import org.opensearch.rest.RestHandler.Route import org.opensearch.rest.RestRequest import org.opensearch.rest.RestResponse -import org.opensearch.rest.RestStatus import org.opensearch.rest.action.RestResponseListener import org.opensearch.search.builder.SearchSourceBuilder import java.io.IOException diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchEmailGroupAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchEmailGroupAction.kt index f6fc16f85..4550bb4a9
100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchEmailGroupAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchEmailGroupAction.kt @@ -9,24 +9,25 @@ import org.opensearch.action.search.SearchRequest import org.opensearch.action.search.SearchResponse import org.opensearch.alerting.AlertingPlugin import org.opensearch.alerting.action.SearchEmailGroupAction -import org.opensearch.alerting.core.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX import org.opensearch.alerting.model.destination.email.EmailGroup import org.opensearch.alerting.util.context import org.opensearch.client.node.NodeClient -import org.opensearch.common.bytes.BytesReference import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.ToXContent import org.opensearch.common.xcontent.XContentFactory import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import org.opensearch.core.common.bytes.BytesReference +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.ToXContent import org.opensearch.index.query.QueryBuilders import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.BaseRestHandler.RestChannelConsumer import org.opensearch.rest.BytesRestResponse import org.opensearch.rest.RestChannel import org.opensearch.rest.RestHandler.ReplacedRoute import org.opensearch.rest.RestHandler.Route import org.opensearch.rest.RestRequest import org.opensearch.rest.RestResponse -import org.opensearch.rest.RestStatus import org.opensearch.rest.action.RestResponseListener import org.opensearch.search.builder.SearchSourceBuilder import java.io.IOException diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt index 44446bb78..5cc9cbd34 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt @@ -9,23 +9,22 @@ import org.apache.logging.log4j.LogManager import org.opensearch.action.search.SearchRequest import org.opensearch.action.search.SearchResponse import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.action.SearchMonitorAction -import org.opensearch.alerting.action.SearchMonitorRequest import org.opensearch.alerting.alerts.AlertIndices.Companion.ALL_ALERT_INDEX_PATTERN -import org.opensearch.alerting.core.model.ScheduledJob -import org.opensearch.alerting.core.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX -import org.opensearch.alerting.model.Monitor import org.opensearch.alerting.settings.AlertingSettings import org.opensearch.alerting.util.context import org.opensearch.client.node.NodeClient import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.bytes.BytesReference import org.opensearch.common.settings.Settings import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.ToXContent.EMPTY_PARAMS import org.opensearch.common.xcontent.XContentFactory.jsonBuilder import org.opensearch.common.xcontent.XContentType -import org.opensearch.index.query.QueryBuilders +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.SearchMonitorRequest +import 
org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import org.opensearch.core.common.bytes.BytesReference +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS import org.opensearch.rest.BaseRestHandler import org.opensearch.rest.BaseRestHandler.RestChannelConsumer import org.opensearch.rest.BytesRestResponse @@ -36,7 +35,6 @@ import org.opensearch.rest.RestRequest import org.opensearch.rest.RestRequest.Method.GET import org.opensearch.rest.RestRequest.Method.POST import org.opensearch.rest.RestResponse -import org.opensearch.rest.RestStatus import org.opensearch.rest.action.RestResponseListener import org.opensearch.search.builder.SearchSourceBuilder import java.io.IOException @@ -97,21 +95,13 @@ class RestSearchMonitorAction( searchSourceBuilder.parseXContent(request.contentOrSourceParamParser()) searchSourceBuilder.fetchSource(context(request)) - val queryBuilder = QueryBuilders.boolQuery().must(searchSourceBuilder.query()) - if (index == SCHEDULED_JOBS_INDEX) { - queryBuilder.filter(QueryBuilders.existsQuery(Monitor.MONITOR_TYPE)) - } - - searchSourceBuilder.query(queryBuilder) - .seqNoAndPrimaryTerm(true) - .version(true) val searchRequest = SearchRequest() .source(searchSourceBuilder) .indices(index) val searchMonitorRequest = SearchMonitorRequest(searchRequest) return RestChannelConsumer { channel -> - client.execute(SearchMonitorAction.INSTANCE, searchMonitorRequest, searchMonitorResponse(channel)) + client.execute(AlertingActions.SEARCH_MONITORS_ACTION_TYPE, searchMonitorRequest, searchMonitorResponse(channel)) } } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/script/BucketLevelTriggerExecutionContext.kt b/alerting/src/main/kotlin/org/opensearch/alerting/script/BucketLevelTriggerExecutionContext.kt index 065e0ab80..72518ed48 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/script/BucketLevelTriggerExecutionContext.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/script/BucketLevelTriggerExecutionContext.kt @@ -5,11 +5,11 @@ package org.opensearch.alerting.script -import org.opensearch.alerting.model.Alert -import org.opensearch.alerting.model.BucketLevelTrigger import org.opensearch.alerting.model.BucketLevelTriggerRunResult -import org.opensearch.alerting.model.Monitor import org.opensearch.alerting.model.MonitorRunResult +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.BucketLevelTrigger +import org.opensearch.commons.alerting.model.Monitor import java.time.Instant data class BucketLevelTriggerExecutionContext( diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/script/ChainedAlertTriggerExecutionContext.kt b/alerting/src/main/kotlin/org/opensearch/alerting/script/ChainedAlertTriggerExecutionContext.kt new file mode 100644 index 000000000..d4bf4cb59 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/script/ChainedAlertTriggerExecutionContext.kt @@ -0,0 +1,40 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.script + +import org.opensearch.alerting.model.WorkflowRunResult +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.ChainedAlertTrigger +import org.opensearch.commons.alerting.model.Workflow +import java.time.Instant + +data class ChainedAlertTriggerExecutionContext( + val workflow: Workflow, + val 
workflowRunResult: WorkflowRunResult, + val periodStart: Instant, + val periodEnd: Instant?, + val error: Exception? = null, + val trigger: ChainedAlertTrigger, + val alertGeneratingMonitors: Set<String>, + val monitorIdToAlertIdsMap: Map<String, Set<String>>, + val alert: Alert? = null +) { + + /** + * Mustache templates need special permissions to reflectively introspect field names. To avoid doing this we + * translate the context to a Map of Strings to primitive types, which can be accessed without reflection. + */ + open fun asTemplateArg(): Map<String, Any?> { + return mapOf( + "monitor" to workflow.asTemplateArg(), + "results" to workflowRunResult, + "periodStart" to periodStart, + "error" to error, + "alertGeneratingMonitors" to alertGeneratingMonitors, + "monitorIdToAlertIdsMap" to monitorIdToAlertIdsMap + ) + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/script/DocumentLevelTriggerExecutionContext.kt b/alerting/src/main/kotlin/org/opensearch/alerting/script/DocumentLevelTriggerExecutionContext.kt index 67938387e..66de731f6 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/script/DocumentLevelTriggerExecutionContext.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/script/DocumentLevelTriggerExecutionContext.kt @@ -5,9 +5,9 @@ package org.opensearch.alerting.script -import org.opensearch.alerting.model.Alert -import org.opensearch.alerting.model.DocumentLevelTrigger -import org.opensearch.alerting.model.Monitor +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.DocumentLevelTrigger +import org.opensearch.commons.alerting.model.Monitor import java.time.Instant data class DocumentLevelTriggerExecutionContext( diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/script/QueryLevelTriggerExecutionContext.kt b/alerting/src/main/kotlin/org/opensearch/alerting/script/QueryLevelTriggerExecutionContext.kt index 6a492c316..2c7b53097 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/script/QueryLevelTriggerExecutionContext.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/script/QueryLevelTriggerExecutionContext.kt @@ -5,11 +5,11 @@ package org.opensearch.alerting.script -import org.opensearch.alerting.model.Alert -import org.opensearch.alerting.model.Monitor import org.opensearch.alerting.model.MonitorRunResult -import org.opensearch.alerting.model.QueryLevelTrigger import org.opensearch.alerting.model.QueryLevelTriggerRunResult +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.QueryLevelTrigger import java.time.Instant data class QueryLevelTriggerExecutionContext( diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/script/TriggerExecutionContext.kt b/alerting/src/main/kotlin/org/opensearch/alerting/script/TriggerExecutionContext.kt index f00033525..7ad1bfc86 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/script/TriggerExecutionContext.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/script/TriggerExecutionContext.kt @@ -5,9 +5,9 @@ package org.opensearch.alerting.script -import org.opensearch.alerting.model.Monitor import org.opensearch.alerting.model.MonitorRunResult -import org.opensearch.alerting.model.Trigger +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.Trigger import java.time.Instant abstract class TriggerExecutionContext( diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/service/DeleteMonitorService.kt
b/alerting/src/main/kotlin/org/opensearch/alerting/service/DeleteMonitorService.kt new file mode 100644 index 000000000..97d35e52e --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/service/DeleteMonitorService.kt @@ -0,0 +1,187 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.service + +import kotlinx.coroutines.CoroutineName +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.SupervisorJob +import org.apache.logging.log4j.LogManager +import org.apache.lucene.search.join.ScoreMode +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest +import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequest +import org.opensearch.action.admin.indices.exists.indices.IndicesExistsResponse +import org.opensearch.action.delete.DeleteRequest +import org.opensearch.action.delete.DeleteResponse +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.IndicesOptions +import org.opensearch.action.support.WriteRequest.RefreshPolicy +import org.opensearch.action.support.master.AcknowledgedResponse +import org.opensearch.alerting.MonitorMetadataService +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.ScheduledJobUtils.Companion.WORKFLOW_DELEGATE_PATH +import org.opensearch.alerting.util.ScheduledJobUtils.Companion.WORKFLOW_MONITOR_PATH +import org.opensearch.alerting.util.use +import org.opensearch.client.Client +import org.opensearch.commons.alerting.action.DeleteMonitorResponse +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.core.action.ActionListener +import org.opensearch.index.query.QueryBuilders +import org.opensearch.index.reindex.BulkByScrollResponse +import org.opensearch.index.reindex.DeleteByQueryAction +import org.opensearch.index.reindex.DeleteByQueryRequestBuilder +import org.opensearch.search.builder.SearchSourceBuilder +import kotlin.coroutines.resume +import kotlin.coroutines.resumeWithException +import kotlin.coroutines.suspendCoroutine + +/** + * Component used when deleting the monitors + */ +object DeleteMonitorService : + CoroutineScope by CoroutineScope(SupervisorJob() + Dispatchers.Default + CoroutineName("WorkflowMetadataService")) { + private val log = LogManager.getLogger(this.javaClass) + + private lateinit var client: Client + + fun initialize( + client: Client, + ) { + DeleteMonitorService.client = client + } + + /** + * Deletes the monitor, docLevelQueries and monitor metadata + * @param monitor monitor to be deleted + * @param refreshPolicy + */ + suspend fun deleteMonitor(monitor: Monitor, refreshPolicy: RefreshPolicy): DeleteMonitorResponse { + val deleteResponse = deleteMonitor(monitor.id, refreshPolicy) + deleteDocLevelMonitorQueriesAndIndices(monitor) + deleteMetadata(monitor) + return DeleteMonitorResponse(deleteResponse.id, deleteResponse.version) + } + + private suspend fun deleteMonitor(monitorId: String, refreshPolicy: RefreshPolicy): DeleteResponse { + val deleteMonitorRequest = DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, monitorId) + .setRefreshPolicy(refreshPolicy) + return client.suspendUntil { delete(deleteMonitorRequest, it) } + } + + private suspend fun deleteMetadata(monitor: Monitor) { + val deleteRequest = 
DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, "${monitor.id}-metadata") + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + try { + val deleteResponse: DeleteResponse = client.suspendUntil { delete(deleteRequest, it) } + log.debug("Monitor metadata: ${deleteResponse.id} deletion result: ${deleteResponse.result}") + } catch (e: Exception) { + // we only log the error and don't fail the request because if monitor document has been deleted, + // we cannot retry based on this failure + log.error("Failed to delete monitor metadata ${deleteRequest.id()}.", e) + } + } + + private suspend fun deleteDocLevelMonitorQueriesAndIndices(monitor: Monitor) { + try { + val metadata = MonitorMetadataService.getMetadata(monitor) + metadata?.sourceToQueryIndexMapping?.forEach { (_, queryIndex) -> + + val indicesExistsResponse: IndicesExistsResponse = + client.suspendUntil { + client.admin().indices().exists(IndicesExistsRequest(queryIndex), it) + } + if (indicesExistsResponse.isExists == false) { + return + } + // Check if there's any queries from other monitors in this queryIndex, + // to avoid unnecessary doc deletion, if we could just delete index completely + val searchResponse: SearchResponse = client.suspendUntil { + search( + SearchRequest(queryIndex).source( + SearchSourceBuilder() + .size(0) + .query( + QueryBuilders.boolQuery().mustNot( + QueryBuilders.matchQuery("monitor_id", monitor.id) + ) + ) + ).indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_HIDDEN), + it + ) + } + if (searchResponse.hits.totalHits.value == 0L) { + val ack: AcknowledgedResponse = client.suspendUntil { + client.admin().indices().delete( + DeleteIndexRequest(queryIndex).indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_HIDDEN), + it + ) + } + if (ack.isAcknowledged == false) { + log.error("Deletion of concrete queryIndex:$queryIndex is not ack'd!") + } + } else { + // Delete all queries added by this monitor + val response: BulkByScrollResponse = suspendCoroutine { cont -> + DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE) + .source(queryIndex) + .filter(QueryBuilders.matchQuery("monitor_id", monitor.id)) + .refresh(true) + .execute( + object : ActionListener { + override fun onResponse(response: BulkByScrollResponse) = cont.resume(response) + override fun onFailure(t: Exception) = cont.resumeWithException(t) + } + ) + } + } + } + } catch (e: Exception) { + // we only log the error and don't fail the request because if monitor document has been deleted successfully, + // we cannot retry based on this failure + log.error("Failed to delete doc level queries from query index.", e) + } + } + + /** + * Checks if the monitor is part of the workflow + * + * @param monitorId id of monitor that is checked if it is a workflow delegate + */ + suspend fun monitorIsWorkflowDelegate(monitorId: String): Boolean { + val queryBuilder = QueryBuilders.nestedQuery( + WORKFLOW_DELEGATE_PATH, + QueryBuilders.boolQuery().must( + QueryBuilders.matchQuery( + WORKFLOW_MONITOR_PATH, + monitorId + ) + ), + ScoreMode.None + ) + try { + val searchRequest = SearchRequest() + .indices(ScheduledJob.SCHEDULED_JOBS_INDEX) + .source(SearchSourceBuilder().query(queryBuilder)) + + client.threadPool().threadContext.stashContext().use { + val searchResponse: SearchResponse = client.suspendUntil { search(searchRequest, it) } + if (searchResponse.hits.totalHits?.value == 0L) { + return false + } + + val workflowIds = searchResponse.hits.hits.map { it.id }.joinToString() + log.info("Monitor $monitorId can't be deleted since it belongs to $workflowIds") + 
return true + } + } catch (ex: Exception) { + log.error("Error getting the monitor workflows", ex) + throw AlertingException.wrap(ex) + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/settings/AlertingSettings.kt b/alerting/src/main/kotlin/org/opensearch/alerting/settings/AlertingSettings.kt index 1268703c9..743d582e5 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/settings/AlertingSettings.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/settings/AlertingSettings.kt @@ -16,10 +16,8 @@ import java.util.concurrent.TimeUnit class AlertingSettings { companion object { - - const val MONITOR_MAX_INPUTS = 1 - const val MONITOR_MAX_TRIGGERS = 10 const val DEFAULT_MAX_ACTIONABLE_ALERT_COUNT = 50L + const val DEFAULT_FINDINGS_INDEXING_BATCH_SIZE = 1000 val ALERTING_MAX_MONITORS = Setting.intSetting( "plugins.alerting.monitor.max_monitors", @@ -154,6 +152,20 @@ class AlertingSettings { "plugins.alerting.max_actionable_alert_count", DEFAULT_MAX_ACTIONABLE_ALERT_COUNT, -1L, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val REMOTE_MONITORING_ENABLED = Setting.boolSetting( + "plugins.alerting.remote_monitoring_enabled", + false, + Setting.Property.NodeScope, Setting.Property.Dynamic + ) + + val FINDINGS_INDEXING_BATCH_SIZE = Setting.intSetting( + "plugins.alerting.alert_findings_indexing_batch_size", + DEFAULT_FINDINGS_INDEXING_BATCH_SIZE, + 1, Setting.Property.NodeScope, Setting.Property.Dynamic ) } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/settings/DestinationSettings.kt b/alerting/src/main/kotlin/org/opensearch/alerting/settings/DestinationSettings.kt index 2829f4df8..14086ce68 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/settings/DestinationSettings.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/settings/DestinationSettings.kt @@ -6,10 +6,10 @@ package org.opensearch.alerting.settings import org.opensearch.common.settings.SecureSetting -import org.opensearch.common.settings.SecureString import org.opensearch.common.settings.Setting import org.opensearch.common.settings.Setting.AffixSetting import org.opensearch.common.settings.Settings +import org.opensearch.core.common.settings.SecureString import java.util.function.Function /** diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/settings/LegacyOpenDistroAlertingSettings.kt b/alerting/src/main/kotlin/org/opensearch/alerting/settings/LegacyOpenDistroAlertingSettings.kt index 387b6cec9..84c000150 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/settings/LegacyOpenDistroAlertingSettings.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/settings/LegacyOpenDistroAlertingSettings.kt @@ -17,9 +17,6 @@ class LegacyOpenDistroAlertingSettings { companion object { - const val MONITOR_MAX_INPUTS = 1 - const val MONITOR_MAX_TRIGGERS = 10 - val ALERTING_MAX_MONITORS = Setting.intSetting( "opendistro.alerting.monitor.max_monitors", 1000, diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/settings/LegacyOpenDistroDestinationSettings.kt b/alerting/src/main/kotlin/org/opensearch/alerting/settings/LegacyOpenDistroDestinationSettings.kt index 7b3283561..73bae6463 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/settings/LegacyOpenDistroDestinationSettings.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/settings/LegacyOpenDistroDestinationSettings.kt @@ -7,9 +7,9 @@ package org.opensearch.alerting.settings import org.opensearch.alerting.util.DestinationType import 
org.opensearch.common.settings.SecureSetting -import org.opensearch.common.settings.SecureString import org.opensearch.common.settings.Setting import org.opensearch.common.settings.Settings +import org.opensearch.core.common.settings.SecureString import java.util.function.Function /** diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/settings/SupportedClusterMetricsSettings.kt b/alerting/src/main/kotlin/org/opensearch/alerting/settings/SupportedClusterMetricsSettings.kt index fe568aee7..f71051ea2 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/settings/SupportedClusterMetricsSettings.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/settings/SupportedClusterMetricsSettings.kt @@ -14,15 +14,18 @@ import org.opensearch.action.admin.cluster.state.ClusterStateRequest import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksRequest import org.opensearch.action.admin.indices.recovery.RecoveryRequest -import org.opensearch.alerting.core.model.ClusterMetricsInput -import org.opensearch.alerting.core.model.ClusterMetricsInput.ClusterMetricType +import org.opensearch.alerting.util.clusterMetricsMonitorHelpers.CatIndicesRequestWrapper +import org.opensearch.alerting.util.clusterMetricsMonitorHelpers.CatShardsRequestWrapper import org.opensearch.common.xcontent.XContentHelper import org.opensearch.common.xcontent.json.JsonXContent +import org.opensearch.commons.alerting.model.ClusterMetricsInput +import org.opensearch.commons.alerting.model.ClusterMetricsInput.ClusterMetricType +import org.opensearch.commons.alerting.util.IndexUtils.Companion.supportedClusterMetricsSettings /** * A class that supports storing a unique set of API paths that can be accessed by general users. */ -class SupportedClusterMetricsSettings { +class SupportedClusterMetricsSettings : org.opensearch.commons.alerting.settings.SupportedClusterMetricsSettings { companion object { const val RESOURCE_FILE = "supported_json_payloads.json" @@ -83,12 +86,14 @@ class SupportedClusterMetricsSettings { fun resolveToActionRequest(clusterMetricsInput: ClusterMetricsInput): ActionRequest { val pathParams = clusterMetricsInput.parsePathParams() return when (clusterMetricsInput.clusterMetricType) { + ClusterMetricType.CAT_INDICES -> CatIndicesRequestWrapper(pathParams) ClusterMetricType.CAT_PENDING_TASKS -> PendingClusterTasksRequest() ClusterMetricType.CAT_RECOVERY -> { if (pathParams.isEmpty()) return RecoveryRequest() val pathParamsArray = pathParams.split(",").toTypedArray() return RecoveryRequest(*pathParamsArray) } + ClusterMetricType.CAT_SHARDS -> CatShardsRequestWrapper(pathParams) ClusterMetricType.CAT_SNAPSHOTS -> { return GetSnapshotsRequest(pathParams, arrayOf(GetSnapshotsRequest.ALL_SNAPSHOTS)) } @@ -131,9 +136,17 @@ class SupportedClusterMetricsSettings { * @param clusterMetricsInput The [ClusterMetricsInput] to validate. * @throws IllegalArgumentException when supportedApiList does not contain the provided path. 
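     * For example, a [ClusterMetricsInput] whose metric type resolves to the default path "_cluster/health"
     * passes only if that path appears as a key in [RESOURCE_FILE] (illustrative path; the file lists the supported APIs).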
*/ - fun validateApiType(clusterMetricsInput: ClusterMetricsInput) { + fun validateApiTyped(clusterMetricsInput: ClusterMetricsInput) { if (!supportedApiList.keys.contains(clusterMetricsInput.clusterMetricType.defaultPath)) throw IllegalArgumentException("API path not in supportedApiList.") } } + + constructor() { + supportedClusterMetricsSettings = this + } + + override fun validateApiType(clusterMetricsInput: ClusterMetricsInput) { + validateApiTyped(clusterMetricsInput) + } } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/SecureTransportAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/SecureTransportAction.kt index d5303fc18..19e59bd1e 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/SecureTransportAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/SecureTransportAction.kt @@ -7,14 +7,14 @@ package org.opensearch.alerting.transport import org.apache.logging.log4j.LogManager import org.opensearch.OpenSearchStatusException -import org.opensearch.action.ActionListener import org.opensearch.alerting.settings.AlertingSettings import org.opensearch.alerting.util.AlertingException import org.opensearch.client.Client import org.opensearch.cluster.service.ClusterService import org.opensearch.commons.ConfigConstants import org.opensearch.commons.authuser.User -import org.opensearch.rest.RestStatus +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus private val log = LogManager.getLogger(SecureTransportAction::class.java) @@ -61,7 +61,7 @@ interface SecureTransportAction { /** * 'all_access' role users are treated as admins. */ - private fun isAdmin(user: User?): Boolean { + fun isAdmin(user: User?): Boolean { return when { user == null -> { false diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt index e0e4be1d9..aa65c6826 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt @@ -9,7 +9,8 @@ import kotlinx.coroutines.CoroutineScope import kotlinx.coroutines.Dispatchers import kotlinx.coroutines.launch import org.apache.logging.log4j.LogManager -import org.opensearch.action.ActionListener +import org.opensearch.ResourceNotFoundException +import org.opensearch.action.ActionRequest import org.opensearch.action.bulk.BulkRequest import org.opensearch.action.bulk.BulkResponse import org.opensearch.action.delete.DeleteRequest @@ -19,31 +20,39 @@ import org.opensearch.action.search.SearchResponse import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.HandledTransportAction import org.opensearch.action.update.UpdateRequest -import org.opensearch.alerting.action.AcknowledgeAlertAction -import org.opensearch.alerting.action.AcknowledgeAlertRequest -import org.opensearch.alerting.action.AcknowledgeAlertResponse -import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.model.Alert -import org.opensearch.alerting.opensearchapi.optionalTimeField import org.opensearch.alerting.opensearchapi.suspendUntil import org.opensearch.alerting.settings.AlertingSettings import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.use import org.opensearch.client.Client import 
org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject import org.opensearch.common.settings.Settings import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.XContentFactory import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.action.AcknowledgeAlertRequest +import org.opensearch.commons.alerting.action.AcknowledgeAlertResponse +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.GetMonitorRequest +import org.opensearch.commons.alerting.action.GetMonitorResponse +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.util.optionalTimeField +import org.opensearch.commons.utils.recreateObject +import org.opensearch.core.action.ActionListener +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils import org.opensearch.index.query.QueryBuilders +import org.opensearch.rest.RestRequest import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.search.fetch.subphase.FetchSourceContext import org.opensearch.tasks.Task import org.opensearch.transport.TransportService import java.time.Instant +import java.util.Locale private val log = LogManager.getLogger(TransportAcknowledgeAlertAction::class.java) private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) @@ -54,21 +63,53 @@ class TransportAcknowledgeAlertAction @Inject constructor( clusterService: ClusterService, actionFilters: ActionFilters, val settings: Settings, - val xContentRegistry: NamedXContentRegistry -) : HandledTransportAction( - AcknowledgeAlertAction.NAME, transportService, actionFilters, ::AcknowledgeAlertRequest + val xContentRegistry: NamedXContentRegistry, + val transportGetMonitorAction: TransportGetMonitorAction +) : HandledTransportAction( + AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_NAME, transportService, actionFilters, ::AcknowledgeAlertRequest ) { - @Volatile private var isAlertHistoryEnabled = AlertingSettings.ALERT_HISTORY_ENABLED.get(settings) + @Volatile + private var isAlertHistoryEnabled = AlertingSettings.ALERT_HISTORY_ENABLED.get(settings) init { clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.ALERT_HISTORY_ENABLED) { isAlertHistoryEnabled = it } } - override fun doExecute(task: Task, request: AcknowledgeAlertRequest, actionListener: ActionListener) { + override fun doExecute( + task: Task, + acknowledgeAlertRequest: ActionRequest, + actionListener: ActionListener + ) { + val request = acknowledgeAlertRequest as? 
AcknowledgeAlertRequest + ?: recreateObject(acknowledgeAlertRequest) { AcknowledgeAlertRequest(it) } client.threadPool().threadContext.stashContext().use { scope.launch { - AcknowledgeHandler(client, actionListener, request).start() + val getMonitorResponse: GetMonitorResponse = + transportGetMonitorAction.client.suspendUntil { + val getMonitorRequest = GetMonitorRequest( + monitorId = request.monitorId, + -3L, + RestRequest.Method.GET, + FetchSourceContext.FETCH_SOURCE + ) + execute(AlertingActions.GET_MONITOR_ACTION_TYPE, getMonitorRequest, it) + } + if (getMonitorResponse.monitor == null) { + actionListener.onFailure( + AlertingException.wrap( + ResourceNotFoundException( + String.format( + Locale.ROOT, + "No monitor found with id [%s]", + request.monitorId + ) + ) + ) + ) + } else { + AcknowledgeHandler(client, actionListener, request).start(getMonitorResponse.monitor!!) + } } } } @@ -80,14 +121,14 @@ class TransportAcknowledgeAlertAction @Inject constructor( ) { val alerts = mutableMapOf() - suspend fun start() = findActiveAlerts() + suspend fun start(monitor: Monitor) = findActiveAlerts(monitor) - private suspend fun findActiveAlerts() { + private suspend fun findActiveAlerts(monitor: Monitor) { val queryBuilder = QueryBuilders.boolQuery() .filter(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, request.monitorId)) .filter(QueryBuilders.termsQuery("_id", request.alertIds)) val searchRequest = SearchRequest() - .indices(AlertIndices.ALERT_INDEX) + .indices(monitor.dataSources.alertsIndex) .routing(request.monitorId) .source( SearchSourceBuilder() @@ -98,13 +139,14 @@ class TransportAcknowledgeAlertAction @Inject constructor( ) try { val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } - onSearchResponse(searchResponse) + onSearchResponse(searchResponse, monitor) } catch (t: Exception) { actionListener.onFailure(AlertingException.wrap(t)) } } - private suspend fun onSearchResponse(response: SearchResponse) { + private suspend fun onSearchResponse(response: SearchResponse, monitor: Monitor) { + val alertsHistoryIndex = monitor.dataSources.alertsHistoryIndex val updateRequests = mutableListOf() val copyRequests = mutableListOf() response.hits.forEach { hit -> @@ -117,8 +159,11 @@ class TransportAcknowledgeAlertAction @Inject constructor( alerts[alert.id] = alert if (alert.state == Alert.State.ACTIVE) { - if (alert.findingIds.isEmpty() || !isAlertHistoryEnabled) { - val updateRequest = UpdateRequest(AlertIndices.ALERT_INDEX, alert.id) + if ( + alert.findingIds.isEmpty() || + !isAlertHistoryEnabled + ) { + val updateRequest = UpdateRequest(monitor.dataSources.alertsIndex, alert.id) .routing(request.monitorId) .setIfSeqNo(hit.seqNo) .setIfPrimaryTerm(hit.primaryTerm) @@ -130,7 +175,7 @@ class TransportAcknowledgeAlertAction @Inject constructor( ) updateRequests.add(updateRequest) } else { - val copyRequest = IndexRequest(AlertIndices.ALERT_HISTORY_WRITE_INDEX) + val copyRequest = IndexRequest(alertsHistoryIndex) .routing(request.monitorId) .id(alert.id) .source( @@ -153,14 +198,14 @@ class TransportAcknowledgeAlertAction @Inject constructor( client.bulk(BulkRequest().add(copyRequests).setRefreshPolicy(request.refreshPolicy), it) } else null - onBulkResponse(updateResponse, copyResponse) + onBulkResponse(updateResponse, copyResponse, monitor) } catch (t: Exception) { log.error("ack error: ${t.message}") actionListener.onFailure(AlertingException.wrap(t)) } } - private suspend fun onBulkResponse(updateResponse: BulkResponse?, copyResponse: 
BulkResponse?) { + private suspend fun onBulkResponse(updateResponse: BulkResponse?, copyResponse: BulkResponse?, monitor: Monitor) { val deleteRequests = mutableListOf() val missing = request.alertIds.toMutableSet() val acknowledged = mutableListOf() @@ -189,7 +234,7 @@ class TransportAcknowledgeAlertAction @Inject constructor( log.info("got a failureResponse: ${item.failureMessage}") failed.add(alerts[item.id]!!) } else { - val deleteRequest = DeleteRequest(AlertIndices.ALERT_INDEX, item.id) + val deleteRequest = DeleteRequest(monitor.dataSources.alertsIndex, item.id) .routing(request.monitorId) deleteRequests.add(deleteRequest) } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeChainedAlertAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeChainedAlertAction.kt new file mode 100644 index 000000000..9d51ee6d2 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportAcknowledgeChainedAlertAction.kt @@ -0,0 +1,297 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.launch +import org.apache.logging.log4j.LogManager +import org.opensearch.OpenSearchStatusException +import org.opensearch.ResourceNotFoundException +import org.opensearch.action.ActionRequest +import org.opensearch.action.bulk.BulkRequest +import org.opensearch.action.bulk.BulkResponse +import org.opensearch.action.delete.DeleteRequest +import org.opensearch.action.get.GetRequest +import org.opensearch.action.get.GetResponse +import org.opensearch.action.index.IndexRequest +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.HandledTransportAction +import org.opensearch.action.support.WriteRequest +import org.opensearch.action.update.UpdateRequest +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.ScheduledJobUtils +import org.opensearch.alerting.util.use +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentFactory +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.action.AcknowledgeAlertResponse +import org.opensearch.commons.alerting.action.AcknowledgeChainedAlertRequest +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.CompositeInput +import org.opensearch.commons.alerting.model.DataSources +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.commons.alerting.util.optionalTimeField +import org.opensearch.commons.utils.recreateObject +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import 
org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import org.opensearch.index.query.QueryBuilders +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService +import java.time.Instant +import java.util.Locale + +private val log = LogManager.getLogger(TransportAcknowledgeChainedAlertAction::class.java) +private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) + +class TransportAcknowledgeChainedAlertAction @Inject constructor( + transportService: TransportService, + val client: Client, + clusterService: ClusterService, + actionFilters: ActionFilters, + val settings: Settings, + val xContentRegistry: NamedXContentRegistry, +) : HandledTransportAction( + AlertingActions.ACKNOWLEDGE_CHAINED_ALERTS_ACTION_NAME, + transportService, + actionFilters, + ::AcknowledgeChainedAlertRequest +) { + @Volatile + private var isAlertHistoryEnabled = AlertingSettings.ALERT_HISTORY_ENABLED.get(settings) + + init { + clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.ALERT_HISTORY_ENABLED) { isAlertHistoryEnabled = it } + } + + override fun doExecute( + task: Task, + AcknowledgeChainedAlertRequest: ActionRequest, + actionListener: ActionListener, + ) { + val request = AcknowledgeChainedAlertRequest as? AcknowledgeChainedAlertRequest + ?: recreateObject(AcknowledgeChainedAlertRequest) { AcknowledgeChainedAlertRequest(it) } + client.threadPool().threadContext.stashContext().use { + scope.launch { + try { + val getResponse = getWorkflow(request.workflowId) + if (getResponse.isExists == false) { + actionListener.onFailure( + AlertingException.wrap( + ResourceNotFoundException( + String.format( + Locale.ROOT, + "No workflow found with id [%s]", + request.workflowId + ) + ) + ) + ) + } else { + val workflow = ScheduledJobUtils.parseWorkflowFromScheduledJobDocSource(xContentRegistry, getResponse) + AcknowledgeHandler(client, actionListener, request).start(workflow = workflow) + } + } catch (e: Exception) { + log.error("Failed to acknowledge chained alerts from request $request", e) + actionListener.onFailure(AlertingException.wrap(e)) + } + } + } + } + + private suspend fun getWorkflow(workflowId: String): GetResponse { + return client.suspendUntil { client.get(GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, workflowId), it) } + } + + inner class AcknowledgeHandler( + private val client: Client, + private val actionListener: ActionListener, + private val request: AcknowledgeChainedAlertRequest, + ) { + val alerts = mutableMapOf() + + suspend fun start(workflow: Workflow) = findActiveAlerts(workflow) + + private suspend fun findActiveAlerts(workflow: Workflow) { + try { + val queryBuilder = QueryBuilders.boolQuery() + .must( + QueryBuilders.wildcardQuery("workflow_id", request.workflowId) + ) + .must(QueryBuilders.termsQuery("_id", request.alertIds)) + if (workflow.inputs.isEmpty() || (workflow.inputs[0] is CompositeInput) == false) { + actionListener.onFailure( + OpenSearchStatusException("Workflow ${workflow.id} is invalid", RestStatus.INTERNAL_SERVER_ERROR) + ) + return + } + val compositeInput = workflow.inputs[0] as CompositeInput + val workflowId = compositeInput.sequence.delegates[0].monitorId + val dataSources: DataSources = getDataSources(workflowId) + val searchRequest = SearchRequest() + .indices(dataSources.alertsIndex) + .routing(request.workflowId) + .source( + SearchSourceBuilder() + .query(queryBuilder) + .version(true) + 
.seqNoAndPrimaryTerm(true) + .size(request.alertIds.size) + ) + + val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } + onSearchResponse(searchResponse, workflow, dataSources) + } catch (t: Exception) { + log.error("Failed to acknowledge chained alert ${request.alertIds} for workflow ${request.workflowId}", t) + actionListener.onFailure(AlertingException.wrap(t)) + } + } + + private suspend fun getDataSources(monitorId: String): DataSources { + val getResponse: GetResponse = client.suspendUntil { client.get(GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, monitorId), it) } + return ScheduledJobUtils.parseMonitorFromScheduledJobDocSource(xContentRegistry, getResponse).dataSources + } + + private suspend fun onSearchResponse(response: SearchResponse, workflow: Workflow, dataSources: DataSources) { + val alertsHistoryIndex = dataSources.alertsHistoryIndex + val updateRequests = mutableListOf() + val copyRequests = mutableListOf() + response.hits.forEach { hit -> + val xcp = XContentHelper.createParser( + xContentRegistry, + LoggingDeprecationHandler.INSTANCE, + hit.sourceRef, + XContentType.JSON + ) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + val alert = Alert.parse(xcp, hit.id, hit.version) + alerts[alert.id] = alert + + if (alert.state == Alert.State.ACTIVE) { + if ( + alert.findingIds.isEmpty() || + !isAlertHistoryEnabled + ) { + val updateRequest = UpdateRequest(dataSources.alertsIndex, alert.id) + .routing(request.workflowId) + .setIfSeqNo(hit.seqNo) + .setIfPrimaryTerm(hit.primaryTerm) + .doc( + XContentFactory.jsonBuilder().startObject() + .field(Alert.STATE_FIELD, Alert.State.ACKNOWLEDGED.toString()) + .optionalTimeField(Alert.ACKNOWLEDGED_TIME_FIELD, Instant.now()) + .endObject() + ) + updateRequests.add(updateRequest) + } else { + val copyRequest = IndexRequest(alertsHistoryIndex) + .routing(request.workflowId) + .id(alert.id) + .source( + alert.copy(state = Alert.State.ACKNOWLEDGED, acknowledgedTime = Instant.now()) + .toXContentWithUser(XContentFactory.jsonBuilder()) + ) + copyRequests.add(copyRequest) + } + } + } + + try { + val updateResponse: BulkResponse? = if (updateRequests.isNotEmpty()) { + client.suspendUntil { + client.bulk(BulkRequest().add(updateRequests).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), it) + } + } else null + val copyResponse: BulkResponse? = if (copyRequests.isNotEmpty()) { + client.suspendUntil { + client.bulk(BulkRequest().add(copyRequests).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), it) + } + } else null + onBulkResponse(updateResponse, copyResponse, dataSources) + } catch (t: Exception) { + log.error("Failed to acknowledge chained alert ${request.alertIds} for workflow ${request.workflowId}", t) + actionListener.onFailure(AlertingException.wrap(t)) + } + } + + private suspend fun onBulkResponse(updateResponse: BulkResponse?, copyResponse: BulkResponse?, dataSources: DataSources) { + val deleteRequests = mutableListOf() + val acknowledged = mutableListOf() + val missing = request.alertIds.toMutableSet() + val failed = mutableListOf() + + alerts.values.forEach { + if (it.state != Alert.State.ACTIVE) { + missing.remove(it.id) + failed.add(it) + } + } + + updateResponse?.items?.forEach { item -> + missing.remove(item.id) + if (item.isFailed) { + failed.add(alerts[item.id]!!) + } else { + acknowledged.add(alerts[item.id]!!) 
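+                    // Bookkeeping note: every requested alert id starts in `missing`; an id whose update succeeds
+                    // moves to `acknowledged`, a failed item moves to `failed`, and ids absent from every response
+                    // remain in `missing` for the final AcknowledgeAlertResponse.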
+ } + } + + copyResponse?.items?.forEach { item -> + log.info("got a copyResponse: $item") + missing.remove(item.id) + if (item.isFailed) { + log.info("got a failureResponse: ${item.failureMessage}") + failed.add(alerts[item.id]!!) + } else { + val deleteRequest = DeleteRequest(dataSources.alertsIndex, item.id) + .routing(request.workflowId) + deleteRequests.add(deleteRequest) + } + } + + if (deleteRequests.isNotEmpty()) { + try { + val deleteResponse: BulkResponse = client.suspendUntil { + client.bulk(BulkRequest().add(deleteRequests).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), it) + } + deleteResponse.items.forEach { item -> + missing.remove(item.id) + if (item.isFailed) { + failed.add(alerts[item.id]!!) + } else { + acknowledged.add(alerts[item.id]!!) + } + } + } catch (t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + return + } + } + actionListener.onResponse( + AcknowledgeAlertResponse( + acknowledged.toList(), + failed.toList(), + missing.toList() + ) + ) + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt index b5ba20b02..820542379 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt @@ -5,19 +5,19 @@ package org.opensearch.alerting.transport +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.launch import org.apache.logging.log4j.LogManager import org.opensearch.OpenSearchStatusException -import org.opensearch.action.ActionListener -import org.opensearch.action.delete.DeleteRequest -import org.opensearch.action.delete.DeleteResponse +import org.opensearch.action.ActionRequest import org.opensearch.action.get.GetRequest import org.opensearch.action.get.GetResponse import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.HandledTransportAction -import org.opensearch.alerting.action.DeleteMonitorAction -import org.opensearch.alerting.action.DeleteMonitorRequest -import org.opensearch.alerting.core.model.ScheduledJob -import org.opensearch.alerting.model.Monitor +import org.opensearch.action.support.WriteRequest.RefreshPolicy +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.service.DeleteMonitorService import org.opensearch.alerting.settings.AlertingSettings import org.opensearch.alerting.util.AlertingException import org.opensearch.client.Client @@ -25,19 +25,22 @@ import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject import org.opensearch.common.settings.Settings import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.XContentHelper import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.DeleteMonitorRequest +import org.opensearch.commons.alerting.action.DeleteMonitorResponse +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob import org.opensearch.commons.authuser.User -import org.opensearch.index.query.QueryBuilders -import org.opensearch.index.reindex.BulkByScrollResponse -import 
org.opensearch.index.reindex.DeleteByQueryAction -import org.opensearch.index.reindex.DeleteByQueryRequestBuilder -import org.opensearch.rest.RestStatus +import org.opensearch.commons.utils.recreateObject +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.tasks.Task import org.opensearch.transport.TransportService -import java.io.IOException +private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) private val log = LogManager.getLogger(TransportDeleteMonitorAction::class.java) class TransportDeleteMonitorAction @Inject constructor( @@ -47,8 +50,8 @@ class TransportDeleteMonitorAction @Inject constructor( val clusterService: ClusterService, settings: Settings, val xContentRegistry: NamedXContentRegistry -) : HandledTransportAction( - DeleteMonitorAction.NAME, transportService, actionFilters, ::DeleteMonitorRequest +) : HandledTransportAction( + AlertingActions.DELETE_MONITOR_ACTION_NAME, transportService, actionFilters, ::DeleteMonitorRequest ), SecureTransportAction { @@ -58,140 +61,76 @@ class TransportDeleteMonitorAction @Inject constructor( listenFilterBySettingChange(clusterService) } - override fun doExecute(task: Task, request: DeleteMonitorRequest, actionListener: ActionListener) { + override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener) { + val transformedRequest = request as? DeleteMonitorRequest + ?: recreateObject(request) { DeleteMonitorRequest(it) } val user = readUserFromThreadContext(client) - val deleteRequest = DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, request.monitorId) - .setRefreshPolicy(request.refreshPolicy) if (!validateUserBackendRoles(user, actionListener)) { return } - client.threadPool().threadContext.stashContext().use { - DeleteMonitorHandler(client, actionListener, deleteRequest, user, request.monitorId).resolveUserAndStart() + scope.launch { + DeleteMonitorHandler( + client, + actionListener, + user, + transformedRequest.monitorId + ).resolveUserAndStart(transformedRequest.refreshPolicy) } } inner class DeleteMonitorHandler( private val client: Client, - private val actionListener: ActionListener, - private val deleteRequest: DeleteRequest, + private val actionListener: ActionListener, private val user: User?, private val monitorId: String ) { - - fun resolveUserAndStart() { - if (user == null) { - // Security is disabled, so we can delete the destination without issues - deleteMonitor() - } else if (!doFilterForUser(user)) { - // security is enabled and filterby is disabled. 
- deleteMonitor() - } else { - try { - start() - } catch (ex: IOException) { - actionListener.onFailure(AlertingException.wrap(ex)) - } - } - } - - fun start() { - val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, monitorId) - client.get( - getRequest, - object : ActionListener { - override fun onResponse(response: GetResponse) { - if (!response.isExists) { - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException("Monitor with $monitorId is not found", RestStatus.NOT_FOUND) - ) - ) - return - } - val xcp = XContentHelper.createParser( - xContentRegistry, LoggingDeprecationHandler.INSTANCE, - response.sourceAsBytesRef, XContentType.JSON + suspend fun resolveUserAndStart(refreshPolicy: RefreshPolicy) { + try { + val monitor = getMonitor() + + val canDelete = user == null || !doFilterForUser(user) || + checkUserPermissionsWithResource(user, monitor.user, actionListener, "monitor", monitorId) + + if (DeleteMonitorService.monitorIsWorkflowDelegate(monitor.id)) { + actionListener.onFailure( + AlertingException( + "Monitor can't be deleted because it is a part of workflow(s)", + RestStatus.FORBIDDEN, + IllegalStateException() ) - val monitor = ScheduledJob.parse(xcp, response.id, response.version) as Monitor - onGetResponse(monitor) - } - override fun onFailure(t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } + ) + } else if (canDelete) { + actionListener.onResponse( + DeleteMonitorService.deleteMonitor(monitor, refreshPolicy) + ) + } else { + actionListener.onFailure( + AlertingException("Not allowed to delete this monitor!", RestStatus.FORBIDDEN, IllegalStateException()) + ) } - ) - } - - private fun onGetResponse(monitor: Monitor) { - if (!checkUserPermissionsWithResource(user, monitor.user, actionListener, "monitor", monitorId)) { - return - } else { - deleteMonitor() + } catch (t: Exception) { + log.error("Failed to delete monitor $monitorId", t) + actionListener.onFailure(AlertingException.wrap(t)) } } - private fun deleteMonitor() { - client.delete( - deleteRequest, - object : ActionListener { - override fun onResponse(response: DeleteResponse) { - val clusterState = clusterService.state() - if (clusterState.routingTable.hasIndex(ScheduledJob.DOC_LEVEL_QUERIES_INDEX)) { - deleteDocLevelMonitorQueries() - } - deleteMetadata() - - actionListener.onResponse(response) - } - - override fun onFailure(t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - ) - } - - private fun deleteMetadata() { + private suspend fun getMonitor(): Monitor { val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, monitorId) - client.get( - getRequest, - object : ActionListener { - override fun onResponse(response: GetResponse) { - if (response.isExists) { - val deleteMetadataRequest = DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, "$monitorId") - .setRefreshPolicy(deleteRequest.refreshPolicy) - client.delete( - deleteMetadataRequest, - object : ActionListener { - override fun onResponse(response: DeleteResponse) { - } - override fun onFailure(t: Exception) { - } - } - ) - } - } - override fun onFailure(t: Exception) { - } - } - ) - } - - private fun deleteDocLevelMonitorQueries() { - DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE) - .source(ScheduledJob.DOC_LEVEL_QUERIES_INDEX) - .filter(QueryBuilders.matchQuery("monitor_id", monitorId)) - .execute( - object : ActionListener { - override fun onResponse(response: BulkByScrollResponse) { - } - - override fun onFailure(t: Exception) { - } - } + val 
getResponse: GetResponse = client.suspendUntil { get(getRequest, it) }
+            if (!getResponse.isExists) {
+                throw AlertingException.wrap(
+                    OpenSearchStatusException("Monitor with $monitorId is not found", RestStatus.NOT_FOUND)
+                )
+            }
+            val xcp = XContentHelper.createParser(
+                xContentRegistry, LoggingDeprecationHandler.INSTANCE,
+                getResponse.sourceAsBytesRef, XContentType.JSON
+            )
+            return ScheduledJob.parse(xcp, getResponse.id, getResponse.version) as Monitor
+        }
+    }
+}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportDeleteWorkflowAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportDeleteWorkflowAction.kt
new file mode 100644
index 000000000..5a9938f56
--- /dev/null
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportDeleteWorkflowAction.kt
@@ -0,0 +1,337 @@
+
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.transport
+
+import kotlinx.coroutines.CoroutineScope
+import kotlinx.coroutines.Dispatchers
+import kotlinx.coroutines.launch
+import org.apache.logging.log4j.LogManager
+import org.apache.lucene.search.join.ScoreMode
+import org.opensearch.OpenSearchException
+import org.opensearch.OpenSearchStatusException
+import org.opensearch.action.ActionRequest
+import org.opensearch.action.delete.DeleteRequest
+import org.opensearch.action.delete.DeleteResponse
+import org.opensearch.action.get.GetRequest
+import org.opensearch.action.get.GetResponse
+import org.opensearch.action.search.SearchRequest
+import org.opensearch.action.search.SearchResponse
+import org.opensearch.action.support.ActionFilters
+import org.opensearch.action.support.HandledTransportAction
+import org.opensearch.action.support.WriteRequest.RefreshPolicy
+import org.opensearch.alerting.model.MonitorMetadata
+import org.opensearch.alerting.model.WorkflowMetadata
+import org.opensearch.alerting.opensearchapi.addFilter
+import org.opensearch.alerting.opensearchapi.suspendUntil
+import org.opensearch.alerting.service.DeleteMonitorService
+import org.opensearch.alerting.settings.AlertingSettings
+import org.opensearch.alerting.util.AlertingException
+import org.opensearch.alerting.util.ScheduledJobUtils.Companion.WORKFLOW_DELEGATE_PATH
+import org.opensearch.alerting.util.ScheduledJobUtils.Companion.WORKFLOW_MONITOR_PATH
+import org.opensearch.client.Client
+import org.opensearch.cluster.service.ClusterService
+import org.opensearch.common.inject.Inject
+import org.opensearch.common.settings.Settings
+import org.opensearch.common.xcontent.LoggingDeprecationHandler
+import org.opensearch.common.xcontent.XContentHelper
+import org.opensearch.common.xcontent.XContentType
+import org.opensearch.commons.alerting.action.AlertingActions
+import org.opensearch.commons.alerting.action.DeleteWorkflowRequest
+import org.opensearch.commons.alerting.action.DeleteWorkflowResponse
+import org.opensearch.commons.alerting.model.CompositeInput
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.commons.alerting.model.ScheduledJob
+import org.opensearch.commons.alerting.model.Workflow
+import org.opensearch.commons.authuser.User
+import org.opensearch.commons.utils.recreateObject
+import org.opensearch.core.action.ActionListener
+import org.opensearch.core.rest.RestStatus
+import org.opensearch.core.xcontent.NamedXContentRegistry
+import org.opensearch.core.xcontent.XContentParser
+import org.opensearch.index.IndexNotFoundException
+import org.opensearch.index.query.QueryBuilders
+import org.opensearch.index.reindex.BulkByScrollResponse
+import org.opensearch.index.reindex.DeleteByQueryAction
+import org.opensearch.index.reindex.DeleteByQueryRequestBuilder
+import org.opensearch.search.builder.SearchSourceBuilder
+import org.opensearch.tasks.Task
+import org.opensearch.transport.TransportService
+
+private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO)
+/**
+ * Transport class that deletes the workflow.
+ * If the deleteDelegateMonitors flag is set to true, also deletes the workflow's delegate monitors that are not part of another workflow.
+ */
+class TransportDeleteWorkflowAction @Inject constructor(
+    transportService: TransportService,
+    val client: Client,
+    actionFilters: ActionFilters,
+    val clusterService: ClusterService,
+    val settings: Settings,
+    val xContentRegistry: NamedXContentRegistry,
+) : HandledTransportAction<ActionRequest, DeleteWorkflowResponse>(
+    AlertingActions.DELETE_WORKFLOW_ACTION_NAME, transportService, actionFilters, ::DeleteWorkflowRequest
+),
+    SecureTransportAction {
+    private val log = LogManager.getLogger(javaClass)
+
+    @Volatile override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings)
+
+    init {
+        listenFilterBySettingChange(clusterService)
+    }
+
+    override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener<DeleteWorkflowResponse>) {
+        val transformedRequest = request as? DeleteWorkflowRequest
+            ?: recreateObject(request) { DeleteWorkflowRequest(it) }
+
+        val user = readUserFromThreadContext(client)
+        val deleteRequest = DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, transformedRequest.workflowId)
+            .setRefreshPolicy(RefreshPolicy.IMMEDIATE)
+
+        if (!validateUserBackendRoles(user, actionListener)) {
+            return
+        }
+
+        scope.launch {
+            DeleteWorkflowHandler(
+                client,
+                actionListener,
+                deleteRequest,
+                transformedRequest.deleteDelegateMonitors,
+                user,
+                transformedRequest.workflowId
+            ).resolveUserAndStart()
+        }
+    }
+
+    inner class DeleteWorkflowHandler(
+        private val client: Client,
+        private val actionListener: ActionListener<DeleteWorkflowResponse>,
+        private val deleteRequest: DeleteRequest,
+        private val deleteDelegateMonitors: Boolean?,
+        private val user: User?,
+        private val workflowId: String,
+    ) {
+        suspend fun resolveUserAndStart() {
+            try {
+                val workflow: Workflow = getWorkflow() ?: return
+
+                val canDelete = user == null ||
+                    !doFilterForUser(user) ||
+                    checkUserPermissionsWithResource(
+                        user,
+                        workflow.user,
+                        actionListener,
+                        "workflow",
+                        workflowId
+                    )
+
+                if (canDelete) {
+                    val delegateMonitorIds = (workflow.inputs[0] as CompositeInput).getMonitorIds()
+                    var deletableMonitors = listOf<Monitor>()
+                    // A user can delete the delegate monitors only if all of them can be deleted:
+                    // if any monitor in this workflow is also referenced by another workflow, none of the monitors are deleted.
+                    // We do not partially delete monitors; we delete them all or fail the request.
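+                    // Illustrative example (hypothetical ids): if this workflow delegates monitors [m1, m2] and m2
+                    // is also a delegate of another workflow, only m1 is deletable, so the whole request fails
+                    // rather than deleting m1 alone.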
+                    if (deleteDelegateMonitors == true) {
+                        deletableMonitors = getDeletableDelegates(workflowId, delegateMonitorIds, user)
+                        val monitorsDiff = delegateMonitorIds.toMutableList()
+                        monitorsDiff.removeAll(deletableMonitors.map { it.id })
+
+                        if (monitorsDiff.isNotEmpty()) {
+                            actionListener.onFailure(
+                                AlertingException(
+                                    "Not allowed to delete ${monitorsDiff.joinToString()} monitors",
+                                    RestStatus.FORBIDDEN,
+                                    IllegalStateException()
+                                )
+                            )
+                            return
+                        }
+                    }
+
+                    val deleteResponse = deleteWorkflow(deleteRequest)
+                    val deleteWorkflowResponse = DeleteWorkflowResponse(deleteResponse.id, deleteResponse.version)
+
+                    val workflowMetadataId = WorkflowMetadata.getId(workflow.id)
+
+                    val metadataIdsToDelete = mutableListOf(workflowMetadataId)
+
+                    if (deleteDelegateMonitors == true) {
+                        val failedMonitorIds = tryDeletingMonitors(deletableMonitors, RefreshPolicy.IMMEDIATE)
+                        // Update delete workflow response
+                        deleteWorkflowResponse.nonDeletedMonitors = failedMonitorIds
+                        // Delete monitors workflow metadata
+                        // Monitor metadata will be in workflowId-monitorId-metadata format
+                        metadataIdsToDelete.addAll(deletableMonitors.map { MonitorMetadata.getId(it, workflowMetadataId) })
+                    }
+                    try {
+                        // Delete the monitors workflow metadata
+                        val deleteMonitorWorkflowMetadataResponse: BulkByScrollResponse = client.suspendUntil {
+                            DeleteByQueryRequestBuilder(this, DeleteByQueryAction.INSTANCE)
+                                .source(ScheduledJob.SCHEDULED_JOBS_INDEX)
+                                .filter(QueryBuilders.idsQuery().addIds(*metadataIdsToDelete.toTypedArray()))
+                                .execute(it)
+                        }
+                    } catch (t: Exception) {
+                        log.error("Failed to delete delegate monitor metadata. But proceeding with workflow deletion $workflowId", t)
+                    }
+                    actionListener.onResponse(deleteWorkflowResponse)
+                } else {
+                    actionListener.onFailure(
+                        AlertingException(
+                            "Not allowed to delete this workflow!",
+                            RestStatus.FORBIDDEN,
+                            IllegalStateException()
+                        )
+                    )
+                }
+            } catch (t: Exception) {
+                if (t is IndexNotFoundException) {
+                    actionListener.onFailure(
+                        OpenSearchStatusException(
+                            "Workflow not found.",
+                            RestStatus.NOT_FOUND
+                        )
+                    )
+                } else {
+                    log.error("Failed to delete workflow $workflowId", t)
+                    actionListener.onFailure(AlertingException.wrap(t))
+                }
+            }
+        }
+
+        /**
+         * Tries to delete the given list of monitors. The return value contains the ids of all monitors whose deletion failed.
+         * @param monitors list of monitors to be deleted
+         * @param refreshPolicy refresh policy to use for the delete requests
+         * @return ids of the monitors that were not deleted
+         */
+        private suspend fun tryDeletingMonitors(monitors: List<Monitor>, refreshPolicy: RefreshPolicy): List<String> {
+            val nonDeletedMonitorIds = mutableListOf<String>()
+            for (monitor in monitors) {
+                try {
+                    DeleteMonitorService.deleteMonitor(monitor, refreshPolicy)
+                } catch (ex: Exception) {
+                    log.error("Failed to delete delegate monitor ${monitor.id} for workflow $workflowId", ex)
+                    nonDeletedMonitorIds.add(monitor.id)
+                }
+            }
+            return nonDeletedMonitorIds
+        }
+
+        /**
+         * Returns the list of monitors that belong only to the given workflow.
+         * If filterBy is enabled, it returns only those monitors that the user has permission to delete.
+         * @param workflowIdToBeDeleted Id of the workflow that should be deleted
+         * @param monitorIds List of delegate monitor ids (underlying monitor ids)
+         * @param user the user issuing the delete request, if security is enabled
+         */
+        private suspend fun getDeletableDelegates(workflowIdToBeDeleted: String, monitorIds: List<String>, user: User?): List<Monitor> {
+            // Retrieve workflows, other than the one being deleted, that reference any of the delegate monitors
+            val queryBuilder = QueryBuilders.boolQuery().mustNot(QueryBuilders.termQuery("_id", workflowIdToBeDeleted)).filter(
+                QueryBuilders.nestedQuery(
+                    WORKFLOW_DELEGATE_PATH,
+                    QueryBuilders.boolQuery().must(
+                        QueryBuilders.termsQuery(
+                            WORKFLOW_MONITOR_PATH,
+                            monitorIds
+                        )
+                    ),
+                    ScoreMode.None
+                )
+            )
+
+            val searchRequest = SearchRequest()
+                .indices(ScheduledJob.SCHEDULED_JOBS_INDEX)
+                .source(SearchSourceBuilder().query(queryBuilder))
+
+            val searchResponse: SearchResponse = client.suspendUntil { search(searchRequest, it) }
+
+            val workflows = searchResponse.hits.hits.map { hit ->
+                val xcp = XContentHelper.createParser(
+                    xContentRegistry, LoggingDeprecationHandler.INSTANCE,
+                    hit.sourceRef, XContentType.JSON
+                ).also { it.nextToken() }
+                lateinit var workflow: Workflow
+                while (xcp.nextToken() != XContentParser.Token.END_OBJECT) {
+                    xcp.nextToken()
+                    when (xcp.currentName()) {
+                        "workflow" -> workflow = Workflow.parse(xcp)
+                    }
+                }
+                workflow.copy(id = hit.id, version = hit.version)
+            }
+            val workflowMonitors = workflows.flatMap { (it.inputs[0] as CompositeInput).getMonitorIds() }.distinct()
+            // Monitors that can be deleted -> all workflow delegates - monitors belonging to different workflows
+            val deletableMonitorIds = monitorIds.minus(workflowMonitors.toSet())
+
+            // Filter further to the monitors the user has permission to delete, if filterBy is enabled and the user is not null
+            val query = QueryBuilders.boolQuery().filter(QueryBuilders.termsQuery("_id", deletableMonitorIds))
+            val searchSource = SearchSourceBuilder().query(query)
+            val monitorSearchRequest = SearchRequest(ScheduledJob.SCHEDULED_JOBS_INDEX).source(searchSource)
+
+            if (user != null && filterByEnabled) {
+                addFilter(user, monitorSearchRequest.source(), "monitor.user.backend_roles.keyword")
+            }
+
+            val searchMonitorResponse: SearchResponse = client.suspendUntil { search(monitorSearchRequest, it) }
+            if (searchMonitorResponse.isTimedOut) {
+                throw OpenSearchException("Cannot determine that the ${ScheduledJob.SCHEDULED_JOBS_INDEX} index is healthy")
+            }
+            val deletableMonitors = mutableListOf<Monitor>()
+            for (hit in searchMonitorResponse.hits) {
+                XContentType.JSON.xContent().createParser(
+                    xContentRegistry,
+                    LoggingDeprecationHandler.INSTANCE, hit.sourceAsString
+                ).use { hitsParser ->
+                    val monitor = ScheduledJob.parse(hitsParser, hit.id, hit.version) as Monitor
+                    deletableMonitors.add(monitor)
+                }
+            }
+
+            return deletableMonitors
+        }
+
+        private suspend fun getWorkflow(): Workflow?
{ + val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, workflowId) + + val getResponse: GetResponse = client.suspendUntil { get(getRequest, it) } + if (!getResponse.isExists) { + handleWorkflowMissing() + return null + } + + return parseWorkflow(getResponse) + } + + private fun handleWorkflowMissing() { + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException("Workflow not found.", RestStatus.NOT_FOUND) + ) + ) + } + + private fun parseWorkflow(getResponse: GetResponse): Workflow { + val xcp = XContentHelper.createParser( + xContentRegistry, LoggingDeprecationHandler.INSTANCE, + getResponse.sourceAsBytesRef, XContentType.JSON + ) + return ScheduledJob.parse(xcp, getResponse.id, getResponse.version) as Workflow + } + + private suspend fun deleteWorkflow(deleteRequest: DeleteRequest): DeleteResponse { + log.debug("Deleting the workflow with id ${deleteRequest.id()}") + return client.suspendUntil { delete(deleteRequest, it) } + } + + private suspend fun deleteWorkflowMetadata(workflow: Workflow) { + val deleteRequest = DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, WorkflowMetadata.getId(workflow.id)) + val deleteResponse: DeleteResponse = client.suspendUntil { delete(deleteRequest, it) } + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportExecuteMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportExecuteMonitorAction.kt index 2ea064768..b0de10ff0 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportExecuteMonitorAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportExecuteMonitorAction.kt @@ -11,32 +11,34 @@ import kotlinx.coroutines.launch import kotlinx.coroutines.withContext import org.apache.logging.log4j.LogManager import org.opensearch.OpenSearchStatusException -import org.opensearch.action.ActionListener import org.opensearch.action.get.GetRequest import org.opensearch.action.get.GetResponse import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.HandledTransportAction import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.MonitorMetadataService import org.opensearch.alerting.MonitorRunnerService import org.opensearch.alerting.action.ExecuteMonitorAction import org.opensearch.alerting.action.ExecuteMonitorRequest import org.opensearch.alerting.action.ExecuteMonitorResponse -import org.opensearch.alerting.core.model.ScheduledJob -import org.opensearch.alerting.model.Monitor import org.opensearch.alerting.settings.AlertingSettings import org.opensearch.alerting.util.AlertingException import org.opensearch.alerting.util.DocLevelMonitorQueries +import org.opensearch.alerting.util.use import org.opensearch.client.Client import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject import org.opensearch.common.settings.Settings import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.XContentHelper import org.opensearch.common.xcontent.XContentType import org.opensearch.commons.ConfigConstants +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob import org.opensearch.commons.authuser.User -import org.opensearch.rest.RestStatus +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import 
org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.tasks.Task import org.opensearch.transport.TransportService import java.time.Instant @@ -73,6 +75,10 @@ class TransportExecuteMonitorAction @Inject constructor( val (periodStart, periodEnd) = monitor.schedule.getPeriodEndingAt(Instant.ofEpochMilli(execMonitorRequest.requestEnd.millis)) try { + log.info( + "Executing monitor from API - id: ${monitor.id}, type: ${monitor.monitorType.name}, " + + "periodStart: $periodStart, periodEnd: $periodEnd, dryrun: ${execMonitorRequest.dryrun}" + ) val monitorRunResult = runner.runJob(monitor, periodStart, periodEnd, execMonitorRequest.dryrun) withContext(Dispatchers.IO) { actionListener.onResponse(ExecuteMonitorResponse(monitorRunResult)) @@ -125,13 +131,15 @@ class TransportExecuteMonitorAction @Inject constructor( if (monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { try { scope.launch { - if (!docLevelMonitorQueries.docLevelQueryIndexExists()) { - docLevelMonitorQueries.initDocLevelQueryIndex() + if (!docLevelMonitorQueries.docLevelQueryIndexExists(monitor.dataSources)) { + docLevelMonitorQueries.initDocLevelQueryIndex(monitor.dataSources) log.info("Central Percolation index ${ScheduledJob.DOC_LEVEL_QUERIES_INDEX} created") } + val (metadata, _) = MonitorMetadataService.getOrCreateMetadata(monitor, skipIndex = true) docLevelMonitorQueries.indexDocLevelQueries( monitor, monitor.id, + metadata, WriteRequest.RefreshPolicy.IMMEDIATE, indexTimeout ) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportExecuteWorkflowAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportExecuteWorkflowAction.kt new file mode 100644 index 000000000..037628e9e --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportExecuteWorkflowAction.kt @@ -0,0 +1,132 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.launch +import kotlinx.coroutines.withContext +import org.apache.logging.log4j.LogManager +import org.opensearch.OpenSearchStatusException +import org.opensearch.action.get.GetRequest +import org.opensearch.action.get.GetResponse +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.HandledTransportAction +import org.opensearch.alerting.MonitorRunnerService +import org.opensearch.alerting.action.ExecuteWorkflowAction +import org.opensearch.alerting.action.ExecuteWorkflowRequest +import org.opensearch.alerting.action.ExecuteWorkflowResponse +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.use +import org.opensearch.client.Client +import org.opensearch.common.inject.Inject +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.ConfigConstants +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.commons.authuser.User +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService +import java.time.Instant + +private val log = 
LogManager.getLogger(TransportExecuteWorkflowAction::class.java) + +class TransportExecuteWorkflowAction @Inject constructor( + transportService: TransportService, + private val client: Client, + private val runner: MonitorRunnerService, + actionFilters: ActionFilters, + val xContentRegistry: NamedXContentRegistry +) : HandledTransportAction( + ExecuteWorkflowAction.NAME, transportService, actionFilters, ::ExecuteWorkflowRequest +) { + override fun doExecute( + task: Task, + execWorkflowRequest: ExecuteWorkflowRequest, + actionListener: ActionListener, + ) { + val userStr = client.threadPool().threadContext.getTransient(ConfigConstants.OPENSEARCH_SECURITY_USER_INFO_THREAD_CONTEXT) + log.debug("User and roles string from thread context: $userStr") + val user: User? = User.parse(userStr) + + client.threadPool().threadContext.stashContext().use { + val executeWorkflow = fun(workflow: Workflow) { + runner.launch { + val (periodStart, periodEnd) = + workflow.schedule.getPeriodEndingAt(Instant.ofEpochMilli(execWorkflowRequest.requestEnd.millis)) + try { + log.info( + "Executing workflow from API - id: ${workflow.id}, periodStart: $periodStart, periodEnd: $periodEnd, " + + "dryrun: ${execWorkflowRequest.dryrun}" + ) + val workflowRunResult = + MonitorRunnerService.runJob(workflow, periodStart, periodEnd, execWorkflowRequest.dryrun) + withContext(Dispatchers.IO, { + actionListener.onResponse( + ExecuteWorkflowResponse( + workflowRunResult + ) + ) + }) + } catch (e: Exception) { + log.error("Unexpected error running workflow", e) + withContext(Dispatchers.IO) { + actionListener.onFailure(AlertingException.wrap(e)) + } + } + } + } + + if (execWorkflowRequest.workflowId != null) { + val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX).id(execWorkflowRequest.workflowId) + client.get( + getRequest, + object : ActionListener { + override fun onResponse(response: GetResponse) { + if (!response.isExists) { + log.error("Can't find workflow with id: ${response.id}") + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "Can't find workflow with id: ${response.id}", + RestStatus.NOT_FOUND + ) + ) + ) + return + } + if (!response.isSourceEmpty) { + XContentHelper.createParser( + xContentRegistry, LoggingDeprecationHandler.INSTANCE, + response.sourceAsBytesRef, XContentType.JSON + ).use { xcp -> + val workflow = ScheduledJob.parse(xcp, response.id, response.version) as Workflow + executeWorkflow(workflow) + } + } + } + + override fun onFailure(t: Exception) { + log.error("Error getting workflow ${execWorkflowRequest.workflowId}", t) + actionListener.onFailure(AlertingException.wrap(t)) + } + } + ) + } else { + val workflow = when (user?.name.isNullOrEmpty()) { + true -> execWorkflowRequest.workflow as Workflow + false -> (execWorkflowRequest.workflow as Workflow).copy(user = user) + } + + executeWorkflow(workflow) + } + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetAlertsAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetAlertsAction.kt index 9c25cb1aa..11eda858a 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetAlertsAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetAlertsAction.kt @@ -5,31 +5,43 @@ package org.opensearch.alerting.transport +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.launch import org.apache.logging.log4j.LogManager -import 
org.opensearch.action.ActionListener +import org.opensearch.action.ActionRequest +import org.opensearch.action.get.GetRequest +import org.opensearch.action.get.GetResponse import org.opensearch.action.search.SearchRequest import org.opensearch.action.search.SearchResponse import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.HandledTransportAction -import org.opensearch.alerting.action.GetAlertsAction -import org.opensearch.alerting.action.GetAlertsRequest -import org.opensearch.alerting.action.GetAlertsResponse import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.model.Alert import org.opensearch.alerting.opensearchapi.addFilter +import org.opensearch.alerting.opensearchapi.suspendUntil import org.opensearch.alerting.settings.AlertingSettings import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.use import org.opensearch.client.Client import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject import org.opensearch.common.settings.Settings import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.GetAlertsRequest +import org.opensearch.commons.alerting.action.GetAlertsResponse +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob import org.opensearch.commons.authuser.User +import org.opensearch.commons.utils.recreateObject +import org.opensearch.core.action.ActionListener +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import org.opensearch.index.query.BoolQueryBuilder import org.opensearch.index.query.Operator import org.opensearch.index.query.QueryBuilders import org.opensearch.search.builder.SearchSourceBuilder @@ -40,6 +52,7 @@ import org.opensearch.transport.TransportService import java.io.IOException private val log = LogManager.getLogger(TransportGetAlertsAction::class.java) +private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) class TransportGetAlertsAction @Inject constructor( transportService: TransportService, @@ -47,13 +60,17 @@ class TransportGetAlertsAction @Inject constructor( clusterService: ClusterService, actionFilters: ActionFilters, val settings: Settings, - val xContentRegistry: NamedXContentRegistry -) : HandledTransportAction( - GetAlertsAction.NAME, transportService, actionFilters, ::GetAlertsRequest + val xContentRegistry: NamedXContentRegistry, +) : HandledTransportAction( + AlertingActions.GET_ALERTS_ACTION_NAME, + transportService, + actionFilters, + ::GetAlertsRequest ), SecureTransportAction { - @Volatile override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) + @Volatile + override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) init { listenFilterBySettingChange(clusterService) @@ -61,9 +78,11 @@ class TransportGetAlertsAction @Inject constructor( override fun doExecute( task: Task, - getAlertsRequest: GetAlertsRequest, - 
actionListener: ActionListener + request: ActionRequest, + actionListener: ActionListener, ) { + val getAlertsRequest = request as? GetAlertsRequest + ?: recreateObject(request) { GetAlertsRequest(it) } val user = readUserFromThreadContext(client) val tableProp = getAlertsRequest.table @@ -76,16 +95,38 @@ class TransportGetAlertsAction @Inject constructor( val queryBuilder = QueryBuilders.boolQuery() - if (getAlertsRequest.severityLevel != "ALL") + if (getAlertsRequest.severityLevel != "ALL") { queryBuilder.filter(QueryBuilders.termQuery("severity", getAlertsRequest.severityLevel)) + } - if (getAlertsRequest.alertState != "ALL") + if (getAlertsRequest.alertState == "ALL") { + // alerting dashboards expects chained alerts and individually executed monitors' alerts to be returned from this api + // when invoked with state=ALL. They require that audit alerts are NOT returned in this page + // and only be shown in "associated alerts" field under get workflow_alerts API. + // But if the API is called with query_params: state=AUDIT,monitor_id=<123>,workflow_id=, this api + // will return audit alerts generated by delegate monitor <123> in workflow + queryBuilder.filter(QueryBuilders.boolQuery().mustNot(QueryBuilders.termsQuery(Alert.STATE_FIELD, Alert.State.AUDIT.name))) + } else { queryBuilder.filter(QueryBuilders.termQuery("state", getAlertsRequest.alertState)) + } + + if (getAlertsRequest.alertIds.isNullOrEmpty() == false) { + queryBuilder.filter(QueryBuilders.termsQuery("_id", getAlertsRequest.alertIds)) + } if (getAlertsRequest.monitorId != null) { queryBuilder.filter(QueryBuilders.termQuery("monitor_id", getAlertsRequest.monitorId)) + addWorkflowIdNullOrEmptyCheck(getAlertsRequest, queryBuilder) + } else if (getAlertsRequest.monitorIds.isNullOrEmpty() == false) { + queryBuilder.filter(QueryBuilders.termsQuery("monitor_id", getAlertsRequest.monitorIds)) + addWorkflowIdNullOrEmptyCheck(getAlertsRequest, queryBuilder) + } + if ( + getAlertsRequest.workflowIds.isNullOrEmpty() == false && + !(getAlertsRequest.workflowIds!!.size == 1 && getAlertsRequest.workflowIds!![0] == "") + ) { + queryBuilder.must(QueryBuilders.termsQuery("workflow_id", getAlertsRequest.workflowIds)) } - if (!tableProp.searchString.isNullOrBlank()) { queryBuilder .must( @@ -105,37 +146,104 @@ class TransportGetAlertsAction @Inject constructor( .from(tableProp.startIndex) client.threadPool().threadContext.stashContext().use { - resolve(searchSourceBuilder, actionListener, user) + scope.launch { + try { + val alertIndex = resolveAlertsIndexName(getAlertsRequest) + getAlerts(alertIndex, searchSourceBuilder, actionListener, user) + } catch (t: Exception) { + log.error("Failed to get alerts", t) + if (t is AlertingException) { + actionListener.onFailure(t) + } else { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + } + } + } + + // we add this check when we want to fetch alerts for monitors not generated as part of a workflow i.e. 
non-delegate monitor alerts + private fun addWorkflowIdNullOrEmptyCheck( + getAlertsRequest: GetAlertsRequest, + queryBuilder: BoolQueryBuilder, + ) { + if ( + getAlertsRequest.workflowIds != null && getAlertsRequest.workflowIds!!.size == 1 && getAlertsRequest.workflowIds!![0] == "" + ) { + val noWorkflowIdQuery = QueryBuilders.boolQuery() + .should(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery(Alert.WORKFLOW_ID_FIELD))) + .should(QueryBuilders.termsQuery(Alert.WORKFLOW_ID_FIELD, "")) + queryBuilder.must(noWorkflowIdQuery) + } + } + + /** Precedence order for resolving alert index to be queried: + 1. alertIndex param. + 2. alert index mentioned in monitor data sources. + 3. Default alert indices pattern + */ + suspend fun resolveAlertsIndexName(getAlertsRequest: GetAlertsRequest): String { + var alertIndex = AlertIndices.ALL_ALERT_INDEX_PATTERN + if (getAlertsRequest.alertIndex.isNullOrEmpty() == false) { + alertIndex = getAlertsRequest.alertIndex!! + } else if (getAlertsRequest.monitorId.isNullOrEmpty() == false) { + val retrievedMonitor = getMonitor(getAlertsRequest) + if (retrievedMonitor != null) { + alertIndex = retrievedMonitor.dataSources.alertsIndex + } + } + return if (alertIndex == AlertIndices.ALERT_INDEX) + AlertIndices.ALL_ALERT_INDEX_PATTERN + else + alertIndex + } + + private suspend fun getMonitor(getAlertsRequest: GetAlertsRequest): Monitor? { + val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, getAlertsRequest.monitorId!!) + try { + val getResponse: GetResponse = client.suspendUntil { client.get(getRequest, it) } + if (!getResponse.isExists) { + return null + } + val xcp = XContentHelper.createParser( + xContentRegistry, LoggingDeprecationHandler.INSTANCE, + getResponse.sourceAsBytesRef, XContentType.JSON + ) + return ScheduledJob.parse(xcp, getResponse.id, getResponse.version) as Monitor + } catch (t: Exception) { + log.error("Failure in fetching monitor ${getAlertsRequest.monitorId} to resolve alert index in get alerts action", t) + return null } } - fun resolve( + fun getAlerts( + alertIndex: String, searchSourceBuilder: SearchSourceBuilder, actionListener: ActionListener, - user: User? + user: User?, ) { // user is null when: 1/ security is disabled. 2/when user is super-admin. if (user == null) { // user is null when: 1/ security is disabled. 2/when user is super-admin. - search(searchSourceBuilder, actionListener) + search(alertIndex, searchSourceBuilder, actionListener) } else if (!doFilterForUser(user)) { // security is enabled and filterby is disabled. - search(searchSourceBuilder, actionListener) + search(alertIndex, searchSourceBuilder, actionListener) } else { // security is enabled and filterby is enabled. 
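+            // addFilter narrows the search to alerts whose monitor_user backend roles intersect the requesting user's roles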
try { log.info("Filtering result by: ${user.backendRoles}") addFilter(user, searchSourceBuilder, "monitor_user.backend_roles.keyword") - search(searchSourceBuilder, actionListener) + search(alertIndex, searchSourceBuilder, actionListener) } catch (ex: IOException) { actionListener.onFailure(AlertingException.wrap(ex)) } } } - fun search(searchSourceBuilder: SearchSourceBuilder, actionListener: ActionListener) { + fun search(alertIndex: String, searchSourceBuilder: SearchSourceBuilder, actionListener: ActionListener) { val searchRequest = SearchRequest() - .indices(AlertIndices.ALL_ALERT_INDEX_PATTERN) + .indices(alertIndex) .source(searchSourceBuilder) client.search( @@ -145,8 +253,10 @@ class TransportGetAlertsAction @Inject constructor( val totalAlertCount = response.hits.totalHits?.value?.toInt() val alerts = response.hits.map { hit -> val xcp = XContentHelper.createParser( - xContentRegistry, LoggingDeprecationHandler.INSTANCE, - hit.sourceRef, XContentType.JSON + xContentRegistry, + LoggingDeprecationHandler.INSTANCE, + hit.sourceRef, + XContentType.JSON ) XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) val alert = Alert.parse(xcp, hit.id, hit.version) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetDestinationsAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetDestinationsAction.kt index 245a1bd87..9dab86330 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetDestinationsAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetDestinationsAction.kt @@ -6,7 +6,6 @@ package org.opensearch.alerting.transport import org.apache.logging.log4j.LogManager -import org.opensearch.action.ActionListener import org.opensearch.action.search.SearchRequest import org.opensearch.action.search.SearchResponse import org.opensearch.action.support.ActionFilters @@ -14,26 +13,27 @@ import org.opensearch.action.support.HandledTransportAction import org.opensearch.alerting.action.GetDestinationsAction import org.opensearch.alerting.action.GetDestinationsRequest import org.opensearch.alerting.action.GetDestinationsResponse -import org.opensearch.alerting.core.model.ScheduledJob import org.opensearch.alerting.model.destination.Destination import org.opensearch.alerting.opensearchapi.addFilter import org.opensearch.alerting.settings.AlertingSettings import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.use import org.opensearch.client.Client import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.Strings import org.opensearch.common.inject.Inject import org.opensearch.common.settings.Settings import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.XContentFactory -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.ScheduledJob import org.opensearch.commons.authuser.User +import org.opensearch.core.action.ActionListener +import org.opensearch.core.common.Strings +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils import org.opensearch.index.query.Operator 
import org.opensearch.index.query.QueryBuilders -import org.opensearch.rest.RestStatus import org.opensearch.search.builder.SearchSourceBuilder import org.opensearch.search.fetch.subphase.FetchSourceContext import org.opensearch.search.sort.SortBuilders @@ -148,7 +148,7 @@ class TransportGetDestinationsAction @Inject constructor( val version = hit.version val seqNo = hit.seqNo.toInt() val primaryTerm = hit.primaryTerm.toInt() - val xcp = XContentFactory.xContent(XContentType.JSON) + val xcp = XContentType.JSON.xContent() .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, hit.sourceAsString) XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, xcp.nextToken(), xcp) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetEmailAccountAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetEmailAccountAction.kt index 9d771d203..af309e5d1 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetEmailAccountAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetEmailAccountAction.kt @@ -7,7 +7,6 @@ package org.opensearch.alerting.transport import org.apache.logging.log4j.LogManager import org.opensearch.OpenSearchStatusException -import org.opensearch.action.ActionListener import org.opensearch.action.get.GetRequest import org.opensearch.action.get.GetResponse import org.opensearch.action.support.ActionFilters @@ -15,20 +14,22 @@ import org.opensearch.action.support.HandledTransportAction import org.opensearch.alerting.action.GetEmailAccountAction import org.opensearch.alerting.action.GetEmailAccountRequest import org.opensearch.alerting.action.GetEmailAccountResponse -import org.opensearch.alerting.core.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX import org.opensearch.alerting.model.destination.email.EmailAccount import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST import org.opensearch.alerting.util.AlertingException import org.opensearch.alerting.util.DestinationType +import org.opensearch.alerting.util.use import org.opensearch.client.Client import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject import org.opensearch.common.settings.Settings import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.XContentHelper import org.opensearch.common.xcontent.XContentType -import org.opensearch.rest.RestStatus +import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.tasks.Task import org.opensearch.transport.TransportService diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetEmailGroupAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetEmailGroupAction.kt index 00b6d3e6b..42b4ef1df 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetEmailGroupAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetEmailGroupAction.kt @@ -7,7 +7,6 @@ package org.opensearch.alerting.transport import org.apache.logging.log4j.LogManager import 
org.opensearch.OpenSearchStatusException -import org.opensearch.action.ActionListener import org.opensearch.action.get.GetRequest import org.opensearch.action.get.GetResponse import org.opensearch.action.support.ActionFilters @@ -15,20 +14,22 @@ import org.opensearch.action.support.HandledTransportAction import org.opensearch.alerting.action.GetEmailGroupAction import org.opensearch.alerting.action.GetEmailGroupRequest import org.opensearch.alerting.action.GetEmailGroupResponse -import org.opensearch.alerting.core.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX import org.opensearch.alerting.model.destination.email.EmailGroup import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST import org.opensearch.alerting.util.AlertingException import org.opensearch.alerting.util.DestinationType +import org.opensearch.alerting.util.use import org.opensearch.client.Client import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject import org.opensearch.common.settings.Settings import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.XContentHelper import org.opensearch.common.xcontent.XContentType -import org.opensearch.rest.RestStatus +import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.tasks.Task import org.opensearch.transport.TransportService diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetFindingsAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetFindingsAction.kt index 006d8fb9a..35f04558f 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetFindingsAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetFindingsAction.kt @@ -8,38 +8,44 @@ package org.opensearch.alerting.transport import kotlinx.coroutines.CoroutineScope import kotlinx.coroutines.Dispatchers import kotlinx.coroutines.launch +import kotlinx.coroutines.withContext import org.apache.logging.log4j.LogManager import org.apache.lucene.search.join.ScoreMode -import org.opensearch.action.ActionListener +import org.opensearch.action.ActionRequest import org.opensearch.action.get.MultiGetRequest import org.opensearch.action.get.MultiGetResponse import org.opensearch.action.search.SearchRequest import org.opensearch.action.search.SearchResponse import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.HandledTransportAction -import org.opensearch.alerting.action.GetFindingsAction -import org.opensearch.alerting.action.GetFindingsRequest -import org.opensearch.alerting.action.GetFindingsResponse import org.opensearch.alerting.alerts.AlertIndices.Companion.ALL_FINDING_INDEX_PATTERN -import org.opensearch.alerting.model.Finding -import org.opensearch.alerting.model.FindingDocument -import org.opensearch.alerting.model.FindingWithDocs import org.opensearch.alerting.opensearchapi.suspendUntil import org.opensearch.alerting.settings.AlertingSettings import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.use import org.opensearch.client.Client import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.Strings import org.opensearch.common.inject.Inject 
import org.opensearch.common.settings.Settings import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.XContentFactory -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.GetFindingsRequest +import org.opensearch.commons.alerting.action.GetFindingsResponse +import org.opensearch.commons.alerting.action.GetMonitorRequest +import org.opensearch.commons.alerting.action.GetMonitorResponse +import org.opensearch.commons.alerting.model.Finding +import org.opensearch.commons.alerting.model.FindingDocument +import org.opensearch.commons.alerting.model.FindingWithDocs +import org.opensearch.commons.utils.recreateObject +import org.opensearch.core.action.ActionListener +import org.opensearch.core.common.Strings +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils import org.opensearch.index.query.Operator import org.opensearch.index.query.QueryBuilders +import org.opensearch.rest.RestRequest import org.opensearch.search.builder.SearchSourceBuilder import org.opensearch.search.fetch.subphase.FetchSourceContext import org.opensearch.search.sort.SortBuilders @@ -57,8 +63,8 @@ class TransportGetFindingsSearchAction @Inject constructor( actionFilters: ActionFilters, val settings: Settings, val xContentRegistry: NamedXContentRegistry -) : HandledTransportAction ( - GetFindingsAction.NAME, transportService, actionFilters, ::GetFindingsRequest +) : HandledTransportAction ( + AlertingActions.GET_FINDINGS_ACTION_NAME, transportService, actionFilters, ::GetFindingsRequest ), SecureTransportAction { @@ -70,9 +76,11 @@ class TransportGetFindingsSearchAction @Inject constructor( override fun doExecute( task: Task, - getFindingsRequest: GetFindingsRequest, + request: ActionRequest, actionListener: ActionListener ) { + val getFindingsRequest = request as? 
GetFindingsRequest + ?: recreateObject(request) { GetFindingsRequest(it) } val tableProp = getFindingsRequest.table val sortBuilder = SortBuilders @@ -95,6 +103,12 @@ class TransportGetFindingsSearchAction @Inject constructor( if (!getFindingsRequest.findingId.isNullOrBlank()) queryBuilder.filter(QueryBuilders.termQuery("_id", getFindingsRequest.findingId)) + if (getFindingsRequest.monitorId != null) { + queryBuilder.filter(QueryBuilders.termQuery("monitor_id", getFindingsRequest.monitorId)) + } else if (getFindingsRequest.monitorIds.isNullOrEmpty() == false) { + queryBuilder.filter(QueryBuilders.termsQuery("monitor_id", getFindingsRequest.monitorIds)) + } + if (!tableProp.searchString.isNullOrBlank()) { queryBuilder .should( @@ -122,8 +136,11 @@ class TransportGetFindingsSearchAction @Inject constructor( client.threadPool().threadContext.stashContext().use { scope.launch { try { - val getFindingsResponse = search(searchSourceBuilder) + val indexName = resolveFindingsIndexName(getFindingsRequest) + val getFindingsResponse = search(searchSourceBuilder, indexName) actionListener.onResponse(getFindingsResponse) + } catch (t: AlertingException) { + actionListener.onFailure(t) } catch (t: Exception) { actionListener.onFailure(AlertingException.wrap(t)) } @@ -131,17 +148,43 @@ class TransportGetFindingsSearchAction @Inject constructor( } } - suspend fun search(searchSourceBuilder: SearchSourceBuilder): GetFindingsResponse { + suspend fun resolveFindingsIndexName(findingsRequest: GetFindingsRequest): String { + var indexName = ALL_FINDING_INDEX_PATTERN + + if (findingsRequest.findingIndex.isNullOrEmpty() == false) { + // findingIndex has highest priority, so use that if available + indexName = findingsRequest.findingIndex!! + } else if (findingsRequest.monitorId.isNullOrEmpty() == false) { + // second best is monitorId. 
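+            // (this path issues an extra GetMonitor transport call before the findings search)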
+ // We will use it to fetch monitor and then read indexName from dataSources field of monitor + withContext(Dispatchers.IO) { + val getMonitorRequest = GetMonitorRequest( + findingsRequest.monitorId!!, + -3L, + RestRequest.Method.GET, + FetchSourceContext.FETCH_SOURCE + ) + val getMonitorResponse: GetMonitorResponse = + this@TransportGetFindingsSearchAction.client.suspendUntil { + execute(AlertingActions.GET_MONITOR_ACTION_TYPE, getMonitorRequest, it) + } + indexName = getMonitorResponse.monitor?.dataSources?.findingsIndex ?: ALL_FINDING_INDEX_PATTERN + } + } + return indexName + } + + suspend fun search(searchSourceBuilder: SearchSourceBuilder, indexName: String): GetFindingsResponse { val searchRequest = SearchRequest() .source(searchSourceBuilder) - .indices(ALL_FINDING_INDEX_PATTERN) + .indices(indexName) val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } val totalFindingCount = searchResponse.hits.totalHits?.value?.toInt() val mgetRequest = MultiGetRequest() val findingsWithDocs = mutableListOf() val findings = mutableListOf() for (hit in searchResponse.hits) { - val xcp = XContentFactory.xContent(XContentType.JSON) + val xcp = XContentType.JSON.xContent() .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, hit.sourceAsString) XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) val finding = Finding.parse(xcp) @@ -174,8 +217,9 @@ class TransportGetFindingsSearchAction @Inject constructor( val documents: MutableMap = mutableMapOf() response.responses.forEach { val key = "${it.index}|${it.id}" - val docData = if (it.isFailed) "" else it.response.sourceAsString - val findingDocument = FindingDocument(it.index, it.id, !it.isFailed, docData) + val isDocFound = !(it.isFailed || it.response.sourceAsString == null) + val docData = if (isDocFound) it.response.sourceAsString else "" + val findingDocument = FindingDocument(it.index, it.id, isDocFound, docData) documents[key] = findingDocument } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetMonitorAction.kt index b2be635d9..722db074f 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetMonitorAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetMonitorAction.kt @@ -5,33 +5,50 @@ package org.opensearch.alerting.transport +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.launch import org.apache.logging.log4j.LogManager +import org.apache.lucene.search.join.ScoreMode import org.opensearch.OpenSearchStatusException -import org.opensearch.action.ActionListener +import org.opensearch.action.ActionRequest import org.opensearch.action.get.GetRequest import org.opensearch.action.get.GetResponse +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.HandledTransportAction -import org.opensearch.alerting.action.GetMonitorAction -import org.opensearch.alerting.action.GetMonitorRequest -import org.opensearch.alerting.action.GetMonitorResponse -import org.opensearch.alerting.core.model.ScheduledJob -import org.opensearch.alerting.model.Monitor +import org.opensearch.alerting.opensearchapi.suspendUntil import org.opensearch.alerting.settings.AlertingSettings import 
org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.ScheduledJobUtils.Companion.WORKFLOW_DELEGATE_PATH +import org.opensearch.alerting.util.ScheduledJobUtils.Companion.WORKFLOW_MONITOR_PATH +import org.opensearch.alerting.util.use import org.opensearch.client.Client import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject import org.opensearch.common.settings.Settings import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.XContentHelper import org.opensearch.common.xcontent.XContentType -import org.opensearch.rest.RestStatus +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.GetMonitorRequest +import org.opensearch.commons.alerting.action.GetMonitorResponse +import org.opensearch.commons.alerting.action.GetMonitorResponse.AssociatedWorkflow +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.commons.utils.recreateObject +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.index.query.QueryBuilders +import org.opensearch.search.builder.SearchSourceBuilder import org.opensearch.tasks.Task import org.opensearch.transport.TransportService private val log = LogManager.getLogger(TransportGetMonitorAction::class.java) +private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) class TransportGetMonitorAction @Inject constructor( transportService: TransportService, @@ -39,24 +56,33 @@ class TransportGetMonitorAction @Inject constructor( actionFilters: ActionFilters, val xContentRegistry: NamedXContentRegistry, val clusterService: ClusterService, - settings: Settings -) : HandledTransportAction ( - GetMonitorAction.NAME, transportService, actionFilters, ::GetMonitorRequest + settings: Settings, +) : HandledTransportAction( + AlertingActions.GET_MONITOR_ACTION_NAME, + transportService, + actionFilters, + ::GetMonitorRequest ), SecureTransportAction { - @Volatile override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) + @Volatile + override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) init { listenFilterBySettingChange(clusterService) } - override fun doExecute(task: Task, getMonitorRequest: GetMonitorRequest, actionListener: ActionListener) { + override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener) { + val transformedRequest = request as? GetMonitorRequest + ?: recreateObject(request) { + GetMonitorRequest(it) + } + val user = readUserFromThreadContext(client) - val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, getMonitorRequest.monitorId) - .version(getMonitorRequest.version) - .fetchSourceContext(getMonitorRequest.srcContext) + val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, transformedRequest.monitorId) + .version(transformedRequest.version) + .fetchSourceContext(transformedRequest.srcContext) if (!validateUserBackendRoles(user, actionListener)) { return @@ -83,8 +109,10 @@ class TransportGetMonitorAction @Inject constructor( var monitor: Monitor? 
= null if (!response.isSourceEmpty) { XContentHelper.createParser( - xContentRegistry, LoggingDeprecationHandler.INSTANCE, - response.sourceAsBytesRef, XContentType.JSON + xContentRegistry, + LoggingDeprecationHandler.INSTANCE, + response.sourceAsBytesRef, + XContentType.JSON ).use { xcp -> monitor = ScheduledJob.parse(xcp, response.id, response.version) as Monitor @@ -94,17 +122,30 @@ class TransportGetMonitorAction @Inject constructor( monitor?.user, actionListener, "monitor", - getMonitorRequest.monitorId + transformedRequest.monitorId ) ) { return } } } - - actionListener.onResponse( - GetMonitorResponse(response.id, response.version, response.seqNo, response.primaryTerm, RestStatus.OK, monitor) - ) + try { + scope.launch { + val associatedCompositeMonitors = getAssociatedWorkflows(response.id) + actionListener.onResponse( + GetMonitorResponse( + response.id, + response.version, + response.seqNo, + response.primaryTerm, + monitor, + associatedCompositeMonitors + ) + ) + } + } catch (e: Exception) { + log.error("Failed to get associate workflows in get monitor action", e) + } } override fun onFailure(t: Exception) { @@ -114,4 +155,41 @@ class TransportGetMonitorAction @Inject constructor( ) } } + + private suspend fun getAssociatedWorkflows(id: String): List { + try { + val associatedWorkflows = mutableListOf() + val queryBuilder = QueryBuilders.nestedQuery( + WORKFLOW_DELEGATE_PATH, + QueryBuilders.boolQuery().must( + QueryBuilders.matchQuery( + WORKFLOW_MONITOR_PATH, + id + ) + ), + ScoreMode.None + ) + val searchRequest = SearchRequest() + .indices(ScheduledJob.SCHEDULED_JOBS_INDEX) + .source(SearchSourceBuilder().query(queryBuilder).fetchField("_id")) + val response: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } + + for (hit in response.hits) { + XContentType.JSON.xContent().createParser( + xContentRegistry, + LoggingDeprecationHandler.INSTANCE, + hit.sourceAsString + ).use { hitsParser -> + val workflow = ScheduledJob.parse(hitsParser, hit.id, hit.version) + if (workflow is Workflow) { + associatedWorkflows.add(AssociatedWorkflow(hit.id, workflow.name)) + } + } + } + return associatedWorkflows + } catch (e: java.lang.Exception) { + log.error("failed to fetch associated workflows for monitor $id", e) + return emptyList() + } + } } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetRemoteIndexesAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetRemoteIndexesAction.kt new file mode 100644 index 000000000..5b35d493a --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetRemoteIndexesAction.kt @@ -0,0 +1,193 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.launch +import kotlinx.coroutines.newSingleThreadContext +import kotlinx.coroutines.withContext +import org.apache.logging.log4j.LogManager +import org.opensearch.OpenSearchStatusException +import org.opensearch.action.admin.cluster.health.ClusterHealthRequest +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse +import org.opensearch.action.admin.indices.mapping.get.GetMappingsRequest +import org.opensearch.action.admin.indices.mapping.get.GetMappingsResponse +import org.opensearch.action.admin.indices.resolve.ResolveIndexAction +import org.opensearch.action.support.ActionFilters +import 
org.opensearch.action.support.HandledTransportAction +import org.opensearch.action.support.IndicesOptions +import org.opensearch.alerting.action.GetRemoteIndexesAction +import org.opensearch.alerting.action.GetRemoteIndexesRequest +import org.opensearch.alerting.action.GetRemoteIndexesResponse +import org.opensearch.alerting.action.GetRemoteIndexesResponse.ClusterIndexes +import org.opensearch.alerting.action.GetRemoteIndexesResponse.ClusterIndexes.ClusterIndex +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.settings.AlertingSettings.Companion.REMOTE_MONITORING_ENABLED +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.CrossClusterMonitorUtils +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService +import java.time.Duration +import java.time.Instant + +private val log = LogManager.getLogger(TransportGetRemoteIndexesAction::class.java) +private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) + +class TransportGetRemoteIndexesAction @Inject constructor( + val transportService: TransportService, + val client: Client, + actionFilters: ActionFilters, + val xContentRegistry: NamedXContentRegistry, + val clusterService: ClusterService, + settings: Settings, +) : HandledTransportAction( + GetRemoteIndexesAction.NAME, + transportService, + actionFilters, + ::GetRemoteIndexesRequest +), + SecureTransportAction { + + @Volatile override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) + + @Volatile private var remoteMonitoringEnabled = REMOTE_MONITORING_ENABLED.get(settings) + + init { + clusterService.clusterSettings.addSettingsUpdateConsumer(REMOTE_MONITORING_ENABLED) { remoteMonitoringEnabled = it } + listenFilterBySettingChange(clusterService) + } + + override fun doExecute( + task: Task, + request: GetRemoteIndexesRequest, + actionListener: ActionListener + ) { + log.debug("Remote monitoring enabled: {}", remoteMonitoringEnabled) + if (!remoteMonitoringEnabled) { + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException("Remote monitoring is not enabled.", RestStatus.FORBIDDEN) + ) + ) + return + } + + val user = readUserFromThreadContext(client) + if (!validateUserBackendRoles(user, actionListener)) return + + client.threadPool().threadContext.stashContext().use { + scope.launch { + val singleThreadContext = newSingleThreadContext("GetRemoteIndexesActionThread") + withContext(singleThreadContext) { + it.restore() + val clusterIndexesList = mutableListOf() + + var resolveIndexResponse: ResolveIndexAction.Response? 
= null + try { + resolveIndexResponse = + getRemoteClusters(CrossClusterMonitorUtils.parseIndexesForRemoteSearch(request.indexes, clusterService)) + } catch (e: Exception) { + log.error("Failed to retrieve indexes for request $request", e) + actionListener.onFailure(AlertingException.wrap(e)) + } + + val resolvedIndexes: MutableList = mutableListOf() + if (resolveIndexResponse != null) { + resolveIndexResponse.indices.forEach { resolvedIndexes.add(it.name) } + resolveIndexResponse.aliases.forEach { resolvedIndexes.add(it.name) } + } + + val clusterIndexesMap = CrossClusterMonitorUtils.separateClusterIndexes(resolvedIndexes, clusterService) + + clusterIndexesMap.forEach { (clusterName, indexes) -> + val targetClient = CrossClusterMonitorUtils.getClientForCluster(clusterName, client, clusterService) + + val startTime = Instant.now() + var clusterHealthResponse: ClusterHealthResponse? = null + try { + clusterHealthResponse = getHealthStatuses(targetClient, indexes) + } catch (e: Exception) { + log.error("Failed to retrieve health statuses for request $request", e) + actionListener.onFailure(AlertingException.wrap(e)) + } + val endTime = Instant.now() + val latency = Duration.between(startTime, endTime).toMillis() + + var mappingsResponse: GetMappingsResponse? = null + if (request.includeMappings) { + try { + mappingsResponse = getIndexMappings(targetClient, indexes) + } catch (e: Exception) { + log.error("Failed to retrieve mappings for request $request", e) + actionListener.onFailure(AlertingException.wrap(e)) + } + } + + val clusterIndexList = mutableListOf() + if (clusterHealthResponse != null) { + indexes.forEach { + clusterIndexList.add( + ClusterIndex( + indexName = it, + indexHealth = clusterHealthResponse.indices[it]?.status, + mappings = mappingsResponse?.mappings?.get(it) + ) + ) + } + } + + clusterIndexesList.add( + ClusterIndexes( + clusterName = clusterName, + clusterHealth = clusterHealthResponse!!.status, + hubCluster = clusterName == clusterService.clusterName.value(), + indexes = clusterIndexList, + latency = latency + ) + ) + } + actionListener.onResponse(GetRemoteIndexesResponse(clusterIndexes = clusterIndexesList)) + } + } + } + } + + private suspend fun getRemoteClusters(parsedIndexes: List): ResolveIndexAction.Response { + val resolveRequest = ResolveIndexAction.Request( + parsedIndexes.toTypedArray(), + ResolveIndexAction.Request.DEFAULT_INDICES_OPTIONS + ) + + return client.suspendUntil { + admin().indices().resolveIndex(resolveRequest, it) + } + } + private suspend fun getHealthStatuses(targetClient: Client, parsedIndexesNames: List): ClusterHealthResponse { + val clusterHealthRequest = ClusterHealthRequest() + .indices(*parsedIndexesNames.toTypedArray()) + .indicesOptions(IndicesOptions.lenientExpandHidden()) + + return targetClient.suspendUntil { + admin().cluster().health(clusterHealthRequest, it) + } + } + + private suspend fun getIndexMappings(targetClient: Client, parsedIndexNames: List): GetMappingsResponse { + val getMappingsRequest = GetMappingsRequest().indices(*parsedIndexNames.toTypedArray()) + return targetClient.suspendUntil { + admin().indices().getMappings(getMappingsRequest, it) + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetWorkflowAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetWorkflowAction.kt new file mode 100644 index 000000000..4b2dc3ee3 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetWorkflowAction.kt @@ -0,0 +1,149 @@ 
+/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import org.apache.logging.log4j.LogManager +import org.opensearch.OpenSearchStatusException +import org.opensearch.action.get.GetRequest +import org.opensearch.action.get.GetResponse +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.HandledTransportAction +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.use +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.GetWorkflowRequest +import org.opensearch.commons.alerting.action.GetWorkflowResponse +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.index.IndexNotFoundException +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService + +class TransportGetWorkflowAction @Inject constructor( + transportService: TransportService, + val client: Client, + actionFilters: ActionFilters, + val xContentRegistry: NamedXContentRegistry, + val clusterService: ClusterService, + settings: Settings +) : HandledTransportAction( + AlertingActions.GET_WORKFLOW_ACTION_NAME, transportService, actionFilters, ::GetWorkflowRequest +), + SecureTransportAction { + + private val log = LogManager.getLogger(javaClass) + + @Volatile override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) + + init { + listenFilterBySettingChange(clusterService) + } + + override fun doExecute(task: Task, getWorkflowRequest: GetWorkflowRequest, actionListener: ActionListener) { + val user = readUserFromThreadContext(client) + + val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, getWorkflowRequest.workflowId) + + if (!validateUserBackendRoles(user, actionListener)) { + return + } + + client.threadPool().threadContext.stashContext().use { + client.get( + getRequest, + object : ActionListener { + override fun onResponse(response: GetResponse) { + if (!response.isExists) { + log.error("Workflow with ${getWorkflowRequest.workflowId} not found") + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "Workflow not found.", + RestStatus.NOT_FOUND + ) + ) + ) + return + } + + var workflow: Workflow? 
= null + if (!response.isSourceEmpty) { + XContentHelper.createParser( + xContentRegistry, LoggingDeprecationHandler.INSTANCE, + response.sourceAsBytesRef, XContentType.JSON + ).use { xcp -> + val compositeMonitor = ScheduledJob.parse(xcp, response.id, response.version) + if (compositeMonitor is Workflow) { + workflow = compositeMonitor + } else { + log.error("Wrong monitor type returned") + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "Workflow not found.", + RestStatus.NOT_FOUND + ) + ) + ) + return + } + + // security is enabled and filterby is enabled + if (!checkUserPermissionsWithResource( + user, + workflow?.user, + actionListener, + "workflow", + getWorkflowRequest.workflowId + ) + ) { + return + } + } + } + + actionListener.onResponse( + GetWorkflowResponse( + response.id, + response.version, + response.seqNo, + response.primaryTerm, + RestStatus.OK, + workflow + ) + ) + } + + override fun onFailure(t: Exception) { + log.error("Getting the workflow failed", t) + + if (t is IndexNotFoundException) { + actionListener.onFailure( + OpenSearchStatusException( + "Workflow not found", + RestStatus.NOT_FOUND + ) + ) + } else { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + } + ) + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetWorkflowAlertsAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetWorkflowAlertsAction.kt new file mode 100644 index 000000000..2d6c165c0 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportGetWorkflowAlertsAction.kt @@ -0,0 +1,275 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.launch +import org.apache.logging.log4j.LogManager +import org.opensearch.action.ActionRequest +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.HandledTransportAction +import org.opensearch.alerting.alerts.AlertIndices +import org.opensearch.alerting.opensearchapi.addFilter +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.use +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.GetAlertsRequest +import org.opensearch.commons.alerting.action.GetWorkflowAlertsRequest +import org.opensearch.commons.alerting.action.GetWorkflowAlertsResponse +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.authuser.User +import org.opensearch.commons.utils.recreateObject +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser +import 
org.opensearch.core.xcontent.XContentParserUtils +import org.opensearch.index.query.Operator +import org.opensearch.index.query.QueryBuilders +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.search.sort.SortBuilders +import org.opensearch.search.sort.SortOrder +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService +import java.io.IOException + +private val log = LogManager.getLogger(TransportGetWorkflowAlertsAction::class.java) +private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) + +class TransportGetWorkflowAlertsAction @Inject constructor( + transportService: TransportService, + val client: Client, + clusterService: ClusterService, + actionFilters: ActionFilters, + val settings: Settings, + val xContentRegistry: NamedXContentRegistry, +) : HandledTransportAction<ActionRequest, GetWorkflowAlertsResponse>( + AlertingActions.GET_WORKFLOW_ALERTS_ACTION_NAME, + transportService, + actionFilters, + ::GetAlertsRequest +), + SecureTransportAction { + + @Volatile + override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) + + @Volatile + private var isAlertHistoryEnabled = AlertingSettings.ALERT_HISTORY_ENABLED.get(settings) + + init { + clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.ALERT_HISTORY_ENABLED) { isAlertHistoryEnabled = it } + listenFilterBySettingChange(clusterService) + } + + override fun doExecute( + task: Task, + request: ActionRequest, + actionListener: ActionListener<GetWorkflowAlertsResponse>, + ) { + val getWorkflowAlertsRequest = request as? GetWorkflowAlertsRequest + ?: recreateObject(request) { GetWorkflowAlertsRequest(it) } + val user = readUserFromThreadContext(client) + + val tableProp = getWorkflowAlertsRequest.table + val sortBuilder = SortBuilders.fieldSort(tableProp.sortString) + .order(SortOrder.fromString(tableProp.sortOrder)) + if (!tableProp.missing.isNullOrBlank()) { + sortBuilder.missing(tableProp.missing) + } + + val queryBuilder = QueryBuilders.boolQuery() + + if (getWorkflowAlertsRequest.severityLevel != "ALL") { + queryBuilder.filter(QueryBuilders.termQuery("severity", getWorkflowAlertsRequest.severityLevel)) + } + + if (getWorkflowAlertsRequest.alertState == "ALL") { + queryBuilder.filter(QueryBuilders.boolQuery().mustNot(QueryBuilders.termsQuery(Alert.STATE_FIELD, Alert.State.AUDIT.name))) + } else { + queryBuilder.filter(QueryBuilders.termQuery(Alert.STATE_FIELD, getWorkflowAlertsRequest.alertState)) + } + + if (getWorkflowAlertsRequest.alertIds.isNullOrEmpty() == false) { + queryBuilder.filter(QueryBuilders.termsQuery("_id", getWorkflowAlertsRequest.alertIds)) + } + + if (getWorkflowAlertsRequest.monitorIds.isNullOrEmpty() == false) { + queryBuilder.filter(QueryBuilders.termsQuery("monitor_id", getWorkflowAlertsRequest.monitorIds)) + } + if (getWorkflowAlertsRequest.workflowIds.isNullOrEmpty() == false) { + queryBuilder.must(QueryBuilders.termsQuery("workflow_id", getWorkflowAlertsRequest.workflowIds)) + queryBuilder.must(QueryBuilders.termQuery("monitor_id", "")) + } + if (!tableProp.searchString.isNullOrBlank()) { + queryBuilder + .must( + QueryBuilders.queryStringQuery(tableProp.searchString) + .defaultOperator(Operator.AND) + .field("monitor_name") + .field("trigger_name") + ) + } + // If alert ids are specified we cannot set the "from" field, since the page may not contain those ids; + // they are used instead to paginate the associated alerts. + val from = if (getWorkflowAlertsRequest.alertIds.isNullOrEmpty()) + tableProp.startIndex + else 0 + + val searchSourceBuilder = SearchSourceBuilder() + .version(true) + .seqNoAndPrimaryTerm(true) + .query(queryBuilder) + .sort(sortBuilder) + .size(tableProp.size) + .from(from) + + client.threadPool().threadContext.stashContext().use { + scope.launch { + try { + val alertIndex = resolveAlertsIndexName(getWorkflowAlertsRequest) + getAlerts(getWorkflowAlertsRequest, alertIndex, searchSourceBuilder, actionListener, user) + } catch (t: Exception) { + log.error("Failed to get alerts", t) + if (t is AlertingException) { + actionListener.onFailure(t) + } else { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + } + } + } + + fun resolveAlertsIndexName(getAlertsRequest: GetWorkflowAlertsRequest): String { + var alertIndex = AlertIndices.ALL_ALERT_INDEX_PATTERN + if (getAlertsRequest.alertIndex.isNullOrEmpty() == false) { + alertIndex = getAlertsRequest.alertIndex!! + } + return if (alertIndex == AlertIndices.ALERT_INDEX) + AlertIndices.ALL_ALERT_INDEX_PATTERN + else + alertIndex + } + + fun resolveAssociatedAlertsIndexName(getAlertsRequest: GetWorkflowAlertsRequest): String { + return if (getAlertsRequest.alertIndex.isNullOrEmpty()) AlertIndices.ALL_ALERT_INDEX_PATTERN + else getAlertsRequest.associatedAlertsIndex!! + } + + suspend fun getAlerts( + getWorkflowAlertsRequest: GetWorkflowAlertsRequest, + alertIndex: String, + searchSourceBuilder: SearchSourceBuilder, + actionListener: ActionListener<GetWorkflowAlertsResponse>, + user: User?, + ) { + // user is null when: 1/ security is disabled, 2/ the user is a super-admin. + if (user == null) { + search(getWorkflowAlertsRequest, alertIndex, searchSourceBuilder, actionListener) + } else if (!doFilterForUser(user)) { + // security is enabled and filterby is disabled. + search(getWorkflowAlertsRequest, alertIndex, searchSourceBuilder, actionListener) + } else { + // security is enabled and filterby is enabled.
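+            // A rough sketch (assumption, for illustration only; the actual addFilter helper lives in
+            // org.opensearch.alerting.opensearchapi) of the rewrite performed on the search source below:
+            //     searchSourceBuilder.query(
+            //         QueryBuilders.boolQuery()
+            //             .must(searchSourceBuilder.query() ?: QueryBuilders.matchAllQuery())
+            //             .filter(QueryBuilders.termsQuery("monitor_user.backend_roles.keyword", user.backendRoles))
+            //     )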
+ try { + log.info("Filtering result by: ${user.backendRoles}") + addFilter(user, searchSourceBuilder, "monitor_user.backend_roles.keyword") + search(getWorkflowAlertsRequest, alertIndex, searchSourceBuilder, actionListener) + } catch (ex: IOException) { + actionListener.onFailure(AlertingException.wrap(ex)) + } + } + } + + suspend fun search( + getWorkflowAlertsRequest: GetWorkflowAlertsRequest, + alertIndex: String, + searchSourceBuilder: SearchSourceBuilder, + actionListener: ActionListener, + ) { + try { + val searchRequest = SearchRequest() + .indices(alertIndex) + .source(searchSourceBuilder) + val alerts = mutableListOf() + val associatedAlerts = mutableListOf() + + val response: SearchResponse = client.suspendUntil { search(searchRequest, it) } + val totalAlertCount = response.hits.totalHits?.value?.toInt() + alerts.addAll( + parseAlertsFromSearchResponse(response) + ) + if (alerts.isNotEmpty() && getWorkflowAlertsRequest.getAssociatedAlerts == true) + getAssociatedAlerts( + associatedAlerts, + alerts, + resolveAssociatedAlertsIndexName(getWorkflowAlertsRequest), + getWorkflowAlertsRequest + ) + actionListener.onResponse(GetWorkflowAlertsResponse(alerts, associatedAlerts, totalAlertCount)) + } catch (e: Exception) { + actionListener.onFailure(AlertingException("Failed to get alerts", RestStatus.INTERNAL_SERVER_ERROR, e)) + } + } + + private suspend fun getAssociatedAlerts( + associatedAlerts: MutableList, + alerts: MutableList, + alertIndex: String, + getWorkflowAlertsRequest: GetWorkflowAlertsRequest, + ) { + try { + val associatedAlertIds = mutableSetOf() + alerts.forEach { associatedAlertIds.addAll(it.associatedAlertIds) } + if (associatedAlertIds.isEmpty()) return + val queryBuilder = QueryBuilders.boolQuery() + val searchRequest = SearchRequest(alertIndex) + // if chained alert id param is non-null, paginate the associated alerts. 
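+        // Inferred note: when specific chained alert ids are requested, the table properties (sort, size,
+        // startIndex) are applied to this associated-alerts search, so clients can page through the audit
+        // alerts of a single chained alert rather than through the chained alerts themselves.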
+ if (getWorkflowAlertsRequest.alertIds.isNullOrEmpty() == false) { + val tableProp = getWorkflowAlertsRequest.table + val sortBuilder = SortBuilders.fieldSort(tableProp.sortString) + .order(SortOrder.fromString(tableProp.sortOrder)) + if (!tableProp.missing.isNullOrBlank()) { + sortBuilder.missing(tableProp.missing) + } + searchRequest.source().sort(sortBuilder).size(tableProp.size).from(tableProp.startIndex) + } + queryBuilder.must(QueryBuilders.termsQuery("_id", associatedAlertIds)) + queryBuilder.must(QueryBuilders.termQuery(Alert.STATE_FIELD, Alert.State.AUDIT.name)) + searchRequest.source().query(queryBuilder) + val response: SearchResponse = client.suspendUntil { search(searchRequest, it) } + associatedAlerts.addAll(parseAlertsFromSearchResponse(response)) + } catch (e: Exception) { + log.error("Failed to get associated alerts in get workflow alerts action", e) + } + } + + private fun parseAlertsFromSearchResponse(response: SearchResponse) = response.hits.map { hit -> + val xcp = XContentHelper.createParser( + xContentRegistry, + LoggingDeprecationHandler.INSTANCE, + hit.sourceRef, + XContentType.JSON + ) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + val alert = Alert.parse(xcp, hit.id, hit.version) + alert + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt index 407d95a5b..49743b3f0 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt @@ -9,12 +9,16 @@ import kotlinx.coroutines.CoroutineScope import kotlinx.coroutines.Dispatchers import kotlinx.coroutines.launch import org.apache.logging.log4j.LogManager +import org.opensearch.ExceptionsHelper +import org.opensearch.OpenSearchException import org.opensearch.OpenSearchSecurityException import org.opensearch.OpenSearchStatusException -import org.opensearch.action.ActionListener +import org.opensearch.ResourceAlreadyExistsException +import org.opensearch.action.ActionRequest +import org.opensearch.action.admin.cluster.health.ClusterHealthAction +import org.opensearch.action.admin.cluster.health.ClusterHealthRequest +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse import org.opensearch.action.admin.indices.create.CreateIndexResponse -import org.opensearch.action.admin.indices.get.GetIndexRequest -import org.opensearch.action.admin.indices.get.GetIndexResponse import org.opensearch.action.get.GetRequest import org.opensearch.action.get.GetResponse import org.opensearch.action.index.IndexRequest @@ -25,20 +29,11 @@ import org.opensearch.action.support.ActionFilters import org.opensearch.action.support.HandledTransportAction import org.opensearch.action.support.WriteRequest.RefreshPolicy import org.opensearch.action.support.master.AcknowledgedResponse -import org.opensearch.alerting.DocumentLevelMonitorRunner -import org.opensearch.alerting.action.IndexMonitorAction -import org.opensearch.alerting.action.IndexMonitorRequest -import org.opensearch.alerting.action.IndexMonitorResponse +import org.opensearch.alerting.MonitorMetadataService import org.opensearch.alerting.core.ScheduledJobIndices -import org.opensearch.alerting.core.model.DocLevelMonitorInput -import org.opensearch.alerting.core.model.DocLevelMonitorInput.Companion.DOC_LEVEL_INPUT_FIELD -import 
org.opensearch.alerting.core.model.ScheduledJob -import org.opensearch.alerting.core.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX -import org.opensearch.alerting.core.model.SearchInput -import org.opensearch.alerting.model.AlertingConfigAccessor.Companion.getMonitorMetadata -import org.opensearch.alerting.model.Monitor import org.opensearch.alerting.model.MonitorMetadata import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.service.DeleteMonitorService import org.opensearch.alerting.settings.AlertingSettings import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERTING_MAX_MONITORS import org.opensearch.alerting.settings.AlertingSettings.Companion.INDEX_TIMEOUT @@ -49,25 +44,39 @@ import org.opensearch.alerting.util.AlertingException import org.opensearch.alerting.util.DocLevelMonitorQueries import org.opensearch.alerting.util.IndexUtils import org.opensearch.alerting.util.addUserBackendRolesFilter +import org.opensearch.alerting.util.getRoleFilterEnabled import org.opensearch.alerting.util.isADMonitor +import org.opensearch.alerting.util.use import org.opensearch.client.Client import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject import org.opensearch.common.settings.Settings import org.opensearch.common.unit.TimeValue import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.ToXContent import org.opensearch.common.xcontent.XContentFactory.jsonBuilder import org.opensearch.common.xcontent.XContentHelper import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.IndexMonitorRequest +import org.opensearch.commons.alerting.action.IndexMonitorResponse +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelMonitorInput.Companion.DOC_LEVEL_INPUT_FIELD +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import org.opensearch.commons.alerting.model.SearchInput import org.opensearch.commons.authuser.User +import org.opensearch.commons.utils.recreateObject +import org.opensearch.core.action.ActionListener +import org.opensearch.core.common.io.stream.NamedWriteableRegistry +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.ToXContent import org.opensearch.index.query.QueryBuilders import org.opensearch.index.reindex.BulkByScrollResponse import org.opensearch.index.reindex.DeleteByQueryAction import org.opensearch.index.reindex.DeleteByQueryRequestBuilder import org.opensearch.rest.RestRequest -import org.opensearch.rest.RestStatus import org.opensearch.search.builder.SearchSourceBuilder import org.opensearch.tasks.Task import org.opensearch.transport.TransportService @@ -85,9 +94,10 @@ class TransportIndexMonitorAction @Inject constructor( val docLevelMonitorQueries: DocLevelMonitorQueries, val clusterService: ClusterService, val settings: Settings, - val xContentRegistry: NamedXContentRegistry -) : HandledTransportAction( - IndexMonitorAction.NAME, transportService, actionFilters, ::IndexMonitorRequest + val xContentRegistry: NamedXContentRegistry, + val namedWriteableRegistry: 
NamedWriteableRegistry, +) : HandledTransportAction<ActionRequest, IndexMonitorResponse>( + AlertingActions.INDEX_MONITOR_ACTION_NAME, transportService, actionFilters, ::IndexMonitorRequest ), SecureTransportAction { @@ -107,18 +117,57 @@ listenFilterBySettingChange(clusterService) } - override fun doExecute(task: Task, request: IndexMonitorRequest, actionListener: ActionListener<IndexMonitorResponse>) { + override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener<IndexMonitorResponse>) { + val transformedRequest = request as? IndexMonitorRequest + ?: recreateObject(request, namedWriteableRegistry) { + IndexMonitorRequest(it) + } + val user = readUserFromThreadContext(client) if (!validateUserBackendRoles(user, actionListener)) { return } - if (!isADMonitor(request.monitor)) { - checkIndicesAndExecute(client, actionListener, request, user) + if ( + user != null && + !isAdmin(user) && + transformedRequest.rbacRoles != null + ) { + if (transformedRequest.rbacRoles?.stream()?.anyMatch { !user.backendRoles.contains(it) } == true) { + log.debug( + "User specified backend roles, ${transformedRequest.rbacRoles}, " + + "that they don't have access to. User backend roles: ${user.backendRoles}" + ) + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "User specified backend roles that they don't have access to. Contact administrator", RestStatus.FORBIDDEN + ) + ) + ) + return + } else if (transformedRequest.rbacRoles?.isEmpty() == true) { + log.debug( + "Non-admin users are not allowed to specify an empty set of backend roles. " + + "Please don't pass in the parameter or pass in at least one backend role." + ) + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "Non-admin users are not allowed to specify an empty set of backend roles.", RestStatus.FORBIDDEN + ) + ) + ) + return + } + } + + if (!isADMonitor(transformedRequest.monitor)) { + checkIndicesAndExecute(client, actionListener, transformedRequest, user) } else { // check if user has access to any anomaly detector for AD monitor - checkAnomalyDetectorAndExecute(client, actionListener, request, user) + checkAnomalyDetectorAndExecute(client, actionListener, transformedRequest, user) } } @@ -130,7 +179,7 @@ client: Client, actionListener: ActionListener<IndexMonitorResponse>, request: IndexMonitorRequest, - user: User? + user: User?, ) { val indices = mutableListOf<String>() // todo: for doc level alerting: check if index is present before monitor is created. @@ -183,7 +232,7 @@ client: Client, actionListener: ActionListener<IndexMonitorResponse>, request: IndexMonitorRequest, - user: User? + user: User?, ) { client.threadPool().threadContext.stashContext().use { IndexMonitorHandler(client, actionListener, request, user).resolveUserAndStartForAD() } @@ -194,7 +243,7 @@ private val client: Client, private val actionListener: ActionListener<IndexMonitorResponse>, private val request: IndexMonitorRequest, - private val user: User?
+ private val user: User?, ) { fun resolveUserAndStart() { @@ -221,7 +270,9 @@ class TransportIndexMonitorAction @Inject constructor( request.monitor = request.monitor .copy(user = User(user.name, user.backendRoles, user.roles, user.customAttNames)) val searchSourceBuilder = SearchSourceBuilder().size(0) - addUserBackendRolesFilter(user, searchSourceBuilder) + if (getRoleFilterEnabled(clusterService, settings, "plugins.anomaly_detection.filter_by_backend_roles")) { + addUserBackendRolesFilter(user, searchSourceBuilder) + } val searchRequest = SearchRequest().indices(".opendistro-anomaly-detectors").source(searchSourceBuilder) client.search( searchRequest, @@ -254,10 +305,30 @@ class TransportIndexMonitorAction @Inject constructor( if (!scheduledJobIndices.scheduledJobIndexExists()) { scheduledJobIndices.initScheduledJobIndex(object : ActionListener { override fun onResponse(response: CreateIndexResponse) { - onCreateMappingsResponse(response) + onCreateMappingsResponse(response.isAcknowledged) } override fun onFailure(t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) + // https://github.com/opensearch-project/alerting/issues/646 + if (ExceptionsHelper.unwrapCause(t) is ResourceAlreadyExistsException) { + scope.launch { + // Wait for the yellow status + val request = ClusterHealthRequest() + .indices(SCHEDULED_JOBS_INDEX) + .waitForYellowStatus() + val response: ClusterHealthResponse = client.suspendUntil { + execute(ClusterHealthAction.INSTANCE, request, it) + } + if (response.isTimedOut) { + actionListener.onFailure( + OpenSearchException("Cannot determine that the $SCHEDULED_JOBS_INDEX index is healthy") + ) + } + // Retry mapping of monitor + onCreateMappingsResponse(true) + } + } else { + actionListener.onFailure(AlertingException.wrap(t)) + } } }) } else if (!IndexUtils.scheduledJobIndexUpdated) { @@ -303,6 +374,7 @@ class TransportIndexMonitorAction @Inject constructor( val query = QueryBuilders.boolQuery().filter(QueryBuilders.termQuery("${Monitor.MONITOR_TYPE}.type", Monitor.MONITOR_TYPE)) val searchSource = SearchSourceBuilder().query(query).timeout(requestTimeout) val searchRequest = SearchRequest(SCHEDULED_JOBS_INDEX).source(searchSource) + client.search( searchRequest, object : ActionListener { @@ -323,12 +395,12 @@ class TransportIndexMonitorAction @Inject constructor( trigger.actions.forEach { action -> if (action.throttle != null) { require( - TimeValue(Duration.of(action.throttle.value.toLong(), action.throttle.unit).toMillis()) + TimeValue(Duration.of(action.throttle!!.value.toLong(), action.throttle!!.unit).toMillis()) .compareTo(maxValue) <= 0, { "Can only set throttle period less than or equal to $maxValue" } ) require( - TimeValue(Duration.of(action.throttle.value.toLong(), action.throttle.unit).toMillis()) + TimeValue(Duration.of(action.throttle!!.value.toLong(), action.throttle!!.unit).toMillis()) .compareTo(minValue) >= 0, { "Can only set throttle period greater than or equal to $minValue" } ) @@ -343,7 +415,7 @@ class TransportIndexMonitorAction @Inject constructor( private fun onSearchResponse(response: SearchResponse) { val totalHits = response.hits.totalHits?.value if (totalHits != null && totalHits >= maxMonitors) { - log.error("This request would create more than the allowed monitors [$maxMonitors].") + log.info("This request would create more than the allowed monitors [$maxMonitors].") actionListener.onFailure( AlertingException.wrap( IllegalArgumentException( @@ -358,13 +430,13 @@ class TransportIndexMonitorAction @Inject constructor( } } 
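+        // For illustration (assumed default from AlertingSettings): ALERTING_MAX_MONITORS defaults to 1000, so once
+        // the scheduled jobs index already holds 1000 monitors, onSearchResponse above fails the create request with
+        // the wrapped IllegalArgumentException.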
- private fun onCreateMappingsResponse(response: CreateIndexResponse) { - if (response.isAcknowledged) { + private fun onCreateMappingsResponse(isAcknowledged: Boolean) { + if (isAcknowledged) { log.info("Created $SCHEDULED_JOBS_INDEX with mappings.") prepareMonitorIndexing() IndexUtils.scheduledJobIndexUpdated() } else { - log.error("Create $SCHEDULED_JOBS_INDEX mappings call not acknowledged.") + log.info("Create $SCHEDULED_JOBS_INDEX mappings call not acknowledged.") actionListener.onFailure( AlertingException.wrap( OpenSearchStatusException( @@ -381,7 +453,7 @@ class TransportIndexMonitorAction @Inject constructor( IndexUtils.scheduledJobIndexUpdated() prepareMonitorIndexing() } else { - log.error("Update ${ScheduledJob.SCHEDULED_JOBS_INDEX} mappings call not acknowledged.") + log.info("Update ${ScheduledJob.SCHEDULED_JOBS_INDEX} mappings call not acknowledged.") actionListener.onFailure( AlertingException.wrap( OpenSearchStatusException( @@ -394,7 +466,18 @@ class TransportIndexMonitorAction @Inject constructor( } private suspend fun indexMonitor() { - var metadata = createMetadata() + if (user != null) { + // Use the backend roles which is an intersection of the requested backend roles and the user's backend roles. + // Admins can pass in any backend role. Also if no backend role is passed in, all the user's backend roles are used. + val rbacRoles = if (request.rbacRoles == null) user.backendRoles.toSet() + else if (!isAdmin(user)) request.rbacRoles?.intersect(user.backendRoles)?.toSet() + else request.rbacRoles + + request.monitor = request.monitor.copy( + user = User(user.name, rbacRoles.orEmpty().toList(), user.roles, user.customAttNames) + ) + log.debug("Created monitor's backend roles: $rbacRoles") + } val indexRequest = IndexRequest(SCHEDULED_JOBS_INDEX) .setRefreshPolicy(request.refreshPolicy) @@ -403,36 +486,52 @@ class TransportIndexMonitorAction @Inject constructor( .setIfPrimaryTerm(request.primaryTerm) .timeout(indexTimeout) + log.info( + "Creating new monitor: ${request.monitor.toXContentWithUser( + jsonBuilder(), + ToXContent.MapParams(mapOf("with_type" to "true")) + )}" + ) + try { val indexResponse: IndexResponse = client.suspendUntil { client.index(indexRequest, it) } val failureReasons = checkShardsFailure(indexResponse) if (failureReasons != null) { + log.info(failureReasons.toString()) actionListener.onFailure( AlertingException.wrap(OpenSearchStatusException(failureReasons.toString(), indexResponse.status())) ) return } - metadata = metadata.copy(monitorId = indexResponse.id, id = "${indexResponse.id}-metadata") - - // In case the metadata fails to be created, the monitor runner should have logic to recreate and index the metadata. - // This is currently being handled in DocumentLevelMonitor as its the only current monitor to use metadata currently. - // This should be enhanced by having a utility class to handle the logic of management and creation of the metadata. - // Issue to track this: https://github.com/opensearch-project/alerting/issues/445 - val metadataIndexRequest = IndexRequest(SCHEDULED_JOBS_INDEX) - .setRefreshPolicy(request.refreshPolicy) - .source(metadata.toXContent(jsonBuilder(), ToXContent.MapParams(mapOf("with_type" to "true")))) - .id(metadata.id) - .timeout(indexTimeout) - client.suspendUntil { client.index(metadataIndexRequest, it) } - - if (request.monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { - indexDocLevelMonitorQueries(request.monitor, indexResponse.id, request.refreshPolicy) + var metadata: MonitorMetadata? 
+ try { // delete monitor if metadata creation fails, log the right error and re-throw the error to fail listener + request.monitor = request.monitor.copy(id = indexResponse.id) + var (monitorMetadata: MonitorMetadata, created: Boolean) = MonitorMetadataService.getOrCreateMetadata(request.monitor) + if (created == false) { + log.warn("Metadata doc id:${monitorMetadata.id} exists, but it shouldn't!") + } + metadata = monitorMetadata + } catch (t: Exception) { + log.error("failed to create metadata for monitor ${indexResponse.id}. deleting monitor") + cleanupMonitorAfterPartialFailure(request.monitor, indexResponse) + throw t + } + try { + if (request.monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { + indexDocLevelMonitorQueries(request.monitor, indexResponse.id, metadata, request.refreshPolicy) + } + // When inserting queries in queryIndex we could update sourceToQueryIndexMapping + MonitorMetadataService.upsertMetadata(metadata, updating = true) + } catch (t: Exception) { + log.error("failed to index doc level queries monitor ${indexResponse.id}. deleting monitor", t) + cleanupMonitorAfterPartialFailure(request.monitor, indexResponse) + throw t } actionListener.onResponse( IndexMonitorResponse( indexResponse.id, indexResponse.version, indexResponse.seqNo, - indexResponse.primaryTerm, RestStatus.CREATED, request.monitor + indexResponse.primaryTerm, request.monitor ) ) } catch (t: Exception) { @@ -440,19 +539,42 @@ class TransportIndexMonitorAction @Inject constructor( } } + private suspend fun cleanupMonitorAfterPartialFailure(monitor: Monitor, indexMonitorResponse: IndexResponse) { + // we simply log the success (debug log) or failure (error log) when we try clean up partially failed monitor creation request + try { + DeleteMonitorService.deleteMonitor( + monitor = monitor, + RefreshPolicy.IMMEDIATE + ) + log.debug( + "Cleaned up monitor related resources after monitor creation request partial failure. " + + "Monitor id : ${indexMonitorResponse.id}" + ) + } catch (e: Exception) { + log.error("Failed to clean up monitor after monitor creation request partial failure", e) + } + } + @Suppress("UNCHECKED_CAST") - private suspend fun indexDocLevelMonitorQueries(monitor: Monitor, monitorId: String, refreshPolicy: RefreshPolicy) { - if (!docLevelMonitorQueries.docLevelQueryIndexExists()) { - docLevelMonitorQueries.initDocLevelQueryIndex() - log.info("Central Percolation index ${ScheduledJob.DOC_LEVEL_QUERIES_INDEX} created") + private suspend fun indexDocLevelMonitorQueries( + monitor: Monitor, + monitorId: String, + monitorMetadata: MonitorMetadata, + refreshPolicy: RefreshPolicy + ) { + val queryIndex = monitor.dataSources.queryIndex + if (!docLevelMonitorQueries.docLevelQueryIndexExists(monitor.dataSources)) { + docLevelMonitorQueries.initDocLevelQueryIndex(monitor.dataSources) + log.info("Central Percolation index $queryIndex created") } docLevelMonitorQueries.indexDocLevelQueries( monitor, monitorId, + monitorMetadata, refreshPolicy, indexTimeout ) - log.debug("Queries inserted into Percolate index ${ScheduledJob.DOC_LEVEL_QUERIES_INDEX}") + log.debug("Queries inserted into Percolate index $queryIndex") } private suspend fun updateMonitor() { @@ -488,6 +610,42 @@ class TransportIndexMonitorAction @Inject constructor( if (request.monitor.enabled && currentMonitor.enabled) request.monitor = request.monitor.copy(enabledTime = currentMonitor.enabledTime) + /** + * On update monitor check which backend roles to associate to the monitor. 
+ * Below are 2 examples of how the logic works + * + * Example 1, say we have a Monitor with backend roles [a, b, c, d] associated with it. + * If I'm User A (non-admin user) and I have backend roles [a, b, c] associated with me and I make a request to update + * the Monitor's backend roles to [a, b]. This would mean that the roles to remove are [c] and the roles to add are [a, b]. + * The Monitor's backend roles would then be [a, b, d]. + * + * Example 2, say we have a Monitor with backend roles [a, b, c, d] associated with it. + * If I'm User A (admin user) and I have backend roles [a, b, c] associated with me and I make a request to update + * the Monitor's backend roles to [a, b]. This would mean that the roles to remove are [c, d] and the roles to add are [a, b]. + * The Monitor's backend roles would then be [a, b]. + */ + if (user != null) { + if (request.rbacRoles != null) { + if (isAdmin(user)) { + request.monitor = request.monitor.copy( + user = User(user.name, request.rbacRoles, user.roles, user.customAttNames) + ) + } else { + // rolesToRemove: these are the backend roles to remove from the monitor + val rolesToRemove = user.backendRoles - request.rbacRoles.orEmpty() + // remove the monitor's roles with rolesToRemove and add any roles passed into the request.rbacRoles + val updatedRbac = currentMonitor.user?.backendRoles.orEmpty() - rolesToRemove + request.rbacRoles.orEmpty() + request.monitor = request.monitor.copy( + user = User(user.name, updatedRbac, user.roles, user.customAttNames) + ) + } + } else { + request.monitor = request.monitor + .copy(user = User(user.name, currentMonitor.user!!.backendRoles, user.roles, user.customAttNames)) + } + log.debug("Update monitor backend roles to: ${request.monitor.user?.backendRoles}") + } + request.monitor = request.monitor.copy(schemaVersion = IndexUtils.scheduledJobIndexSchemaVersion) val indexRequest = IndexRequest(SCHEDULED_JOBS_INDEX) .setRefreshPolicy(request.refreshPolicy) @@ -497,6 +655,13 @@ class TransportIndexMonitorAction @Inject constructor( .setIfPrimaryTerm(request.primaryTerm) .timeout(indexTimeout) + log.info( + "Updating monitor, ${currentMonitor.id}, from: ${currentMonitor.toXContentWithUser( + jsonBuilder(), + ToXContent.MapParams(mapOf("with_type" to "true")) + )} \n to: ${request.monitor.toXContentWithUser(jsonBuilder(), ToXContent.MapParams(mapOf("with_type" to "true")))}" + ) + try { val indexResponse: IndexResponse = client.suspendUntil { client.index(indexRequest, it) } val failureReasons = checkShardsFailure(indexResponse) @@ -506,45 +671,25 @@ class TransportIndexMonitorAction @Inject constructor( ) return } - - val metadata = getMonitorMetadata(client, xContentRegistry, "${request.monitor.id}-metadata") - - if (metadata == null) { - val newMetadata = createMetadata() - val indexMetadataRequest = IndexRequest(SCHEDULED_JOBS_INDEX) - .setRefreshPolicy(request.refreshPolicy) - .source(newMetadata.toXContent(jsonBuilder(), ToXContent.MapParams(mapOf("with_type" to "true")))) - .id(newMetadata.id) - .timeout(indexTimeout) - client.suspendUntil { client.index(indexMetadataRequest, it) } - } else if (currentMonitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { - val monitorIndex = (request.monitor.inputs[0] as DocLevelMonitorInput).indices[0] - val runContext = createFullRunContext( - monitorIndex, - metadata.lastRunContext as MutableMap> - ) - val updatedMetadata = metadata.copy(lastRunContext = runContext) - val indexMetadataRequest = IndexRequest(SCHEDULED_JOBS_INDEX) - 
.setRefreshPolicy(request.refreshPolicy) - .source(updatedMetadata.toXContent(jsonBuilder(), ToXContent.MapParams(mapOf("with_type" to "true")))) - .id(metadata.id) - .timeout(indexTimeout) - client.suspendUntil { client.index(indexMetadataRequest, it) } - } - - if (currentMonitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { + var updatedMetadata: MonitorMetadata + val (metadata, created) = MonitorMetadataService.getOrCreateMetadata(request.monitor) + // Recreate runContext if metadata exists + // Delete and insert all queries from/to queryIndex + if (created == false && currentMonitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { + updatedMetadata = MonitorMetadataService.recreateRunContext(metadata, currentMonitor) client.suspendUntil { DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE) - .source(ScheduledJob.DOC_LEVEL_QUERIES_INDEX) + .source(currentMonitor.dataSources.queryIndex) .filter(QueryBuilders.matchQuery("monitor_id", currentMonitor.id)) .execute(it) } - indexDocLevelMonitorQueries(request.monitor, currentMonitor.id, request.refreshPolicy) + indexDocLevelMonitorQueries(request.monitor, currentMonitor.id, updatedMetadata, request.refreshPolicy) + MonitorMetadataService.upsertMetadata(updatedMetadata, updating = true) } actionListener.onResponse( IndexMonitorResponse( indexResponse.id, indexResponse.version, indexResponse.seqNo, - indexResponse.primaryTerm, RestStatus.CREATED, request.monitor + indexResponse.primaryTerm, request.monitor ) ) } catch (t: Exception) { @@ -552,34 +697,6 @@ class TransportIndexMonitorAction @Inject constructor( } } - private suspend fun createFullRunContext( - index: String?, - existingRunContext: MutableMap>? = null - ): MutableMap> { - if (index == null) return mutableMapOf() - val getIndexRequest = GetIndexRequest().indices(index) - val getIndexResponse: GetIndexResponse = client.suspendUntil { - client.admin().indices().getIndex(getIndexRequest, it) - } - val indices = getIndexResponse.indices() - val lastRunContext = existingRunContext?.toMutableMap() ?: mutableMapOf>() - indices.forEach { indexName -> - if (!lastRunContext.containsKey(indexName)) - lastRunContext[indexName] = DocumentLevelMonitorRunner.createRunContext(clusterService, client, indexName) - } - return lastRunContext - } - - private suspend fun createMetadata(): MonitorMetadata { - val monitorIndex = if (request.monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) - (request.monitor.inputs[0] as DocLevelMonitorInput).indices[0] - else null - val runContext = if (request.monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) createFullRunContext(monitorIndex) - else emptyMap() - return MonitorMetadata("${request.monitorId}-metadata", request.monitorId, emptyList(), runContext) - } - private fun checkShardsFailure(response: IndexResponse): String? 
{ val failureReasons = StringBuilder() if (response.shardInfo.failed > 0) { diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexWorkflowAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexWorkflowAction.kt new file mode 100644 index 000000000..f2eace7e9 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportIndexWorkflowAction.kt @@ -0,0 +1,797 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.launch +import org.apache.logging.log4j.LogManager +import org.opensearch.ExceptionsHelper +import org.opensearch.OpenSearchException +import org.opensearch.OpenSearchStatusException +import org.opensearch.ResourceAlreadyExistsException +import org.opensearch.action.ActionRequest +import org.opensearch.action.admin.cluster.health.ClusterHealthAction +import org.opensearch.action.admin.cluster.health.ClusterHealthRequest +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse +import org.opensearch.action.admin.indices.create.CreateIndexResponse +import org.opensearch.action.get.GetRequest +import org.opensearch.action.get.GetResponse +import org.opensearch.action.index.IndexRequest +import org.opensearch.action.index.IndexResponse +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.HandledTransportAction +import org.opensearch.action.support.master.AcknowledgedResponse +import org.opensearch.alerting.MonitorMetadataService +import org.opensearch.alerting.MonitorRunnerService.monitorCtx +import org.opensearch.alerting.WorkflowMetadataService +import org.opensearch.alerting.core.ScheduledJobIndices +import org.opensearch.alerting.opensearchapi.InjectorContextElement +import org.opensearch.alerting.opensearchapi.addFilter +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.opensearchapi.withClosableContext +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERTING_MAX_MONITORS +import org.opensearch.alerting.settings.AlertingSettings.Companion.INDEX_TIMEOUT +import org.opensearch.alerting.settings.AlertingSettings.Companion.MAX_ACTION_THROTTLE_VALUE +import org.opensearch.alerting.settings.AlertingSettings.Companion.REQUEST_TIMEOUT +import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.IndexUtils +import org.opensearch.alerting.util.isADMonitor +import org.opensearch.alerting.util.isQueryLevelMonitor +import org.opensearch.alerting.util.use +import org.opensearch.alerting.workflow.CompositeWorkflowRunner +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentFactory.jsonBuilder +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.action.AlertingActions +import 
org.opensearch.commons.alerting.action.IndexWorkflowRequest +import org.opensearch.commons.alerting.action.IndexWorkflowResponse +import org.opensearch.commons.alerting.model.CompositeInput +import org.opensearch.commons.alerting.model.Delegate +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.commons.authuser.User +import org.opensearch.commons.utils.recreateObject +import org.opensearch.core.action.ActionListener +import org.opensearch.core.common.io.stream.NamedWriteableRegistry +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.index.IndexNotFoundException +import org.opensearch.index.query.QueryBuilders +import org.opensearch.rest.RestRequest +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService +import java.util.UUID +import java.util.stream.Collectors + +private val log = LogManager.getLogger(TransportIndexWorkflowAction::class.java) +private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) + +class TransportIndexWorkflowAction @Inject constructor( + transportService: TransportService, + val client: Client, + actionFilters: ActionFilters, + val scheduledJobIndices: ScheduledJobIndices, + val clusterService: ClusterService, + val settings: Settings, + val xContentRegistry: NamedXContentRegistry, + val namedWriteableRegistry: NamedWriteableRegistry, +) : HandledTransportAction( + AlertingActions.INDEX_WORKFLOW_ACTION_NAME, transportService, actionFilters, ::IndexWorkflowRequest +), + SecureTransportAction { + + @Volatile + private var maxMonitors = ALERTING_MAX_MONITORS.get(settings) + + @Volatile + private var requestTimeout = REQUEST_TIMEOUT.get(settings) + + @Volatile + private var indexTimeout = INDEX_TIMEOUT.get(settings) + + @Volatile + private var maxActionThrottle = MAX_ACTION_THROTTLE_VALUE.get(settings) + + @Volatile + private var allowList = ALLOW_LIST.get(settings) + + @Volatile + override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) + + init { + clusterService.clusterSettings.addSettingsUpdateConsumer(ALERTING_MAX_MONITORS) { maxMonitors = it } + clusterService.clusterSettings.addSettingsUpdateConsumer(REQUEST_TIMEOUT) { requestTimeout = it } + clusterService.clusterSettings.addSettingsUpdateConsumer(INDEX_TIMEOUT) { indexTimeout = it } + clusterService.clusterSettings.addSettingsUpdateConsumer(MAX_ACTION_THROTTLE_VALUE) { maxActionThrottle = it } + clusterService.clusterSettings.addSettingsUpdateConsumer(ALLOW_LIST) { allowList = it } + listenFilterBySettingChange(clusterService) + } + + override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener) { + val transformedRequest = request as? 
IndexWorkflowRequest + ?: recreateObject(request, namedWriteableRegistry) { + IndexWorkflowRequest(it) + } + + val user = readUserFromThreadContext(client) + + if (!validateUserBackendRoles(user, actionListener)) { + return + } + + if ( + user != null && + !isAdmin(user) && + transformedRequest.rbacRoles != null + ) { + if (transformedRequest.rbacRoles?.stream()?.anyMatch { !user.backendRoles.contains(it) } == true) { + log.error( + "User specified backend roles, ${transformedRequest.rbacRoles}, " + + "that they don't have access to. User backend roles: ${user.backendRoles}" + ) + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "User specified backend roles that they don't have access to. Contact administrator", + RestStatus.FORBIDDEN + ) + ) + ) + return + } else if (transformedRequest.rbacRoles?.isEmpty() == true) { + log.error( + "Non-admin users are not allowed to specify an empty set of backend roles. " + + "Please don't pass in the parameter or pass in at least one backend role." + ) + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "Non-admin users are not allowed to specify an empty set of backend roles.", + RestStatus.FORBIDDEN + ) + ) + ) + return + } + } + + scope.launch { + try { + validateMonitorAccess( + transformedRequest, + user, + client, + object : ActionListener<AcknowledgedResponse> { + override fun onResponse(response: AcknowledgedResponse) { + // Stash the context and start the workflow creation + client.threadPool().threadContext.stashContext().use { + IndexWorkflowHandler(client, actionListener, transformedRequest, user).resolveUserAndStart() + } + } + + override fun onFailure(e: Exception) { + log.error("Error indexing workflow", e) + actionListener.onFailure(e) + } + } + ) + } catch (e: Exception) { + log.error("Failed to create workflow", e) + if (e is IndexNotFoundException) { + actionListener.onFailure( + OpenSearchStatusException( + "Monitors not found", + RestStatus.NOT_FOUND + ) + ) + } else { + actionListener.onFailure(e) + } + } + } + } + + inner class IndexWorkflowHandler( + private val client: Client, + private val actionListener: ActionListener<IndexWorkflowResponse>, + private val request: IndexWorkflowRequest, + private val user: User?, + ) { + fun resolveUserAndStart() { + scope.launch { + if (user == null) { + // Security is disabled, so add an empty user to the workflow. user is null for older versions.
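+                    // (Inferred: the all-empty User acts as a placeholder principal so that workflows created on
+                    // security-disabled clusters still carry a non-null user field.)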
+ request.workflow = request.workflow + .copy(user = User("", listOf(), listOf(), listOf())) + start() + } else { + request.workflow = request.workflow + .copy(user = User(user.name, user.backendRoles, user.roles, user.customAttNames)) + start() + } + } + } + + fun start() { + if (!scheduledJobIndices.scheduledJobIndexExists()) { + scheduledJobIndices.initScheduledJobIndex(object : ActionListener<CreateIndexResponse> { + override fun onResponse(response: CreateIndexResponse) { + onCreateMappingsResponse(response.isAcknowledged) + } + + override fun onFailure(t: Exception) { + // https://github.com/opensearch-project/alerting/issues/646 + if (ExceptionsHelper.unwrapCause(t) is ResourceAlreadyExistsException) { + scope.launch { + // Wait for the yellow status + val request = ClusterHealthRequest() + .indices(SCHEDULED_JOBS_INDEX) + .waitForYellowStatus() + val response: ClusterHealthResponse = client.suspendUntil { + execute(ClusterHealthAction.INSTANCE, request, it) + } + if (response.isTimedOut) { + log.error("Workflow creation timeout", t) + actionListener.onFailure( + OpenSearchException("Cannot determine that the $SCHEDULED_JOBS_INDEX index is healthy") + ) + } + // Retry mapping of workflow + onCreateMappingsResponse(true) + } + } else { + log.error("Failed to create workflow", t) + actionListener.onFailure(AlertingException.wrap(t)) + } + } + }) + } else if (!IndexUtils.scheduledJobIndexUpdated) { + IndexUtils.updateIndexMapping( + SCHEDULED_JOBS_INDEX, + ScheduledJobIndices.scheduledJobMappings(), clusterService.state(), client.admin().indices(), + object : ActionListener<AcknowledgedResponse> { + override fun onResponse(response: AcknowledgedResponse) { + onUpdateMappingsResponse(response) + } + + override fun onFailure(t: Exception) { + log.error("Failed to create workflow", t) + actionListener.onFailure(AlertingException.wrap(t)) + } + } + ) + } else { + prepareWorkflowIndexing() + } + } + + /** + * This function prepares for indexing a new workflow. + * If this is an update request, we can simply update the workflow. Otherwise we first check to see how many monitors already exist, + * and compare this to the [maxMonitors] limit. Requests that breach this threshold will be rejected.
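+     * (For example: a PUT for an existing workflow id is routed to updateWorkflow(), while a POST creating a
+     * new workflow is routed to indexWorkflow().)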
+ */ + private fun prepareWorkflowIndexing() { + if (request.method == RestRequest.Method.PUT) { + scope.launch { + updateWorkflow() + } + } else { + scope.launch { + indexWorkflow() + } + } + } + + private fun onCreateMappingsResponse(isAcknowledged: Boolean) { + if (isAcknowledged) { + log.info("Created $SCHEDULED_JOBS_INDEX with mappings.") + prepareWorkflowIndexing() + IndexUtils.scheduledJobIndexUpdated() + } else { + log.error("Create $SCHEDULED_JOBS_INDEX mappings call not acknowledged.") + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "Create $SCHEDULED_JOBS_INDEX mappings call not acknowledged", + RestStatus.INTERNAL_SERVER_ERROR + ) + ) + ) + } + } + + private fun onUpdateMappingsResponse(response: AcknowledgedResponse) { + if (response.isAcknowledged) { + log.info("Updated $SCHEDULED_JOBS_INDEX with mappings.") + IndexUtils.scheduledJobIndexUpdated() + prepareWorkflowIndexing() + } else { + log.error("Update $SCHEDULED_JOBS_INDEX mappings call not acknowledged.") + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "Updated $SCHEDULED_JOBS_INDEX mappings call not acknowledged.", + RestStatus.INTERNAL_SERVER_ERROR + ) + ) + ) + } + } + + private suspend fun indexWorkflow() { + if (user != null) { + val rbacRoles = if (request.rbacRoles == null) user.backendRoles.toSet() + else if (!isAdmin(user)) request.rbacRoles?.intersect(user.backendRoles)?.toSet() + else request.rbacRoles + + request.workflow = request.workflow.copy( + user = User(user.name, rbacRoles.orEmpty().toList(), user.roles, user.customAttNames) + ) + log.debug("Created workflow's backend roles: $rbacRoles") + } + + val indexRequest = IndexRequest(SCHEDULED_JOBS_INDEX) + .setRefreshPolicy(request.refreshPolicy) + .source( + request.workflow.toXContentWithUser( + jsonBuilder(), + ToXContent.MapParams(mapOf("with_type" to "true")) + ) + ) + .setIfSeqNo(request.seqNo) + .setIfPrimaryTerm(request.primaryTerm) + .timeout(indexTimeout) + + try { + val indexResponse: IndexResponse = client.suspendUntil { client.index(indexRequest, it) } + val failureReasons = checkShardsFailure(indexResponse) + if (failureReasons != null) { + log.error("Failed to create workflow: $failureReasons") + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + failureReasons.toString(), + indexResponse.status() + ) + ) + ) + return + } + + val createdWorkflow = request.workflow.copy(id = indexResponse.id) + val executionId = CompositeWorkflowRunner.generateExecutionId(false, createdWorkflow) + + val (workflowMetadata, _) = WorkflowMetadataService.getOrCreateWorkflowMetadata( + workflow = createdWorkflow, + skipIndex = false, + executionId = executionId + ) + + val delegates = (createdWorkflow.inputs[0] as CompositeInput).sequence.delegates.sortedBy { it.order } + val monitors = monitorCtx.workflowService!!.getMonitorsById(delegates.map { it.monitorId }, delegates.size) + + for (monitor in monitors) { + var (monitorMetadata, created) = MonitorMetadataService.getOrCreateMetadata( + monitor = monitor, + createWithRunContext = true, + workflowMetadataId = workflowMetadata.id + ) + + if (created == false) { + log.warn("Metadata doc id:${monitorMetadata.id} exists, but it shouldn't!") + } + + if (monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { + val oldMonitorMetadata = MonitorMetadataService.getMetadata(monitor) + monitorMetadata = monitorMetadata.copy(sourceToQueryIndexMapping = oldMonitorMetadata!!.sourceToQueryIndexMapping) + } + // When 
inserting queries in queryIndex we could update sourceToQueryIndexMapping + MonitorMetadataService.upsertMetadata(monitorMetadata, updating = true) + } + actionListener.onResponse( + IndexWorkflowResponse( + indexResponse.id, indexResponse.version, indexResponse.seqNo, + indexResponse.primaryTerm, request.workflow.copy(id = indexResponse.id) + ) + ) + } catch (t: Exception) { + log.error("Failed to index workflow", t) + actionListener.onFailure(AlertingException.wrap(t)) + } + } + + private suspend fun updateWorkflow() { + val getRequest = GetRequest(SCHEDULED_JOBS_INDEX, request.workflowId) + try { + val getResponse: GetResponse = client.suspendUntil { client.get(getRequest, it) } + if (!getResponse.isExists) { + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "Workflow with ${request.workflowId} is not found", + RestStatus.NOT_FOUND + ) + ) + ) + return + } + val xcp = XContentHelper.createParser( + xContentRegistry, LoggingDeprecationHandler.INSTANCE, + getResponse.sourceAsBytesRef, XContentType.JSON + ) + val workflow = ScheduledJob.parse(xcp, getResponse.id, getResponse.version) as Workflow + onGetResponse(workflow) + } catch (t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + + private suspend fun onGetResponse(currentWorkflow: Workflow) { + if (!checkUserPermissionsWithResource( + user, + currentWorkflow.user, + actionListener, + "workfklow", + request.workflowId + ) + ) { + return + } + + // If both are enabled, use the current existing monitor enabled time, otherwise the next execution will be + // incorrect. + if (request.workflow.enabled && currentWorkflow.enabled) + request.workflow = request.workflow.copy(enabledTime = currentWorkflow.enabledTime) + + /** + * On update workflow check which backend roles to associate to the workflow. + * Below are 2 examples of how the logic works + * + * Example 1, say we have a Workflow with backend roles [a, b, c, d] associated with it. + * If I'm User A (non-admin user) and I have backend roles [a, b, c] associated with me and I make a request to update + * the Workflow's backend roles to [a, b]. This would mean that the roles to remove are [c] and the roles to add are [a, b]. + * The Workflow's backend roles would then be [a, b, d]. + * + * Example 2, say we have a Workflow with backend roles [a, b, c, d] associated with it. + * If I'm User A (admin user) and I have backend roles [a, b, c] associated with me and I make a request to update + * the Workflow's backend roles to [a, b]. This would mean that the roles to remove are [c, d] and the roles to add are [a, b]. + * The Workflow's backend roles would then be [a, b]. 
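+             *
+             * In short: an admin's requested role list replaces the existing one outright, while a non-admin
+             * can only add roles they hold and only remove their own roles, so roles owned by other users
+             * (like [d] in Example 1) are preserved.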
+ */ + if (user != null) { + if (request.rbacRoles != null) { + if (isAdmin(user)) { + request.workflow = request.workflow.copy( + user = User(user.name, request.rbacRoles, user.roles, user.customAttNames) + ) + } else { + // rolesToRemove: these are the backend roles to remove from the monitor + val rolesToRemove = user.backendRoles - request.rbacRoles.orEmpty() + // remove the monitor's roles with rolesToRemove and add any roles passed into the request.rbacRoles + val updatedRbac = + currentWorkflow.user?.backendRoles.orEmpty() - rolesToRemove + request.rbacRoles.orEmpty() + request.workflow = request.workflow.copy( + user = User(user.name, updatedRbac, user.roles, user.customAttNames) + ) + } + } else { + request.workflow = request.workflow + .copy( + user = User( + user.name, + currentWorkflow.user!!.backendRoles, + user.roles, + user.customAttNames + ) + ) + } + log.debug("Update workflow backend roles to: ${request.workflow.user?.backendRoles}") + } + + request.workflow = request.workflow.copy(schemaVersion = IndexUtils.scheduledJobIndexSchemaVersion) + val indexRequest = IndexRequest(SCHEDULED_JOBS_INDEX) + .setRefreshPolicy(request.refreshPolicy) + .source( + request.workflow.toXContentWithUser( + jsonBuilder(), + ToXContent.MapParams(mapOf("with_type" to "true")) + ) + ) + .id(request.workflowId) + .setIfSeqNo(request.seqNo) + .setIfPrimaryTerm(request.primaryTerm) + .timeout(indexTimeout) + + try { + val indexResponse: IndexResponse = client.suspendUntil { client.index(indexRequest, it) } + val failureReasons = checkShardsFailure(indexResponse) + if (failureReasons != null) { + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + failureReasons.toString(), + indexResponse.status() + ) + ) + ) + return + } + + val updatedWorkflow = request.workflow.copy(id = indexResponse.id) + val executionId = CompositeWorkflowRunner.generateExecutionId(false, updatedWorkflow) + + val (workflowMetadata, _) = WorkflowMetadataService.getOrCreateWorkflowMetadata( + workflow = updatedWorkflow, + skipIndex = false, + executionId = executionId + ) + + val delegates = (updatedWorkflow.inputs[0] as CompositeInput).sequence.delegates.sortedBy { it.order } + val monitors = monitorCtx.workflowService!!.getMonitorsById(delegates.map { it.monitorId }, delegates.size) + + for (monitor in monitors) { + val (monitorMetadata, created) = MonitorMetadataService.getOrCreateMetadata( + monitor = monitor, + createWithRunContext = true, + workflowMetadataId = workflowMetadata.id + ) + + if (created == false && monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { + var updatedMetadata = MonitorMetadataService.recreateRunContext(monitorMetadata, monitor) + val oldMonitorMetadata = MonitorMetadataService.getMetadata(monitor) + updatedMetadata = updatedMetadata.copy(sourceToQueryIndexMapping = oldMonitorMetadata!!.sourceToQueryIndexMapping) + MonitorMetadataService.upsertMetadata(updatedMetadata, updating = true) + } + } + actionListener.onResponse( + IndexWorkflowResponse( + indexResponse.id, indexResponse.version, indexResponse.seqNo, + indexResponse.primaryTerm, request.workflow.copy(id = currentWorkflow.id) + ) + ) + } catch (t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + + private fun checkShardsFailure(response: IndexResponse): String? 
+            val failureReasons = StringBuilder()
+            if (response.shardInfo.failed > 0) {
+                response.shardInfo.failures.forEach { entry ->
+                    failureReasons.append(entry.reason())
+                }
+                return failureReasons.toString()
+            }
+            return null
+        }
+    }
+
+    private fun validateChainedMonitorFindingsMonitors(delegates: List<Delegate>, monitorDelegates: List<Monitor>) {
+        infix fun <T> List<T>.equalsIgnoreOrder(other: List<T>) =
+            this.size == other.size && this.toSet() == other.toSet()
+
+        val monitorsById = monitorDelegates.associateBy { it.id }
+        delegates.forEach {
+
+            val delegateMonitor = monitorsById[it.monitorId] ?: throw AlertingException.wrap(
+                IllegalArgumentException("Delegate monitor ${it.monitorId} doesn't exist")
+            )
+            if (it.chainedMonitorFindings != null) {
+                val chainedMonitorIds: MutableList<String> = mutableListOf()
+                if (it.chainedMonitorFindings!!.monitorId.isNullOrBlank()) {
+                    chainedMonitorIds.addAll(it.chainedMonitorFindings!!.monitorIds)
+                } else {
+                    chainedMonitorIds.add(it.chainedMonitorFindings!!.monitorId!!)
+                }
+                chainedMonitorIds.forEach { chainedMonitorId ->
+                    val chainedFindingMonitor =
+                        monitorsById[chainedMonitorId] ?: throw AlertingException.wrap(
+                            IllegalArgumentException("Chained finding monitor $chainedMonitorId doesn't exist")
+                        )
+
+                    if (chainedFindingMonitor.isQueryLevelMonitor()) {
+                        throw AlertingException.wrap(IllegalArgumentException("Query level monitor can't be part of chained findings"))
+                    }
+
+                    val delegateMonitorIndices = getMonitorIndices(delegateMonitor)
+
+                    val chainedMonitorIndices = getMonitorIndices(chainedFindingMonitor)
+
+                    if (!delegateMonitorIndices.containsAll(chainedMonitorIndices)) {
+                        throw AlertingException.wrap(
+                            IllegalArgumentException(
+                                "Delegate monitor indices ${delegateMonitorIndices.joinToString()} " +
+                                    "don't query all of the chained findings monitor's indices ${chainedMonitorIndices.joinToString()}"
+                            )
+                        )
+                    }
+                }
+            }
+        }
+    }
+
+    /**
+     * Returns the list of indices for the given monitor depending on its type
+     */
+    private fun getMonitorIndices(monitor: Monitor): List<String> {
+        return when (monitor.monitorType) {
+            Monitor.MonitorType.DOC_LEVEL_MONITOR -> (monitor.inputs[0] as DocLevelMonitorInput).indices
+            Monitor.MonitorType.BUCKET_LEVEL_MONITOR -> monitor.inputs.flatMap { s -> (s as SearchInput).indices }
+            Monitor.MonitorType.QUERY_LEVEL_MONITOR -> {
+                if (isADMonitor(monitor)) monitor.inputs.flatMap { s -> (s as SearchInput).indices }
+                else {
+                    val indices = mutableListOf<String>()
+                    for (input in monitor.inputs) {
+                        when (input) {
+                            is SearchInput -> indices.addAll(input.indices)
+                            else -> indices
+                        }
+                    }
+                    indices
+                }
+            }
+
+            else -> emptyList()
+        }
+    }
+
+    private fun validateDelegateMonitorsExist(
+        monitorIds: List<String>,
+        delegateMonitors: List<Monitor>,
+    ) {
+        val reqMonitorIds: MutableList<String> = monitorIds as MutableList<String>
+        delegateMonitors.forEach {
+            reqMonitorIds.remove(it.id)
+        }
+        if (reqMonitorIds.isNotEmpty()) {
+            throw AlertingException.wrap(IllegalArgumentException(("${reqMonitorIds.joinToString()} are not valid monitor ids")))
+        }
+    }
+
+    /**
+     * Validates monitor and indices access
+     * 1. Validates the monitor access (if the filterByEnabled is set to true - adds backend role filter) as admin
+     * 2. Unstashes the context and checks if the user can access the monitor indices
+     */
+    private suspend fun validateMonitorAccess(
+        request: IndexWorkflowRequest,
+        user: User?,
+        client: Client,
+        actionListener: ActionListener<AcknowledgedResponse>,
+    ) {
+        val compositeInput = request.workflow.inputs[0] as CompositeInput
+        val monitorIds = compositeInput.sequence.delegates.stream().map { it.monitorId }.collect(Collectors.toList())
+        val query = QueryBuilders.boolQuery().filter(QueryBuilders.termsQuery("_id", monitorIds))
+        val searchSource = SearchSourceBuilder().query(query)
+        val searchRequest = SearchRequest(SCHEDULED_JOBS_INDEX).source(searchSource)
+
+        if (user != null && !isAdmin(user) && filterByEnabled) {
+            addFilter(user, searchRequest.source(), "monitor.user.backend_roles.keyword")
+        }
+
+        val searchMonitorResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) }
+
+        if (searchMonitorResponse.isTimedOut) {
+            throw OpenSearchException("Cannot determine that the $SCHEDULED_JOBS_INDEX index is healthy")
+        }
+        val monitors = mutableListOf<Monitor>()
+        for (hit in searchMonitorResponse.hits) {
+            XContentType.JSON.xContent().createParser(
+                xContentRegistry,
+                LoggingDeprecationHandler.INSTANCE, hit.sourceAsString
+            ).use { hitsParser ->
+                val monitor = ScheduledJob.parse(hitsParser, hit.id, hit.version) as Monitor
+                monitors.add(monitor)
+            }
+        }
+        if (monitors.isEmpty()) {
+            actionListener.onFailure(
+                AlertingException.wrap(
+                    OpenSearchStatusException(
+                        "User doesn't have read permissions for one or more configured monitors ${monitorIds.joinToString()}",
+                        RestStatus.FORBIDDEN
+                    )
+                )
+            )
+            return
+        }
+        // Validate delegates and their chained findings
+        try {
+            validateDelegateMonitorsExist(monitorIds, monitors)
+            validateChainedMonitorFindingsMonitors(compositeInput.sequence.delegates, monitors)
+        } catch (e: Exception) {
+            actionListener.onFailure(e)
+            return
+        }
+        val indices = getMonitorIndices(monitors)
+
+        val indicesSearchRequest = SearchRequest().indices(*indices.toTypedArray())
+            .source(SearchSourceBuilder.searchSource().size(1).query(QueryBuilders.matchAllQuery()))
+
+        if (user != null && filterByEnabled) {
+            // Unstash the context and check if user with specified roles has indices access
+            withClosableContext(
+                InjectorContextElement(
+                    user.name.plus(UUID.randomUUID().toString()),
+                    settings,
+                    client.threadPool().threadContext,
+                    user.roles,
+                    user
+                )
+            ) {
+                checkIndicesAccess(client, indicesSearchRequest, indices, actionListener)
+            }
+        } else {
+            checkIndicesAccess(client, indicesSearchRequest, indices, actionListener)
+        }
+    }
+
+    /**
+     * Checks if the client can access the given indices
+     */
+    private fun checkIndicesAccess(
+        client: Client,
+        indicesSearchRequest: SearchRequest?,
+        indices: MutableList<String>,
+        actionListener: ActionListener<AcknowledgedResponse>,
+    ) {
+        client.search(
+            indicesSearchRequest,
+            object : ActionListener<SearchResponse> {
+                override fun onResponse(response: SearchResponse?) {
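+                    // The probe search succeeding at all (regardless of hits) is taken as proof of read
+                    // access to the monitor indices.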
+                    actionListener.onResponse(AcknowledgedResponse(true))
+                }
+
+                override fun onFailure(e: Exception) {
+                    log.error("Error accessing the monitor indices", e)
+                    actionListener.onFailure(
+                        AlertingException.wrap(
+                            OpenSearchStatusException(
+                                "User doesn't have read permissions for one or more configured indices ${indices.joinToString()}",
+                                RestStatus.FORBIDDEN
+                            )
+                        )
+                    )
+                }
+            }
+        )
+    }
+
+    /**
+     * Extract indices from monitors
+     */
+    private fun getMonitorIndices(monitors: List<Monitor>): MutableList<String> {
+        val indices = mutableListOf<String>()
+
+        val searchInputs =
+            monitors.flatMap { monitor ->
+                monitor.inputs.filter {
+                    it.name() == SearchInput.SEARCH_FIELD || it.name() == DocLevelMonitorInput.DOC_LEVEL_INPUT_FIELD
+                }
+            }
+        searchInputs.forEach {
+            val inputIndices = if (it.name() == SearchInput.SEARCH_FIELD) (it as SearchInput).indices
+            else (it as DocLevelMonitorInput).indices
+            indices.addAll(inputIndices)
+        }
+        return indices
+    }
+}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchEmailAccountAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchEmailAccountAction.kt
index fcf4b1a04..d449ee9a2 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchEmailAccountAction.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchEmailAccountAction.kt
@@ -6,7 +6,6 @@ package org.opensearch.alerting.transport
 
 import org.opensearch.OpenSearchStatusException
-import org.opensearch.action.ActionListener
 import org.opensearch.action.search.SearchRequest
 import org.opensearch.action.search.SearchResponse
 import org.opensearch.action.support.ActionFilters
@@ -15,11 +14,13 @@ import org.opensearch.alerting.action.SearchEmailAccountAction
 import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST
 import org.opensearch.alerting.util.AlertingException
 import org.opensearch.alerting.util.DestinationType
+import org.opensearch.alerting.util.use
 import org.opensearch.client.Client
 import org.opensearch.cluster.service.ClusterService
 import org.opensearch.common.inject.Inject
 import org.opensearch.common.settings.Settings
-import org.opensearch.rest.RestStatus
+import org.opensearch.core.action.ActionListener
+import org.opensearch.core.rest.RestStatus
 import org.opensearch.tasks.Task
 import org.opensearch.transport.TransportService
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchEmailGroupAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchEmailGroupAction.kt
index 707094482..0c0a4c4bf 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchEmailGroupAction.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchEmailGroupAction.kt
@@ -6,7 +6,6 @@ package org.opensearch.alerting.transport
 
 import org.opensearch.OpenSearchStatusException
-import org.opensearch.action.ActionListener
 import org.opensearch.action.search.SearchRequest
 import org.opensearch.action.search.SearchResponse
 import org.opensearch.action.support.ActionFilters
@@ -15,11 +14,13 @@ import org.opensearch.alerting.action.SearchEmailGroupAction
 import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST
 import org.opensearch.alerting.util.AlertingException
 import org.opensearch.alerting.util.DestinationType
+import org.opensearch.alerting.util.use
 import org.opensearch.client.Client
 import org.opensearch.cluster.service.ClusterService
 import org.opensearch.common.inject.Inject
 import org.opensearch.common.settings.Settings
-import org.opensearch.rest.RestStatus
+import org.opensearch.core.action.ActionListener
+import org.opensearch.core.rest.RestStatus
 import org.opensearch.tasks.Task
 import org.opensearch.transport.TransportService
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt
index f97e382ff..f4fffca0a 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt
@@ -6,21 +6,32 @@ package org.opensearch.alerting.transport
 
 import org.apache.logging.log4j.LogManager
-import org.opensearch.action.ActionListener
+import org.opensearch.action.ActionRequest
 import org.opensearch.action.search.SearchRequest
 import org.opensearch.action.search.SearchResponse
 import org.opensearch.action.support.ActionFilters
 import org.opensearch.action.support.HandledTransportAction
-import org.opensearch.alerting.action.SearchMonitorAction
-import org.opensearch.alerting.action.SearchMonitorRequest
 import org.opensearch.alerting.opensearchapi.addFilter
 import org.opensearch.alerting.settings.AlertingSettings
 import org.opensearch.alerting.util.AlertingException
+import org.opensearch.alerting.util.use
 import org.opensearch.client.Client
 import org.opensearch.cluster.service.ClusterService
 import org.opensearch.common.inject.Inject
 import org.opensearch.common.settings.Settings
+import org.opensearch.commons.alerting.action.AlertingActions
+import org.opensearch.commons.alerting.action.SearchMonitorRequest
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.commons.alerting.model.ScheduledJob
+import org.opensearch.commons.alerting.model.Workflow
 import org.opensearch.commons.authuser.User
+import org.opensearch.commons.utils.recreateObject
+import org.opensearch.core.action.ActionListener
+import org.opensearch.core.common.io.stream.NamedWriteableRegistry
+import org.opensearch.index.query.BoolQueryBuilder
+import org.opensearch.index.query.ExistsQueryBuilder
+import org.opensearch.index.query.MatchQueryBuilder
+import org.opensearch.index.query.QueryBuilders
 import org.opensearch.tasks.Task
 import org.opensearch.transport.TransportService
 
@@ -31,9 +42,10 @@ class TransportSearchMonitorAction @Inject constructor(
     val settings: Settings,
     val client: Client,
     clusterService: ClusterService,
-    actionFilters: ActionFilters
-) : HandledTransportAction<SearchMonitorRequest, SearchResponse>(
-    SearchMonitorAction.NAME, transportService, actionFilters, ::SearchMonitorRequest
+    actionFilters: ActionFilters,
+    val namedWriteableRegistry: NamedWriteableRegistry
+) : HandledTransportAction<ActionRequest, SearchResponse>(
+    AlertingActions.SEARCH_MONITORS_ACTION_NAME, transportService, actionFilters, ::SearchMonitorRequest
 ),
     SecureTransportAction {
 
     @Volatile
@@ -42,10 +54,34 @@ class TransportSearchMonitorAction @Inject constructor(
         listenFilterBySettingChange(clusterService)
     }
 
-    override fun doExecute(task: Task, searchMonitorRequest: SearchMonitorRequest, actionListener: ActionListener<SearchResponse>) {
+    override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener<SearchResponse>) {
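+        // A request that originates from another plugin arrives as a generic ActionRequest, so it is
+        // round-tripped through the NamedWriteableRegistry via recreateObject to rebuild a SearchMonitorRequest.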
+        val transformedRequest = request as? SearchMonitorRequest
+            ?: recreateObject(request, namedWriteableRegistry) {
+                SearchMonitorRequest(it)
+            }
+
+        val searchSourceBuilder = transformedRequest.searchRequest.source()
+            .seqNoAndPrimaryTerm(true)
+            .version(true)
+        val queryBuilder = if (searchSourceBuilder.query() == null) BoolQueryBuilder()
+        else QueryBuilders.boolQuery().must(searchSourceBuilder.query())
+
+        // The SearchMonitor API supports one 'index' parameter of either the SCHEDULED_JOBS_INDEX or ALL_ALERT_INDEX_PATTERN.
+        // When querying the ALL_ALERT_INDEX_PATTERN, we don't want to check whether the MONITOR_TYPE field exists
+        // because we're querying alert indexes.
+        if (transformedRequest.searchRequest.indices().contains(ScheduledJob.SCHEDULED_JOBS_INDEX)) {
+            val monitorWorkflowType = QueryBuilders.boolQuery().should(QueryBuilders.existsQuery(Monitor.MONITOR_TYPE))
+                .should(QueryBuilders.existsQuery(Workflow.WORKFLOW_TYPE))
+            queryBuilder.must(monitorWorkflowType)
+        }
+
+        searchSourceBuilder.query(queryBuilder)
+            .seqNoAndPrimaryTerm(true)
+            .version(true)
+        addOwnerFieldIfNotExists(transformedRequest.searchRequest)
         val user = readUserFromThreadContext(client)
         client.threadPool().threadContext.stashContext().use {
-            resolve(searchMonitorRequest, actionListener, user)
+            resolve(transformedRequest, actionListener, user)
         }
     }
 
@@ -78,4 +114,16 @@
     }
+
+    private fun addOwnerFieldIfNotExists(searchRequest: SearchRequest) {
+        if (searchRequest.source().query() == null || searchRequest.source().query().toString().contains("monitor.owner") == false) {
+            var boolQueryBuilder: BoolQueryBuilder = if (searchRequest.source().query() == null) BoolQueryBuilder()
+            else QueryBuilders.boolQuery().must(searchRequest.source().query())
+            val bqb = BoolQueryBuilder()
+            bqb.should().add(BoolQueryBuilder().mustNot(ExistsQueryBuilder("monitor.owner")))
+            bqb.should().add(BoolQueryBuilder().must(MatchQueryBuilder("monitor.owner", "alerting")))
+            boolQueryBuilder.filter(bqb)
+            searchRequest.source().query(boolQueryBuilder)
+        }
+    }
 }
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionRPNResolver.kt b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionRPNResolver.kt
index 749214048..45937c8ab 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionRPNResolver.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionRPNResolver.kt
@@ -5,11 +5,11 @@ package org.opensearch.alerting.triggercondition.resolvers
 
-import org.opensearch.alerting.core.model.DocLevelQuery
 import org.opensearch.alerting.triggercondition.tokens.ExpressionToken
 import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionConstant
 import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionOperator
 import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionToken
+import org.opensearch.commons.alerting.model.DocLevelQuery
 import java.util.Optional
 import java.util.Stack
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionResolver.kt b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionResolver.kt
index faeabad08..fea22c356 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionResolver.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionResolver.kt
@@ -5,7 +5,7 @@ package org.opensearch.alerting.triggercondition.resolvers
 
-import org.opensearch.alerting.core.model.DocLevelQuery
+import org.opensearch.commons.alerting.model.DocLevelQuery
 
 interface TriggerExpressionResolver {
     fun evaluate(queryToDocIds: Map<DocLevelQuery, Set<String>>): Set<String>
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/util/AggregationQueryRewriter.kt b/alerting/src/main/kotlin/org/opensearch/alerting/util/AggregationQueryRewriter.kt
index 066dfa3c2..e1b6675b2 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/util/AggregationQueryRewriter.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/util/AggregationQueryRewriter.kt
@@ -6,10 +6,10 @@ package org.opensearch.alerting.util
 
 import org.opensearch.action.search.SearchResponse
-import org.opensearch.alerting.model.BucketLevelTrigger
 import org.opensearch.alerting.model.InputRunResults
-import org.opensearch.alerting.model.Trigger
 import org.opensearch.alerting.model.TriggerAfterKey
+import org.opensearch.commons.alerting.model.BucketLevelTrigger
+import org.opensearch.commons.alerting.model.Trigger
 import org.opensearch.search.aggregations.AggregationBuilder
 import org.opensearch.search.aggregations.AggregatorFactories
 import org.opensearch.search.aggregations.bucket.SingleBucketAggregation
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/util/AlertingException.kt b/alerting/src/main/kotlin/org/opensearch/alerting/util/AlertingException.kt
index 4df774b6d..4127afaa2 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/util/AlertingException.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/util/AlertingException.kt
@@ -9,11 +9,11 @@ import org.apache.logging.log4j.LogManager
 import org.opensearch.OpenSearchException
 import org.opensearch.OpenSearchSecurityException
 import org.opensearch.OpenSearchStatusException
-import org.opensearch.common.Strings
+import org.opensearch.core.common.Strings
+import org.opensearch.core.rest.RestStatus
 import org.opensearch.index.IndexNotFoundException
 import org.opensearch.index.engine.VersionConflictEngineException
 import org.opensearch.indices.InvalidIndexNameException
-import org.opensearch.rest.RestStatus
 
 private val log = LogManager.getLogger(AlertingException::class.java)
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/util/AlertingUtils.kt b/alerting/src/main/kotlin/org/opensearch/alerting/util/AlertingUtils.kt
index fd44d525c..c912768cb 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/util/AlertingUtils.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/util/AlertingUtils.kt
@@ -6,28 +6,23 @@ package org.opensearch.alerting.util
 
 import org.apache.logging.log4j.LogManager
-import org.opensearch.action.index.IndexRequest
-import org.opensearch.action.index.IndexResponse
-import org.opensearch.action.support.WriteRequest
-import org.opensearch.alerting.core.model.ScheduledJob
-import org.opensearch.alerting.model.AggregationResultBucket
 import org.opensearch.alerting.model.BucketLevelTriggerRunResult
-import org.opensearch.alerting.model.Monitor
-import org.opensearch.alerting.model.MonitorMetadata
-import org.opensearch.alerting.model.action.Action
-import org.opensearch.alerting.model.action.ActionExecutionPolicy
-import org.opensearch.alerting.model.action.ActionExecutionScope
 import org.opensearch.alerting.model.destination.Destination
-import org.opensearch.alerting.opensearchapi.suspendUntil
-import org.opensearch.alerting.settings.AlertingSettings
 import org.opensearch.alerting.settings.DestinationSettings
-import org.opensearch.client.Client
+import org.opensearch.cluster.service.ClusterService
 import org.opensearch.common.settings.Settings
-import org.opensearch.common.xcontent.ToXContent
-import org.opensearch.common.xcontent.XContentFactory
+import org.opensearch.common.util.concurrent.ThreadContext
+import org.opensearch.commons.alerting.model.AggregationResultBucket
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.commons.alerting.model.action.Action
+import org.opensearch.commons.alerting.model.action.ActionExecutionPolicy
+import org.opensearch.commons.alerting.model.action.ActionExecutionScope
+import org.opensearch.commons.alerting.util.isBucketLevelMonitor
 
 private val logger = LogManager.getLogger("AlertingUtils")
 
+val MAX_SEARCH_SIZE = 10000
+
 /**
  * RFC 5322 compliant pattern matching: https://www.ietf.org/rfc/rfc5322.txt
  * Regex was based off of this post: https://stackoverflow.com/a/201378
@@ -46,15 +41,37 @@ fun isValidEmail(email: String): Boolean {
     return validEmailPattern.matches(email)
 }
 
+fun getRoleFilterEnabled(clusterService: ClusterService, settings: Settings, settingPath: String): Boolean {
+    var adBackendRoleFilterEnabled: Boolean
+    val metaData = clusterService.state().metadata()
+
+    // get default value for setting
+    if (clusterService.clusterSettings.get(settingPath) != null) {
+        adBackendRoleFilterEnabled = clusterService.clusterSettings.get(settingPath).getDefault(settings) as Boolean
+    } else {
+        // default setting doesn't exist, so returning false as it means AD plugins isn't in cluster anyway
+        return false
+    }
+
+    // Transient settings are prioritized so those are checked first.
+    return if (metaData.transientSettings().get(settingPath) != null) {
+        metaData.transientSettings().getAsBoolean(settingPath, adBackendRoleFilterEnabled)
+    } else if (metaData.persistentSettings().get(settingPath) != null) {
+        metaData.persistentSettings().getAsBoolean(settingPath, adBackendRoleFilterEnabled)
+    } else {
+        adBackendRoleFilterEnabled
+    }
+}
+
 /** Allowed Destinations are ones that are specified in the [DestinationSettings.ALLOW_LIST] setting. */
 fun Destination.isAllowed(allowList: List<String>): Boolean = allowList.contains(this.type.value)
 
 fun Destination.isTestAction(): Boolean = this.type == DestinationType.TEST_ACTION
 
-fun Monitor.isBucketLevelMonitor(): Boolean = this.monitorType == Monitor.MonitorType.BUCKET_LEVEL_MONITOR
-
 fun Monitor.isDocLevelMonitor(): Boolean = this.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR
 
+fun Monitor.isQueryLevelMonitor(): Boolean = this.monitorType == Monitor.MonitorType.QUERY_LEVEL_MONITOR
+
 /**
  * Since buckets can have multi-value keys, this converts the bucket key values to a string that can be used
  * as the key for a HashMap to easily retrieve [AggregationResultBucket] based on the bucket key values.
@@ -124,12 +141,39 @@ fun defaultToPerExecutionAction(
     return false
 }
 
-suspend fun updateMonitorMetadata(client: Client, settings: Settings, monitorMetadata: MonitorMetadata): IndexResponse {
-    val indexRequest = IndexRequest(ScheduledJob.SCHEDULED_JOBS_INDEX)
-        .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
-        .source(monitorMetadata.toXContent(XContentFactory.jsonBuilder(), ToXContent.MapParams(mapOf("with_type" to "true"))))
-        .id(monitorMetadata.id)
-        .timeout(AlertingSettings.INDEX_TIMEOUT.get(settings))
+/**
+ * Executes the given [block] function on this resource and then closes it down correctly whether an exception
+ * is thrown or not.
+ *
+ * In case if the resource is being closed due to an exception occurred in [block], and the closing also fails with an exception,
+ * the latter is added to the [suppressed][java.lang.Throwable.addSuppressed] exceptions of the former.
+ *
+ * @param block a function to process this [AutoCloseable] resource.
+ * @return the result of [block] function invoked on this resource.
+ */
+inline fun <T : ThreadContext.StoredContext, R> T.use(block: (T) -> R): R {
+    var exception: Throwable? = null
+    try {
+        return block(this)
+    } catch (e: Throwable) {
+        exception = e
+        throw e
+    } finally {
+        closeFinally(exception)
+    }
+}
 
-    return client.suspendUntil { client.index(indexRequest, it) }
+/**
+ * Closes this [AutoCloseable], suppressing possible exception or error thrown by [AutoCloseable.close] function when
+ * it's being closed due to some other [cause] exception occurred.
+ *
+ * The suppressed exception is added to the list of suppressed exceptions of [cause] exception.
+ */
+fun ThreadContext.StoredContext.closeFinally(cause: Throwable?) = when (cause) {
+    null -> close()
+    else -> try {
+        close()
+    } catch (closeException: Throwable) {
+        cause.addSuppressed(closeException)
+    }
 }
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/util/AnomalyDetectionUtils.kt b/alerting/src/main/kotlin/org/opensearch/alerting/util/AnomalyDetectionUtils.kt
index e1c7903f7..e83f45a15 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/util/AnomalyDetectionUtils.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/util/AnomalyDetectionUtils.kt
@@ -6,10 +6,10 @@ package org.opensearch.alerting.util
 
 import org.apache.lucene.search.join.ScoreMode
-import org.opensearch.alerting.core.model.SearchInput
-import org.opensearch.alerting.model.Monitor
-import org.opensearch.common.Strings
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.commons.alerting.model.SearchInput
 import org.opensearch.commons.authuser.User
+import org.opensearch.core.common.Strings
 import org.opensearch.index.query.BoolQueryBuilder
 import org.opensearch.index.query.NestedQueryBuilder
 import org.opensearch.index.query.QueryBuilders
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/util/CrossClusterMonitorUtils.kt b/alerting/src/main/kotlin/org/opensearch/alerting/util/CrossClusterMonitorUtils.kt
new file mode 100644
index 000000000..6ec14ffa2
--- /dev/null
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/util/CrossClusterMonitorUtils.kt
@@ -0,0 +1,231 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.util
+
+import org.opensearch.action.search.SearchRequest
+import org.opensearch.client.Client
+import org.opensearch.client.node.NodeClient
+import org.opensearch.cluster.service.ClusterService
+import org.opensearch.commons.alerting.model.ClusterMetricsInput
+import org.opensearch.commons.alerting.model.DocLevelMonitorInput
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.commons.alerting.model.SearchInput
+
+class CrossClusterMonitorUtils {
+    companion object {
+
+        /**
+         * Uses the monitor inputs to determine whether the monitor makes calls to remote clusters.
+         * @param monitor The monitor to evaluate.
+         * @param localClusterName The name of the local cluster.
+         * @return TRUE if the monitor makes calls to remote clusters; otherwise returns FALSE.
+         */
+        @JvmStatic
+        fun isRemoteMonitor(monitor: Monitor, localClusterName: String): Boolean {
+            var isRemoteMonitor = false
+            monitor.inputs.forEach inputCheck@{
+                when (it) {
+                    is ClusterMetricsInput -> {
+                        it.clusters.forEach { clusterName ->
+                            if (clusterName != localClusterName) {
+                                isRemoteMonitor = true
+                                return@inputCheck
+                            }
+                        }
+                    }
+                    is SearchInput -> {
+                        // Remote indexes follow the pattern "<CLUSTER_NAME>:<INDEX_NAME>".
+                        // Index entries without a CLUSTER_NAME indicate they're stored on the local cluster.
+                        it.indices.forEach { index ->
+                            val clusterName = parseClusterName(index)
+                            if (clusterName != localClusterName) {
+                                isRemoteMonitor = true
+                                return@inputCheck
+                            }
+                        }
+                    }
+                    is DocLevelMonitorInput -> {
+                        // TODO: When document level monitors are supported, this check will be similar to SearchInput.
+                        throw IllegalArgumentException("Per document monitors do not currently support cross-cluster search.")
+                    }
+                    else -> {
+                        throw IllegalArgumentException("Unsupported input type: ${it.name()}.")
+                    }
+                }
+            }
+            return isRemoteMonitor
+        }
+
+        /**
+         * Uses the monitor inputs to determine whether the monitor makes calls to remote clusters.
+         * @param monitor The monitor to evaluate.
+         * @param clusterService Used to retrieve the name of the local cluster.
+         * @return TRUE if the monitor makes calls to remote clusters; otherwise returns FALSE.
+         */
+        @JvmStatic
+        fun isRemoteMonitor(monitor: Monitor, clusterService: ClusterService): Boolean {
+            return isRemoteMonitor(monitor = monitor, localClusterName = clusterService.clusterName.value())
+        }
+
+        /**
+         * Parses the list of indexes into a map of CLUSTER_NAME to List<INDEX_NAME>.
+         * @param indexes A list of index names in "<CLUSTER_NAME>:<INDEX_NAME>" format.
+         * @param localClusterName The name of the local cluster.
+         * @return A map of CLUSTER_NAME to List<INDEX_NAME>
+         */
+        @JvmStatic
+        fun separateClusterIndexes(indexes: List<String>, localClusterName: String): HashMap<String, MutableList<String>> {
+            val output = hashMapOf<String, MutableList<String>>()
+            indexes.forEach { index ->
+                var clusterName = parseClusterName(index)
+                val indexName = parseIndexName(index)
+
+                // If the index entry does not have a CLUSTER_NAME, it indicates the index is on the local cluster.
+                if (clusterName.isEmpty()) clusterName = localClusterName
+
+                output.getOrPut(clusterName) { mutableListOf() }.add(indexName)
+            }
+            return output
+        }
+
+        /**
+         * Parses the list of indexes into a map of CLUSTER_NAME to List<INDEX_NAME>.
+         * @param indexes A list of index names in "<CLUSTER_NAME>:<INDEX_NAME>" format.
+         *      Local indexes can also be in "<INDEX_NAME>" format.
+         * @param clusterService Used to retrieve the name of the local cluster.
+         * @return A map of CLUSTER_NAME to List<INDEX_NAME>
+         */
+        @JvmStatic
+        fun separateClusterIndexes(indexes: List<String>, clusterService: ClusterService): HashMap<String, MutableList<String>> {
+            return separateClusterIndexes(indexes = indexes, localClusterName = clusterService.clusterName.value())
+        }
+
+        /**
+         * The [NodeClient] used by the plugin cannot execute searches against local indexes
+         * using format "<CLUSTER_NAME>:<INDEX_NAME>". That format only supports querying remote indexes.
+         * This function formats a list of indexes to be supplied directly to a [SearchRequest].
+         * @param indexes A list of index names in "<CLUSTER_NAME>:<INDEX_NAME>" format.
+         * @param localClusterName The name of the local cluster.
+         * @return A list of indexes with any remote indexes in "<CLUSTER_NAME>:<INDEX_NAME>" format,
+         *      and any local indexes in "<INDEX_NAME>" format.
+         */
+        @JvmStatic
+        fun parseIndexesForRemoteSearch(indexes: List<String>, localClusterName: String): List<String> {
+            return indexes.map {
+                var index = it
+                val clusterName = parseClusterName(it)
+                if (clusterName.isNotEmpty() && clusterName == localClusterName) {
+                    index = parseIndexName(it)
+                }
+                index
+            }
+        }
+
+        /**
+         * The [NodeClient] used by the plugin cannot execute searches against local indexes
+         * using format "<CLUSTER_NAME>:<INDEX_NAME>". That format only supports querying remote indexes.
+         * This function formats a list of indexes to be supplied directly to a [SearchRequest].
+         * @param indexes A list of index names in "<CLUSTER_NAME>:<INDEX_NAME>" format.
+         * @param clusterService Used to retrieve the name of the local cluster.
+         * @return A list of indexes with any remote indexes in "<CLUSTER_NAME>:<INDEX_NAME>" format,
+         *      and any local indexes in "<INDEX_NAME>" format.
+         */
+        @JvmStatic
+        fun parseIndexesForRemoteSearch(indexes: List<String>, clusterService: ClusterService): List<String> {
+            return parseIndexesForRemoteSearch(indexes = indexes, localClusterName = clusterService.clusterName.value())
+        }
+
+        /**
+         * Uses the clusterName to determine whether the target client is the local or a remote client,
+         * and returns the appropriate client.
+         * @param clusterName The name of the cluster to evaluate.
+         * @param client The local [NodeClient].
+         * @param localClusterName The name of the local cluster.
+         * @return The local [NodeClient] for the local cluster, or a remote client for a remote cluster.
+         */
+        @JvmStatic
+        fun getClientForCluster(clusterName: String, client: Client, localClusterName: String): Client {
+            return if (clusterName == localClusterName) client else client.getRemoteClusterClient(clusterName)
+        }
+
+        /**
+         * Uses the clusterName to determine whether the target client is the local or a remote client,
+         * and returns the appropriate client.
+         * @param clusterName The name of the cluster to evaluate.
+         * @param client The local [NodeClient].
+         * @param clusterService Used to retrieve the name of the local cluster.
+         * @return The local [NodeClient] for the local cluster, or a remote client for a remote cluster.
+         */
+        @JvmStatic
+        fun getClientForCluster(clusterName: String, client: Client, clusterService: ClusterService): Client {
+            return getClientForCluster(clusterName = clusterName, client = client, localClusterName = clusterService.clusterName.value())
+        }
+
+        /**
+         * Uses the index name to determine whether the target client is the local or a remote client,
+         * and returns the appropriate client.
+         * @param index The name of the index to evaluate.
+         *      Can be in either "<CLUSTER_NAME>:<INDEX_NAME>" or "<INDEX_NAME>" format.
+         * @param client The local [NodeClient].
+         * @param localClusterName The name of the local cluster.
+         * @return The local [NodeClient] for the local cluster, or a remote client for a remote cluster.
+         */
+        @JvmStatic
+        fun getClientForIndex(index: String, client: Client, localClusterName: String): Client {
+            val clusterName = parseClusterName(index)
+            return if (clusterName.isNotEmpty() && clusterName != localClusterName)
+                client.getRemoteClusterClient(clusterName) else client
+        }
+
+        /**
+         * Uses the index name to determine whether the target client is the local or a remote client,
+         * and returns the appropriate client.
+         * @param index The name of the index to evaluate.
+         *      Can be in either "<CLUSTER_NAME>:<INDEX_NAME>" or "<INDEX_NAME>" format.
+         * @param client The local [NodeClient].
+         * @param clusterService Used to retrieve the name of the local cluster.
+         * @return The local [NodeClient] for the local cluster, or a remote client for a remote cluster.
+         */
+        @JvmStatic
+        fun getClientForIndex(index: String, client: Client, clusterService: ClusterService): Client {
+            return getClientForIndex(index = index, client = client, localClusterName = clusterService.clusterName.value())
+        }
+
+        /**
+         * @param index The name of the index to evaluate.
+         *      Can be in either "<CLUSTER_NAME>:<INDEX_NAME>" or "<INDEX_NAME>" format.
+         * @return The cluster name if present; else an empty string.
+         */
+        @JvmStatic
+        fun parseClusterName(index: String): String {
+            return if (index.contains(":")) index.split(":").getOrElse(0) { "" }
+            else ""
+        }
+
+        /**
+         * @param index The name of the index to evaluate.
+         *      Can be in either "<CLUSTER_NAME>:<INDEX_NAME>" or "<INDEX_NAME>" format.
+         * @return The index name.
+         */
+        @JvmStatic
+        fun parseIndexName(index: String): String {
+            return if (index.contains(":")) index.split(":").getOrElse(1) { index }
+            else index
+        }
+
+        /**
+         * If clusterName is provided, combines the inputs into "<CLUSTER_NAME>:<INDEX_NAME>" format.
+         * @param clusterName
+         * @param indexName
+         * @return The formatted string.
+         */
+        @JvmStatic
+        fun formatClusterAndIndexName(clusterName: String, indexName: String): String {
+            return if (clusterName.isNotEmpty()) "$clusterName:$indexName"
+            else indexName
+        }
+    }
+}
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/util/DocLevelMonitorQueries.kt b/alerting/src/main/kotlin/org/opensearch/alerting/util/DocLevelMonitorQueries.kt
index 87b364bfc..42237853f 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/util/DocLevelMonitorQueries.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/util/DocLevelMonitorQueries.kt
@@ -6,41 +6,116 @@ package org.opensearch.alerting.util
 
 import org.apache.logging.log4j.LogManager
+import org.opensearch.ExceptionsHelper
+import org.opensearch.OpenSearchStatusException
 import org.opensearch.ResourceAlreadyExistsException
+import org.opensearch.action.admin.indices.alias.Alias
 import org.opensearch.action.admin.indices.create.CreateIndexRequest
 import org.opensearch.action.admin.indices.create.CreateIndexResponse
-import org.opensearch.action.admin.indices.get.GetIndexRequest
-import org.opensearch.action.admin.indices.get.GetIndexResponse
+import org.opensearch.action.admin.indices.delete.DeleteIndexRequest
 import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest
+import org.opensearch.action.admin.indices.rollover.RolloverRequest
+import org.opensearch.action.admin.indices.rollover.RolloverResponse
+import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest
+import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse
+import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest
 import org.opensearch.action.bulk.BulkRequest
 import org.opensearch.action.bulk.BulkResponse
 import org.opensearch.action.index.IndexRequest
 import org.opensearch.action.support.WriteRequest.RefreshPolicy
 import org.opensearch.action.support.master.AcknowledgedResponse
-import org.opensearch.alerting.core.model.DocLevelMonitorInput
-import org.opensearch.alerting.core.model.DocLevelQuery
-import org.opensearch.alerting.core.model.ScheduledJob
-import org.opensearch.alerting.model.Monitor
+import org.opensearch.alerting.MonitorRunnerService.monitorCtx
+import org.opensearch.alerting.model.MonitorMetadata
 import org.opensearch.alerting.opensearchapi.suspendUntil
 import org.opensearch.client.Client
+import org.opensearch.cluster.ClusterState
 import org.opensearch.cluster.service.ClusterService
 import org.opensearch.common.settings.Settings
 import org.opensearch.common.unit.TimeValue
+import org.opensearch.common.xcontent.XContentType
+import org.opensearch.commons.alerting.model.DataSources
+import org.opensearch.commons.alerting.model.DocLevelMonitorInput
+import org.opensearch.commons.alerting.model.DocLevelQuery
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.commons.alerting.model.ScheduledJob
+import org.opensearch.core.rest.RestStatus
+import org.opensearch.index.mapper.MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING
 
 private val log = LogManager.getLogger(DocLevelMonitorQueries::class.java)
 
 class DocLevelMonitorQueries(private val client: Client, private val clusterService: ClusterService) {
     companion object {
+
+        const val PROPERTIES = "properties"
+        const val NESTED = "nested"
+        const val TYPE = "type"
+        const val INDEX_PATTERN_SUFFIX = "-000001"
+        const val QUERY_INDEX_BASE_FIELDS_COUNT = 8 // 3 fields we defined and 5 builtin additional metadata fields
         @JvmStatic
         fun docLevelQueriesMappings(): String {
             return DocLevelMonitorQueries::class.java.classLoader.getResource("mappings/doc-level-queries.json").readText()
         }
+        fun docLevelQueriesSettings(): Settings {
+            return Settings.builder().loadFromSource(
+                DocLevelMonitorQueries::class.java.classLoader.getResource("settings/doc-level-queries.json").readText(),
+                XContentType.JSON
+            ).build()
+        }
     }
 
     suspend fun initDocLevelQueryIndex(): Boolean {
         if (!docLevelQueryIndexExists()) {
-            val indexRequest = CreateIndexRequest(ScheduledJob.DOC_LEVEL_QUERIES_INDEX)
+            // Since we changed queryIndex to be an alias, for backwards compatibility we have to delete the index
+            // with the same name as our alias, to avoid a name clash.
+            if (clusterService.state().metadata.hasIndex(ScheduledJob.DOC_LEVEL_QUERIES_INDEX)) {
+                val acknowledgedResponse: AcknowledgedResponse = client.suspendUntil {
+                    admin().indices().delete(DeleteIndexRequest(ScheduledJob.DOC_LEVEL_QUERIES_INDEX), it)
+                }
+                if (!acknowledgedResponse.isAcknowledged) {
+                    val errorMessage = "Deletion of old queryIndex [${ScheduledJob.DOC_LEVEL_QUERIES_INDEX}] index is not acknowledged!"
+                    log.error(errorMessage)
+                    throw AlertingException.wrap(OpenSearchStatusException(errorMessage, RestStatus.INTERNAL_SERVER_ERROR))
+                }
+            }
+            val alias = ScheduledJob.DOC_LEVEL_QUERIES_INDEX
+            val indexPattern = ScheduledJob.DOC_LEVEL_QUERIES_INDEX + INDEX_PATTERN_SUFFIX
+            val indexRequest = CreateIndexRequest(indexPattern)
+                .mapping(docLevelQueriesMappings())
+                .alias(Alias(alias))
+                .settings(docLevelQueriesSettings())
+            return try {
+                val createIndexResponse: CreateIndexResponse = client.suspendUntil { client.admin().indices().create(indexRequest, it) }
+                createIndexResponse.isAcknowledged
+            } catch (t: Exception) {
+                if (ExceptionsHelper.unwrapCause(t) is ResourceAlreadyExistsException) {
+                    true
+                } else {
+                    throw t
+                }
+            }
+        }
+        return true
+    }
+
+    suspend fun initDocLevelQueryIndex(dataSources: DataSources): Boolean {
+        if (dataSources.queryIndex == ScheduledJob.DOC_LEVEL_QUERIES_INDEX) {
+            return initDocLevelQueryIndex()
+        }
+        // Since we changed queryIndex to be an alias, for backwards compatibility we have to delete the index
+        // with the same name as our alias, to avoid a name clash.
+        if (clusterService.state().metadata.hasIndex(dataSources.queryIndex)) {
+            val acknowledgedResponse: AcknowledgedResponse = client.suspendUntil {
+                admin().indices().delete(DeleteIndexRequest(dataSources.queryIndex), it)
+            }
+            if (!acknowledgedResponse.isAcknowledged) {
+                log.warn("Deletion of old queryIndex [${dataSources.queryIndex}] index is not acknowledged!")
+            }
+        }
+        val alias = dataSources.queryIndex
+        val indexPattern = dataSources.queryIndex + INDEX_PATTERN_SUFFIX
+        if (!clusterService.state().metadata.hasAlias(alias)) {
+            val indexRequest = CreateIndexRequest(indexPattern)
                 .mapping(docLevelQueriesMappings())
+                .alias(Alias(alias))
                 .settings(
                     Settings.builder().put("index.hidden", true)
                         .build()
@@ -48,8 +123,8 @@ class DocLevelMonitorQueries(private val client: Client, private val clusterServ
             return try {
                 val createIndexResponse: CreateIndexResponse = client.suspendUntil { client.admin().indices().create(indexRequest, it) }
                 createIndexResponse.isAcknowledged
-            } catch (t: ResourceAlreadyExistsException) {
-                if (t.message?.contains("already exists") == true) {
+            } catch (t: Exception) {
+                if (ExceptionsHelper.unwrapCause(t) is ResourceAlreadyExistsException) {
                     true
                 } else {
                     throw t
@@ -59,82 +134,474 @@ class DocLevelMonitorQueries(private val client: Client, private val clusterServ
         return true
     }
 
+    fun docLevelQueryIndexExists(dataSources: DataSources): Boolean {
+        val clusterState = clusterService.state()
+        return clusterState.metadata.hasAlias(dataSources.queryIndex)
+    }
+
     fun docLevelQueryIndexExists(): Boolean {
         val clusterState = clusterService.state()
-        return clusterState.routingTable.hasIndex(ScheduledJob.DOC_LEVEL_QUERIES_INDEX)
+        return clusterState.metadata.hasAlias(ScheduledJob.DOC_LEVEL_QUERIES_INDEX)
     }
 
+    /**
+     * Does a DFS traversal of the index mappings tree.
+     * Calls processLeafFn on every leaf node.
+     * Populates the flattenPaths map with the full paths of leaf nodes.
+     * @param node current node which we're visiting
+     * @param currentPath current node path from root node
+     * @param processLeafFn leaf processor function which is called on every leaf discovered
+     * @param flattenPaths map of full paths of all leaf nodes relative to root
+     */
+    fun traverseMappingsAndUpdate(
+        node: MutableMap<String, Any>,
+        currentPath: String,
+        processLeafFn: (String, String, MutableMap<String, Any>) -> Triple<String, String, MutableMap<String, Any>>,
+        flattenPaths: MutableMap<String, MutableMap<String, Any>>
+    ) {
+        // If a node contains a "properties" property then it is an internal (non-leaf) node
+        log.debug("Node in traverse: $node")
+        // newNodes will hold the list of updated leaf properties
+        var newNodes = ArrayList<Triple<String, String, Any>>(node.size)
+        node.entries.forEach {
+            // Compute full path relative to root
+            val fullPath = if (currentPath.isEmpty()) it.key
+            else "$currentPath.${it.key}"
+            val nodeProps = it.value as MutableMap<String, Any>
+            // If it has a type property and the type is not "nested" then this is a leaf
+            if (nodeProps.containsKey(TYPE) && nodeProps[TYPE] != NESTED) {
+                // At this point we know the full path of the node, so we add it to the output map
+                flattenPaths.put(fullPath, nodeProps)
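+                // e.g. for mappings { "user": { "properties": { "id": { "type": "keyword" } } } }
+                // the leaf "id" is visited with fullPath "user.id" (illustrative mappings)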
+                // Calls processLeafFn and gets old node name, new node name and new properties of node.
+                // This is all the information we need to update this node
+                val (oldName, newName, props) = processLeafFn(it.key, fullPath, it.value as MutableMap<String, Any>)
+                newNodes.add(Triple(oldName, newName, props))
+            } else {
+                // Internal (non-leaf) node - visit children
+                traverseMappingsAndUpdate(nodeProps[PROPERTIES] as MutableMap<String, Any>, fullPath, processLeafFn, flattenPaths)
+            }
+        }
+        // Here we can update all processed leaves in the tree
+        newNodes.forEach {
+            // If we renamed the leaf, we have to remove it first
+            if (it.first != it.second) {
+                node.remove(it.first)
+            }
+            // Put the new properties of the leaf
+            node.put(it.second, it.third)
+        }
+    }
 
     suspend fun indexDocLevelQueries(
         monitor: Monitor,
         monitorId: String,
+        monitorMetadata: MonitorMetadata,
         refreshPolicy: RefreshPolicy = RefreshPolicy.IMMEDIATE,
         indexTimeout: TimeValue
     ) {
         val docLevelMonitorInput = monitor.inputs[0] as DocLevelMonitorInput
-        val index = docLevelMonitorInput.indices[0]
         val queries: List<DocLevelQuery> = docLevelMonitorInput.queries
+        val indices = docLevelMonitorInput.indices
 
         val clusterState = clusterService.state()
 
-        val getIndexRequest = GetIndexRequest().indices(index)
-        val getIndexResponse: GetIndexResponse = client.suspendUntil {
-            client.admin().indices().getIndex(getIndexRequest, it)
-        }
-        val indices = getIndexResponse.indices()
+        // Run through each backing index and apply appropriate mappings to query index
+        indices.forEach { indexName ->
+            var concreteIndices = IndexUtils.resolveAllIndices(
+                listOf(indexName),
+                monitorCtx.clusterService!!,
+                monitorCtx.indexNameExpressionResolver!!
+            )
+            if (IndexUtils.isAlias(indexName, monitorCtx.clusterService!!.state()) ||
+                IndexUtils.isDataStream(indexName, monitorCtx.clusterService!!.state())
+            ) {
+                val lastWriteIndex = concreteIndices.find { monitorMetadata.lastRunContext.containsKey(it) }
+                if (lastWriteIndex != null) {
+                    val lastWriteIndexCreationDate =
+                        IndexUtils.getCreationDateForIndex(lastWriteIndex, monitorCtx.clusterService!!.state())
+                    concreteIndices = IndexUtils.getNewestIndicesByCreationDate(
+                        concreteIndices,
+                        monitorCtx.clusterService!!.state(),
+                        lastWriteIndexCreationDate
+                    )
+                }
+            }
+            val updatedIndexName = indexName.replace("*", "_")
+            val updatedProperties = mutableMapOf<String, Any>()
+            val allFlattenPaths = mutableSetOf<Pair<String, String>>()
+            var sourceIndexFieldLimit = 0L
+            val conflictingFields = getAllConflictingFields(clusterState, concreteIndices)
 
-        indices?.forEach { indexName ->
-            if (clusterState.routingTable.hasIndex(indexName)) {
-                val indexMetadata = clusterState.metadata.index(indexName)
-                if (indexMetadata.mapping()?.sourceAsMap?.get("properties") != null) {
-                    val properties = (
-                        (indexMetadata.mapping()?.sourceAsMap?.get("properties"))
-                            as Map<String, Map<String, Any>>
-                        )
+            concreteIndices.forEach { concreteIndexName ->
+                if (clusterState.routingTable.hasIndex(concreteIndexName)) {
+                    val indexMetadata = clusterState.metadata.index(concreteIndexName)
+                    if (indexMetadata.mapping()?.sourceAsMap?.get("properties") != null) {
+                        val properties = (
+                            (indexMetadata.mapping()?.sourceAsMap?.get("properties"))
+                                as MutableMap<String, Any>
+                            )
+                        // Node processor function is used to process leaves of index mappings tree
+                        //
+                        val leafNodeProcessor =
+                            fun(fieldName: String, fullPath: String, props: MutableMap<String, Any>):
+                                Triple<String, String, MutableMap<String, Any>> {
+                                val newProps = props.toMutableMap()
+                                if (monitor.dataSources.queryIndexMappingsByType.isNotEmpty()) {
+                                    val mappingsByType = monitor.dataSources.queryIndexMappingsByType
+                                    if (props.containsKey("type") && mappingsByType.containsKey(props["type"]!!)) {
+                                        mappingsByType[props["type"]]?.entries?.forEach { iter: Map.Entry<String, Any> ->
+                                            newProps[iter.key] = iter.value
+                                        }
+                                    }
+                                }
 
-                    val updatedProperties = properties.entries.associate {
-                        if (it.value.containsKey("path")) {
-                            val newVal = it.value.toMutableMap()
-                            newVal["path"] = "${it.value["path"]}_${indexName}_$monitorId"
-                            "${it.key}_${indexName}_$monitorId" to newVal
-                        } else {
-                            "${it.key}_${indexName}_$monitorId" to it.value
+                                return if (conflictingFields.contains(fullPath)) {
+                                    if (props.containsKey("path")) {
+                                        newProps["path"] = "${props["path"]}_${concreteIndexName}_$monitorId"
+                                    }
+                                    Triple(fieldName, "${fieldName}_${concreteIndexName}_$monitorId", newProps)
+                                } else {
+                                    if (props.containsKey("path")) {
+                                        newProps["path"] = "${props["path"]}_${updatedIndexName}_$monitorId"
+                                    }
+                                    Triple(fieldName, "${fieldName}_${updatedIndexName}_$monitorId", newProps)
+                                }
+                            }
+                        // Traverse and update index mappings here while extracting flattened field paths
+                        val flattenPaths = mutableMapOf<String, MutableMap<String, Any>>()
+                        traverseMappingsAndUpdate(properties, "", leafNodeProcessor, flattenPaths)
+                        flattenPaths.keys.forEach { allFlattenPaths.add(Pair(it, concreteIndexName)) }
+                        // Updated mappings ready to be applied on queryIndex
+                        properties.forEach {
+                            if (
+                                it.value is Map<*, *> &&
+                                (it.value as Map<String, Any>).containsKey("type") &&
+                                (it.value as Map<String, Any>)["type"] == NESTED
+                            ) {
+                            } else {
+                                if (updatedProperties.containsKey(it.key) && updatedProperties[it.key] != it.value) {
+                                    val mergedField = mergeConflictingFields(
+                                        updatedProperties[it.key] as Map<String, Any>,
+                                        it.value as Map<String, Any>
+                                    )
+                                    updatedProperties[it.key] = mergedField
+                                } else {
+                                    updatedProperties[it.key] = it.value
+                                }
+                            }
+                        }
+                        sourceIndexFieldLimit += checkMaxFieldLimit(concreteIndexName)
+                    }
+                }
+            }
+            // Updates mappings of the concrete queryIndex. This can roll the queryIndex over if the field mapping limit is reached.
+            val (updateMappingResponse, concreteQueryIndex) = updateQueryIndexMappings(
+                monitor,
+                monitorMetadata,
+                updatedIndexName,
+                sourceIndexFieldLimit,
+                updatedProperties
+            )
+
+            if (updateMappingResponse.isAcknowledged) {
+                doIndexAllQueries(
+                    concreteQueryIndex,
+                    updatedIndexName,
+                    monitorId,
+                    queries,
+                    allFlattenPaths,
+                    conflictingFields,
+                    refreshPolicy,
+                    indexTimeout
+                )
+            }
+        }
+    }
-
+    private suspend fun doIndexAllQueries(
+        concreteQueryIndex: String,
+        sourceIndex: String,
+        monitorId: String,
+        queries: List<DocLevelQuery>,
+        flattenPaths: MutableSet<Pair<String, String>>,
+        conflictingPaths: Set<String>,
+        refreshPolicy: RefreshPolicy,
+        indexTimeout: TimeValue
+    ) {
+        val indexRequests = mutableListOf<IndexRequest>()
+        val conflictingPathToConcreteIndices = mutableMapOf<String, MutableSet<String>>()
+        flattenPaths.forEach { fieldPath ->
+            if (conflictingPaths.contains(fieldPath.first)) {
+                if (conflictingPathToConcreteIndices.containsKey(fieldPath.first)) {
+                    val concreteIndexSet = conflictingPathToConcreteIndices[fieldPath.first]
+                    concreteIndexSet!!.add(fieldPath.second)
+                    conflictingPathToConcreteIndices[fieldPath.first] = concreteIndexSet
+                } else {
+                    val concreteIndexSet = mutableSetOf<String>()
+                    concreteIndexSet.add(fieldPath.second)
+                    conflictingPathToConcreteIndices[fieldPath.first] = concreteIndexSet
+                }
+            }
+        }
+
+        val newQueries = mutableListOf<DocLevelQuery>()
+        queries.forEach {
+            val filteredConcreteIndices = mutableSetOf<String>()
+            var query = it.query
+            conflictingPaths.forEach { conflictingPath ->
+                if (query.contains(conflictingPath)) {
+                    query = query.replace("$conflictingPath:", "${conflictingPath}__$monitorId:")
+                    filteredConcreteIndices.addAll(conflictingPathToConcreteIndices[conflictingPath]!!)
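+                    // e.g. with conflicting path "client.ip", a query "client.ip:10.0.0.1" has just been
+                    // rewritten to "client.ip__<monitorId>:10.0.0.1" and is fanned out per matching concrete
+                    // index below (illustrative field and value)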
+                }
+            }
+
+            if (filteredConcreteIndices.isNotEmpty()) {
+                filteredConcreteIndices.forEach { filteredConcreteIndex ->
+                    val newQuery = it.copy(
+                        id = "${it.id}_$filteredConcreteIndex",
+                        query = query.replace("<index>", filteredConcreteIndex)
+                    )
+                    newQueries.add(newQuery)
+                }
+            } else {
+                newQueries.add(it.copy(id = "${it.id}_$sourceIndex"))
+            }
+        }
+
+        newQueries.forEach {
+            var query = it.query
+            flattenPaths.forEach { fieldPath ->
+                if (!conflictingPaths.contains(fieldPath.first)) {
+                    query = query.replace("${fieldPath.first}:", "${fieldPath.first}_${sourceIndex}_$monitorId:")
+                }
+            }
+            val indexRequest = IndexRequest(concreteQueryIndex)
+                .id(it.id + "_$monitorId")
+                .source(
+                    mapOf(
+                        "query" to mapOf("query_string" to mapOf("query" to query, "fields" to it.fields)),
+                        "monitor_id" to monitorId,
+                        "index" to sourceIndex
+                    )
+                )
+            indexRequests.add(indexRequest)
+        }
+        log.debug("bulk inserting percolate [${queries.size}] queries")
+        if (indexRequests.isNotEmpty()) {
+            val bulkResponse: BulkResponse = client.suspendUntil {
+                client.bulk(
+                    BulkRequest().setRefreshPolicy(refreshPolicy).timeout(indexTimeout).add(indexRequests), it
+                )
+            }
+            bulkResponse.forEach { bulkItemResponse ->
+                if (bulkItemResponse.isFailed) {
+                    log.debug(bulkItemResponse.failureMessage)
+                }
+            }
+        }
+    }
+
+    private suspend fun updateQueryIndexMappings(
+        monitor: Monitor,
+        monitorMetadata: MonitorMetadata,
+        sourceIndex: String,
+        sourceIndexFieldLimit: Long,
+        updatedProperties: MutableMap<String, Any>
+    ): Pair<AcknowledgedResponse, String> {
+        var targetQueryIndex = monitorMetadata.sourceToQueryIndexMapping[sourceIndex + monitor.id]
+        if (targetQueryIndex == null) {
+            // The queryIndex is an alias that always has exactly one backing index, which is the write index.
+            // This is because the _rollover API maintains only a single index under an alias
+            // unless the is_write_index setting is added when the index is initially created.
+            targetQueryIndex = getWriteIndexNameForAlias(monitor.dataSources.queryIndex)
+            if (targetQueryIndex == null) {
+                val message = "Failed to get write index for queryIndex alias:${monitor.dataSources.queryIndex}"
+                log.error(message)
+                throw AlertingException.wrap(
+                    OpenSearchStatusException(message, RestStatus.INTERNAL_SERVER_ERROR)
+                )
+            }
+            monitorMetadata.sourceToQueryIndexMapping[sourceIndex + monitor.id] = targetQueryIndex
+        }
+        val updateMappingRequest = PutMappingRequest(targetQueryIndex)
+        updateMappingRequest.source(mapOf("properties" to updatedProperties))
+        var updateMappingResponse = AcknowledgedResponse(false)
+        try {
+            // Adjust the max field limit in the mappings for the query index, if needed.
+            adjustMaxFieldLimitForQueryIndex(sourceIndexFieldLimit, targetQueryIndex)
+            updateMappingResponse = client.suspendUntil {
+                client.admin().indices().putMapping(updateMappingRequest, it)
+            }
+            return Pair(updateMappingResponse, targetQueryIndex)
+        } catch (e: Exception) {
+            val unwrappedException = ExceptionsHelper.unwrapCause(e) as Exception
+            log.debug("exception after rollover queryIndex index: $targetQueryIndex exception: ${unwrappedException.message}")
+            // If we reached the limit for the total number of fields in the mappings, do a rollover here
+            if (unwrappedException.message?.contains("Limit of total fields") == true) {
+                try {
+                    // Do a queryIndex rollover
+                    targetQueryIndex = rolloverQueryIndex(monitor)
+                    // Adjust the max field limit in the mappings for the new index.
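+                    // Illustrative recovery flow (index names hypothetical): if the alias
+                    // ".opensearch-alerting-queries" pointed at write index "...-000001" when the
+                    // "Limit of total fields" error was thrown, the rollover above repoints the
+                    // alias at a fresh "...-000002", and the limit adjustment and mapping update
+                    // below are retried against that new backing index.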
+                    adjustMaxFieldLimitForQueryIndex(sourceIndexFieldLimit, targetQueryIndex)
+                    // PUT the mappings to the newly created index
+                    val updateMappingRequest = PutMappingRequest(targetQueryIndex)
                     updateMappingRequest.source(mapOf("properties" to updatedProperties))
-                    val updateMappingResponse: AcknowledgedResponse = client.suspendUntil {
+                    updateMappingResponse = client.suspendUntil {
                         client.admin().indices().putMapping(updateMappingRequest, it)
                     }
+                } catch (e: Exception) {
+                    // If we reached the limit for the total number of fields in the mappings even after the rollover,
+                    // it means the source index has more than (FIELD_LIMIT - 3) fields (every query index has 3 fields of its own defined)
+                    // TODO maybe split queries/mappings between multiple query indices?
+                    val unwrappedException = ExceptionsHelper.unwrapCause(e) as Exception
+                    log.debug("exception after rollover queryIndex index: $targetQueryIndex exception: ${unwrappedException.message}")
+                    if (unwrappedException.message?.contains("Limit of total fields") == true) {
+                        val errorMessage =
+                            "Monitor [${monitorMetadata.monitorId}] can't process index [$sourceIndex] due to field mapping limit"
+                        log.error(errorMessage)
+                        throw AlertingException(errorMessage, RestStatus.INTERNAL_SERVER_ERROR, e)
+                    } else {
+                        throw AlertingException.wrap(e)
+                    }
+                }
+            } else {
+                log.debug("unknown exception during PUT mapping on queryIndex: $targetQueryIndex")
+                val unwrappedException = ExceptionsHelper.unwrapCause(e) as Exception
+                throw AlertingException.wrap(unwrappedException)
+            }
+        }
+        // We rolled over, so try to apply the mappings again on the new targetQueryIndex
+        if (targetQueryIndex.isNotEmpty()) {
+            // Add the newly created index to the monitor's metadata object so that we can fetch it later on,
+            // when either applying mappings or running queries
+            monitorMetadata.sourceToQueryIndexMapping[sourceIndex + monitor.id] = targetQueryIndex
+        } else {
+            val failureMessage = "Failed to resolve targetQueryIndex!"
+            log.error(failureMessage)
+            throw AlertingException(failureMessage, RestStatus.INTERNAL_SERVER_ERROR, IllegalStateException(failureMessage))
+        }
+        return Pair(updateMappingResponse, targetQueryIndex)
+    }
 
-            if (updateMappingResponse.isAcknowledged) {
-                val indexRequests = mutableListOf<IndexRequest>()
-                queries.forEach {
-                    var query = it.query
-                    properties.forEach { prop ->
-                        query = query.replace("${prop.key}:", "${prop.key}_${indexName}_$monitorId:")
-                    }
-                    val indexRequest = IndexRequest(ScheduledJob.DOC_LEVEL_QUERIES_INDEX)
-                        .id(it.id + "_${indexName}_$monitorId")
-                        .source(
-                            mapOf(
-                                "query" to mapOf("query_string" to mapOf("query" to query)),
-                                "monitor_id" to monitorId,
-                                "index" to indexName
-                            )
-                        )
-                    indexRequests.add(indexRequest)
+    /**
+     * Merge conflicting leaf fields in the mapping tree
+     */
+    private fun mergeConflictingFields(oldField: Map<String, Any>, newField: Map<String, Any>): Map<String, Any> {
+        val mergedField = mutableMapOf<String, Any>()
+        oldField.entries.forEach {
+            if (newField.containsKey(it.key)) {
+                if (it.value is Map<*, *> && newField[it.key] is Map<*, *>) {
+                    mergedField[it.key] =
+                        mergeConflictingFields(it.value as Map<String, Any>, newField[it.key] as Map<String, Any>)
+                } else {
+                    mergedField[it.key] = it.value
+                }
+            } else {
+                mergedField[it.key] = it.value
+            }
+        }
+
+        newField.entries.forEach {
+            if (!oldField.containsKey(it.key)) {
+                mergedField[it.key] = it.value
+            }
+        }
+        return mergedField
+    }
+
+    /**
+     * Get all fields which have the same name but different mappings belonging to an index pattern
+     */
+    fun getAllConflictingFields(clusterState: ClusterState, concreteIndices: List<String>): Set<String> {
+        val conflictingFields = mutableSetOf<String>()
+        val allFlattenPaths = mutableMapOf<String, MutableMap<String, Any>>()
+        concreteIndices.forEach { concreteIndexName ->
+            if (clusterState.routingTable.hasIndex(concreteIndexName)) {
+                val indexMetadata = clusterState.metadata.index(concreteIndexName)
+                if (indexMetadata.mapping()?.sourceAsMap?.get("properties") != null) {
+                    val properties = (
+                        (indexMetadata.mapping()?.sourceAsMap?.get("properties"))
+                            as MutableMap<String, Any>
+                        )
+                    // The node processor function is used to process the leaves of the index mappings tree
+                    val leafNodeProcessor =
+                        fun(fieldName: String, _: String, props: MutableMap<String, Any>): Triple<String, String, MutableMap<String, Any>> {
+                            return Triple(fieldName, fieldName, props)
                         }
-                if (indexRequests.isNotEmpty()) {
-                    val bulkResponse: BulkResponse = client.suspendUntil {
-                        client.bulk(
-                            BulkRequest().setRefreshPolicy(refreshPolicy).timeout(indexTimeout).add(indexRequests), it
-                        )
-                    }
+                    // Traverse the index mappings here while extracting the flattened field paths
+                    val flattenPaths = mutableMapOf<String, MutableMap<String, Any>>()
+                    traverseMappingsAndUpdate(properties, "", leafNodeProcessor, flattenPaths)
+
+                    flattenPaths.forEach {
+                        if (allFlattenPaths.containsKey(it.key) && allFlattenPaths[it.key]!! != it.value) {
+                            conflictingFields.add(it.key)
                         }
+                        allFlattenPaths.putIfAbsent(it.key, it.value)
                     }
                 }
             }
         }
+        return conflictingFields
+    }
+
+    /**
+     * Checks the max field limit for a concrete index
+     */
+    private suspend fun checkMaxFieldLimit(sourceIndex: String): Long {
+        val getSettingsResponse: GetSettingsResponse = client.suspendUntil {
+            admin().indices().getSettings(GetSettingsRequest().indices(sourceIndex), it)
+        }
+        return getSettingsResponse.getSetting(sourceIndex, INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.key)?.toLong() ?: 1000L
+    }
+
+    /**
+     * Adjusts the max field limit index setting for the query index if the source index has a higher limit.
+     * This prevents a max field limit exception when the source index has more fields than the query index limit allows.
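+     * Example (illustrative numbers): a source index whose limit is 2000 while the query index
+     * sits at the default 1000 would have the query index limit raised to
+     * 2000 + QUERY_INDEX_BASE_FIELDS_COUNT.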
+     */
+    private suspend fun adjustMaxFieldLimitForQueryIndex(sourceIndexFieldLimit: Long, concreteQueryIndex: String) {
+        val getSettingsResponse: GetSettingsResponse = client.suspendUntil {
+            admin().indices().getSettings(GetSettingsRequest().indices(concreteQueryIndex), it)
+        }
+        val queryIndexLimit =
+            getSettingsResponse.getSetting(concreteQueryIndex, INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.key)?.toLong() ?: 1000L
+        // Our query index initially has 3 fields we defined and 5 more builtin metadata fields in its mappings, so we have to account for that
+        if (sourceIndexFieldLimit > (queryIndexLimit - QUERY_INDEX_BASE_FIELDS_COUNT)) {
+            val updateSettingsResponse: AcknowledgedResponse = client.suspendUntil {
+                admin().indices().updateSettings(
+                    UpdateSettingsRequest(concreteQueryIndex).settings(
+                        Settings.builder().put(
+                            INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.key,
+                            sourceIndexFieldLimit + QUERY_INDEX_BASE_FIELDS_COUNT
+                        )
+                    ),
+                    it
+                )
+            }
+        }
+    }
+
+    private suspend fun rolloverQueryIndex(monitor: Monitor): String {
+        val queryIndex = monitor.dataSources.queryIndex
+        val queryIndexPattern = monitor.dataSources.queryIndex + INDEX_PATTERN_SUFFIX
+
+        val request = RolloverRequest(queryIndex, null)
+        request.createIndexRequest.index(queryIndexPattern)
+            .mapping(docLevelQueriesMappings())
+            .settings(docLevelQueriesSettings())
+        val response: RolloverResponse = client.suspendUntil {
+            client.admin().indices().rolloverIndex(request, it)
+        }
+        if (!response.isRolledOver) {
+            val message = "failed to rollover queryIndex:$queryIndex queryIndexPattern:$queryIndexPattern"
+            log.error(message)
+            throw AlertingException.wrap(
+                OpenSearchStatusException(message, RestStatus.INTERNAL_SERVER_ERROR)
+            )
+        }
+        return response.newIndex
+    }
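+
+    // Rollover naming sketch (values illustrative): with dataSources.queryIndex = ".opensearch-alerting-queries"
+    // and an INDEX_PATTERN_SUFFIX such as "-000001", the initial write index is
+    // ".opensearch-alerting-queries-000001" and each rollover creates the next generation
+    // ("-000002", ...) behind the same alias, which getWriteIndexNameForAlias() below resolves.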
+
+    private fun getWriteIndexNameForAlias(alias: String): String? {
+        return this.clusterService.state().metadata().indicesLookup?.get(alias)?.writeIndex?.index?.name
+    }
 }
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/util/IndexUtils.kt b/alerting/src/main/kotlin/org/opensearch/alerting/util/IndexUtils.kt
index 9f299e8c5..6949ca58d 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/util/IndexUtils.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/util/IndexUtils.kt
@@ -5,25 +5,31 @@
 
 package org.opensearch.alerting.util
 
-import org.opensearch.action.ActionListener
 import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest
+import org.opensearch.action.support.IndicesOptions
 import org.opensearch.action.support.master.AcknowledgedResponse
 import org.opensearch.alerting.alerts.AlertIndices
 import org.opensearch.alerting.core.ScheduledJobIndices
 import org.opensearch.client.IndicesAdminClient
 import org.opensearch.cluster.ClusterState
+import org.opensearch.cluster.metadata.IndexAbstraction
 import org.opensearch.cluster.metadata.IndexMetadata
+import org.opensearch.cluster.metadata.IndexNameExpressionResolver
+import org.opensearch.cluster.service.ClusterService
 import org.opensearch.common.xcontent.LoggingDeprecationHandler
-import org.opensearch.common.xcontent.NamedXContentRegistry
-import org.opensearch.common.xcontent.XContentParser
 import org.opensearch.common.xcontent.XContentType
+import org.opensearch.commons.alerting.util.IndexUtils
+import org.opensearch.core.action.ActionListener
+import org.opensearch.core.xcontent.NamedXContentRegistry
+import org.opensearch.core.xcontent.XContentParser
 
 class IndexUtils {
 
     companion object {
+        val VALID_INDEX_NAME_REGEX = Regex("""^(?![_\-\+])(?!.*\.\.)[^\s,\\\/\*\?"<>|#:\.]{1,255}$""")
+
+        const val _META = "_meta"
         const val SCHEMA_VERSION = "schema_version"
-        const val NO_SCHEMA_VERSION = 0
 
         var scheduledJobIndexSchemaVersion: Int
             private set
@@ -90,17 +96,17 @@ class IndexUtils {
                 }
                 xcp.nextToken()
             }
-            return NO_SCHEMA_VERSION
+            return IndexUtils.NO_SCHEMA_VERSION
         }
 
         @JvmStatic
         fun getIndexNameWithAlias(clusterState: ClusterState, alias: String): String {
-            return clusterState.metadata.indices.first { it.value.aliases.containsKey(alias) }.key
+            return clusterState.metadata.indices.entries.first { it.value.aliases.containsKey(alias) }.key
         }
 
         @JvmStatic
         fun shouldUpdateIndex(index: IndexMetadata, mapping: String): Boolean {
-            var oldVersion = NO_SCHEMA_VERSION
+            var oldVersion = IndexUtils.NO_SCHEMA_VERSION
             val newVersion = getSchemaVersion(mapping)
 
             val indexMapping = index.mapping()?.sourceAsMap()
@@ -122,7 +128,7 @@ class IndexUtils {
             actionListener: ActionListener<AcknowledgedResponse>
         ) {
             if (clusterState.metadata.indices.containsKey(index)) {
-                if (shouldUpdateIndex(clusterState.metadata.indices[index], mapping)) {
+                if (shouldUpdateIndex(clusterState.metadata.indices[index]!!, mapping)) {
                     val putMappingRequest: PutMappingRequest = PutMappingRequest(index).source(mapping, XContentType.JSON)
                     client.putMapping(putMappingRequest, actionListener)
                 } else {
@@ -130,5 +136,64 @@ class IndexUtils {
                 }
             }
         }
+
+        @JvmStatic
+        fun resolveAllIndices(indices: List<String>, clusterService: ClusterService, resolver: IndexNameExpressionResolver): List<String> {
+            val result = mutableListOf<String>()
+
+            indices.forEach { index ->
+                val concreteIndices = resolver.concreteIndexNames(
+                    clusterService.state(),
+                    IndicesOptions.lenientExpand(),
+                    true,
+                    index
+                )
+                result.addAll(concreteIndices)
+            }
+
+            return result
+        }
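+
+        // Usage sketch (index names hypothetical): resolveAllIndices(listOf("logs-*"), clusterService, resolver)
+        // expands the pattern to its concrete backing indices, e.g. ["logs-000001", "logs-000002"],
+        // which callers then narrow further via isAlias/isDataStream and the creation-date helpers below.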
+
+        @JvmStatic
+        fun isDataStream(name: String, clusterState: ClusterState): Boolean {
+            return clusterState.metadata().dataStreams().containsKey(name)
+        }
+
+        @JvmStatic
+        fun isAlias(name: String, clusterState: ClusterState): Boolean {
+            return clusterState.metadata().hasAlias(name)
+        }
+
+        @JvmStatic
+        fun getWriteIndex(index: String, clusterState: ClusterState): String? {
+            if (isAlias(index, clusterState) || isDataStream(index, clusterState)) {
+                val metadata = clusterState.metadata.indicesLookup[index]?.writeIndex
+                if (metadata != null) {
+                    return metadata.index.name
+                }
+            }
+            return null
+        }
+
+        @JvmStatic
+        fun getNewestIndicesByCreationDate(concreteIndices: List<String>, clusterState: ClusterState, thresholdDate: Long): List<String> {
+            val filteredIndices = mutableListOf<String>()
+            val lookup = clusterState.metadata().indicesLookup
+            concreteIndices.forEach { indexName ->
+                val index = lookup[indexName]
+                val indexMetadata = clusterState.metadata.index(indexName)
+                if (index != null && index.type == IndexAbstraction.Type.CONCRETE_INDEX) {
+                    if (indexMetadata.creationDate >= thresholdDate) {
+                        filteredIndices.add(indexName)
+                    }
+                }
+            }
+            return filteredIndices
+        }
+
+        @JvmStatic
+        fun getCreationDateForIndex(index: String, clusterState: ClusterState): Long {
+            return clusterState.metadata.index(index).creationDate
+        }
     }
 }
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/util/RestHandlerUtils.kt b/alerting/src/main/kotlin/org/opensearch/alerting/util/RestHandlerUtils.kt
index 70bd9775b..b5aeaa542 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/util/RestHandlerUtils.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/util/RestHandlerUtils.kt
@@ -6,7 +6,7 @@
 package org.opensearch.alerting.util
 
 import org.opensearch.alerting.AlertingPlugin
-import org.opensearch.common.Strings
+import org.opensearch.core.common.Strings
 import org.opensearch.rest.RestRequest
 import org.opensearch.search.fetch.subphase.FetchSourceContext
 
@@ -18,16 +18,12 @@ import org.opensearch.search.fetch.subphase.FetchSourceContext
 * @return FetchSourceContext
 */
fun context(request: RestRequest): FetchSourceContext?
{ - val userAgent = Strings.coalesceToEmpty(request.header("User-Agent")) + val userAgent = if (request.header("User-Agent") == null) "" else request.header("User-Agent") return if (!userAgent.contains(AlertingPlugin.OPEN_SEARCH_DASHBOARDS_USER_AGENT)) { FetchSourceContext(true, Strings.EMPTY_ARRAY, AlertingPlugin.UI_METADATA_EXCLUDE) } else null } -const val _ID = "_id" -const val _VERSION = "_version" -const val _SEQ_NO = "_seq_no" const val IF_SEQ_NO = "if_seq_no" -const val _PRIMARY_TERM = "_primary_term" const val IF_PRIMARY_TERM = "if_primary_term" const val REFRESH = "refresh" diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/util/ScheduledJobUtils.kt b/alerting/src/main/kotlin/org/opensearch/alerting/util/ScheduledJobUtils.kt new file mode 100644 index 000000000..70fe42a38 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/util/ScheduledJobUtils.kt @@ -0,0 +1,70 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util + +import org.apache.logging.log4j.LogManager +import org.opensearch.OpenSearchStatusException +import org.opensearch.action.get.GetResponse +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry + +private val log = LogManager.getLogger(ScheduledJobUtils::class.java) + +class ScheduledJobUtils { + companion object { + const val WORKFLOW_DELEGATE_PATH = "workflow.inputs.composite_input.sequence.delegates" + const val WORKFLOW_MONITOR_PATH = "workflow.inputs.composite_input.sequence.delegates.monitor_id" + fun parseWorkflowFromScheduledJobDocSource(xContentRegistry: NamedXContentRegistry, response: GetResponse): Workflow { + XContentHelper.createParser( + xContentRegistry, LoggingDeprecationHandler.INSTANCE, + response.sourceAsBytesRef, XContentType.JSON + ).use { xcp -> + try { + val workflow = ScheduledJob.parse(xcp, response.id, response.version) + if (workflow is Workflow) { + return workflow + } else { + log.error("Unable to parse workflow from ${response.source}") + throw OpenSearchStatusException( + "Unable to parse workflow from ${response.source}", + RestStatus.INTERNAL_SERVER_ERROR + ) + } + } catch (e: java.lang.Exception) { + throw AlertingException("Unable to parse workflow from ${response.source}", RestStatus.INTERNAL_SERVER_ERROR, e) + } + } + } + + fun parseMonitorFromScheduledJobDocSource(xContentRegistry: NamedXContentRegistry, response: GetResponse): Monitor { + XContentHelper.createParser( + xContentRegistry, LoggingDeprecationHandler.INSTANCE, + response.sourceAsBytesRef, XContentType.JSON + ).use { xcp -> + try { + val monitor = ScheduledJob.parse(xcp, response.id, response.version) + if (monitor is Monitor) { + return monitor + } else { + log.error("Unable to parse monitor from ${response.source}") + throw OpenSearchStatusException( + "Unable to parse monitor from ${response.source}", + RestStatus.INTERNAL_SERVER_ERROR + ) + } + } catch (e: java.lang.Exception) { + throw AlertingException("Unable to parse monitor from ${response.source}", RestStatus.INTERNAL_SERVER_ERROR, e) + } + } + } + } +} diff --git 
a/alerting/src/main/kotlin/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesHelpers.kt b/alerting/src/main/kotlin/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesHelpers.kt
new file mode 100644
index 000000000..8e92b597f
--- /dev/null
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesHelpers.kt
@@ -0,0 +1,859 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.util.clusterMetricsMonitorHelpers
+
+import org.apache.logging.log4j.LogManager
+import org.opensearch.action.ActionRequest
+import org.opensearch.action.ActionRequestValidationException
+import org.opensearch.action.ValidateActions
+import org.opensearch.action.admin.cluster.health.ClusterHealthRequest
+import org.opensearch.action.admin.cluster.health.ClusterHealthResponse
+import org.opensearch.action.admin.cluster.state.ClusterStateRequest
+import org.opensearch.action.admin.cluster.state.ClusterStateResponse
+import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest
+import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse
+import org.opensearch.action.admin.indices.stats.CommonStats
+import org.opensearch.action.admin.indices.stats.IndicesStatsRequest
+import org.opensearch.action.admin.indices.stats.IndicesStatsResponse
+import org.opensearch.action.support.IndicesOptions
+import org.opensearch.alerting.util.IndexUtils.Companion.VALID_INDEX_NAME_REGEX
+import org.opensearch.cluster.metadata.IndexMetadata
+import org.opensearch.common.time.DateFormatter
+import org.opensearch.core.action.ActionResponse
+import org.opensearch.core.common.io.stream.StreamOutput
+import org.opensearch.core.common.io.stream.Writeable
+import org.opensearch.core.xcontent.ToXContent
+import org.opensearch.core.xcontent.ToXContentObject
+import org.opensearch.core.xcontent.XContentBuilder
+import org.opensearch.index.IndexSettings
+import java.time.Instant
+import java.time.ZoneOffset
+import java.time.ZonedDateTime
+import java.util.Locale
+
+class CatIndicesRequestWrapper(val pathParams: String = "") : ActionRequest() {
+    val log = LogManager.getLogger(CatIndicesRequestWrapper::class.java)
+
+    var clusterHealthRequest: ClusterHealthRequest =
+        ClusterHealthRequest().indicesOptions(IndicesOptions.lenientExpandHidden())
+    var clusterStateRequest: ClusterStateRequest =
+        ClusterStateRequest().indicesOptions(IndicesOptions.lenientExpandHidden())
+    var indexSettingsRequest: GetSettingsRequest =
+        GetSettingsRequest()
+            .indicesOptions(IndicesOptions.lenientExpandHidden())
+            .names(IndexSettings.INDEX_SEARCH_THROTTLED.key)
+    var indicesStatsRequest: IndicesStatsRequest =
+        IndicesStatsRequest().all().indicesOptions(IndicesOptions.lenientExpandHidden())
+    var indicesList = arrayOf<String>()
+
+    init {
+        if (pathParams.isNotBlank()) {
+            indicesList = pathParams.split(",").toTypedArray()
+
+            require(validate() == null) {
+                "The path parameters do not form a valid, comma-separated list of data streams, indices, or index aliases."
+            }
+
+            clusterHealthRequest = clusterHealthRequest.indices(*indicesList)
+            clusterStateRequest = clusterStateRequest.indices(*indicesList)
+            indexSettingsRequest = indexSettingsRequest.indices(*indicesList)
+            indicesStatsRequest = indicesStatsRequest.indices(*indicesList)
+        }
+    }
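+
+    // Usage sketch (indices hypothetical): CatIndicesRequestWrapper("logs-1,logs-2") scopes all four
+    // sub-requests (cluster health, cluster state, index settings, index stats) to those indices,
+    // mirroring GET _cat/indices/logs-1,logs-2; a blank pathParams leaves them cluster-wide.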
+
+    override fun validate(): ActionRequestValidationException? {
+        var exception: ActionRequestValidationException? = null
+        if (pathParams.isNotBlank() && indicesList.any { !VALID_INDEX_NAME_REGEX.containsMatchIn(it) })
+            exception = ValidateActions.addValidationError(
+                "The path parameters do not form a valid, comma-separated list of data streams, indices, or index aliases.",
+                exception
+            )
+        return exception
+    }
+}
+
+class CatIndicesResponseWrapper(
+    clusterHealthResponse: ClusterHealthResponse,
+    clusterStateResponse: ClusterStateResponse,
+    indexSettingsResponse: GetSettingsResponse,
+    indicesStatsResponse: IndicesStatsResponse
+) : ActionResponse(), ToXContentObject {
+    var indexInfoList: List<IndexInfo> = listOf()
+
+    init {
+        indexInfoList = compileIndexInfo(
+            clusterHealthResponse,
+            clusterStateResponse,
+            indexSettingsResponse,
+            indicesStatsResponse
+        )
+    }
+
+    companion object {
+        const val WRAPPER_FIELD = "indices"
+    }
+
+    override fun writeTo(out: StreamOutput) {
+        out.writeList(indexInfoList)
+    }
+
+    override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder {
+        builder.startObject()
+        builder.startArray(WRAPPER_FIELD)
+        indexInfoList.forEach { it.toXContent(builder, params) }
+        builder.endArray()
+        return builder.endObject()
+    }
+
+    private fun compileIndexInfo(
+        clusterHealthResponse: ClusterHealthResponse,
+        clusterStateResponse: ClusterStateResponse,
+        indexSettingsResponse: GetSettingsResponse,
+        indicesStatsResponse: IndicesStatsResponse
+    ): List<IndexInfo> {
+        val list = mutableListOf<IndexInfo>()
+
+        val indicesSettings = indexSettingsResponse.indexToSettings
+        val indicesHealths = clusterHealthResponse.indices
+        val indicesStats = indicesStatsResponse.indices
+        val indicesMetadatas = hashMapOf<String, IndexMetadata>()
+        clusterStateResponse.state.metadata.forEach { indicesMetadatas[it.index.name] = it }
+
+        indicesSettings.forEach { (indexName, settings) ->
+            if (!indicesMetadatas.containsKey(indexName)) return@forEach
+
+            val indexMetadata = indicesMetadatas[indexName]
+            val indexState = indexMetadata?.state
+            val indexStats = indicesStats[indexName]
+            val searchThrottled = IndexSettings.INDEX_SEARCH_THROTTLED.get(settings)
+            val indexHealth = indicesHealths[indexName]
+
+            var health = ""
+            if (indexHealth != null) {
+                health = indexHealth.status.toString().lowercase(Locale.ROOT)
+            } else if (indexStats != null) {
+                health = "red*"
+            }
+
+            val primaryStats: CommonStats?
+            val totalStats: CommonStats?
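+            // A closed index, or one missing from the stats response, gets empty CommonStats
+            // placeholders below; their sub-stats are null, so the IndexInfo string fields render
+            // as "null" rather than the lookup throwing.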
+ if (indexStats == null || indexState == IndexMetadata.State.CLOSE) { + primaryStats = CommonStats() + totalStats = CommonStats() + } else { + primaryStats = indexStats.primaries + totalStats = indexStats.total + } + + list.add( + IndexInfo( + health = health, + status = indexState.toString().lowercase(Locale.ROOT), + index = indexName, + uuid = indexMetadata?.indexUUID, + pri = "${indexHealth?.numberOfShards}", + rep = "${indexHealth?.numberOfReplicas}", + docsCount = "${primaryStats?.getDocs()?.count}", + docsDeleted = "${primaryStats?.getDocs()?.deleted}", + creationDate = "${indexMetadata?.creationDate}", + creationDateString = DateFormatter.forPattern("strict_date_time") + .format(ZonedDateTime.ofInstant(Instant.ofEpochMilli(indexMetadata!!.creationDate), ZoneOffset.UTC)), + storeSize = "${totalStats?.store?.size}", + priStoreSize = "${primaryStats?.store?.size}", + completionSize = "${totalStats?.completion?.size}", + priCompletionSize = "${primaryStats?.completion?.size}", + fieldDataMemorySize = "${totalStats?.fieldData?.memorySize}", + priFieldDataMemorySize = "${primaryStats?.fieldData?.memorySize}", + fieldDataEvictions = "${totalStats?.fieldData?.evictions}", + priFieldDataEvictions = "${primaryStats?.fieldData?.evictions}", + queryCacheMemorySize = "${totalStats?.queryCache?.memorySize}", + priQueryCacheMemorySize = "${primaryStats?.queryCache?.memorySize}", + queryCacheEvictions = "${totalStats?.queryCache?.evictions}", + priQueryCacheEvictions = "${primaryStats?.queryCache?.evictions}", + requestCacheMemorySize = "${totalStats?.requestCache?.memorySize}", + priRequestCacheMemorySize = "${primaryStats?.requestCache?.memorySize}", + requestCacheEvictions = "${totalStats?.requestCache?.evictions}", + priRequestCacheEvictions = "${primaryStats?.requestCache?.evictions}", + requestCacheHitCount = "${totalStats?.requestCache?.hitCount}", + priRequestCacheHitCount = "${primaryStats?.requestCache?.hitCount}", + requestCacheMissCount = "${totalStats?.requestCache?.missCount}", + priRequestCacheMissCount = "${primaryStats?.requestCache?.missCount}", + flushTotal = "${totalStats?.flush?.total}", + priFlushTotal = "${primaryStats?.flush?.total}", + flushTotalTime = "${totalStats?.flush?.totalTime}", + priFlushTotalTime = "${primaryStats?.flush?.totalTime}", + getCurrent = "${totalStats?.get?.current()}", + priGetCurrent = "${primaryStats?.get?.current()}", + getTime = "${totalStats?.get?.time}", + priGetTime = "${primaryStats?.get?.time}", + getTotal = "${totalStats?.get?.count}", + priGetTotal = "${primaryStats?.get?.count}", + getExistsTime = "${totalStats?.get?.existsTime}", + priGetExistsTime = "${primaryStats?.get?.existsTime}", + getExistsTotal = "${totalStats?.get?.existsCount}", + priGetExistsTotal = "${primaryStats?.get?.existsCount}", + getMissingTime = "${totalStats?.get?.missingTime}", + priGetMissingTime = "${primaryStats?.get?.missingTime}", + getMissingTotal = "${totalStats?.get?.missingCount}", + priGetMissingTotal = "${primaryStats?.get?.missingCount}", + indexingDeleteCurrent = "${totalStats?.indexing?.total?.deleteCurrent}", + priIndexingDeleteCurrent = "${primaryStats?.indexing?.total?.deleteCurrent}", + indexingDeleteTime = "${totalStats?.indexing?.total?.deleteTime}", + priIndexingDeleteTime = "${primaryStats?.indexing?.total?.deleteTime}", + indexingDeleteTotal = "${totalStats?.indexing?.total?.deleteCount}", + priIndexingDeleteTotal = "${primaryStats?.indexing?.total?.deleteCount}", + indexingIndexCurrent = "${totalStats?.indexing?.total?.indexCurrent}", + 
priIndexingIndexCurrent = "${primaryStats?.indexing?.total?.indexCurrent}", + indexingIndexTime = "${totalStats?.indexing?.total?.indexTime}", + priIndexingIndexTime = "${primaryStats?.indexing?.total?.indexTime}", + indexingIndexTotal = "${totalStats?.indexing?.total?.indexCount}", + priIndexingIndexTotal = "${primaryStats?.indexing?.total?.indexCount}", + indexingIndexFailed = "${totalStats?.indexing?.total?.indexFailedCount}", + priIndexingIndexFailed = "${primaryStats?.indexing?.total?.indexFailedCount}", + mergesCurrent = "${totalStats?.merge?.current}", + priMergesCurrent = "${primaryStats?.merge?.current}", + mergesCurrentDocs = "${totalStats?.merge?.currentNumDocs}", + priMergesCurrentDocs = "${primaryStats?.merge?.currentNumDocs}", + mergesCurrentSize = "${totalStats?.merge?.currentSize}", + priMergesCurrentSize = "${primaryStats?.merge?.currentSize}", + mergesTotal = "${totalStats?.merge?.total}", + priMergesTotal = "${primaryStats?.merge?.total}", + mergesTotalDocs = "${totalStats?.merge?.totalNumDocs}", + priMergesTotalDocs = "${primaryStats?.merge?.totalNumDocs}", + mergesTotalSize = "${totalStats?.merge?.totalSize}", + priMergesTotalSize = "${primaryStats?.merge?.totalSize}", + mergesTotalTime = "${totalStats?.merge?.totalTime}", + priMergesTotalTime = "${primaryStats?.merge?.totalTime}", + refreshTotal = "${totalStats?.refresh?.total}", + priRefreshTotal = "${primaryStats?.refresh?.total}", + refreshTime = "${totalStats?.refresh?.totalTime}", + priRefreshTime = "${primaryStats?.refresh?.totalTime}", + refreshExternalTotal = "${totalStats?.refresh?.externalTotal}", + priRefreshExternalTotal = "${primaryStats?.refresh?.externalTotal}", + refreshExternalTime = "${totalStats?.refresh?.externalTotalTime}", + priRefreshExternalTime = "${primaryStats?.refresh?.externalTotalTime}", + refreshListeners = "${totalStats?.refresh?.listeners}", + priRefreshListeners = "${primaryStats?.refresh?.listeners}", + searchFetchCurrent = "${totalStats?.search?.total?.fetchCurrent}", + priSearchFetchCurrent = "${primaryStats?.search?.total?.fetchCurrent}", + searchFetchTime = "${totalStats?.search?.total?.fetchTime}", + priSearchFetchTime = "${primaryStats?.search?.total?.fetchTime}", + searchFetchTotal = "${totalStats?.search?.total?.fetchCount}", + priSearchFetchTotal = "${primaryStats?.search?.total?.fetchCount}", + searchOpenContexts = "${totalStats?.search?.openContexts}", + priSearchOpenContexts = "${primaryStats?.search?.openContexts}", + searchQueryCurrent = "${totalStats?.search?.total?.queryCurrent}", + priSearchQueryCurrent = "${primaryStats?.search?.total?.queryCurrent}", + searchQueryTime = "${totalStats?.search?.total?.queryTime}", + priSearchQueryTime = "${primaryStats?.search?.total?.queryTime}", + searchQueryTotal = "${totalStats?.search?.total?.queryCount}", + priSearchQueryTotal = "${primaryStats?.search?.total?.queryCount}", + searchScrollCurrent = "${totalStats?.search?.total?.scrollCurrent}", + priSearchScrollCurrent = "${primaryStats?.search?.total?.scrollCurrent}", + searchScrollTime = "${totalStats?.search?.total?.scrollTime}", + priSearchScrollTime = "${primaryStats?.search?.total?.scrollTime}", + searchScrollTotal = "${totalStats?.search?.total?.scrollCount}", + priSearchScrollTotal = "${primaryStats?.search?.total?.scrollCount}", + searchPointInTimeCurrent = "${totalStats?.search?.total?.pitCurrent}", + priSearchPointInTimeCurrent = "${primaryStats?.search?.total?.pitCurrent}", + searchPointInTimeTime = "${totalStats?.search?.total?.pitTime}", + priSearchPointInTimeTime 
= "${primaryStats?.search?.total?.pitTime}", + searchPointInTimeTotal = "${totalStats?.search?.total?.pitCount}", + priSearchPointInTimeTotal = "${primaryStats?.search?.total?.pitCount}", + segmentsCount = "${totalStats?.segments?.count}", + priSegmentsCount = "${primaryStats?.segments?.count}", + segmentsMemory = "${totalStats?.segments?.zeroMemory}", + priSegmentsMemory = "${primaryStats?.segments?.zeroMemory}", + segmentsIndexWriterMemory = "${totalStats?.segments?.indexWriterMemory}", + priSegmentsIndexWriterMemory = "${primaryStats?.segments?.indexWriterMemory}", + segmentsVersionMapMemory = "${totalStats?.segments?.versionMapMemory}", + priSegmentsVersionMapMemory = "${primaryStats?.segments?.versionMapMemory}", + segmentsFixedBitsetMemory = "${totalStats?.segments?.bitsetMemory}", + priSegmentsFixedBitsetMemory = "${primaryStats?.segments?.bitsetMemory}", + warmerCurrent = "${totalStats?.warmer?.current()}", + priWarmerCurrent = "${primaryStats?.warmer?.current()}", + warmerTotal = "${totalStats?.warmer?.total()}", + priWarmerTotal = "${primaryStats?.warmer?.total()}", + warmerTotalTime = "${totalStats?.warmer?.totalTime()}", + priWarmerTotalTime = "${primaryStats?.warmer?.totalTime()}", + suggestCurrent = "${totalStats?.search?.total?.suggestCurrent}", + priSuggestCurrent = "${primaryStats?.search?.total?.suggestCurrent}", + suggestTime = "${totalStats?.search?.total?.suggestTime}", + priSuggestTime = "${primaryStats?.search?.total?.suggestTime}", + suggestTotal = "${totalStats?.search?.total?.suggestCount}", + priSuggestTotal = "${primaryStats?.search?.total?.suggestCount}", + memoryTotal = "${totalStats?.totalMemory}", + priMemoryTotal = "${primaryStats?.totalMemory}", + searchThrottled = "$searchThrottled", + ) + ) + } + + return list + } + + data class IndexInfo( + val health: String?, + val status: String?, + val index: String?, + val uuid: String?, + val pri: String?, + val rep: String?, + val docsCount: String?, + val docsDeleted: String?, + val creationDate: String?, + val creationDateString: String?, + val storeSize: String?, + val priStoreSize: String?, + val completionSize: String?, + val priCompletionSize: String?, + val fieldDataMemorySize: String?, + val priFieldDataMemorySize: String?, + val fieldDataEvictions: String?, + val priFieldDataEvictions: String?, + val queryCacheMemorySize: String?, + val priQueryCacheMemorySize: String?, + val queryCacheEvictions: String?, + val priQueryCacheEvictions: String?, + val requestCacheMemorySize: String?, + val priRequestCacheMemorySize: String?, + val requestCacheEvictions: String?, + val priRequestCacheEvictions: String?, + val requestCacheHitCount: String?, + val priRequestCacheHitCount: String?, + val requestCacheMissCount: String?, + val priRequestCacheMissCount: String?, + val flushTotal: String?, + val priFlushTotal: String?, + val flushTotalTime: String?, + val priFlushTotalTime: String?, + val getCurrent: String?, + val priGetCurrent: String?, + val getTime: String?, + val priGetTime: String?, + val getTotal: String?, + val priGetTotal: String?, + val getExistsTime: String?, + val priGetExistsTime: String?, + val getExistsTotal: String?, + val priGetExistsTotal: String?, + val getMissingTime: String?, + val priGetMissingTime: String?, + val getMissingTotal: String?, + val priGetMissingTotal: String?, + val indexingDeleteCurrent: String?, + val priIndexingDeleteCurrent: String?, + val indexingDeleteTime: String?, + val priIndexingDeleteTime: String?, + val indexingDeleteTotal: String?, + val priIndexingDeleteTotal: 
String?, + val indexingIndexCurrent: String?, + val priIndexingIndexCurrent: String?, + val indexingIndexTime: String?, + val priIndexingIndexTime: String?, + val indexingIndexTotal: String?, + val priIndexingIndexTotal: String?, + val indexingIndexFailed: String?, + val priIndexingIndexFailed: String?, + val mergesCurrent: String?, + val priMergesCurrent: String?, + val mergesCurrentDocs: String?, + val priMergesCurrentDocs: String?, + val mergesCurrentSize: String?, + val priMergesCurrentSize: String?, + val mergesTotal: String?, + val priMergesTotal: String?, + val mergesTotalDocs: String?, + val priMergesTotalDocs: String?, + val mergesTotalSize: String?, + val priMergesTotalSize: String?, + val mergesTotalTime: String?, + val priMergesTotalTime: String?, + val refreshTotal: String?, + val priRefreshTotal: String?, + val refreshTime: String?, + val priRefreshTime: String?, + val refreshExternalTotal: String?, + val priRefreshExternalTotal: String?, + val refreshExternalTime: String?, + val priRefreshExternalTime: String?, + val refreshListeners: String?, + val priRefreshListeners: String?, + val searchFetchCurrent: String?, + val priSearchFetchCurrent: String?, + val searchFetchTime: String?, + val priSearchFetchTime: String?, + val searchFetchTotal: String?, + val priSearchFetchTotal: String?, + val searchOpenContexts: String?, + val priSearchOpenContexts: String?, + val searchQueryCurrent: String?, + val priSearchQueryCurrent: String?, + val searchQueryTime: String?, + val priSearchQueryTime: String?, + val searchQueryTotal: String?, + val priSearchQueryTotal: String?, + val searchScrollCurrent: String?, + val priSearchScrollCurrent: String?, + val searchScrollTime: String?, + val priSearchScrollTime: String?, + val searchScrollTotal: String?, + val priSearchScrollTotal: String?, + val searchPointInTimeCurrent: String?, + val priSearchPointInTimeCurrent: String?, + val searchPointInTimeTime: String?, + val priSearchPointInTimeTime: String?, + val searchPointInTimeTotal: String?, + val priSearchPointInTimeTotal: String?, + val segmentsCount: String?, + val priSegmentsCount: String?, + val segmentsMemory: String?, + val priSegmentsMemory: String?, + val segmentsIndexWriterMemory: String?, + val priSegmentsIndexWriterMemory: String?, + val segmentsVersionMapMemory: String?, + val priSegmentsVersionMapMemory: String?, + val segmentsFixedBitsetMemory: String?, + val priSegmentsFixedBitsetMemory: String?, + val warmerCurrent: String?, + val priWarmerCurrent: String?, + val warmerTotal: String?, + val priWarmerTotal: String?, + val warmerTotalTime: String?, + val priWarmerTotalTime: String?, + val suggestCurrent: String?, + val priSuggestCurrent: String?, + val suggestTime: String?, + val priSuggestTime: String?, + val suggestTotal: String?, + val priSuggestTotal: String?, + val memoryTotal: String?, + val priMemoryTotal: String?, + val searchThrottled: String? 
+ ) : ToXContentObject, Writeable { + companion object { + const val HEALTH_FIELD = "health" + const val STATUS_FIELD = "status" + const val INDEX_FIELD = "index" + const val UUID_FIELD = "uuid" + const val PRI_FIELD = "pri" + const val REP_FIELD = "rep" + const val DOCS_COUNT_FIELD = "docs.count" + const val DOCS_DELETED_FIELD = "docs.deleted" + const val CREATION_DATE_FIELD = "creation.date" + const val CREATION_DATE_STRING_FIELD = "creation.date.string" + const val STORE_SIZE_FIELD = "store.size" + const val PRI_STORE_SIZE_FIELD = "pri.store.size" + const val COMPLETION_SIZE_FIELD = "completion.size" + const val PRI_COMPLETION_SIZE_FIELD = "pri.completion.size" + const val FIELD_DATA_MEMORY_SIZE_FIELD = "fielddata.memory_size" + const val PRI_FIELD_DATA_MEMORY_SIZE_FIELD = "pri.fielddata.memory_size" + const val FIELD_DATA_EVICTIONS_FIELD = "fielddata.evictions" + const val PRI_FIELD_DATA_EVICTIONS_FIELD = "pri.fielddata.evictions" + const val QUERY_CACHE_MEMORY_SIZE_FIELD = "query_cache.memory_size" + const val PRI_QUERY_CACHE_MEMORY_SIZE_FIELD = "pri.query_cache.memory_size" + const val QUERY_CACHE_EVICTIONS_FIELD = "query_cache.evictions" + const val PRI_QUERY_CACHE_EVICTIONS_FIELD = "pri.query_cache.evictions" + const val REQUEST_CACHE_MEMORY_SIZE_FIELD = "request_cache.memory_size" + const val PRI_REQUEST_CACHE_MEMORY_SIZE_FIELD = "pri.request_cache.memory_size" + const val REQUEST_CACHE_EVICTIONS_FIELD = "request_cache.evictions" + const val PRI_REQUEST_CACHE_EVICTIONS_FIELD = "pri.request_cache.evictions" + const val REQUEST_CACHE_HIT_COUNT_FIELD = "request_cache.hit_count" + const val PRI_REQUEST_CACHE_HIT_COUNT_FIELD = "pri.request_cache.hit_count" + const val REQUEST_CACHE_MISS_COUNT_FIELD = "request_cache.miss_count" + const val PRI_REQUEST_CACHE_MISS_COUNT_FIELD = "pri.request_cache.miss_count" + const val FLUSH_TOTAL_FIELD = "flush.total" + const val PRI_FLUSH_TOTAL_FIELD = "pri.flush.total" + const val FLUSH_TOTAL_TIME_FIELD = "flush.total_time" + const val PRI_FLUSH_TOTAL_TIME_FIELD = "pri.flush.total_time" + const val GET_CURRENT_FIELD = "get.current" + const val PRI_GET_CURRENT_FIELD = "pri.get.current" + const val GET_TIME_FIELD = "get.time" + const val PRI_GET_TIME_FIELD = "pri.get.time" + const val GET_TOTAL_FIELD = "get.total" + const val PRI_GET_TOTAL_FIELD = "pri.get.total" + const val GET_EXISTS_TIME_FIELD = "get.exists_time" + const val PRI_GET_EXISTS_TIME_FIELD = "pri.get.exists_time" + const val GET_EXISTS_TOTAL_FIELD = "get.exists_total" + const val PRI_GET_EXISTS_TOTAL_FIELD = "pri.get.exists_total" + const val GET_MISSING_TIME_FIELD = "get.missing_time" + const val PRI_GET_MISSING_TIME_FIELD = "pri.get.missing_time" + const val GET_MISSING_TOTAL_FIELD = "get.missing_total" + const val PRI_GET_MISSING_TOTAL_FIELD = "pri.get.missing_total" + const val INDEXING_DELETE_CURRENT_FIELD = "indexing.delete_current" + const val PRI_INDEXING_DELETE_CURRENT_FIELD = "pri.indexing.delete_current" + const val INDEXING_DELETE_TIME_FIELD = "indexing.delete_time" + const val PRI_INDEXING_DELETE_TIME_FIELD = "pri.indexing.delete_time" + const val INDEXING_DELETE_TOTAL_FIELD = "indexing.delete_total" + const val PRI_INDEXING_DELETE_TOTAL_FIELD = "pri.indexing.delete_total" + const val INDEXING_INDEX_CURRENT_FIELD = "indexing.index_current" + const val PRI_INDEXING_INDEX_CURRENT_FIELD = "pri.indexing.index_current" + const val INDEXING_INDEX_TIME_FIELD = "indexing.index_time" + const val PRI_INDEXING_INDEX_TIME_FIELD = "pri.indexing.index_time" + const val 
INDEXING_INDEX_TOTAL_FIELD = "indexing.index_total" + const val PRI_INDEXING_INDEX_TOTAL_FIELD = "pri.indexing.index_total" + const val INDEXING_INDEX_FAILED_FIELD = "indexing.index_failed" + const val PRI_INDEXING_INDEX_FAILED_FIELD = "pri.indexing.index_failed" + const val MERGES_CURRENT_FIELD = "merges.current" + const val PRI_MERGES_CURRENT_FIELD = "pri.merges.current" + const val MERGES_CURRENT_DOCS_FIELD = "merges.current_docs" + const val PRI_MERGES_CURRENT_DOCS_FIELD = "pri.merges.current_docs" + const val MERGES_CURRENT_SIZE_FIELD = "merges.current_size" + const val PRI_MERGES_CURRENT_SIZE_FIELD = "pri.merges.current_size" + const val MERGES_TOTAL_FIELD = "merges.total" + const val PRI_MERGES_TOTAL_FIELD = "pri.merges.total" + const val MERGES_TOTAL_DOCS_FIELD = "merges.total_docs" + const val PRI_MERGES_TOTAL_DOCS_FIELD = "pri.merges.total_docs" + const val MERGES_TOTAL_SIZE_FIELD = "merges.total_size" + const val PRI_MERGES_TOTAL_SIZE_FIELD = "pri.merges.total_size" + const val MERGES_TOTAL_TIME_FIELD = "merges.total_time" + const val PRI_MERGES_TOTAL_TIME_FIELD = "pri.merges.total_time" + const val REFRESH_TOTAL_FIELD = "refresh.total" + const val PRI_REFRESH_TOTAL_FIELD = "pri.refresh.total" + const val REFRESH_TIME_FIELD = "refresh.time" + const val PRI_REFRESH_TIME_FIELD = "pri.refresh.time" + const val REFRESH_EXTERNAL_TOTAL_FIELD = "refresh.external_total" + const val PRI_REFRESH_EXTERNAL_TOTAL_FIELD = "pri.refresh.external_total" + const val REFRESH_EXTERNAL_TIME_FIELD = "refresh.external_time" + const val PRI_REFRESH_EXTERNAL_TIME_FIELD = "pri.refresh.external_time" + const val REFRESH_LISTENERS_FIELD = "refresh.listeners" + const val PRI_REFRESH_LISTENERS_FIELD = "pri.refresh.listeners" + const val SEARCH_FETCH_CURRENT_FIELD = "search.fetch_current" + const val PRI_SEARCH_FETCH_CURRENT_FIELD = "pri.search.fetch_current" + const val SEARCH_FETCH_TIME_FIELD = "search.fetch_time" + const val PRI_SEARCH_FETCH_TIME_FIELD = "pri.search.fetch_time" + const val SEARCH_FETCH_TOTAL_FIELD = "search.fetch_total" + const val PRI_SEARCH_FETCH_TOTAL_FIELD = "pri.search.fetch_total" + const val SEARCH_OPEN_CONTEXTS_FIELD = "search.open_contexts" + const val PRI_SEARCH_OPEN_CONTEXTS_FIELD = "pri.search.open_contexts" + const val SEARCH_QUERY_CURRENT_FIELD = "search.query_current" + const val PRI_SEARCH_QUERY_CURRENT_FIELD = "pri.search.query_current" + const val SEARCH_QUERY_TIME_FIELD = "search.query_time" + const val PRI_SEARCH_QUERY_TIME_FIELD = "pri.search.query_time" + const val SEARCH_QUERY_TOTAL_FIELD = "search.query_total" + const val PRI_SEARCH_QUERY_TOTAL_FIELD = "pri.search.query_total" + const val SEARCH_SCROLL_CURRENT_FIELD = "search.scroll_current" + const val PRI_SEARCH_SCROLL_CURRENT_FIELD = "pri.search.scroll_current" + const val SEARCH_SCROLL_TIME_FIELD = "search.scroll_time" + const val PRI_SEARCH_SCROLL_TIME_FIELD = "pri.search.scroll_time" + const val SEARCH_SCROLL_TOTAL_FIELD = "search.scroll_total" + const val PRI_SEARCH_SCROLL_TOTAL_FIELD = "pri.search.scroll_total" + const val SEARCH_POINT_IN_TIME_CURRENT_FIELD = "search.point_in_time_current" + const val PRI_SEARCH_POINT_IN_TIME_CURRENT_FIELD = "pri.search.point_in_time_current" + const val SEARCH_POINT_IN_TIME_TIME_FIELD = "search.point_in_time_time" + const val PRI_SEARCH_POINT_IN_TIME_TIME_FIELD = "pri.search.point_in_time_time" + const val SEARCH_POINT_IN_TIME_TOTAL_FIELD = "search.point_in_time_total" + const val PRI_SEARCH_POINT_IN_TIME_TOTAL_FIELD = "pri.search.point_in_time_total" + const val 
SEGMENTS_COUNT_FIELD = "segments.count" + const val PRI_SEGMENTS_COUNT_FIELD = "pri.segments.count" + const val SEGMENTS_MEMORY_FIELD = "segments.memory" + const val PRI_SEGMENTS_MEMORY_FIELD = "pri.segments.memory" + const val SEGMENTS_INDEX_WRITER_MEMORY_FIELD = "segments.index_writer_memory" + const val PRI_SEGMENTS_INDEX_WRITER_MEMORY_FIELD = "pri.segments.index_writer_memory" + const val SEGMENTS_VERSION_MAP_MEMORY_FIELD = "segments.version_map_memory" + const val PRI_SEGMENTS_VERSION_MAP_MEMORY_FIELD = "pri.segments.version_map_memory" + const val SEGMENTS_FIXED_BITSET_MEMORY_FIELD = "segments.fixed_bitset_memory" + const val PRI_SEGMENTS_FIXED_BITSET_MEMORY_FIELD = "pri.segments.fixed_bitset_memory" + const val WARMER_CURRENT_FIELD = "warmer.current" + const val PRI_WARMER_CURRENT_FIELD = "pri.warmer.current" + const val WARMER_TOTAL_FIELD = "warmer.total" + const val PRI_WARMER_TOTAL_FIELD = "pri.warmer.total" + const val WARMER_TOTAL_TIME_FIELD = "warmer.total_time" + const val PRI_WARMER_TOTAL_TIME_FIELD = "pri.warmer.total_time" + const val SUGGEST_CURRENT_FIELD = "suggest.current" + const val PRI_SUGGEST_CURRENT_FIELD = "pri.suggest.current" + const val SUGGEST_TIME_FIELD = "suggest.time" + const val PRI_SUGGEST_TIME_FIELD = "pri.suggest.time" + const val SUGGEST_TOTAL_FIELD = "suggest.total" + const val PRI_SUGGEST_TOTAL_FIELD = "pri.suggest.total" + const val MEMORY_TOTAL_FIELD = "memory.total" + const val PRI_MEMORY_TOTAL_FIELD = "pri.memory.total" + const val SEARCH_THROTTLED_FIELD = "search.throttled" + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + .field(HEALTH_FIELD, health) + .field(STATUS_FIELD, status) + .field(INDEX_FIELD, index) + .field(UUID_FIELD, uuid) + .field(PRI_FIELD, pri) + .field(REP_FIELD, rep) + .field(DOCS_COUNT_FIELD, docsCount) + .field(DOCS_DELETED_FIELD, docsDeleted) + .field(CREATION_DATE_FIELD, creationDate) + .field(CREATION_DATE_STRING_FIELD, creationDateString) + .field(STORE_SIZE_FIELD, storeSize) + .field(PRI_STORE_SIZE_FIELD, priStoreSize) + .field(COMPLETION_SIZE_FIELD, completionSize) + .field(PRI_COMPLETION_SIZE_FIELD, priCompletionSize) + .field(FIELD_DATA_MEMORY_SIZE_FIELD, fieldDataMemorySize) + .field(PRI_FIELD_DATA_MEMORY_SIZE_FIELD, priFieldDataMemorySize) + .field(FIELD_DATA_EVICTIONS_FIELD, fieldDataEvictions) + .field(PRI_FIELD_DATA_EVICTIONS_FIELD, priFieldDataEvictions) + .field(QUERY_CACHE_MEMORY_SIZE_FIELD, queryCacheMemorySize) + .field(PRI_QUERY_CACHE_MEMORY_SIZE_FIELD, priQueryCacheMemorySize) + .field(QUERY_CACHE_EVICTIONS_FIELD, queryCacheEvictions) + .field(PRI_QUERY_CACHE_EVICTIONS_FIELD, priQueryCacheEvictions) + .field(REQUEST_CACHE_MEMORY_SIZE_FIELD, requestCacheMemorySize) + .field(PRI_REQUEST_CACHE_MEMORY_SIZE_FIELD, priRequestCacheMemorySize) + .field(REQUEST_CACHE_EVICTIONS_FIELD, requestCacheEvictions) + .field(PRI_REQUEST_CACHE_EVICTIONS_FIELD, priRequestCacheEvictions) + .field(REQUEST_CACHE_HIT_COUNT_FIELD, requestCacheHitCount) + .field(PRI_REQUEST_CACHE_HIT_COUNT_FIELD, priRequestCacheHitCount) + .field(REQUEST_CACHE_MISS_COUNT_FIELD, requestCacheMissCount) + .field(PRI_REQUEST_CACHE_MISS_COUNT_FIELD, priRequestCacheMissCount) + .field(FLUSH_TOTAL_FIELD, flushTotal) + .field(PRI_FLUSH_TOTAL_FIELD, priFlushTotal) + .field(FLUSH_TOTAL_TIME_FIELD, flushTotalTime) + .field(PRI_FLUSH_TOTAL_TIME_FIELD, priFlushTotalTime) + .field(GET_CURRENT_FIELD, getCurrent) + .field(PRI_GET_CURRENT_FIELD, priGetCurrent) + .field(GET_TIME_FIELD, 
getTime) + .field(PRI_GET_TIME_FIELD, priGetTime) + .field(GET_TOTAL_FIELD, getTotal) + .field(PRI_GET_TOTAL_FIELD, priGetTotal) + .field(GET_EXISTS_TIME_FIELD, getExistsTime) + .field(PRI_GET_EXISTS_TIME_FIELD, priGetExistsTime) + .field(GET_EXISTS_TOTAL_FIELD, getExistsTotal) + .field(PRI_GET_EXISTS_TOTAL_FIELD, priGetExistsTotal) + .field(GET_MISSING_TIME_FIELD, getMissingTime) + .field(PRI_GET_MISSING_TIME_FIELD, priGetMissingTime) + .field(GET_MISSING_TOTAL_FIELD, getMissingTotal) + .field(PRI_GET_MISSING_TOTAL_FIELD, priGetMissingTotal) + .field(INDEXING_DELETE_CURRENT_FIELD, indexingDeleteCurrent) + .field(PRI_INDEXING_DELETE_CURRENT_FIELD, priIndexingDeleteCurrent) + .field(INDEXING_DELETE_TIME_FIELD, indexingDeleteTime) + .field(PRI_INDEXING_DELETE_TIME_FIELD, priIndexingDeleteTime) + .field(INDEXING_DELETE_TOTAL_FIELD, indexingDeleteTotal) + .field(PRI_INDEXING_DELETE_TOTAL_FIELD, priIndexingDeleteTotal) + .field(INDEXING_INDEX_CURRENT_FIELD, indexingIndexCurrent) + .field(PRI_INDEXING_INDEX_CURRENT_FIELD, priIndexingIndexCurrent) + .field(INDEXING_INDEX_TIME_FIELD, indexingIndexTime) + .field(PRI_INDEXING_INDEX_TIME_FIELD, priIndexingIndexTime) + .field(INDEXING_INDEX_TOTAL_FIELD, indexingIndexTotal) + .field(PRI_INDEXING_INDEX_TOTAL_FIELD, priIndexingIndexTotal) + .field(INDEXING_INDEX_FAILED_FIELD, indexingIndexFailed) + .field(PRI_INDEXING_INDEX_FAILED_FIELD, priIndexingIndexFailed) + .field(MERGES_CURRENT_FIELD, mergesCurrent) + .field(PRI_MERGES_CURRENT_FIELD, priMergesCurrent) + .field(MERGES_CURRENT_DOCS_FIELD, mergesCurrentDocs) + .field(PRI_MERGES_CURRENT_DOCS_FIELD, priMergesCurrentDocs) + .field(MERGES_CURRENT_SIZE_FIELD, mergesCurrentSize) + .field(PRI_MERGES_CURRENT_SIZE_FIELD, priMergesCurrentSize) + .field(MERGES_TOTAL_FIELD, mergesTotal) + .field(PRI_MERGES_TOTAL_FIELD, priMergesTotal) + .field(MERGES_TOTAL_DOCS_FIELD, mergesTotalDocs) + .field(PRI_MERGES_TOTAL_DOCS_FIELD, priMergesTotalDocs) + .field(MERGES_TOTAL_SIZE_FIELD, mergesTotalSize) + .field(PRI_MERGES_TOTAL_SIZE_FIELD, priMergesTotalSize) + .field(MERGES_TOTAL_TIME_FIELD, mergesTotalTime) + .field(PRI_MERGES_TOTAL_TIME_FIELD, priMergesTotalTime) + .field(REFRESH_TOTAL_FIELD, refreshTotal) + .field(PRI_REFRESH_TOTAL_FIELD, priRefreshTotal) + .field(REFRESH_TIME_FIELD, refreshTime) + .field(PRI_REFRESH_TIME_FIELD, priRefreshTime) + .field(REFRESH_EXTERNAL_TOTAL_FIELD, refreshExternalTotal) + .field(PRI_REFRESH_EXTERNAL_TOTAL_FIELD, priRefreshExternalTotal) + .field(REFRESH_EXTERNAL_TIME_FIELD, refreshExternalTime) + .field(PRI_REFRESH_EXTERNAL_TIME_FIELD, priRefreshExternalTime) + .field(REFRESH_LISTENERS_FIELD, refreshListeners) + .field(PRI_REFRESH_LISTENERS_FIELD, priRefreshListeners) + .field(SEARCH_FETCH_CURRENT_FIELD, searchFetchCurrent) + .field(PRI_SEARCH_FETCH_CURRENT_FIELD, priSearchFetchCurrent) + .field(SEARCH_FETCH_TIME_FIELD, searchFetchTime) + .field(PRI_SEARCH_FETCH_TIME_FIELD, priSearchFetchTime) + .field(SEARCH_FETCH_TOTAL_FIELD, searchFetchTotal) + .field(PRI_SEARCH_FETCH_TOTAL_FIELD, priSearchFetchTotal) + .field(SEARCH_OPEN_CONTEXTS_FIELD, searchOpenContexts) + .field(PRI_SEARCH_OPEN_CONTEXTS_FIELD, priSearchOpenContexts) + .field(SEARCH_QUERY_CURRENT_FIELD, searchQueryCurrent) + .field(PRI_SEARCH_QUERY_CURRENT_FIELD, priSearchQueryCurrent) + .field(SEARCH_QUERY_TIME_FIELD, searchQueryTime) + .field(PRI_SEARCH_QUERY_TIME_FIELD, priSearchQueryTime) + .field(SEARCH_QUERY_TOTAL_FIELD, searchQueryTotal) + .field(PRI_SEARCH_QUERY_TOTAL_FIELD, priSearchQueryTotal) + 
.field(SEARCH_SCROLL_CURRENT_FIELD, searchScrollCurrent) + .field(PRI_SEARCH_SCROLL_CURRENT_FIELD, priSearchScrollCurrent) + .field(SEARCH_SCROLL_TIME_FIELD, searchScrollTime) + .field(PRI_SEARCH_SCROLL_TIME_FIELD, priSearchScrollTime) + .field(SEARCH_SCROLL_TOTAL_FIELD, searchScrollTotal) + .field(PRI_SEARCH_SCROLL_TOTAL_FIELD, priSearchScrollTotal) + .field(SEARCH_POINT_IN_TIME_CURRENT_FIELD, searchPointInTimeCurrent) + .field(PRI_SEARCH_POINT_IN_TIME_CURRENT_FIELD, priSearchPointInTimeCurrent) + .field(SEARCH_POINT_IN_TIME_TIME_FIELD, searchPointInTimeTime) + .field(PRI_SEARCH_POINT_IN_TIME_TIME_FIELD, priSearchPointInTimeTime) + .field(SEARCH_POINT_IN_TIME_TOTAL_FIELD, searchPointInTimeTotal) + .field(PRI_SEARCH_POINT_IN_TIME_TOTAL_FIELD, priSearchPointInTimeTotal) + .field(SEGMENTS_COUNT_FIELD, segmentsCount) + .field(PRI_SEGMENTS_COUNT_FIELD, priSegmentsCount) + .field(SEGMENTS_MEMORY_FIELD, segmentsMemory) + .field(PRI_SEGMENTS_MEMORY_FIELD, priSegmentsMemory) + .field(SEGMENTS_INDEX_WRITER_MEMORY_FIELD, segmentsIndexWriterMemory) + .field(PRI_SEGMENTS_INDEX_WRITER_MEMORY_FIELD, priSegmentsIndexWriterMemory) + .field(SEGMENTS_VERSION_MAP_MEMORY_FIELD, segmentsVersionMapMemory) + .field(PRI_SEGMENTS_VERSION_MAP_MEMORY_FIELD, priSegmentsVersionMapMemory) + .field(SEGMENTS_FIXED_BITSET_MEMORY_FIELD, segmentsFixedBitsetMemory) + .field(PRI_SEGMENTS_FIXED_BITSET_MEMORY_FIELD, priSegmentsFixedBitsetMemory) + .field(WARMER_CURRENT_FIELD, warmerCurrent) + .field(PRI_WARMER_CURRENT_FIELD, priWarmerCurrent) + .field(WARMER_TOTAL_FIELD, warmerTotal) + .field(PRI_WARMER_TOTAL_FIELD, priWarmerTotal) + .field(WARMER_TOTAL_TIME_FIELD, warmerTotalTime) + .field(PRI_WARMER_TOTAL_TIME_FIELD, priWarmerTotalTime) + .field(SUGGEST_CURRENT_FIELD, suggestCurrent) + .field(PRI_SUGGEST_CURRENT_FIELD, priSuggestCurrent) + .field(SUGGEST_TIME_FIELD, suggestTime) + .field(PRI_SUGGEST_TIME_FIELD, priSuggestTime) + .field(SUGGEST_TOTAL_FIELD, suggestTotal) + .field(PRI_SUGGEST_TOTAL_FIELD, priSuggestTotal) + .field(MEMORY_TOTAL_FIELD, memoryTotal) + .field(PRI_MEMORY_TOTAL_FIELD, priMemoryTotal) + .field(SEARCH_THROTTLED_FIELD, searchThrottled) + return builder.endObject() + } + + override fun writeTo(out: StreamOutput) { + out.writeString(health) + out.writeString(status) + out.writeString(index) + out.writeString(uuid) + out.writeString(pri) + out.writeString(rep) + out.writeString(docsCount) + out.writeString(docsDeleted) + out.writeString(creationDate) + out.writeString(creationDateString) + out.writeString(storeSize) + out.writeString(priStoreSize) + out.writeString(completionSize) + out.writeString(priCompletionSize) + out.writeString(fieldDataMemorySize) + out.writeString(priFieldDataMemorySize) + out.writeString(fieldDataEvictions) + out.writeString(priFieldDataEvictions) + out.writeString(queryCacheMemorySize) + out.writeString(priQueryCacheMemorySize) + out.writeString(queryCacheEvictions) + out.writeString(priQueryCacheEvictions) + out.writeString(requestCacheMemorySize) + out.writeString(priRequestCacheMemorySize) + out.writeString(requestCacheEvictions) + out.writeString(priRequestCacheEvictions) + out.writeString(requestCacheHitCount) + out.writeString(priRequestCacheHitCount) + out.writeString(requestCacheMissCount) + out.writeString(priRequestCacheMissCount) + out.writeString(flushTotal) + out.writeString(priFlushTotal) + out.writeString(flushTotalTime) + out.writeString(priFlushTotalTime) + out.writeString(getCurrent) + out.writeString(priGetCurrent) + out.writeString(getTime) + 
out.writeString(priGetTime) + out.writeString(getTotal) + out.writeString(priGetTotal) + out.writeString(getExistsTime) + out.writeString(priGetExistsTime) + out.writeString(getExistsTotal) + out.writeString(priGetExistsTotal) + out.writeString(getMissingTime) + out.writeString(priGetMissingTime) + out.writeString(getMissingTotal) + out.writeString(priGetMissingTotal) + out.writeString(indexingDeleteCurrent) + out.writeString(priIndexingDeleteCurrent) + out.writeString(indexingDeleteTime) + out.writeString(priIndexingDeleteTime) + out.writeString(indexingDeleteTotal) + out.writeString(priIndexingDeleteTotal) + out.writeString(indexingIndexCurrent) + out.writeString(priIndexingIndexCurrent) + out.writeString(indexingIndexTime) + out.writeString(priIndexingIndexTime) + out.writeString(indexingIndexTotal) + out.writeString(priIndexingIndexTotal) + out.writeString(indexingIndexFailed) + out.writeString(priIndexingIndexFailed) + out.writeString(mergesCurrent) + out.writeString(priMergesCurrent) + out.writeString(mergesCurrentDocs) + out.writeString(priMergesCurrentDocs) + out.writeString(mergesCurrentSize) + out.writeString(priMergesCurrentSize) + out.writeString(mergesTotal) + out.writeString(priMergesTotal) + out.writeString(mergesTotalDocs) + out.writeString(priMergesTotalDocs) + out.writeString(mergesTotalSize) + out.writeString(priMergesTotalSize) + out.writeString(mergesTotalTime) + out.writeString(priMergesTotalTime) + out.writeString(refreshTotal) + out.writeString(priRefreshTotal) + out.writeString(refreshTime) + out.writeString(priRefreshTime) + out.writeString(refreshExternalTotal) + out.writeString(priRefreshExternalTotal) + out.writeString(refreshExternalTime) + out.writeString(priRefreshExternalTime) + out.writeString(refreshListeners) + out.writeString(priRefreshListeners) + out.writeString(searchFetchCurrent) + out.writeString(priSearchFetchCurrent) + out.writeString(searchFetchTime) + out.writeString(priSearchFetchTime) + out.writeString(searchFetchTotal) + out.writeString(priSearchFetchTotal) + out.writeString(searchOpenContexts) + out.writeString(priSearchOpenContexts) + out.writeString(searchQueryCurrent) + out.writeString(priSearchQueryCurrent) + out.writeString(searchQueryTime) + out.writeString(priSearchQueryTime) + out.writeString(searchQueryTotal) + out.writeString(priSearchQueryTotal) + out.writeString(searchScrollCurrent) + out.writeString(priSearchScrollCurrent) + out.writeString(searchScrollTime) + out.writeString(priSearchScrollTime) + out.writeString(searchScrollTotal) + out.writeString(priSearchScrollTotal) + out.writeString(searchPointInTimeCurrent) + out.writeString(priSearchPointInTimeCurrent) + out.writeString(searchPointInTimeTime) + out.writeString(priSearchPointInTimeTime) + out.writeString(searchPointInTimeTotal) + out.writeString(priSearchPointInTimeTotal) + out.writeString(segmentsCount) + out.writeString(priSegmentsCount) + out.writeString(segmentsMemory) + out.writeString(priSegmentsMemory) + out.writeString(segmentsIndexWriterMemory) + out.writeString(priSegmentsIndexWriterMemory) + out.writeString(segmentsVersionMapMemory) + out.writeString(priSegmentsVersionMapMemory) + out.writeString(segmentsFixedBitsetMemory) + out.writeString(priSegmentsFixedBitsetMemory) + out.writeString(warmerCurrent) + out.writeString(priWarmerCurrent) + out.writeString(warmerTotal) + out.writeString(priWarmerTotal) + out.writeString(warmerTotalTime) + out.writeString(priWarmerTotalTime) + out.writeString(suggestCurrent) + out.writeString(priSuggestCurrent) + 
out.writeString(suggestTime) + out.writeString(priSuggestTime) + out.writeString(suggestTotal) + out.writeString(priSuggestTotal) + out.writeString(memoryTotal) + out.writeString(priMemoryTotal) + out.writeString(searchThrottled) + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsHelpers.kt b/alerting/src/main/kotlin/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsHelpers.kt new file mode 100644 index 000000000..12152e69d --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsHelpers.kt @@ -0,0 +1,495 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util.clusterMetricsMonitorHelpers + +import org.opensearch.action.ActionRequest +import org.opensearch.action.ActionRequestValidationException +import org.opensearch.action.ValidateActions +import org.opensearch.action.admin.cluster.state.ClusterStateRequest +import org.opensearch.action.admin.cluster.state.ClusterStateResponse +import org.opensearch.action.admin.indices.stats.CommonStats +import org.opensearch.action.admin.indices.stats.IndicesStatsRequest +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse +import org.opensearch.action.admin.indices.stats.ShardStats +import org.opensearch.alerting.util.IndexUtils.Companion.VALID_INDEX_NAME_REGEX +import org.opensearch.cluster.routing.UnassignedInfo +import org.opensearch.common.unit.TimeValue +import org.opensearch.core.action.ActionResponse +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.index.cache.query.QueryCacheStats +import org.opensearch.index.engine.CommitStats +import org.opensearch.index.engine.Engine +import org.opensearch.index.engine.SegmentsStats +import org.opensearch.index.fielddata.FieldDataStats +import org.opensearch.index.flush.FlushStats +import org.opensearch.index.get.GetStats +import org.opensearch.index.merge.MergeStats +import org.opensearch.index.refresh.RefreshStats +import org.opensearch.index.search.stats.SearchStats +import org.opensearch.index.seqno.SeqNoStats +import org.opensearch.index.shard.DocsStats +import org.opensearch.index.store.StoreStats +import org.opensearch.search.suggest.completion.CompletionStats +import java.time.Instant +import java.util.Locale +import java.util.function.Function + +class CatShardsRequestWrapper(val pathParams: String = "") : ActionRequest() { + var clusterStateRequest: ClusterStateRequest = + ClusterStateRequest().clear().nodes(true).routingTable(true) + var indicesStatsRequest: IndicesStatsRequest = + IndicesStatsRequest().all() + var indicesList = arrayOf<String>() + + init { + if (pathParams.isNotBlank()) { + indicesList = pathParams.split(",").toTypedArray() + + require(validate() == null) { + "The path parameters do not form a valid, comma-separated list of data streams, indices, or index aliases." + } + + clusterStateRequest = clusterStateRequest.indices(*indicesList) + indicesStatsRequest = indicesStatsRequest.indices(*indicesList) + } + } + + override fun validate(): ActionRequestValidationException?
= null + if (pathParams.isNotBlank() && indicesList.any { !VALID_INDEX_NAME_REGEX.containsMatchIn(it) }) + exception = ValidateActions.addValidationError( + "The path parameters do not form a valid, comma-separated list of data streams, indices, or index aliases.", + exception + ) + return exception + } +} + +class CatShardsResponseWrapper( + stateResp: ClusterStateResponse, + indicesResp: IndicesStatsResponse +) : ActionResponse(), ToXContentObject { + var shardInfoList: List<ShardInfo> = listOf() + + init { + shardInfoList = compileShardInfo(stateResp, indicesResp) + } + + companion object { + const val WRAPPER_FIELD = "shards" + } + + override fun writeTo(out: StreamOutput) { + out.writeList(shardInfoList) + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + builder.startArray(WRAPPER_FIELD) + shardInfoList.forEach { it.toXContent(builder, params) } + builder.endArray() + return builder.endObject() + } + + private fun <S, T> getOrNull(stats: S?, accessor: Function<S, T>, func: Function<T, Any?>): Any? { + if (stats != null) { + val t: T? = accessor.apply(stats) + if (t != null) { + return func.apply(t) + } + } + return null + } + + private fun compileShardInfo( + stateResp: ClusterStateResponse, + indicesResp: IndicesStatsResponse + ): List<ShardInfo> { + val list = mutableListOf<ShardInfo>() + + for (shard in stateResp.state.routingTable.allShards()) { + val shardStats = indicesResp.asMap()[shard] + var commonStats: CommonStats? = null + var commitStats: CommitStats? = null + if (shardStats != null) { + commonStats = shardStats.stats + commitStats = shardStats.commitStats + } + + var shardInfo = ShardInfo( + index = shard.indexName, + shard = "${shard.id}", + primaryOrReplica = if (shard.primary()) "p" else "r", + state = shard.state().name, + docs = getOrNull(commonStats, CommonStats::getDocs, DocsStats::getCount)?.toString(), + store = getOrNull(commonStats, CommonStats::getStore, StoreStats::getSize)?.toString(), + id = null, // Added below + node = null, // Added below + completionSize = getOrNull(commonStats, CommonStats::getCompletion, CompletionStats::getSize)?.toString(), + fieldDataMemory = getOrNull(commonStats, CommonStats::getFieldData, FieldDataStats::getMemorySize)?.toString(), + fieldDataEvictions = getOrNull(commonStats, CommonStats::getFieldData, FieldDataStats::getEvictions)?.toString(), + flushTotal = getOrNull(commonStats, CommonStats::getFlush, FlushStats::getTotal)?.toString(), + flushTotalTime = getOrNull(commonStats, CommonStats::getFlush, FlushStats::getTotalTime)?.toString(), + getCurrent = getOrNull(commonStats, CommonStats::getGet, GetStats::current)?.toString(), + getTime = getOrNull(commonStats, CommonStats::getGet, GetStats::getTime)?.toString(), + getTotal = getOrNull(commonStats, CommonStats::getGet, GetStats::getCount)?.toString(), + getExistsTime = getOrNull(commonStats, CommonStats::getGet, GetStats::getExistsTime)?.toString(), + getExistsTotal = getOrNull(commonStats, CommonStats::getGet, GetStats::getExistsCount)?.toString(), + getMissingTime = getOrNull(commonStats, CommonStats::getGet, GetStats::getMissingTime)?.toString(), + getMissingTotal = getOrNull(commonStats, CommonStats::getGet, GetStats::getMissingCount)?.toString(), + indexingDeleteCurrent = getOrNull(commonStats, CommonStats::getIndexing, { it.total.deleteCurrent })?.toString(), + indexingDeleteTime = getOrNull(commonStats, CommonStats::getIndexing, { it.total.deleteTime })?.toString(), + indexingDeleteTotal = getOrNull(commonStats, CommonStats::getIndexing, {
it.total.deleteCount })?.toString(), + indexingIndexCurrent = getOrNull(commonStats, CommonStats::getIndexing, { it.total.indexCurrent })?.toString(), + indexingIndexTime = getOrNull(commonStats, CommonStats::getIndexing, { it.total.indexTime })?.toString(), + indexingIndexTotal = getOrNull(commonStats, CommonStats::getIndexing, { it.total.indexCount })?.toString(), + indexingIndexFailed = getOrNull(commonStats, CommonStats::getIndexing, { it.total.indexFailedCount })?.toString(), + mergesCurrent = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getCurrent)?.toString(), + mergesCurrentDocs = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getCurrentNumDocs)?.toString(), + mergesCurrentSize = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getCurrentSize)?.toString(), + mergesTotal = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getTotal)?.toString(), + mergesTotalDocs = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getTotalNumDocs)?.toString(), + mergesTotalSize = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getTotalSize)?.toString(), + mergesTotalTime = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getTotalTime)?.toString(), + queryCacheMemory = getOrNull(commonStats, CommonStats::getQueryCache, QueryCacheStats::getMemorySize)?.toString(), + queryCacheEvictions = getOrNull(commonStats, CommonStats::getQueryCache, QueryCacheStats::getEvictions)?.toString(), + recoverySourceType = null, // Added below + refreshTotal = getOrNull(commonStats, CommonStats::getRefresh, RefreshStats::getTotal)?.toString(), + refreshTime = getOrNull(commonStats, CommonStats::getRefresh, RefreshStats::getTotalTime)?.toString(), + searchFetchCurrent = getOrNull(commonStats, CommonStats::getSearch, { it.total.fetchCurrent })?.toString(), + searchFetchTime = getOrNull(commonStats, CommonStats::getSearch, { it.total.fetchTime })?.toString(), + searchFetchTotal = getOrNull(commonStats, CommonStats::getSearch, { it.total.fetchCount })?.toString(), + searchOpenContexts = getOrNull(commonStats, CommonStats::getSearch, SearchStats::getOpenContexts)?.toString(), + searchQueryCurrent = getOrNull(commonStats, CommonStats::getSearch, { it.total.queryCurrent })?.toString(), + searchQueryTime = getOrNull(commonStats, CommonStats::getSearch, { it.total.queryTime })?.toString(), + searchQueryTotal = getOrNull(commonStats, CommonStats::getSearch, { it.total.queryCount })?.toString(), + searchScrollCurrent = getOrNull(commonStats, CommonStats::getSearch, { it.total.scrollCurrent })?.toString(), + searchScrollTime = getOrNull(commonStats, CommonStats::getSearch, { it.total.scrollTime })?.toString(), + searchScrollTotal = getOrNull(commonStats, CommonStats::getSearch, { it.total.scrollCount })?.toString(), + segmentsCount = getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getCount)?.toString(), + segmentsMemory = getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getZeroMemory)?.toString(), + segmentsIndexWriterMemory = + getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getIndexWriterMemory)?.toString(), + segmentsVersionMapMemory = getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getVersionMapMemory)?.toString(), + fixedBitsetMemory = getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getBitsetMemory)?.toString(), + globalCheckpoint = getOrNull(shardStats, ShardStats::getSeqNoStats, SeqNoStats::getGlobalCheckpoint)?.toString(), + localCheckpoint = getOrNull(shardStats, 
ShardStats::getSeqNoStats, SeqNoStats::getLocalCheckpoint)?.toString(), + maxSeqNo = getOrNull(shardStats, ShardStats::getSeqNoStats, SeqNoStats::getMaxSeqNo)?.toString(), + syncId = commitStats?.userData?.get(Engine.SYNC_COMMIT_ID), + unassignedAt = null, // Added below + unassignedDetails = null, // Added below + unassignedFor = null, // Added below + unassignedReason = null // Added below + ) + + if (shard.assignedToNode()) { + val id = shard.currentNodeId() + val node = StringBuilder() + node.append(stateResp.state.nodes().get(id).name) + + if (shard.relocating()) { + val reloNodeId = shard.relocatingNodeId() + val reloName = stateResp.state.nodes().get(reloNodeId).name + node.append(" -> ") + node.append(reloNodeId) + node.append(" ") + node.append(reloName) + } + + shardInfo = shardInfo.copy( + id = id, + node = node.toString() + ) + } + + if (shard.unassignedInfo() != null) { + val unassignedTime = Instant.ofEpochMilli(shard.unassignedInfo().unassignedTimeInMillis) + shardInfo = shardInfo.copy( + unassignedReason = shard.unassignedInfo().reason.name, + unassignedAt = UnassignedInfo.DATE_TIME_FORMATTER.format(unassignedTime), + unassignedFor = + TimeValue.timeValueMillis(System.currentTimeMillis() - shard.unassignedInfo().unassignedTimeInMillis).stringRep, + unassignedDetails = shard.unassignedInfo().details + ) + } + + if (shard.recoverySource() != null) { + shardInfo = shardInfo.copy( + recoverySourceType = shard.recoverySource().type.toString().lowercase(Locale.ROOT) + ) + } + + list.add(shardInfo) + } + return list + } + + data class ShardInfo( + val index: String?, + val shard: String?, + val primaryOrReplica: String?, + val state: String?, + val docs: String?, + val store: String?, + val id: String?, + val node: String?, + val completionSize: String?, + val fieldDataMemory: String?, + val fieldDataEvictions: String?, + val flushTotal: String?, + val flushTotalTime: String?, + val getCurrent: String?, + val getTime: String?, + val getTotal: String?, + val getExistsTime: String?, + val getExistsTotal: String?, + val getMissingTime: String?, + val getMissingTotal: String?, + val indexingDeleteCurrent: String?, + val indexingDeleteTime: String?, + val indexingDeleteTotal: String?, + val indexingIndexCurrent: String?, + val indexingIndexTime: String?, + val indexingIndexTotal: String?, + val indexingIndexFailed: String?, + val mergesCurrent: String?, + val mergesCurrentDocs: String?, + val mergesCurrentSize: String?, + val mergesTotal: String?, + val mergesTotalDocs: String?, + val mergesTotalSize: String?, + val mergesTotalTime: String?, + val queryCacheMemory: String?, + val queryCacheEvictions: String?, + val recoverySourceType: String?, + val refreshTotal: String?, + val refreshTime: String?, + val searchFetchCurrent: String?, + val searchFetchTime: String?, + val searchFetchTotal: String?, + val searchOpenContexts: String?, + val searchQueryCurrent: String?, + val searchQueryTime: String?, + val searchQueryTotal: String?, + val searchScrollCurrent: String?, + val searchScrollTime: String?, + val searchScrollTotal: String?, + val segmentsCount: String?, + val segmentsMemory: String?, + val segmentsIndexWriterMemory: String?, + val segmentsVersionMapMemory: String?, + val fixedBitsetMemory: String?, + val globalCheckpoint: String?, + val localCheckpoint: String?, + val maxSeqNo: String?, + val syncId: String?, + val unassignedAt: String?, + val unassignedDetails: String?, + val unassignedFor: String?, + val unassignedReason: String? 
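+ // All columns above are nullable strings: stats are absent for unassigned shards, and getOrNull() surfaces them as null rather than a zero default.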
+ ) : ToXContentObject, Writeable { + companion object { + const val INDEX_FIELD = "index" + const val SHARD_FIELD = "shard" + const val PRIMARY_OR_REPLICA_FIELD = "primaryOrReplica" + const val STATE_FIELD = "state" + const val DOCS_FIELD = "docs" + const val STORE_FIELD = "store" + const val ID_FIELD = "id" + const val NODE_FIELD = "node" + const val COMPLETION_SIZE_FIELD = "completionSize" + const val FIELD_DATA_MEMORY_FIELD = "fielddataMemory" + const val FIELD_DATA_EVICTIONS_FIELD = "fielddataEvictions" + const val FLUSH_TOTAL_FIELD = "flushTotal" + const val FLUSH_TOTAL_TIME_FIELD = "flushTotalTime" + const val GET_CURRENT_FIELD = "getCurrent" + const val GET_TIME_FIELD = "getTime" + const val GET_TOTAL_FIELD = "getTotal" + const val GET_EXISTS_TIME_FIELD = "getExistsTime" + const val GET_EXISTS_TOTAL_FIELD = "getExistsTotal" + const val GET_MISSING_TIME_FIELD = "getMissingTime" + const val GET_MISSING_TOTAL_FIELD = "getMissingTotal" + const val INDEXING_DELETE_CURRENT_FIELD = "indexingDeleteCurrent" + const val INDEXING_DELETE_TIME_FIELD = "indexingDeleteTime" + const val INDEXING_DELETE_TOTAL_FIELD = "indexingDeleteTotal" + const val INDEXING_INDEX_CURRENT_FIELD = "indexingIndexCurrent" + const val INDEXING_INDEX_TIME_FIELD = "indexingIndexTime" + const val INDEXING_INDEX_TOTAL_FIELD = "indexingIndexTotal" + const val INDEXING_INDEX_FAILED_FIELD = "indexingIndexFailed" + const val MERGES_CURRENT_FIELD = "mergesCurrent" + const val MERGES_CURRENT_DOCS_FIELD = "mergesCurrentDocs" + const val MERGES_CURRENT_SIZE_FIELD = "mergesCurrentSize" + const val MERGES_TOTAL_FIELD = "mergesTotal" + const val MERGES_TOTAL_DOCS_FIELD = "mergesTotalDocs" + const val MERGES_TOTAL_SIZE_FIELD = "mergesTotalSize" + const val MERGES_TOTAL_TIME_FIELD = "mergesTotalTime" + const val QUERY_CACHE_MEMORY_FIELD = "queryCacheMemory" + const val QUERY_CACHE_EVICTIONS_FIELD = "queryCacheEvictions" + const val RECOVERY_SOURCE_TYPE_FIELD = "recoverysource.type" + const val REFRESH_TOTAL_FIELD = "refreshTotal" + const val REFRESH_TIME_FIELD = "refreshTime" + const val SEARCH_FETCH_CURRENT_FIELD = "searchFetchCurrent" + const val SEARCH_FETCH_TIME_FIELD = "searchFetchTime" + const val SEARCH_FETCH_TOTAL_FIELD = "searchFetchTotal" + const val SEARCH_OPEN_CONTEXTS_FIELD = "searchOpenContexts" + const val SEARCH_QUERY_CURRENT_FIELD = "searchQueryCurrent" + const val SEARCH_QUERY_TIME_FIELD = "searchQueryTime" + const val SEARCH_QUERY_TOTAL_FIELD = "searchQueryTotal" + const val SEARCH_SCROLL_CURRENT_FIELD = "searchScrollCurrent" + const val SEARCH_SCROLL_TIME_FIELD = "searchScrollTime" + const val SEARCH_SCROLL_TOTAL_FIELD = "searchScrollTotal" + const val SEGMENTS_COUNT_FIELD = "segmentsCount" + const val SEGMENTS_MEMORY_FIELD = "segmentsMemory" + const val SEGMENTS_INDEX_WRITER_MEMORY_FIELD = "segmentsIndexWriterMemory" + const val SEGMENTS_VERSION_MAP_MEMORY_FIELD = "segmentsVersionMapMemory" + const val FIXED_BITSET_MEMORY_FIELD = "fixedBitsetMemory" + const val GLOBAL_CHECKPOINT_FIELD = "globalCheckpoint" + const val LOCAL_CHECKPOINT_FIELD = "localCheckpoint" + const val MAX_SEQ_NO_FIELD = "maxSeqNo" + const val SYNC_ID_FIELD = "sync_id" + const val UNASSIGNED_AT_FIELD = "unassigned.at" + const val UNASSIGNED_DETAILS_FIELD = "unassigned.details" + const val UNASSIGNED_FOR_FIELD = "unassigned.for" + const val UNASSIGNED_REASON_FIELD = "unassigned.reason" + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + .field(INDEX_FIELD, index) + 
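+ // field() writes explicit nulls, so the JSON key set stays stable even when a stat was unavailable for the shard.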
.field(SHARD_FIELD, shard) + .field(PRIMARY_OR_REPLICA_FIELD, primaryOrReplica) + .field(STATE_FIELD, state) + .field(DOCS_FIELD, docs) + .field(STORE_FIELD, store) + .field(ID_FIELD, id) + .field(NODE_FIELD, node) + .field(COMPLETION_SIZE_FIELD, completionSize) + .field(FIELD_DATA_MEMORY_FIELD, fieldDataMemory) + .field(FIELD_DATA_EVICTIONS_FIELD, fieldDataEvictions) + .field(FLUSH_TOTAL_FIELD, flushTotal) + .field(FLUSH_TOTAL_TIME_FIELD, flushTotalTime) + .field(GET_CURRENT_FIELD, getCurrent) + .field(GET_TIME_FIELD, getTime) + .field(GET_TOTAL_FIELD, getTotal) + .field(GET_EXISTS_TIME_FIELD, getExistsTime) + .field(GET_EXISTS_TOTAL_FIELD, getExistsTotal) + .field(GET_MISSING_TIME_FIELD, getMissingTime) + .field(GET_MISSING_TOTAL_FIELD, getMissingTotal) + .field(INDEXING_DELETE_CURRENT_FIELD, indexingDeleteCurrent) + .field(INDEXING_DELETE_TIME_FIELD, indexingDeleteTime) + .field(INDEXING_DELETE_TOTAL_FIELD, indexingDeleteTotal) + .field(INDEXING_INDEX_CURRENT_FIELD, indexingIndexCurrent) + .field(INDEXING_INDEX_TIME_FIELD, indexingIndexTime) + .field(INDEXING_INDEX_TOTAL_FIELD, indexingIndexTotal) + .field(INDEXING_INDEX_FAILED_FIELD, indexingIndexFailed) + .field(MERGES_CURRENT_FIELD, mergesCurrent) + .field(MERGES_CURRENT_DOCS_FIELD, mergesCurrentDocs) + .field(MERGES_CURRENT_SIZE_FIELD, mergesCurrentSize) + .field(MERGES_TOTAL_FIELD, mergesTotal) + .field(MERGES_TOTAL_DOCS_FIELD, mergesTotalDocs) + .field(MERGES_TOTAL_SIZE_FIELD, mergesTotalSize) + .field(MERGES_TOTAL_TIME_FIELD, mergesTotalTime) + .field(QUERY_CACHE_MEMORY_FIELD, queryCacheMemory) + .field(QUERY_CACHE_EVICTIONS_FIELD, queryCacheEvictions) + .field(RECOVERY_SOURCE_TYPE_FIELD, recoverySourceType) + .field(REFRESH_TOTAL_FIELD, refreshTotal) + .field(REFRESH_TIME_FIELD, refreshTime) + .field(SEARCH_FETCH_CURRENT_FIELD, searchFetchCurrent) + .field(SEARCH_FETCH_TIME_FIELD, searchFetchTime) + .field(SEARCH_FETCH_TOTAL_FIELD, searchFetchTotal) + .field(SEARCH_OPEN_CONTEXTS_FIELD, searchOpenContexts) + .field(SEARCH_QUERY_CURRENT_FIELD, searchQueryCurrent) + .field(SEARCH_QUERY_TIME_FIELD, searchQueryTime) + .field(SEARCH_QUERY_TOTAL_FIELD, searchQueryTotal) + .field(SEARCH_SCROLL_CURRENT_FIELD, searchScrollCurrent) + .field(SEARCH_SCROLL_TIME_FIELD, searchScrollTime) + .field(SEARCH_SCROLL_TOTAL_FIELD, searchScrollTotal) + .field(SEGMENTS_COUNT_FIELD, segmentsCount) + .field(SEGMENTS_MEMORY_FIELD, segmentsMemory) + .field(SEGMENTS_INDEX_WRITER_MEMORY_FIELD, segmentsIndexWriterMemory) + .field(SEGMENTS_VERSION_MAP_MEMORY_FIELD, segmentsVersionMapMemory) + .field(FIXED_BITSET_MEMORY_FIELD, fixedBitsetMemory) + .field(GLOBAL_CHECKPOINT_FIELD, globalCheckpoint) + .field(LOCAL_CHECKPOINT_FIELD, localCheckpoint) + .field(MAX_SEQ_NO_FIELD, maxSeqNo) + .field(SYNC_ID_FIELD, syncId) + .field(UNASSIGNED_AT_FIELD, unassignedAt) + .field(UNASSIGNED_DETAILS_FIELD, unassignedDetails) + .field(UNASSIGNED_FOR_FIELD, unassignedFor) + .field(UNASSIGNED_REASON_FIELD, unassignedReason) + return builder.endObject() + } + + override fun writeTo(out: StreamOutput) { + out.writeString(index) + out.writeString(shard) + out.writeString(primaryOrReplica) + out.writeString(state) + out.writeString(docs) + out.writeString(store) + out.writeString(id) + out.writeString(node) + out.writeString(completionSize) + out.writeString(fieldDataMemory) + out.writeString(fieldDataEvictions) + out.writeString(flushTotal) + out.writeString(flushTotalTime) + out.writeString(getCurrent) + out.writeString(getTime) + out.writeString(getTotal) + 
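+ // The Writeable format is positional: a matching reader must consume these strings in exactly this order.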
out.writeString(getExistsTime) + out.writeString(getExistsTotal) + out.writeString(getMissingTime) + out.writeString(getMissingTotal) + out.writeString(indexingDeleteCurrent) + out.writeString(indexingDeleteTime) + out.writeString(indexingDeleteTotal) + out.writeString(indexingIndexCurrent) + out.writeString(indexingIndexTime) + out.writeString(indexingIndexTotal) + out.writeString(indexingIndexFailed) + out.writeString(mergesCurrent) + out.writeString(mergesCurrentDocs) + out.writeString(mergesCurrentSize) + out.writeString(mergesTotal) + out.writeString(mergesTotalDocs) + out.writeString(mergesTotalSize) + out.writeString(mergesTotalTime) + out.writeString(queryCacheMemory) + out.writeString(queryCacheEvictions) + out.writeString(recoverySourceType) + out.writeString(refreshTotal) + out.writeString(refreshTime) + out.writeString(searchFetchCurrent) + out.writeString(searchFetchTime) + out.writeString(searchFetchTotal) + out.writeString(searchOpenContexts) + out.writeString(searchQueryCurrent) + out.writeString(searchQueryTime) + out.writeString(searchQueryTotal) + out.writeString(searchScrollCurrent) + out.writeString(searchScrollTime) + out.writeString(searchScrollTotal) + out.writeString(segmentsCount) + out.writeString(segmentsMemory) + out.writeString(segmentsIndexWriterMemory) + out.writeString(segmentsVersionMapMemory) + out.writeString(fixedBitsetMemory) + out.writeString(globalCheckpoint) + out.writeString(localCheckpoint) + out.writeString(maxSeqNo) + out.writeString(syncId) + out.writeString(unassignedAt) + out.writeString(unassignedDetails) + out.writeString(unassignedFor) + out.writeString(unassignedReason) + } + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/util/SupportedClusterMetricsSettingsExtensions.kt b/alerting/src/main/kotlin/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensions.kt similarity index 53% rename from alerting/src/main/kotlin/org/opensearch/alerting/util/SupportedClusterMetricsSettingsExtensions.kt rename to alerting/src/main/kotlin/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensions.kt index 2e3027991..a1c86d2f5 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/util/SupportedClusterMetricsSettingsExtensions.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensions.kt @@ -3,9 +3,8 @@ * SPDX-License-Identifier: Apache-2.0 */ -package org.opensearch.alerting.util +package org.opensearch.alerting.util.clusterMetricsMonitorHelpers -import org.opensearch.action.ActionResponse import org.opensearch.action.admin.cluster.health.ClusterHealthRequest import org.opensearch.action.admin.cluster.health.ClusterHealthResponse import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest @@ -16,20 +15,25 @@ import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsResponse import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse import org.opensearch.action.admin.cluster.state.ClusterStateRequest +import org.opensearch.action.admin.cluster.state.ClusterStateResponse import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest import org.opensearch.action.admin.cluster.stats.ClusterStatsResponse import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksRequest import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksResponse 
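The refactored dispatch below replaces blocking `.get()` calls with the coroutine helper `suspendUntil` imported here. For orientation, a bridge of this shape typically underlies such a helper (a hedged sketch; the names and packages are inferred from the surrounding imports rather than copied from the plugin):

```kotlin
import kotlinx.coroutines.suspendCancellableCoroutine
import org.opensearch.core.action.ActionListener
import kotlin.coroutines.resume
import kotlin.coroutines.resumeWithException

// Hedged sketch, not the plugin's exact source: suspends the calling coroutine
// until the transport layer invokes the ActionListener, then resumes with the
// response or rethrows the failure.
suspend fun <C, T> C.suspendUntil(block: C.(ActionListener<T>) -> Unit): T =
    suspendCancellableCoroutine { cont ->
        block(object : ActionListener<T> {
            override fun onResponse(response: T) = cont.resume(response)
            override fun onFailure(e: Exception) = cont.resumeWithException(e)
        })
    }
```

Each call site then hands the listener (`it`) to the admin-client method and suspends until `onResponse` or `onFailure` fires, instead of blocking a transport thread on a future.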
import org.opensearch.action.admin.indices.recovery.RecoveryRequest import org.opensearch.action.admin.indices.recovery.RecoveryResponse -import org.opensearch.alerting.core.model.ClusterMetricsInput -import org.opensearch.alerting.core.model.ClusterMetricsInput.ClusterMetricType +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse import org.opensearch.alerting.opensearchapi.convertToMap +import org.opensearch.alerting.opensearchapi.suspendUntil import org.opensearch.alerting.settings.SupportedClusterMetricsSettings import org.opensearch.alerting.settings.SupportedClusterMetricsSettings.Companion.resolveToActionRequest import org.opensearch.client.Client +import org.opensearch.cluster.metadata.Metadata import org.opensearch.common.settings.Settings import org.opensearch.common.xcontent.support.XContentMapValues +import org.opensearch.commons.alerting.model.ClusterMetricsInput +import org.opensearch.core.action.ActionResponse /** * Calls the appropriate transport action for the API requested in the [clusterMetricsInput]. @@ -37,20 +41,50 @@ import org.opensearch.common.xcontent.support.XContentMapValues * @param client The [Client] used to call the respective transport action. * @throws IllegalArgumentException When the requested API is not supported by this feature. */ -fun executeTransportAction(clusterMetricsInput: ClusterMetricsInput, client: Client): ActionResponse { +suspend fun executeTransportAction(clusterMetricsInput: ClusterMetricsInput, client: Client): ActionResponse { val request = resolveToActionRequest(clusterMetricsInput) return when (clusterMetricsInput.clusterMetricType) { - ClusterMetricType.CAT_PENDING_TASKS -> client.admin().cluster().pendingClusterTasks(request as PendingClusterTasksRequest).get() - ClusterMetricType.CAT_RECOVERY -> client.admin().indices().recoveries(request as RecoveryRequest).get() - ClusterMetricType.CAT_SNAPSHOTS -> client.admin().cluster().getSnapshots(request as GetSnapshotsRequest).get() - ClusterMetricType.CAT_TASKS -> client.admin().cluster().listTasks(request as ListTasksRequest).get() - ClusterMetricType.CLUSTER_HEALTH -> client.admin().cluster().health(request as ClusterHealthRequest).get() - ClusterMetricType.CLUSTER_SETTINGS -> { - val metadata = client.admin().cluster().state(request as ClusterStateRequest).get().state.metadata + ClusterMetricsInput.ClusterMetricType.CAT_INDICES -> { + request as CatIndicesRequestWrapper + val healthResponse: ClusterHealthResponse = client.suspendUntil { admin().cluster().health(request.clusterHealthRequest, it) } + val indexSettingsResponse: GetSettingsResponse = + client.suspendUntil { admin().indices().getSettings(request.indexSettingsRequest, it) } + val indicesResponse: IndicesStatsResponse = + client.suspendUntil { admin().indices().stats(request.indicesStatsRequest, it) } + val stateResponse: ClusterStateResponse = + client.suspendUntil { admin().cluster().state(request.clusterStateRequest, it) } + return CatIndicesResponseWrapper(healthResponse, stateResponse, indexSettingsResponse, indicesResponse) + } + ClusterMetricsInput.ClusterMetricType.CAT_PENDING_TASKS -> + client.suspendUntil { + admin().cluster().pendingClusterTasks(request as PendingClusterTasksRequest, it) + } + ClusterMetricsInput.ClusterMetricType.CAT_RECOVERY -> + client.suspendUntil { admin().indices().recoveries(request as RecoveryRequest, it) } + ClusterMetricsInput.ClusterMetricType.CAT_SHARDS -> { + request as 
CatShardsRequestWrapper + val stateResponse: ClusterStateResponse = + client.suspendUntil { admin().cluster().state(request.clusterStateRequest, it) } + val indicesResponse: IndicesStatsResponse = + client.suspendUntil { admin().indices().stats(request.indicesStatsRequest, it) } + return CatShardsResponseWrapper(stateResponse, indicesResponse) + } + ClusterMetricsInput.ClusterMetricType.CAT_SNAPSHOTS -> + client.suspendUntil { admin().cluster().getSnapshots(request as GetSnapshotsRequest, it) } + ClusterMetricsInput.ClusterMetricType.CAT_TASKS -> + client.suspendUntil { admin().cluster().listTasks(request as ListTasksRequest, it) } + ClusterMetricsInput.ClusterMetricType.CLUSTER_HEALTH -> + client.suspendUntil { admin().cluster().health(request as ClusterHealthRequest, it) } + ClusterMetricsInput.ClusterMetricType.CLUSTER_SETTINGS -> { + val stateResponse: ClusterStateResponse = + client.suspendUntil { admin().cluster().state(request as ClusterStateRequest, it) } + val metadata: Metadata = stateResponse.state.metadata + return ClusterGetSettingsResponse(metadata.persistentSettings(), metadata.transientSettings(), Settings.EMPTY) + } - ClusterMetricType.CLUSTER_STATS -> client.admin().cluster().clusterStats(request as ClusterStatsRequest).get() - ClusterMetricType.NODES_STATS -> client.admin().cluster().nodesStats(request as NodesStatsRequest).get() + ClusterMetricsInput.ClusterMetricType.CLUSTER_STATS -> + client.suspendUntil { admin().cluster().clusterStats(request as ClusterStatsRequest, it) } + ClusterMetricsInput.ClusterMetricType.NODES_STATS -> + client.suspendUntil { admin().cluster().nodesStats(request as NodesStatsRequest, it) } else -> throw IllegalArgumentException("Unsupported API request type: ${request.javaClass.name}") } } @@ -64,35 +98,43 @@ fun ActionResponse.toMap(): Map<String, Any> { return when (this) { is ClusterHealthResponse -> redactFieldsFromResponse( this.convertToMap(), - SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricType.CLUSTER_HEALTH.defaultPath) + SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CLUSTER_HEALTH.defaultPath) ) is ClusterStatsResponse -> redactFieldsFromResponse( this.convertToMap(), - SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricType.CLUSTER_STATS.defaultPath) + SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CLUSTER_STATS.defaultPath) ) is ClusterGetSettingsResponse -> redactFieldsFromResponse( this.convertToMap(), - SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricType.CLUSTER_SETTINGS.defaultPath) + SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CLUSTER_SETTINGS.defaultPath) + ) + is CatIndicesResponseWrapper -> redactFieldsFromResponse( + this.convertToMap(), + SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CAT_INDICES.defaultPath) + ) + is CatShardsResponseWrapper -> redactFieldsFromResponse( + this.convertToMap(), + SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CAT_SHARDS.defaultPath) ) is NodesStatsResponse -> redactFieldsFromResponse( this.convertToMap(), - SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricType.NODES_STATS.defaultPath) + SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.NODES_STATS.defaultPath) ) is PendingClusterTasksResponse -> redactFieldsFromResponse( this.convertToMap(), -
SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricType.CAT_PENDING_TASKS.defaultPath) + SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CAT_PENDING_TASKS.defaultPath) ) is RecoveryResponse -> redactFieldsFromResponse( this.convertToMap(), - SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricType.CAT_RECOVERY.defaultPath) + SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CAT_RECOVERY.defaultPath) ) is GetSnapshotsResponse -> redactFieldsFromResponse( this.convertToMap(), - SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricType.CAT_SNAPSHOTS.defaultPath) + SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CAT_SNAPSHOTS.defaultPath) ) is ListTasksResponse -> redactFieldsFromResponse( this.convertToMap(), - SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricType.CAT_TASKS.defaultPath) + SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CAT_TASKS.defaultPath) ) else -> throw IllegalArgumentException("Unsupported ActionResponse type: ${this.javaClass.name}") } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/util/destinationmigration/DestinationConversionUtils.kt b/alerting/src/main/kotlin/org/opensearch/alerting/util/destinationmigration/DestinationConversionUtils.kt index e4c50ef48..d6e8c6ec0 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/util/destinationmigration/DestinationConversionUtils.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/util/destinationmigration/DestinationConversionUtils.kt @@ -5,12 +5,11 @@ package org.opensearch.alerting.util.destinationmigration -import org.apache.http.client.utils.URIBuilder +import org.apache.hc.core5.net.URIBuilder import org.opensearch.alerting.model.destination.Destination import org.opensearch.alerting.model.destination.email.EmailAccount import org.opensearch.alerting.model.destination.email.Recipient import org.opensearch.alerting.util.DestinationType -import org.opensearch.common.Strings import org.opensearch.commons.notifications.model.Chime import org.opensearch.commons.notifications.model.ConfigType import org.opensearch.commons.notifications.model.Email @@ -22,6 +21,7 @@ import org.opensearch.commons.notifications.model.NotificationConfig import org.opensearch.commons.notifications.model.Slack import org.opensearch.commons.notifications.model.SmtpAccount import org.opensearch.commons.notifications.model.Webhook +import org.opensearch.core.common.Strings import java.net.URI import java.net.URISyntaxException import java.util.Locale diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/util/destinationmigration/DestinationMigrationCoordinator.kt b/alerting/src/main/kotlin/org/opensearch/alerting/util/destinationmigration/DestinationMigrationCoordinator.kt index 5cb37c519..fbb3b1527 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/util/destinationmigration/DestinationMigrationCoordinator.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/util/destinationmigration/DestinationMigrationCoordinator.kt @@ -16,7 +16,7 @@ import org.opensearch.client.node.NodeClient import org.opensearch.cluster.ClusterChangedEvent import org.opensearch.cluster.ClusterStateListener import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.component.LifecycleListener +import org.opensearch.common.lifecycle.LifecycleListener import 
org.opensearch.common.unit.TimeValue import org.opensearch.threadpool.Scheduler import org.opensearch.threadpool.ThreadPool @@ -45,14 +45,13 @@ class DestinationMigrationCoordinator( } override fun clusterChanged(event: ClusterChangedEvent) { - logger.info("Detected cluster change event for destination migration") if (DestinationMigrationUtilService.finishFlag) { logger.info("Reset destination migration process.") scheduledMigration?.cancel() DestinationMigrationUtilService.finishFlag = false } if ( - event.localNodeMaster() && + event.localNodeClusterManager() && !runningLock && (scheduledMigration == null || scheduledMigration!!.isCancelled) ) { @@ -62,7 +61,8 @@ class DestinationMigrationCoordinator( } finally { runningLock = false } - } else if (!event.localNodeMaster()) { + } else if (!event.localNodeClusterManager()) { + logger.info("Cancelling the migration process.") scheduledMigration?.cancel() } } diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilService.kt b/alerting/src/main/kotlin/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilService.kt index 898561142..508118dc0 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilService.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilService.kt @@ -12,7 +12,6 @@ import org.opensearch.action.delete.DeleteRequest import org.opensearch.action.search.SearchRequest import org.opensearch.action.search.SearchResponse import org.opensearch.action.support.WriteRequest -import org.opensearch.alerting.core.model.ScheduledJob import org.opensearch.alerting.model.destination.Destination import org.opensearch.alerting.model.destination.email.EmailAccount import org.opensearch.alerting.model.destination.email.EmailGroup @@ -21,20 +20,21 @@ import org.opensearch.alerting.util.destinationmigration.DestinationConversionUt import org.opensearch.alerting.util.destinationmigration.DestinationConversionUtils.Companion.convertEmailAccountToNotificationConfig import org.opensearch.alerting.util.destinationmigration.DestinationConversionUtils.Companion.convertEmailGroupToNotificationConfig import org.opensearch.alerting.util.destinationmigration.NotificationApiUtils.Companion.createNotificationConfig +import org.opensearch.alerting.util.use import org.opensearch.client.node.NodeClient -import org.opensearch.common.Strings import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.XContentFactory -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils import org.opensearch.common.xcontent.XContentType import org.opensearch.commons.ConfigConstants +import org.opensearch.commons.alerting.model.ScheduledJob import org.opensearch.commons.notifications.action.CreateNotificationConfigRequest import org.opensearch.commons.notifications.model.NotificationConfig import org.opensearch.commons.notifications.model.NotificationConfigInfo +import org.opensearch.core.common.Strings +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils import org.opensearch.index.query.QueryBuilders -import org.opensearch.rest.RestStatus import 
org.opensearch.search.builder.SearchSourceBuilder import org.opensearch.search.fetch.subphase.FetchSourceContext import java.time.Instant @@ -174,7 +174,7 @@ class DestinationMigrationUtilService { hasMoreResults = false } for (hit in response.hits) { - val xcp = XContentFactory.xContent(XContentType.JSON) + val xcp = XContentType.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, hit.sourceAsString) var notificationConfig: NotificationConfig? = null var userStr = "" diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/util/destinationmigration/NotificationApiUtils.kt b/alerting/src/main/kotlin/org/opensearch/alerting/util/destinationmigration/NotificationApiUtils.kt index cd44a7887..4278907d3 100644 --- a/alerting/src/main/kotlin/org/opensearch/alerting/util/destinationmigration/NotificationApiUtils.kt +++ b/alerting/src/main/kotlin/org/opensearch/alerting/util/destinationmigration/NotificationApiUtils.kt @@ -12,9 +12,9 @@ import org.opensearch.action.bulk.BackoffPolicy import org.opensearch.alerting.model.destination.Destination import org.opensearch.alerting.opensearchapi.retryForNotification import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.util.use import org.opensearch.client.Client import org.opensearch.client.node.NodeClient -import org.opensearch.common.Strings import org.opensearch.common.unit.TimeValue import org.opensearch.commons.ConfigConstants import org.opensearch.commons.destination.message.LegacyBaseMessage @@ -27,11 +27,10 @@ import org.opensearch.commons.notifications.action.LegacyPublishNotificationRequ import org.opensearch.commons.notifications.action.LegacyPublishNotificationResponse import org.opensearch.commons.notifications.action.SendNotificationResponse import org.opensearch.commons.notifications.model.ChannelMessage -import org.opensearch.commons.notifications.model.ConfigType import org.opensearch.commons.notifications.model.EventSource import org.opensearch.commons.notifications.model.NotificationConfigInfo import org.opensearch.commons.notifications.model.SeverityType -import org.opensearch.rest.RestStatus +import org.opensearch.core.rest.RestStatus class NotificationApiUtils { @@ -138,33 +137,11 @@ suspend fun NotificationConfigInfo.sendNotification(client: Client, title: Strin } /** - * For most channel types, a placeholder Alerting title will be used but the email channel will - * use the subject, so it appears as the actual subject of the email. + * A placeholder Alerting title will be used if no subject is passed in. */ fun NotificationConfigInfo.getTitle(subject: String?): String { val defaultTitle = "Alerting-Notification Action" - if (this.notificationConfig.configType == ConfigType.EMAIL) { - return if (subject.isNullOrEmpty()) defaultTitle else subject - } - - return defaultTitle -} - -fun NotificationConfigInfo.createMessageContent(subject: String?, message: String): String { - // For Email Channels, the subject is not passed in the main message since it's used as the title - if (this.notificationConfig.configType == ConfigType.EMAIL) { - return constructMessageContent("", message) - } - - return constructMessageContent(subject, message) -} - -/** - * Similar to Destinations, this is a generic utility method for constructing message content from - * a subject and message body when sending through Notifications since the Action definition in Monitors can have both. 
- */ -private fun constructMessageContent(subject: String?, message: String): String { - return if (Strings.isNullOrEmpty(subject)) message else "$subject \n\n $message" + return if (subject.isNullOrEmpty()) defaultTitle else subject } /** diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/workflow/CompositeWorkflowRunner.kt b/alerting/src/main/kotlin/org/opensearch/alerting/workflow/CompositeWorkflowRunner.kt new file mode 100644 index 000000000..cfed18c89 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/workflow/CompositeWorkflowRunner.kt @@ -0,0 +1,392 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.workflow + +import org.apache.logging.log4j.LogManager +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.alerting.BucketLevelMonitorRunner +import org.opensearch.alerting.DocumentLevelMonitorRunner +import org.opensearch.alerting.MonitorRunnerExecutionContext +import org.opensearch.alerting.QueryLevelMonitorRunner +import org.opensearch.alerting.WorkflowMetadataService +import org.opensearch.alerting.model.ChainedAlertTriggerRunResult +import org.opensearch.alerting.model.MonitorRunResult +import org.opensearch.alerting.model.WorkflowRunResult +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.script.ChainedAlertTriggerExecutionContext +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.isDocLevelMonitor +import org.opensearch.alerting.util.isQueryLevelMonitor +import org.opensearch.cluster.routing.Preference +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.ChainedAlertTrigger +import org.opensearch.commons.alerting.model.CompositeInput +import org.opensearch.commons.alerting.model.DataSources +import org.opensearch.commons.alerting.model.Delegate +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.commons.alerting.util.isBucketLevelMonitor +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import org.opensearch.index.query.QueryBuilders +import org.opensearch.index.query.QueryBuilders.boolQuery +import org.opensearch.index.query.QueryBuilders.existsQuery +import org.opensearch.index.query.QueryBuilders.termsQuery +import java.time.Instant +import java.time.LocalDateTime +import java.time.ZoneOffset +import java.util.UUID + +object CompositeWorkflowRunner : WorkflowRunner() { + + private val logger = LogManager.getLogger(javaClass) + + override suspend fun runWorkflow( + workflow: Workflow, + monitorCtx: MonitorRunnerExecutionContext, + periodStart: Instant, + periodEnd: Instant, + dryRun: Boolean, + ): WorkflowRunResult { + val workflowExecutionStartTime = Instant.now() + + val isTempWorkflow = dryRun || workflow.id == Workflow.NO_ID + + val executionId = generateExecutionId(isTempWorkflow, workflow) + + val (workflowMetadata, _) = WorkflowMetadataService.getOrCreateWorkflowMetadata( + workflow = workflow, + skipIndex = isTempWorkflow, + executionId = executionId + ) + var dataSources: DataSources? 
= null + logger.debug("Workflow ${workflow.id} in $executionId execution is running") + val delegates = (workflow.inputs[0] as CompositeInput).sequence.delegates.sortedBy { it.order } + var monitors: List<Monitor> + + try { + monitors = monitorCtx.workflowService!!.getMonitorsById(delegates.map { it.monitorId }, delegates.size) + } catch (e: Exception) { + logger.error("Failed getting workflow delegates. Error: ${e.message}", e) + return WorkflowRunResult( + workflow.id, + workflow.name, + emptyList(), + workflowExecutionStartTime, + Instant.now(), + executionId, + AlertingException.wrap(e) + ) + } + // Validate the monitors size + validateMonitorSize(delegates, monitors, workflow) + val monitorsById = monitors.associateBy { it.id } + val resultList = mutableListOf<MonitorRunResult<*>>() + var lastErrorDelegateRun: Exception? = null + + for (delegate in delegates) { + var indexToDocIds = mapOf<String, List<String>>() + var delegateMonitor: Monitor + delegateMonitor = monitorsById[delegate.monitorId] + ?: throw AlertingException.wrap( + IllegalStateException("Delegate monitor not found ${delegate.monitorId} for the workflow ${workflow.id}") + ) + if (delegate.chainedMonitorFindings != null) { + val chainedMonitorIds: MutableList<String> = mutableListOf() + if (delegate.chainedMonitorFindings!!.monitorId.isNullOrBlank()) { + chainedMonitorIds.addAll(delegate.chainedMonitorFindings!!.monitorIds) + } else { + chainedMonitorIds.add(delegate.chainedMonitorFindings!!.monitorId!!) + } + val chainedMonitors = mutableListOf<Monitor>() + chainedMonitorIds.forEach { + val chainedMonitor = monitorsById[it] + ?: throw AlertingException.wrap( + IllegalStateException("Chained finding monitor not found ${delegate.monitorId} for the workflow ${workflow.id}") + ) + chainedMonitors.add(chainedMonitor) + } + + try { + indexToDocIds = monitorCtx.workflowService!!.getFindingDocIdsByExecutionId(chainedMonitors, executionId) + } catch (e: Exception) { + logger.error("Failed to execute workflow due to failure in chained findings. Error: ${e.message}", e) + return WorkflowRunResult( + workflow.id, workflow.name, emptyList(), workflowExecutionStartTime, Instant.now(), executionId, + AlertingException.wrap(e) + ) + } + } + val workflowRunContext = WorkflowRunContext( + workflowId = workflowMetadata.workflowId, + workflowMetadataId = workflowMetadata.id, + chainedMonitorId = delegate.chainedMonitorFindings?.monitorId, + matchingDocIdsPerIndex = indexToDocIds, + auditDelegateMonitorAlerts = if (workflow.auditDelegateMonitorAlerts == null) true + else workflow.auditDelegateMonitorAlerts!! + ) + try { + dataSources = delegateMonitor.dataSources + val delegateRunResult = + runDelegateMonitor(delegateMonitor, monitorCtx, periodStart, periodEnd, dryRun, workflowRunContext, executionId) + resultList.add(delegateRunResult!!)
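+ // NOTE: the catch below breaks out of the delegate loop, so a single failed delegate skips all remaining delegates and surfaces its error on the workflow run result.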
+ } catch (ex: Exception) { + logger.error("Error executing workflow delegate monitor ${delegate.monitorId}", ex) + lastErrorDelegateRun = AlertingException.wrap(ex) + break + } + } + logger.debug("Workflow ${workflow.id} delegate monitors in execution $executionId completed") + // Update metadata only if the workflow is not temp + if (!isTempWorkflow) { + WorkflowMetadataService.upsertWorkflowMetadata( + workflowMetadata.copy(latestRunTime = workflowExecutionStartTime, latestExecutionId = executionId), + true + ) + } + val triggerResults = mutableMapOf<String, ChainedAlertTriggerRunResult>() + val workflowRunResult = WorkflowRunResult( + workflowId = workflow.id, + workflowName = workflow.name, + monitorRunResults = resultList, + executionStartTime = workflowExecutionStartTime, + executionEndTime = null, + executionId = executionId, + error = lastErrorDelegateRun, + triggerResults = triggerResults + ) + val currentAlerts = try { + monitorCtx.alertIndices!!.createOrUpdateAlertIndex(dataSources!!) + monitorCtx.alertIndices!!.createOrUpdateInitialAlertHistoryIndex(dataSources) + monitorCtx.alertService!!.loadCurrentAlertsForWorkflow(workflow, dataSources) + } catch (e: Exception) { + logger.error("Failed to fetch current alerts for workflow", e) + // We can't save ERROR alerts to the index here as we don't know if there are existing ACTIVE alerts + val id = if (workflow.id.trim().isEmpty()) "_na_" else workflow.id + logger.error("Error loading alerts for workflow: $id", e) + return workflowRunResult.copy(error = e) + } + try { + monitorCtx.alertIndices!!.createOrUpdateAlertIndex(dataSources) + val updatedAlerts = mutableListOf<Alert>() + val monitorIdToAlertIdsMap = fetchAlertsGeneratedInCurrentExecution(dataSources, executionId, monitorCtx, workflow) + for (trigger in workflow.triggers) { + val currentAlert = currentAlerts[trigger] + val caTrigger = trigger as ChainedAlertTrigger + val triggerCtx = ChainedAlertTriggerExecutionContext( + workflow = workflow, + workflowRunResult = workflowRunResult, + periodStart = workflowRunResult.executionStartTime, + periodEnd = workflowRunResult.executionEndTime, + trigger = caTrigger, + alertGeneratingMonitors = monitorIdToAlertIdsMap.keys, + monitorIdToAlertIdsMap = monitorIdToAlertIdsMap, + alert = currentAlert + ) + runChainedAlertTrigger( + monitorCtx, + workflow, + trigger, + executionId, + triggerCtx, + dryRun, + triggerResults, + updatedAlerts + ) + } + if (!dryRun && workflow.id != Workflow.NO_ID && updatedAlerts.isNotEmpty()) { + monitorCtx.retryPolicy?.let { + monitorCtx.alertService!!.saveAlerts( + dataSources, + updatedAlerts, + it, + routingId = workflow.id + ) + } + } + } catch (e: Exception) { + // We can't save ERROR alerts to the index here as we don't know if there are existing ACTIVE alerts + val id = if (workflow.id.trim().isEmpty()) "_na_" else workflow.id + logger.error("Error loading current chained alerts for workflow: $id", e) + return WorkflowRunResult( + workflowId = workflow.id, + workflowName = workflow.name, + monitorRunResults = emptyList(), + executionStartTime = workflowExecutionStartTime, + executionEndTime = Instant.now(), + executionId = executionId, + error = AlertingException.wrap(e), + triggerResults = emptyMap() + ) + } + workflowRunResult.executionEndTime = Instant.now() + return workflowRunResult + } + + private suspend fun runDelegateMonitor( + delegateMonitor: Monitor, + monitorCtx: MonitorRunnerExecutionContext, + periodStart: Instant, + periodEnd: Instant, + dryRun: Boolean, + workflowRunContext: WorkflowRunContext, + executionId: String, + ):
MonitorRunResult<*>? { + + if (delegateMonitor.isBucketLevelMonitor()) { + return BucketLevelMonitorRunner.runMonitor( + delegateMonitor, + monitorCtx, + periodStart, + periodEnd, + dryRun, + workflowRunContext, + executionId + ) + } else if (delegateMonitor.isDocLevelMonitor()) { + return DocumentLevelMonitorRunner.runMonitor( + delegateMonitor, + monitorCtx, + periodStart, + periodEnd, + dryRun, + workflowRunContext, + executionId + ) + } else if (delegateMonitor.isQueryLevelMonitor()) { + return QueryLevelMonitorRunner.runMonitor( + delegateMonitor, + monitorCtx, + periodStart, + periodEnd, + dryRun, + workflowRunContext, + executionId + ) + } else { + throw AlertingException.wrap( + IllegalStateException("Unsupported monitor type ${delegateMonitor.monitorType}") + ) + } + } + + fun generateExecutionId( + isTempWorkflow: Boolean, + workflow: Workflow, + ): String { + val randomPart = "_${LocalDateTime.now(ZoneOffset.UTC)}_${UUID.randomUUID()}" + return if (isTempWorkflow) randomPart else workflow.id.plus(randomPart) + } + + private fun validateMonitorSize( + delegates: List<Delegate>, + monitors: List<Monitor>, + workflow: Workflow, + ) { + if (delegates.size != monitors.size) { + val diffMonitorIds = delegates.map { it.monitorId }.minus(monitors.map { it.id }.toSet()).joinToString() + logger.error("Delegate monitors don't exist $diffMonitorIds for the workflow ${workflow.id}") + throw AlertingException.wrap( + IllegalStateException("Delegate monitors don't exist $diffMonitorIds for the workflow ${workflow.id}") + ) + } + } + + private suspend fun runChainedAlertTrigger( + monitorCtx: MonitorRunnerExecutionContext, + workflow: Workflow, + trigger: ChainedAlertTrigger, + executionId: String, + triggerCtx: ChainedAlertTriggerExecutionContext, + dryRun: Boolean, + triggerResults: MutableMap<String, ChainedAlertTriggerRunResult>, + updatedAlerts: MutableList<Alert>, + ) { + val triggerRunResult = monitorCtx.triggerService!!.runChainedAlertTrigger( + workflow, trigger, triggerCtx.alertGeneratingMonitors, triggerCtx.monitorIdToAlertIdsMap + ) + triggerResults[trigger.id] = triggerRunResult + if (monitorCtx.triggerService!!.isChainedAlertTriggerActionable(triggerCtx, triggerRunResult)) { + val actionCtx = triggerCtx + for (action in trigger.actions) { + triggerRunResult.actionResults[action.id] = this.runAction(action, actionCtx, monitorCtx, workflow, dryRun) + } + } + val alert = monitorCtx.alertService!!.composeChainedAlert( + triggerCtx, executionId, workflow, triggerRunResult.associatedAlertIds.toList(), triggerRunResult + ) + if (alert != null) { + updatedAlerts.add(alert) + } + } + + private suspend fun fetchAlertsGeneratedInCurrentExecution( + dataSources: DataSources, + executionId: String, + monitorCtx: MonitorRunnerExecutionContext, + workflow: Workflow, + ): MutableMap<String, MutableSet<String>> { + try { + val searchRequest = + SearchRequest(getDelegateMonitorAlertIndex(dataSources, workflow, monitorCtx.alertIndices!!.isAlertHistoryEnabled())) + searchRequest.preference(Preference.PRIMARY_FIRST.type()) + val queryBuilder = boolQuery() + queryBuilder.must(QueryBuilders.termQuery("execution_id", executionId)) + queryBuilder.must(QueryBuilders.termQuery("state", getDelegateMonitorAlertState(workflow).name)) + val noErrorQuery = boolQuery() + .should(boolQuery().mustNot(existsQuery(Alert.ERROR_MESSAGE_FIELD))) + .should(termsQuery(Alert.ERROR_MESSAGE_FIELD, "")) + queryBuilder.must(noErrorQuery) + searchRequest.source().query(queryBuilder).size(9999) + val searchResponse: SearchResponse = monitorCtx.client!!.suspendUntil { monitorCtx.client!!.search(searchRequest, it) } + val
alerts = searchResponse.hits.map { hit -> + val xcp = XContentHelper.createParser( + monitorCtx.xContentRegistry, LoggingDeprecationHandler.INSTANCE, + hit.sourceRef, XContentType.JSON + ) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + val alert = Alert.parse(xcp, hit.id, hit.version) + alert + } + val map = mutableMapOf<String, MutableSet<String>>() + for (alert in alerts) { + if (map.containsKey(alert.monitorId)) { + map[alert.monitorId]!!.add(alert.id) + } else { + map[alert.monitorId] = mutableSetOf(alert.id) + } + } + return map + } catch (e: Exception) { + logger.error("Failed to get alerts generated by delegate monitors in current execution $executionId", e) + return mutableMapOf() + } + } + + fun getDelegateMonitorAlertIndex( + dataSources: DataSources, + workflow: Workflow, + isAlertHistoryEnabled: Boolean, + ): String { + return if (workflow.triggers.isNotEmpty()) { + if (isAlertHistoryEnabled) { + dataSources.alertsHistoryIndex!! + } else dataSources.alertsIndex + } else dataSources.alertsIndex + } + + fun getDelegateMonitorAlertState( + workflow: Workflow, + ): Alert.State { + return if (workflow.triggers.isNotEmpty()) { + Alert.State.AUDIT + } else Alert.State.ACTIVE + } +} diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/workflow/WorkflowRunContext.kt b/alerting/src/main/kotlin/org/opensearch/alerting/workflow/WorkflowRunContext.kt new file mode 100644 index 000000000..14488a16a --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/workflow/WorkflowRunContext.kt @@ -0,0 +1,15 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.workflow + +data class WorkflowRunContext( + // For a dry run this is a randomly generated id; otherwise it is the workflowId + val workflowId: String, + val workflowMetadataId: String, + val chainedMonitorId: String?, + val matchingDocIdsPerIndex: Map<String, List<String>>, + val auditDelegateMonitorAlerts: Boolean +) diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/workflow/WorkflowRunner.kt b/alerting/src/main/kotlin/org/opensearch/alerting/workflow/WorkflowRunner.kt new file mode 100644 index 000000000..4b954b168 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/workflow/WorkflowRunner.kt @@ -0,0 +1,200 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.workflow + +import org.opensearch.OpenSearchSecurityException +import org.opensearch.alerting.MonitorRunnerExecutionContext +import org.opensearch.alerting.MonitorRunnerService +import org.opensearch.alerting.action.GetDestinationsAction +import org.opensearch.alerting.action.GetDestinationsRequest +import org.opensearch.alerting.action.GetDestinationsResponse +import org.opensearch.alerting.model.ActionRunResult +import org.opensearch.alerting.model.WorkflowRunResult +import org.opensearch.alerting.model.destination.Destination +import org.opensearch.alerting.opensearchapi.InjectorContextElement +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.opensearchapi.withClosableContext +import org.opensearch.alerting.script.ChainedAlertTriggerExecutionContext +import org.opensearch.alerting.util.destinationmigration.NotificationActionConfigs +import org.opensearch.alerting.util.destinationmigration.NotificationApiUtils +import org.opensearch.alerting.util.destinationmigration.getTitle +import
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/workflow/WorkflowRunner.kt b/alerting/src/main/kotlin/org/opensearch/alerting/workflow/WorkflowRunner.kt new file mode 100644 index 000000000..4b954b168 --- /dev/null +++ b/alerting/src/main/kotlin/org/opensearch/alerting/workflow/WorkflowRunner.kt @@ -0,0 +1,200 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.workflow + +import org.opensearch.OpenSearchSecurityException +import org.opensearch.alerting.MonitorRunnerExecutionContext +import org.opensearch.alerting.MonitorRunnerService +import org.opensearch.alerting.action.GetDestinationsAction +import org.opensearch.alerting.action.GetDestinationsRequest +import org.opensearch.alerting.action.GetDestinationsResponse +import org.opensearch.alerting.model.ActionRunResult +import org.opensearch.alerting.model.WorkflowRunResult +import org.opensearch.alerting.model.destination.Destination +import org.opensearch.alerting.opensearchapi.InjectorContextElement +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.opensearchapi.withClosableContext +import org.opensearch.alerting.script.ChainedAlertTriggerExecutionContext +import org.opensearch.alerting.util.destinationmigration.NotificationActionConfigs +import org.opensearch.alerting.util.destinationmigration.NotificationApiUtils +import org.opensearch.alerting.util.destinationmigration.getTitle +import org.opensearch.alerting.util.destinationmigration.publishLegacyNotification +import org.opensearch.alerting.util.destinationmigration.sendNotification +import org.opensearch.alerting.util.isAllowed +import org.opensearch.alerting.util.isTestAction +import org.opensearch.alerting.util.use +import org.opensearch.client.node.NodeClient +import org.opensearch.commons.alerting.model.Table +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.commons.alerting.model.action.Action +import org.opensearch.commons.notifications.model.NotificationConfigInfo +import org.opensearch.core.common.Strings +import org.opensearch.script.Script +import org.opensearch.script.TemplateScript +import java.time.Instant + +abstract class WorkflowRunner { + abstract suspend fun runWorkflow( + workflow: Workflow, + monitorCtx: MonitorRunnerExecutionContext, + periodStart: Instant, + periodEnd: Instant, + dryRun: Boolean + ): WorkflowRunResult + + suspend fun runAction( + action: Action, + ctx: ChainedAlertTriggerExecutionContext, + monitorCtx: MonitorRunnerExecutionContext, + workflow: Workflow, + dryrun: Boolean + ): ActionRunResult { + return try { + if (!MonitorRunnerService.isActionActionable(action, ctx.alert)) { + return ActionRunResult(action.id, action.name, mapOf(), true, null, null) + } + val actionOutput = mutableMapOf<String, String>() + actionOutput[Action.SUBJECT] = if (action.subjectTemplate != null) { + compileTemplate(action.subjectTemplate!!, ctx) + } else "" + actionOutput[Action.MESSAGE] = compileTemplate(action.messageTemplate, ctx) + if (Strings.isNullOrEmpty(actionOutput[Action.MESSAGE])) { + throw IllegalStateException("Message content missing in the Destination with id: ${action.destinationId}") + } + if (!dryrun) { + val client = monitorCtx.client + client!!.threadPool().threadContext.stashContext().use { + withClosableContext( + InjectorContextElement( + workflow.id, + monitorCtx.settings!!, + monitorCtx.threadPool!!.threadContext, + workflow.user?.roles, + workflow.user + ) + ) { + actionOutput[Action.MESSAGE_ID] = getConfigAndSendNotification( + action, + monitorCtx, + actionOutput[Action.SUBJECT], + actionOutput[Action.MESSAGE]!!
+ ) + } + } + } + ActionRunResult(action.id, action.name, actionOutput, false, MonitorRunnerService.currentTime(), null) + } catch (e: Exception) { + ActionRunResult(action.id, action.name, mapOf(), false, MonitorRunnerService.currentTime(), e) + } + } + + protected suspend fun getConfigAndSendNotification( + action: Action, + monitorCtx: MonitorRunnerExecutionContext, + subject: String?, + message: String + ): String { + val config = getConfigForNotificationAction(action, monitorCtx) + if (config.destination == null && config.channel == null) { + throw IllegalStateException("Unable to find a Notification Channel or Destination config with id [${action.destinationId}]") + } + + // Adding a check on TEST_ACTION Destination type here to avoid supporting it as a LegacyBaseMessage type + // just for Alerting integration tests + if (config.destination?.isTestAction() == true) { + return "test action" + } + + if (config.destination?.isAllowed(monitorCtx.allowList) == false) { + throw IllegalStateException( + "Monitor contains a Destination type that is not allowed: ${config.destination.type}" + ) + } + + var actionResponseContent = "" + actionResponseContent = config.channel + ?.sendNotification( + monitorCtx.client!!, + config.channel.getTitle(subject), + message + ) ?: actionResponseContent + + actionResponseContent = config.destination + ?.buildLegacyBaseMessage(subject, message, monitorCtx.destinationContextFactory!!.getDestinationContext(config.destination)) + ?.publishLegacyNotification(monitorCtx.client!!) + ?: actionResponseContent + + return actionResponseContent + } + + /** + * The "destination" ID referenced in a Monitor Action could either be a Notification config or a Destination config + * depending on whether the background migration process has already migrated it from a Destination to a Notification config. + * + * To cover both of these cases, the Notification config will take precedence and if it is not found, the Destination will be retrieved. + */ + private suspend fun getConfigForNotificationAction( + action: Action, + monitorCtx: MonitorRunnerExecutionContext + ): NotificationActionConfigs { + var destination: Destination? = null + var notificationPermissionException: Exception? = null + + var channel: NotificationConfigInfo? 
= null + try { + channel = NotificationApiUtils.getNotificationConfigInfo(monitorCtx.client as NodeClient, action.destinationId) + } catch (e: OpenSearchSecurityException) { + notificationPermissionException = e + } + + // If the channel was not found, try to retrieve the Destination + if (channel == null) { + destination = try { + val table = Table( + "asc", + "destination.name.keyword", + null, + 1, + 0, + null + ) + val getDestinationsRequest = GetDestinationsRequest( + action.destinationId, + 0L, + null, + table, + "ALL" + ) + + val getDestinationsResponse: GetDestinationsResponse = monitorCtx.client!!.suspendUntil { + monitorCtx.client!!.execute(GetDestinationsAction.INSTANCE, getDestinationsRequest, it) + } + getDestinationsResponse.destinations.firstOrNull() + } catch (e: IllegalStateException) { + // Catching the exception thrown when the Destination was not found so the NotificationActionConfigs object can be returned + null + } catch (e: OpenSearchSecurityException) { + if (notificationPermissionException != null) { + throw notificationPermissionException + } else { + throw e + } + } + + if (destination == null && notificationPermissionException != null) { + throw notificationPermissionException + } + } + + return NotificationActionConfigs(destination, channel) + } + + internal fun compileTemplate(template: Script, ctx: ChainedAlertTriggerExecutionContext): String { + return MonitorRunnerService.monitorCtx.scriptService!!.compile(template, TemplateScript.CONTEXT) + .newInstance(template.params + mapOf("ctx" to ctx.asTemplateArg())) + .execute() + } +} diff --git a/alerting/src/main/resources/org/opensearch/alerting/alerts/alert_mapping.json b/alerting/src/main/resources/org/opensearch/alerting/alerts/alert_mapping.json index fcb1d1c94..76e5104cc 100644 --- a/alerting/src/main/resources/org/opensearch/alerting/alerts/alert_mapping.json +++ b/alerting/src/main/resources/org/opensearch/alerting/alerts/alert_mapping.json @@ -4,7 +4,7 @@ "required": true }, "_meta" : { - "schema_version": 4 + "schema_version": 5 }, "properties": { "schema_version": { @@ -71,6 +71,15 @@ } } }, + "execution_id": { + "type": "keyword" + }, + "workflow_id": { + "type": "keyword" + }, + "workflow_name": { + "type": "keyword" + }, "trigger_id": { "type": "keyword" }, @@ -91,6 +100,14 @@ } } }, + "associated_alert_ids": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, "related_doc_ids": { "type" : "text", "fields" : { @@ -152,6 +169,14 @@ "type": "text" } } + }, + "clusters": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } } } } \ No newline at end of file diff --git a/alerting/src/main/resources/org/opensearch/alerting/alerts/finding_mapping.json b/alerting/src/main/resources/org/opensearch/alerting/alerts/finding_mapping.json index c9386b2ef..d2ecc0907 100644 --- a/alerting/src/main/resources/org/opensearch/alerting/alerts/finding_mapping.json +++ b/alerting/src/main/resources/org/opensearch/alerting/alerts/finding_mapping.json @@ -1,7 +1,7 @@ { "dynamic": "strict", "_meta" : { - "schema_version": 1 + "schema_version": 4 }, "properties": { "schema_version": { @@ -46,11 +46,26 @@ "type" : "keyword" } } + }, + "fields": { + "type": "text" } } }, "timestamp": { "type": "long" + }, + "correlated_doc_ids": { + "type" : "text", + "analyzer": "whitespace", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "execution_id": { + "type": "keyword" } } } \ No newline at end of file diff --git 
a/alerting/src/main/resources/org/opensearch/alerting/org.opensearch.alerting.txt b/alerting/src/main/resources/org/opensearch/alerting/org.opensearch.alerting.txt index 78d53e839..bd1f94482 100644 --- a/alerting/src/main/resources/org/opensearch/alerting/org.opensearch.alerting.txt +++ b/alerting/src/main/resources/org/opensearch/alerting/org.opensearch.alerting.txt @@ -1,4 +1,4 @@ -# Copyright OpenSearch Contributors + # Copyright OpenSearch Contributors # SPDX-License-Identifier: Apache-2.0 # Painless definition of classes used by alerting plugin @@ -31,21 +31,21 @@ class org.opensearch.alerting.script.QueryLevelTriggerExecutionContext { Exception getError() } -class org.opensearch.alerting.model.Monitor { +class org.opensearch.commons.alerting.model.Monitor { String getId() long getVersion() String getName() boolean getEnabled() } -class org.opensearch.alerting.model.QueryLevelTrigger { +class org.opensearch.commons.alerting.model.QueryLevelTrigger { String getId() String getName() String getSeverity() List getActions() } -class org.opensearch.alerting.model.Alert { +class org.opensearch.commons.alerting.model.Alert { String getId() long getVersion() boolean isAcknowledged() diff --git a/alerting/src/main/resources/org/opensearch/alerting/settings/supported_json_payloads.json b/alerting/src/main/resources/org/opensearch/alerting/settings/supported_json_payloads.json index 9ed045ab3..a153a67b2 100644 --- a/alerting/src/main/resources/org/opensearch/alerting/settings/supported_json_payloads.json +++ b/alerting/src/main/resources/org/opensearch/alerting/settings/supported_json_payloads.json @@ -1,6 +1,8 @@ { + "/_cat/indices": {}, "/_cat/pending_tasks": {}, "/_cat/recovery": {}, + "/_cat/shards": {}, "/_cat/snapshots": {}, "/_cat/tasks": {}, "/_cluster/health": {}, diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/ADTestHelpers.kt b/alerting/src/test/kotlin/org/opensearch/alerting/ADTestHelpers.kt index a82999bfe..6eda9ec30 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/ADTestHelpers.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/ADTestHelpers.kt @@ -4,13 +4,13 @@ */ package org.opensearch.alerting -import org.opensearch.alerting.core.model.Input -import org.opensearch.alerting.core.model.IntervalSchedule -import org.opensearch.alerting.core.model.Schedule -import org.opensearch.alerting.core.model.SearchInput -import org.opensearch.alerting.model.Monitor -import org.opensearch.alerting.model.QueryLevelTrigger -import org.opensearch.alerting.model.Trigger +import org.opensearch.commons.alerting.model.Input +import org.opensearch.commons.alerting.model.IntervalSchedule +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.QueryLevelTrigger +import org.opensearch.commons.alerting.model.Schedule +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.alerting.model.Trigger import org.opensearch.commons.authuser.User import org.opensearch.index.query.BoolQueryBuilder import org.opensearch.index.query.QueryBuilders diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/AccessRoles.kt b/alerting/src/test/kotlin/org/opensearch/alerting/AccessRoles.kt index 918a1f1c3..7f415a8ac 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/AccessRoles.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/AccessRoles.kt @@ -5,7 +5,11 @@ package org.opensearch.alerting +import org.opensearch.alerting.action.ExecuteWorkflowAction +import 
org.opensearch.commons.alerting.action.AlertingActions + val ALL_ACCESS_ROLE = "all_access" +val READALL_AND_MONITOR_ROLE = "readall_and_monitor" val ALERTING_FULL_ACCESS_ROLE = "alerting_full_access" val ALERTING_READ_ONLY_ACCESS = "alerting_read_access" val ALERTING_NO_ACCESS_ROLE = "no_access" @@ -15,11 +19,15 @@ val ALERTING_GET_EMAIL_GROUP_ACCESS = "alerting_get_email_group_access" val ALERTING_SEARCH_EMAIL_GROUP_ACCESS = "alerting_search_email_group_access" val ALERTING_INDEX_MONITOR_ACCESS = "alerting_index_monitor_access" val ALERTING_GET_MONITOR_ACCESS = "alerting_get_monitor_access" +val ALERTING_GET_WORKFLOW_ACCESS = "alerting_get_workflow_access" +val ALERTING_DELETE_WORKFLOW_ACCESS = "alerting_delete_workflow_access" val ALERTING_SEARCH_MONITOR_ONLY_ACCESS = "alerting_search_monitor_access" val ALERTING_EXECUTE_MONITOR_ACCESS = "alerting_execute_monitor_access" +val ALERTING_EXECUTE_WORKFLOW_ACCESS = "alerting_execute_workflow_access" val ALERTING_DELETE_MONITOR_ACCESS = "alerting_delete_monitor_access" val ALERTING_GET_DESTINATION_ACCESS = "alerting_get_destination_access" val ALERTING_GET_ALERTS_ACCESS = "alerting_get_alerts_access" +val ALERTING_INDEX_WORKFLOW_ACCESS = "alerting_index_workflow_access" val ROLE_TO_PERMISSION_MAPPING = mapOf( ALERTING_NO_ACCESS_ROLE to "", @@ -29,9 +37,13 @@ val ROLE_TO_PERMISSION_MAPPING = mapOf( ALERTING_SEARCH_EMAIL_GROUP_ACCESS to "cluster:admin/opendistro/alerting/destination/email_group/search", ALERTING_INDEX_MONITOR_ACCESS to "cluster:admin/opendistro/alerting/monitor/write", ALERTING_GET_MONITOR_ACCESS to "cluster:admin/opendistro/alerting/monitor/get", + ALERTING_GET_WORKFLOW_ACCESS to AlertingActions.GET_WORKFLOW_ACTION_NAME, ALERTING_SEARCH_MONITOR_ONLY_ACCESS to "cluster:admin/opendistro/alerting/monitor/search", ALERTING_EXECUTE_MONITOR_ACCESS to "cluster:admin/opendistro/alerting/monitor/execute", + ALERTING_EXECUTE_WORKFLOW_ACCESS to ExecuteWorkflowAction.NAME, ALERTING_DELETE_MONITOR_ACCESS to "cluster:admin/opendistro/alerting/monitor/delete", ALERTING_GET_DESTINATION_ACCESS to "cluster:admin/opendistro/alerting/destination/get", - ALERTING_GET_ALERTS_ACCESS to "cluster:admin/opendistro/alerting/alerts/get" + ALERTING_GET_ALERTS_ACCESS to "cluster:admin/opendistro/alerting/alerts/get", + ALERTING_INDEX_WORKFLOW_ACCESS to AlertingActions.INDEX_WORKFLOW_ACTION_NAME, + ALERTING_DELETE_WORKFLOW_ACCESS to AlertingActions.DELETE_WORKFLOW_ACTION_NAME ) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/AlertServiceTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/AlertServiceTests.kt index 1f208eda4..5c0e12b66 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/AlertServiceTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/AlertServiceTests.kt @@ -9,11 +9,6 @@ import org.junit.Before import org.mockito.Mockito import org.opensearch.Version import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.model.AggregationResultBucket -import org.opensearch.alerting.model.Alert -import org.opensearch.alerting.model.BucketLevelTrigger -import org.opensearch.alerting.model.Monitor -import org.opensearch.alerting.model.action.AlertCategory import org.opensearch.alerting.settings.AlertingSettings import org.opensearch.alerting.util.getBucketKeysHash import org.opensearch.client.Client @@ -22,7 +17,12 @@ import org.opensearch.cluster.service.ClusterService import org.opensearch.common.settings.ClusterSettings import org.opensearch.common.settings.Setting import 
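For readers unfamiliar with how these constants are consumed, a small illustrative sketch follows; the mapping resolves a role name to the cluster permission the security tests grant (the surrounding test wiring is hypothetical, not part of this change):

// Illustration only: resolve the permission backing the new execute-workflow role.
val executeWorkflowPermission = ROLE_TO_PERMISSION_MAPPING.getValue(ALERTING_EXECUTE_WORKFLOW_ACCESS)
// executeWorkflowPermission == ExecuteWorkflowAction.NAME, which a test can then attach
// to a custom role via the security plugin's role APIs.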
diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/AlertServiceTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/AlertServiceTests.kt index 1f208eda4..5c0e12b66 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/AlertServiceTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/AlertServiceTests.kt @@ -9,11 +9,6 @@ import org.junit.Before import org.mockito.Mockito import org.opensearch.Version import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.model.AggregationResultBucket -import org.opensearch.alerting.model.Alert -import org.opensearch.alerting.model.BucketLevelTrigger -import org.opensearch.alerting.model.Monitor -import org.opensearch.alerting.model.action.AlertCategory import org.opensearch.alerting.settings.AlertingSettings import org.opensearch.alerting.util.getBucketKeysHash import org.opensearch.client.Client @@ -22,7 +17,12 @@ import org.opensearch.cluster.service.ClusterService import org.opensearch.common.settings.ClusterSettings import org.opensearch.common.settings.Setting import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.NamedXContentRegistry +import org.opensearch.commons.alerting.model.AggregationResultBucket +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.BucketLevelTrigger +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.action.AlertCategory +import org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.test.ClusterServiceUtils import org.opensearch.test.OpenSearchTestCase import org.opensearch.threadpool.ThreadPool @@ -83,7 +83,9 @@ class AlertServiceTests : OpenSearchTestCase() { ) ) - val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor(monitor, trigger, currentAlerts, aggResultBuckets) + val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor( + monitor, trigger, currentAlerts, aggResultBuckets, emptyList(), "", null + ) // Completed Alerts are what remains in currentAlerts after categorization val completedAlerts = currentAlerts.values.toList() assertEquals(listOf<Alert>(), categorizedAlerts[AlertCategory.DEDUPED]) @@ -115,7 +117,9 @@ class AlertServiceTests : OpenSearchTestCase() { ) ) - val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor(monitor, trigger, currentAlerts, aggResultBuckets) + val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor( + monitor, trigger, currentAlerts, aggResultBuckets, emptyList(), "", null + ) // Completed Alerts are what remains in currentAlerts after categorization val completedAlerts = currentAlerts.values.toList() assertAlertsExistForBucketKeys( @@ -142,7 +146,9 @@ class AlertServiceTests : OpenSearchTestCase() { ) val aggResultBuckets = listOf<AggregationResultBucket>() - val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor(monitor, trigger, currentAlerts, aggResultBuckets) + val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor( + monitor, trigger, currentAlerts, aggResultBuckets, emptyList(), "", null + ) // Completed Alerts are what remains in currentAlerts after categorization val completedAlerts = currentAlerts.values.toList() assertEquals(listOf<Alert>(), categorizedAlerts[AlertCategory.DEDUPED]) @@ -174,7 +180,9 @@ class AlertServiceTests : OpenSearchTestCase() { ) ) - val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor(monitor, trigger, currentAlerts, aggResultBuckets) + val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor( + monitor, trigger, currentAlerts, aggResultBuckets, emptyList(), "", null + ) // Completed Alerts are what remains in currentAlerts after categorization val completedAlerts = currentAlerts.values.toList() assertAlertsExistForBucketKeys(listOf(listOf("b")), categorizedAlerts[AlertCategory.DEDUPED] ?: error("Deduped alerts not found")) @@ -198,7 +206,9 @@ class AlertServiceTests : OpenSearchTestCase() { ) ) - val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor(monitor, trigger, currentAlerts, aggResultBuckets) + val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor( + monitor, trigger, currentAlerts, aggResultBuckets, emptyList(), "", null + ) // Completed Alerts are what remains in currentAlerts after categorization val completedAlerts = currentAlerts.values.toList() assertAlertsExistForBucketKeys(listOf(listOf("a")), categorizedAlerts[AlertCategory.DEDUPED] ?: error("Deduped alerts not found")) diff --git
a/alerting/src/test/kotlin/org/opensearch/alerting/AlertingRestTestCase.kt b/alerting/src/test/kotlin/org/opensearch/alerting/AlertingRestTestCase.kt index 5cb99d04c..5d33000ef 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/AlertingRestTestCase.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/AlertingRestTestCase.kt @@ -5,39 +5,26 @@ package org.opensearch.alerting -import org.apache.http.HttpEntity -import org.apache.http.HttpHeaders -import org.apache.http.entity.ContentType -import org.apache.http.entity.ContentType.APPLICATION_JSON -import org.apache.http.entity.StringEntity -import org.apache.http.message.BasicHeader +import org.apache.hc.core5.http.ContentType +import org.apache.hc.core5.http.ContentType.APPLICATION_JSON +import org.apache.hc.core5.http.HttpEntity +import org.apache.hc.core5.http.HttpHeaders +import org.apache.hc.core5.http.io.entity.StringEntity +import org.apache.hc.core5.http.message.BasicHeader import org.junit.AfterClass import org.junit.rules.DisableOnDebug import org.opensearch.action.search.SearchResponse import org.opensearch.alerting.AlertingPlugin.Companion.EMAIL_ACCOUNT_BASE_URI import org.opensearch.alerting.AlertingPlugin.Companion.EMAIL_GROUP_BASE_URI -import org.opensearch.alerting.action.GetFindingsResponse import org.opensearch.alerting.alerts.AlertIndices import org.opensearch.alerting.alerts.AlertIndices.Companion.FINDING_HISTORY_WRITE_INDEX -import org.opensearch.alerting.core.model.DocLevelMonitorInput -import org.opensearch.alerting.core.model.DocLevelQuery -import org.opensearch.alerting.core.model.ScheduledJob -import org.opensearch.alerting.core.model.SearchInput import org.opensearch.alerting.core.settings.ScheduledJobSettings -import org.opensearch.alerting.model.Alert -import org.opensearch.alerting.model.BucketLevelTrigger -import org.opensearch.alerting.model.DocumentLevelTrigger -import org.opensearch.alerting.model.Finding -import org.opensearch.alerting.model.FindingWithDocs -import org.opensearch.alerting.model.Monitor -import org.opensearch.alerting.model.QueryLevelTrigger import org.opensearch.alerting.model.destination.Chime import org.opensearch.alerting.model.destination.CustomWebhook import org.opensearch.alerting.model.destination.Destination import org.opensearch.alerting.model.destination.Slack import org.opensearch.alerting.model.destination.email.EmailAccount import org.opensearch.alerting.model.destination.email.EmailGroup -import org.opensearch.alerting.opensearchapi.string import org.opensearch.alerting.settings.AlertingSettings import org.opensearch.alerting.settings.DestinationSettings import org.opensearch.alerting.util.DestinationType @@ -50,17 +37,31 @@ import org.opensearch.common.io.PathUtils import org.opensearch.common.settings.Settings import org.opensearch.common.unit.TimeValue import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder import org.opensearch.common.xcontent.XContentFactory import org.opensearch.common.xcontent.XContentFactory.jsonBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils import org.opensearch.common.xcontent.XContentType -import org.opensearch.common.xcontent.json.JsonXContent import org.opensearch.common.xcontent.json.JsonXContent.jsonXContent -import org.opensearch.rest.RestStatus +import 
org.opensearch.commons.alerting.action.GetFindingsResponse +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.BucketLevelTrigger +import org.opensearch.commons.alerting.model.ChainedAlertTrigger +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.DocumentLevelTrigger +import org.opensearch.commons.alerting.model.Finding +import org.opensearch.commons.alerting.model.FindingWithDocs +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.QueryLevelTrigger +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.commons.alerting.util.string +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils import org.opensearch.search.SearchModule import java.net.URLEncoder import java.nio.file.Files @@ -70,14 +71,21 @@ import java.time.format.DateTimeFormatter import java.time.temporal.ChronoUnit import java.util.Locale import java.util.UUID +import java.util.stream.Collectors import javax.management.MBeanServerInvocationHandler import javax.management.ObjectName import javax.management.remote.JMXConnectorFactory import javax.management.remote.JMXServiceURL +import kotlin.collections.ArrayList import kotlin.collections.HashMap +/** + * Superclass for tests that interact with an external test cluster using OpenSearch's RestClient + */ abstract class AlertingRestTestCase : ODFERestTestCase() { + protected val password = "D%LMX3bo#@U3XqVQ" + protected val isDebuggingTest = DisableOnDebug(null).isDebugging protected val isDebuggingRemoteCluster = System.getProperty("cluster.debug", "false")!!.toBoolean() protected val numberOfNodes = System.getProperty("cluster.number_of_nodes", "1")!!.toInt() @@ -94,7 +102,9 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { DocLevelMonitorInput.XCONTENT_REGISTRY, QueryLevelTrigger.XCONTENT_REGISTRY, BucketLevelTrigger.XCONTENT_REGISTRY, - DocumentLevelTrigger.XCONTENT_REGISTRY + DocumentLevelTrigger.XCONTENT_REGISTRY, + Workflow.XCONTENT_REGISTRY, + ChainedAlertTrigger.XCONTENT_REGISTRY ) + SearchModule(Settings.EMPTY, emptyList()).namedXContents ) } @@ -103,10 +113,26 @@ return entityAsMap(this) } - protected fun createMonitorWithClient(client: RestClient, monitor: Monitor, refresh: Boolean = true): Monitor { + private fun createMonitorEntityWithBackendRoles(monitor: Monitor, rbacRoles: List<String>?): HttpEntity { + if (rbacRoles == null) { + return monitor.toHttpEntity() + } + val temp = monitor.toJsonString() + val toReplace = temp.lastIndexOf("}") + val rbacString = rbacRoles.joinToString { "\"$it\"" } + val jsonString = temp.substring(0, toReplace) + ", \"rbac_roles\": [$rbacString] }" + return StringEntity(jsonString, APPLICATION_JSON) + } + + protected fun createMonitorWithClient( + client: RestClient, + monitor: Monitor, + rbacRoles: List<String>?
= null, + refresh: Boolean = true, + ): Monitor { val response = client.makeRequest( "POST", "$ALERTING_BASE_URI?refresh=$refresh", emptyMap(), - monitor.toHttpEntity() + createMonitorEntityWithBackendRoles(monitor, rbacRoles) ) assertEquals("Unable to create a new monitor", RestStatus.CREATED, response.restStatus()) @@ -120,7 +146,7 @@ } protected fun createMonitor(monitor: Monitor, refresh: Boolean = true): Monitor { - return createMonitorWithClient(client(), monitor, refresh) + return createMonitorWithClient(client(), monitor, emptyList(), refresh) } protected fun deleteMonitor(monitor: Monitor, refresh: Boolean = true): Response { @@ -133,6 +159,34 @@ return response } + protected fun deleteWorkflow(workflow: Workflow, deleteDelegates: Boolean = false, refresh: Boolean = true): Response { + val response = client().makeRequest( + "DELETE", + "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}?refresh=$refresh&deleteDelegateMonitors=$deleteDelegates", + emptyMap(), + workflow.toHttpEntity() + ) + assertEquals("Unable to delete a workflow", RestStatus.OK, response.restStatus()) + return response + } + + protected fun deleteWorkflowWithClient( + client: RestClient, + workflow: Workflow, + deleteDelegates: Boolean = false, + refresh: Boolean = true, + ): Response { + val response = client.makeRequest( + "DELETE", + "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}?refresh=$refresh&deleteDelegateMonitors=$deleteDelegates", + emptyMap(), + workflow.toHttpEntity() + ) + assertEquals("Unable to delete a workflow", RestStatus.OK, response.restStatus()) + + return response + } + /** * Destinations are now deprecated in favor of the Notification plugin's configs. * This method should only be used for checking legacy behavior/Notification migration scenarios. @@ -190,7 +244,7 @@ protected fun getEmailAccount( emailAccountID: String, - header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"), ): EmailAccount { val response = client().makeRequest("GET", "$EMAIL_ACCOUNT_BASE_URI/$emailAccountID", null, header) assertEquals("Unable to get email account $emailAccountID", RestStatus.OK, response.restStatus()) @@ -250,7 +304,7 @@ protected fun getEmailGroup( emailGroupID: String, - header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"), ): EmailGroup { val response = client().makeRequest("GET", "$EMAIL_GROUP_BASE_URI/$emailGroupID", null, header) assertEquals("Unable to get email group $emailGroupID", RestStatus.OK, response.restStatus()) @@ -332,7 +386,7 @@ protected fun getDestinations( client: RestClient, dataMap: Map<String, Any> = emptyMap(), - header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"), ): List<Map<String, Any>> { var baseEndpoint = "$DESTINATION_BASE_URI?"
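A brief usage sketch for the helper above: each dataMap entry is appended to the endpoint as a query parameter, so a caller might filter the returned destinations like this (illustrative only; the destinationType filter key is an assumption, not introduced by this change):

// Illustration only: fetch destinations filtered by type via query parameters.
val slackDestinations = getDestinations(client(), dataMap = mapOf("destinationType" to "slack"))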
@@ -382,7 +436,7 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { } fun getChimeDestination(): Destination { - val chime = Chime("https://hooks.chime.aws/incomingwebhooks/chimeId") + val chime = Chime("https://hooks.chime.aws/incomingwebhooks/chimeId?token=abcdef") return Destination( type = DestinationType.CHIME, name = "test", @@ -496,6 +550,51 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { return getMonitor(monitorId = monitor.id) } + @Suppress("UNCHECKED_CAST") + protected fun updateWorkflow(workflow: Workflow, refresh: Boolean = false): Workflow { + val response = client().makeRequest( + "PUT", + "${workflow.relativeUrl()}?refresh=$refresh", + emptyMap(), + workflow.toHttpEntity() + ) + assertEquals("Unable to update a workflow", RestStatus.OK, response.restStatus()) + assertUserNull(response.asMap()["workflow"] as Map) + return getWorkflow(workflowId = workflow.id) + } + + protected fun updateMonitorWithClient( + client: RestClient, + monitor: Monitor, + rbacRoles: List = emptyList(), + refresh: Boolean = true, + ): Monitor { + val response = client.makeRequest( + "PUT", "${monitor.relativeUrl()}?refresh=$refresh", + emptyMap(), createMonitorEntityWithBackendRoles(monitor, rbacRoles) + ) + assertEquals("Unable to update a monitor", RestStatus.OK, response.restStatus()) + assertUserNull(response.asMap()["monitor"] as Map) + return getMonitor(monitorId = monitor.id) + } + + protected fun updateWorkflowWithClient( + client: RestClient, + workflow: Workflow, + rbacRoles: List = emptyList(), + refresh: Boolean = true, + ): Workflow { + val response = client.makeRequest( + "PUT", + "${workflow.relativeUrl()}?refresh=$refresh", + emptyMap(), + createWorkflowEntityWithBackendRoles(workflow, rbacRoles) + ) + assertEquals("Unable to update a workflow", RestStatus.OK, response.restStatus()) + assertUserNull(response.asMap()["workflow"] as Map) + return getWorkflow(workflowId = workflow.id) + } + protected fun getMonitor(monitorId: String, header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json")): Monitor { val response = client().makeRequest("GET", "$ALERTING_BASE_URI/$monitorId", null, header) assertEquals("Unable to get monitor $monitorId", RestStatus.OK, response.restStatus()) @@ -514,6 +613,16 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { "_id" -> id = parser.text() "_version" -> version = parser.longValue() "monitor" -> monitor = Monitor.parse(parser) + "associated_workflows" -> { + XContentParserUtils.ensureExpectedToken( + XContentParser.Token.START_ARRAY, + parser.currentToken(), + parser + ) + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + // do nothing + } + } } } @@ -525,7 +634,7 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { protected fun searchAlertsWithFilter( monitor: Monitor, indices: String = AlertIndices.ALERT_INDEX, - refresh: Boolean = true + refresh: Boolean = true, ): List { if (refresh) refreshIndex(indices) @@ -537,9 +646,9 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { val httpResponse = adminClient().makeRequest("GET", "/$indices/_search", StringEntity(request, APPLICATION_JSON)) assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) - val searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content)) + val searchResponse = SearchResponse.fromXContent(createParser(jsonXContent, httpResponse.entity.content)) return searchResponse.hits.hits.map { - val xcp = 
createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() } + val xcp = createParser(jsonXContent, it.sourceRef).also { it.nextToken() } Alert.parse(xcp, it.id, it.version) }.filter { alert -> alert.monitorId == monitor.id } } @@ -548,8 +657,10 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { monitorId: String = "NO_ID", monitorName: String = "NO_NAME", index: String = "testIndex", - docLevelQueries: List = listOf(DocLevelQuery(query = "test_field:\"us-west-2\"", name = "testQuery")), - matchingDocIds: List + docLevelQueries: List = listOf( + DocLevelQuery(query = "test_field:\"us-west-2\"", name = "testQuery", fields = listOf()) + ), + matchingDocIds: List, ): String { val finding = Finding( id = UUID.randomUUID().toString(), @@ -570,7 +681,7 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { protected fun searchFindings( monitor: Monitor, indices: String = AlertIndices.ALL_FINDING_INDEX_PATTERN, - refresh: Boolean = true + refresh: Boolean = true, ): List { if (refresh) refreshIndex(indices) @@ -582,9 +693,9 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { val httpResponse = adminClient().makeRequest("GET", "/$indices/_search", StringEntity(request, APPLICATION_JSON)) assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) - val searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content)) + val searchResponse = SearchResponse.fromXContent(createParser(jsonXContent, httpResponse.entity.content)) return searchResponse.hits.hits.map { - val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() } + val xcp = createParser(jsonXContent, it.sourceRef).also { it.nextToken() } Finding.parse(xcp) }.filter { finding -> finding.monitorId == monitor.id } } @@ -607,9 +718,9 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { val httpResponse = adminClient().makeRequest("GET", "/$indices/_search", searchParams, StringEntity(request, APPLICATION_JSON)) assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) - val searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content)) + val searchResponse = SearchResponse.fromXContent(createParser(jsonXContent, httpResponse.entity.content)) return searchResponse.hits.hits.map { - val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() } + val xcp = createParser(jsonXContent, it.sourceRef).also { it.nextToken() } Alert.parse(xcp, it.id, it.version) } } @@ -629,10 +740,25 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { return response } + protected fun acknowledgeChainedAlerts(workflowId: String, vararg alertId: String): Response { + val request = jsonBuilder().startObject() + .array("alerts", *alertId.map { it }.toTypedArray()) + .endObject() + .string() + .let { StringEntity(it, APPLICATION_JSON) } + + val response = client().makeRequest( + "POST", "${AlertingPlugin.WORKFLOW_BASE_URI}/$workflowId/_acknowledge/alerts", + emptyMap(), request + ) + assertEquals("Acknowledge call failed.", RestStatus.OK, response.restStatus()) + return response + } + protected fun getAlerts( client: RestClient, dataMap: Map = emptyMap(), - header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"), ): Response { var baseEndpoint = "$ALERTING_BASE_URI/alerts?" 
for (entry in dataMap.entries) { @@ -646,13 +772,13 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { protected fun getAlerts( dataMap: Map = emptyMap(), - header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"), ): Response { return getAlerts(client(), dataMap, header) } protected fun refreshIndex(index: String): Response { - val response = client().makeRequest("POST", "/$index/_refresh") + val response = client().makeRequest("POST", "/$index/_refresh?expand_wildcards=all") assertEquals("Unable to refresh index", RestStatus.OK, response.restStatus()) return response } @@ -667,10 +793,44 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { return executeMonitor(client(), monitorId, params) } + protected fun executeWorkflow(workflowId: String, params: Map = mutableMapOf()): Response { + return executeWorkflow(client(), workflowId, params) + } + + protected fun getWorkflowAlerts( + workflowId: String, + alertId: String? = "", + getAssociatedAlerts: Boolean = true, + ): Response { + return getWorkflowAlerts( + client(), + mutableMapOf(Pair("workflowIds", workflowId), Pair("getAssociatedAlerts", getAssociatedAlerts), Pair("alertIds", alertId!!)) + ) + } + + protected fun getWorkflowAlerts( + client: RestClient, + dataMap: Map = emptyMap(), + header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"), + ): Response { + var baseEndpoint = "$WORKFLOW_ALERTING_BASE_URI/alerts?" + for (entry in dataMap.entries) { + baseEndpoint += "${entry.key}=${entry.value}&" + } + + val response = client.makeRequest("GET", baseEndpoint, null, header) + assertEquals("Get call failed.", RestStatus.OK, response.restStatus()) + return response + } + protected fun executeMonitor(client: RestClient, monitorId: String, params: Map = mutableMapOf()): Response { return client.makeRequest("POST", "$ALERTING_BASE_URI/$monitorId/_execute", params) } + protected fun executeWorkflow(client: RestClient, workflowId: String, params: Map = mutableMapOf()): Response { + return client.makeRequest("POST", "$WORKFLOW_ALERTING_BASE_URI/$workflowId/_execute", params) + } + protected fun executeMonitor(monitor: Monitor, params: Map = mapOf()): Response { return executeMonitor(client(), monitor, params) } @@ -712,6 +872,18 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { return GetFindingsResponse(response.restStatus(), totalFindings, findings) } + protected fun searchMonitors(): SearchResponse { + var baseEndpoint = "${AlertingPlugin.MONITOR_BASE_URI}/_search?" 
+ val request = """ + { "version" : true, + "query": { "match_all": {} } + } + """.trimIndent() + val httpResponse = adminClient().makeRequest("POST", baseEndpoint, StringEntity(request, APPLICATION_JSON)) + assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) + return SearchResponse.fromXContent(createParser(jsonXContent, httpResponse.entity.content)) + } + protected fun indexDoc(index: String, id: String, doc: String, refresh: Boolean = true): Response { return indexDoc(client(), index, id, doc, refresh) } @@ -723,7 +895,7 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { private fun indexDoc(client: RestClient, index: String, id: String, doc: String, refresh: Boolean = true): Response { val requestBody = StringEntity(doc, APPLICATION_JSON) val params = if (refresh) mapOf("refresh" to "true") else mapOf() - val response = client.makeRequest("PUT", "$index/_doc/$id", params, requestBody) + val response = client.makeRequest("POST", "$index/_doc/$id?op_type=create", params, requestBody) assertTrue( "Unable to index doc: '${doc.take(15)}...' to index: '$index'", listOf(RestStatus.OK, RestStatus.CREATED).contains(response.restStatus()) @@ -745,7 +917,8 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { """ "properties" : { "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" }, - "test_field" : { "type" : "keyword" } + "test_field" : { "type" : "keyword" }, + "number" : { "type" : "keyword" } } """.trimIndent() ) @@ -757,6 +930,11 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { return index } + protected fun createTestIndex(index: String, mapping: String?, alias: String): String { + createIndex(index, Settings.EMPTY, mapping?.trimIndent(), alias) + return index + } + protected fun createTestConfigIndex(index: String = "." 
+ randomAlphaOfLength(10).lowercase(Locale.ROOT)): String { try { createIndex( @@ -776,7 +954,7 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { protected fun createTestAlias( alias: String = randomAlphaOfLength(10).lowercase(Locale.ROOT), numOfAliasIndices: Int = randomIntBetween(1, 10), - includeWriteIndex: Boolean = true + includeWriteIndex: Boolean = true, ): MutableMap> { return createTestAlias(alias = alias, indices = randomAliasIndices(alias, numOfAliasIndices, includeWriteIndex)) } @@ -787,12 +965,12 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { alias = alias, num = randomIntBetween(1, 10), includeWriteIndex = true - ) + ), ): MutableMap> { val indicesMap = mutableMapOf() val indicesJson = jsonBuilder().startObject().startArray("actions") indices.keys.map { - val indexName = createTestIndex(index = it.lowercase(Locale.ROOT), mapping = "") + val indexName = createTestIndex(index = it, mapping = "") val isWriteIndex = indices.getOrDefault(indexName, false) indicesMap[indexName] = isWriteIndex val indexMap = mapOf( @@ -809,17 +987,155 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { return mutableMapOf(alias to indicesMap) } + protected fun createDataStream(datastream: String, mappings: String?, useComponentTemplate: Boolean) { + val indexPattern = "$datastream*" + var componentTemplateMappings = "\"properties\": {" + + " \"netflow.destination_transport_port\":{ \"type\": \"long\" }," + + " \"netflow.destination_ipv4_address\":{ \"type\": \"ip\" }" + + "}" + if (mappings != null) { + componentTemplateMappings = mappings + } + if (useComponentTemplate) { + // Setup index_template + createComponentTemplateWithMappings( + "my_ds_component_template-$datastream", + componentTemplateMappings + ) + } + createComposableIndexTemplate( + "my_index_template_ds-$datastream", + listOf(indexPattern), + (if (useComponentTemplate) "my_ds_component_template-$datastream" else null), + mappings, + true, + 0 + ) + createDataStream(datastream) + } + + protected fun createDataStream(datastream: String? = randomAlphaOfLength(10).lowercase(Locale.ROOT)) { + client().makeRequest("PUT", "_data_stream/$datastream") + } + + protected fun deleteDataStream(datastream: String) { + client().makeRequest("DELETE", "_data_stream/$datastream") + } + + protected fun createIndexAlias(alias: String, mappings: String?) { + val indexPattern = "$alias*" + var componentTemplateMappings = "\"properties\": {" + + " \"netflow.destination_transport_port\":{ \"type\": \"long\" }," + + " \"netflow.destination_ipv4_address\":{ \"type\": \"ip\" }" + + "}" + if (mappings != null) { + componentTemplateMappings = mappings + } + createComponentTemplateWithMappings( + "my_alias_component_template-$alias", + componentTemplateMappings + ) + createComposableIndexTemplate( + "my_index_template_alias-$alias", + listOf(indexPattern), + "my_alias_component_template-$alias", + mappings, + false, + 0 + ) + createTestIndex( + "$alias-000001", + null, + """ + "$alias": { + "is_write_index": true + } + """.trimIndent() + ) + } + + protected fun deleteIndexAlias(alias: String) { + client().makeRequest("DELETE", "$alias*/_alias/$alias") + } + + protected fun createComponentTemplateWithMappings(componentTemplateName: String, mappings: String?) 
{ + val body = """{"template" : { "mappings": {$mappings} }}""" + client().makeRequest( + "PUT", + "_component_template/$componentTemplateName", + emptyMap(), + StringEntity(body, ContentType.APPLICATION_JSON), + BasicHeader("Content-Type", "application/json") + ) + } + + protected fun createComposableIndexTemplate( + templateName: String, + indexPatterns: List, + componentTemplateName: String?, + mappings: String?, + isDataStream: Boolean, + priority: Int + ) { + var body = "{\n" + if (isDataStream) { + body += "\"data_stream\": { }," + } + body += "\"index_patterns\": [" + + indexPatterns.stream().collect( + Collectors.joining(",", "\"", "\"") + ) + "]," + if (componentTemplateName == null) { + body += "\"template\": {\"mappings\": {$mappings}}," + } + if (componentTemplateName != null) { + body += "\"composed_of\": [\"$componentTemplateName\"]," + } + body += "\"priority\":$priority}" + client().makeRequest( + "PUT", + "_index_template/$templateName", + emptyMap(), + StringEntity(body, APPLICATION_JSON), + BasicHeader("Content-Type", "application/json") + ) + } + + protected fun getDatastreamWriteIndex(datastream: String): String { + val response = client().makeRequest("GET", "_data_stream/$datastream", emptyMap(), null) + var respAsMap = responseAsMap(response) + if (respAsMap.containsKey("data_streams")) { + respAsMap = (respAsMap["data_streams"] as ArrayList>)[0] + val indices = respAsMap["indices"] as List> + val index = indices.last() + return index["index_name"] as String + } else { + respAsMap = respAsMap[datastream] as Map + } + val indices = respAsMap["indices"] as Array + return indices.last() + } + + protected fun rolloverDatastream(datastream: String) { + client().makeRequest( + "POST", + datastream + "/_rollover", + emptyMap(), + null + ) + } + protected fun randomAliasIndices( alias: String, num: Int = randomIntBetween(1, 10), - includeWriteIndex: Boolean = true + includeWriteIndex: Boolean = true, ): Map { val indices = mutableMapOf() - val writeIndex = randomIntBetween(0, num) + val writeIndex = randomIntBetween(0, num - 1) for (i: Int in 0 until num) { - var indexName = randomAlphaOfLength(10) + var indexName = randomAlphaOfLength(10).lowercase(Locale.ROOT) while (indexName.equals(alias) || indices.containsKey(indexName)) - indexName = randomAlphaOfLength(10) + indexName = randomAlphaOfLength(10).lowercase(Locale.ROOT) indices[indexName] = includeWriteIndex && i == writeIndex } return indices @@ -832,7 +1148,8 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { val testDoc = """ { "test_strict_date_time": "$testTime", - "test_field": "$value" + "test_field": "$value", + "number": "$i" } """.trimIndent() // Indexing documents with deterministic doc id to allow for easy selected deletion during testing @@ -1058,7 +1375,7 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { "PUT", "_cluster/settings", emptyMap(), StringEntity( - XContentFactory.jsonBuilder().startObject().field("persistent") + jsonBuilder().startObject().field("persistent") .startObject().field(AlertingSettings.FILTER_BY_BACKEND_ROLES.key, false).endObject() .endObject().string(), ContentType.APPLICATION_JSON @@ -1074,11 +1391,11 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { client().updateSettings(DestinationSettings.ALLOW_LIST.key, allowedDestinations) } - fun createUser(name: String, passwd: String, backendRoles: Array) { + fun createUser(name: String, backendRoles: Array) { val request = Request("PUT", "/_plugins/_security/api/internalusers/$name") val broles = 
backendRoles.joinToString { it -> "\"$it\"" } var entity = " {\n" + - "\"password\": \"$passwd\",\n" + + "\"password\": \"$password\",\n" + "\"backend_roles\": [$broles],\n" + "\"attributes\": {\n" + "}} " @@ -1086,6 +1403,18 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { client().performRequest(request) } + fun patchUserBackendRoles(name: String, backendRoles: Array) { + val request = Request("PATCH", "/_plugins/_security/api/internalusers/$name") + val broles = backendRoles.joinToString { "\"$it\"" } + var entity = " [{\n" + + "\"op\": \"replace\",\n" + + "\"path\": \"/backend_roles\",\n" + + "\"value\": [$broles]\n" + + "}]" + request.setJsonEntity(entity) + client().performRequest(request) + } + fun createIndexRole(name: String, index: String) { val request = Request("PUT", "/_plugins/_security/api/roles/$name") var entity = "{\n" + @@ -1135,10 +1464,72 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { client().performRequest(request) } - fun createIndexRoleWithDocLevelSecurity(name: String, index: String, dlsQuery: String) { + private fun createCustomIndexRole(name: String, index: String, clusterPermissions: List) { val request = Request("PUT", "/_plugins/_security/api/roles/$name") + + val clusterPermissionsStr = + clusterPermissions.stream().map { p: String? -> "\"" + p + "\"" }.collect( + Collectors.joining(",") + ) + var entity = "{\n" + "\"cluster_permissions\": [\n" + + "$clusterPermissionsStr\n" + + "],\n" + + "\"index_permissions\": [\n" + + "{\n" + + "\"index_patterns\": [\n" + + "\"$index\"\n" + + "],\n" + + "\"dls\": \"\",\n" + + "\"fls\": [],\n" + + "\"masked_fields\": [],\n" + + "\"allowed_actions\": [\n" + + "\"crud\"\n" + + "]\n" + + "}\n" + + "],\n" + + "\"tenant_permissions\": []\n" + + "}" + request.setJsonEntity(entity) + client().performRequest(request) + } + + fun createIndexRoleWithDocLevelSecurity(name: String, index: String, dlsQuery: String, clusterPermissions: String? 
= "") { + val request = Request("PUT", "/_plugins/_security/api/roles/$name") + var entity = "{\n" + + "\"cluster_permissions\": [\n" + + "\"$clusterPermissions\"\n" + + "],\n" + + "\"index_permissions\": [\n" + + "{\n" + + "\"index_patterns\": [\n" + + "\"$index\"\n" + + "],\n" + + "\"dls\": \"$dlsQuery\",\n" + + "\"fls\": [],\n" + + "\"masked_fields\": [],\n" + + "\"allowed_actions\": [\n" + + "\"crud\"\n" + + "]\n" + + "}\n" + + "],\n" + + "\"tenant_permissions\": []\n" + + "}" + request.setJsonEntity(entity) + client().performRequest(request) + } + + fun createIndexRoleWithDocLevelSecurity(name: String, index: String, dlsQuery: String, clusterPermissions: List) { + val clusterPermissionsStr = + clusterPermissions.stream().map { p: String -> "\"" + getClusterPermissionsFromCustomRole(p) + "\"" }.collect( + Collectors.joining(",") + ) + + val request = Request("PUT", "/_plugins/_security/api/roles/$name") + var entity = "{\n" + + "\"cluster_permissions\": [\n" + + "$clusterPermissionsStr\n" + "],\n" + "\"index_permissions\": [\n" + "{\n" + @@ -1171,6 +1562,22 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { client().performRequest(request) } + fun updateRoleMapping(role: String, users: List, addUser: Boolean) { + val request = Request("PATCH", "/_plugins/_security/api/rolesmapping/$role") + val usersStr = users.joinToString { it -> "\"$it\"" } + + val op = if (addUser) "add" else "remove" + + val entity = "[{\n" + + " \"op\" : \"$op\",\n" + + " \"path\" : \"/users\",\n" + + " \"value\" : [$usersStr]\n" + + "}]" + + request.setJsonEntity(entity) + client().performRequest(request) + } + fun deleteUser(name: String) { client().makeRequest("DELETE", "/_plugins/_security/api/internalusers/$name") } @@ -1189,7 +1596,7 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { } fun createUserWithTestData(user: String, index: String, role: String, backendRole: String) { - createUser(user, user, arrayOf(backendRole)) + createUser(user, arrayOf(backendRole)) createTestIndex(index) createIndexRole(role, index) createUserRolesMapping(role, arrayOf(user)) @@ -1199,23 +1606,52 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { user: String, index: String, role: String, - backendRole: String, - clusterPermissions: String? 
+ backendRoles: List, + clusterPermissions: String?, + ) { + createUser(user, backendRoles.toTypedArray()) + createTestIndex(index) + createCustomIndexRole(role, index, clusterPermissions) + createUserRolesMapping(role, arrayOf(user)) + } + + fun createUserWithTestDataAndCustomRole( + user: String, + index: String, + role: String, + backendRoles: List, + clusterPermissions: List, ) { - createUser(user, user, arrayOf(backendRole)) + createUser(user, backendRoles.toTypedArray()) createTestIndex(index) createCustomIndexRole(role, index, clusterPermissions) createUserRolesMapping(role, arrayOf(user)) } + fun createUserWithRoles( + user: String, + roles: List, + backendRoles: List, + isExistingRole: Boolean, + ) { + createUser(user, backendRoles.toTypedArray()) + for (role in roles) { + if (isExistingRole) { + updateRoleMapping(role, listOf(user), true) + } else { + createUserRolesMapping(role, arrayOf(user)) + } + } + } + fun createUserWithDocLevelSecurityTestData( user: String, index: String, role: String, backendRole: String, - dlsQuery: String + dlsQuery: String, ) { - createUser(user, user, arrayOf(backendRole)) + createUser(user, arrayOf(backendRole)) createTestIndex(index) createIndexRoleWithDocLevelSecurity(role, index, dlsQuery) createUserRolesMapping(role, arrayOf(user)) @@ -1227,9 +1663,9 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { role: String, backendRole: String, dlsQuery: String, - clusterPermissions: String? + clusterPermissions: String?, ) { - createUser(user, user, arrayOf(backendRole)) + createUser(user, arrayOf(backendRole)) createTestIndex(index) createIndexRoleWithDocLevelSecurity(role, index, dlsQuery) createCustomIndexRole(role, index, clusterPermissions) @@ -1279,4 +1715,83 @@ abstract class AlertingRestTestCase : ODFERestTestCase() { } } } + + protected fun createRandomWorkflow(monitorIds: List, refresh: Boolean = false): Workflow { + val workflow = randomWorkflow(monitorIds = monitorIds) + return createWorkflow(workflow, refresh) + } + + private fun createWorkflowEntityWithBackendRoles(workflow: Workflow, rbacRoles: List?): HttpEntity { + if (rbacRoles == null) { + return workflow.toHttpEntity() + } + val temp = workflow.toJsonString() + val toReplace = temp.lastIndexOf("}") + val rbacString = rbacRoles.joinToString { "\"$it\"" } + val jsonString = temp.substring(0, toReplace) + ", \"rbac_roles\": [$rbacString] }" + return StringEntity(jsonString, ContentType.APPLICATION_JSON) + } + + protected fun createWorkflowWithClient( + client: RestClient, + workflow: Workflow, + rbacRoles: List? 
= null, + refresh: Boolean = true, + ): Workflow { + val response = client.makeRequest( + "POST", "$WORKFLOW_ALERTING_BASE_URI?refresh=$refresh", emptyMap(), + createWorkflowEntityWithBackendRoles(workflow, rbacRoles) + ) + assertEquals("Unable to create a new monitor", RestStatus.CREATED, response.restStatus()) + + val workflowJson = jsonXContent.createParser( + NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, + response.entity.content + ).map() + assertUserNull(workflowJson as HashMap) + return workflow.copy(id = workflowJson["_id"] as String) + } + + protected fun createWorkflow(workflow: Workflow, refresh: Boolean = true): Workflow { + return createWorkflowWithClient(client(), workflow, emptyList(), refresh) + } + + protected fun Workflow.toHttpEntity(): HttpEntity { + return StringEntity(toJsonString(), APPLICATION_JSON) + } + + private fun Workflow.toJsonString(): String { + val builder = XContentFactory.jsonBuilder() + return shuffleXContent(toXContent(builder, ToXContent.EMPTY_PARAMS)).string() + } + + protected fun getWorkflow( + workflowId: String, + header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"), + ): Workflow { + val response = client().makeRequest("GET", "$WORKFLOW_ALERTING_BASE_URI/$workflowId", null, header) + assertEquals("Unable to get workflow $workflowId", RestStatus.OK, response.restStatus()) + + val parser = createParser(XContentType.JSON.xContent(), response.entity.content) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser) + + lateinit var id: String + var version: Long = 0 + lateinit var workflow: Workflow + + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + parser.nextToken() + + when (parser.currentName()) { + "_id" -> id = parser.text() + "_version" -> version = parser.longValue() + "workflow" -> workflow = Workflow.parse(parser) + } + } + + assertUserNull(workflow) + return workflow.copy(id = id, version = version) + } + + protected fun Workflow.relativeUrl() = "$WORKFLOW_ALERTING_BASE_URI/$id" } diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/DocumentMonitorRunnerIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/DocumentMonitorRunnerIT.kt index d56b59366..479e29dca 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/DocumentMonitorRunnerIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/DocumentMonitorRunnerIT.kt @@ -5,20 +5,28 @@ package org.opensearch.alerting +import org.apache.hc.core5.http.ContentType +import org.apache.hc.core5.http.io.entity.StringEntity +import org.opensearch.action.search.SearchResponse import org.opensearch.alerting.alerts.AlertIndices.Companion.ALL_ALERT_INDEX_PATTERN import org.opensearch.alerting.alerts.AlertIndices.Companion.ALL_FINDING_INDEX_PATTERN -import org.opensearch.alerting.core.model.DocLevelMonitorInput -import org.opensearch.alerting.core.model.DocLevelQuery -import org.opensearch.alerting.model.action.ActionExecutionPolicy -import org.opensearch.alerting.model.action.AlertCategory -import org.opensearch.alerting.model.action.PerAlertActionScope -import org.opensearch.alerting.model.action.PerExecutionActionScope import org.opensearch.client.Response import org.opensearch.client.ResponseException +import org.opensearch.common.xcontent.json.JsonXContent +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.DataSources +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import 
+
+    protected fun Workflow.relativeUrl() = "$WORKFLOW_ALERTING_BASE_URI/$id"
 }
diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/DocumentMonitorRunnerIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/DocumentMonitorRunnerIT.kt
index d56b59366..479e29dca 100644
--- a/alerting/src/test/kotlin/org/opensearch/alerting/DocumentMonitorRunnerIT.kt
+++ b/alerting/src/test/kotlin/org/opensearch/alerting/DocumentMonitorRunnerIT.kt
@@ -5,20 +5,28 @@
 package org.opensearch.alerting
 
+import org.apache.hc.core5.http.ContentType
+import org.apache.hc.core5.http.io.entity.StringEntity
+import org.opensearch.action.search.SearchResponse
 import org.opensearch.alerting.alerts.AlertIndices.Companion.ALL_ALERT_INDEX_PATTERN
 import org.opensearch.alerting.alerts.AlertIndices.Companion.ALL_FINDING_INDEX_PATTERN
-import org.opensearch.alerting.core.model.DocLevelMonitorInput
-import org.opensearch.alerting.core.model.DocLevelQuery
-import org.opensearch.alerting.model.action.ActionExecutionPolicy
-import org.opensearch.alerting.model.action.AlertCategory
-import org.opensearch.alerting.model.action.PerAlertActionScope
-import org.opensearch.alerting.model.action.PerExecutionActionScope
 import org.opensearch.client.Response
 import org.opensearch.client.ResponseException
+import org.opensearch.common.xcontent.json.JsonXContent
+import org.opensearch.commons.alerting.model.Alert
+import org.opensearch.commons.alerting.model.DataSources
+import org.opensearch.commons.alerting.model.DocLevelMonitorInput
+import org.opensearch.commons.alerting.model.DocLevelQuery
+import org.opensearch.commons.alerting.model.action.ActionExecutionPolicy
+import org.opensearch.commons.alerting.model.action.AlertCategory
+import org.opensearch.commons.alerting.model.action.PerAlertActionScope
+import org.opensearch.commons.alerting.model.action.PerExecutionActionScope
+import org.opensearch.core.rest.RestStatus
 import org.opensearch.script.Script
 import java.time.ZonedDateTime
 import java.time.format.DateTimeFormatter
 import java.time.temporal.ChronoUnit.MILLIS
+import java.util.Locale
 
 class DocumentMonitorRunnerIT : AlertingRestTestCase() {
@@ -33,7 +41,7 @@ class DocumentMonitorRunnerIT : AlertingRestTestCase() {
 
         val index = createTestIndex()
 
-        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3")
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
         val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery))
 
         val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id)
@@ -76,7 +84,7 @@ class DocumentMonitorRunnerIT : AlertingRestTestCase() {
             "test_field" : "us-west-2"
         }"""
 
-        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3")
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
         val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery))
 
         val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
@@ -108,7 +116,7 @@ class DocumentMonitorRunnerIT : AlertingRestTestCase() {
             "test_field" : "us-west-2"
         }"""
 
-        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3")
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
         val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery))
 
         val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
@@ -139,6 +147,77 @@ class DocumentMonitorRunnerIT : AlertingRestTestCase() {
         assertTrue("Findings saved for test monitor", findings[1].relatedDocIds.contains("5"))
     }
 
+    fun `test execute monitor with tag as trigger condition generates alerts and findings`() {
+        val testIndex = createTestIndex()
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc = """{
+            "message" : "This is an error from IAD region",
+            "test_strict_date_time" : "$testTime",
+            "test_field" : "us-west-2"
+        }"""
+
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", tags = listOf("test_tag"), fields = listOf())
+        val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery))
+
+        val trigger = randomDocumentLevelTrigger(condition = Script("query[tag=test_tag]"))
+        val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger)))
+        assertNotNull(monitor.id)
+
+        indexDoc(testIndex, "1", testDoc)
+        indexDoc(testIndex, "5", testDoc)
+
+        val response = executeMonitor(monitor.id)
+
+        val output = entityAsMap(response)
+
+        assertEquals(monitor.name, output["monitor_name"])
+        @Suppress("UNCHECKED_CAST")
+        val searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        val matchingDocsToQuery = searchResult[docQuery.id] as List<String>
+        assertEquals("Incorrect search result", 2, matchingDocsToQuery.size)
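+        // matched documents are reported as "<docId>|<indexName>" strings, hence the values below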
+        assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("1|$testIndex", "5|$testIndex")))
+
+        val alerts = searchAlertsWithFilter(monitor)
+        assertEquals("Alert saved for test monitor", 2, alerts.size)
+
+        val findings = searchFindings(monitor)
+        assertEquals("Findings saved for test monitor", 2, findings.size)
+        assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1"))
+        assertTrue("Findings saved for test monitor", findings[1].relatedDocIds.contains("5"))
+    }
+
+    fun `test execute monitor input error`() {
+        val testIndex = createTestIndex()
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc = """{
+            "message" : "This is an error from IAD region",
+            "test_strict_date_time" : "$testTime",
+            "test_field" : "us-west-2"
+        }"""
+
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", tags = listOf("test_tag"), fields = listOf())
+        val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery))
+
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger)))
+        assertNotNull(monitor.id)
+
+        deleteIndex(testIndex)
+
+        val response = executeMonitor(monitor.id)
+
+        val output = entityAsMap(response)
+        assertEquals(monitor.name, output["monitor_name"])
+        @Suppress("UNCHECKED_CAST")
+        val inputResults = output.stringMap("input_results")
+        assertTrue("Missing monitor error message", (inputResults?.get("error") as String).isNotEmpty())
+
+        val alerts = searchAlerts(monitor)
+        assertEquals("Alert not saved", 1, alerts.size)
+        assertEquals("Alert status is incorrect", Alert.State.ERROR, alerts[0].state)
+    }
+
     fun `test execute monitor generates alerts and findings with per alert execution for actions`() {
         val testIndex = createTestIndex()
         val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
@@ -148,7 +227,7 @@ class DocumentMonitorRunnerIT : AlertingRestTestCase() {
             "test_field" : "us-west-2"
         }"""
 
-        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3")
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
         val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery))
 
         val alertCategories = AlertCategory.values()
@@ -196,6 +275,71 @@ class DocumentMonitorRunnerIT : AlertingRestTestCase() {
             }
         }
 
+        refreshAllIndices()
+
+        val alerts = searchAlertsWithFilter(monitor)
+        assertEquals("Alert saved for test monitor", 2, alerts.size)
+
+        val findings = searchFindings(monitor)
+        assertEquals("Findings saved for test monitor", 2, findings.size)
+        assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1"))
+        assertTrue("Findings saved for test monitor", findings[1].relatedDocIds.contains("5"))
+    }
+
+    fun `test execute monitor generates alerts and findings with per trigger execution for actions`() {
+        val testIndex = createTestIndex()
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc = """{
+            "message" : "This is an error from IAD region",
+            "test_strict_date_time" : "$testTime",
+            "test_field" : "us-west-2"
+        }"""
+
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery))
+
+        val
actionExecutionScope = PerExecutionActionScope() + val actionExecutionPolicy = ActionExecutionPolicy(actionExecutionScope) + val actions = (0..randomInt(10)).map { + randomActionWithPolicy( + template = randomTemplateScript("Hello {{ctx.monitor.name}}"), + destinationId = createDestination().id, + actionExecutionPolicy = actionExecutionPolicy + ) + } + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = actions) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + assertNotNull(monitor.id) + + indexDoc(testIndex, "1", testDoc) + indexDoc(testIndex, "5", testDoc) + + val response = executeMonitor(monitor.id) + + val output = entityAsMap(response) + + assertEquals(monitor.name, output["monitor_name"]) + @Suppress("UNCHECKED_CAST") + val searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + val matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) + assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("1|$testIndex", "5|$testIndex"))) + + for (triggerResult in output.objectMap("trigger_results").values) { + assertEquals(2, triggerResult.objectMap("action_results").values.size) + for (alertActionResult in triggerResult.objectMap("action_results").values) { + assertEquals(actions.size, alertActionResult.values.size) + for (actionResult in alertActionResult.values) { + @Suppress("UNCHECKED_CAST") val actionOutput = (actionResult as Map>)["output"] + as Map + assertEquals("Hello ${monitor.name}", actionOutput["subject"]) + assertEquals("Hello ${monitor.name}", actionOutput["message"]) + } + } + } + val alerts = searchAlertsWithFilter(monitor) assertEquals("Alert saved for test monitor", 2, alerts.size) @@ -205,72 +349,625 @@ class DocumentMonitorRunnerIT : AlertingRestTestCase() { assertTrue("Findings saved for test monitor", findings[1].relatedDocIds.contains("5")) } - fun `test execute monitor generates alerts and findings with per trigger execution for actions`() { - val testIndex = createTestIndex() - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" + fun `test execute monitor with wildcard index that generates alerts and findings for EQUALS query operator`() { + val testIndexPrefix = "test-index-${randomAlphaOfLength(10).lowercase(Locale.ROOT)}" + val testQueryName = "wildcard-test-query" + val testIndex = createTestIndex("${testIndexPrefix}1") + val testIndex2 = createTestIndex("${testIndexPrefix}2") + + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = testQueryName, fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf("$testIndexPrefix*"), listOf(docQuery)) + + val trigger = randomDocumentLevelTrigger(condition = Script("query[name=$testQueryName]")) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + assertNotNull(monitor.id) + + indexDoc(testIndex, "1", testDoc) + indexDoc(testIndex2, 
"5", testDoc) + + val response = executeMonitor(monitor.id) + + val output = entityAsMap(response) + + assertEquals(monitor.name, output["monitor_name"]) + @Suppress("UNCHECKED_CAST") + val searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + val matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) + assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("1|$testIndex", "5|$testIndex2"))) + + val alerts = searchAlertsWithFilter(monitor) + assertEquals("Alert saved for test monitor", 2, alerts.size) + + val findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 2, findings.size) + val foundFindings = findings.filter { it.relatedDocIds.contains("1") || it.relatedDocIds.contains("5") } + assertEquals("Didn't find findings for docs 1 and 5", 2, foundFindings.size) + } + + fun `test execute monitor for bulk index findings`() { + val testIndexPrefix = "test-index-${randomAlphaOfLength(10).lowercase(Locale.ROOT)}" + val testQueryName = "wildcard-test-query" + val testIndex = createTestIndex("${testIndexPrefix}1") + val testIndex2 = createTestIndex("${testIndexPrefix}2") + + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = testQueryName, fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf("$testIndexPrefix*"), listOf(docQuery)) + + val trigger = randomDocumentLevelTrigger(condition = Script("query[name=$testQueryName]")) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + assertNotNull(monitor.id) + + for (i in 0 until 9) { + indexDoc(testIndex, i.toString(), testDoc) + } + indexDoc(testIndex2, "3", testDoc) + adminClient().updateSettings("plugins.alerting.alert_findings_indexing_batch_size", 2) + + val response = executeMonitor(monitor.id) + + val output = entityAsMap(response) + + assertEquals(monitor.name, output["monitor_name"]) + @Suppress("UNCHECKED_CAST") + val searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + val matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Correct search result", 10, matchingDocsToQuery.size) + assertTrue("Correct search result", matchingDocsToQuery.containsAll(listOf("1|$testIndex", "2|$testIndex", "3|$testIndex2"))) + + val alerts = searchAlertsWithFilter(monitor) + assertEquals("Alert saved for test monitor", 10, alerts.size) + + val findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 10, findings.size) + val foundFindings = + findings.filter { it.relatedDocIds.contains("1") || it.relatedDocIds.contains("2") || it.relatedDocIds.contains("3") } + assertEquals("Found findings for all docs", 4, foundFindings.size) + } + + fun `test execute monitor with wildcard index that generates alerts and findings for NOT EQUALS query operator`() { + val testIndexPrefix = "test-index-${randomAlphaOfLength(10).lowercase(Locale.ROOT)}" + val testQueryName = "wildcard-test-query" + val testIndex = createTestIndex("${testIndexPrefix}1") + val testIndex2 = createTestIndex("${testIndexPrefix}2") + + val testTime = 
DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + val docQuery = DocLevelQuery(query = "NOT (test_field:\"us-west-1\")", name = testQueryName, fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf("$testIndexPrefix*"), listOf(docQuery)) + + val trigger = randomDocumentLevelTrigger(condition = Script("query[name=$testQueryName]")) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + assertNotNull(monitor.id) + + indexDoc(testIndex, "1", testDoc) + indexDoc(testIndex2, "5", testDoc) + + val response = executeMonitor(monitor.id) + + val output = entityAsMap(response) + + assertEquals(monitor.name, output["monitor_name"]) + @Suppress("UNCHECKED_CAST") + val searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + val matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) + assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("1|$testIndex", "5|$testIndex2"))) + + val alerts = searchAlertsWithFilter(monitor) + assertEquals("Alert saved for test monitor", 2, alerts.size) + + val findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 2, findings.size) + val foundFindings = findings.filter { it.relatedDocIds.contains("1") || it.relatedDocIds.contains("5") } + assertEquals("Didn't find findings for docs 1 and 5", 2, foundFindings.size) + } + + fun `test execute monitor with new index added after first execution that generates alerts and findings`() { + val testIndex = createTestIndex("test1") + val testIndex2 = createTestIndex("test2") + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery)) + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + assertNotNull(monitor.id) + + indexDoc(testIndex, "1", testDoc) + indexDoc(testIndex2, "5", testDoc) + executeMonitor(monitor.id) + + var alerts = searchAlertsWithFilter(monitor) + assertEquals("Alert saved for test monitor", 2, alerts.size) + + var findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 2, findings.size) + + var foundFindings = findings.filter { it.relatedDocIds.contains("1") || it.relatedDocIds.contains("5") } + assertEquals("Findings saved for test monitor expected 1 and 5", 2, foundFindings.size) + + // clear previous findings and alerts + deleteIndex(ALL_FINDING_INDEX_PATTERN) + deleteIndex(ALL_ALERT_INDEX_PATTERN) + + val testIndex3 = createTestIndex("test3") + indexDoc(testIndex3, "10", testDoc) + indexDoc(testIndex, "14", testDoc) + indexDoc(testIndex2, "51", testDoc) + + val response = executeMonitor(monitor.id) + + val output = entityAsMap(response) + + assertEquals(monitor.name, output["monitor_name"]) + @Suppress("UNCHECKED_CAST") 
+ val searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + val matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 3, matchingDocsToQuery.size) + assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("14|$testIndex", "51|$testIndex2", "10|$testIndex3"))) + + alerts = searchAlertsWithFilter(monitor) + assertEquals("Alert saved for test monitor", 3, alerts.size) + + findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 3, findings.size) + + foundFindings = findings.filter { + it.relatedDocIds.contains("14") || it.relatedDocIds.contains("51") || it.relatedDocIds.contains("10") + } + assertEquals("Findings saved for test monitor expected 14, 51 and 10", 3, foundFindings.size) + } + + fun `test execute monitor with indices having fields with same name but different data types`() { + val testIndex = createTestIndex( + "test1", + """"properties": { + "source.device.port": { "type": "long" }, + "source.device.hwd.id": { "type": "long" }, + "nested_field": { + "type": "nested", + "properties": { + "test1": { + "type": "keyword" + } + } + }, + "my_join_field": { + "type": "join", + "relations": { + "question": "answer" + } + }, + "test_field" : { "type" : "integer" } + } + """.trimIndent() + ) + var testDoc = """{ + "source" : { "device": {"port" : 12345 } }, + "nested_field": { "test1": "some text" }, + "test_field": 12345 + }""" + + val docQuery1 = DocLevelQuery( + query = "(source.device.port:12345 AND test_field:12345) OR source.device.hwd.id:12345", + name = "4", + fields = listOf() + ) + val docQuery2 = DocLevelQuery( + query = "(source.device.port:\"12345\" AND test_field:\"12345\") OR source.device.hwd.id:\"12345\"", + name = "5", + fields = listOf() + ) + val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery1, docQuery2)) + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + assertNotNull(monitor.id) + + indexDoc(testIndex, "1", testDoc) + executeMonitor(monitor.id) + + var alerts = searchAlertsWithFilter(monitor) + assertEquals("Alert saved for test monitor", 1, alerts.size) + + var findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 1, findings.size) + + // clear previous findings and alerts + deleteIndex(ALL_FINDING_INDEX_PATTERN) + deleteIndex(ALL_ALERT_INDEX_PATTERN) + + indexDoc(testIndex, "2", testDoc) + + // no fields expanded as only index test1 is present + val oldExpectedQueries = listOf( + "(source.device.port_test__${monitor.id}:12345 AND test_field_test__${monitor.id}:12345) OR " + + "source.device.hwd.id_test__${monitor.id}:12345", + "(source.device.port_test__${monitor.id}:\"12345\" AND test_field_test__${monitor.id}:\"12345\") " + + "OR source.device.hwd.id_test__${monitor.id}:\"12345\"" + ) + + val request = """{ + "size": 10, + "query": { + "match_all": {} + } + }""" + var httpResponse = adminClient().makeRequest( + "GET", "/${monitor.dataSources.queryIndex}/_search", + StringEntity(request, ContentType.APPLICATION_JSON) + ) + assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) + var searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content)) + searchResponse.hits.forEach { hit -> + val query = ((hit.sourceAsMap["query"] as Map)["query_string"] 
as Map)["query"] + assertTrue(oldExpectedQueries.contains(query)) + } + + val testIndex2 = createTestIndex( + "test2", + """ + "properties" : { + "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" }, + "test_field" : { "type" : "keyword" }, + "number" : { "type" : "keyword" } + } + """.trimIndent() + ) + testDoc = """{ + "source" : { "device": {"port" : "12345" } }, + "nested_field": { "test1": "some text" }, + "test_field": "12345" + }""" + indexDoc(testIndex2, "1", testDoc) + executeMonitor(monitor.id) + + // only fields source.device.port & test_field is expanded as they have same name but different data types + // in indices test1 & test2 + val newExpectedQueries = listOf( + "(source.device.port_test2_${monitor.id}:12345 AND test_field_test2_${monitor.id}:12345) " + + "OR source.device.hwd.id_test__${monitor.id}:12345", + "(source.device.port_test1_${monitor.id}:12345 AND test_field_test1_${monitor.id}:12345) " + + "OR source.device.hwd.id_test__${monitor.id}:12345", + "(source.device.port_test2_${monitor.id}:\"12345\" AND test_field_test2_${monitor.id}:\"12345\") " + + "OR source.device.hwd.id_test__${monitor.id}:\"12345\"", + "(source.device.port_test1_${monitor.id}:\"12345\" AND test_field_test1_${monitor.id}:\"12345\") " + + "OR source.device.hwd.id_test__${monitor.id}:\"12345\"" + ) + + alerts = searchAlertsWithFilter(monitor) + assertEquals("Alert saved for test monitor", 2, alerts.size) + + findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 2, findings.size) + + httpResponse = adminClient().makeRequest( + "GET", "/${monitor.dataSources.queryIndex}/_search", + StringEntity(request, ContentType.APPLICATION_JSON) + ) + assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) + searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content)) + searchResponse.hits.forEach { hit -> + val query = ((hit.sourceAsMap["query"] as Map)["query_string"] as Map)["query"] + assertTrue(oldExpectedQueries.contains(query) || newExpectedQueries.contains(query)) + } + } + + fun `test execute monitor with indices having fields with same name but with different nesting`() { + val testIndex = createTestIndex( + "test1", + """"properties": { + "nested_field": { + "type": "nested", + "properties": { + "test1": { + "type": "keyword" + } + } + } + } + """.trimIndent() + ) + + val testIndex2 = createTestIndex( + "test2", + """"properties": { + "nested_field": { + "properties": { + "test1": { + "type": "keyword" + } + } + } + } + """.trimIndent() + ) + val testDoc = """{ + "nested_field": { "test1": "12345" } + }""" + + val docQuery = DocLevelQuery( + query = "nested_field.test1:\"12345\"", + name = "5", + fields = listOf() + ) + val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery)) + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + assertNotNull(monitor.id) + + indexDoc(testIndex, "1", testDoc) + indexDoc(testIndex2, "1", testDoc) + + executeMonitor(monitor.id) + + val alerts = searchAlertsWithFilter(monitor) + assertEquals("Alert saved for test monitor", 2, alerts.size) + + val findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 2, findings.size) + + // as mappings of source.id & test_field are different so, both of them expands + val expectedQueries = listOf( + 
"nested_field.test1_test__${monitor.id}:\"12345\"" + ) + + val request = """{ + "size": 10, + "query": { + "match_all": {} + } + }""" + var httpResponse = adminClient().makeRequest( + "GET", "/${monitor.dataSources.queryIndex}/_search", + StringEntity(request, ContentType.APPLICATION_JSON) + ) + assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) + var searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content)) + searchResponse.hits.forEach { hit -> + val query = ((hit.sourceAsMap["query"] as Map)["query_string"] as Map)["query"] + assertTrue(expectedQueries.contains(query)) + } + } + + fun `test execute monitor with indices having fields with same name but different field mappings`() { + val testIndex = createTestIndex( + "test1", + """"properties": { + "source": { + "properties": { + "id": { + "type":"text", + "analyzer":"whitespace" + } + } + }, + "test_field" : { + "type":"text", + "analyzer":"whitespace" + } + } + """.trimIndent() + ) + + val testIndex2 = createTestIndex( + "test2", + """"properties": { + "source": { + "properties": { + "id": { + "type":"text" + } + } + }, + "test_field" : { + "type":"text" + } + } + """.trimIndent() + ) + val testDoc = """{ + "source" : {"id" : "12345" }, + "nested_field": { "test1": "some text" }, + "test_field": "12345" + }""" + + val docQuery = DocLevelQuery( + query = "test_field:\"12345\" AND source.id:\"12345\"", + name = "5", + fields = listOf() + ) + val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery)) + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + assertNotNull(monitor.id) + + indexDoc(testIndex, "1", testDoc) + indexDoc(testIndex2, "1", testDoc) + + executeMonitor(monitor.id) + + val alerts = searchAlertsWithFilter(monitor) + assertEquals("Alert saved for test monitor", 2, alerts.size) + + val findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 2, findings.size) + + // as mappings of source.id & test_field are different so, both of them expands + val expectedQueries = listOf( + "test_field_test2_${monitor.id}:\"12345\" AND source.id_test2_${monitor.id}:\"12345\"", + "test_field_test1_${monitor.id}:\"12345\" AND source.id_test1_${monitor.id}:\"12345\"" + ) + + val request = """{ + "size": 10, + "query": { + "match_all": {} + } + }""" + var httpResponse = adminClient().makeRequest( + "GET", "/${monitor.dataSources.queryIndex}/_search", + StringEntity(request, ContentType.APPLICATION_JSON) + ) + assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) + var searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content)) + searchResponse.hits.forEach { hit -> + val query = ((hit.sourceAsMap["query"] as Map)["query_string"] as Map)["query"] + assertTrue(expectedQueries.contains(query)) + } + } + + fun `test execute monitor with indices having fields with same name but different field mappings in multiple indices`() { + val testIndex = createTestIndex( + "test1", + """"properties": { + "source": { + "properties": { + "device": { + "properties": { + "hwd": { + "properties": { + "id": { + "type":"text", + "analyzer":"whitespace" + } + } + } + } + } + } + }, + "test_field" : { + "type":"text" + } + } + """.trimIndent() + ) + + val testIndex2 = createTestIndex( + "test2", + """"properties": { + 
"test_field" : { + "type":"keyword" + } + } + """.trimIndent() + ) + + val testIndex4 = createTestIndex( + "test4", + """"properties": { + "source": { + "properties": { + "device": { + "properties": { + "hwd": { + "properties": { + "id": { + "type":"text" + } + } + } + } + } + } + }, + "test_field" : { + "type":"text" + } + } + """.trimIndent() + ) + + val testDoc1 = """{ + "source" : {"device" : {"hwd" : {"id" : "12345"}} }, + "nested_field": { "test1": "some text" } + }""" + val testDoc2 = """{ + "nested_field": { "test1": "some text" }, + "test_field": "12345" }""" - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") - val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + val docQuery1 = DocLevelQuery( + query = "test_field:\"12345\"", + name = "4", + fields = listOf() + ) + val docQuery2 = DocLevelQuery( + query = "source.device.hwd.id:\"12345\"", + name = "5", + fields = listOf() + ) - val actionExecutionScope = PerExecutionActionScope() - val actionExecutionPolicy = ActionExecutionPolicy(actionExecutionScope) - val actions = (0..randomInt(10)).map { - randomActionWithPolicy( - template = randomTemplateScript("Hello {{ctx.monitor.name}}"), - destinationId = createDestination().id, - actionExecutionPolicy = actionExecutionPolicy - ) - } + val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery1, docQuery2)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = actions) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) assertNotNull(monitor.id) - indexDoc(testIndex, "1", testDoc) - indexDoc(testIndex, "5", testDoc) + indexDoc(testIndex4, "1", testDoc1) + indexDoc(testIndex2, "1", testDoc2) + indexDoc(testIndex, "1", testDoc1) + indexDoc(testIndex, "2", testDoc2) - val response = executeMonitor(monitor.id) + executeMonitor(monitor.id) - val output = entityAsMap(response) + val alerts = searchAlertsWithFilter(monitor) + assertEquals("Alert saved for test monitor", 4, alerts.size) - assertEquals(monitor.name, output["monitor_name"]) - @Suppress("UNCHECKED_CAST") - val searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - val matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) - assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("1|$testIndex", "5|$testIndex"))) + val findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 4, findings.size) - for (triggerResult in output.objectMap("trigger_results").values) { - assertEquals(2, triggerResult.objectMap("action_results").values.size) - for (alertActionResult in triggerResult.objectMap("action_results").values) { - assertEquals(actions.size, alertActionResult.values.size) - for (actionResult in alertActionResult.values) { - @Suppress("UNCHECKED_CAST") val actionOutput = (actionResult as Map>)["output"] - as Map - assertEquals("Hello ${monitor.name}", actionOutput["subject"]) - assertEquals("Hello ${monitor.name}", actionOutput["message"]) - } + val request = """{ + "size": 0, + "query": { + "match_all": {} } - } - - val alerts = searchAlertsWithFilter(monitor) - assertEquals("Alert saved for test monitor", 2, alerts.size) + }""" + val httpResponse = adminClient().makeRequest( + "GET", 
"/${monitor.dataSources.queryIndex}/_search", + StringEntity(request, ContentType.APPLICATION_JSON) + ) + assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) - val findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 2, findings.size) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) - assertTrue("Findings saved for test monitor", findings[1].relatedDocIds.contains("5")) + val searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content)) + searchResponse.hits.totalHits?.let { assertEquals(5L, it.value) } } - fun `test execute monitor with wildcard index that generates alerts and findings`() { + fun `test no of queries generated for document-level monitor based on wildcard indexes`() { val testIndex = createTestIndex("test1") - val testIndex2 = createTestIndex("test2") val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) val testDoc = """{ "message" : "This is an error from IAD region", @@ -278,7 +975,7 @@ class DocumentMonitorRunnerIT : AlertingRestTestCase() { "test_field" : "us-west-2" }""" - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery)) val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) @@ -286,30 +983,38 @@ class DocumentMonitorRunnerIT : AlertingRestTestCase() { assertNotNull(monitor.id) indexDoc(testIndex, "1", testDoc) - indexDoc(testIndex2, "5", testDoc) + executeMonitor(monitor.id) - val response = executeMonitor(monitor.id) + val request = """{ + "size": 0, + "query": { + "match_all": {} + } + }""" + var httpResponse = adminClient().makeRequest( + "GET", "/${monitor.dataSources.queryIndex}/_search", + StringEntity(request, ContentType.APPLICATION_JSON) + ) + assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) - val output = entityAsMap(response) + var searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content)) + searchResponse.hits.totalHits?.let { assertEquals(1L, it.value) } - assertEquals(monitor.name, output["monitor_name"]) - @Suppress("UNCHECKED_CAST") - val searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - val matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) - assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("1|$testIndex", "5|$testIndex2"))) + val testIndex2 = createTestIndex("test2") + indexDoc(testIndex2, "1", testDoc) + executeMonitor(monitor.id) - val alerts = searchAlertsWithFilter(monitor) - assertEquals("Alert saved for test monitor", 2, alerts.size) + httpResponse = adminClient().makeRequest( + "GET", "/${monitor.dataSources.queryIndex}/_search", + StringEntity(request, ContentType.APPLICATION_JSON) + ) + assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) - val findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 2, findings.size) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) - assertTrue("Findings saved for test monitor", findings[1].relatedDocIds.contains("5")) + searchResponse = 
SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content)) + searchResponse.hits.totalHits?.let { assertEquals(1L, it.value) } } - fun `test execute monitor with new index added after first execution that generates alerts and findings`() { + fun `test execute monitor with new index added after first execution that generates alerts and findings from new query`() { val testIndex = createTestIndex("test1") val testIndex2 = createTestIndex("test2") val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) @@ -319,8 +1024,9 @@ class DocumentMonitorRunnerIT : AlertingRestTestCase() { "test_field" : "us-west-2" }""" - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") - val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery)) + val docQuery1 = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docQuery2 = DocLevelQuery(query = "test_field_new:\"us-west-2\"", name = "4", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery1, docQuery2)) val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) @@ -335,23 +1041,22 @@ class DocumentMonitorRunnerIT : AlertingRestTestCase() { var findings = searchFindings(monitor) assertEquals("Findings saved for test monitor", 2, findings.size) - assertTrue( - "Findings saved for test monitor expected 1 instead of ${findings[0].relatedDocIds}", - findings[0].relatedDocIds.contains("1") - ) - assertTrue( - "Findings saved for test monitor expected 51 instead of ${findings[1].relatedDocIds}", - findings[1].relatedDocIds.contains("5") - ) + + var foundFindings = findings.filter { it.relatedDocIds.contains("1") || it.relatedDocIds.contains("5") } + assertEquals("Findings saved for test monitor expected 1 and 5", 2, foundFindings.size) // clear previous findings and alerts deleteIndex(ALL_FINDING_INDEX_PATTERN) deleteIndex(ALL_ALERT_INDEX_PATTERN) + val testDocNew = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field_new" : "us-west-2" + }""" + val testIndex3 = createTestIndex("test3") - indexDoc(testIndex3, "10", testDoc) - indexDoc(testIndex, "14", testDoc) - indexDoc(testIndex2, "51", testDoc) + indexDoc(testIndex3, "10", testDocNew) val response = executeMonitor(monitor.id) @@ -361,27 +1066,20 @@ class DocumentMonitorRunnerIT : AlertingRestTestCase() { @Suppress("UNCHECKED_CAST") val searchResult = (output.objectMap("input_results")["results"] as List>).first() @Suppress("UNCHECKED_CAST") - val matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 3, matchingDocsToQuery.size) - assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("14|$testIndex", "51|$testIndex2", "10|$testIndex3"))) + val matchingDocsToQuery = searchResult[docQuery2.id] as List + assertEquals("Incorrect search result", 1, matchingDocsToQuery.size) + assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("10|$testIndex3"))) alerts = searchAlertsWithFilter(monitor) - assertEquals("Alert saved for test monitor", 3, alerts.size) + assertEquals("Alert saved for test monitor", 1, alerts.size) findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 3, findings.size) - assertTrue( - 
"Findings saved for test monitor expected 14 instead of ${findings[0].relatedDocIds}", - findings[0].relatedDocIds.contains("14") - ) - assertTrue( - "Findings saved for test monitor expected 51 instead of ${findings[1].relatedDocIds}", - findings[1].relatedDocIds.contains("51") - ) - assertTrue( - "Findings saved for test monitor expected 10 instead of ${findings[2].relatedDocIds}", - findings[2].relatedDocIds.contains("10") - ) + assertEquals("Findings saved for test monitor", 1, findings.size) + + foundFindings = findings.filter { + it.relatedDocIds.contains("10") + } + assertEquals("Findings saved for test monitor expected 10", 1, foundFindings.size) } fun `test document-level monitor when alias only has write index with 0 docs`() { @@ -579,6 +1277,403 @@ class DocumentMonitorRunnerIT : AlertingRestTestCase() { } } + fun `test document-level monitor when datastreams contain docs that do match query`() { + val dataStreamName = "test-datastream" + createDataStream( + dataStreamName, + """ + "properties" : { + "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" }, + "test_field" : { "type" : "keyword" }, + "number" : { "type" : "keyword" } + } + """.trimIndent(), + false + ) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(dataStreamName), listOf(docQuery)) + + val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id) + val monitor = createMonitor( + randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))) + ) + ) + + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "@timestamp": "$testTime", + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + indexDoc(dataStreamName, "1", testDoc) + var response = executeMonitor(monitor.id) + var output = entityAsMap(response) + var searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + var matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 1, matchingDocsToQuery.size) + + rolloverDatastream(dataStreamName) + indexDoc(dataStreamName, "2", testDoc) + response = executeMonitor(monitor.id) + output = entityAsMap(response) + searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 1, matchingDocsToQuery.size) + + deleteDataStream(dataStreamName) + } + + fun `test document-level monitor when datastreams contain docs across read-only indices that do match query`() { + val dataStreamName = "test-datastream" + createDataStream( + dataStreamName, + """ + "properties" : { + "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" }, + "test_field" : { "type" : "keyword" }, + "number" : { "type" : "keyword" } + } + """.trimIndent(), + false + ) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(dataStreamName), listOf(docQuery)) + + val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), 
destinationId = createDestination().id) + val monitor = createMonitor( + randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))) + ) + ) + + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "@timestamp": "$testTime", + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + indexDoc(dataStreamName, "1", testDoc) + var response = executeMonitor(monitor.id) + var output = entityAsMap(response) + var searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + var matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 1, matchingDocsToQuery.size) + + indexDoc(dataStreamName, "2", testDoc) + rolloverDatastream(dataStreamName) + rolloverDatastream(dataStreamName) + indexDoc(dataStreamName, "4", testDoc) + rolloverDatastream(dataStreamName) + response = executeMonitor(monitor.id) + output = entityAsMap(response) + searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) + + indexDoc(dataStreamName, "5", testDoc) + indexDoc(dataStreamName, "6", testDoc) + response = executeMonitor(monitor.id) + output = entityAsMap(response) + searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) + deleteDataStream(dataStreamName) + } + + fun `test document-level monitor when index alias contain docs that do match query`() { + val aliasName = "test-alias" + createIndexAlias( + aliasName, + """ + "properties" : { + "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" }, + "test_field" : { "type" : "keyword" }, + "number" : { "type" : "keyword" } + } + """.trimIndent() + ) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf("$aliasName"), listOf(docQuery)) + + val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id) + val monitor = createMonitor( + randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))) + ) + ) + + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "@timestamp": "$testTime", + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + indexDoc(aliasName, "1", testDoc) + var response = executeMonitor(monitor.id) + var output = entityAsMap(response) + var searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + var matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 1, matchingDocsToQuery.size) + + rolloverDatastream(aliasName) + indexDoc(aliasName, "2", testDoc) + response = executeMonitor(monitor.id) + output = entityAsMap(response) + searchResult = 
(output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 1, matchingDocsToQuery.size) + + deleteIndexAlias(aliasName) + } + + fun `test document-level monitor when multiple datastreams contain docs across read-only indices that do match query`() { + val dataStreamName1 = "test-datastream1" + createDataStream( + dataStreamName1, + """ + "properties" : { + "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" }, + "test_field" : { "type" : "keyword" }, + "number" : { "type" : "keyword" } + } + """.trimIndent(), + false + ) + val dataStreamName2 = "test-datastream2" + createDataStream( + dataStreamName2, + """ + "properties" : { + "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" }, + "test_field" : { "type" : "keyword" }, + "number" : { "type" : "keyword" } + } + """.trimIndent(), + false + ) + + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "@timestamp": "$testTime", + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + indexDoc(dataStreamName2, "-1", testDoc) + rolloverDatastream(dataStreamName2) + indexDoc(dataStreamName2, "0", testDoc) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf("test-datastream*"), listOf(docQuery)) + + val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id) + val monitor = createMonitor( + randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))) + ) + ) + + indexDoc(dataStreamName1, "1", testDoc) + indexDoc(dataStreamName2, "1", testDoc) + var response = executeMonitor(monitor.id) + var output = entityAsMap(response) + var searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + var matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) + + indexDoc(dataStreamName1, "2", testDoc) + indexDoc(dataStreamName2, "2", testDoc) + rolloverDatastream(dataStreamName1) + rolloverDatastream(dataStreamName1) + rolloverDatastream(dataStreamName2) + indexDoc(dataStreamName1, "4", testDoc) + indexDoc(dataStreamName2, "4", testDoc) + rolloverDatastream(dataStreamName1) + response = executeMonitor(monitor.id) + output = entityAsMap(response) + searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 4, matchingDocsToQuery.size) + + indexDoc(dataStreamName1, "5", testDoc) + indexDoc(dataStreamName1, "6", testDoc) + indexDoc(dataStreamName2, "5", testDoc) + indexDoc(dataStreamName2, "6", testDoc) + response = executeMonitor(monitor.id) + output = entityAsMap(response) + searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 4, matchingDocsToQuery.size) + deleteDataStream(dataStreamName1) + deleteDataStream(dataStreamName2) + } + + fun `test 
document-level monitor ignoring old read-only indices for datastreams`() { + val dataStreamName = "test-datastream" + createDataStream( + dataStreamName, + """ + "properties" : { + "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" }, + "test_field" : { "type" : "keyword" }, + "number" : { "type" : "keyword" } + } + """.trimIndent(), + false + ) + + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "@timestamp": "$testTime", + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + indexDoc(dataStreamName, "-1", testDoc) + rolloverDatastream(dataStreamName) + indexDoc(dataStreamName, "0", testDoc) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(dataStreamName), listOf(docQuery)) + + val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id) + val monitor = createMonitor( + randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))) + ) + ) + + indexDoc(dataStreamName, "1", testDoc) + var response = executeMonitor(monitor.id) + var output = entityAsMap(response) + var searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + var matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 1, matchingDocsToQuery.size) + + rolloverDatastream(dataStreamName) + indexDoc(dataStreamName, "2", testDoc) + response = executeMonitor(monitor.id) + output = entityAsMap(response) + searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 1, matchingDocsToQuery.size) + + deleteDataStream(dataStreamName) + } + + fun `test execute monitor with non-null data sources`() { + + val testIndex = createTestIndex() + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + + val alertCategories = AlertCategory.values() + val actionExecutionScope = PerAlertActionScope( + actionableAlerts = (1..randomInt(alertCategories.size)).map { alertCategories[it - 1] }.toSet() + ) + val actionExecutionPolicy = ActionExecutionPolicy(actionExecutionScope) + val actions = (0..randomInt(10)).map { + randomActionWithPolicy( + template = randomTemplateScript("Hello {{ctx.monitor.name}}"), + destinationId = createDestination().id, + actionExecutionPolicy = actionExecutionPolicy + ) + } + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = actions) + try { + createMonitor( + randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + findingsIndex = "custom_findings_index", + alertsIndex = "custom_alerts_index", + ) + ) + ) + fail("Expected create monitor to fail") + } catch (e: 
ResponseException) { + assertTrue(e.message!!.contains("illegal_argument_exception")) + } + } + + fun `test execute monitor with indices removed after first run`() { + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + val index1 = createTestIndex() + val index2 = createTestIndex() + val index4 = createTestIndex() + val index5 = createTestIndex() + + val docQuery = DocLevelQuery(query = "\"us-west-2\"", fields = listOf(), name = "3") + var docLevelInput = DocLevelMonitorInput("description", listOf(index1, index2, index4, index5), listOf(docQuery)) + + val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id) + val monitor = createMonitor( + randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))) + ) + ) + + indexDoc(index1, "1", testDoc) + indexDoc(index2, "1", testDoc) + indexDoc(index4, "1", testDoc) + indexDoc(index5, "1", testDoc) + + var response = executeMonitor(monitor.id) + + var output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + + assertEquals(1, output.objectMap("trigger_results").values.size) + deleteIndex(index1) + deleteIndex(index2) + + indexDoc(index4, "2", testDoc) + response = executeMonitor(monitor.id) + + output = entityAsMap(response) + assertEquals(1, output.objectMap("trigger_results").values.size) + } + @Suppress("UNCHECKED_CAST") /** helper that returns a field in a json map whose values are all json objects */ private fun Map.objectMap(key: String): Map> { diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt new file mode 100644 index 000000000..2a3527dd1 --- /dev/null +++ b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorDataSourcesIT.kt @@ -0,0 +1,5921 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import org.junit.Assert +import org.opensearch.action.DocWriteRequest +import org.opensearch.action.admin.cluster.state.ClusterStateRequest +import org.opensearch.action.admin.indices.alias.Alias +import org.opensearch.action.admin.indices.close.CloseIndexRequest +import org.opensearch.action.admin.indices.create.CreateIndexRequest +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest +import org.opensearch.action.admin.indices.get.GetIndexRequest +import org.opensearch.action.admin.indices.get.GetIndexResponse +import org.opensearch.action.admin.indices.mapping.get.GetMappingsRequest +import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest +import org.opensearch.action.admin.indices.open.OpenIndexRequest +import org.opensearch.action.admin.indices.refresh.RefreshRequest +import org.opensearch.action.bulk.BulkRequest +import org.opensearch.action.bulk.BulkResponse +import org.opensearch.action.fieldcaps.FieldCapabilitiesRequest +import org.opensearch.action.index.IndexRequest +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.alerts.AlertIndices +import org.opensearch.alerting.core.ScheduledJobIndices +import 
org.opensearch.alerting.model.DocumentLevelTriggerRunResult +import org.opensearch.alerting.model.WorkflowMetadata +import org.opensearch.alerting.transport.AlertingSingleNodeTestCase +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.DocLevelMonitorQueries +import org.opensearch.alerting.util.DocLevelMonitorQueries.Companion.INDEX_PATTERN_SUFFIX +import org.opensearch.alerting.workflow.CompositeWorkflowRunner +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.action.AcknowledgeAlertRequest +import org.opensearch.commons.alerting.action.AcknowledgeAlertResponse +import org.opensearch.commons.alerting.action.AcknowledgeChainedAlertRequest +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.DeleteMonitorRequest +import org.opensearch.commons.alerting.action.GetAlertsRequest +import org.opensearch.commons.alerting.action.GetAlertsResponse +import org.opensearch.commons.alerting.action.IndexMonitorResponse +import org.opensearch.commons.alerting.action.SearchMonitorRequest +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.ChainedAlertTrigger +import org.opensearch.commons.alerting.model.ChainedMonitorFindings +import org.opensearch.commons.alerting.model.CompositeInput +import org.opensearch.commons.alerting.model.DataSources +import org.opensearch.commons.alerting.model.Delegate +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.IntervalSchedule +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.ScheduledJob.Companion.DOC_LEVEL_QUERIES_INDEX +import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.alerting.model.Table +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import org.opensearch.index.mapper.MapperService +import org.opensearch.index.query.MatchQueryBuilder +import org.opensearch.index.query.QueryBuilders +import org.opensearch.index.query.TermQueryBuilder +import org.opensearch.rest.RestRequest +import org.opensearch.script.Script +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder +import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.test.OpenSearchTestCase +import java.time.Instant +import java.time.ZonedDateTime +import java.time.format.DateTimeFormatter +import java.time.temporal.ChronoUnit +import java.time.temporal.ChronoUnit.MILLIS +import java.util.Collections +import java.util.Map +import java.util.UUID +import java.util.concurrent.ExecutionException +import java.util.concurrent.TimeUnit +import java.util.stream.Collectors + +class MonitorDataSourcesIT : 
AlertingSingleNodeTestCase() { + + fun `test execute monitor with dryrun`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val id = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, id, true) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + val table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 0) + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 0) + try { + client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, "wrong_alert_index")) + .get() + fail() + } catch (e: Exception) { + Assert.assertTrue(e.message!!.contains("IndexNotFoundException")) + } + } + + fun `test execute monitor with custom alerts index`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customAlertsIndex = "custom_alerts_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(alertsIndex = customAlertsIndex) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val id = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + val alerts = searchAlerts(id, customAlertsIndex) + assertEquals("Alert saved for test monitor", 1, alerts.size) + val table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, customAlertsIndex)) + .get() + Assert.assertTrue(getAlertsResponse != null) 
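+        // The same alert should be returned whether we query the custom alerts index directly
+        // or filter by monitor id; the alert is then acknowledged below.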
+ Assert.assertTrue(getAlertsResponse.alerts.size == 1) + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + val alertId = getAlertsResponse.alerts.get(0).id + val acknowledgeAlertResponse = client().execute( + AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE, + AcknowledgeAlertRequest(id, listOf(alertId), WriteRequest.RefreshPolicy.IMMEDIATE) + ).get() + Assert.assertEquals(acknowledgeAlertResponse.acknowledged.size, 1) + } + + fun `test mappings parsing`() { + + val index1 = "index_123" + val index2 = "index_456" + val index3 = "index_789" + val index4 = "index_012" + val q1 = DocLevelQuery(query = "properties:\"abcd\"", name = "1", fields = listOf()) + val q2 = DocLevelQuery(query = "type.properties:\"abcd\"", name = "2", fields = listOf()) + val q3 = DocLevelQuery(query = "type.something.properties:\"abcd\"", name = "3", fields = listOf()) + val q4 = DocLevelQuery(query = "type.something.properties.lastone:\"abcd\"", name = "4", fields = listOf()) + + createIndex(index1, Settings.EMPTY) + createIndex(index2, Settings.EMPTY) + createIndex(index3, Settings.EMPTY) + createIndex(index4, Settings.EMPTY) + + val m1 = """{ + "properties": { + "properties": { + "type": "keyword" + } + } + } + """.trimIndent() + client().admin().indices().putMapping(PutMappingRequest(index1).source(m1, XContentType.JSON)).get() + + val m2 = """{ + "properties": { + "type": { + "properties": { + "properties": { "type": "keyword" } + } + } + } + } + """.trimIndent() + client().admin().indices().putMapping(PutMappingRequest(index2).source(m2, XContentType.JSON)).get() + + val m3 = """{ + "properties": { + "type": { + "properties": { + "something": { + "properties" : { + "properties": { "type": "keyword" } + } + } + } + } + } + } + """.trimIndent() + client().admin().indices().putMapping(PutMappingRequest(index3).source(m3, XContentType.JSON)).get() + + val m4 = """{ + "properties": { + "type": { + "properties": { + "something": { + "properties" : { + "properties": { + "properties": { + "lastone": { "type": "keyword" } + } + } + } + } + } + } + } + } + """.trimIndent() + client().admin().indices().putMapping(PutMappingRequest(index4).source(m4, XContentType.JSON)).get() + + val docLevelInput = DocLevelMonitorInput( + "description", + listOf(index1, index2, index3, index4), + listOf(q1, q2, q3, q4) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "custom_findings_index-1" + val customQueryIndex = "custom_alerts_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + val monitorResponse = createMonitor(monitor) + + val testDoc1 = """{ + "properties": "abcd" + }""" + indexDoc(index1, "1", testDoc1) + val testDoc2 = """{ + "type.properties": "abcd" + }""" + indexDoc(index2, "1", testDoc2) + val testDoc3 = """{ + "type.something.properties": "abcd" + }""" + indexDoc(index3, "1", testDoc3) + val testDoc4 = """{ + "type.something.properties.lastone": "abcd" + }""" + indexDoc(index4, "1", testDoc4) + + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + val id = 
monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + val table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + val findings = searchFindings(id, customFindingsIndex) + assertEquals("Findings saved for test monitor", 4, findings.size) + } + + fun `test execute monitor without triggers`() { + val docQuery = DocLevelQuery(query = "eventType:\"login\"", name = "3", fields = listOf()) + + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery) + ) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "custom_findings_index-1" + val customQueryIndex = "custom_alerts_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + val monitorResponse = createMonitor(monitor) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + + val testDoc = """{ + "eventType" : "login" + }""" + indexDoc(index, "1", testDoc) + + monitor = monitorResponse!!.monitor + val id = monitorResponse.id + // Execute dry run first and expect no alerts or findings + var executeMonitorResponse = executeMonitor(monitor, id, true) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 0) + searchAlerts(id) + var table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.isEmpty()) + var findings = searchFindings(id, customFindingsIndex) + assertEquals("Findings saved for test monitor", 0, findings.size) + + // Execute real run - expect findings, but no alerts + executeMonitorResponse = executeMonitor(monitor, id, false) + + searchAlerts(id) + table = Table("asc", "id", null, 1, 0, "") + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.isEmpty()) + + findings = searchFindings(id, customFindingsIndex) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + assertEquals("Didn't match query", 1, findings[0].docLevelQueries.size) + } + + fun `test execute monitor with custom query index`() { + val q1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) + val q2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) + val q3 = DocLevelQuery(query = "source.ip.v4.v0:120", name = "5", fields = listOf()) + val q4 = DocLevelQuery(query = "alias.some.fff:\"us-west-2\"", name = "6", fields = listOf()) + val q5 = DocLevelQuery(query = 
"message:\"This is an error from IAD region\"", name = "7", fields = listOf()) + val q6 = DocLevelQuery(query = "f1.type.f4:\"hello\"", name = "8", fields = listOf()) + val q7 = DocLevelQuery(query = "f1.type.f2.f3:\"world\"", name = "9", fields = listOf()) + val q8 = DocLevelQuery(query = "type:\"some type\"", name = "10", fields = listOf()) + val q9 = DocLevelQuery(query = "properties:123", name = "11", fields = listOf()) + + val docLevelInput = DocLevelMonitorInput( + "description", + listOf(index), + listOf(q1, q2, q3, q4, q5, q6, q7, q8, q9) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "custom_findings_index-1" + val customQueryIndex = "custom_alerts_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + val monitorResponse = createMonitor(monitor) + // Trying to test here few different "nesting" situations and "wierd" characters + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v1" : 12345, + "source.ip.v6.v2" : 16645, + "source.ip.v4.v0" : 120, + "test_bad_char" : "\u0000", + "test_strict_date_time" : "$testTime", + "test_field.some_other_field" : "us-west-2", + "f1.type.f2.f3" : "world", + "f1.type.f4" : "hello", + "type" : "some type", + "properties": 123 + }""" + indexDoc(index, "1", testDoc) + client().admin().indices().putMapping( + PutMappingRequest(index).source("alias.some.fff", "type=alias,path=test_field.some_other_field") + ) + val mappings = "{\"properties\":{\"type\":{\"type\":\"text\",\"fields\":{\"keyword\":{\"type\":\"keyword\"," + + "\"ignore_above\":256}}},\"query\":{\"type\":\"text\"}}}" + val mappingsResp = client().admin().indices().putMapping( + PutMappingRequest(index).source(mappings, XContentType.JSON) + ).get() + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + val id = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + val table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + val findings = searchFindings(id, customFindingsIndex) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + assertEquals("Didn't match all 9 queries", 9, findings[0].docLevelQueries.size) + } + + fun `test execute monitor with non-flattened json doc as source`() { + val docQuery1 = DocLevelQuery(query = "source.device.port:12345 OR source.device.hwd.id:12345", name = "3", fields = listOf()) + + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery1) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val 
customFindingsIndexPattern = "custom_findings_index-1" + val customQueryIndex = "custom_alerts_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + val monitorResponse = createMonitor(monitor) + + val mappings = """{ + "properties": { + "source.device.port": { "type": "long" }, + "source.device.hwd.id": { "type": "long" }, + "nested_field": { + "type": "nested", + "properties": { + "test1": { + "type": "keyword" + } + } + }, + "my_join_field": { + "type": "join", + "relations": { + "question": "answer" + } + } + } + }""" + + client().admin().indices().putMapping(PutMappingRequest(index).source(mappings, XContentType.JSON)).get() + val getFieldCapabilitiesResp = client().fieldCaps(FieldCapabilitiesRequest().indices(index).fields("*")).get() + assertTrue(getFieldCapabilitiesResp.getField("source").containsKey("object")) + assertTrue(getFieldCapabilitiesResp.getField("source.device").containsKey("object")) + assertTrue(getFieldCapabilitiesResp.getField("source.device.hwd").containsKey("object")) + // testing both, nested and flatten documents + val testDocuments = mutableListOf() + testDocuments += """{ + "source" : { "device": {"port" : 12345 } }, + "nested_field": { "test1": "some text" } + }""" + testDocuments += """{ + "source.device.port" : "12345" + }""" + testDocuments += """{ + "source.device.port" : 12345 + }""" + testDocuments += """{ + "source" : { "device": {"hwd": { "id": 12345 } } } + }""" + testDocuments += """{ + "source.device.hwd.id" : 12345 + }""" + // Document with join field + testDocuments += """{ + "source" : { "device" : { "hwd": { "id" : 12345 } } }, + "my_join_field": { "name": "question" } + }""" + // Checking if these pointless but valid documents cause any issues + testDocuments += """{ + "source" : {} + }""" + testDocuments += """{ + "source.device" : null + }""" + testDocuments += """{ + "source.device" : {} + }""" + testDocuments += """{ + "source.device.hwd" : {} + }""" + testDocuments += """{ + "source.device.hwd.id" : null + }""" + testDocuments += """{ + "some.multi.val.field" : [12345, 10, 11] + }""" + // Insert all documents + for (i in testDocuments.indices) { + indexDoc(index, "$i", testDocuments[i]) + } + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + val id = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + val table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + val findings = searchFindings(id, customFindingsIndex) + assertEquals("Findings saved for test monitor", 6, findings.size) + assertEquals("Didn't match query", 1, findings[0].docLevelQueries.size) + } + + fun `test execute monitor with custom query index old`() { + val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) + val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) + val 
docQuery3 = DocLevelQuery(query = "source.ip.v4.v0:120", name = "5", fields = listOf()) + val docQuery4 = DocLevelQuery(query = "alias.some.fff:\"us-west-2\"", name = "6", fields = listOf()) + val docQuery5 = DocLevelQuery(query = "message:\"This is an error from IAD region\"", name = "7", fields = listOf()) + val docQuery6 = DocLevelQuery(query = "type.subtype:\"some subtype\"", name = "8", fields = listOf()) + val docQuery7 = DocLevelQuery(query = "supertype.type:\"some type\"", name = "9", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery1, docQuery2, docQuery3, docQuery4, docQuery5, docQuery6, docQuery7) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "custom_findings_index-1" + val customQueryIndex = "custom_alerts_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + // Trying to test here few different "nesting" situations and "wierd" characters + val testDoc = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v1" : 12345, + "source.ip.v6.v2" : 16645, + "source.ip.v4.v0" : 120, + "test_bad_char" : "\u0000", + "test_strict_date_time" : "$testTime", + "test_field.some_other_field" : "us-west-2", + "type.subtype" : "some subtype", + "supertype.type" : "some type" + }""" + indexDoc(index, "1", testDoc) + client().admin().indices().putMapping( + PutMappingRequest(index).source("alias.some.fff", "type=alias,path=test_field.some_other_field") + ) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + val id = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + val table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + val findings = searchFindings(id, customFindingsIndex) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + assertEquals("Didn't match all 7 queries", 7, findings[0].docLevelQueries.size) + } + + fun `test monitor error alert created and updated with new error`() { + val docQuery = DocLevelQuery(query = "source:12345", name = "1", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + + val testDoc = """{ + "message" : "This is an error from IAD region" + }""" + + val monitorResponse = createMonitor(monitor) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + + monitor = 
monitorResponse!!.monitor + val id = monitorResponse.id + + // Close index to force error alert + client().admin().indices().close(CloseIndexRequest(index)).get() + + var executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 0) + searchAlerts(id) + var table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + Assert.assertTrue(getAlertsResponse.alerts[0].errorMessage == "IndexClosedException[closed]") + // Reopen index + client().admin().indices().open(OpenIndexRequest(index)).get() + // Close queryIndex + client().admin().indices().close(CloseIndexRequest(DOC_LEVEL_QUERIES_INDEX + INDEX_PATTERN_SUFFIX)).get() + + indexDoc(index, "1", testDoc) + + executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 0) + searchAlerts(id) + table = Table("asc", "id", null, 10, 0, "") + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + Assert.assertTrue(getAlertsResponse.alerts[0].errorHistory[0].message == "IndexClosedException[closed]") + Assert.assertEquals(1, getAlertsResponse.alerts[0].errorHistory.size) + Assert.assertTrue(getAlertsResponse.alerts[0].errorMessage!!.contains("Failed to run percolate search")) + } + + fun `test monitor error alert created trigger run errored 2 times same error`() { + val docQuery = DocLevelQuery(query = "source:12345", name = "1", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery) + ) + val trigger = randomDocumentLevelTrigger(condition = Script("invalid script code")) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + + val monitorResponse = createMonitor(monitor) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + + monitor = monitorResponse!!.monitor + val id = monitorResponse.id + + var executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + var table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + Assert.assertTrue(getAlertsResponse.alerts[0].errorMessage!!.contains("Trigger errors")) + + val oldLastNotificationTime = getAlertsResponse.alerts[0].lastNotificationTime + + executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + table = 
Table("asc", "id", null, 10, 0, "") + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + Assert.assertEquals(0, getAlertsResponse.alerts[0].errorHistory.size) + Assert.assertTrue(getAlertsResponse.alerts[0].errorMessage!!.contains("Trigger errors")) + Assert.assertTrue(getAlertsResponse.alerts[0].lastNotificationTime!!.isAfter(oldLastNotificationTime)) + } + + fun `test monitor error alert cleared after successful monitor run`() { + val customAlertIndex = "custom-alert-index" + val customAlertHistoryIndex = "custom-alert-history-index" + val customAlertHistoryIndexPattern = "" + val docQuery = DocLevelQuery(query = "source:12345", name = "1", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + alertsIndex = customAlertIndex, + alertsHistoryIndex = customAlertHistoryIndex, + alertsHistoryIndexPattern = customAlertHistoryIndexPattern + ) + ) + + val monitorResponse = createMonitor(monitor) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + + monitor = monitorResponse!!.monitor + val id = monitorResponse.id + + // Close index to force error alert + client().admin().indices().close(CloseIndexRequest(index)).get() + + var executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 0) + searchAlerts(id) + var table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, customAlertIndex)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertEquals(1, getAlertsResponse.alerts.size) + Assert.assertTrue(getAlertsResponse.alerts[0].errorMessage == "IndexClosedException[closed]") + Assert.assertNull(getAlertsResponse.alerts[0].endTime) + + // Open index to have monitor run successfully + client().admin().indices().open(OpenIndexRequest(index)).get() + // Execute monitor again and expect successful run + executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + // Verify that alert is moved to history index + table = Table("asc", "id", null, 10, 0, "") + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, customAlertIndex)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertEquals(0, getAlertsResponse.alerts.size) + + table = Table("asc", "id", null, 10, 0, "") + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, customAlertHistoryIndex)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertEquals(1, getAlertsResponse.alerts.size) + Assert.assertTrue(getAlertsResponse.alerts[0].errorMessage == "IndexClosedException[closed]") + Assert.assertNotNull(getAlertsResponse.alerts[0].endTime) + } + + fun 
`test multiple monitor error alerts cleared after successful monitor run`() { + val customAlertIndex = "custom-alert-index" + val customAlertHistoryIndex = "custom-alert-history-index" + val customAlertHistoryIndexPattern = "" + val docQuery = DocLevelQuery(query = "source:12345", name = "1", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + alertsIndex = customAlertIndex, + alertsHistoryIndex = customAlertHistoryIndex, + alertsHistoryIndexPattern = customAlertHistoryIndexPattern + ) + ) + + val monitorResponse = createMonitor(monitor) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + + monitor = monitorResponse!!.monitor + val monitorId = monitorResponse.id + + // Close index to force error alert + client().admin().indices().close(CloseIndexRequest(index)).get() + + var executeMonitorResponse = executeMonitor(monitor, monitorId, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 0) + // Create 10 old alerts to simulate having "old error alerts"(2.6) + for (i in 1..10) { + val startTimestamp = Instant.now().minusSeconds(3600 * 24 * i.toLong()).toEpochMilli() + val oldErrorAlertAsString = """ + {"id":"$i","version":-1,"monitor_id":"$monitorId", + "schema_version":4,"monitor_version":1,"monitor_name":"geCNcHKTlp","monitor_user":{"name":"","backend_roles":[], + "roles":[],"custom_attribute_names":[],"user_requested_tenant":null},"trigger_id":"_nnk_YcB5pHgSZwYwO2r", + "trigger_name":"NoOp trigger","finding_ids":[],"related_doc_ids":[],"state":"ERROR","error_message":"some monitor error", + "alert_history":[],"severity":"","action_execution_results":[], + "start_time":$startTimestamp,"last_notification_time":$startTimestamp,"end_time":null,"acknowledged_time":null} + """.trimIndent() + + client().index( + IndexRequest(customAlertIndex) + .id("$i") + .routing(monitorId) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .source(oldErrorAlertAsString, XContentType.JSON) + ).get() + } + var table = Table("asc", "id", null, 1000, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", monitorId, customAlertIndex)) + .get() + + Assert.assertTrue(getAlertsResponse != null) + Assert.assertEquals(1 + 10, getAlertsResponse.alerts.size) + val newErrorAlert = getAlertsResponse.alerts.firstOrNull { it.errorMessage == "IndexClosedException[closed]" } + Assert.assertNotNull(newErrorAlert) + Assert.assertNull(newErrorAlert!!.endTime) + + // Open index to have monitor run successfully + client().admin().indices().open(OpenIndexRequest(index)).get() + // Execute monitor again and expect successful run + executeMonitorResponse = executeMonitor(monitor, monitorId, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + // Verify that alert is moved to history index + table = Table("asc", "id", null, 1000, 0, "") + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", monitorId, customAlertIndex)) + .get() + Assert.assertTrue(getAlertsResponse 
!= null)
+        Assert.assertEquals(0, getAlertsResponse.alerts.size)
+
+        table = Table("asc", "id", null, 1000, 0, "")
+        getAlertsResponse = client()
+            .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", monitorId, customAlertHistoryIndex))
+            .get()
+        Assert.assertTrue(getAlertsResponse != null)
+        Assert.assertEquals(11, getAlertsResponse.alerts.size)
+        getAlertsResponse.alerts.forEach { alert -> assertNotNull(alert.endTime) }
+    }
+
+    fun `test execute monitor with custom query index and nested mappings`() {
+        val docQuery1 = DocLevelQuery(query = "message:\"msg 1 2 3 4\"", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1))
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val customFindingsIndex = "custom_findings_index"
+        val customFindingsIndexPattern = "custom_findings_index-1"
+        val customQueryIndex = "custom_alerts_index"
+        var monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger),
+            dataSources = DataSources(
+                queryIndex = customQueryIndex,
+                findingsIndex = customFindingsIndex,
+                findingsIndexPattern = customFindingsIndexPattern
+            )
+        )
+        val monitorResponse = createMonitor(monitor)
+
+        // We are verifying here that an index with nested mappings and nested aliases
+        // won't break query matching
+
+        // Create index mappings
+        val m: MutableMap<String, Any> = HashMap()
+        val m1: MutableMap<String, Any> = HashMap()
+        m1["title"] = Map.of("type", "text")
+        m1["category"] = Map.of("type", "keyword")
+        m["rule"] = Map.of("type", "nested", "properties", m1)
+        val properties = Map.of("properties", m)
+
+        client().admin().indices().putMapping(
+            PutMappingRequest(
+                index
+            ).source(properties)
+        ).get()
+
+        // Put alias for nested fields
+        val mm: MutableMap<String, Any> = HashMap()
+        val mm1: MutableMap<String, Any> = HashMap()
+        mm1["title_alias"] = Map.of("type", "alias", "path", "rule.title")
+        mm["rule"] = Map.of("type", "nested", "properties", mm1)
+        val properties1 = Map.of("properties", mm)
+        client().admin().indices().putMapping(
+            PutMappingRequest(
+                index
+            ).source(properties1)
+        ).get()
+
+        val testDoc = """{
+            "rule": {"title": "some_title"},
+            "message": "msg 1 2 3 4"
+        }"""
+        indexDoc(index, "2", testDoc)
+
+        client().admin().indices().putMapping(
+            PutMappingRequest(index).source("alias.some.fff", "type=alias,path=test_field.some_other_field")
+        )
+        assertFalse(monitorResponse?.id.isNullOrEmpty())
+        monitor = monitorResponse!!.monitor
+        val id = monitorResponse.id
+        val executeMonitorResponse = executeMonitor(monitor, id, false)
+        Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name)
+        Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1)
+        searchAlerts(id)
+        val table = Table("asc", "id", null, 1, 0, "")
+        var getAlertsResponse = client()
+            .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null))
+            .get()
+        Assert.assertTrue(getAlertsResponse != null)
+        Assert.assertTrue(getAlertsResponse.alerts.size == 1)
+        val findings = searchFindings(id, customFindingsIndex)
+        assertEquals("Findings saved for test monitor", 1, findings.size)
+        assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("2"))
+        assertEquals("Didn't match query", 1, findings[0].docLevelQueries.size)
+    }
+
+    fun `test cleanup monitor on partial create monitor failure`() {
+        val docQuery = DocLevelQuery(query = "dnbkjndsfkjbnds:\"us-west-2\"", name = "3",
fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customQueryIndex = "custom_alerts_index" + val analyzer = "dfbdfbafd" + val testDoc = """{ + "rule": {"title": "some_title"}, + "message": "msg 1 2 3 4" + }""" + indexDoc(index, "2", testDoc) + client().admin().indices() + .create( + CreateIndexRequest(customQueryIndex + "-000001").alias(Alias(customQueryIndex)) + .mapping( + """ + { + "_meta": { + "schema_version": 1 + }, + "properties": { + "query": { + "type": "percolator_ext" + }, + "monitor_id": { + "type": "text" + }, + "index": { + "type": "text" + } + } + } + """.trimIndent() + ) + ).get() + + client().admin().indices().close(CloseIndexRequest(customQueryIndex + "-000001")).get() + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + queryIndexMappingsByType = mapOf(Pair("text", mapOf(Pair("analyzer", analyzer)))), + ) + ) + try { + createMonitor(monitor) + fail("monitor creation should fail due to incorrect analyzer name in test setup") + } catch (e: Exception) { + Assert.assertEquals(client().search(SearchRequest(SCHEDULED_JOBS_INDEX)).get().hits.hits.size, 0) + } + } + + fun `test execute monitor without create when no monitors exists`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customQueryIndex = "custom_alerts_index" + val analyzer = "whitespace" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + queryIndexMappingsByType = mapOf(Pair("text", mapOf(Pair("analyzer", analyzer)))), + ) + ) + var executeMonitorResponse = executeMonitor(monitor, null) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + assertIndexNotExists(SCHEDULED_JOBS_INDEX) + + val createMonitorResponse = createMonitor(monitor) + + assertIndexExists(SCHEDULED_JOBS_INDEX) + + indexDoc(index, "1", testDoc) + + executeMonitorResponse = executeMonitor(monitor, createMonitorResponse?.id, dryRun = false) + + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + Assert.assertEquals( + (executeMonitorResponse.monitorRunResult.triggerResults.iterator().next().value as DocumentLevelTriggerRunResult) + .triggeredDocs.size, + 1 + ) + } + + fun `test execute monitor with custom query index and custom field mappings`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customQueryIndex = "custom_alerts_index" + val analyzer = "whitespace" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + 
queryIndexMappingsByType = mapOf(Pair("text", mapOf(Pair("analyzer", analyzer)))), + ) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val id = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + val mapping = client().admin().indices().getMappings(GetMappingsRequest().indices(customQueryIndex)).get() + Assert.assertTrue(mapping.toString().contains("\"analyzer\":\"$analyzer\"")) + } + + fun `test delete monitor deletes all queries and metadata too`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customQueryIndex = "custom_query_index" + val analyzer = "whitespace" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + queryIndexMappingsByType = mapOf(Pair("text", mapOf(Pair("analyzer", analyzer)))), + ) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val monitorId = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, monitorId, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(monitorId) + val clusterStateResponse = client().admin().cluster().state(ClusterStateRequest().indices(customQueryIndex).metadata(true)).get() + val mapping = client().admin().indices().getMappings(GetMappingsRequest().indices(customQueryIndex)).get() + Assert.assertTrue(mapping.toString().contains("\"analyzer\":\"$analyzer\"") == true) + // Verify queries exist + var searchResponse = client().search( + SearchRequest(customQueryIndex).source(SearchSourceBuilder().query(QueryBuilders.matchAllQuery())) + ).get() + assertNotEquals(0, searchResponse.hits.hits.size) + + deleteMonitor(monitorId) + assertIndexNotExists(customQueryIndex + "*") + assertAliasNotExists(customQueryIndex) + } + + fun `test execute monitor with custom findings index and pattern`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "" + var monitor = randomDocumentLevelMonitor( + inputs = 
listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(findingsIndex = customFindingsIndex, findingsIndexPattern = customFindingsIndexPattern) + ) + val monitorResponse = createMonitor(monitor) + client().admin().indices().refresh(RefreshRequest("*")) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val id = monitorResponse.id + var executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + + var findings = searchFindings(id, "custom_findings_index*", true) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + + indexDoc(index, "2", testDoc) + executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + findings = searchFindings(id, "custom_findings_index*", true) + assertEquals("Findings saved for test monitor", 2, findings.size) + assertTrue("Findings saved for test monitor", findings[1].relatedDocIds.contains("2")) + + val indices = getAllIndicesFromPattern("custom_findings_index*") + Assert.assertTrue(indices.isNotEmpty()) + } + + fun `test execute monitor with multiple indices in input success`() { + + val testSourceIndex1 = "test_source_index1" + val testSourceIndex2 = "test_source_index2" + + createIndex(testSourceIndex1, Settings.EMPTY) + createIndex(testSourceIndex2, Settings.EMPTY) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex1, testSourceIndex2), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(findingsIndex = customFindingsIndex, findingsIndexPattern = customFindingsIndexPattern) + ) + val monitorResponse = createMonitor(monitor) + client().admin().indices().refresh(RefreshRequest("*")) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + + indexDoc(testSourceIndex1, "1", testDoc) + indexDoc(testSourceIndex2, "1", testDoc) + + val id = monitorResponse.id + var executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + + var findings = searchFindings(id, "custom_findings_index*", true) + assertEquals("Findings 
saved for test monitor", 2, findings.size) + var foundFindings = findings.filter { it.relatedDocIds.contains("1") } + assertEquals("Didn't find 2 findings", 2, foundFindings.size) + + indexDoc(testSourceIndex1, "2", testDoc) + indexDoc(testSourceIndex2, "2", testDoc) + executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + findings = searchFindings(id, "custom_findings_index*", true) + assertEquals("Findings saved for test monitor", 4, findings.size) + foundFindings = findings.filter { it.relatedDocIds.contains("2") } + assertEquals("Didn't find 2 findings", 2, foundFindings.size) + + val indices = getAllIndicesFromPattern("custom_findings_index*") + Assert.assertTrue(indices.isNotEmpty()) + } + + fun `test execute monitor with multiple indices in input first index gets deleted`() { + // Index #1 does not exist + val testSourceIndex1 = "test_source_index1" + val testSourceIndex2 = "test_source_index2" + + createIndex(testSourceIndex1, Settings.EMPTY) + createIndex(testSourceIndex2, Settings.EMPTY) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex1, testSourceIndex2), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(findingsIndex = customFindingsIndex, findingsIndexPattern = customFindingsIndexPattern) + ) + val monitorResponse = createMonitor(monitor) + client().admin().indices().refresh(RefreshRequest("*")) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + + indexDoc(testSourceIndex2, "1", testDoc) + + client().admin().indices().delete(DeleteIndexRequest(testSourceIndex1)).get() + + val id = monitorResponse.id + var executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + + var findings = searchFindings(id, "custom_findings_index*", true) + assertEquals("Findings saved for test monitor", 1, findings.size) + var foundFindings = findings.filter { it.relatedDocIds.contains("1") } + assertEquals("Didn't find 2 findings", 1, foundFindings.size) + + indexDoc(testSourceIndex2, "2", testDoc) + executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + findings = searchFindings(id, "custom_findings_index*", true) + assertEquals("Findings saved for test monitor", 2, findings.size) + foundFindings = findings.filter { it.relatedDocIds.contains("2") } + assertEquals("Didn't find 2 findings", 1, foundFindings.size) + + val indices = 
+        val indices = getAllIndicesFromPattern("custom_findings_index*")
+        Assert.assertTrue(indices.isNotEmpty())
+    }
+
+    fun `test execute monitor with multiple indices in input second index gets deleted`() {
+        // Index #2 gets deleted before the monitor runs, so only index #1 can produce findings
+        val testSourceIndex1 = "test_source_index1"
+        val testSourceIndex2 = "test_source_index2"
+
+        createIndex(testSourceIndex1, Settings.EMPTY)
+        createIndex(testSourceIndex2, Settings.EMPTY)
+
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex1, testSourceIndex2), listOf(docQuery))
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val customFindingsIndex = "custom_findings_index"
+        val customFindingsIndexPattern = ""
+        var monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger),
+            dataSources = DataSources(findingsIndex = customFindingsIndex, findingsIndexPattern = customFindingsIndexPattern)
+        )
+        val monitorResponse = createMonitor(monitor)
+        client().admin().indices().refresh(RefreshRequest("*"))
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc = """{
+            "message" : "This is an error from IAD region",
+            "test_strict_date_time" : "$testTime",
+            "test_field" : "us-west-2"
+        }"""
+        assertFalse(monitorResponse?.id.isNullOrEmpty())
+        monitor = monitorResponse!!.monitor
+
+        indexDoc(testSourceIndex1, "1", testDoc)
+
+        client().admin().indices().delete(DeleteIndexRequest(testSourceIndex2)).get()
+
+        val id = monitorResponse.id
+        var executeMonitorResponse = executeMonitor(monitor, id, false)
+        Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name)
+        Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1)
+
+        var findings = searchFindings(id, "custom_findings_index*", true)
+        assertEquals("Findings saved for test monitor", 1, findings.size)
+        var foundFindings = findings.filter { it.relatedDocIds.contains("1") }
+        assertEquals("Didn't find 1 finding", 1, foundFindings.size)
+
+        indexDoc(testSourceIndex1, "2", testDoc)
+
+        executeMonitorResponse = executeMonitor(monitor, id, false)
+        Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name)
+        Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1)
+        searchAlerts(id)
+        findings = searchFindings(id, "custom_findings_index*", true)
+        assertEquals("Findings saved for test monitor", 2, findings.size)
+        foundFindings = findings.filter { it.relatedDocIds.contains("2") }
+        assertEquals("Didn't find 1 finding", 1, foundFindings.size)
+
+        val indices = getAllIndicesFromPattern("custom_findings_index*")
+        Assert.assertTrue(indices.isNotEmpty())
+    }
+
+    fun `test execute pre-existing monitor and update`() {
+        val request = CreateIndexRequest(SCHEDULED_JOBS_INDEX).mapping(ScheduledJobIndices.scheduledJobMappings())
+            .settings(Settings.builder().put("index.hidden", true).build())
+        client().admin().indices().create(request)
+        val monitorStringWithoutName = """
+        {
+        "monitor": {
+        "type": "monitor",
+        "schema_version": 0,
+        "name": "UayEuXpZtb",
+        "monitor_type": "doc_level_monitor",
+        "user": {
+        "name": "",
+        "backend_roles": [],
+        "roles": [],
+        "custom_attribute_names": [],
+        "user_requested_tenant": null
+        },
+        "enabled": true,
+        "enabled_time": 1662753436791,
+        "schedule": {
+        "period": {
+        "interval": 5,
+        "unit": "MINUTES"
+        }
+        },
+        "inputs": [{
"doc_level_input": { + "description": "description", + "indices": [ + "$index" + ], + "queries": [{ + "id": "63efdcce-b5a1-49f4-a25f-6b5f9496a755", + "name": "3", + "query": "test_field:\"us-west-2\"", + "tags": [] + }] + } + }], + "triggers": [{ + "document_level_trigger": { + "id": "OGnTI4MBv6qt0ATc9Phk", + "name": "mrbHRMevYI", + "severity": "1", + "condition": { + "script": { + "source": "return true", + "lang": "painless" + } + }, + "actions": [] + } + }], + "last_update_time": 1662753436791 + } + } + """.trimIndent() + val monitorId = "abc" + indexDoc(SCHEDULED_JOBS_INDEX, monitorId, monitorStringWithoutName) + val getMonitorResponse = getMonitorResponse(monitorId) + Assert.assertNotNull(getMonitorResponse) + Assert.assertNotNull(getMonitorResponse.monitor) + val monitor = getMonitorResponse.monitor + + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + indexDoc(index, "1", testDoc) + var executeMonitorResponse = executeMonitor(monitor!!, monitorId, false) + Assert.assertNotNull(executeMonitorResponse) + if (executeMonitorResponse != null) { + Assert.assertNotNull(executeMonitorResponse.monitorRunResult.monitorName) + } + val alerts = searchAlerts(monitorId) + assertEquals(1, alerts.size) + + val customAlertsIndex = "custom_alerts_index" + val customQueryIndex = "custom_query_index" + val customFindingsIndex = "custom_findings_index" + val updateMonitorResponse = updateMonitor( + monitor.copy( + id = monitorId, + owner = "security_analytics_plugin", + dataSources = DataSources( + alertsIndex = customAlertsIndex, + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex + ) + ), + monitorId + ) + Assert.assertNotNull(updateMonitorResponse) + Assert.assertEquals(updateMonitorResponse!!.monitor.owner, "security_analytics_plugin") + indexDoc(index, "2", testDoc) + if (updateMonitorResponse != null) { + executeMonitorResponse = executeMonitor(updateMonitorResponse.monitor, monitorId, false) + } + val findings = searchFindings(monitorId, customFindingsIndex) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("2")) + val customAlertsIndexAlerts = searchAlerts(monitorId, customAlertsIndex) + assertEquals("Alert saved for test monitor", 1, customAlertsIndexAlerts.size) + val table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, customAlertsIndex)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", monitorId, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + val searchRequest = SearchRequest(SCHEDULED_JOBS_INDEX) + var searchMonitorResponse = + client().execute(AlertingActions.SEARCH_MONITORS_ACTION_TYPE, SearchMonitorRequest(searchRequest)) + .get() + Assert.assertEquals(searchMonitorResponse.hits.hits.size, 0) + searchRequest.source().query(MatchQueryBuilder("monitor.owner", "security_analytics_plugin")) + searchMonitorResponse = + client().execute(AlertingActions.SEARCH_MONITORS_ACTION_TYPE, 
SearchMonitorRequest(searchRequest)) + .get() + Assert.assertEquals(searchMonitorResponse.hits.hits.size, 1) + } + + fun `test execute pre-existing monitor without triggers`() { + val request = CreateIndexRequest(SCHEDULED_JOBS_INDEX).mapping(ScheduledJobIndices.scheduledJobMappings()) + .settings(Settings.builder().put("index.hidden", true).build()) + client().admin().indices().create(request) + val monitorStringWithoutName = """ + { + "monitor": { + "type": "monitor", + "schema_version": 0, + "name": "UayEuXpZtb", + "monitor_type": "doc_level_monitor", + "user": { + "name": "", + "backend_roles": [], + "roles": [], + "custom_attribute_names": [], + "user_requested_tenant": null + }, + "enabled": true, + "enabled_time": 1662753436791, + "schedule": { + "period": { + "interval": 5, + "unit": "MINUTES" + } + }, + "inputs": [{ + "doc_level_input": { + "description": "description", + "indices": [ + "$index" + ], + "queries": [{ + "id": "63efdcce-b5a1-49f4-a25f-6b5f9496a755", + "name": "3", + "query": "test_field:\"us-west-2\"", + "tags": [] + }] + } + }], + "triggers": [], + "last_update_time": 1662753436791 + } + } + """.trimIndent() + val monitorId = "abc" + indexDoc(SCHEDULED_JOBS_INDEX, monitorId, monitorStringWithoutName) + val getMonitorResponse = getMonitorResponse(monitorId) + Assert.assertNotNull(getMonitorResponse) + Assert.assertNotNull(getMonitorResponse.monitor) + val monitor = getMonitorResponse.monitor + + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + indexDoc(index, "1", testDoc) + var executeMonitorResponse = executeMonitor(monitor!!, monitorId, false) + Assert.assertNotNull(executeMonitorResponse) + if (executeMonitorResponse != null) { + Assert.assertNotNull(executeMonitorResponse.monitorRunResult.monitorName) + } + val alerts = searchAlerts(monitorId) + assertEquals(0, alerts.size) + + val findings = searchFindings(monitorId) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + } + + fun `test execute monitor with empty source index`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(findingsIndex = customFindingsIndex) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + + val monitorId = monitorResponse.id + var executeMonitorResponse = executeMonitor(monitor, monitorId, false) + + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + + refreshIndex(customFindingsIndex) + + var findings = searchFindings(monitorId, 
customFindingsIndex) + assertEquals("No findings expected before any documents are indexed", 0, findings.size) + + indexDoc(index, "1", testDoc) + + executeMonitor(monitor, monitorId, false) + + refreshIndex(customFindingsIndex) + + findings = searchFindings(monitorId, customFindingsIndex) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + } + + fun `test execute GetFindingsAction with monitorId param`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(findingsIndex = customFindingsIndex) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val monitorId = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, monitorId, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(monitorId) + val findings = searchFindings(monitorId, customFindingsIndex) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + // fetch findings - pass monitorId as reference to finding_index + val findingsFromAPI = getFindings(findings.get(0).id, monitorId, null) + assertEquals( + "Findings mismatch between manually searched and fetched via GetFindingsAction", + findings.get(0).id, + findingsFromAPI.get(0).id + ) + } + + fun `test execute GetFindingsAction with unknown monitorId`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(findingsIndex = customFindingsIndex) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val monitorId = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, monitorId, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(monitorId) + val findings = searchFindings(monitorId, customFindingsIndex) + assertEquals("Findings saved for test 
monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + // fetch findings - don't send monitorId or findingIndexName. It should fall back to hardcoded finding index name + try { + getFindings(findings.get(0).id, "unknown_monitor_id_123456789", null) + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning GetMonitor Action error ", + it.contains("Monitor not found") + ) + } + } + } + + fun `test execute monitor with owner field`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customAlertsIndex = "custom_alerts_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(alertsIndex = customAlertsIndex), + owner = "owner" + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + Assert.assertEquals(monitor.owner, "owner") + indexDoc(index, "1", testDoc) + val id = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + val alerts = searchAlerts(id, customAlertsIndex) + assertEquals("Alert saved for test monitor", 1, alerts.size) + val table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, customAlertsIndex)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + } + + fun `test execute GetFindingsAction with unknown findingIndex param`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(findingsIndex = customFindingsIndex) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val monitorId = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, monitorId, false) + 
Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(monitorId) + val findings = searchFindings(monitorId, customFindingsIndex) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + // fetch findings - pass an unknown findingIndexName; the request should fail because that index doesn't exist + try { + getFindings(findings.get(0).id, null, "unknown_finding_index_123456789") + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception did not return the 'no such index' error", + it.contains("no such index") + ) + } + } + } + + fun `test search custom alerts history index`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customAlertsIndex = "custom_alerts_index" + val customAlertsHistoryIndex = "custom_alerts_history_index" + val customAlertsHistoryIndexPattern = "" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger1, trigger2), + dataSources = DataSources( + alertsIndex = customAlertsIndex, + alertsHistoryIndex = customAlertsHistoryIndex, + alertsHistoryIndexPattern = customAlertsHistoryIndexPattern + ) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val monitorId = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, monitorId, false) + var alertsBefore = searchAlerts(monitorId, customAlertsIndex) + Assert.assertEquals(2, alertsBefore.size) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 2) + // Remove 1 trigger from monitor to force moveAlerts call to move alerts to history index + monitor = monitor.copy(triggers = listOf(trigger1)) + updateMonitor(monitor, monitorId) + + var alerts = listOf<Alert>() + OpenSearchTestCase.waitUntil({ + alerts = searchAlerts(monitorId, customAlertsHistoryIndex) + if (alerts.size == 1) { + return@waitUntil true + } + return@waitUntil false + }, 30, TimeUnit.SECONDS) + assertEquals("Alerts from custom history index", 1, alerts.size) + } + + fun `test search custom alerts history index after alert ack`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customAlertsIndex = "custom_alerts_index" + val customAlertsHistoryIndex = "custom_alerts_history_index" + val customAlertsHistoryIndexPattern = "" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = 
listOf(trigger1, trigger2), + dataSources = DataSources( + alertsIndex = customAlertsIndex, + alertsHistoryIndex = customAlertsHistoryIndex, + alertsHistoryIndexPattern = customAlertsHistoryIndexPattern + ) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val monitorId = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, monitorId, false) + var alertsBefore = searchAlerts(monitorId, customAlertsIndex) + Assert.assertEquals(2, alertsBefore.size) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 2) + + var alerts = listOf<Alert>() + OpenSearchTestCase.waitUntil({ + alerts = searchAlerts(monitorId, customAlertsIndex) + if (alerts.size == 1) { + return@waitUntil true + } + return@waitUntil false + }, 30, TimeUnit.SECONDS) + assertEquals("Alerts from custom index", 2, alerts.size) + + val ackReq = AcknowledgeAlertRequest(monitorId, alerts.map { it.id }.toMutableList(), WriteRequest.RefreshPolicy.IMMEDIATE) + client().execute(AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE, ackReq).get() + + // verify alerts moved from alert index to alert history index + alerts = listOf() + OpenSearchTestCase.waitUntil({ + alerts = searchAlerts(monitorId, customAlertsHistoryIndex) + if (alerts.size == 1) { + return@waitUntil true + } + return@waitUntil false + }, 30, TimeUnit.SECONDS) + assertEquals("Alerts from custom history index", 2, alerts.size) + + // verify alerts deleted from alert index + alerts = listOf() + OpenSearchTestCase.waitUntil({ + alerts = searchAlerts(monitorId, customAlertsIndex) + if (alerts.size == 1) { + return@waitUntil true + } + return@waitUntil false + }, 30, TimeUnit.SECONDS) + assertEquals("Alerts remaining in custom alerts index", 0, alerts.size) + } + + fun `test get alerts by list of monitors containing both existent and non-existent ids`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + monitor = monitorResponse!!.monitor + + val id = monitorResponse.id + + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + ) + val monitorResponse1 = createMonitor(monitor1) + monitor1 = monitorResponse1!!.monitor + val id1 = monitorResponse1.id + indexDoc(index, "1", testDoc) + executeMonitor(monitor1, id1, false) + executeMonitor(monitor, id, false) + val alerts = searchAlerts(id) + assertEquals("Alert saved for test monitor", 1, alerts.size) + val alerts1 = searchAlerts(id) + assertEquals("Alert saved for test monitor", 1, 
alerts1.size) + val table = Table("asc", "id", null, 1000, 0, "") + var getAlertsResponse = client() + .execute( + AlertingActions.GET_ALERTS_ACTION_TYPE, + GetAlertsRequest(table, "ALL", "ALL", null, null) + ) + .get() + + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 2) + + var alertsResponseForRequestWithoutCustomIndex = client() + .execute( + AlertingActions.GET_ALERTS_ACTION_TYPE, + GetAlertsRequest(table, "ALL", "ALL", null, null, monitorIds = listOf(id, id1, "1", "2")) + ) + .get() + Assert.assertTrue(alertsResponseForRequestWithoutCustomIndex != null) + Assert.assertTrue(alertsResponseForRequestWithoutCustomIndex.alerts.size == 2) + val alertIds = getAlertsResponse.alerts.stream().map { alert -> alert.id }.collect(Collectors.toList()) + var getAlertsByAlertIds = client() + .execute( + AlertingActions.GET_ALERTS_ACTION_TYPE, + GetAlertsRequest(table, "ALL", "ALL", null, null, alertIds = alertIds) + ) + .get() + Assert.assertTrue(getAlertsByAlertIds != null) + Assert.assertTrue(getAlertsByAlertIds.alerts.size == 2) + + var getAlertsByWrongAlertIds = client() + .execute( + AlertingActions.GET_ALERTS_ACTION_TYPE, + GetAlertsRequest(table, "ALL", "ALL", null, null, alertIds = listOf("1", "2")) + ) + .get() + + Assert.assertTrue(getAlertsByWrongAlertIds != null) + Assert.assertEquals(getAlertsByWrongAlertIds.alerts.size, 0) + } + + fun `test queryIndex rollover and delete monitor success`() { + + val testSourceIndex = "test_source_index" + createIndex(testSourceIndex, Settings.builder().put("index.mapping.total_fields.limit", "10000").build()) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + // This doc should create close to 10000 (limit) fields in the index mapping. It's easier to add mappings like this than via the api + val docPayload: StringBuilder = StringBuilder(100000) + docPayload.append("{") + for (i in 1..3300) { + docPayload.append(""" "id$i.somefield.somefield$i":$i,""") + } + docPayload.append("\"test_field\" : \"us-west-2\" }") + indexDoc(testSourceIndex, "1", docPayload.toString()) + // Create monitor #1 + var monitorResponse = createMonitor(monitor) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + // Execute monitor #1 + var executeMonitorResponse = executeMonitor(monitor, monitorResponse.id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + // Create monitor #2 + var monitorResponse2 = createMonitor(monitor) + assertFalse(monitorResponse2?.id.isNullOrEmpty()) + monitor = monitorResponse2!!.monitor + // Insert doc #2. 
This one should trigger creation of alerts during monitor exec + val testDoc = """{ + "test_field" : "us-west-2" + }""" + indexDoc(testSourceIndex, "2", testDoc) + // Execute monitor #2 + var executeMonitorResponse2 = executeMonitor(monitor, monitorResponse2.id, false) + Assert.assertEquals(executeMonitorResponse2!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse2.monitorRunResult.triggerResults.size, 1) + + refreshIndex(AlertIndices.ALERT_INDEX) + var alerts = searchAlerts(monitorResponse2.id) + Assert.assertTrue(alerts != null) + Assert.assertTrue(alerts.size == 1) + + // Both monitors used the same queryIndex alias. Since the source index has close to the limit number of fields in its mappings, + // we expect the creation of the second monitor to trigger a rollover of the queryIndex + var getIndexResponse: GetIndexResponse = + client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() + assertEquals(2, getIndexResponse.indices.size) + assertEquals(DOC_LEVEL_QUERIES_INDEX + "-000001", getIndexResponse.indices[0]) + assertEquals(DOC_LEVEL_QUERIES_INDEX + "-000002", getIndexResponse.indices[1]) + // Now we'll verify that execution of both monitors still works + indexDoc(testSourceIndex, "3", testDoc) + // Exec Monitor #1 + executeMonitorResponse = executeMonitor(monitor, monitorResponse.id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + refreshIndex(AlertIndices.ALERT_INDEX) + alerts = searchAlerts(monitorResponse.id) + Assert.assertTrue(alerts != null) + Assert.assertTrue(alerts.size == 2) + // Exec Monitor #2 + executeMonitorResponse = executeMonitor(monitor, monitorResponse2.id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + refreshIndex(AlertIndices.ALERT_INDEX) + alerts = searchAlerts(monitorResponse2.id) + Assert.assertTrue(alerts != null) + Assert.assertTrue(alerts.size == 2) + // Delete monitor #1 + client().execute( + AlertingActions.DELETE_MONITOR_ACTION_TYPE, DeleteMonitorRequest(monitorResponse.id, WriteRequest.RefreshPolicy.IMMEDIATE) + ).get() + // Expect first concrete queryIndex to be deleted since that one was only used by this monitor + getIndexResponse = + client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() + assertEquals(1, getIndexResponse.indices.size) + assertEquals(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "-000002", getIndexResponse.indices[0]) + // Delete monitor #2 + client().execute( + AlertingActions.DELETE_MONITOR_ACTION_TYPE, DeleteMonitorRequest(monitorResponse2.id, WriteRequest.RefreshPolicy.IMMEDIATE) + ).get() + // Expect second concrete queryIndex to be deleted since that one was only used by this monitor + getIndexResponse = + client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() + assertEquals(0, getIndexResponse.indices.size) + } + + fun `test queryIndex rollover failure source_index field count over limit`() { + + val testSourceIndex = "test_source_index" + createIndex(testSourceIndex, Settings.EMPTY) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex), 
listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + // This doc should create 999 fields in the mapping, only 1 field fewer than the limit + val docPayload: StringBuilder = StringBuilder(100000) + docPayload.append("{") + for (i in 1..998) { + docPayload.append(""" "id$i":$i,""") + } + docPayload.append("\"test_field\" : \"us-west-2\" }") + indexDoc(testSourceIndex, "1", docPayload.toString()) + // Create monitor and expect failure. + // queryIndex has 3 fields in mappings initially so 999 + 3 > 1000 (default limit) + try { + createMonitor(monitor) + } catch (e: Exception) { + assertTrue(e.message?.contains("can't process index [$testSourceIndex] due to field mapping limit") ?: false) + } + } + + fun `test queryIndex not rolling over multiple monitors`() { + val testSourceIndex = "test_source_index" + createIndex(testSourceIndex, Settings.EMPTY) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + // Create doc with 11 fields + val docPayload: StringBuilder = StringBuilder(1000) + docPayload.append("{") + for (i in 1..10) { + docPayload.append(""" "id$i":$i,""") + } + docPayload.append("\"test_field\" : \"us-west-2\" }") + indexDoc(testSourceIndex, "1", docPayload.toString()) + // Create monitor #1 + var monitorResponse = createMonitor(monitor) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + // Execute monitor #1 + var executeMonitorResponse = executeMonitor(monitor, monitorResponse.id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + // Create monitor #2 + var monitorResponse2 = createMonitor(monitor) + assertFalse(monitorResponse2?.id.isNullOrEmpty()) + monitor = monitorResponse2!!.monitor + // Insert doc #2. This one should trigger creation of alerts during monitor exec + val testDoc = """{ + "test_field" : "us-west-2" + }""" + indexDoc(testSourceIndex, "2", testDoc) + // Execute monitor #2 + var executeMonitorResponse2 = executeMonitor(monitor, monitorResponse2.id, false) + Assert.assertEquals(executeMonitorResponse2!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse2.monitorRunResult.triggerResults.size, 1) + + refreshIndex(AlertIndices.ALERT_INDEX) + var alerts = searchAlerts(monitorResponse2.id) + Assert.assertTrue(alerts != null) + Assert.assertTrue(alerts.size == 1) + + // Both monitors used the same queryIndex. 
Since the source index has well below the limit number of fields in its mappings, + // we expect only 1 backing queryIndex + val getIndexResponse: GetIndexResponse = + client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() + assertEquals(1, getIndexResponse.indices.size) + // Now we'll verify that execution of both monitors works + indexDoc(testSourceIndex, "3", testDoc) + // Exec Monitor #1 + executeMonitorResponse = executeMonitor(monitor, monitorResponse.id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + refreshIndex(AlertIndices.ALERT_INDEX) + alerts = searchAlerts(monitorResponse.id) + Assert.assertTrue(alerts != null) + Assert.assertTrue(alerts.size == 2) + // Exec Monitor #2 + executeMonitorResponse = executeMonitor(monitor, monitorResponse2.id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + refreshIndex(AlertIndices.ALERT_INDEX) + alerts = searchAlerts(monitorResponse2.id) + Assert.assertTrue(alerts != null) + Assert.assertTrue(alerts.size == 2) + } + + /** + * 1. Create monitor with input source_index with 9000 fields in mappings - only one such index fits in the queryIndex + * 2. Update monitor and change input source_index to a new one with 9000 fields in mappings + * 3. Expect queryIndex rollover resulting in 2 backing indices + * 4. Delete monitor and expect that all backing indices are deleted + * */ + fun `test updating monitor no execution queryIndex rolling over`() { + val testSourceIndex1 = "test_source_index1" + val testSourceIndex2 = "test_source_index2" + createIndex(testSourceIndex1, Settings.builder().put("index.mapping.total_fields.limit", "10000").build()) + createIndex(testSourceIndex2, Settings.builder().put("index.mapping.total_fields.limit", "10000").build()) + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex1), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + // This doc should create close to 10000 (limit) fields in the index mapping. 
It's easier to add mappings like this than via the api + val docPayload: StringBuilder = StringBuilder(100000) + docPayload.append("{") + for (i in 1..9000) { + docPayload.append(""" "id$i":$i,""") + } + docPayload.append("\"test_field\" : \"us-west-2\" }") + // Indexing docs here as an easier means to set index mappings + indexDoc(testSourceIndex1, "1", docPayload.toString()) + indexDoc(testSourceIndex2, "1", docPayload.toString()) + // Create monitor + var monitorResponse = createMonitor(monitor) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + + // Update monitor and change input + val updatedMonitor = monitor.copy( + inputs = listOf( + DocLevelMonitorInput("description", listOf(testSourceIndex2), listOf(docQuery)) + ) + ) + updateMonitor(updatedMonitor, updatedMonitor.id) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + + // Expect queryIndex to rollover after setting new source_index with close to limit amount of fields in mappings + var getIndexResponse: GetIndexResponse = + client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() + assertEquals(2, getIndexResponse.indices.size) + + deleteMonitor(updatedMonitor.id) + waitUntil { + getIndexResponse = + client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() + return@waitUntil getIndexResponse.indices.isEmpty() + } + assertEquals(0, getIndexResponse.indices.size) + } + + fun `test queryIndex gets increased max fields in mappings`() { + val testSourceIndex = "test_source_index" + createIndex(testSourceIndex, Settings.builder().put("index.mapping.total_fields.limit", "10000").build()) + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + // This doc should create close to 10000 (limit) fields in the index mapping. 
It's easier to add mappings like this than via the api + val docPayload: StringBuilder = StringBuilder(100000) + docPayload.append("{") + for (i in 1..9998) { + docPayload.append(""" "id$i":$i,""") + } + docPayload.append("\"test_field\" : \"us-west-2\" }") + // Indexing docs here as an easier means to set index mappings + indexDoc(testSourceIndex, "1", docPayload.toString()) + // Create monitor + var monitorResponse = createMonitor(monitor) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + + // Expect a single queryIndex whose total fields limit was increased to fit the source index mappings + var getIndexResponse: GetIndexResponse = + client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() + assertEquals(1, getIndexResponse.indices.size) + val field_max_limit = getIndexResponse + .getSetting(DOC_LEVEL_QUERIES_INDEX + "-000001", MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.key).toInt() + + assertEquals(10000 + DocLevelMonitorQueries.QUERY_INDEX_BASE_FIELDS_COUNT, field_max_limit) + + deleteMonitor(monitorResponse.id) + waitUntil { + getIndexResponse = + client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() + return@waitUntil getIndexResponse.indices.isEmpty() + } + assertEquals(0, getIndexResponse.indices.size) + } + + fun `test queryIndex bwc when index was not an alias`() { + createIndex(DOC_LEVEL_QUERIES_INDEX, Settings.builder().put("index.hidden", true).build()) + assertIndexExists(DOC_LEVEL_QUERIES_INDEX) + + val testSourceIndex = "test_source_index" + createIndex(testSourceIndex, Settings.EMPTY) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + // A minimal doc payload; the mapping field count is irrelevant for this bwc test + val docPayload = "{\"test_field\" : \"us-west-2\" }" + // Create monitor + try { + var monitorResponse = createMonitor(monitor) + indexDoc(testSourceIndex, "1", docPayload) + var executeMonitorResponse = executeMonitor(monitor, monitorResponse!!.id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + refreshIndex(AlertIndices.ALERT_INDEX) + val alerts = searchAlerts(monitorResponse.id) + Assert.assertTrue(alerts != null) + Assert.assertTrue(alerts.size == 1) + // check if DOC_LEVEL_QUERIES_INDEX alias exists + assertAliasExists(DOC_LEVEL_QUERIES_INDEX) + } catch (e: Exception) { + fail("Exception happened but it shouldn't!") + } + } + + // TODO - revisit single node integ tests setup to figure out why we cannot have multiple test classes implementing it + + fun `test execute workflow with custom alerts and finding index when bucket monitor is used in chained finding of doc monitor`() { + val query = QueryBuilders.rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + val compositeSources = listOf( + TermsValuesSourceBuilder("test_field_1").field("test_field_1") + ) + val customAlertsHistoryIndex = "custom_alerts_history_index" + val customAlertsHistoryIndexPattern = "" + val 
compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) + val input = SearchInput(indices = listOf(index), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) + // Bucket level monitor will reduce the matched doc ids to those that belong + // to a bucket containing more than 1 document after term grouping + val triggerScript = """ + params.docCount > 1 + """.trimIndent() + + var trigger = randomBucketLevelTrigger() + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "composite_agg", + filter = null, + ) + ) + val bucketCustomAlertsIndex = "custom_alerts_index" + val bucketCustomFindingsIndex = "custom_findings_index" + val bucketCustomFindingsIndexPattern = "custom_findings_index-1" + + val bucketLevelMonitorResponse = createMonitor( + randomBucketLevelMonitor( + inputs = listOf(input), + enabled = false, + triggers = listOf(trigger), + dataSources = DataSources( + findingsEnabled = true, + alertsIndex = bucketCustomAlertsIndex, + findingsIndex = bucketCustomFindingsIndex, + findingsIndexPattern = bucketCustomFindingsIndexPattern + ) + ) + )!! + + val docQuery1 = DocLevelQuery(query = "test_field_1:\"test_value_2\"", name = "1", fields = listOf()) + val docQuery2 = DocLevelQuery(query = "test_field_1:\"test_value_1\"", name = "2", fields = listOf()) + val docQuery3 = DocLevelQuery(query = "test_field_1:\"test_value_3\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1, docQuery2, docQuery3)) + val docTrigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val docCustomAlertsIndex = "custom_alerts_index" + val docCustomFindingsIndex = "custom_findings_index" + val docCustomFindingsIndexPattern = "custom_findings_index-1" + var docLevelMonitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(docTrigger), + dataSources = DataSources( + alertsIndex = docCustomAlertsIndex, + findingsIndex = docCustomFindingsIndex, + findingsIndexPattern = docCustomFindingsIndexPattern + ) + ) + + val docLevelMonitorResponse = createMonitor(docLevelMonitor)!! + // 1. bucketMonitor (chainedFinding = null) 2. docMonitor (chainedFinding = bucketMonitor) + var workflow = randomWorkflow( + monitorIds = listOf(bucketLevelMonitorResponse.id, docLevelMonitorResponse.id), + enabled = false, + auditDelegateMonitorAlerts = false + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + + // Creates 5 documents + insertSampleTimeSerializedData( + index, + listOf( + "test_value_1", + "test_value_1", // adding duplicate to verify aggregation + "test_value_2", + "test_value_2", + "test_value_3" + ) + ) + + val workflowId = workflowResponse.id + // 1. bucket level monitor should reduce the doc findings to 4 (1, 2, 3, 4) + // 2. Doc level monitor will match those 4 documents although it contains rules for matching all 5 documents (docQuery3 matches the fifth) + val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! 
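+ // Verify each delegate's run: the bucket monitor groups the docs into 3 composite buckets, and its findings cap which docs the chained doc monitor can match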
+ assertNotNull(executeWorkflowResponse) + + for (monitorRunResults in executeWorkflowResponse.workflowRunResult.monitorRunResults) { + if (bucketLevelMonitorResponse.monitor.name == monitorRunResults.monitorName) { + val searchResult = monitorRunResults.inputResults.results.first() + + @Suppress("UNCHECKED_CAST") + val buckets = searchResult.stringMap("aggregations")?.stringMap("composite_agg") + ?.get("buckets") as List<kotlin.collections.Map<String, Any>> + assertEquals("Incorrect search result", 3, buckets.size) + + val getAlertsResponse = assertAlerts(bucketLevelMonitorResponse.id, bucketCustomAlertsIndex, 2, workflowId) + assertAcknowledges(getAlertsResponse.alerts, bucketLevelMonitorResponse.id, 2) + assertFindings(bucketLevelMonitorResponse.id, bucketCustomFindingsIndex, 1, 4, listOf("1", "2", "3", "4")) + } else { + assertEquals(1, monitorRunResults.inputResults.results.size) + val values = monitorRunResults.triggerResults.values + assertEquals(1, values.size) + @Suppress("UNCHECKED_CAST") + val docLevelTrigger = values.iterator().next() as DocumentLevelTriggerRunResult + val triggeredDocIds = docLevelTrigger.triggeredDocs.map { it.split("|")[0] } + val expectedTriggeredDocIds = listOf("1", "2", "3", "4") + assertEquals(expectedTriggeredDocIds, triggeredDocIds.sorted()) + + val getAlertsResponse = assertAlerts(docLevelMonitorResponse.id, docCustomAlertsIndex, 4, workflowId) + assertAcknowledges(getAlertsResponse.alerts, docLevelMonitorResponse.id, 4) + assertFindings(docLevelMonitorResponse.id, docCustomFindingsIndex, 4, 4, listOf("1", "2", "3", "4")) + } + } + } + + fun `test execute workflow with custom alerts and finding index when doc level delegate is used in chained finding`() { + val docQuery1 = DocLevelQuery(query = "test_field_1:\"test_value_2\"", name = "1", fields = listOf()) + val docQuery2 = DocLevelQuery(query = "test_field_1:\"test_value_3\"", name = "2", fields = listOf()) + + var docLevelMonitor = randomDocumentLevelMonitor( + inputs = listOf(DocLevelMonitorInput("description", listOf(index), listOf(docQuery1, docQuery2))), + triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)), + dataSources = DataSources( + alertsIndex = "custom_alerts_index", + findingsIndex = "custom_findings_index", + findingsIndexPattern = "custom_findings_index-1" + ) + ) + + val docLevelMonitorResponse = createMonitor(docLevelMonitor)!! 
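+ // Next delegate: a bucket level monitor over the same index, grouping documents on test_field_1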
+ + val query = QueryBuilders.rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + val compositeSources = listOf( + TermsValuesSourceBuilder("test_field_1").field("test_field_1") + ) + val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) + val input = SearchInput(indices = listOf(index), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) + // Bucket level monitor will reduce the matched doc ids to those that belong to a bucket containing more than 1 document after term grouping + val triggerScript = """ + params.docCount > 1 + """.trimIndent() + + var trigger = randomBucketLevelTrigger() + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "composite_agg", + filter = null, + ) + ) + + val bucketLevelMonitorResponse = createMonitor( + randomBucketLevelMonitor( + inputs = listOf(input), + enabled = false, + triggers = listOf(trigger), + dataSources = DataSources( + findingsEnabled = true, + alertsIndex = "custom_alerts_index", + findingsIndex = "custom_findings_index", + findingsIndexPattern = "custom_findings_index-1" + ) + ) + )!! + + var docLevelMonitor1 = randomDocumentLevelMonitor( + // Match the documents with test_field_1: test_value_3 + inputs = listOf(DocLevelMonitorInput("description", listOf(index), listOf(docQuery2))), + triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)), + dataSources = DataSources( + findingsEnabled = true, + alertsIndex = "custom_alerts_index_1", + findingsIndex = "custom_findings_index_1", + findingsIndexPattern = "custom_findings_index_1-1" + ) + ) + + val docLevelMonitorResponse1 = createMonitor(docLevelMonitor1)!! + + val queryMonitorInput = SearchInput( + indices = listOf(index), + query = SearchSourceBuilder().query( + QueryBuilders + .rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + ) + ) + val queryTriggerScript = """ + return ctx.results[0].hits.hits.size() > 0 + """.trimIndent() + + val queryLevelTrigger = randomQueryLevelTrigger(condition = Script(queryTriggerScript)) + val queryMonitorResponse = + createMonitor(randomQueryLevelMonitor(inputs = listOf(queryMonitorInput), triggers = listOf(queryLevelTrigger)))!! + + // 1. docMonitor (chainedFinding = null) 2. bucketMonitor (chainedFinding = docMonitor) 3. docMonitor (chainedFinding = bucketMonitor) 4. queryMonitor (chainedFinding = docMonitor 3) + var workflow = randomWorkflow( + monitorIds = listOf( + docLevelMonitorResponse.id, + bucketLevelMonitorResponse.id, + docLevelMonitorResponse1.id, + queryMonitorResponse.id + ), + auditDelegateMonitorAlerts = false + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + + // Creates 6 documents + insertSampleTimeSerializedData( + index, + listOf( + "test_value_1", + "test_value_1", // adding duplicate to verify aggregation + "test_value_2", + "test_value_2", + "test_value_3", + "test_value_3" + ) + ) + + val workflowId = workflowResponse.id + // 1. Doc level monitor should reduce the doc findings to 4 (3 - test_value_2, 4 - test_value_2, 5 - test_value_3, 6 - test_value_3) + // 2. 
Bucket level monitor will fetch the docs from the current execution's findings, although its rules match documents which have test_value_2 and test_value_3 + val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! + assertNotNull(executeWorkflowResponse) + + for (monitorRunResults in executeWorkflowResponse.workflowRunResult.monitorRunResults) { + when (monitorRunResults.monitorName) { + // Verify first doc level monitor execution, alerts and findings + docLevelMonitorResponse.monitor.name -> { + assertEquals(1, monitorRunResults.inputResults.results.size) + val values = monitorRunResults.triggerResults.values + assertEquals(1, values.size) + @Suppress("UNCHECKED_CAST") + val docLevelTrigger = values.iterator().next() as DocumentLevelTriggerRunResult + val triggeredDocIds = docLevelTrigger.triggeredDocs.map { it.split("|")[0] } + val expectedTriggeredDocIds = listOf("3", "4", "5", "6") + assertEquals(expectedTriggeredDocIds, triggeredDocIds.sorted()) + + val getAlertsResponse = + assertAlerts(docLevelMonitorResponse.id, docLevelMonitorResponse.monitor.dataSources.alertsIndex, 4, workflowId) + assertAcknowledges(getAlertsResponse.alerts, docLevelMonitorResponse.id, 4) + assertFindings( + docLevelMonitorResponse.id, + docLevelMonitorResponse.monitor.dataSources.findingsIndex, + 4, + 4, + listOf("3", "4", "5", "6") + ) + } + // Verify second bucket level monitor execution, alerts and findings + bucketLevelMonitorResponse.monitor.name -> { + val searchResult = monitorRunResults.inputResults.results.first() + + @Suppress("UNCHECKED_CAST") + val buckets = + searchResult + .stringMap("aggregations")?.stringMap("composite_agg") + ?.get("buckets") as List<kotlin.collections.Map<String, Any>> + assertEquals("Incorrect search result", 2, buckets.size) + + val getAlertsResponse = + assertAlerts( + bucketLevelMonitorResponse.id, + bucketLevelMonitorResponse.monitor.dataSources.alertsIndex, + 2, + workflowId + ) + assertAcknowledges(getAlertsResponse.alerts, bucketLevelMonitorResponse.id, 2) + assertFindings( + bucketLevelMonitorResponse.id, + bucketLevelMonitorResponse.monitor.dataSources.findingsIndex, + 1, + 4, + listOf("3", "4", "5", "6") + ) + } + // Verify third doc level monitor execution, alerts and findings + docLevelMonitorResponse1.monitor.name -> { + assertEquals(1, monitorRunResults.inputResults.results.size) + val values = monitorRunResults.triggerResults.values + assertEquals(1, values.size) + @Suppress("UNCHECKED_CAST") + val docLevelTrigger = values.iterator().next() as DocumentLevelTriggerRunResult + val triggeredDocIds = docLevelTrigger.triggeredDocs.map { it.split("|")[0] } + val expectedTriggeredDocIds = listOf("5", "6") + assertEquals(expectedTriggeredDocIds, triggeredDocIds.sorted()) + + val getAlertsResponse = + assertAlerts(docLevelMonitorResponse1.id, docLevelMonitorResponse1.monitor.dataSources.alertsIndex, 2, workflowId) + assertAcknowledges(getAlertsResponse.alerts, docLevelMonitorResponse1.id, 2) + assertFindings( + docLevelMonitorResponse1.id, + docLevelMonitorResponse1.monitor.dataSources.findingsIndex, + 2, + 2, + listOf("5", "6") + ) + } + // Verify fourth query level monitor execution + queryMonitorResponse.monitor.name -> { + assertEquals(1, monitorRunResults.inputResults.results.size) + val values = monitorRunResults.triggerResults.values + assertEquals(1, values.size) + @Suppress("UNCHECKED_CAST") + val totalHits = + ( + ( + monitorRunResults.inputResults.results[0]["hits"] as kotlin.collections.Map<String, Any> + )["total"] as kotlin.collections.Map<String, Any> + )["value"] + 
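// The query level monitor only sees the two documents surfaced by its chained delegate + 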
assertEquals(2, totalHits) + @Suppress("UNCHECKED_CAST") + val docIds = + ( + ( + monitorRunResults.inputResults.results[0]["hits"] as kotlin.collections.Map<String, Any> + )["hits"] as List<kotlin.collections.Map<String, String>> + ).map { it["_id"]!! } + assertEquals(listOf("5", "6"), docIds.sorted()) + } + } + } + } + + private fun assertAlerts( + monitorId: String, + customAlertsIndex: String, + alertSize: Int, + workflowId: String, + ): GetAlertsResponse { + val table = Table("asc", "id", null, alertSize, 0, "") + val getAlertsResponse = client() + .execute( + AlertingActions.GET_ALERTS_ACTION_TYPE, + GetAlertsRequest( + table, "ALL", "ALL", monitorId, customAlertsIndex, + workflowIds = listOf(workflowId) + ) + ) + .get() + assertTrue(getAlertsResponse != null) + assertTrue(getAlertsResponse.alerts.size == alertSize) + return getAlertsResponse + } + + fun `test execute workflow with custom alerts and finding index with doc level delegates`() { + val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customAlertsIndex1 = "custom_alerts_index" + val customFindingsIndex1 = "custom_findings_index" + val customFindingsIndexPattern1 = "custom_findings_index-1" + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger1), + dataSources = DataSources( + alertsIndex = customAlertsIndex1, + findingsIndex = customFindingsIndex1, + findingsIndexPattern = customFindingsIndexPattern1 + ) + ) + val monitorResponse = createMonitor(monitor1)!! + + val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) + val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2)) + val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customAlertsIndex2 = "custom_alerts_index_2" + val customFindingsIndex2 = "custom_findings_index_2" + val customFindingsIndexPattern2 = "custom_findings_index-2" + var monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput2), + triggers = listOf(trigger2), + dataSources = DataSources( + alertsIndex = customAlertsIndex2, + findingsIndex = customFindingsIndex2, + findingsIndexPattern = customFindingsIndexPattern2 + ) + ) + + val monitorResponse2 = createMonitor(monitor2)!! + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id, monitorResponse2.id), auditDelegateMonitorAlerts = false + ) + val workflowResponse = upsertWorkflow(workflow)!! 
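+ // The workflow chains monitor1 into monitor2, so monitor2 only evaluates documents from monitor1's findings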
+ val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + + var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 + val testDoc1 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16644, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "1", testDoc1) + + testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 and monitor2 + val testDoc2 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16645, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "2", testDoc2) + + testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Doesn't match + val testDoc3 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16645, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-east-1" + }""" + indexDoc(index, "3", testDoc3) + + val workflowId = workflowResponse.id + val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! + val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults + assertEquals(2, monitorsRunResults.size) + + assertEquals(monitor1.name, monitorsRunResults[0].monitorName) + assertEquals(1, monitorsRunResults[0].triggerResults.size) + + Assert.assertEquals(monitor2.name, monitorsRunResults[1].monitorName) + Assert.assertEquals(1, monitorsRunResults[1].triggerResults.size) + + val getAlertsResponse = assertAlerts(monitorResponse.id, customAlertsIndex1, alertSize = 2, workflowId = workflowId) + assertAcknowledges(getAlertsResponse.alerts, monitorResponse.id, 2) + assertFindings(monitorResponse.id, customFindingsIndex1, 2, 2, listOf("1", "2")) + + val getAlertsResponse2 = assertAlerts(monitorResponse2.id, customAlertsIndex2, alertSize = 1, workflowId = workflowId) + assertAcknowledges(getAlertsResponse2.alerts, monitorResponse2.id, 1) + assertFindings(monitorResponse2.id, customFindingsIndex2, 1, 1, listOf("2")) + } + + fun `test execute workflow with multiple monitors in chained monitor findings of single monitor`() { + val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customAlertsIndex1 = "custom_alerts_index" + val customFindingsIndex1 = "custom_findings_index" + val customFindingsIndexPattern1 = "custom_findings_index-1" + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger1), + enabled = false, + dataSources = DataSources( + alertsIndex = customAlertsIndex1, + findingsIndex = customFindingsIndex1, + findingsIndexPattern = customFindingsIndexPattern1 + ) + ) + val monitorResponse = createMonitor(monitor1)!! 
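+ // monitor2 and monitor3 below reuse monitor1's custom alerts and findings indices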
+ + val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) + val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2)) + val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput2), + triggers = listOf(trigger2), + enabled = false, + dataSources = DataSources( + alertsIndex = customAlertsIndex1, + findingsIndex = customFindingsIndex1, + findingsIndexPattern = customFindingsIndexPattern1 + ) + ) + + val monitorResponse2 = createMonitor(monitor2)!! + val docQuery3 = DocLevelQuery(query = "_id:*", name = "5", fields = listOf()) + val docLevelInput3 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery3)) + val trigger3 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + var monitor3 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput3), + triggers = listOf(trigger3), + enabled = false, + dataSources = DataSources( + alertsIndex = customAlertsIndex1, + findingsIndex = customFindingsIndex1, + findingsIndexPattern = customFindingsIndexPattern1 + ) + ) + + val monitorResponse3 = createMonitor(monitor3)!! + val d1 = Delegate(1, monitorResponse.id) + val d2 = Delegate(2, monitorResponse2.id) + val d3 = Delegate( + 3, monitorResponse3.id, + ChainedMonitorFindings(null, listOf(monitorResponse.id, monitorResponse2.id)) + ) + var workflow = Workflow( + id = "", + name = "test", + enabled = false, + schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), + lastUpdateTime = Instant.now(), + enabledTime = null, + workflowType = Workflow.WorkflowType.COMPOSITE, + user = randomUser(), + inputs = listOf(CompositeInput(org.opensearch.commons.alerting.model.Sequence(listOf(d1, d2, d3)))), + version = -1L, + schemaVersion = 0, + triggers = emptyList(), + auditDelegateMonitorAlerts = false + + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + + var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 + val testDoc1 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16644, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "1", testDoc1) + + testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 and monitor2 + val testDoc2 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16645, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "2", testDoc2) + + testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor2 only (test_field_1 is us-east-1) + val testDoc3 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16645, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-east-1" + }""" + indexDoc(index, "3", testDoc3) + + val workflowId = workflowResponse.id + val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! 
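+ // monitor3 is chained on the findings of monitor1 and monitor2, so it should produce findings for all three docs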
+ val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults + assertEquals(3, monitorsRunResults.size) + assertFindings(monitorResponse.id, customFindingsIndex1, 2, 2, listOf("1", "2")) + assertFindings(monitorResponse2.id, customFindingsIndex1, 2, 2, listOf("2", "3")) + assertFindings(monitorResponse3.id, customFindingsIndex1, 3, 3, listOf("1", "2", "3")) + } + + fun `test execute workflows with shared doc level monitor delegate`() { + val docQuery = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customAlertsIndex = "custom_alerts_index" + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "custom_findings_index-1" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + alertsIndex = customAlertsIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + val monitorResponse = createMonitor(monitor)!! + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id), + auditDelegateMonitorAlerts = false + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + + var workflow1 = randomWorkflow( + monitorIds = listOf(monitorResponse.id), + auditDelegateMonitorAlerts = false + ) + val workflowResponse1 = upsertWorkflow(workflow1)!! + val workflowById1 = searchWorkflow(workflowResponse1.id) + assertNotNull(workflowById1) + + var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 + val testDoc1 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16644, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "1", testDoc1) + + testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + val testDoc2 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16645, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "2", testDoc2) + + val workflowId = workflowResponse.id + val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! 
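+ // First workflow execution: both indexed docs match the shared delegate, producing 2 alerts and 2 findings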
+ val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults + assertEquals(1, monitorsRunResults.size) + + assertEquals(monitor.name, monitorsRunResults[0].monitorName) + assertEquals(1, monitorsRunResults[0].triggerResults.size) + + // Assert but do not ack the alerts (in order to verify later that all the alerts are generated) + assertAlerts(monitorResponse.id, customAlertsIndex, alertSize = 2, workflowId) + assertFindings(monitorResponse.id, customFindingsIndex, 2, 2, listOf("1", "2")) + // Verify workflow and monitor delegate metadata + val workflowMetadata = searchWorkflowMetadata(id = workflowId) + assertNotNull("Workflow metadata not initialized", workflowMetadata) + assertEquals( + "Workflow metadata execution id not correct", + executeWorkflowResponse.workflowRunResult.executionId, + workflowMetadata!!.latestExecutionId + ) + val monitorMetadataId = getDelegateMonitorMetadataId(workflowMetadata, monitorResponse) + val monitorMetadata = searchMonitorMetadata(monitorMetadataId) + assertNotNull(monitorMetadata) + + // Execute second workflow + val workflowId1 = workflowResponse1.id + val executeWorkflowResponse1 = executeWorkflow(workflowById1, workflowId1, false)!! + val monitorsRunResults1 = executeWorkflowResponse1.workflowRunResult.monitorRunResults + assertEquals(1, monitorsRunResults1.size) + + assertEquals(monitor.name, monitorsRunResults1[0].monitorName) + assertEquals(1, monitorsRunResults1[0].triggerResults.size) + + val getAlertsResponse = assertAlerts(monitorResponse.id, customAlertsIndex, alertSize = 2, workflowId1) + assertAcknowledges(getAlertsResponse.alerts, monitorResponse.id, 2) + assertFindings(monitorResponse.id, customFindingsIndex, 4, 4, listOf("1", "2", "1", "2")) + // Verify workflow and monitor delegate metadata + val workflowMetadata1 = searchWorkflowMetadata(id = workflowId1) + assertNotNull("Workflow metadata not initialized", workflowMetadata1) + assertEquals( + "Workflow metadata execution id not correct", + executeWorkflowResponse1.workflowRunResult.executionId, + workflowMetadata1!!.latestExecutionId + ) + val monitorMetadataId1 = getDelegateMonitorMetadataId(workflowMetadata1, monitorResponse) + val monitorMetadata1 = searchMonitorMetadata(monitorMetadataId1) + assertNotNull(monitorMetadata1) + // Verify that two different doc level monitor metadata entries have been created for the two workflows + assertTrue("Different monitor is used in workflows", monitorMetadata!!.monitorId == monitorMetadata1!!.monitorId) + assertTrue(monitorMetadata.id != monitorMetadata1.id) + } + + fun `test execute workflows with shared doc level monitor delegate updating delegate datasource`() { + val docQuery = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val monitorResponse = createMonitor(monitor)!! + + val workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id), auditDelegateMonitorAlerts = false + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + + val workflow1 = randomWorkflow( + monitorIds = listOf(monitorResponse.id), auditDelegateMonitorAlerts = false + ) + val workflowResponse1 = upsertWorkflow(workflow1)!! 
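+ // Both workflows delegate to the same monitor; each execution should create its own monitor metadata entry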
+ val workflowById1 = searchWorkflow(workflowResponse1.id) + assertNotNull(workflowById1) + + var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 + val testDoc1 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16644, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "1", testDoc1) + + testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + val testDoc2 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16645, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "2", testDoc2) + + val workflowId = workflowResponse.id + val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! + val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults + assertEquals(1, monitorsRunResults.size) + + assertEquals(monitor.name, monitorsRunResults[0].monitorName) + assertEquals(1, monitorsRunResults[0].triggerResults.size) + + assertAlerts(monitorResponse.id, AlertIndices.ALERT_INDEX, alertSize = 2, workflowId) + assertFindings(monitorResponse.id, AlertIndices.FINDING_HISTORY_WRITE_INDEX, 2, 2, listOf("1", "2")) + // Verify workflow and monitor delegate metadata + val workflowMetadata = searchWorkflowMetadata(id = workflowId) + assertNotNull("Workflow metadata not initialized", workflowMetadata) + assertEquals( + "Workflow metadata execution id not correct", + executeWorkflowResponse.workflowRunResult.executionId, + workflowMetadata!!.latestExecutionId + ) + val monitorMetadataId = getDelegateMonitorMetadataId(workflowMetadata, monitorResponse) + val monitorMetadata = searchMonitorMetadata(monitorMetadataId) + assertNotNull(monitorMetadata) + + val customAlertsIndex = "custom_alerts_index" + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "custom_findings_index-1" + val monitorId = monitorResponse.id + updateMonitor( + monitor = monitor.copy( + dataSources = DataSources( + alertsIndex = customAlertsIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ), + monitorId + ) + + // Execute second workflow + val workflowId1 = workflowResponse1.id + val executeWorkflowResponse1 = executeWorkflow(workflowById1, workflowId1, false)!! 
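+        // After the datasource update, the alerts and findings of this second run should land in the custom indices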
+        val monitorsRunResults1 = executeWorkflowResponse1.workflowRunResult.monitorRunResults
+        assertEquals(1, monitorsRunResults1.size)
+
+        assertEquals(monitor.name, monitorsRunResults1[0].monitorName)
+        assertEquals(1, monitorsRunResults1[0].triggerResults.size)
+
+        // Verify alerts for the custom index
+        val getAlertsResponse = assertAlerts(monitorResponse.id, customAlertsIndex, alertSize = 2, workflowId1)
+        assertAcknowledges(getAlertsResponse.alerts, monitorResponse.id, 2)
+        assertFindings(monitorResponse.id, customFindingsIndex, 2, 2, listOf("1", "2"))
+
+        // Verify workflow and monitor delegate metadata
+        val workflowMetadata1 = searchWorkflowMetadata(id = workflowId1)
+        assertNotNull("Workflow metadata not initialized", workflowMetadata1)
+        assertEquals(
+            "Workflow metadata execution id not correct",
+            executeWorkflowResponse1.workflowRunResult.executionId,
+            workflowMetadata1!!.latestExecutionId
+        )
+        val monitorMetadataId1 = getDelegateMonitorMetadataId(workflowMetadata1, monitorResponse)
+        val monitorMetadata1 = searchMonitorMetadata(monitorMetadataId1)
+        assertNotNull(monitorMetadata1)
+        // Verify that a separate monitor metadata document was created for each of the two workflows
+        assertTrue("Expected the same delegate monitor to be used in both workflows", monitorMetadata!!.monitorId == monitorMetadata1!!.monitorId)
+        assertTrue(monitorMetadata.id != monitorMetadata1.id)
+    }
+
+    fun `test execute workflow verify workflow metadata`() {
+        val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf())
+        val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1))
+        val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        var monitor1 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput1),
+            triggers = listOf(trigger1)
+        )
+        val monitorResponse = createMonitor(monitor1)!!
+
+        val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf())
+        val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2))
+        val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        var monitor2 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput2),
+            triggers = listOf(trigger2),
+        )
+
+        val monitorResponse2 = createMonitor(monitor2)!!
+
+        var workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id, monitorResponse2.id)
+        )
+        val workflowResponse = upsertWorkflow(workflow)!!
+        val workflowById = searchWorkflow(workflowResponse.id)
+        assertNotNull(workflowById)
+
+        var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS))
+        // Matches monitor1
+        val testDoc1 = """{
+            "message" : "This is an error from IAD region",
+            "source.ip.v6.v2" : 16644,
+            "test_strict_date_time" : "$testTime",
+            "test_field_1" : "us-west-2"
+        }"""
+        indexDoc(index, "1", testDoc1)
+        // First execution
+        val workflowId = workflowResponse.id
+        val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!!
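+        // The first execution should create the workflow metadata and a metadata document for each delegate monitor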
+        val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults
+        assertEquals(2, monitorsRunResults.size)
+
+        val workflowMetadata = searchWorkflowMetadata(id = workflowId)
+        assertNotNull("Workflow metadata not initialized", workflowMetadata)
+        assertEquals(
+            "Workflow metadata execution id not correct",
+            executeWorkflowResponse.workflowRunResult.executionId,
+            workflowMetadata!!.latestExecutionId
+        )
+        val monitorMetadataId = getDelegateMonitorMetadataId(workflowMetadata, monitorResponse)
+        val monitorMetadata = searchMonitorMetadata(monitorMetadataId)
+        assertNotNull(monitorMetadata)
+
+        // Second execution
+        val executeWorkflowResponse1 = executeWorkflow(workflowById, workflowId, false)!!
+        val monitorsRunResults1 = executeWorkflowResponse1.workflowRunResult.monitorRunResults
+        assertEquals(2, monitorsRunResults1.size)
+
+        val workflowMetadata1 = searchWorkflowMetadata(id = workflowId)
+        assertNotNull("Workflow metadata not initialized", workflowMetadata1)
+        assertEquals(
+            "Workflow metadata execution id not correct",
+            executeWorkflowResponse1.workflowRunResult.executionId,
+            workflowMetadata1!!.latestExecutionId
+        )
+        // The same monitor metadata document should be reused across executions of the same workflow
+        val monitorMetadataId1 = getDelegateMonitorMetadataId(workflowMetadata1, monitorResponse)
+        assertTrue(monitorMetadataId == monitorMetadataId1)
+        val monitorMetadata1 = searchMonitorMetadata(monitorMetadataId1)
+        assertNotNull(monitorMetadata1)
+    }
+
+    fun `test execute workflow dryrun verify workflow metadata not created`() {
+        val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf())
+        val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1))
+        val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        var monitor1 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput1),
+            triggers = listOf(trigger1)
+        )
+        val monitorResponse = createMonitor(monitor1)!!
+
+        val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf())
+        val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2))
+        val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        var monitor2 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput2),
+            triggers = listOf(trigger2),
+        )
+
+        val monitorResponse2 = createMonitor(monitor2)!!
+
+        var workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id, monitorResponse2.id)
+        )
+        val workflowResponse = upsertWorkflow(workflow)!!
+        val workflowById = searchWorkflow(workflowResponse.id)
+        assertNotNull(workflowById)
+
+        var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS))
+        // Matches monitor1
+        val testDoc1 = """{
+            "message" : "This is an error from IAD region",
+            "source.ip.v6.v2" : 16644,
+            "test_strict_date_time" : "$testTime",
+            "test_field_1" : "us-west-2"
+        }"""
+        indexDoc(index, "1", testDoc1)
+        // First execution, as a dry run
+        val workflowId = workflowResponse.id
+        val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, true)
+
+        assertNotNull("Workflow run result is null", executeWorkflowResponse)
+        val monitorsRunResults = executeWorkflowResponse!!.workflowRunResult.monitorRunResults
+        assertEquals(2, monitorsRunResults.size)
+
+        var exception: java.lang.Exception? = null
+        try {
+            searchWorkflowMetadata(id = workflowId)
+        } catch (ex: java.lang.Exception) {
+            exception = ex
+            assertTrue(exception is java.util.NoSuchElementException)
+        }
+        // A dry run must not persist workflow metadata, so the lookup above is expected to throw
+        assertNotNull("Expected workflow metadata lookup to fail after a dry run", exception)
+    }
+
+    fun `test execute workflow with custom alerts and finding index with bucket and doc monitor bucket monitor used as chained finding`() {
+        val query = QueryBuilders.rangeQuery("test_strict_date_time")
+            .gt("{{period_end}}||-10d")
+            .lte("{{period_end}}")
+            .format("epoch_millis")
+        val compositeSources = listOf(
+            TermsValuesSourceBuilder("test_field_1").field("test_field_1")
+        )
+        val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources)
+        val input = SearchInput(indices = listOf(index), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg))
+        // The bucket-level monitor reduces the set of matched doc ids to those in buckets containing more than one document after term grouping
+        val triggerScript = """
+            params.docCount > 1
+        """.trimIndent()
+
+        var trigger = randomBucketLevelTrigger()
+        trigger = trigger.copy(
+            bucketSelector = BucketSelectorExtAggregationBuilder(
+                name = trigger.id,
+                bucketsPathsMap = mapOf("docCount" to "_count"),
+                script = Script(triggerScript),
+                parentBucketPath = "composite_agg",
+                filter = null,
+            )
+        )
+        val bucketCustomAlertsIndex = "custom_alerts_index"
+        val bucketCustomFindingsIndex = "custom_findings_index"
+        val bucketCustomFindingsIndexPattern = "custom_findings_index-1"
+
+        val bucketLevelMonitorResponse = createMonitor(
+            randomBucketLevelMonitor(
+                inputs = listOf(input),
+                enabled = false,
+                triggers = listOf(trigger),
+                dataSources = DataSources(
+                    findingsEnabled = true,
+                    alertsIndex = bucketCustomAlertsIndex,
+                    findingsIndex = bucketCustomFindingsIndex,
+                    findingsIndexPattern = bucketCustomFindingsIndexPattern
+                )
+            )
+        )!!
+
+        val docQuery1 = DocLevelQuery(query = "test_field_1:\"test_value_2\"", name = "1", fields = listOf())
+        val docQuery2 = DocLevelQuery(query = "test_field_1:\"test_value_1\"", name = "2", fields = listOf())
+        val docQuery3 = DocLevelQuery(query = "test_field_1:\"test_value_3\"", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1, docQuery2, docQuery3))
+        val docTrigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val docCustomAlertsIndex = "custom_alerts_index"
+        val docCustomFindingsIndex = "custom_findings_index"
+        val docCustomFindingsIndexPattern = "custom_findings_index-1"
+        var docLevelMonitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(docTrigger),
+            dataSources = DataSources(
+                alertsIndex = docCustomAlertsIndex,
+                findingsIndex = docCustomFindingsIndex,
+                findingsIndexPattern = docCustomFindingsIndexPattern
+            )
+        )
+
+        val docLevelMonitorResponse = createMonitor(docLevelMonitor)!!
+        // 1. bucketMonitor (chainedFinding = null) 2. docMonitor (chainedFinding = bucketMonitor)
+        var workflow = randomWorkflow(
+            monitorIds = listOf(bucketLevelMonitorResponse.id, docLevelMonitorResponse.id), auditDelegateMonitorAlerts = false
+        )
+        val workflowResponse = upsertWorkflow(workflow)!!
+        val workflowById = searchWorkflow(workflowResponse.id)
+        assertNotNull(workflowById)
+
+        // Creates 5 documents
+        insertSampleTimeSerializedData(
+            index,
+            listOf(
+                "test_value_1",
+                "test_value_1", // adding duplicate to verify aggregation
+                "test_value_2",
+                "test_value_2",
+                "test_value_3"
+            )
+        )
+
+        val workflowId = workflowResponse.id
+        // 1. The bucket-level monitor should reduce the doc findings to 4 (1, 2, 3, 4)
+        // 2. The doc-level monitor will match those 4 documents although its rules would match all 5 documents (docQuery3 matches the fifth)
+        val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!!
+        assertNotNull(executeWorkflowResponse)
+
+        for (monitorRunResults in executeWorkflowResponse.workflowRunResult.monitorRunResults) {
+            if (bucketLevelMonitorResponse.monitor.name == monitorRunResults.monitorName) {
+                val searchResult = monitorRunResults.inputResults.results.first()
+
+                @Suppress("UNCHECKED_CAST")
+                val buckets = searchResult.stringMap("aggregations")?.stringMap("composite_agg")
+                    ?.get("buckets") as List<kotlin.collections.Map<String, Any>>
+                assertEquals("Incorrect search result", 3, buckets.size)
+
+                val getAlertsResponse = assertAlerts(bucketLevelMonitorResponse.id, bucketCustomAlertsIndex, alertSize = 2, workflowId)
+                assertAcknowledges(getAlertsResponse.alerts, bucketLevelMonitorResponse.id, 2)
+                assertFindings(bucketLevelMonitorResponse.id, bucketCustomFindingsIndex, 1, 4, listOf("1", "2", "3", "4"))
+            } else {
+                assertEquals(1, monitorRunResults.inputResults.results.size)
+                val values = monitorRunResults.triggerResults.values
+                assertEquals(1, values.size)
+                @Suppress("UNCHECKED_CAST")
+                val docLevelTrigger = values.iterator().next() as DocumentLevelTriggerRunResult
+                val triggeredDocIds = docLevelTrigger.triggeredDocs.map { it.split("|")[0] }
+                val expectedTriggeredDocIds = listOf("1", "2", "3", "4")
+                assertEquals(expectedTriggeredDocIds, triggeredDocIds.sorted())
+
+                val getAlertsResponse = assertAlerts(docLevelMonitorResponse.id, docCustomAlertsIndex, alertSize = 4, workflowId)
+                assertAcknowledges(getAlertsResponse.alerts, docLevelMonitorResponse.id, 4)
+                assertFindings(docLevelMonitorResponse.id, docCustomFindingsIndex, 4, 4, listOf("1", "2", "3", "4"))
+            }
+        }
+    }
+
+    fun `test chained alerts for bucket level monitors generating audit alerts custom alerts index`() {
+        val customAlertIndex = "custom-alert-index"
+        val customAlertHistoryIndex = "custom-alert-history-index"
+        val customAlertHistoryIndexPattern = ""
+        val query = QueryBuilders.rangeQuery("test_strict_date_time")
+            .gt("{{period_end}}||-10d")
+            .lte("{{period_end}}")
+            .format("epoch_millis")
+        val compositeSources = listOf(
+            TermsValuesSourceBuilder("test_field_1").field("test_field_1")
+        )
+        val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources)
+        val input = SearchInput(indices = listOf(index), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg))
+        // The bucket-level monitor reduces the set of matched doc ids to those in buckets containing more than one document after term grouping
+        val triggerScript = """
+            params.docCount > 1
+        """.trimIndent()
+
+        var trigger = randomBucketLevelTrigger()
+        trigger = trigger.copy(
+            bucketSelector = BucketSelectorExtAggregationBuilder(
+                name = trigger.id,
+                bucketsPathsMap = mapOf("docCount" to "_count"),
+                script = Script(triggerScript),
+                parentBucketPath = "composite_agg",
+                filter = null,
+            )
+        )
+
+        val bucketLevelMonitorResponse = createMonitor(
+            randomBucketLevelMonitor(
+                inputs = listOf(input),
+                enabled = false,
+                triggers = listOf(trigger),
+                dataSources = DataSources(
+                    alertsIndex = customAlertIndex,
+                    alertsHistoryIndexPattern = customAlertHistoryIndexPattern,
+                    alertsHistoryIndex = customAlertHistoryIndex
+                )
+            )
+        )!!
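+
+        // A second, identical bucket-level monitor; both delegates write audit alerts to the same custom history index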
+        val bucketLevelMonitorResponse2 = createMonitor(
+            randomBucketLevelMonitor(
+                inputs = listOf(input),
+                enabled = false,
+                triggers = listOf(trigger),
+                dataSources = DataSources(
+                    alertsIndex = customAlertIndex,
+                    alertsHistoryIndexPattern = customAlertHistoryIndexPattern,
+                    alertsHistoryIndex = customAlertHistoryIndex
+                )
+            )
+        )!!
+
+        val andTrigger = randomChainedAlertTrigger(
+            name = "1And2",
+            condition = Script("monitor[id=${bucketLevelMonitorResponse.id}] && monitor[id=${bucketLevelMonitorResponse2.id}]")
+        )
+        // Workflow chaining the two bucket-level monitors, combined by the AND chained-alert trigger
+        var workflow = randomWorkflow(
+            monitorIds = listOf(bucketLevelMonitorResponse.id, bucketLevelMonitorResponse2.id),
+            triggers = listOf(andTrigger)
+        )
+        val workflowResponse = upsertWorkflow(workflow)!!
+        val workflowById = searchWorkflow(workflowResponse.id)
+        assertNotNull(workflowById)
+
+        // Creates 5 documents
+        insertSampleTimeSerializedData(
+            index,
+            listOf(
+                "test_value_1",
+                "test_value_1", // adding duplicate to verify aggregation
+                "test_value_2",
+                "test_value_2",
+                "test_value_3"
+            )
+        )
+
+        val workflowId = workflowResponse.id
+        // Both bucket-level monitors should trigger on the buckets containing more than one document (test_value_1 and test_value_2)
+        val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!!
+        assertNotNull(executeWorkflowResponse)
+
+        Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults.isNotEmpty())
+        Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults.containsKey(andTrigger.id))
+        Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults[andTrigger.id]!!.triggered)
+
+        val auditStateAlerts = getAuditStateAlerts(
+            alertsIndex = customAlertHistoryIndex,
+            monitorId = bucketLevelMonitorResponse.id,
+            executionId = executeWorkflowResponse.workflowRunResult.executionId
+        )
+        Assert.assertEquals(2, auditStateAlerts.size)
+
+        val auditStateAlerts2 = getAuditStateAlerts(
+            alertsIndex = customAlertHistoryIndex,
+            monitorId = bucketLevelMonitorResponse2.id,
+            executionId = executeWorkflowResponse.workflowRunResult.executionId
+        )
+        Assert.assertEquals(2, auditStateAlerts2.size)
+    }
+
+    fun `test chained alerts for bucket level monitors generating audit alerts`() {
+        val query = QueryBuilders.rangeQuery("test_strict_date_time")
+            .gt("{{period_end}}||-10d")
+            .lte("{{period_end}}")
+            .format("epoch_millis")
+        val compositeSources = listOf(
+            TermsValuesSourceBuilder("test_field_1").field("test_field_1")
+        )
+        val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources)
+        val input = SearchInput(indices = listOf(index), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg))
+        // The bucket-level monitor reduces the set of matched doc ids to those in buckets containing more than one document after term grouping
+        val triggerScript = """
+            params.docCount > 1
+        """.trimIndent()
+
+        var trigger = randomBucketLevelTrigger()
+        trigger = trigger.copy(
+            bucketSelector = BucketSelectorExtAggregationBuilder(
+                name = trigger.id,
+                bucketsPathsMap = mapOf("docCount" to "_count"),
+                script = Script(triggerScript),
+                parentBucketPath = "composite_agg",
+                filter = null,
+            )
+        )
+
+        val bucketLevelMonitorResponse = createMonitor(
+            randomBucketLevelMonitor(
+                inputs = listOf(input),
+                enabled = false,
+                triggers = listOf(trigger)
+            )
+        )!!
+
+        val bucketLevelMonitorResponse2 = createMonitor(
+            randomBucketLevelMonitor(
+                inputs = listOf(input),
+                enabled = false,
+                triggers = listOf(trigger)
+            )
+        )!!
+
+        val andTrigger = randomChainedAlertTrigger(
+            name = "1And2",
+            condition = Script("monitor[id=${bucketLevelMonitorResponse.id}] && monitor[id=${bucketLevelMonitorResponse2.id}]")
+        )
+        // Workflow chaining the two bucket-level monitors, combined by the AND chained-alert trigger
+        var workflow = randomWorkflow(
+            monitorIds = listOf(bucketLevelMonitorResponse.id, bucketLevelMonitorResponse2.id),
+            triggers = listOf(andTrigger)
+        )
+        val workflowResponse = upsertWorkflow(workflow)!!
+        val workflowById = searchWorkflow(workflowResponse.id)
+        assertNotNull(workflowById)
+
+        // Creates 5 documents
+        insertSampleTimeSerializedData(
+            index,
+            listOf(
+                "test_value_1",
+                "test_value_1", // adding duplicate to verify aggregation
+                "test_value_2",
+                "test_value_2",
+                "test_value_3"
+            )
+        )
+
+        val workflowId = workflowResponse.id
+        // Both bucket-level monitors should trigger on the buckets containing more than one document (test_value_1 and test_value_2)
+        val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!!
+        assertNotNull(executeWorkflowResponse)
+
+        Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults.isNotEmpty())
+        Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults.containsKey(andTrigger.id))
+        Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults[andTrigger.id]!!.triggered)
+
+        val auditStateAlerts = getAuditStateAlerts(
+            alertsIndex = bucketLevelMonitorResponse.monitor.dataSources.alertsHistoryIndex,
+            monitorId = bucketLevelMonitorResponse.id,
+            executionId = executeWorkflowResponse.workflowRunResult.executionId
+        )
+        Assert.assertEquals(2, auditStateAlerts.size)
+
+        val auditStateAlerts2 = getAuditStateAlerts(
+            alertsIndex = bucketLevelMonitorResponse.monitor.dataSources.alertsHistoryIndex,
+            monitorId = bucketLevelMonitorResponse2.id,
+            executionId = executeWorkflowResponse.workflowRunResult.executionId
+        )
+        Assert.assertEquals(2, auditStateAlerts2.size)
+    }
+
+    fun `test execute with custom alerts and finding index with bucket and doc monitor when doc monitor is used in chained finding`() {
+        val docQuery1 = DocLevelQuery(query = "test_field_1:\"test_value_2\"", name = "1", fields = listOf())
+        val docQuery2 = DocLevelQuery(query = "test_field_1:\"test_value_3\"", name = "2", fields = listOf())
+
+        var docLevelMonitor = randomDocumentLevelMonitor(
+            inputs = listOf(DocLevelMonitorInput("description", listOf(index), listOf(docQuery1, docQuery2))),
+            triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)),
+            dataSources = DataSources(
+                alertsIndex = "custom_alerts_index",
+                findingsIndex = "custom_findings_index",
+                findingsIndexPattern = "custom_findings_index-1"
+            )
+        )
+
+        val docLevelMonitorResponse = createMonitor(docLevelMonitor)!!
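+
+        // Next delegate: a bucket-level monitor chained behind the doc-level monitor, aggregating on test_field_1 over the chained findings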
+        val query = QueryBuilders.rangeQuery("test_strict_date_time")
+            .gt("{{period_end}}||-10d")
+            .lte("{{period_end}}")
+            .format("epoch_millis")
+        val compositeSources = listOf(
+            TermsValuesSourceBuilder("test_field_1").field("test_field_1")
+        )
+        val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources)
+        val input = SearchInput(indices = listOf(index), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg))
+        // The bucket-level monitor reduces the set of matched doc ids to those in buckets containing more than one document after term grouping
+        val triggerScript = """
+            params.docCount > 1
+        """.trimIndent()
+
+        var trigger = randomBucketLevelTrigger()
+        trigger = trigger.copy(
+            bucketSelector = BucketSelectorExtAggregationBuilder(
+                name = trigger.id,
+                bucketsPathsMap = mapOf("docCount" to "_count"),
+                script = Script(triggerScript),
+                parentBucketPath = "composite_agg",
+                filter = null,
+            )
+        )
+
+        val bucketLevelMonitorResponse = createMonitor(
+            randomBucketLevelMonitor(
+                inputs = listOf(input),
+                enabled = false,
+                triggers = listOf(trigger),
+                dataSources = DataSources(
+                    findingsEnabled = true,
+                    alertsIndex = "custom_alerts_index",
+                    findingsIndex = "custom_findings_index",
+                    findingsIndexPattern = "custom_findings_index-1"
+                )
+            )
+        )!!
+
+        var docLevelMonitor1 = randomDocumentLevelMonitor(
+            // Match the documents with test_field_1: test_value_3
+            inputs = listOf(DocLevelMonitorInput("description", listOf(index), listOf(docQuery2))),
+            triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)),
+            dataSources = DataSources(
+                findingsEnabled = true,
+                alertsIndex = "custom_alerts_index_1",
+                findingsIndex = "custom_findings_index_1",
+                findingsIndexPattern = "custom_findings_index_1-1"
+            )
+        )
+
+        val docLevelMonitorResponse1 = createMonitor(docLevelMonitor1)!!
+
+        val queryMonitorInput = SearchInput(
+            indices = listOf(index),
+            query = SearchSourceBuilder().query(
+                QueryBuilders
+                    .rangeQuery("test_strict_date_time")
+                    .gt("{{period_end}}||-10d")
+                    .lte("{{period_end}}")
+                    .format("epoch_millis")
+            )
+        )
+        val queryTriggerScript = """
+            return ctx.results[0].hits.hits.size() > 0
+        """.trimIndent()
+
+        val queryLevelTrigger = randomQueryLevelTrigger(condition = Script(queryTriggerScript))
+        val queryMonitorResponse =
+            createMonitor(randomQueryLevelMonitor(inputs = listOf(queryMonitorInput), triggers = listOf(queryLevelTrigger)))!!
+
+        // 1. docMonitor (chainedFinding = null) 2. bucketMonitor (chainedFinding = docMonitor) 3. docMonitor (chainedFinding = bucketMonitor) 4. queryMonitor (chainedFinding = docMonitor 3)
+        var workflow = randomWorkflow(
+            monitorIds = listOf(
+                docLevelMonitorResponse.id,
+                bucketLevelMonitorResponse.id,
+                docLevelMonitorResponse1.id,
+                queryMonitorResponse.id
+            ),
+            auditDelegateMonitorAlerts = false
+        )
+        val workflowResponse = upsertWorkflow(workflow)!!
+        val workflowById = searchWorkflow(workflowResponse.id)
+        assertNotNull(workflowById)
+
+        // Creates 6 documents
+        insertSampleTimeSerializedData(
+            index,
+            listOf(
+                "test_value_1",
+                "test_value_1", // adding duplicate to verify aggregation
+                "test_value_2",
+                "test_value_2",
+                "test_value_3",
+                "test_value_3"
+            )
+        )
+
+        val workflowId = workflowResponse.id
+        // 1. The doc-level monitor should reduce the doc findings to 4 (3 - test_value_2, 4 - test_value_2, 5 - test_value_3, 6 - test_value_3)
+        // 2. The bucket-level monitor fetches the docs from the current findings execution, although its rules match documents having test_value_2 and test_value_3
+        val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!!
+        assertNotNull(executeWorkflowResponse)
+
+        for (monitorRunResults in executeWorkflowResponse.workflowRunResult.monitorRunResults) {
+            when (monitorRunResults.monitorName) {
+                // Verify first doc level monitor execution, alerts and findings
+                docLevelMonitorResponse.monitor.name -> {
+                    assertEquals(1, monitorRunResults.inputResults.results.size)
+                    val values = monitorRunResults.triggerResults.values
+                    assertEquals(1, values.size)
+                    @Suppress("UNCHECKED_CAST")
+                    val docLevelTrigger = values.iterator().next() as DocumentLevelTriggerRunResult
+                    val triggeredDocIds = docLevelTrigger.triggeredDocs.map { it.split("|")[0] }
+                    val expectedTriggeredDocIds = listOf("3", "4", "5", "6")
+                    assertEquals(expectedTriggeredDocIds, triggeredDocIds.sorted())
+
+                    val getAlertsResponse =
+                        assertAlerts(
+                            docLevelMonitorResponse.id,
+                            docLevelMonitorResponse.monitor.dataSources.alertsIndex,
+                            alertSize = 4,
+                            workflowId = workflowId
+                        )
+                    assertAcknowledges(getAlertsResponse.alerts, docLevelMonitorResponse.id, 4)
+                    assertFindings(
+                        docLevelMonitorResponse.id,
+                        docLevelMonitorResponse.monitor.dataSources.findingsIndex,
+                        4,
+                        4,
+                        listOf("3", "4", "5", "6")
+                    )
+                }
+                // Verify second bucket level monitor execution, alerts and findings
+                bucketLevelMonitorResponse.monitor.name -> {
+                    val searchResult = monitorRunResults.inputResults.results.first()
+
+                    @Suppress("UNCHECKED_CAST")
+                    val buckets =
+                        searchResult
+                            .stringMap("aggregations")?.stringMap("composite_agg")
+                            ?.get("buckets") as List<kotlin.collections.Map<String, Any>>
+                    assertEquals("Incorrect search result", 2, buckets.size)
+
+                    val getAlertsResponse =
+                        assertAlerts(
+                            bucketLevelMonitorResponse.id,
+                            bucketLevelMonitorResponse.monitor.dataSources.alertsIndex,
+                            alertSize = 2,
+                            workflowId
+                        )
+                    assertAcknowledges(getAlertsResponse.alerts, bucketLevelMonitorResponse.id, 2)
+                    assertFindings(
+                        bucketLevelMonitorResponse.id,
+                        bucketLevelMonitorResponse.monitor.dataSources.findingsIndex,
+                        1,
+                        4,
+                        listOf("3", "4", "5", "6")
+                    )
+                }
+                // Verify third doc level monitor execution, alerts and findings
+                docLevelMonitorResponse1.monitor.name -> {
+                    assertEquals(1, monitorRunResults.inputResults.results.size)
+                    val values = monitorRunResults.triggerResults.values
+                    assertEquals(1, values.size)
+                    @Suppress("UNCHECKED_CAST")
+                    val docLevelTrigger = values.iterator().next() as DocumentLevelTriggerRunResult
+                    val triggeredDocIds = docLevelTrigger.triggeredDocs.map { it.split("|")[0] }
+                    val expectedTriggeredDocIds = listOf("5", "6")
+                    assertEquals(expectedTriggeredDocIds, triggeredDocIds.sorted())
+
+                    val getAlertsResponse =
+                        assertAlerts(
+                            docLevelMonitorResponse1.id,
+                            docLevelMonitorResponse1.monitor.dataSources.alertsIndex,
+                            alertSize = 2,
+                            workflowId
+                        )
+                    assertAcknowledges(getAlertsResponse.alerts, docLevelMonitorResponse1.id, 2)
+                    assertFindings(
+                        docLevelMonitorResponse1.id,
+                        docLevelMonitorResponse1.monitor.dataSources.findingsIndex,
+                        2,
+                        2,
+                        listOf("5", "6")
+                    )
+                }
+                // Verify fourth query level monitor execution
+                queryMonitorResponse.monitor.name -> {
+                    assertEquals(1, monitorRunResults.inputResults.results.size)
+                    val values = monitorRunResults.triggerResults.values
+                    assertEquals(1, values.size)
+                    @Suppress("UNCHECKED_CAST")
+                    val totalHits =
+                        (
+                            (
+                                monitorRunResults.inputResults.results[0]["hits"]
+                                    as kotlin.collections.Map<String, Any>
+                                )["total"] as kotlin.collections.Map<String, Any>
+                            )["value"]
+                    assertEquals(2, totalHits)
+                    @Suppress("UNCHECKED_CAST")
+                    val docIds =
+                        (
+                            (
+                                monitorRunResults.inputResults.results[0]["hits"]
+                                    as kotlin.collections.Map<String, Any>
+                                )["hits"] as List<kotlin.collections.Map<String, String>>
+                            )
+                            .map { it["_id"]!! }
+                    assertEquals(listOf("5", "6"), docIds.sorted())
+                }
+            }
+        }
+    }
+
+    fun `test execute workflow input error`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+
+        val monitorResponse = createMonitor(monitor)!!
+        var workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id), auditDelegateMonitorAlerts = false
+        )
+        val workflowResponse = upsertWorkflow(workflow)!!
+        val workflowById = searchWorkflow(workflowResponse.id)
+        assertNotNull(workflowById)
+
+        deleteIndex(index)
+
+        val response = executeWorkflow(workflowById, workflowById!!.id, false)!!
+        val error = response.workflowRunResult.monitorRunResults[0].error
+        assertNotNull(error)
+        assertTrue(error is AlertingException)
+        assertEquals(RestStatus.INTERNAL_SERVER_ERROR, (error as AlertingException).status)
+        assertTrue(error.message!!.contains("no such index [$index]"))
+    }
+
+    fun `test execute workflow wrong workflow id`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+
+        val monitorResponse = createMonitor(monitor)!!
+
+        val workflowRequest = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id)
+        )
+        val workflowResponse = upsertWorkflow(workflowRequest)!!
+        val workflowId = workflowResponse.id
+        val getWorkflowResponse = getWorkflowById(id = workflowResponse.id)
+
+        assertNotNull(getWorkflowResponse)
+        assertEquals(workflowId, getWorkflowResponse.id)
+
+        var exception: java.lang.Exception? = null
+        val badWorkflowId = getWorkflowResponse.id + "bad"
+        try {
+            executeWorkflow(id = badWorkflowId)
+        } catch (ex: java.lang.Exception) {
+            exception = ex
+        }
+        assertTrue(exception is ExecutionException)
+        assertTrue(exception!!.cause is AlertingException)
+        assertEquals(RestStatus.NOT_FOUND, (exception.cause as AlertingException).status)
+        assertEquals("Can't find workflow with id: $badWorkflowId", exception.cause!!.message)
+    }
+
+    private fun assertFindings(
+        monitorId: String,
+        customFindingsIndex: String,
+        findingSize: Int,
+        matchedQueryNumber: Int,
+        relatedDocIds: List<String>,
+    ) {
+        val findings = searchFindings(monitorId, customFindingsIndex)
+        assertEquals("Unexpected number of findings saved for test monitor", findingSize, findings.size)
+
+        val findingDocIds = findings.flatMap { it.relatedDocIds }
+
+        assertEquals("Unexpected number of matched doc ids", matchedQueryNumber, findingDocIds.size)
+        assertTrue("Unexpected related doc ids in findings", relatedDocIds.containsAll(findingDocIds))
+    }
+
+    private fun getAuditStateAlerts(
+        alertsIndex: String? = AlertIndices.ALERT_INDEX,
+        monitorId: String,
+        executionId: String? = null,
+    ): List<Alert> {
+        val searchRequest = SearchRequest(alertsIndex)
+        val boolQueryBuilder = QueryBuilders.boolQuery()
+        boolQueryBuilder.must(TermQueryBuilder("monitor_id", monitorId))
+        if (!executionId.isNullOrEmpty())
+            boolQueryBuilder.must(TermQueryBuilder("execution_id", executionId))
+        searchRequest.source().query(boolQueryBuilder)
+        val searchResponse = client().search(searchRequest).get()
+        return searchResponse.hits.map { hit ->
+            val xcp = XContentHelper.createParser(
+                xContentRegistry(), LoggingDeprecationHandler.INSTANCE,
+                hit.sourceRef, XContentType.JSON
+            )
+            XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp)
+            val alert = Alert.parse(xcp, hit.id, hit.version)
+            alert
+        }
+    }
+
+    private fun assertAlerts(
+        monitorId: String,
+        alertsIndex: String? = AlertIndices.ALERT_INDEX,
+        executionId: String? = null,
+        alertSize: Int,
+        workflowId: String,
+    ): GetAlertsResponse {
+        val alerts = searchAlerts(monitorId, alertsIndex!!, executionId = executionId)
+        assertEquals("Unexpected number of alerts saved for test monitor", alertSize, alerts.size)
+        val table = Table("asc", "id", null, alertSize, 0, "")
+        var getAlertsResponse = client()
+            .execute(
+                AlertingActions.GET_ALERTS_ACTION_TYPE,
+                GetAlertsRequest(table, "ALL", "ALL", null, alertsIndex)
+            )
+            .get()
+        assertNotNull(getAlertsResponse)
+        assertEquals(alertSize, getAlertsResponse.alerts.size)
+        getAlertsResponse = client()
+            .execute(
+                AlertingActions.GET_ALERTS_ACTION_TYPE,
+                GetAlertsRequest(table, "ALL", "ALL", monitorId, null, workflowIds = listOf(workflowId))
+            )
+            .get()
+        assertNotNull(getAlertsResponse)
+        assertEquals(alertSize, getAlertsResponse.alerts.size)
+
+        return getAlertsResponse
+    }
+
+    private fun assertAcknowledges(
+        alerts: List<Alert>,
+        monitorId: String,
+        alertSize: Int,
+    ) {
+        val alertIds = alerts.map { it.id }
+        val acknowledgeAlertResponse = client().execute(
+            AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE,
+            AcknowledgeAlertRequest(monitorId, alertIds, WriteRequest.RefreshPolicy.IMMEDIATE)
+        ).get()
+
+        assertEquals(alertSize, acknowledgeAlertResponse.acknowledged.size)
+    }
+
+    private fun verifyAcknowledgeChainedAlerts(
+        alerts: List<Alert>,
+        workflowId: String,
+        alertSize: Int,
+    ) {
+        val alertIds = alerts.map { it.id }.toMutableList()
+        val acknowledgeAlertResponse = ackChainedAlerts(alertIds, workflowId)
+        assertTrue(acknowledgeAlertResponse.acknowledged.stream().map { it.id }.collect(Collectors.toList()).containsAll(alertIds))
+        assertEquals(alertSize, acknowledgeAlertResponse.acknowledged.size)
+        alertIds.add("dummy")
+        val redundantAck = ackChainedAlerts(alertIds, workflowId)
+        Assert.assertTrue(redundantAck.acknowledged.isEmpty())
+        Assert.assertTrue(redundantAck.missing.contains("dummy"))
+        alertIds.remove("dummy")
+        Assert.assertTrue(redundantAck.failed.map { it.id }.toList().containsAll(alertIds))
+    }
+
+    private fun ackChainedAlerts(alertIds: List<String>, workflowId: String): AcknowledgeAlertResponse {
+        return client().execute(
+            AlertingActions.ACKNOWLEDGE_CHAINED_ALERTS_ACTION_TYPE,
+            AcknowledgeChainedAlertRequest(workflowId, alertIds)
+        ).get()
+    }
+
+    private fun assertAuditStateAlerts(
+        monitorId: String,
+        alerts: List<Alert>,
+    ) {
+        alerts.forEach { Assert.assertEquals(Alert.State.AUDIT, it.state) }
+        val alertIds = alerts.stream().map { it.id }.collect(Collectors.toList())
+        val ack = client().execute(
+            AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE,
+            AcknowledgeAlertRequest(monitorId, alertIds, WriteRequest.RefreshPolicy.IMMEDIATE)
+
).get() + Assert.assertTrue(ack.acknowledged.isEmpty()) + Assert.assertTrue(ack.missing.containsAll(alertIds)) + Assert.assertTrue(ack.failed.isEmpty()) + } + + fun `test execute workflow with bucket-level and doc-level chained monitors`() { + createTestIndex(TEST_HR_INDEX) + + val compositeSources = listOf( + TermsValuesSourceBuilder("test_field_1").field("test_field_1") + ) + val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) + val input = SearchInput( + indices = listOf(TEST_HR_INDEX), + query = SearchSourceBuilder().size(0).query(QueryBuilders.matchAllQuery()).aggregation(compositeAgg) + ) + val triggerScript = """ + params.docCount > 0 + """.trimIndent() + + var trigger = randomBucketLevelTrigger() + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "composite_agg", + filter = null + ), + actions = listOf() + ) + val bucketMonitor = createMonitor( + randomBucketLevelMonitor( + inputs = listOf(input), + enabled = false, + triggers = listOf(trigger) + ) + ) + assertNotNull("The bucket monitor was not created", bucketMonitor) + + val docQuery1 = DocLevelQuery(query = "test_field_1:\"a\"", name = "3", fields = listOf()) + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(DocLevelMonitorInput("description", listOf(TEST_HR_INDEX), listOf(docQuery1))), + triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)) + ) + val docMonitor = createMonitor(monitor1)!! + assertNotNull("The doc level monitor was not created", docMonitor) + + val workflow = randomWorkflow(monitorIds = listOf(bucketMonitor!!.id, docMonitor.id)) + val workflowResponse = upsertWorkflow(workflow) + assertNotNull("The workflow was not created", workflowResponse) + + // Add a doc that is accessible to the user + indexDoc( + TEST_HR_INDEX, + "1", + """ + { + "test_field_1": "a", + "accessible": true + } + """.trimIndent() + ) + + // Add a second doc that is not accessible to the user + indexDoc( + TEST_HR_INDEX, + "2", + """ + { + "test_field_1": "b", + "accessible": false + } + """.trimIndent() + ) + + indexDoc( + TEST_HR_INDEX, + "3", + """ + { + "test_field_1": "c", + "accessible": true + } + """.trimIndent() + ) + + val executeResult = executeWorkflow(id = workflowResponse!!.id) + assertNotNull(executeResult) + assertEquals(2, executeResult!!.workflowRunResult.monitorRunResults.size) + } + + fun `test chained alerts for AND OR and NOT conditions with custom alerts indices`() { + val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex1 = "custom_findings_index" + val customFindingsIndexPattern1 = "custom_findings_index-1" + val customAlertsIndex = "custom_alerts_index" + val customAlertsHistoryIndex = "custom_alerts_history_index" + val customAlertsHistoryIndexPattern = "" + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger1), + dataSources = DataSources( + findingsIndex = customFindingsIndex1, + findingsIndexPattern = customFindingsIndexPattern1, + alertsIndex = customAlertsIndex, + alertsHistoryIndex = customAlertsHistoryIndex, + alertsHistoryIndexPattern = customAlertsHistoryIndexPattern + ) + ) + val monitorResponse = 
createMonitor(monitor1)!! + + val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) + val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2)) + val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex2 = "custom_findings_index_2" + val customFindingsIndexPattern2 = "custom_findings_index-2" + var monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput2), + triggers = listOf(trigger2), + dataSources = DataSources( + findingsIndex = customFindingsIndex2, + findingsIndexPattern = customFindingsIndexPattern2, + alertsIndex = customAlertsIndex, + alertsHistoryIndex = customAlertsHistoryIndex, + alertsHistoryIndexPattern = customAlertsHistoryIndexPattern + ) + ) + + val monitorResponse2 = createMonitor(monitor2)!! + val andTrigger = randomChainedAlertTrigger( + name = "1And2", + condition = Script("monitor[id=${monitorResponse.id}] && monitor[id=${monitorResponse2.id}]") + ) + val notTrigger = randomChainedAlertTrigger( + name = "Not1OrNot2", + condition = Script("!monitor[id=${monitorResponse.id}] || !monitor[id=${monitorResponse2.id}]") + ) + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id, monitorResponse2.id), + triggers = listOf(andTrigger, notTrigger) + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + val workflowId = workflowResponse.id + + var executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! + var triggerResults = executeWorkflowResponse.workflowRunResult.triggerResults + Assert.assertEquals(triggerResults.size, 2) + Assert.assertTrue(triggerResults.containsKey(andTrigger.id)) + Assert.assertTrue(triggerResults.containsKey(notTrigger.id)) + var andTriggerResult = triggerResults[andTrigger.id] + var notTriggerResult = triggerResults[notTrigger.id] + Assert.assertTrue(notTriggerResult!!.triggered) + Assert.assertFalse(andTriggerResult!!.triggered) + var res = + getWorkflowAlerts(workflowId = workflowId, alertIndex = customAlertsIndex, associatedAlertsIndex = customAlertsHistoryIndex) + var chainedAlerts = res.alerts + Assert.assertTrue(chainedAlerts.size == 1) + Assert.assertTrue(res.associatedAlerts.isEmpty()) + verifyAcknowledgeChainedAlerts(chainedAlerts, workflowId, 1) + Assert.assertTrue(chainedAlerts[0].executionId == executeWorkflowResponse.workflowRunResult.executionId) + Assert.assertTrue(chainedAlerts[0].monitorId == "") + Assert.assertTrue(chainedAlerts[0].triggerId == notTrigger.id) + var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 + val testDoc1 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16644, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "1", testDoc1) + + testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 and monitor2 + val testDoc2 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16645, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "2", testDoc2) + + testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Doesn't match + val testDoc3 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16645, + 
"test_strict_date_time" : "$testTime", + "test_field_1" : "us-east-1" + }""" + indexDoc(index, "3", testDoc3) + executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! + triggerResults = executeWorkflowResponse.workflowRunResult.triggerResults + Assert.assertEquals(triggerResults.size, 2) + Assert.assertTrue(triggerResults.containsKey(andTrigger.id)) + Assert.assertTrue(triggerResults.containsKey(notTrigger.id)) + andTriggerResult = triggerResults[andTrigger.id] + notTriggerResult = triggerResults[notTrigger.id] + Assert.assertFalse(notTriggerResult!!.triggered) + Assert.assertTrue(andTriggerResult!!.triggered) + res = getWorkflowAlerts(workflowId, alertIndex = customAlertsIndex, associatedAlertsIndex = customAlertsHistoryIndex) + chainedAlerts = res.alerts + val numChainedAlerts = 1 + Assert.assertTrue(res.associatedAlerts.isNotEmpty()) + Assert.assertTrue(chainedAlerts.size == numChainedAlerts) + Assert.assertTrue(chainedAlerts[0].executionId == executeWorkflowResponse.workflowRunResult.executionId) + Assert.assertTrue(chainedAlerts[0].monitorId == "") + Assert.assertTrue(chainedAlerts[0].triggerId == andTrigger.id) + val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults + assertEquals(2, monitorsRunResults.size) + + assertEquals(monitor1.name, monitorsRunResults[0].monitorName) + assertEquals(1, monitorsRunResults[0].triggerResults.size) + + Assert.assertEquals(monitor2.name, monitorsRunResults[1].monitorName) + Assert.assertEquals(1, monitorsRunResults[1].triggerResults.size) + + Assert.assertEquals( + monitor1.dataSources.alertsHistoryIndex, + CompositeWorkflowRunner.getDelegateMonitorAlertIndex(dataSources = monitor1.dataSources, workflow, true) + ) + val alerts = getAuditStateAlerts( + monitorId = monitorResponse.id, executionId = executeWorkflowResponse.workflowRunResult.executionId, + alertsIndex = monitor1.dataSources.alertsHistoryIndex, + ) + assertAuditStateAlerts(monitorResponse.id, alerts) + assertFindings(monitorResponse.id, customFindingsIndex1, 2, 2, listOf("1", "2")) + val associatedAlertIds = res.associatedAlerts.map { it.id }.toList() + associatedAlertIds.containsAll(alerts.map { it.id }.toList()) + val alerts1 = getAuditStateAlerts( + alertsIndex = monitor2.dataSources.alertsHistoryIndex, monitorId = monitorResponse2.id, + executionId = executeWorkflowResponse.workflowRunResult.executionId, + ) + assertAuditStateAlerts(monitorResponse2.id, alerts1) + assertFindings(monitorResponse2.id, customFindingsIndex2, 1, 1, listOf("2")) + associatedAlertIds.containsAll(alerts1.map { it.id }.toList()) + verifyAcknowledgeChainedAlerts(chainedAlerts, workflowId, numChainedAlerts) + } + + fun `test chained alerts for AND OR and NOT conditions`() { + val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex1 = "custom_findings_index" + val customFindingsIndexPattern1 = "custom_findings_index-1" + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger1), + dataSources = DataSources( + findingsIndex = customFindingsIndex1, + findingsIndexPattern = customFindingsIndexPattern1 + ) + ) + val monitorResponse = createMonitor(monitor1)!! 
+ + val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) + val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2)) + val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex2 = "custom_findings_index_2" + val customFindingsIndexPattern2 = "custom_findings_index-2" + var monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput2), + triggers = listOf(trigger2), + dataSources = DataSources( + findingsIndex = customFindingsIndex2, + findingsIndexPattern = customFindingsIndexPattern2 + ) + ) + + val monitorResponse2 = createMonitor(monitor2)!! + val andTrigger = randomChainedAlertTrigger( + name = "1And2", + condition = Script("monitor[id=${monitorResponse.id}] && monitor[id=${monitorResponse2.id}]") + ) + val notTrigger = randomChainedAlertTrigger( + name = "Not1OrNot2", + condition = Script("!monitor[id=${monitorResponse.id}] || !monitor[id=${monitorResponse2.id}]") + ) + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id, monitorResponse2.id), + triggers = listOf(andTrigger, notTrigger) + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + val workflowId = workflowResponse.id + + var executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! + var triggerResults = executeWorkflowResponse.workflowRunResult.triggerResults + Assert.assertEquals(triggerResults.size, 2) + Assert.assertTrue(triggerResults.containsKey(andTrigger.id)) + Assert.assertTrue(triggerResults.containsKey(notTrigger.id)) + var andTriggerResult = triggerResults[andTrigger.id] + var notTriggerResult = triggerResults[notTrigger.id] + Assert.assertTrue(notTriggerResult!!.triggered) + Assert.assertFalse(andTriggerResult!!.triggered) + var res = getWorkflowAlerts( + workflowId, + ) + var chainedAlerts = res.alerts + Assert.assertTrue(chainedAlerts.size == 1) + + // verify get alerts api with defaults set in query params returns only chained alerts and not audit alerts + val table = Table("asc", "id", null, 1, 0, "") + val getAlertsDefaultParamsResponse = client().execute( + AlertingActions.GET_ALERTS_ACTION_TYPE, + GetAlertsRequest( + table = table, + severityLevel = "ALL", + alertState = "ALL", + monitorId = null, + alertIndex = null, + monitorIds = null, + workflowIds = null, + alertIds = null + ) + ).get() + Assert.assertEquals(getAlertsDefaultParamsResponse.alerts.size, 1) + Assert.assertTrue(res.associatedAlerts.isEmpty()) + verifyAcknowledgeChainedAlerts(chainedAlerts, workflowId, 1) + Assert.assertTrue(chainedAlerts[0].executionId == executeWorkflowResponse.workflowRunResult.executionId) + Assert.assertTrue(chainedAlerts[0].monitorId == "") + Assert.assertTrue(chainedAlerts[0].triggerId == notTrigger.id) + var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 + val testDoc1 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16644, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "1", testDoc1) + + testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 and monitor2 + val testDoc2 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16645, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + 
indexDoc(index, "2", testDoc2) + + testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Doesn't match + val testDoc3 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16645, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-east-1" + }""" + indexDoc(index, "3", testDoc3) + executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! + triggerResults = executeWorkflowResponse.workflowRunResult.triggerResults + Assert.assertEquals(triggerResults.size, 2) + Assert.assertTrue(triggerResults.containsKey(andTrigger.id)) + Assert.assertTrue(triggerResults.containsKey(notTrigger.id)) + andTriggerResult = triggerResults[andTrigger.id] + notTriggerResult = triggerResults[notTrigger.id] + Assert.assertFalse(notTriggerResult!!.triggered) + Assert.assertTrue(andTriggerResult!!.triggered) + val getAuditAlertsForMonitor1 = client().execute( + AlertingActions.GET_ALERTS_ACTION_TYPE, + GetAlertsRequest( + table = table, + severityLevel = "ALL", + alertState = "AUDIT", + monitorId = monitorResponse.id, + alertIndex = null, + monitorIds = null, + workflowIds = listOf(workflowId), + alertIds = null + ) + ).get() + Assert.assertEquals(getAuditAlertsForMonitor1.alerts.size, 1) + res = getWorkflowAlerts(workflowId) + chainedAlerts = res.alerts + Assert.assertTrue(chainedAlerts.size == 1) + Assert.assertTrue(res.associatedAlerts.isNotEmpty()) + Assert.assertTrue(chainedAlerts[0].executionId == executeWorkflowResponse.workflowRunResult.executionId) + Assert.assertTrue(chainedAlerts[0].monitorId == "") + Assert.assertTrue(chainedAlerts[0].triggerId == andTrigger.id) + val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults + assertEquals(2, monitorsRunResults.size) + + assertEquals(monitor1.name, monitorsRunResults[0].monitorName) + assertEquals(1, monitorsRunResults[0].triggerResults.size) + + Assert.assertEquals(monitor2.name, monitorsRunResults[1].monitorName) + Assert.assertEquals(1, monitorsRunResults[1].triggerResults.size) + + Assert.assertEquals( + monitor1.dataSources.alertsHistoryIndex, + CompositeWorkflowRunner.getDelegateMonitorAlertIndex(dataSources = monitor1.dataSources, workflow, true) + ) + val alerts = getAuditStateAlerts( + alertsIndex = monitor1.dataSources.alertsHistoryIndex, monitorId = monitorResponse.id, + executionId = executeWorkflowResponse.workflowRunResult.executionId + ) + val associatedAlertIds = res.associatedAlerts.map { it.id }.toList() + associatedAlertIds.containsAll(alerts.map { it.id }.toList()) + assertAuditStateAlerts(monitorResponse.id, alerts) + assertFindings(monitorResponse.id, customFindingsIndex1, 2, 2, listOf("1", "2")) + + val alerts1 = getAuditStateAlerts( + alertsIndex = monitor2.dataSources.alertsHistoryIndex, monitorId = monitorResponse2.id, + executionId = executeWorkflowResponse.workflowRunResult.executionId + ) + associatedAlertIds.containsAll(alerts1.map { it.id }.toList()) + assertAuditStateAlerts(monitorResponse2.id, alerts1) + assertFindings(monitorResponse2.id, customFindingsIndex2, 1, 1, listOf("2")) + verifyAcknowledgeChainedAlerts(chainedAlerts, workflowId, 1) + // test redundant executions of workflow dont query old data again to verify metadata updation works fine + val redundantExec = executeWorkflow(workflow) + Assert.assertFalse(redundantExec?.workflowRunResult!!.triggerResults[andTrigger.id]!!.triggered) + Assert.assertTrue(redundantExec.workflowRunResult.triggerResults[notTrigger.id]!!.triggered) + } 
+    private fun getDelegateMonitorMetadataId(
+        workflowMetadata: WorkflowMetadata?,
+        monitorResponse: IndexMonitorResponse,
+    ) = "${workflowMetadata!!.id}-${monitorResponse.id}-metadata"
+
+    fun `test create workflow success`() {
+        val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(docQuery1)
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val customFindingsIndex = "custom_findings_index"
+        val customFindingsIndexPattern = "custom_findings_index-1"
+        val customQueryIndex = "custom_alerts_index"
+        val monitor1 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger),
+            dataSources = DataSources(
+                queryIndex = customQueryIndex,
+                findingsIndex = customFindingsIndex,
+                findingsIndexPattern = customFindingsIndexPattern
+            )
+        )
+
+        val monitor2 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger),
+            dataSources = DataSources(
+                queryIndex = customQueryIndex,
+                findingsIndex = customFindingsIndex,
+                findingsIndexPattern = customFindingsIndexPattern
+            )
+        )
+
+        val monitorResponse1 = createMonitor(monitor1)!!
+        val monitorResponse2 = createMonitor(monitor2)!!
+
+        val workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse1.id, monitorResponse2.id)
+        )
+
+        val workflowResponse = upsertWorkflow(workflow)!!
+        assertNotNull("Workflow creation failed", workflowResponse)
+        assertNotNull(workflowResponse.workflow)
+        assertNotEquals("response is missing Id", Monitor.NO_ID, workflowResponse.id)
+        assertTrue("incorrect version", workflowResponse.version > 0)
+
+        val workflowById = searchWorkflow(workflowResponse.id)!!
+        assertNotNull(workflowById)
+
+        // Verify workflow
+        assertNotEquals("response is missing Id", Monitor.NO_ID, workflowById.id)
+        assertTrue("incorrect version", workflowById.version > 0)
+        assertEquals("Workflow name not correct", workflow.name, workflowById.name)
+        assertEquals("Workflow owner not correct", workflow.owner, workflowById.owner)
+        assertEquals("Workflow input not correct", workflow.inputs, workflowById.inputs)
+
+        // Delegate verification
+        @Suppress("UNCHECKED_CAST")
+        val delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order }
+        assertEquals("Delegates size not correct", 2, delegates.size)
+
+        val delegate1 = delegates[0]
+        assertNotNull(delegate1)
+        assertEquals("Delegate1 order not correct", 1, delegate1.order)
+        assertEquals("Delegate1 id not correct", monitorResponse1.id, delegate1.monitorId)
+
+        val delegate2 = delegates[1]
+        assertNotNull(delegate2)
+        assertEquals("Delegate2 order not correct", 2, delegate2.order)
+        assertEquals("Delegate2 id not correct", monitorResponse2.id, delegate2.monitorId)
+        assertEquals(
+            "Delegate2 Chained finding not correct", monitorResponse1.id, delegate2.chainedMonitorFindings!!.monitorId
+        )
+    }
+
+    fun `test update workflow add monitor success`() {
+        val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(docQuery1)
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val customFindingsIndex = "custom_findings_index"
+        val customFindingsIndexPattern = "custom_findings_index-1"
+        val customQueryIndex = "custom_alerts_index"
+        val monitor1 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger),
+
dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + + val monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + + val monitorResponse1 = createMonitor(monitor1)!! + val monitorResponse2 = createMonitor(monitor2)!! + + val workflow = randomWorkflow( + monitorIds = listOf(monitorResponse1.id, monitorResponse2.id) + ) + + val workflowResponse = upsertWorkflow(workflow)!! + assertNotNull("Workflow creation failed", workflowResponse) + assertNotNull(workflowResponse.workflow) + assertNotEquals("response is missing Id", Monitor.NO_ID, workflowResponse.id) + assertTrue("incorrect version", workflowResponse.version > 0) + + var workflowById = searchWorkflow(workflowResponse.id)!! + assertNotNull(workflowById) + + val monitor3 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + val monitorResponse3 = createMonitor(monitor3)!! + + val updatedWorkflowResponse = upsertWorkflow( + randomWorkflow( + monitorIds = listOf(monitorResponse1.id, monitorResponse2.id, monitorResponse3.id) + ), + workflowResponse.id, + RestRequest.Method.PUT + )!! + + assertNotNull("Workflow creation failed", updatedWorkflowResponse) + assertNotNull(updatedWorkflowResponse.workflow) + assertEquals("Workflow id changed", workflowResponse.id, updatedWorkflowResponse.id) + assertTrue("incorrect version", updatedWorkflowResponse.version > 0) + + workflowById = searchWorkflow(updatedWorkflowResponse.id)!! 
+ + // Verify workflow + assertNotEquals("response is missing Id", Monitor.NO_ID, workflowById.id) + assertTrue("incorrect version", workflowById.version > 0) + assertEquals("Workflow name not correct", updatedWorkflowResponse.workflow.name, workflowById.name) + assertEquals("Workflow owner not correct", updatedWorkflowResponse.workflow.owner, workflowById.owner) + assertEquals("Workflow input not correct", updatedWorkflowResponse.workflow.inputs, workflowById.inputs) + + // Delegate verification + @Suppress("UNCHECKED_CAST") + val delegates = (workflowById.inputs as List)[0].sequence.delegates.sortedBy { it.order } + assertEquals("Delegates size not correct", 3, delegates.size) + + val delegate1 = delegates[0] + assertNotNull(delegate1) + assertEquals("Delegate1 order not correct", 1, delegate1.order) + assertEquals("Delegate1 id not correct", monitorResponse1.id, delegate1.monitorId) + + val delegate2 = delegates[1] + assertNotNull(delegate2) + assertEquals("Delegate2 order not correct", 2, delegate2.order) + assertEquals("Delegate2 id not correct", monitorResponse2.id, delegate2.monitorId) + assertEquals( + "Delegate2 Chained finding not correct", monitorResponse1.id, delegate2.chainedMonitorFindings!!.monitorId + ) + + val delegate3 = delegates[2] + assertNotNull(delegate3) + assertEquals("Delegate3 order not correct", 3, delegate3.order) + assertEquals("Delegate3 id not correct", monitorResponse3.id, delegate3.monitorId) + assertEquals( + "Delegate3 Chained finding not correct", monitorResponse2.id, delegate3.chainedMonitorFindings!!.monitorId + ) + } + + fun `test update workflow change order of delegate monitors`() { + val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery1) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "custom_findings_index-1" + val customQueryIndex = "custom_alerts_index" + val monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + + val monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + + val monitorResponse1 = createMonitor(monitor1)!! + val monitorResponse2 = createMonitor(monitor2)!! + + val workflow = randomWorkflow( + monitorIds = listOf(monitorResponse1.id, monitorResponse2.id) + ) + + val workflowResponse = upsertWorkflow(workflow)!! + assertNotNull("Workflow creation failed", workflowResponse) + assertNotNull(workflowResponse.workflow) + assertNotEquals("response is missing Id", Monitor.NO_ID, workflowResponse.id) + assertTrue("incorrect version", workflowResponse.version > 0) + + var workflowById = searchWorkflow(workflowResponse.id)!! + assertNotNull(workflowById) + + val updatedWorkflowResponse = upsertWorkflow( + randomWorkflow( + monitorIds = listOf(monitorResponse2.id, monitorResponse1.id) + ), + workflowResponse.id, + RestRequest.Method.PUT + )!! 
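+        // After the update the sequence is expected to be rebuilt in the reversed order, with the
+        // second delegate (now monitorResponse1) chaining off monitorResponse2's findings, as
+        // verified below.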
+
+        assertNotNull("Workflow update failed", updatedWorkflowResponse)
+        assertNotNull(updatedWorkflowResponse.workflow)
+        assertEquals("Workflow id changed", workflowResponse.id, updatedWorkflowResponse.id)
+        assertTrue("incorrect version", updatedWorkflowResponse.version > 0)
+
+        workflowById = searchWorkflow(updatedWorkflowResponse.id)!!
+
+        // Verify workflow
+        assertNotEquals("response is missing Id", Monitor.NO_ID, workflowById.id)
+        assertTrue("incorrect version", workflowById.version > 0)
+        assertEquals("Workflow name not correct", updatedWorkflowResponse.workflow.name, workflowById.name)
+        assertEquals("Workflow owner not correct", updatedWorkflowResponse.workflow.owner, workflowById.owner)
+        assertEquals("Workflow input not correct", updatedWorkflowResponse.workflow.inputs, workflowById.inputs)
+
+        // Delegate verification
+        @Suppress("UNCHECKED_CAST")
+        val delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order }
+        assertEquals("Delegates size not correct", 2, delegates.size)
+
+        val delegate1 = delegates[0]
+        assertNotNull(delegate1)
+        assertEquals("Delegate1 order not correct", 1, delegate1.order)
+        assertEquals("Delegate1 id not correct", monitorResponse2.id, delegate1.monitorId)
+
+        val delegate2 = delegates[1]
+        assertNotNull(delegate2)
+        assertEquals("Delegate2 order not correct", 2, delegate2.order)
+        assertEquals("Delegate2 id not correct", monitorResponse1.id, delegate2.monitorId)
+        assertEquals(
+            "Delegate2 Chained finding not correct", monitorResponse2.id, delegate2.chainedMonitorFindings!!.monitorId
+        )
+    }
+
+    fun `test update workflow remove monitor success`() {
+        val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(docQuery1)
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val customFindingsIndex = "custom_findings_index"
+        val customFindingsIndexPattern = "custom_findings_index-1"
+        val customQueryIndex = "custom_alerts_index"
+        val monitor1 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger),
+            dataSources = DataSources(
+                queryIndex = customQueryIndex,
+                findingsIndex = customFindingsIndex,
+                findingsIndexPattern = customFindingsIndexPattern
+            )
+        )
+
+        val monitor2 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger),
+            dataSources = DataSources(
+                queryIndex = customQueryIndex,
+                findingsIndex = customFindingsIndex,
+                findingsIndexPattern = customFindingsIndexPattern
+            )
+        )
+
+        val monitorResponse1 = createMonitor(monitor1)!!
+        val monitorResponse2 = createMonitor(monitor2)!!
+
+        val workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse1.id, monitorResponse2.id)
+        )
+
+        val workflowResponse = upsertWorkflow(workflow)!!
+        assertNotNull("Workflow creation failed", workflowResponse)
+        assertNotNull(workflowResponse.workflow)
+        assertNotEquals("response is missing Id", Monitor.NO_ID, workflowResponse.id)
+        assertTrue("incorrect version", workflowResponse.version > 0)
+
+        var workflowById = searchWorkflow(workflowResponse.id)!!
+        assertNotNull(workflowById)
+
+        val updatedWorkflowResponse = upsertWorkflow(
+            randomWorkflow(
+                monitorIds = listOf(monitorResponse1.id)
+            ),
+            workflowResponse.id,
+            RestRequest.Method.PUT
+        )!!
+
+        assertNotNull("Workflow update failed", updatedWorkflowResponse)
+        assertNotNull(updatedWorkflowResponse.workflow)
+        assertEquals("Workflow id changed", workflowResponse.id, updatedWorkflowResponse.id)
+        assertTrue("incorrect version", updatedWorkflowResponse.version > 0)
+
+        workflowById = searchWorkflow(updatedWorkflowResponse.id)!!
+
+        // Verify workflow
+        assertNotEquals("response is missing Id", Monitor.NO_ID, workflowById.id)
+        assertTrue("incorrect version", workflowById.version > 0)
+        assertEquals("Workflow name not correct", updatedWorkflowResponse.workflow.name, workflowById.name)
+        assertEquals("Workflow owner not correct", updatedWorkflowResponse.workflow.owner, workflowById.owner)
+        assertEquals("Workflow input not correct", updatedWorkflowResponse.workflow.inputs, workflowById.inputs)
+
+        // Delegate verification
+        @Suppress("UNCHECKED_CAST")
+        val delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order }
+        assertEquals("Delegates size not correct", 1, delegates.size)
+
+        val delegate1 = delegates[0]
+        assertNotNull(delegate1)
+        assertEquals("Delegate1 order not correct", 1, delegate1.order)
+        assertEquals("Delegate1 id not correct", monitorResponse1.id, delegate1.monitorId)
+    }
+
+    fun `test update workflow doesn't exist failure`() {
+        val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(docQuery1)
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val customFindingsIndex = "custom_findings_index"
+        val customFindingsIndexPattern = "custom_findings_index-1"
+        val customQueryIndex = "custom_alerts_index"
+        val monitor1 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger),
+            dataSources = DataSources(
+                queryIndex = customQueryIndex,
+                findingsIndex = customFindingsIndex,
+                findingsIndexPattern = customFindingsIndexPattern
+            )
+        )
+
+        val monitorResponse1 = createMonitor(monitor1)!!
+
+        val workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse1.id)
+        )
+        val workflowResponse = upsertWorkflow(workflow)!!
+        assertNotNull("Workflow creation failed", workflowResponse)
+
+        try {
+            upsertWorkflow(workflow, "testId", RestRequest.Method.PUT)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning GetWorkflow Action error ",
+                    it.contains("Workflow with testId is not found")
+                )
+            }
+        }
+    }
+
+    fun `test get workflow`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger),
+        )
+
+        val monitorResponse = createMonitor(monitor)!!
+
+        val workflowRequest = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id)
+        )
+
+        val workflowResponse = upsertWorkflow(workflowRequest)!!
+        assertNotNull("Workflow creation failed", workflowResponse)
+        assertNotNull(workflowResponse.workflow)
+        assertNotEquals("response is missing Id", Monitor.NO_ID, workflowResponse.id)
+        assertTrue("incorrect version", workflowResponse.version > 0)
+
+        val getWorkflowResponse = getWorkflowById(id = workflowResponse.id)
+        assertNotNull(getWorkflowResponse)
+
+        val workflowById = getWorkflowResponse.workflow!!
+        // Verify workflow
+        assertNotEquals("response is missing Id", Monitor.NO_ID, getWorkflowResponse.id)
+        assertTrue("incorrect version", getWorkflowResponse.version > 0)
+        assertEquals("Workflow name not correct", workflowRequest.name, workflowById.name)
+        assertEquals("Workflow owner not correct", workflowRequest.owner, workflowById.owner)
+        assertEquals("Workflow input not correct", workflowRequest.inputs, workflowById.inputs)
+
+        // Delegate verification
+        @Suppress("UNCHECKED_CAST")
+        val delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order }
+        assertEquals("Delegates size not correct", 1, delegates.size)
+
+        val delegate = delegates[0]
+        assertNotNull(delegate)
+        assertEquals("Delegate order not correct", 1, delegate.order)
+        assertEquals("Delegate id not correct", monitorResponse.id, delegate.monitorId)
+    }
+
+    fun `test get workflow for invalid id monitor index doesn't exist`() {
+        // Get workflow for non existing workflow id
+        try {
+            getWorkflowById(id = "-1")
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning GetWorkflow Action error ",
+                    it.contains("Workflow not found")
+                )
+            }
+        }
+    }
+
+    fun `test get workflow for invalid id monitor index exists`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger),
+        )
+        createMonitor(monitor)
+        // Get workflow for non existing workflow id
+        try {
+            getWorkflowById(id = "-1")
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning GetWorkflow Action error ",
+                    it.contains("Workflow not found")
+                )
+            }
+        }
+    }
+
+    fun `test delete workflow keeping delegate monitor`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+
+        val monitorResponse = createMonitor(monitor)!!
+
+        val workflowRequest = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id)
+        )
+        val workflowResponse = upsertWorkflow(workflowRequest)!!
+        val workflowId = workflowResponse.id
+        val getWorkflowResponse = getWorkflowById(id = workflowResponse.id)
+
+        assertNotNull(getWorkflowResponse)
+        assertEquals(workflowId, getWorkflowResponse.id)
+
+        deleteWorkflow(workflowId, false)
+        // Verify that the workflow is deleted
+        try {
+            getWorkflowById(workflowId)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning GetWorkflow Action error ",
+                    it.contains("Workflow not found.")
+                )
+            }
+        }
+        // Verify that the monitor is not deleted
+        val existingDelegate = getMonitorResponse(monitorResponse.id)
+        assertNotNull(existingDelegate)
+    }
+
+    fun `test delete workflow delegate monitor deleted`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+
+        val monitorResponse = createMonitor(monitor)!!
+
+        val workflowRequest = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id)
+        )
+        val workflowResponse = upsertWorkflow(workflowRequest)!!
+        val workflowId = workflowResponse.id
+        val getWorkflowResponse = getWorkflowById(id = workflowResponse.id)
+
+        assertNotNull(getWorkflowResponse)
+        assertEquals(workflowId, getWorkflowResponse.id)
+
+        deleteWorkflow(workflowId, true)
+        // Verify that the workflow is deleted
+        try {
+            getWorkflowById(workflowId)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning GetWorkflow Action error ",
+                    it.contains("Workflow not found.")
+                )
+            }
+        }
+        // Verify that the monitor is deleted
+        try {
+            getMonitorResponse(monitorResponse.id)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning GetMonitor Action error ",
+                    it.contains("Monitor not found")
+                )
+            }
+        }
+    }
+
+    fun `test delete executed workflow with metadata deleted`() {
+        val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf())
+        val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1))
+        val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        var monitor1 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput1),
+            triggers = listOf(trigger1)
+        )
+        val monitorResponse = createMonitor(monitor1)!!
+
+        val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf())
+        val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2))
+        val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        var monitor2 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput2),
+            triggers = listOf(trigger2),
+        )
+
+        val monitorResponse2 = createMonitor(monitor2)!!
+
+        var workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id, monitorResponse2.id)
+        )
+        val workflowResponse = upsertWorkflow(workflow)!!
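+        // Executing this workflow creates workflow metadata plus one metadata doc per delegate,
+        // keyed as "<workflowMetadataId>-<monitorId>-metadata" (see getDelegateMonitorMetadataId
+        // above). Deleting the workflow afterwards is expected to clean up all of these documents.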
+        val workflowById = searchWorkflow(workflowResponse.id)
+        assertNotNull(workflowById)
+
+        var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS))
+        // Matches monitor1
+        val testDoc1 = """{
+            "message" : "This is an error from IAD region",
+            "source.ip.v6.v2" : 16644,
+            "test_strict_date_time" : "$testTime",
+            "test_field_1" : "us-west-2"
+        }"""
+        indexDoc(index, "1", testDoc1)
+
+        val workflowId = workflowResponse.id
+        val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!!
+        val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults
+        assertEquals(2, monitorsRunResults.size)
+
+        val workflowMetadata = searchWorkflowMetadata(workflowId)
+        assertNotNull(workflowMetadata)
+
+        val monitorMetadataId1 = getDelegateMonitorMetadataId(workflowMetadata, monitorResponse)
+        val monitorMetadata1 = searchMonitorMetadata(monitorMetadataId1)
+        assertNotNull(monitorMetadata1)
+
+        val monitorMetadataId2 = getDelegateMonitorMetadataId(workflowMetadata, monitorResponse2)
+        val monitorMetadata2 = searchMonitorMetadata(monitorMetadataId2)
+        assertNotNull(monitorMetadata2)
+
+        assertFalse(monitorMetadata1!!.id == monitorMetadata2!!.id)
+
+        deleteWorkflow(workflowId, true)
+        // Verify that the workflow is deleted
+        try {
+            getWorkflowById(workflowId)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning GetWorkflow Action error ",
+                    it.contains("Workflow not found.")
+                )
+            }
+        }
+        // Verify that the workflow metadata is deleted
+        try {
+            searchWorkflowMetadata(workflowId)
+            fail("expected searchWorkflowMetadata method to throw exception")
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning GetMonitor Action error ",
+                    it.contains("List is empty")
+                )
+            }
+        }
+        // Verify that the monitors metadata are deleted
+        try {
+            searchMonitorMetadata(monitorMetadataId1)
+            fail("expected searchMonitorMetadata method to throw exception")
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning GetMonitor Action error ",
+                    it.contains("List is empty")
+                )
+            }
+        }
+
+        try {
+            searchMonitorMetadata(monitorMetadataId2)
+            fail("expected searchMonitorMetadata method to throw exception")
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning GetMonitor Action error ",
+                    it.contains("List is empty")
+                )
+            }
+        }
+    }
+
+    fun `test delete workflow delegate monitor part of another workflow not deleted`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+
+        val monitorResponse = createMonitor(monitor)!!
+
+        val workflowRequest = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id)
+        )
+        val workflowResponse = upsertWorkflow(workflowRequest)!!
+        val workflowId = workflowResponse.id
+        val getWorkflowResponse = getWorkflowById(id = workflowResponse.id)
+
+        assertNotNull(getWorkflowResponse)
+        assertEquals(workflowId, getWorkflowResponse.id)
+
+        val workflowRequest2 = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id)
+        )
+        val workflowResponse2 = upsertWorkflow(workflowRequest2)!!
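+        // Both workflows reference the same delegate monitor, so deleting the first workflow with
+        // deleteDelegateMonitors = true is expected to be rejected for the shared monitor, and the
+        // monitor must survive, as asserted below.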
+
+        val workflowId2 = workflowResponse2.id
+        val getWorkflowResponse2 = getWorkflowById(id = workflowResponse2.id)
+
+        assertNotNull(getWorkflowResponse2)
+        assertEquals(workflowId2, getWorkflowResponse2.id)
+
+        try {
+            deleteWorkflow(workflowId, true)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning GetWorkflow Action error ",
+                    it.contains("[Not allowed to delete ${monitorResponse.id} monitors")
+                )
+            }
+        }
+        val existingMonitor = getMonitorResponse(monitorResponse.id)
+        assertNotNull(existingMonitor)
+    }
+
+    fun `test trying to delete monitor that is part of workflow sequence`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+
+        val monitorResponse = createMonitor(monitor)!!
+
+        val workflowRequest = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id)
+        )
+
+        val workflowResponse = upsertWorkflow(workflowRequest)!!
+        val workflowId = workflowResponse.id
+        val getWorkflowResponse = getWorkflowById(id = workflowResponse.id)
+
+        assertNotNull(getWorkflowResponse)
+        assertEquals(workflowId, getWorkflowResponse.id)
+
+        // Verify that the monitor can't be deleted because it's included in the workflow
+        try {
+            deleteMonitor(monitorResponse.id)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning DeleteMonitor Action error ",
+                    it.contains("Monitor can't be deleted because it is a part of workflow(s)")
+                )
+            }
+        }
+    }
+
+    fun `test delete workflow for invalid id monitor index doesn't exist`() {
+        // Try deleting non-existing workflow
+        try {
+            deleteWorkflow("-1")
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning DeleteWorkflow Action error ",
+                    it.contains("Workflow not found.")
+                )
+            }
+        }
+    }
+
+    fun `test delete workflow for invalid id monitor index exists`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger),
+        )
+        createMonitor(monitor)
+        // Try deleting non-existing workflow
+        try {
+            deleteWorkflow("-1")
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning DeleteWorkflow Action error ",
+                    it.contains("Workflow not found.")
+                )
+            }
+        }
+    }
+
+    fun `test create workflow without delegate failure`() {
+        val workflow = randomWorkflow(
+            monitorIds = Collections.emptyList()
+        )
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Delegates list can not be empty.")
+                )
+            }
+        }
+    }
+
+    fun `test create workflow with 26 delegates failure`() {
+        val monitorsIds = mutableListOf<String>()
+        for (i in 0..25) {
+            monitorsIds.add(UUID.randomUUID().toString())
+        }
+        val workflow = randomWorkflow(
+            monitorIds = monitorsIds
+        )
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Delegates list can not be larger then 25.")
+                )
+            }
+        }
+    }
+
+    fun `test update workflow without delegate failure`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val monitor1 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+
+        val monitor2 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger),
+        )
+
+        val monitorResponse1 = createMonitor(monitor1)!!
+        val monitorResponse2 = createMonitor(monitor2)!!
+
+        var workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse1.id, monitorResponse2.id)
+        )
+
+        val workflowResponse = upsertWorkflow(workflow)!!
+        assertNotNull("Workflow creation failed", workflowResponse)
+
+        workflow = randomWorkflow(
+            id = workflowResponse.id,
+            monitorIds = Collections.emptyList()
+        )
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Delegates list can not be empty.")
+                )
+            }
+        }
+    }
+
+    fun `test create workflow duplicate delegate failure`() {
+        val workflow = randomWorkflow(
+            monitorIds = listOf("1", "1", "2")
+        )
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Duplicate delegates not allowed")
+                )
+            }
+        }
+    }
+
+    fun `test update workflow duplicate delegate failure`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+
+        val monitorResponse = createMonitor(monitor)!!
+
+        var workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id)
+        )
+
+        val workflowResponse = upsertWorkflow(workflow)!!
+        assertNotNull("Workflow creation failed", workflowResponse)
+
+        workflow = randomWorkflow(
+            id = workflowResponse.id,
+            monitorIds = listOf("1", "1", "2")
+        )
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Duplicate delegates not allowed")
+                )
+            }
+        }
+    }
+
+    fun `test create workflow delegate monitor doesn't exist failure`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+        val monitorResponse = createMonitor(monitor)!!
+
+        val workflow = randomWorkflow(
+            monitorIds = listOf("-1", monitorResponse.id)
+        )
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("are not valid monitor ids")
+                )
+            }
+        }
+    }
+
+    fun `test update workflow delegate monitor doesn't exist failure`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+        val monitorResponse = createMonitor(monitor)!!
+
+        var workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id)
+        )
+        val workflowResponse = upsertWorkflow(workflow)!!
+        assertNotNull("Workflow creation failed", workflowResponse)
+
+        workflow = randomWorkflow(
+            id = workflowResponse.id,
+            monitorIds = listOf("-1", monitorResponse.id)
+        )
+
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("are not valid monitor ids")
+                )
+            }
+        }
+    }
+
+    fun `test create workflow sequence order not correct failure`() {
+        val delegates = listOf(
+            Delegate(1, "monitor-1"),
+            Delegate(1, "monitor-2"),
+            Delegate(2, "monitor-3")
+        )
+        val workflow = randomWorkflowWithDelegates(
+            delegates = delegates
+        )
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Sequence ordering of delegate monitor shouldn't contain duplicate order values")
+                )
+            }
+        }
+    }
+
+    fun `test update workflow sequence order not correct failure`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+        val monitorResponse = createMonitor(monitor)!!
+
+        var workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id)
+        )
+        val workflowResponse = upsertWorkflow(workflow)!!
+        assertNotNull("Workflow creation failed", workflowResponse)
+
+        val delegates = listOf(
+            Delegate(1, "monitor-1"),
+            Delegate(1, "monitor-2"),
+            Delegate(2, "monitor-3")
+        )
+        workflow = randomWorkflowWithDelegates(
+            id = workflowResponse.id,
+            delegates = delegates
+        )
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Sequence ordering of delegate monitor shouldn't contain duplicate order values")
+                )
+            }
+        }
+    }
+
+    fun `test create workflow chained findings monitor not in sequence failure`() {
+        val delegates = listOf(
+            Delegate(1, "monitor-1"),
+            Delegate(2, "monitor-2", ChainedMonitorFindings("monitor-1")),
+            Delegate(3, "monitor-3", ChainedMonitorFindings("monitor-x"))
+        )
+        val workflow = randomWorkflowWithDelegates(
+            delegates = delegates
+        )
+
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Chained Findings Monitor monitor-x doesn't exist in sequence")
+                )
+            }
+        }
+    }
+
+    fun `test create workflow query monitor chained findings monitor failure`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val docMonitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+        val docMonitorResponse = createMonitor(docMonitor)!!
+
+        val queryMonitor = randomQueryLevelMonitor()
+        val queryMonitorResponse = createMonitor(queryMonitor)!!
+
+        val workflow = randomWorkflow(
+            monitorIds = listOf(queryMonitorResponse.id, docMonitorResponse.id)
+        )
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Query level monitor can't be part of chained findings")
+                )
+            }
+        }
+    }
+
+    fun `test create workflow delegate and chained finding monitor different indices failure`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val docMonitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+        val docMonitorResponse = createMonitor(docMonitor)!!
+
+        val index1 = "$index-1"
+        createTestIndex(index1)
+
+        val docLevelInput1 = DocLevelMonitorInput(
+            "description", listOf(index1), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+
+        val docMonitor1 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput1),
+            triggers = listOf(trigger)
+        )
+        val docMonitorResponse1 = createMonitor(docMonitor1)!!
+
+        val workflow = randomWorkflow(
+            monitorIds = listOf(docMonitorResponse1.id, docMonitorResponse.id)
+        )
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("doesn't query all of chained findings monitor's indices")
+                )
+            }
+        }
+    }
+
+    fun `test create workflow when monitor index not initialized failure`() {
+        val delegates = listOf(
+            Delegate(1, "monitor-1")
+        )
+        val workflow = randomWorkflowWithDelegates(
+            delegates = delegates
+        )
+
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Monitors not found")
+                )
+            }
+        }
+    }
+
+    fun `test update workflow chained findings monitor not in sequence failure`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+        val monitorResponse = createMonitor(monitor)!!
+
+        var workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id)
+        )
+        val workflowResponse = upsertWorkflow(workflow)!!
+        assertNotNull("Workflow creation failed", workflowResponse)
+
+        val delegates = listOf(
+            Delegate(1, "monitor-1"),
+            Delegate(2, "monitor-2", ChainedMonitorFindings("monitor-1")),
+            Delegate(3, "monitor-3", ChainedMonitorFindings("monitor-x"))
+        )
+        workflow = randomWorkflowWithDelegates(
+            id = workflowResponse.id,
+            delegates = delegates
+        )
+
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Chained Findings Monitor monitor-x doesn't exist in sequence")
+                )
+            }
+        }
+    }
+
+    fun `test create workflow chained findings order not correct failure`() {
+        val delegates = listOf(
+            Delegate(1, "monitor-1"),
+            Delegate(3, "monitor-2", ChainedMonitorFindings("monitor-1")),
+            Delegate(2, "monitor-3", ChainedMonitorFindings("monitor-2"))
+        )
+        val workflow = randomWorkflowWithDelegates(
+            delegates = delegates
+        )
+
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Chained Findings Monitor monitor-2 should be executed before monitor monitor-3")
+                )
+            }
+        }
+    }
+
+    fun `test update workflow chained findings order not correct failure`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+        val monitorResponse = createMonitor(monitor)!!
+
+        var workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id)
+        )
+        val workflowResponse = upsertWorkflow(workflow)!!
+        assertNotNull("Workflow creation failed", workflowResponse)
+
+        val delegates = listOf(
+            Delegate(1, "monitor-1"),
+            Delegate(3, "monitor-2", ChainedMonitorFindings("monitor-1")),
+            Delegate(2, "monitor-3", ChainedMonitorFindings("monitor-2"))
+        )
+        workflow = randomWorkflowWithDelegates(
+            delegates = delegates
+        )
+
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Chained Findings Monitor monitor-2 should be executed before monitor monitor-3")
+                )
+            }
+        }
+    }
+
+    fun `test create workflow with chained alert triggers`() {
+        val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(docQuery1)
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val customFindingsIndex = "custom_findings_index"
+        val customFindingsIndexPattern = "custom_findings_index-1"
+        val customQueryIndex = "custom_alerts_index"
+        val monitor1 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger),
+            dataSources = DataSources(
+                queryIndex = customQueryIndex,
+                findingsIndex = customFindingsIndex,
+                findingsIndexPattern = customFindingsIndexPattern
+            )
+        )
+
+        val monitor2 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger),
+            dataSources = DataSources(
+                queryIndex = customQueryIndex,
+                findingsIndex = customFindingsIndex,
+                findingsIndexPattern = customFindingsIndexPattern
+            )
+        )
+
+        val monitorResponse1 = createMonitor(monitor1)!!
+        val monitorResponse2 = createMonitor(monitor2)!!
+
+        val chainedAlertTrigger1 = randomChainedAlertTrigger(
+            condition = Script("monitor[id=${monitorResponse1.id}] && monitor[id=${monitorResponse2.id}]")
+        )
+        val chainedAlertTrigger2 = randomChainedAlertTrigger(
+            condition = Script("monitor[id=${monitorResponse1.id}] || monitor[id=${monitorResponse2.id}]")
+        )
+        val workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse1.id, monitorResponse2.id),
+            triggers = listOf(
+                chainedAlertTrigger1,
+                chainedAlertTrigger2
+            )
+        )
+        val workflowResponse = upsertWorkflow(workflow)!!
+        val workflowById = searchWorkflow(workflowResponse.id)!!
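+        // The chained alert triggers should round-trip through the workflow index unchanged;
+        // name, id, and the raw condition source (idOrCode) are compared field by field below.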
+
+        assertEquals("Workflow triggers size not correct", workflowById.triggers.size, 2)
+        assertEquals("Workflow trigger not correct", workflowById.triggers.get(0).name, chainedAlertTrigger1.name)
+        assertEquals("Workflow trigger not correct", workflowById.triggers.get(1).name, chainedAlertTrigger2.name)
+        assertEquals("Workflow trigger not correct", workflowById.triggers.get(0).id, chainedAlertTrigger1.id)
+        assertEquals("Workflow trigger not correct", workflowById.triggers.get(1).id, chainedAlertTrigger2.id)
+        assertEquals(
+            "Workflow trigger not correct",
+            (workflowById.triggers.get(0) as ChainedAlertTrigger).condition.idOrCode,
+            chainedAlertTrigger1.condition.idOrCode
+        )
+        assertEquals(
+            "Workflow trigger not correct",
+            (workflowById.triggers.get(1) as ChainedAlertTrigger).condition.idOrCode,
+            chainedAlertTrigger2.condition.idOrCode
+        )
+    }
+
+    fun `test postIndex on workflow update with trigger deletion`() {
+        val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf())
+        val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1))
+        val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        var monitor1 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput1),
+            triggers = listOf(trigger1)
+        )
+        var monitor2 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput1),
+            triggers = listOf(trigger1)
+        )
+        val monitorResponse = createMonitor(monitor1)!!
+        val monitorResponse2 = createMonitor(monitor2)!!
+
+        val andTrigger = randomChainedAlertTrigger(
+            name = "1And2",
+            condition = Script("monitor[id=${monitorResponse.id}] && monitor[id=${monitorResponse2.id}]")
+        )
+        val notTrigger = randomChainedAlertTrigger(
+            name = "Not1OrNot2",
+            condition = Script("!monitor[id=${monitorResponse.id}] || !monitor[id=${monitorResponse2.id}]")
+        )
+        var workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id, monitorResponse2.id),
+            triggers = listOf(andTrigger)
+        )
+        val workflowResponse = upsertWorkflow(workflow)!!
+        val workflowById = searchWorkflow(workflowResponse.id)
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc1 = """{
+            "message" : "This is an error from IAD region",
+            "source.ip.v6.v2" : 16644,
+            "test_strict_date_time" : "$testTime",
+            "test_field_1" : "us-west-2"
+        }"""
+        indexDoc(index, "1", testDoc1)
+        val workflowId = workflowById!!.id
+        var executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!!
+        var res = getWorkflowAlerts(
+            workflowId,
+        )
+        var chainedAlerts = res.alerts
+        Assert.assertTrue(chainedAlerts.size == 1)
+        val updatedWorkflowResponse = upsertWorkflow(
+            workflowById.copy(triggers = listOf(notTrigger)),
+            workflowResponse.id,
+            RestRequest.Method.PUT
+        )!!
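+        // Replacing andTrigger with notTrigger removes a trigger; the postIndex hook is expected
+        // to move the chained alert for the removed trigger into the alert history index in
+        // DELETED state, which the waitUntil below polls for.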
+        val updatedWorkflow = searchWorkflow(workflowResponse.id)
+        Assert.assertTrue(updatedWorkflow!!.triggers.size == 1)
+        Assert.assertTrue(updatedWorkflow.triggers[0].id == notTrigger.id)
+        OpenSearchTestCase.waitUntil({
+            val searchRequest = SearchRequest(AlertIndices.ALERT_HISTORY_ALL)
+            val sr = client().search(searchRequest).get()
+            sr.hits.hits.size == 3
+        }, 5, TimeUnit.MINUTES)
+        val searchRequest = SearchRequest(AlertIndices.ALERT_HISTORY_ALL)
+        val sr = client().search(searchRequest).get()
+        Assert.assertTrue(sr.hits.hits.size == 3)
+        val alerts = sr.hits.map { hit ->
+            val xcp = XContentHelper.createParser(
+                xContentRegistry(),
+                LoggingDeprecationHandler.INSTANCE,
+                hit.sourceRef,
+                XContentType.JSON
+            )
+            XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp)
+            val alert = Alert.parse(xcp, hit.id, hit.version)
+            alert
+        }
+        Assert.assertTrue(alerts.stream().anyMatch { it.state == Alert.State.DELETED && chainedAlerts[0].id == it.id })
+    }
+
+    fun `test postDelete on workflow deletion`() {
+        val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf())
+        val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1))
+        val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        var monitor1 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput1),
+            triggers = listOf(trigger1)
+        )
+        var monitor2 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput1),
+            triggers = listOf(trigger1)
+        )
+        val monitorResponse = createMonitor(monitor1)!!
+        val monitorResponse2 = createMonitor(monitor2)!!
+
+        val andTrigger = randomChainedAlertTrigger(
+            name = "1And2",
+            condition = Script("monitor[id=${monitorResponse.id}] && monitor[id=${monitorResponse2.id}]")
+        )
+        val notTrigger = randomChainedAlertTrigger(
+            name = "Not1OrNot2",
+            condition = Script("!monitor[id=${monitorResponse.id}] || !monitor[id=${monitorResponse2.id}]")
+        )
+        var workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id, monitorResponse2.id),
+            triggers = listOf(andTrigger)
+        )
+        val workflowResponse = upsertWorkflow(workflow)!!
+        val workflowById = searchWorkflow(workflowResponse.id)
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc1 = """{
+            "message" : "This is an error from IAD region",
+            "source.ip.v6.v2" : 16644,
+            "test_strict_date_time" : "$testTime",
+            "test_field_1" : "us-west-2"
+        }"""
+        indexDoc(index, "1", testDoc1)
+        val workflowId = workflowById!!.id
+        var executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!!
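+        // Deleting the workflow (keeping its delegate monitors) should fire the postDelete hook,
+        // sweeping the chained alert fetched below into the alert history index as DELETED.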
+        var res = getWorkflowAlerts(
+            workflowId,
+        )
+        var chainedAlerts = res.alerts
+        Assert.assertTrue(chainedAlerts.size == 1)
+        val deleteRes = deleteWorkflow(workflowId, false)
+        logger.info(deleteRes)
+        OpenSearchTestCase.waitUntil({
+            val searchRequest = SearchRequest(AlertIndices.ALERT_HISTORY_ALL)
+            val sr = client().search(searchRequest).get()
+            sr.hits.hits.size == 3
+        }, 5, TimeUnit.MINUTES)
+        val searchRequest = SearchRequest(AlertIndices.ALERT_HISTORY_ALL)
+        val sr = client().search(searchRequest).get()
+        Assert.assertTrue(sr.hits.hits.size == 3)
+        val alerts = sr.hits.map { hit ->
+            val xcp = XContentHelper.createParser(
+                xContentRegistry(),
+                LoggingDeprecationHandler.INSTANCE,
+                hit.sourceRef,
+                XContentType.JSON
+            )
+            XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp)
+            val alert = Alert.parse(xcp, hit.id, hit.version)
+            alert
+        }
+        Assert.assertTrue(alerts.stream().anyMatch { it.state == Alert.State.DELETED && chainedAlerts[0].id == it.id })
+    }
+
+    fun `test get chained alerts with alertId paginating for associated alerts`() {
+        val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf())
+        val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1))
+        val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        var monitor1 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput1),
+            triggers = listOf(trigger1)
+        )
+        var monitor2 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput1),
+            triggers = listOf(trigger1)
+        )
+        val monitorResponse = createMonitor(monitor1)!!
+        val monitorResponse2 = createMonitor(monitor2)!!
+
+        val andTrigger = randomChainedAlertTrigger(
+            name = "1And2",
+            condition = Script("monitor[id=${monitorResponse.id}] && monitor[id=${monitorResponse2.id}]")
+        )
+        var workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id, monitorResponse2.id),
+            triggers = listOf(andTrigger)
+        )
+        val workflowResponse = upsertWorkflow(workflow)!!
+        val workflowById = searchWorkflow(workflowResponse.id)
+        val workflowId = workflowById!!.id
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc1 = """{
+            "message" : "This is an error from IAD region",
+            "source.ip.v6.v2" : 16644,
+            "test_strict_date_time" : "$testTime",
+            "test_field_1" : "us-west-2"
+        }"""
+        var i = 1
+        val indexRequests = mutableListOf<IndexRequest>()
+        while (i++ < 300) {
+            indexRequests += IndexRequest(index).source(testDoc1, XContentType.JSON).id("$i").opType(DocWriteRequest.OpType.INDEX)
+        }
+        val bulkResponse: BulkResponse =
+            client().bulk(BulkRequest().add(indexRequests).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)).get()
+        if (bulkResponse.hasFailures()) {
+            fail("Bulk request to index to test index has failed")
+        }
+        var executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!!
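+        // A single chained alert backed by a few hundred associated alerts is expected here.
+        // Assuming the numeric Table arguments are page size and start offset (matching the
+        // calls below), each request pages through 100 associated alerts at a distinct offset,
+        // and the id sets from different pages are then checked for disjointness.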
+ var res = getWorkflowAlerts( + workflowId = workflowId + ) + Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults[andTrigger.id]!!.triggered) + var chainedAlerts = res.alerts + Assert.assertTrue(chainedAlerts.size == 1) + Assert.assertEquals(res.associatedAlerts.size, 10) + var res100to200 = getWorkflowAlerts( + workflowId = workflowId, + alertIds = listOf(res.alerts[0].id), + table = Table("asc", "monitor_id", null, 100, 100, null) + ) + Assert.assertEquals(res100to200.associatedAlerts.size, 100) + var res200to300 = getWorkflowAlerts( + workflowId = workflowId, + alertIds = listOf(res.alerts[0].id), + table = Table("asc", "monitor_id", null, 100, 201, null) + ) + Assert.assertEquals(res200to300.associatedAlerts.size, 100) + var res0to99 = getWorkflowAlerts( + workflowId = workflowId, + alertIds = listOf(res.alerts[0].id), + table = Table("asc", "monitor_id", null, 100, 0, null) + ) + Assert.assertEquals(res0to99.associatedAlerts.size, 100) + + val ids100to200 = res100to200.associatedAlerts.stream().map { it.id }.collect(Collectors.toSet()) + val idsSet0to99 = res0to99.associatedAlerts.stream().map { it.id }.collect(Collectors.toSet()) + val idsSet200to300 = res200to300.associatedAlerts.stream().map { it.id }.collect(Collectors.toSet()) + + Assert.assertTrue(idsSet0to99.all { it !in ids100to200 }) + Assert.assertTrue(idsSet0to99.all { it !in idsSet200to300 }) + Assert.assertTrue(ids100to200.all { it !in idsSet200to300 }) + } + + fun `test existing chained alert active alert is updated on consequtive trigger condition match`() { + val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger1) + ) + var monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger1) + ) + val monitorResponse = createMonitor(monitor1)!! + val monitorResponse2 = createMonitor(monitor2)!! + val notTrigger = randomChainedAlertTrigger( + name = "Not1OrNot2", + condition = Script("!monitor[id=${monitorResponse.id}] || !monitor[id=${monitorResponse2.id}]") + ) + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id, monitorResponse2.id), + triggers = listOf(notTrigger) + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + val workflowId = workflowById!!.id + + /** no ACTIVE alert exists and chained alert trigger matches. Expect: new ACTIVE alert created**/ + var executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! + assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults[notTrigger.id]!!.triggered) + val workflowAlerts = getWorkflowAlerts(workflowId) + Assert.assertTrue(workflowAlerts.alerts.size == 1) + Assert.assertEquals(workflowAlerts.alerts[0].state, Alert.State.ACTIVE) + /** ACTIVE alert exists and chained alert trigger matched again. Expect: existing alert updated and remains in ACTIVE*/ + var executeWorkflowResponse1 = executeWorkflow(workflowById, workflowId, false)!! 
+ assertTrue(executeWorkflowResponse1.workflowRunResult.triggerResults[notTrigger.id]!!.triggered) + val udpdatedActiveAlerts = getWorkflowAlerts(workflowId) + Assert.assertTrue(udpdatedActiveAlerts.alerts.size == 1) + Assert.assertEquals(udpdatedActiveAlerts.alerts[0].state, Alert.State.ACTIVE) + Assert.assertTrue(udpdatedActiveAlerts.alerts[0].lastNotificationTime!! > workflowAlerts.alerts[0].lastNotificationTime!!) + + /** Acknowledge ACTIVE alert*/ + val ackChainedAlerts = ackChainedAlerts(udpdatedActiveAlerts.alerts.stream().map { it.id }.collect(Collectors.toList()), workflowId) + Assert.assertTrue(ackChainedAlerts.acknowledged.size == 1) + Assert.assertTrue(ackChainedAlerts.missing.size == 0) + Assert.assertTrue(ackChainedAlerts.failed.size == 0) + + /** ACKNOWLEDGED alert exists and chained alert trigger matched again. Expect: existing alert updated and remains ACKNOWLEDGED*/ + var executeWorkflowResponse2 = executeWorkflow(workflowById, workflowId, false)!! + assertTrue(executeWorkflowResponse2.workflowRunResult.triggerResults[notTrigger.id]!!.triggered) + val acknowledgedAlert = getWorkflowAlerts(workflowId, alertState = Alert.State.ACKNOWLEDGED) + Assert.assertTrue(acknowledgedAlert.alerts.size == 1) + Assert.assertEquals(acknowledgedAlert.alerts[0].state, Alert.State.ACKNOWLEDGED) + Assert.assertTrue(acknowledgedAlert.alerts[0].lastNotificationTime!! == udpdatedActiveAlerts.alerts[0].lastNotificationTime!!) + + /** ACKNOWLEDGED alert exists and chained alert trigger NOT matched. Expect: ACKNOWLEDGD alert marked as COMPLETED**/ + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc1 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16644, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "1", testDoc1) + var executeWorkflowResponse3 = executeWorkflow(workflowById, workflowId, false)!! + assertFalse(executeWorkflowResponse3.workflowRunResult.triggerResults[notTrigger.id]!!.triggered) + val completedAlert = getWorkflowAlerts(workflowId, alertState = Alert.State.COMPLETED) + Assert.assertTrue(completedAlert.alerts.size == 1) + Assert.assertEquals(completedAlert.alerts[0].state, Alert.State.COMPLETED) + Assert.assertTrue(completedAlert.alerts[0].endTime!! > acknowledgedAlert.alerts[0].lastNotificationTime!!) + + /** COMPLETED state alert exists and trigger matches. Expect: new ACTIVE state chaiend alert created*/ + var executeWorkflowResponse4 = executeWorkflow(workflowById, workflowId, false)!! + assertTrue(executeWorkflowResponse4.workflowRunResult.triggerResults[notTrigger.id]!!.triggered) + val newActiveAlert = getWorkflowAlerts(workflowId, alertState = Alert.State.ACTIVE) + Assert.assertTrue(newActiveAlert.alerts.size == 1) + Assert.assertEquals(newActiveAlert.alerts[0].state, Alert.State.ACTIVE) + Assert.assertTrue(newActiveAlert.alerts[0].lastNotificationTime!! > acknowledgedAlert.alerts[0].lastNotificationTime!!) + val completedAlert1 = getWorkflowAlerts(workflowId, alertState = Alert.State.COMPLETED) + Assert.assertTrue(completedAlert1.alerts.size == 1) + Assert.assertEquals(completedAlert1.alerts[0].state, Alert.State.COMPLETED) + Assert.assertTrue(completedAlert1.alerts[0].endTime!! > acknowledgedAlert.alerts[0].lastNotificationTime!!) 
+ } +} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorRunnerServiceIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorRunnerServiceIT.kt index d56ca4d95..27a653d5f 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorRunnerServiceIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorRunnerServiceIT.kt @@ -6,23 +6,7 @@ package org.opensearch.alerting import org.junit.Assert -import org.opensearch.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder -import org.opensearch.alerting.alerts.AlertError import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.core.model.IntervalSchedule -import org.opensearch.alerting.core.model.SearchInput -import org.opensearch.alerting.model.ActionExecutionResult -import org.opensearch.alerting.model.Alert -import org.opensearch.alerting.model.Alert.State.ACKNOWLEDGED -import org.opensearch.alerting.model.Alert.State.ACTIVE -import org.opensearch.alerting.model.Alert.State.COMPLETED -import org.opensearch.alerting.model.Alert.State.ERROR -import org.opensearch.alerting.model.Monitor -import org.opensearch.alerting.model.action.ActionExecutionPolicy -import org.opensearch.alerting.model.action.AlertCategory -import org.opensearch.alerting.model.action.PerAlertActionScope -import org.opensearch.alerting.model.action.PerExecutionActionScope -import org.opensearch.alerting.model.action.Throttle import org.opensearch.alerting.model.destination.CustomWebhook import org.opensearch.alerting.model.destination.Destination import org.opensearch.alerting.model.destination.email.Email @@ -32,13 +16,38 @@ import org.opensearch.alerting.util.getBucketKeysHash import org.opensearch.client.ResponseException import org.opensearch.client.WarningFailureException import org.opensearch.common.settings.Settings +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder +import org.opensearch.commons.alerting.alerts.AlertError +import org.opensearch.commons.alerting.model.ActionExecutionResult +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.Alert.State +import org.opensearch.commons.alerting.model.Alert.State.ACKNOWLEDGED +import org.opensearch.commons.alerting.model.Alert.State.ACTIVE +import org.opensearch.commons.alerting.model.Alert.State.COMPLETED +import org.opensearch.commons.alerting.model.Alert.State.ERROR +import org.opensearch.commons.alerting.model.DataSources +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.IntervalSchedule +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.alerting.model.action.ActionExecutionPolicy +import org.opensearch.commons.alerting.model.action.AlertCategory +import org.opensearch.commons.alerting.model.action.PerAlertActionScope +import org.opensearch.commons.alerting.model.action.PerExecutionActionScope +import org.opensearch.commons.alerting.model.action.Throttle import org.opensearch.commons.authuser.User +import org.opensearch.core.rest.RestStatus import org.opensearch.index.query.QueryBuilders -import org.opensearch.rest.RestStatus import org.opensearch.script.Script import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder import 
org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder
+import org.opensearch.search.aggregations.bucket.terms.MultiTermsAggregationBuilder
+import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder
+import org.opensearch.search.aggregations.metrics.CardinalityAggregationBuilder
+import org.opensearch.search.aggregations.support.MultiTermsValuesSourceConfig
 import org.opensearch.search.builder.SearchSourceBuilder
+import org.opensearch.test.OpenSearchTestCase
 import java.net.URLEncoder
 import java.time.Instant
 import java.time.ZonedDateTime
@@ -47,7 +56,7 @@ import java.time.temporal.ChronoUnit
 import java.time.temporal.ChronoUnit.DAYS
 import java.time.temporal.ChronoUnit.MILLIS
 import java.time.temporal.ChronoUnit.MINUTES
-import kotlin.collections.HashMap
+import java.util.concurrent.TimeUnit

 class MonitorRunnerServiceIT : AlertingRestTestCase() {

@@ -131,7 +140,9 @@ class MonitorRunnerServiceIT : AlertingRestTestCase() {
         verifyAlert(firstRunAlert, monitor)
         // Runner uses ThreadPool.CachedTimeThread thread which only updates once every 200 ms. Wait a bit to
         // see lastNotificationTime change.
-        Thread.sleep(200)
+        OpenSearchTestCase.waitUntil({
+            return@waitUntil false
+        }, 200, TimeUnit.MILLISECONDS) // a never-true condition makes waitUntil poll until the full 200 ms timeout elapses
         executeMonitor(monitor.id)
         val secondRunAlert = searchAlerts(monitor).single()
         verifyAlert(secondRunAlert, monitor)
@@ -189,6 +200,31 @@ class MonitorRunnerServiceIT : AlertingRestTestCase() {
         Assert.assertEquals(404, exception?.response?.statusLine?.statusCode)
     }

+    fun `test execute doclevel monitor without triggers success`() {
+        // a doc-level monitor with no triggers should still run, persist findings, and create no alerts.
+        val index = "foo"
+        createIndex(index, Settings.EMPTY)
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "1", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery))
+        val monitor = createMonitor(
+            randomDocumentLevelMonitor(
+                inputs = listOf(docLevelInput),
+                triggers = listOf()
+            )
+        )
+        val doc = """
+            { "test_field": "us-west-2" }
+        """.trimIndent()
+        indexDoc(index, "1", doc)
+
+        val response = executeMonitor(monitor.id)
+        val output = entityAsMap(response)
+        assertEquals(monitor.name, output["monitor_name"])
+        assertTrue("Unexpected monitor error message", (output["error"] as String?).isNullOrEmpty())
+        assertTrue(searchFindings(monitor).size == 1)
+        assertTrue(searchAlerts(monitor).isEmpty())
+    }
+
     fun `test acknowledged alert does not suppress subsequent errors`() {
         val destinationId = createDestination().id
@@ -233,7 +269,9 @@
         // Runner uses ThreadPool.CachedTimeThread thread which only updates once every 200 ms. Wait a bit to
         // let lastNotificationTime change. W/o this sleep the test can result in a false negative.
- Thread.sleep(200) + OpenSearchTestCase.waitUntil({ + return@waitUntil false + }, 200, TimeUnit.MILLISECONDS) val response = executeMonitor(monitor.id) val output = entityAsMap(response) @@ -665,7 +703,7 @@ class MonitorRunnerServiceIT : AlertingRestTestCase() { val monitor = createMonitor( randomQueryLevelMonitor( triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = actions)), - schedule = IntervalSchedule(interval = 1, unit = ChronoUnit.MINUTES) + schedule = IntervalSchedule(interval = 1, unit = MINUTES) ) ) val monitorRunResultNotThrottled = entityAsMap(executeMonitor(monitor.id)) @@ -733,7 +771,9 @@ class MonitorRunnerServiceIT : AlertingRestTestCase() { verifyAlert(activeAlert1.single(), monitor, ACTIVE) val actionResults1 = verifyActionExecutionResultInAlert(activeAlert1[0], mutableMapOf(Pair(actionThrottleEnabled.id, 0))) - Thread.sleep(200) + OpenSearchTestCase.waitUntil({ + return@waitUntil false + }, 200, TimeUnit.MILLISECONDS) updateMonitor(monitor.copy(triggers = listOf(trigger.copy(condition = NEVER_RUN)), id = monitor.id)) executeMonitor(monitor.id) val completedAlert = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN).single() @@ -936,7 +976,7 @@ class MonitorRunnerServiceIT : AlertingRestTestCase() { // GIVEN val indices = (1..5).map { createTestIndex() }.toTypedArray() val pathParams = indices.joinToString(",") - val path = "/_cluster/health/" + val path = "/_cluster/health" val input = randomClusterMetricsInput( path = path, pathParams = pathParams @@ -995,7 +1035,7 @@ class MonitorRunnerServiceIT : AlertingRestTestCase() { } } - fun `test execute AD monitor doesn't return search result without user`() { + fun `test execute AD monitor returns search result without user`() { // TODO: change to REST API call to test security enabled case if (!securityEnabled()) { val user = randomUser() @@ -1015,14 +1055,14 @@ class MonitorRunnerServiceIT : AlertingRestTestCase() { val searchResult = (output.objectMap("input_results")["results"] as List>).first() @Suppress("UNCHECKED_CAST") val total = searchResult.stringMap("hits")?.get("total") as Map - assertEquals("Incorrect search result", 1, total["value"]) + assertEquals("Incorrect search result", 5, total["value"]) @Suppress("UNCHECKED_CAST") val maxAnomalyGrade = searchResult.stringMap("aggregations")?.get("max_anomaly_grade") as Map - assertEquals("Incorrect search result", 0.75, maxAnomalyGrade["value"]) + assertEquals("Incorrect search result", 0.9, maxAnomalyGrade["value"]) } } - fun `test execute AD monitor doesn't return search result with empty backend role`() { + fun `test execute AD monitor returns search result with empty backend role`() { // TODO: change to REST API call to test security enabled case if (!securityEnabled()) { val user = randomUser() @@ -1045,7 +1085,7 @@ class MonitorRunnerServiceIT : AlertingRestTestCase() { val searchResult = (output.objectMap("input_results")["results"] as List>).first() @Suppress("UNCHECKED_CAST") val total = searchResult.stringMap("hits")?.get("total") as Map - assertEquals("Incorrect search result", 1, total["value"]) + assertEquals("Incorrect search result", 5, total["value"]) @Suppress("UNCHECKED_CAST") val maxAnomalyGrade = searchResult.stringMap("aggregations")?.get("max_anomaly_grade") as Map assertEquals("Incorrect search result", 0.9, maxAnomalyGrade["value"]) @@ -1071,10 +1111,10 @@ class MonitorRunnerServiceIT : AlertingRestTestCase() { val searchResult = (output.objectMap("input_results")["results"] as List>).first() 
@Suppress("UNCHECKED_CAST") val total = searchResult.stringMap("hits")?.get("total") as Map - assertEquals("Incorrect search result", 3, total["value"]) + assertEquals("Incorrect search result", 5, total["value"]) @Suppress("UNCHECKED_CAST") val maxAnomalyGrade = searchResult.stringMap("aggregations")?.get("max_anomaly_grade") as Map - assertEquals("Incorrect search result", 0.8, maxAnomalyGrade["value"]) + assertEquals("Incorrect search result", 0.9, maxAnomalyGrade["value"]) } } @@ -1094,13 +1134,13 @@ class MonitorRunnerServiceIT : AlertingRestTestCase() { @Suppress("UNCHECKED_CAST") (output["trigger_results"] as HashMap).forEach { _, v -> - assertFalse((v as HashMap)["triggered"] as Boolean) + assertTrue((v as HashMap)["triggered"] as Boolean) } @Suppress("UNCHECKED_CAST") val searchResult = (output.objectMap("input_results")["results"] as List>).first() @Suppress("UNCHECKED_CAST") val total = searchResult.stringMap("hits")?.get("total") as Map - assertEquals("Incorrect search result", 0, total["value"]) + assertEquals("Incorrect search result", 5, total["value"]) } } @@ -1150,6 +1190,89 @@ class MonitorRunnerServiceIT : AlertingRestTestCase() { assertEquals("Incorrect search result", 2, buckets.size) } + fun `test execute bucket-level monitor returns search result with multi term agg`() { + val index = "test_index_1234" + indexDoc( + index, + "1", + """{"user_id": "1", + "ip_addr": "12345678", + "user_agent": "chrome" + } + """.trimIndent() + ) + indexDoc( + index, + "2", + """{"user_id": "2", + "ip_addr": "12345678", + "user_agent": "chrome" + } + """.trimIndent() + ) + indexDoc( + index, + "3", + """{"user_id": "2", + "ip_addr": "3443534", + "user_agent": "chrome" + } + """.trimIndent() + ) + + val triggerScript = """ + params.docCount > 0 + """.trimIndent() + + var trigger = randomBucketLevelTrigger() + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("_value" to "distinct_user_count", "docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "hot", + filter = null + ) + ) + + val m = randomBucketLevelMonitor( + triggers = listOf(trigger), + inputs = listOf( + SearchInput( + listOf(index), + SearchSourceBuilder().aggregation( + MultiTermsAggregationBuilder("hot") + .terms( + listOf( + MultiTermsValuesSourceConfig.Builder().setFieldName("ip_addr.keyword").build(), + MultiTermsValuesSourceConfig.Builder().setFieldName("user_agent.keyword").build() + ) + ) + .subAggregation(CardinalityAggregationBuilder("distinct_user_count").field("user_id.keyword")) + ) + ) + ) + ) + val monitor = createMonitor(m) + val response = executeMonitor(monitor.id, params = DRYRUN_MONITOR) + val output = entityAsMap(response) + + assertEquals(monitor.name, output["monitor_name"]) + @Suppress("UNCHECKED_CAST") + val searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + val buckets = searchResult.stringMap("aggregations")?.stringMap("hot")?.get("buckets") as List> + assertEquals("Incorrect search result", 2, buckets.size) + val distinctUserCountAgg1 = buckets.find { + it.get("key_as_string") == "12345678|chrome" + }!!.get("distinct_user_count") as Map + assertEquals(2, distinctUserCountAgg1.get("value")) + val distinctUserCountAgg2 = buckets.find { + it.get("key_as_string") == "3443534|chrome" + }!!.get("distinct_user_count") as Map + assertEquals(1, distinctUserCountAgg2.get("value")) + } + fun `test bucket-level monitor alert creation and completion`() 
{ val testIndex = createTestIndex() insertSampleTimeSerializedData( @@ -1283,7 +1406,9 @@ class MonitorRunnerServiceIT : AlertingRestTestCase() { // Runner uses ThreadPool.CachedTimeThread thread which only updates once every 200 ms. Wait a bit to // let lastNotificationTime change. W/o this sleep the test can result in a false negative. - Thread.sleep(200) + OpenSearchTestCase.waitUntil({ + return@waitUntil false + }, 200, TimeUnit.MILLISECONDS) executeMonitor(monitor.id) // Check that the lastNotification time of the acknowledged Alert wasn't updated and the active Alert's was @@ -1303,7 +1428,9 @@ class MonitorRunnerServiceIT : AlertingRestTestCase() { ) // Execute Monitor and check that both Alerts were updated - Thread.sleep(200) + OpenSearchTestCase.waitUntil({ + return@waitUntil false + }, 200, TimeUnit.MILLISECONDS) executeMonitor(monitor.id) currentAlerts = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN) val completedAlerts = currentAlerts.filter { it.state == COMPLETED } @@ -1320,6 +1447,209 @@ class MonitorRunnerServiceIT : AlertingRestTestCase() { ) } + fun `test bucket-level monitor with findings enabled on term agg`() { + val testIndex = createTestIndex() + insertSampleTimeSerializedData( + testIndex, + listOf( + "test_value_1", + "test_value_2" + ) + ) + + val query = QueryBuilders.rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + val termAgg = TermsAggregationBuilder("test_field").field("test_field") + val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(termAgg)) + val triggerScript = """ + params.docCount > 0 + """.trimIndent() + + // For the Actions ensure that there is at least one and any PER_ALERT actions contain ACTIVE, DEDUPED and COMPLETED in its policy + // so that the assertions done later in this test don't fail. + // The config is being mutated this way to still maintain the randomness in configuration (like including other ActionExecutionScope). 
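+        // e.g. an action whose policy is PerAlertActionScope(setOf(NEW)) is rewritten below to one whose
+        // scope is setOf(NEW, DEDUPED, COMPLETED); actions with any other scope pass through unchanged.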
+ val actions = randomActionsForBucketLevelTrigger(min = 1).map { + if (it.actionExecutionPolicy?.actionExecutionScope is PerAlertActionScope) { + it.copy( + actionExecutionPolicy = ActionExecutionPolicy( + PerAlertActionScope(setOf(AlertCategory.NEW, AlertCategory.DEDUPED, AlertCategory.COMPLETED)) + ) + ) + } else { + it + } + } + var trigger = randomBucketLevelTrigger(actions = actions) + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "test_field", + filter = null + ) + ) + val monitor = createMonitor( + randomBucketLevelMonitor( + inputs = listOf(input), + enabled = false, + triggers = listOf(trigger), + dataSources = DataSources(findingsEnabled = true) + ) + ) + executeMonitor(monitor.id) + + // Check created Alerts + var currentAlerts = searchAlerts(monitor) + assertEquals("Alerts not saved", 2, currentAlerts.size) + currentAlerts.forEach { alert -> + Assert.assertEquals("expected findings for alert", alert.findingIds.size, 1) + } + val findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("2")) + } + + fun `test bucket-level monitor with findings enabled on composite agg`() { + val testIndex = createTestIndex() + insertSampleTimeSerializedData( + testIndex, + listOf( + "test_value_1", + "test_value_2" + ) + ) + + val query = QueryBuilders.rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + val compositeSources = listOf( + TermsValuesSourceBuilder("test_field").field("test_field") + ) + val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) + val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) + val triggerScript = """ + params.docCount > 0 + """.trimIndent() + + // For the Actions ensure that there is at least one and any PER_ALERT actions contain ACTIVE, DEDUPED and COMPLETED in its policy + // so that the assertions done later in this test don't fail. + // The config is being mutated this way to still maintain the randomness in configuration (like including other ActionExecutionScope). 
+ val actions = randomActionsForBucketLevelTrigger(min = 1).map { + if (it.actionExecutionPolicy?.actionExecutionScope is PerAlertActionScope) { + it.copy( + actionExecutionPolicy = ActionExecutionPolicy( + PerAlertActionScope(setOf(AlertCategory.NEW, AlertCategory.DEDUPED, AlertCategory.COMPLETED)) + ) + ) + } else { + it + } + } + var trigger = randomBucketLevelTrigger(actions = actions) + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "composite_agg", + filter = null + ) + ) + val monitor = createMonitor( + randomBucketLevelMonitor( + inputs = listOf(input), + enabled = false, + triggers = listOf(trigger), + dataSources = DataSources(findingsEnabled = true) + ) + ) + executeMonitor(monitor.id) + + // Check created Alerts + var currentAlerts = searchAlerts(monitor) + assertEquals("Alerts not saved", 2, currentAlerts.size) + currentAlerts.forEach { alert -> + Assert.assertEquals("expected findings for alert", alert.findingIds.size, 1) + } + val findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("2")) + } + + fun `test bucket-level monitor with findings enabled for multiple group by fields`() { + val testIndex = createTestIndex() + insertSampleTimeSerializedData( + testIndex, + listOf( + "test_value_1", + "test_value_2" + ) + ) + + val query = QueryBuilders.rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + val compositeSources = listOf( + TermsValuesSourceBuilder("test_field").field("test_field"), + TermsValuesSourceBuilder("number").field("number") + ) + val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) + val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) + val triggerScript = """ + params.docCount > 0 + """.trimIndent() + + // For the Actions ensure that there is at least one and any PER_ALERT actions contain ACTIVE, DEDUPED and COMPLETED in its policy + // so that the assertions done later in this test don't fail. + // The config is being mutated this way to still maintain the randomness in configuration (like including other ActionExecutionScope). 
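+        // Note: in contrast to the single group-by tests above, the assertions at the end of this test
+        // expect zero findings (and alerts with no finding ids) when the composite aggregation groups by
+        // multiple fields.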
+ val actions = randomActionsForBucketLevelTrigger(min = 1).map { + if (it.actionExecutionPolicy?.actionExecutionScope is PerAlertActionScope) { + it.copy( + actionExecutionPolicy = ActionExecutionPolicy( + PerAlertActionScope(setOf(AlertCategory.NEW, AlertCategory.DEDUPED, AlertCategory.COMPLETED)) + ) + ) + } else { + it + } + } + var trigger = randomBucketLevelTrigger(actions = actions) + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "composite_agg", + filter = null + ) + ) + val monitor = createMonitor( + randomBucketLevelMonitor( + inputs = listOf(input), + enabled = false, + triggers = listOf(trigger), + dataSources = DataSources(findingsEnabled = true) + ) + ) + executeMonitor(monitor.id) + + // Check created Alerts + var currentAlerts = searchAlerts(monitor) + assertEquals("Alerts not saved", 2, currentAlerts.size) + currentAlerts.forEach { alert -> + Assert.assertEquals("expected findings for alert", alert.findingIds.size, 0) + } + val findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 0, findings.size) + } + @Suppress("UNCHECKED_CAST") fun `test bucket-level monitor with one good action and one bad action`() { val testIndex = createTestIndex() @@ -1622,7 +1952,9 @@ class MonitorRunnerServiceIT : AlertingRestTestCase() { // Runner uses ThreadPool.CachedTimeThread thread which only updates once every 200 ms. Wait a bit to // let Action executionTime change. W/o this sleep the test can result in a false negative. - Thread.sleep(200) + OpenSearchTestCase.waitUntil({ + return@waitUntil false + }, 200, TimeUnit.MILLISECONDS) val monitorRunResultThrottled = entityAsMap(executeMonitor(monitor.id)) verifyActionThrottleResultsForBucketLevelMonitor( monitorRunResult = monitorRunResultThrottled, @@ -1738,7 +2070,7 @@ class MonitorRunnerServiceIT : AlertingRestTestCase() { private fun verifyAlert( alert: Alert, monitor: Monitor, - expectedState: Alert.State = ACTIVE, + expectedState: State = ACTIVE, expectNotification: Boolean = true ) { assertNotNull(alert.id) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorTests.kt index 02631eac9..f6ed78541 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/MonitorTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/MonitorTests.kt @@ -5,7 +5,7 @@ package org.opensearch.alerting -import org.opensearch.alerting.model.Trigger +import org.opensearch.commons.alerting.model.Trigger import org.opensearch.test.OpenSearchTestCase import java.lang.IllegalArgumentException import java.time.Instant diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/ODFERestTestCase.kt b/alerting/src/test/kotlin/org/opensearch/alerting/ODFERestTestCase.kt index d81ac9602..74df1e644 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/ODFERestTestCase.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/ODFERestTestCase.kt @@ -5,7 +5,7 @@ package org.opensearch.alerting -import org.apache.http.HttpHost +import org.apache.hc.core5.http.HttpHost import org.junit.After import org.opensearch.client.Request import org.opensearch.client.RequestOptions @@ -13,15 +13,15 @@ import org.opensearch.client.RestClient import org.opensearch.client.WarningsHandler import org.opensearch.common.io.PathUtils import org.opensearch.common.settings.Settings -import 
org.opensearch.common.xcontent.DeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.XContentType import org.opensearch.commons.ConfigConstants.OPENSEARCH_SECURITY_SSL_HTTP_ENABLED import org.opensearch.commons.ConfigConstants.OPENSEARCH_SECURITY_SSL_HTTP_KEYSTORE_FILEPATH import org.opensearch.commons.ConfigConstants.OPENSEARCH_SECURITY_SSL_HTTP_KEYSTORE_KEYPASSWORD import org.opensearch.commons.ConfigConstants.OPENSEARCH_SECURITY_SSL_HTTP_KEYSTORE_PASSWORD import org.opensearch.commons.ConfigConstants.OPENSEARCH_SECURITY_SSL_HTTP_PEMCERT_FILEPATH import org.opensearch.commons.rest.SecureRestClientBuilder +import org.opensearch.core.xcontent.DeprecationHandler +import org.opensearch.core.xcontent.MediaType +import org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.test.rest.OpenSearchRestTestCase import java.io.IOException @@ -81,7 +81,7 @@ abstract class ODFERestTestCase : OpenSearchRestTestCase() { val response = client().performRequest(Request("GET", "/_cat/indices?format=json&expand_wildcards=all")) - val xContentType = XContentType.fromMediaType(response.entity.contentType.value) + val xContentType = MediaType.fromMediaType(response.entity.contentType) xContentType.xContent().createParser( NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, response.entity.content @@ -126,13 +126,19 @@ abstract class ODFERestTestCase : OpenSearchRestTestCase() { // create adminDN (super-admin) client val uri = javaClass.classLoader.getResource("sample.pem").toURI() val configPath = PathUtils.get(uri).parent.toAbsolutePath() - SecureRestClientBuilder(settings, configPath).setSocketTimeout(60000).build() + SecureRestClientBuilder(settings, configPath) + .setSocketTimeout(60000) + .setConnectionRequestTimeout(180000) + .build() } false -> { // create client with passed user val userName = System.getProperty("user") val password = System.getProperty("password") - SecureRestClientBuilder(hosts, isHttps(), userName, password).setSocketTimeout(60000).build() + SecureRestClientBuilder(hosts, isHttps(), userName, password) + .setSocketTimeout(60000) + .setConnectionRequestTimeout(180000) + .build() } } } else { diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/TestHelpers.kt b/alerting/src/test/kotlin/org/opensearch/alerting/TestHelpers.kt index 65606c225..143a77afd 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/TestHelpers.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/TestHelpers.kt @@ -6,43 +6,17 @@ package org.opensearch.alerting import junit.framework.TestCase.assertNull -import org.apache.http.Header -import org.apache.http.HttpEntity -import org.opensearch.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder -import org.opensearch.alerting.aggregation.bucketselectorext.BucketSelectorExtFilter -import org.opensearch.alerting.core.model.ClusterMetricsInput -import org.opensearch.alerting.core.model.DocLevelMonitorInput -import org.opensearch.alerting.core.model.DocLevelQuery -import org.opensearch.alerting.core.model.Input -import org.opensearch.alerting.core.model.IntervalSchedule -import org.opensearch.alerting.core.model.Schedule -import org.opensearch.alerting.core.model.SearchInput -import org.opensearch.alerting.model.ActionExecutionResult +import org.apache.hc.core5.http.Header +import org.apache.hc.core5.http.HttpEntity import org.opensearch.alerting.model.ActionRunResult -import 
org.opensearch.alerting.model.AggregationResultBucket -import org.opensearch.alerting.model.Alert -import org.opensearch.alerting.model.BucketLevelTrigger import org.opensearch.alerting.model.BucketLevelTriggerRunResult -import org.opensearch.alerting.model.DocumentLevelTrigger import org.opensearch.alerting.model.DocumentLevelTriggerRunResult -import org.opensearch.alerting.model.Finding import org.opensearch.alerting.model.InputRunResults -import org.opensearch.alerting.model.Monitor import org.opensearch.alerting.model.MonitorRunResult -import org.opensearch.alerting.model.QueryLevelTrigger import org.opensearch.alerting.model.QueryLevelTriggerRunResult -import org.opensearch.alerting.model.Trigger -import org.opensearch.alerting.model.action.Action -import org.opensearch.alerting.model.action.ActionExecutionPolicy -import org.opensearch.alerting.model.action.ActionExecutionScope -import org.opensearch.alerting.model.action.AlertCategory -import org.opensearch.alerting.model.action.PerAlertActionScope -import org.opensearch.alerting.model.action.PerExecutionActionScope -import org.opensearch.alerting.model.action.Throttle import org.opensearch.alerting.model.destination.email.EmailAccount import org.opensearch.alerting.model.destination.email.EmailEntry import org.opensearch.alerting.model.destination.email.EmailGroup -import org.opensearch.alerting.opensearchapi.string import org.opensearch.alerting.util.getBucketKeysHash import org.opensearch.client.Request import org.opensearch.client.RequestOptions @@ -50,16 +24,50 @@ import org.opensearch.client.Response import org.opensearch.client.RestClient import org.opensearch.client.WarningsHandler import org.opensearch.common.UUIDs -import org.opensearch.common.settings.SecureString import org.opensearch.common.settings.Settings import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder import org.opensearch.common.xcontent.XContentFactory -import org.opensearch.common.xcontent.XContentParser import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtFilter +import org.opensearch.commons.alerting.model.ActionExecutionResult +import org.opensearch.commons.alerting.model.AggregationResultBucket +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.BucketLevelTrigger +import org.opensearch.commons.alerting.model.ChainedAlertTrigger +import org.opensearch.commons.alerting.model.ChainedMonitorFindings +import org.opensearch.commons.alerting.model.ClusterMetricsInput +import org.opensearch.commons.alerting.model.CompositeInput +import org.opensearch.commons.alerting.model.DataSources +import org.opensearch.commons.alerting.model.Delegate +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.DocumentLevelTrigger +import org.opensearch.commons.alerting.model.Finding +import org.opensearch.commons.alerting.model.Input +import org.opensearch.commons.alerting.model.IntervalSchedule +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.QueryLevelTrigger +import 
org.opensearch.commons.alerting.model.Schedule +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.alerting.model.Sequence +import org.opensearch.commons.alerting.model.Trigger +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.commons.alerting.model.Workflow.WorkflowType +import org.opensearch.commons.alerting.model.action.Action +import org.opensearch.commons.alerting.model.action.ActionExecutionPolicy +import org.opensearch.commons.alerting.model.action.ActionExecutionScope +import org.opensearch.commons.alerting.model.action.AlertCategory +import org.opensearch.commons.alerting.model.action.PerAlertActionScope +import org.opensearch.commons.alerting.model.action.PerExecutionActionScope +import org.opensearch.commons.alerting.model.action.Throttle +import org.opensearch.commons.alerting.util.string import org.opensearch.commons.authuser.User +import org.opensearch.core.common.settings.SecureString +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser import org.opensearch.index.query.QueryBuilders import org.opensearch.script.Script import org.opensearch.script.ScriptType @@ -134,6 +142,32 @@ fun randomBucketLevelMonitor( ) } +fun randomBucketLevelMonitor( + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + user: User = randomUser(), + inputs: List = listOf( + SearchInput( + emptyList(), + SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) + .aggregation(TermsAggregationBuilder("test_agg").field("test_field")) + ) + ), + schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), + enabled: Boolean = randomBoolean(), + triggers: List = (1..randomInt(10)).map { randomBucketLevelTrigger() }, + enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, + lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), + withMetadata: Boolean = false, + dataSources: DataSources +): Monitor { + return Monitor( + name = name, monitorType = Monitor.MonitorType.BUCKET_LEVEL_MONITOR, enabled = enabled, inputs = inputs, + schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, + uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf(), + dataSources = dataSources + ) +} + fun randomClusterMetricsMonitor( name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), user: User = randomUser(), @@ -170,6 +204,91 @@ fun randomDocumentLevelMonitor( ) } +fun randomDocumentLevelMonitor( + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + user: User? = randomUser(), + inputs: List = listOf(DocLevelMonitorInput("description", listOf("index"), emptyList())), + schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), + enabled: Boolean = randomBoolean(), + triggers: List = (1..randomInt(10)).map { randomQueryLevelTrigger() }, + enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, + lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), + withMetadata: Boolean = false, + dataSources: DataSources, + owner: String? 
= null
+): Monitor {
+    return Monitor(
+        name = name, monitorType = Monitor.MonitorType.DOC_LEVEL_MONITOR, enabled = enabled, inputs = inputs,
+        schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user,
+        uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf(), dataSources = dataSources, owner = owner
+    )
+}
+
+fun randomWorkflow(
+    id: String = Workflow.NO_ID,
+    monitorIds: List<String>,
+    name: String = OpenSearchRestTestCase.randomAlphaOfLength(10),
+    user: User? = randomUser(),
+    schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES),
+    enabled: Boolean = randomBoolean(),
+    enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null,
+    lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS),
+    triggers: List<Trigger> = emptyList(),
+    auditDelegateMonitorAlerts: Boolean? = true
+): Workflow {
+    val delegates = mutableListOf<Delegate>()
+    if (!monitorIds.isNullOrEmpty()) {
+        delegates.add(Delegate(1, monitorIds[0]))
+        for (i in 1 until monitorIds.size) {
+            // Monitors keep their given order: delegate i + 1 chains to the findings of monitor i (e.g. [m1, m2, m3] -> m2 uses m1's findings, m3 uses m2's)
+            delegates.add(Delegate(i + 1, monitorIds[i], ChainedMonitorFindings(monitorIds[i - 1])))
+        }
+    }
+
+    return Workflow(
+        id = id,
+        name = name,
+        enabled = enabled,
+        schedule = schedule,
+        lastUpdateTime = lastUpdateTime,
+        enabledTime = enabledTime,
+        workflowType = WorkflowType.COMPOSITE,
+        user = user,
+        inputs = listOf(CompositeInput(Sequence(delegates))),
+        version = -1L,
+        schemaVersion = 0,
+        triggers = triggers,
+        auditDelegateMonitorAlerts = auditDelegateMonitorAlerts
+    )
+}
+
+fun randomWorkflowWithDelegates(
+    id: String = Workflow.NO_ID,
+    delegates: List<Delegate>,
+    name: String = OpenSearchRestTestCase.randomAlphaOfLength(10),
+    user: User? = randomUser(),
+    schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES),
+    enabled: Boolean = randomBoolean(),
+    enabledTime: Instant?
= if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, + lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), + triggers: List = emptyList() +): Workflow { + return Workflow( + id = id, + name = name, + enabled = enabled, + schedule = schedule, + lastUpdateTime = lastUpdateTime, + enabledTime = enabledTime, + workflowType = WorkflowType.COMPOSITE, + user = user, + inputs = listOf(CompositeInput(Sequence(delegates))), + version = -1L, + schemaVersion = 0, + triggers = triggers + ) +} + fun randomQueryLevelTrigger( id: String = UUIDs.base64UUID(), name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), @@ -278,6 +397,7 @@ fun randomScript(source: String = "return " + OpenSearchRestTestCase.randomBoole val ADMIN = "admin" val ALERTING_BASE_URI = "/_plugins/_alerting/monitors" +val WORKFLOW_ALERTING_BASE_URI = "/_plugins/_alerting/workflows" val DESTINATION_BASE_URI = "/_plugins/_alerting/destinations" val LEGACY_OPENDISTRO_ALERTING_BASE_URI = "/_opendistro/_alerting/monitors" val LEGACY_OPENDISTRO_DESTINATION_BASE_URI = "/_opendistro/_alerting/destinations" @@ -355,7 +475,7 @@ fun randomDocLevelQuery( name: String = "${randomInt(5)}", tags: List = mutableListOf(0..randomInt(10)).map { OpenSearchRestTestCase.randomAlphaOfLength(10) } ): DocLevelQuery { - return DocLevelQuery(id = id, query = query, name = name, tags = tags) + return DocLevelQuery(id = id, query = query, name = name, tags = tags, fields = listOf()) } fun randomDocLevelMonitorInput( @@ -528,16 +648,6 @@ fun randomActionRunResult(): ActionRunResult { ) } -fun Monitor.toJsonString(): String { - val builder = XContentFactory.jsonBuilder() - return this.toXContent(builder, ToXContent.EMPTY_PARAMS).string() -} - -fun Monitor.toJsonStringWithUser(): String { - val builder = XContentFactory.jsonBuilder() - return this.toXContentWithUser(builder, ToXContent.EMPTY_PARAMS).string() -} - fun Alert.toJsonString(): String { val builder = XContentFactory.jsonBuilder() return this.toXContent(builder, ToXContent.EMPTY_PARAMS).string() @@ -649,3 +759,26 @@ fun assertUserNull(map: Map) { fun assertUserNull(monitor: Monitor) { assertNull("User is not null", monitor.user) } + +fun assertUserNull(workflow: Workflow) { + assertNull("User is not null", workflow.user) +} + +fun randomChainedAlertTrigger( + id: String = UUIDs.base64UUID(), + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + severity: String = "1", + condition: Script = randomScript(), + actions: List = mutableListOf(), + destinationId: String = "" +): ChainedAlertTrigger { + return ChainedAlertTrigger( + id = id, + name = name, + severity = severity, + condition = condition, + actions = if (actions.isEmpty() && destinationId.isNotBlank()) { + (0..randomInt(10)).map { randomAction(destinationId = destinationId) } + } else actions + ) +} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/TriggerServiceTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/TriggerServiceTests.kt index d4b7d63a4..6076ebac6 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/TriggerServiceTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/TriggerServiceTests.kt @@ -11,9 +11,9 @@ import org.opensearch.alerting.model.BucketLevelTriggerRunResult import org.opensearch.alerting.model.InputRunResults import org.opensearch.alerting.model.MonitorRunResult import org.opensearch.alerting.script.BucketLevelTriggerExecutionContext -import org.opensearch.common.xcontent.DeprecationHandler -import 
org.opensearch.common.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.XContentType +import org.opensearch.core.xcontent.DeprecationHandler +import org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.script.ScriptService import org.opensearch.test.OpenSearchTestCase import java.time.Instant @@ -37,9 +37,75 @@ class TriggerServiceTests : OpenSearchTestCase() { val trigger = randomBucketLevelTrigger(bucketSelector = bucketSelectorExtAggregationBuilder) val monitor = randomBucketLevelMonitor(triggers = listOf(trigger)) - val inputResultsStr = "{\"_shards\":{\"total\":1,\"failed\":0,\"successful\":1,\"skipped\":0},\"hits\":{\"hits\":[{\"_index\":\"sample-http-responses\",\"_type\":\"http\",\"_source\":{\"status_code\":100,\"http_4xx\":0,\"http_3xx\":0,\"http_5xx\":0,\"http_2xx\":0,\"timestamp\":100000,\"http_1xx\":1},\"_id\":1,\"_score\":1}],\"total\":{\"value\":4,\"relation\":\"eq\"},\"max_score\":1},\"took\":37,\"timed_out\":false,\"aggregations\":{\"status_code\":{\"doc_count_error_upper_bound\":0,\"sum_other_doc_count\":0,\"buckets\":[{\"doc_count\":2,\"key\":100},{\"doc_count\":1,\"key\":102},{\"doc_count\":1,\"key\":201}]},\"${trigger.id}\":{\"parent_bucket_path\":\"status_code\",\"bucket_indices\":[0,1,2]}}}" + val inputResultsStr = "{\n" + + " \"_shards\": {\n" + + " \"total\": 1,\n" + + " \"failed\": 0,\n" + + " \"successful\": 1,\n" + + " \"skipped\": 0\n" + + " },\n" + + " \"hits\": {\n" + + " \"hits\": [\n" + + " {\n" + + " \"_index\": \"sample-http-responses\",\n" + + " \"_type\": \"http\",\n" + + " \"_source\": {\n" + + " \"status_code\": 100,\n" + + " \"http_4xx\": 0,\n" + + " \"http_3xx\": 0,\n" + + " \"http_5xx\": 0,\n" + + " \"http_2xx\": 0,\n" + + " \"timestamp\": 100000,\n" + + " \"http_1xx\": 1\n" + + " },\n" + + " \"_id\": 1,\n" + + " \"_score\": 1\n" + + " }\n" + + " ],\n" + + " \"total\": {\n" + + " \"value\": 4,\n" + + " \"relation\": \"eq\"\n" + + " },\n" + + " \"max_score\": 1\n" + + " },\n" + + " \"took\": 37,\n" + + " \"timed_out\": false,\n" + + " \"aggregations\": {\n" + + " \"status_code\": {\n" + + " \"doc_count_error_upper_bound\": 0,\n" + + " \"sum_other_doc_count\": 0,\n" + + " \"buckets\": [\n" + + " {\n" + + " \"doc_count\": 2,\n" + + " \"key\": 100\n" + + " },\n" + + " {\n" + + " \"doc_count\": 1,\n" + + " \"key\": 102\n" + + " },\n" + + " {\n" + + " \"doc_count\": 1,\n" + + " \"key\": 201\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"${trigger.id}\": {\n" + + " \"parent_bucket_path\": \"status_code\",\n" + + " \"bucket_indices\": [\n" + + " 0,\n" + + " 1,\n" + + " 2\n" + + " ]\n" + + " }\n" + + " }\n" + + "}" - val parser = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, inputResultsStr) + val parser = XContentType.JSON.xContent() + .createParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + inputResultsStr + ) val inputResults = parser.map() @@ -60,9 +126,127 @@ class TriggerServiceTests : OpenSearchTestCase() { val trigger = randomBucketLevelTrigger(bucketSelector = bucketSelectorExtAggregationBuilder) val monitor = randomBucketLevelMonitor(triggers = listOf(trigger)) - val inputResultsStr = "{\"_shards\":{\"total\":1, \"failed\":0, \"successful\":1, \"skipped\":0}, \"hits\":{\"hits\":[{\"_index\":\"sample-http-responses\", \"_type\":\"http\", \"_source\":{\"status_code\":100, \"http_4xx\":0, \"http_3xx\":0, \"http_5xx\":0, \"http_2xx\":0, \"timestamp\":100000, \"http_1xx\":1}, \"_id\":1, \"_score\":1.0}, 
{\"_index\":\"sample-http-responses\", \"_type\":\"http\", \"_source\":{\"status_code\":102, \"http_4xx\":0, \"http_3xx\":0, \"http_5xx\":0, \"http_2xx\":0, \"timestamp\":160000, \"http_1xx\":1}, \"_id\":2, \"_score\":1.0}, {\"_index\":\"sample-http-responses\", \"_type\":\"http\", \"_source\":{\"status_code\":100, \"http_4xx\":0, \"http_3xx\":0, \"http_5xx\":0, \"http_2xx\":0, \"timestamp\":220000, \"http_1xx\":1}, \"_id\":4, \"_score\":1.0}, {\"_index\":\"sample-http-responses\", \"_type\":\"http\", \"_source\":{\"status_code\":201, \"http_4xx\":0, \"http_3xx\":0, \"http_5xx\":0, \"http_2xx\":1, \"timestamp\":280000, \"http_1xx\":0}, \"_id\":5, \"_score\":1.0}], \"total\":{\"value\":4, \"relation\":\"eq\"}, \"max_score\":1.0}, \"took\":15, \"timed_out\":false, \"aggregations\":{\"${trigger.id}\":{\"parent_bucket_path\":\"status_code\", \"bucket_indices\":[0, 1, 2]}, \"status_code\":{\"buckets\":[{\"doc_count\":2, \"key\":{\"status_code\":100}}, {\"doc_count\":1, \"key\":{\"status_code\":102}}, {\"doc_count\":1, \"key\":{\"status_code\":201}}], \"after_key\":{\"status_code\":201}}}}" + val inputResultsStr = "{\n" + + " \"_shards\": {\n" + + " \"total\": 1,\n" + + " \"failed\": 0,\n" + + " \"successful\": 1,\n" + + " \"skipped\": 0\n" + + " },\n" + + " \"hits\": {\n" + + " \"hits\": [\n" + + " {\n" + + " \"_index\": \"sample-http-responses\",\n" + + " \"_type\": \"http\",\n" + + " \"_source\": {\n" + + " \"status_code\": 100,\n" + + " \"http_4xx\": 0,\n" + + " \"http_3xx\": 0,\n" + + " \"http_5xx\": 0,\n" + + " \"http_2xx\": 0,\n" + + " \"timestamp\": 100000,\n" + + " \"http_1xx\": 1\n" + + " },\n" + + " \"_id\": 1,\n" + + " \"_score\": 1\n" + + " },\n" + + " {\n" + + " \"_index\": \"sample-http-responses\",\n" + + " \"_type\": \"http\",\n" + + " \"_source\": {\n" + + " \"status_code\": 102,\n" + + " \"http_4xx\": 0,\n" + + " \"http_3xx\": 0,\n" + + " \"http_5xx\": 0,\n" + + " \"http_2xx\": 0,\n" + + " \"timestamp\": 160000,\n" + + " \"http_1xx\": 1\n" + + " },\n" + + " \"_id\": 2,\n" + + " \"_score\": 1\n" + + " },\n" + + " {\n" + + " \"_index\": \"sample-http-responses\",\n" + + " \"_type\": \"http\",\n" + + " \"_source\": {\n" + + " \"status_code\": 100,\n" + + " \"http_4xx\": 0,\n" + + " \"http_3xx\": 0,\n" + + " \"http_5xx\": 0,\n" + + " \"http_2xx\": 0,\n" + + " \"timestamp\": 220000,\n" + + " \"http_1xx\": 1\n" + + " },\n" + + " \"_id\": 4,\n" + + " \"_score\": 1\n" + + " },\n" + + " {\n" + + " \"_index\": \"sample-http-responses\",\n" + + " \"_type\": \"http\",\n" + + " \"_source\": {\n" + + " \"status_code\": 201,\n" + + " \"http_4xx\": 0,\n" + + " \"http_3xx\": 0,\n" + + " \"http_5xx\": 0,\n" + + " \"http_2xx\": 1,\n" + + " \"timestamp\": 280000,\n" + + " \"http_1xx\": 0\n" + + " },\n" + + " \"_id\": 5,\n" + + " \"_score\": 1\n" + + " }\n" + + " ],\n" + + " \"total\": {\n" + + " \"value\": 4,\n" + + " \"relation\": \"eq\"\n" + + " },\n" + + " \"max_score\": 1\n" + + " },\n" + + " \"took\": 15,\n" + + " \"timed_out\": false,\n" + + " \"aggregations\": {\n" + + " \"${trigger.id}\": {\n" + + " \"parent_bucket_path\": \"status_code\",\n" + + " \"bucket_indices\": [\n" + + " 0,\n" + + " 1,\n" + + " 2\n" + + " ]\n" + + " },\n" + + " \"status_code\": {\n" + + " \"buckets\": [\n" + + " {\n" + + " \"doc_count\": 2,\n" + + " \"key\": {\n" + + " \"status_code\": 100\n" + + " }\n" + + " },\n" + + " {\n" + + " \"doc_count\": 1,\n" + + " \"key\": {\n" + + " \"status_code\": 102\n" + + " }\n" + + " },\n" + + " {\n" + + " \"doc_count\": 1,\n" + + " \"key\": {\n" + + " \"status_code\": 201\n" + + 
" }\n" + + " }\n" + + " ],\n" + + " \"after_key\": {\n" + + " \"status_code\": 201\n" + + " }\n" + + " }\n" + + " }\n" + + "}" - val parser = XContentType.JSON.xContent().createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, inputResultsStr) + val parser = XContentType.JSON.xContent() + .createParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + inputResultsStr + ) val inputResults = parser.map() diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/AcknowledgeAlertActionTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/AcknowledgeAlertActionTests.kt deleted file mode 100644 index 15c0f1cd3..000000000 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/AcknowledgeAlertActionTests.kt +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.junit.Assert -import org.opensearch.test.OpenSearchTestCase - -class AcknowledgeAlertActionTests : OpenSearchTestCase() { - - fun `test ack alert action name`() { - Assert.assertNotNull(AcknowledgeAlertAction.INSTANCE.name()) - Assert.assertEquals(AcknowledgeAlertAction.INSTANCE.name(), AcknowledgeAlertAction.NAME) - } -} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/AcknowledgeAlertRequestTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/AcknowledgeAlertRequestTests.kt deleted file mode 100644 index 0e699146f..000000000 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/AcknowledgeAlertRequestTests.kt +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.junit.Assert -import org.opensearch.action.support.WriteRequest -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.test.OpenSearchTestCase - -class AcknowledgeAlertRequestTests : OpenSearchTestCase() { - - fun `test acknowledge alert request`() { - val req = AcknowledgeAlertRequest("1234", mutableListOf("1", "2", "3", "4"), WriteRequest.RefreshPolicy.IMMEDIATE) - Assert.assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = AcknowledgeAlertRequest(sin) - Assert.assertEquals("1234", newReq.monitorId) - Assert.assertEquals(4, newReq.alertIds.size) - Assert.assertEquals(WriteRequest.RefreshPolicy.IMMEDIATE, newReq.refreshPolicy) - } -} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/AcknowledgeAlertResponseTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/AcknowledgeAlertResponseTests.kt deleted file mode 100644 index 13d98f599..000000000 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/AcknowledgeAlertResponseTests.kt +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.junit.Assert -import org.opensearch.alerting.alerts.AlertError -import org.opensearch.alerting.model.ActionExecutionResult -import org.opensearch.alerting.model.Alert -import org.opensearch.alerting.randomUser -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.test.OpenSearchTestCase -import java.time.Instant - -class 
AcknowledgeAlertResponseTests : OpenSearchTestCase() { - - fun `test acknowledge alert response`() { - - val acknowledged = mutableListOf( - Alert( - "1234", 0L, 1, "monitor-1234", "test-monitor", 0L, randomUser(), - "trigger-14", "test-trigger", ArrayList(), ArrayList(), Alert.State.ACKNOWLEDGED, - Instant.now(), Instant.now(), Instant.now(), Instant.now(), null, ArrayList(), - "sev-2", ArrayList(), null - ) - ) - val failed = mutableListOf( - Alert( - "1234", 0L, 1, "monitor-1234", "test-monitor", 0L, randomUser(), - "trigger-14", "test-trigger", ArrayList(), ArrayList(), Alert.State.ERROR, Instant.now(), Instant.now(), - Instant.now(), Instant.now(), null, mutableListOf(AlertError(Instant.now(), "Error msg")), - "sev-2", mutableListOf(ActionExecutionResult("7890", null, 0)), null - ) - ) - val missing = mutableListOf("1", "2", "3", "4") - - val req = AcknowledgeAlertResponse(acknowledged, failed, missing) - Assert.assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = AcknowledgeAlertResponse(sin) - Assert.assertEquals(1, newReq.acknowledged.size) - Assert.assertEquals(1, newReq.failed.size) - Assert.assertEquals(4, newReq.missing.size) - } -} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/DeleteMonitorActionTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/DeleteMonitorActionTests.kt deleted file mode 100644 index 8db7761cd..000000000 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/DeleteMonitorActionTests.kt +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.junit.Assert -import org.opensearch.test.OpenSearchTestCase - -class DeleteMonitorActionTests : OpenSearchTestCase() { - - fun `test delete monitor action name`() { - Assert.assertNotNull(DeleteMonitorAction.INSTANCE.name()) - Assert.assertEquals(DeleteMonitorAction.INSTANCE.name(), DeleteMonitorAction.NAME) - } -} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/DeleteMonitorRequestTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/DeleteMonitorRequestTests.kt deleted file mode 100644 index e66940460..000000000 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/DeleteMonitorRequestTests.kt +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.junit.Assert -import org.opensearch.action.support.WriteRequest -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.test.OpenSearchTestCase - -class DeleteMonitorRequestTests : OpenSearchTestCase() { - - fun `test delete monitor request`() { - - val req = DeleteMonitorRequest("1234", WriteRequest.RefreshPolicy.IMMEDIATE) - Assert.assertNotNull(req) - Assert.assertEquals("1234", req.monitorId) - Assert.assertEquals("true", req.refreshPolicy.value) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = DeleteMonitorRequest(sin) - Assert.assertEquals("1234", newReq.monitorId) - Assert.assertEquals("true", newReq.refreshPolicy.value) - } -} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/ExecuteMonitorRequestTests.kt 
b/alerting/src/test/kotlin/org/opensearch/alerting/action/ExecuteMonitorRequestTests.kt index ba22c5c46..f54b6fea6 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/ExecuteMonitorRequestTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/action/ExecuteMonitorRequestTests.kt @@ -5,11 +5,11 @@ package org.opensearch.alerting.action -import org.opensearch.alerting.core.model.SearchInput import org.opensearch.alerting.randomQueryLevelMonitor import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput import org.opensearch.common.unit.TimeValue +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.search.builder.SearchSourceBuilder import org.opensearch.test.OpenSearchTestCase diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/ExecuteMonitorResponseTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/ExecuteMonitorResponseTests.kt index 34b41bc2b..10ccd7038 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/ExecuteMonitorResponseTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/action/ExecuteMonitorResponseTests.kt @@ -9,7 +9,7 @@ import org.junit.Assert import org.opensearch.alerting.randomBucketLevelMonitorRunResult import org.opensearch.alerting.randomQueryLevelMonitorRunResult import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.test.OpenSearchTestCase class ExecuteMonitorResponseTests : OpenSearchTestCase() { diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetAlertsActionTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetAlertsActionTests.kt deleted file mode 100644 index 73ee6a37c..000000000 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetAlertsActionTests.kt +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.test.OpenSearchTestCase - -class GetAlertsActionTests : OpenSearchTestCase() { - - fun `test get alerts action name`() { - assertNotNull(GetAlertsAction.INSTANCE.name()) - assertEquals(GetAlertsAction.INSTANCE.name(), GetAlertsAction.NAME) - } -} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetAlertsRequestTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetAlertsRequestTests.kt deleted file mode 100644 index 3b8934638..000000000 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetAlertsRequestTests.kt +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.alerting.model.Table -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.test.OpenSearchTestCase - -class GetAlertsRequestTests : OpenSearchTestCase() { - - fun `test get alerts request`() { - - val table = Table("asc", "sortString", null, 1, 0, "") - - val req = GetAlertsRequest(table, "1", "active", null) - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = GetAlertsRequest(sin) - - assertEquals("1", newReq.severityLevel) - 
assertEquals("active", newReq.alertState) - assertNull(newReq.monitorId) - assertEquals(table, newReq.table) - } - - fun `test get alerts request with filter`() { - - val table = Table("asc", "sortString", null, 1, 0, "") - val req = GetAlertsRequest(table, "1", "active", null) - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = GetAlertsRequest(sin) - - assertEquals("1", newReq.severityLevel) - assertEquals("active", newReq.alertState) - assertNull(newReq.monitorId) - assertEquals(table, newReq.table) - } - - fun `test validate returns null`() { - val table = Table("asc", "sortString", null, 1, 0, "") - - val req = GetAlertsRequest(table, "1", "active", null) - assertNotNull(req) - assertNull(req.validate()) - } -} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetAlertsResponseTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetAlertsResponseTests.kt deleted file mode 100644 index 277e8e9a2..000000000 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetAlertsResponseTests.kt +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.junit.Assert -import org.opensearch.alerting.builder -import org.opensearch.alerting.model.Alert -import org.opensearch.alerting.opensearchapi.string -import org.opensearch.alerting.randomUser -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.test.OpenSearchTestCase -import java.time.Instant -import java.util.Collections - -class GetAlertsResponseTests : OpenSearchTestCase() { - - fun `test get alerts response with no alerts`() { - val req = GetAlertsResponse(Collections.emptyList(), 0) - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = GetAlertsResponse(sin) - Assert.assertTrue(newReq.alerts.isEmpty()) - assertEquals(0, newReq.totalAlerts) - } - - fun `test get alerts response with alerts`() { - val alert = Alert( - "id", - 0L, - 0, - "monitorId", - "monitorName", - 0L, - randomUser(), - "triggerId", - "triggerName", - Collections.emptyList(), - Collections.emptyList(), - Alert.State.ACKNOWLEDGED, - Instant.MIN, - null, - null, - null, - null, - Collections.emptyList(), - "severity", - Collections.emptyList(), - null - ) - val req = GetAlertsResponse(listOf(alert), 1) - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = GetAlertsResponse(sin) - assertEquals(1, newReq.alerts.size) - assertEquals(alert, newReq.alerts[0]) - assertEquals(1, newReq.totalAlerts) - } - - fun `test toXContent for get alerts response`() { - val now = Instant.now() - - val alert = Alert( - "id", - 0L, - 0, - "monitorId", - "monitorName", - 0L, - null, - "triggerId", - "triggerName", - Collections.emptyList(), - Collections.emptyList(), - Alert.State.ACKNOWLEDGED, - now, - null, - null, - null, - null, - Collections.emptyList(), - "severity", - Collections.emptyList(), - null - ) - val req = GetAlertsResponse(listOf(alert), 1) - var actualXContentString = req.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() - val expectedXContentString = 
"{\"alerts\":[{\"id\":\"id\",\"version\":0,\"monitor_id\":\"monitorId\"," + - "\"schema_version\":0,\"monitor_version\":0,\"monitor_name\":\"monitorName\"," + - "\"trigger_id\":\"triggerId\",\"trigger_name\":\"triggerName\"," + - "\"finding_ids\":[],\"related_doc_ids\":[],\"state\":\"ACKNOWLEDGED\",\"error_message\":null,\"alert_history\":[]," + - "\"severity\":\"severity\",\"action_execution_results\":[],\"start_time\":" + now.toEpochMilli() + - ",\"last_notification_time\":null,\"end_time\":null,\"acknowledged_time\":null}],\"totalAlerts\":1}" - assertEquals(expectedXContentString, actualXContentString) - } -} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetDestinationsRequestTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetDestinationsRequestTests.kt index 42cf1736a..7c76621f9 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetDestinationsRequestTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetDestinationsRequestTests.kt @@ -5,9 +5,9 @@ package org.opensearch.alerting.action -import org.opensearch.alerting.model.Table import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput +import org.opensearch.commons.alerting.model.Table +import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.search.fetch.subphase.FetchSourceContext import org.opensearch.test.OpenSearchTestCase diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetDestinationsResponseTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetDestinationsResponseTests.kt index e87e8b722..ed837bdce 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetDestinationsResponseTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetDestinationsResponseTests.kt @@ -9,8 +9,8 @@ import org.opensearch.alerting.model.destination.Destination import org.opensearch.alerting.model.destination.Slack import org.opensearch.alerting.util.DestinationType import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.rest.RestStatus +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.rest.RestStatus import org.opensearch.test.OpenSearchTestCase import java.time.Instant import java.util.Collections diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetEmailAccountRequestTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetEmailAccountRequestTests.kt index 5f6f5ce57..02631a38b 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetEmailAccountRequestTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetEmailAccountRequestTests.kt @@ -6,7 +6,7 @@ package org.opensearch.alerting.action import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.rest.RestRequest import org.opensearch.search.fetch.subphase.FetchSourceContext import org.opensearch.test.OpenSearchTestCase diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetEmailAccountResponseTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetEmailAccountResponseTests.kt index 1febe1597..ed60c3439 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetEmailAccountResponseTests.kt +++ 
b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetEmailAccountResponseTests.kt @@ -7,8 +7,8 @@ package org.opensearch.alerting.action import org.opensearch.alerting.randomEmailAccount import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.rest.RestStatus +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.rest.RestStatus import org.opensearch.test.OpenSearchTestCase class GetEmailAccountResponseTests : OpenSearchTestCase() { diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetEmailGroupRequestTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetEmailGroupRequestTests.kt index cc2043fb0..7fa8b2037 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetEmailGroupRequestTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetEmailGroupRequestTests.kt @@ -6,7 +6,7 @@ package org.opensearch.alerting.action import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.rest.RestRequest import org.opensearch.search.fetch.subphase.FetchSourceContext import org.opensearch.test.OpenSearchTestCase diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetEmailGroupResponseTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetEmailGroupResponseTests.kt index 968907aae..19612fe4a 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetEmailGroupResponseTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetEmailGroupResponseTests.kt @@ -7,8 +7,8 @@ package org.opensearch.alerting.action import org.opensearch.alerting.randomEmailGroup import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.rest.RestStatus +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.rest.RestStatus import org.opensearch.test.OpenSearchTestCase class GetEmailGroupResponseTests : OpenSearchTestCase() { diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetFindingsRequestTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetFindingsRequestTests.kt new file mode 100644 index 000000000..d1bd6f7e3 --- /dev/null +++ b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetFindingsRequestTests.kt @@ -0,0 +1,41 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.common.io.stream.BytesStreamOutput +import org.opensearch.commons.alerting.action.GetFindingsRequest +import org.opensearch.commons.alerting.model.Table +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.test.OpenSearchTestCase + +class GetFindingsRequestTests : OpenSearchTestCase() { + + fun `test get findings request`() { + + val table = Table("asc", "sortString", null, 1, 0, "") + + val req = GetFindingsRequest("2121", table, "1", "finding_index_name") + assertNotNull(req) + + val out = BytesStreamOutput() + req.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newReq = GetFindingsRequest(sin) + + assertEquals("1", newReq.monitorId) + assertEquals("2121", newReq.findingId) + assertEquals("finding_index_name", newReq.findingIndex) + assertEquals(table, newReq.table) + } + + fun `test validate 
returns null`() { + val table = Table("asc", "sortString", null, 1, 0, "") + + val req = GetFindingsRequest("2121", table, "1", "active") + assertNotNull(req) + assertNull(req.validate()) + } +} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetMonitorActionTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetMonitorActionTests.kt deleted file mode 100644 index 3d1a5dffa..000000000 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetMonitorActionTests.kt +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.test.OpenSearchTestCase - -class GetMonitorActionTests : OpenSearchTestCase() { - - fun `test get monitor action name`() { - assertNotNull(GetMonitorAction.INSTANCE.name()) - assertEquals(GetMonitorAction.INSTANCE.name(), GetMonitorAction.NAME) - } -} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetMonitorRequestTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetMonitorRequestTests.kt deleted file mode 100644 index c3d6831ba..000000000 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetMonitorRequestTests.kt +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.rest.RestRequest -import org.opensearch.search.fetch.subphase.FetchSourceContext -import org.opensearch.test.OpenSearchTestCase - -class GetMonitorRequestTests : OpenSearchTestCase() { - - fun `test get monitor request`() { - - val req = GetMonitorRequest("1234", 1L, RestRequest.Method.GET, FetchSourceContext.FETCH_SOURCE) - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = GetMonitorRequest(sin) - assertEquals("1234", newReq.monitorId) - assertEquals(1L, newReq.version) - assertEquals(RestRequest.Method.GET, newReq.method) - assertEquals(FetchSourceContext.FETCH_SOURCE, newReq.srcContext) - } - - fun `test get monitor request without src context`() { - - val req = GetMonitorRequest("1234", 1L, RestRequest.Method.GET, null) - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = GetMonitorRequest(sin) - assertEquals("1234", newReq.monitorId) - assertEquals(1L, newReq.version) - assertEquals(RestRequest.Method.GET, newReq.method) - assertEquals(null, newReq.srcContext) - } - - fun `test head monitor request`() { - - val req = GetMonitorRequest("1234", 2L, RestRequest.Method.HEAD, FetchSourceContext.FETCH_SOURCE) - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = GetMonitorRequest(sin) - assertEquals("1234", newReq.monitorId) - assertEquals(2L, newReq.version) - assertEquals(RestRequest.Method.HEAD, newReq.method) - assertEquals(FetchSourceContext.FETCH_SOURCE, newReq.srcContext) - } -} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetMonitorResponseTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/GetMonitorResponseTests.kt deleted file mode 100644 index 2bd14a45f..000000000 --- 
a/alerting/src/test/kotlin/org/opensearch/alerting/action/GetMonitorResponseTests.kt +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.alerting.core.model.CronSchedule -import org.opensearch.alerting.model.Monitor -import org.opensearch.alerting.randomUser -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.rest.RestStatus -import org.opensearch.test.OpenSearchTestCase -import java.time.Instant -import java.time.ZoneId - -class GetMonitorResponseTests : OpenSearchTestCase() { - - fun `test get monitor response`() { - val req = GetMonitorResponse("1234", 1L, 2L, 0L, RestStatus.OK, null) - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = GetMonitorResponse(sin) - assertEquals("1234", newReq.id) - assertEquals(1L, newReq.version) - assertEquals(RestStatus.OK, newReq.status) - assertEquals(null, newReq.monitor) - } - - fun `test get monitor response with monitor`() { - val cronExpression = "31 * * * *" // Run at minute 31. - val testInstance = Instant.ofEpochSecond(1538164858L) - - val cronSchedule = CronSchedule(cronExpression, ZoneId.of("Asia/Kolkata"), testInstance) - val monitor = Monitor( - id = "123", - version = 0L, - name = "test-monitor", - enabled = true, - schedule = cronSchedule, - lastUpdateTime = Instant.now(), - enabledTime = Instant.now(), - monitorType = Monitor.MonitorType.QUERY_LEVEL_MONITOR, - user = randomUser(), - schemaVersion = 0, - inputs = mutableListOf(), - triggers = mutableListOf(), - uiMetadata = mutableMapOf() - ) - val req = GetMonitorResponse("1234", 1L, 2L, 0L, RestStatus.OK, monitor) - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = GetMonitorResponse(sin) - assertEquals("1234", newReq.id) - assertEquals(1L, newReq.version) - assertEquals(RestStatus.OK, newReq.status) - assertNotNull(newReq.monitor) - } -} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/IndexMonitorActionTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/IndexMonitorActionTests.kt deleted file mode 100644 index c115aa0cf..000000000 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/IndexMonitorActionTests.kt +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.test.OpenSearchTestCase - -class IndexMonitorActionTests : OpenSearchTestCase() { - - fun `test index monitor action name`() { - assertNotNull(IndexMonitorAction.INSTANCE.name()) - assertEquals(IndexMonitorAction.INSTANCE.name(), IndexMonitorAction.NAME) - } -} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/IndexMonitorRequestTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/IndexMonitorRequestTests.kt deleted file mode 100644 index c840e130e..000000000 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/IndexMonitorRequestTests.kt +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.support.WriteRequest -import org.opensearch.alerting.core.model.SearchInput -import 
org.opensearch.alerting.randomQueryLevelMonitor -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.rest.RestRequest -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.test.OpenSearchTestCase - -class IndexMonitorRequestTests : OpenSearchTestCase() { - - fun `test index monitor post request`() { - - val req = IndexMonitorRequest( - "1234", 1L, 2L, WriteRequest.RefreshPolicy.IMMEDIATE, RestRequest.Method.POST, - randomQueryLevelMonitor().copy(inputs = listOf(SearchInput(emptyList(), SearchSourceBuilder()))) - ) - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = IndexMonitorRequest(sin) - assertEquals("1234", newReq.monitorId) - assertEquals(1L, newReq.seqNo) - assertEquals(2L, newReq.primaryTerm) - assertEquals(RestRequest.Method.POST, newReq.method) - assertNotNull(newReq.monitor) - } - - fun `test index monitor put request`() { - - val req = IndexMonitorRequest( - "1234", 1L, 2L, WriteRequest.RefreshPolicy.IMMEDIATE, RestRequest.Method.PUT, - randomQueryLevelMonitor().copy(inputs = listOf(SearchInput(emptyList(), SearchSourceBuilder()))) - ) - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = IndexMonitorRequest(sin) - assertEquals("1234", newReq.monitorId) - assertEquals(1L, newReq.seqNo) - assertEquals(2L, newReq.primaryTerm) - assertEquals(RestRequest.Method.PUT, newReq.method) - assertNotNull(newReq.monitor) - } -} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/IndexMonitorResponseTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/IndexMonitorResponseTests.kt deleted file mode 100644 index 00210dce6..000000000 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/IndexMonitorResponseTests.kt +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.alerting.core.model.CronSchedule -import org.opensearch.alerting.model.Monitor -import org.opensearch.alerting.randomUser -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.rest.RestStatus -import org.opensearch.test.OpenSearchTestCase -import java.time.Instant -import java.time.ZoneId - -class IndexMonitorResponseTests : OpenSearchTestCase() { - - fun `test index monitor response with monitor`() { - val cronExpression = "31 * * * *" // Run at minute 31. 
- val testInstance = Instant.ofEpochSecond(1538164858L) - - val cronSchedule = CronSchedule(cronExpression, ZoneId.of("Asia/Kolkata"), testInstance) - val monitor = Monitor( - id = "123", - version = 0L, - name = "test-monitor", - enabled = true, - schedule = cronSchedule, - lastUpdateTime = Instant.now(), - enabledTime = Instant.now(), - monitorType = Monitor.MonitorType.QUERY_LEVEL_MONITOR, - user = randomUser(), - schemaVersion = 0, - inputs = mutableListOf(), - triggers = mutableListOf(), - uiMetadata = mutableMapOf() - ) - val req = IndexMonitorResponse("1234", 1L, 2L, 0L, RestStatus.OK, monitor) - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = IndexMonitorResponse(sin) - assertEquals("1234", newReq.id) - assertEquals(1L, newReq.version) - assertEquals(RestStatus.OK, newReq.status) - assertNotNull(newReq.monitor) - } -} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/SearchMonitorActionTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/SearchMonitorActionTests.kt deleted file mode 100644 index 61f96529d..000000000 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/SearchMonitorActionTests.kt +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.junit.Assert -import org.opensearch.test.OpenSearchTestCase - -class SearchMonitorActionTests : OpenSearchTestCase() { - - fun `test search monitor action name`() { - Assert.assertNotNull(SearchMonitorAction.INSTANCE.name()) - Assert.assertEquals(SearchMonitorAction.INSTANCE.name(), SearchMonitorAction.NAME) - } -} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/action/SearchMonitorRequestTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/action/SearchMonitorRequestTests.kt deleted file mode 100644 index dc593e480..000000000 --- a/alerting/src/test/kotlin/org/opensearch/alerting/action/SearchMonitorRequestTests.kt +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.search.SearchRequest -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.unit.TimeValue -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.test.OpenSearchTestCase -import org.opensearch.test.rest.OpenSearchRestTestCase -import java.util.concurrent.TimeUnit - -class SearchMonitorRequestTests : OpenSearchTestCase() { - - fun `test search monitors request`() { - val searchSourceBuilder = SearchSourceBuilder().from(0).size(100).timeout(TimeValue(60, TimeUnit.SECONDS)) - val searchRequest = SearchRequest().indices(OpenSearchRestTestCase.randomAlphaOfLength(10)).source(searchSourceBuilder) - val searchMonitorRequest = SearchMonitorRequest(searchRequest) - assertNotNull(searchMonitorRequest) - - val out = BytesStreamOutput() - searchMonitorRequest.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = SearchMonitorRequest(sin) - - assertNotNull(newReq.searchRequest) - assertEquals(1, newReq.searchRequest.indices().size) - } -} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregationBuilderTests.kt 
b/alerting/src/test/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregationBuilderTests.kt index 4b7155bce..60021e20b 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregationBuilderTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregationBuilderTests.kt @@ -6,6 +6,8 @@ package org.opensearch.alerting.aggregation.bucketselectorext import org.opensearch.alerting.AlertingPlugin +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtFilter import org.opensearch.plugins.SearchPlugin import org.opensearch.script.Script import org.opensearch.script.ScriptType diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregatorTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregatorTests.kt index 742cc2069..257a0a705 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregatorTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregatorTests.kt @@ -16,6 +16,9 @@ import org.apache.lucene.util.BytesRef import org.hamcrest.CoreMatchers import org.opensearch.common.CheckedConsumer import org.opensearch.common.settings.Settings +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtFilter +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorIndices import org.opensearch.index.mapper.KeywordFieldMapper.KeywordFieldType import org.opensearch.index.mapper.MappedFieldType import org.opensearch.index.mapper.NumberFieldMapper diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/alerts/AlertIndicesIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/alerts/AlertIndicesIT.kt index e1b780491..69a7e0363 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/alerts/AlertIndicesIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/alerts/AlertIndicesIT.kt @@ -5,15 +5,12 @@ package org.opensearch.alerting.alerts -import org.apache.http.entity.ContentType.APPLICATION_JSON -import org.apache.http.entity.StringEntity +import org.apache.hc.core5.http.ContentType.APPLICATION_JSON +import org.apache.hc.core5.http.io.entity.StringEntity import org.opensearch.action.search.SearchResponse import org.opensearch.alerting.ALWAYS_RUN import org.opensearch.alerting.AlertingRestTestCase import org.opensearch.alerting.NEVER_RUN -import org.opensearch.alerting.core.model.DocLevelMonitorInput -import org.opensearch.alerting.core.model.DocLevelQuery -import org.opensearch.alerting.core.model.ScheduledJob import org.opensearch.alerting.makeRequest import org.opensearch.alerting.randomDocumentLevelMonitor import org.opensearch.alerting.randomDocumentLevelTrigger @@ -22,7 +19,12 @@ import org.opensearch.alerting.randomQueryLevelTrigger import org.opensearch.alerting.settings.AlertingSettings import org.opensearch.common.xcontent.XContentType import org.opensearch.common.xcontent.json.JsonXContent.jsonXContent -import org.opensearch.rest.RestStatus +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import 
org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.core.rest.RestStatus +import org.opensearch.test.OpenSearchTestCase +import java.util.concurrent.TimeUnit class AlertIndicesIT : AlertingRestTestCase() { @@ -35,7 +37,7 @@ class AlertIndicesIT : AlertingRestTestCase() { fun `test create finding index`() { val testIndex = createTestIndex() - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) @@ -52,7 +54,7 @@ class AlertIndicesIT : AlertingRestTestCase() { putAlertMappings( AlertIndices.alertMapping().trimStart('{').trimEnd('}') - .replace("\"schema_version\": 4", "\"schema_version\": 0") + .replace("\"schema_version\": 5", "\"schema_version\": 0") ) assertIndexExists(AlertIndices.ALERT_INDEX) assertIndexExists(AlertIndices.ALERT_HISTORY_WRITE_INDEX) @@ -62,9 +64,9 @@ class AlertIndicesIT : AlertingRestTestCase() { executeMonitor(createRandomMonitor()) assertIndexExists(AlertIndices.ALERT_INDEX) assertIndexExists(AlertIndices.ALERT_HISTORY_WRITE_INDEX) - verifyIndexSchemaVersion(ScheduledJob.SCHEDULED_JOBS_INDEX, 5) - verifyIndexSchemaVersion(AlertIndices.ALERT_INDEX, 4) - verifyIndexSchemaVersion(AlertIndices.ALERT_HISTORY_WRITE_INDEX, 4) + verifyIndexSchemaVersion(ScheduledJob.SCHEDULED_JOBS_INDEX, 8) + verifyIndexSchemaVersion(AlertIndices.ALERT_INDEX, 5) + verifyIndexSchemaVersion(AlertIndices.ALERT_HISTORY_WRITE_INDEX, 5) } fun `test update finding index mapping with new schema version`() { @@ -73,21 +75,21 @@ class AlertIndicesIT : AlertingRestTestCase() { putFindingMappings( AlertIndices.findingMapping().trimStart('{').trimEnd('}') - .replace("\"schema_version\": 1", "\"schema_version\": 0") + .replace("\"schema_version\": 4", "\"schema_version\": 0") ) assertIndexExists(AlertIndices.FINDING_HISTORY_WRITE_INDEX) verifyIndexSchemaVersion(AlertIndices.FINDING_HISTORY_WRITE_INDEX, 0) wipeAllODFEIndices() val testIndex = createTestIndex() - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) executeMonitor(trueMonitor.id) assertIndexExists(AlertIndices.FINDING_HISTORY_WRITE_INDEX) - verifyIndexSchemaVersion(ScheduledJob.SCHEDULED_JOBS_INDEX, 5) - verifyIndexSchemaVersion(AlertIndices.FINDING_HISTORY_WRITE_INDEX, 1) + verifyIndexSchemaVersion(ScheduledJob.SCHEDULED_JOBS_INDEX, 8) + verifyIndexSchemaVersion(AlertIndices.FINDING_HISTORY_WRITE_INDEX, 4) } fun `test alert index gets recreated automatically if deleted`() { @@ -112,7 +114,7 @@ class AlertIndicesIT : AlertingRestTestCase() { wipeAllODFEIndices() assertIndexDoesNotExist(AlertIndices.FINDING_HISTORY_WRITE_INDEX) val testIndex = createTestIndex() - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docQuery = DocLevelQuery(query = 
"test_field:\"us-west-2\"", name = "3", fields = listOf()) val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) @@ -138,7 +140,9 @@ class AlertIndicesIT : AlertingRestTestCase() { executeMonitor(trueMonitor) // Allow for a rollover index. - Thread.sleep(2000) + OpenSearchTestCase.waitUntil({ + return@waitUntil (getAlertIndices().size >= 3) + }, 2, TimeUnit.SECONDS) assertTrue("Did not find 3 alert indices", getAlertIndices().size >= 3) } @@ -148,14 +152,16 @@ class AlertIndicesIT : AlertingRestTestCase() { client().updateSettings(AlertingSettings.FINDING_HISTORY_INDEX_MAX_AGE.key, "1s") val testIndex = createTestIndex() - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) executeMonitor(trueMonitor.id) // Allow for a rollover index. - Thread.sleep(2000) + OpenSearchTestCase.waitUntil({ + return@waitUntil (getFindingIndices().size >= 2) + }, 2, TimeUnit.SECONDS) assertTrue("Did not find 2 alert indices", getFindingIndices().size >= 2) } @@ -214,6 +220,9 @@ class AlertIndicesIT : AlertingRestTestCase() { // Check if alert is active and alert index is created val activeAlert = searchAlerts(monitor) assertEquals("1 alert should be active", 1, activeAlert.size) + + waitUntil { return@waitUntil getAlertIndices().size == 2 } + assertEquals("Did not find 2 alert indices", 2, getAlertIndices().size) // History index is created but is empty assertEquals(0, getAlertHistoryDocCount()) @@ -235,7 +244,14 @@ class AlertIndicesIT : AlertingRestTestCase() { client().updateSettings(AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD.key, "1s") // Give some time for history to be rolled over and cleared - Thread.sleep(5000) + OpenSearchTestCase.waitUntil({ + val alertIndices = getAlertIndices().size + val docCount = getAlertHistoryDocCount() + if (alertIndices > 2 || docCount > 0) { + return@waitUntil false + } + return@waitUntil true + }, 30, TimeUnit.SECONDS) // Given the max_docs and retention settings above, the history index will rollover and the non-write index will be deleted. 
// This leaves two indices: alert index and an empty history write index @@ -248,7 +264,7 @@ class AlertIndicesIT : AlertingRestTestCase() { // Create monitor and execute val testIndex = createTestIndex() - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) @@ -264,6 +280,9 @@ class AlertIndicesIT : AlertingRestTestCase() { // Check if alert is active and alert index is created val activeAlert = searchAlerts(monitor) assertEquals("1 alert should be active", 1, activeAlert.size) + + waitUntil { return@waitUntil getAlertIndices().size == 2 } + assertEquals("Did not find 2 alert indices", 2, getAlertIndices().size) // History index is created but is empty assertEquals(0, getAlertHistoryDocCount()) @@ -284,7 +303,14 @@ class AlertIndicesIT : AlertingRestTestCase() { client().updateSettings(AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD.key, "1s") // Give some time for history to be rolled over and cleared - Thread.sleep(5000) + OpenSearchTestCase.waitUntil({ + val alertIndices = getAlertIndices().size + val docCount = getAlertHistoryDocCount() + if (alertIndices > 2 || docCount > 0) { + return@waitUntil false + } + return@waitUntil true + }, 30, TimeUnit.SECONDS) // Given the max_docs and retention settings above, the history index will rollover and the non-write index will be deleted. // This leaves two indices: alert index and an empty history write index diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/bwc/AlertingBackwardsCompatibilityIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/bwc/AlertingBackwardsCompatibilityIT.kt index 0346272cd..2c77fd480 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/bwc/AlertingBackwardsCompatibilityIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/bwc/AlertingBackwardsCompatibilityIT.kt @@ -5,18 +5,19 @@ package org.opensearch.alerting.bwc -import org.apache.http.entity.ContentType.APPLICATION_JSON -import org.apache.http.entity.StringEntity +import org.apache.hc.core5.http.ContentType.APPLICATION_JSON +import org.apache.hc.core5.http.io.entity.StringEntity import org.opensearch.alerting.ALERTING_BASE_URI import org.opensearch.alerting.AlertingRestTestCase -import org.opensearch.alerting.LEGACY_OPENDISTRO_ALERTING_BASE_URI import org.opensearch.alerting.makeRequest -import org.opensearch.alerting.model.Monitor import org.opensearch.common.settings.Settings import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.core.rest.RestStatus import org.opensearch.index.query.QueryBuilders -import org.opensearch.rest.RestStatus import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.test.OpenSearchTestCase +import java.util.concurrent.TimeUnit class AlertingBackwardsCompatibilityIT : AlertingRestTestCase() { @@ -53,15 +54,15 @@ class AlertingBackwardsCompatibilityIT : AlertingRestTestCase() { val pluginNames = plugins.map { plugin -> plugin["name"] }.toSet() when (CLUSTER_TYPE) { ClusterType.OLD -> { - assertTrue(pluginNames.contains("opendistro-alerting")) + assertTrue(pluginNames.contains("opensearch-alerting")) createBasicMonitor() } 
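     // From the MIXED stage onward the nodes report the renamed opensearch-alerting plugin,
     // so the checks below go through the _plugins/_alerting endpoints instead of the
     // legacy _opendistro ones.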
 ClusterType.MIXED -> {
     assertTrue(pluginNames.contains("opensearch-alerting"))
-    verifyMonitorExists(LEGACY_OPENDISTRO_ALERTING_BASE_URI)
+    verifyMonitorExists(ALERTING_BASE_URI)
     // TODO: Need to move the base URI being used here into a constant and rename ALERTING_BASE_URI to
     // MONITOR_BASE_URI
-    verifyMonitorStats("/_opendistro/_alerting")
+    verifyMonitorStats("/_plugins/_alerting")
 }
 ClusterType.UPGRADED -> {
     assertTrue(pluginNames.contains("opensearch-alerting"))
@@ -70,8 +71,21 @@
     // the test execution by a lot (might have to wait for Job Scheduler plugin integration first)
     // Waiting a minute to ensure the Monitor ran again at least once before checking if the job is running
     // on time
-    Thread.sleep(60000)
-    verifyMonitorStats("/_plugins/_alerting")
+    var passed = false
+    OpenSearchTestCase.waitUntil({
+        try {
+            // Run verifyMonitorStats until all of its assertions pass
+            verifyMonitorStats("/_plugins/_alerting")
+            passed = true
+            return@waitUntil true
+        } catch (e: AssertionError) {
+            return@waitUntil false
+        }
+    }, 1, TimeUnit.MINUTES)
+    if (!passed) {
+        // if the wait hit its max time (1 minute), run verifyMonitorStats once more so any remaining assertion failure is reported
+        verifyMonitorStats("/_plugins/_alerting")
+    }
 }
 }
 break
@@ -112,7 +126,7 @@ class AlertingBackwardsCompatibilityIT : AlertingRestTestCase() {

     @Throws(Exception::class)
     private fun createBasicMonitor() {
         val indexName = "test_bwc_index"
-        val legacyMonitorString = """
+        val bwcMonitorString = """
         {
           "type": "monitor",
           "name": "test_bwc_monitor",
@@ -123,43 +137,38 @@
           "schedule": {
             "period": {
               "interval": 1,
               "unit": "MINUTES"
             }
           },
-          "inputs": [
-            {
-              "search": {
-                "indices": [
-                  "$indexName"
-                ],
+          "inputs": [{
+            "search": {
+              "indices": ["$indexName"],
+              "query": {
+                "size": 0,
+                "aggregations": {},
                 "query": {
-                  "size": 0,
-                  "query": {
-                    "match_all": {}
-                  }
+                  "match_all": {}
                 }
               }
             }
-          ],
-          "triggers": [
-            {
-              "name": "abc",
-              "severity": "1",
-              "condition": {
-                "script": {
-                  "source": "ctx.results[0].hits.total.value > 100000",
-                  "lang": "painless"
-                }
-              },
-              "actions": []
-            }
-          ]
+          }],
+          "triggers": [{
+            "name": "abc",
+            "severity": "1",
+            "condition": {
+              "script": {
+                "source": "ctx.results[0].hits.total.value > 100000",
+                "lang": "painless"
+              }
+            },
+            "actions": []
+          }]
         }
         """.trimIndent()
         createIndex(indexName, Settings.EMPTY)
         val createResponse = client().makeRequest(
             method = "POST",
-            endpoint = "$LEGACY_OPENDISTRO_ALERTING_BASE_URI?refresh=true",
+            endpoint = "$ALERTING_BASE_URI?refresh=true",
             params = emptyMap(),
-            entity = StringEntity(legacyMonitorString, APPLICATION_JSON)
+            entity = StringEntity(bwcMonitorString, APPLICATION_JSON)
         )
         assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus())
@@ -167,7 +176,7 @@
         val createdId = responseBody["_id"] as String
         val createdVersion = responseBody["_version"] as Int
         assertNotEquals("Create monitor response is missing id", Monitor.NO_ID, createdId)
-        assertTrue("Create monitor reponse has incorrect version", createdVersion > 0)
+        assertTrue("Create monitor response has incorrect version", createdVersion > 0)
     }

     @Throws(Exception::class)
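The two chainedAlertCondition test files added below exercise the infix-to-postfix ("posix") rewrite of chained-alert trigger expressions and the evaluation of that postfix form against the set of monitor ids that generated alerts. A rough, self-contained sketch of the evaluation half (evaluatePostfix is a hypothetical name, and only the monitor[id=...] operand form is handled here; the real logic is the ChainedAlertExpressionParser/evaluate pair used in the tests):

    fun evaluatePostfix(postfix: String, alertingMonitors: Set<String>): Boolean {
        val stack = ArrayDeque<Boolean>()
        for (token in postfix.trim().split(" ")) {
            when (token) {
                // Binary operators pop two operands; "!" pops one.
                "&&" -> { val r = stack.removeLast(); val l = stack.removeLast(); stack.addLast(l && r) }
                "||" -> { val r = stack.removeLast(); val l = stack.removeLast(); stack.addLast(l || r) }
                "!" -> stack.addLast(!stack.removeLast())
                // Operand: monitor[id=xyz] is true when monitor xyz produced an alert.
                else -> stack.addLast(token.removeSurrounding("monitor[id=", "]") in alertingMonitors)
            }
        }
        return stack.single()
    }

    fun main() {
        // "monitor[id=123] monitor[id=456] ! && " is the postfix form of (monitor[id=123] && !monitor[id=456]).
        println(evaluatePostfix("monitor[id=123] monitor[id=456] ! && ", setOf("123")))        // true
        println(evaluatePostfix("monitor[id=123] monitor[id=456] ! && ", setOf("123", "456"))) // false
    }

This mirrors the AND-with-NOT expectations in ChainedAlertsExpressionResolveTests below: the trigger fires only when monitor 123 alerted and monitor 456 did not.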
diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionParserTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionParserTests.kt
new file mode 100644
index 000000000..7ebc82697
--- /dev/null
+++ b/alerting/src/test/kotlin/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionParserTests.kt
@@ -0,0 +1,84 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.chainedAlertCondition
+
+import org.junit.Assert
+import org.opensearch.alerting.chainedAlertCondition.parsers.ChainedAlertExpressionParser
+import org.opensearch.test.OpenSearchTestCase
+
+class ChainedAlertsExpressionParserTests : OpenSearchTestCase() {
+
+    fun `test trigger expression posix parsing simple AND`() {
+        val eqString = "(monitor[id=abc] && monitor[id=xyz])"
+        val equation = ChainedAlertExpressionParser(eqString).parse()
+        val expectedEquation = "monitor[id=abc] monitor[id=xyz] && "
+        Assert.assertTrue(expectedEquation == equation.toString())
+    }
+
+    fun `test trigger expression posix parsing simple AND without parentheses`() {
+        val eqString = "monitor[id=abc] && monitor[id=xyz]"
+        val equation = ChainedAlertExpressionParser(eqString).parse()
+        val expectedEquation = "monitor[id=abc] monitor[id=xyz] && "
+        Assert.assertTrue(expectedEquation == equation.toString())
+    }
+
+    fun `test trigger expression posix parsing multiple AND`() {
+        val eqString = "(monitor[id=abc] && monitor[id=def]) && monitor[id=ghi]"
+        val equation = ChainedAlertExpressionParser(eqString).parse()
+        Assert.assertEquals("monitor[id=abc] monitor[id=def] && monitor[id=ghi] && ", equation.toString())
+    }
+
+    fun `test trigger expression posix parsing multiple AND with parenthesis`() {
+        val eqString = "(monitor[id=sigma-123] && monitor[id=sigma-456]) && (monitor[id=sigma-789] && monitor[id=id-2aw34])"
+        val equation = ChainedAlertExpressionParser(eqString).parse()
+        Assert.assertEquals(
+            "monitor[id=sigma-123] monitor[id=sigma-456] && monitor[id=sigma-789] monitor[id=id-2aw34] && && ",
+            equation.toString()
+        )
+    }
+
+    fun `test trigger expression posix parsing simple OR`() {
+        val eqString = "(monitor[id=sigma-123] || monitor[id=sigma-456])"
+        val equation = ChainedAlertExpressionParser(eqString).parse()
+        Assert.assertEquals("monitor[id=sigma-123] monitor[id=sigma-456] || ", equation.toString())
+    }
+
+    fun `test trigger expression posix parsing multiple OR`() {
+        val eqString = "(monitor[id=sigma-123] || monitor[id=sigma-456]) || monitor[id=sigma-789]"
+        val equation = ChainedAlertExpressionParser(eqString).parse()
+        Assert.assertEquals("monitor[id=sigma-123] monitor[id=sigma-456] || monitor[id=sigma-789] || ", equation.toString())
+    }
+
+    fun `test trigger expression posix parsing multiple OR with parenthesis`() {
+        val eqString = "(monitor[id=sigma-123] || monitor[id=sigma-456]) || (monitor[id=sigma-789] || monitor[id=id-2aw34])"
+        val equation = ChainedAlertExpressionParser(eqString).parse()
+        Assert.assertEquals(
+            "monitor[id=sigma-123] monitor[id=sigma-456] || monitor[id=sigma-789] monitor[id=id-2aw34] || || ",
+            equation.toString()
+        )
+    }
+
+    fun `test trigger expression posix parsing simple NOT`() {
+        val eqString = "(monitor[id=sigma-123] || !monitor[id=sigma-456])"
+        val equation = ChainedAlertExpressionParser(eqString).parse()
+        Assert.assertEquals("monitor[id=sigma-123] monitor[id=sigma-456] ! || ", equation.toString())
+    }
+
+    fun `test trigger expression posix parsing multiple NOT`() {
+        val eqString = "(monitor[id=sigma-123] && !monitor[tag=tag-456]) && !(monitor[id=sigma-789])"
+        val equation = ChainedAlertExpressionParser(eqString).parse()
+        Assert.assertEquals("monitor[id=sigma-123] monitor[tag=tag-456] ! && monitor[id=sigma-789] ! && ", equation.toString())
+    }
+
+    fun `test trigger expression posix parsing multiple operators with parenthesis`() {
+        val eqString = "(monitor[id=sigma-123] && monitor[tag=sev1]) || !(!monitor[id=sigma-789] || monitor[id=id-2aw34])"
+        val equation = ChainedAlertExpressionParser(eqString).parse()
+        Assert.assertEquals(
+            "monitor[id=sigma-123] monitor[tag=sev1] && monitor[id=sigma-789] ! monitor[id=id-2aw34] || ! || ",
+            equation.toString()
+        )
+    }
+}
diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionResolveTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionResolveTests.kt
new file mode 100644
index 000000000..a0851d58d
--- /dev/null
+++ b/alerting/src/test/kotlin/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionResolveTests.kt
@@ -0,0 +1,118 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.chainedAlertCondition
+
+import org.junit.Assert
+import org.opensearch.alerting.chainedAlertCondition.parsers.ChainedAlertExpressionParser
+import org.opensearch.test.OpenSearchTestCase
+
+class ChainedAlertsExpressionResolveTests : OpenSearchTestCase() {
+
+    fun `test chained alert trigger expression evaluation simple AND`() {
+        val eqString = "(monitor[id=123] && monitor[id=456])"
+        val equation = ChainedAlertExpressionParser(eqString).parse()
+        Assert.assertEquals("monitor[id=123] monitor[id=456] && ", equation.toString())
+        val alertGeneratingMonitors: Set<String> = setOf(
+            "123",
+            "456"
+        )
+        Assert.assertTrue(equation.evaluate(alertGeneratingMonitors))
+        val alertGeneratingMonitors2: Set<String> = setOf(
+            "123",
+            "789"
+        )
+        Assert.assertFalse(equation.evaluate(alertGeneratingMonitors2))
+    }
+
+    fun `test chained alert trigger expression evaluation AND with NOT`() {
+        val eqString = "(monitor[id=123] && !monitor[id=456])"
+        val equation = ChainedAlertExpressionParser(eqString).parse()
+        Assert.assertEquals("monitor[id=123] monitor[id=456] ! && ", equation.toString())
+        val alertGeneratingMonitors: Set<String> = setOf(
+            "123",
+            "456"
+        )
+        Assert.assertFalse(equation.evaluate(alertGeneratingMonitors))
+        val alertGeneratingMonitors1: Set<String> = setOf(
+            "123",
+            "223"
+        )
+        Assert.assertTrue(equation.evaluate(alertGeneratingMonitors1))
+    }
+
+    fun `test chained alert trigger expression evaluation simple OR`() {
+        val eqString = "(monitor[id=123] || monitor[id=456])"
+        val equation = ChainedAlertExpressionParser(eqString).parse()
+        Assert.assertEquals("monitor[id=123] monitor[id=456] || ", equation.toString())
+        val alertGeneratingMonitors: Set<String> = setOf(
+            "123",
+            "456"
+        )
+        Assert.assertTrue(equation.evaluate(alertGeneratingMonitors))
+        val alertGeneratingMonitors2: Set<String> = setOf(
+            "234",
+            "567"
+        )
+        Assert.assertFalse(equation.evaluate(alertGeneratingMonitors2))
+    }
+
+    fun `test chained alert trigger expression evaluation OR with NOT`() {
+        val eqString = "(monitor[id=123] || !monitor[id=456])"
+        val equation = ChainedAlertExpressionParser(eqString).parse()
+        Assert.assertEquals("monitor[id=123] monitor[id=456] ! || ", equation.toString())
+        val alertGeneratingMonitors: Set<String> = setOf(
+            "123",
+            "456"
+        )
+        Assert.assertTrue(equation.evaluate(alertGeneratingMonitors))
+        val alertGeneratingMonitors2: Set<String> = setOf(
+            "456"
+        )
+        Assert.assertFalse(equation.evaluate(alertGeneratingMonitors2))
+    }
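+
+    // Worked example for the OR-with-NOT case above when only "456" alerted: monitor[id=123]
+    // pushes false, monitor[id=456] pushes true, "!" flips it to false, and "||" leaves
+    // false || false = false, which is why evaluate returns false for that set.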
+
+    fun `test chained alert trigger expression evaluation simple NOT`() {
+        val eqString = "!(monitor[id=456])"
+        val equation = ChainedAlertExpressionParser(eqString).parse()
+        Assert.assertEquals("monitor[id=456] ! ", equation.toString())
+        val alertGeneratingMonitors: Set<String> = setOf(
+            "123"
+        )
+        Assert.assertTrue(equation.evaluate(alertGeneratingMonitors))
+        val alertGeneratingMonitors2: Set<String> = setOf(
+            "456"
+        )
+        Assert.assertFalse(equation.evaluate(alertGeneratingMonitors2))
+    }
+
+    fun `test chained alert trigger expression evaluation with multiple operators with parenthesis`() {
+        val eqString = "(monitor[id=123] && monitor[id=456]) || !(!monitor[id=789] || monitor[id=abc])"
+        val equation = ChainedAlertExpressionParser(eqString).parse()
+        Assert.assertEquals(
+            "monitor[id=123] monitor[id=456] && monitor[id=789] ! monitor[id=abc] || ! || ",
+            equation.toString()
+        )
+        // part 1 evaluates, part 2 evaluates
+        val alertGeneratingMonitors1: Set<String> = setOf(
+            "123",
+            "456",
+            "789",
+            "abc"
+        )
+        Assert.assertTrue(equation.evaluate(alertGeneratingMonitors1))
+        // part 1 not evaluates, part 2 not evaluates
+        val alertGeneratingMonitors2: Set<String> = setOf(
+            "789",
+            "abc"
+        )
+        Assert.assertFalse(equation.evaluate(alertGeneratingMonitors2))
+        // part 1 not evaluates, part 2 evaluates
+        val alertGeneratingMonitors3: Set<String> = setOf(
+            "789"
+        )
+        Assert.assertTrue(equation.evaluate(alertGeneratingMonitors3))
+    }
+}
diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/model/AlertTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/model/AlertTests.kt
index 698754055..08fba74cb 100644
--- a/alerting/src/test/kotlin/org/opensearch/alerting/model/AlertTests.kt
+++ b/alerting/src/test/kotlin/org/opensearch/alerting/model/AlertTests.kt
@@ -8,6 +8,7 @@ package org.opensearch.alerting.model
 import org.junit.Assert
 import org.opensearch.alerting.randomAlert
 import org.opensearch.alerting.randomAlertWithAggregationResultBucket
+import org.opensearch.commons.alerting.model.Alert
 import org.opensearch.test.OpenSearchTestCase

 class AlertTests : OpenSearchTestCase() {
diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/model/DestinationTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/model/DestinationTests.kt
index 777206449..7dac05b2a 100644
--- a/alerting/src/test/kotlin/org/opensearch/alerting/model/DestinationTests.kt
+++ b/alerting/src/test/kotlin/org/opensearch/alerting/model/DestinationTests.kt
@@ -16,7 +16,7 @@ import org.opensearch.alerting.parser
 import org.opensearch.alerting.randomUser
 import org.opensearch.alerting.util.DestinationType
 import org.opensearch.common.io.stream.BytesStreamOutput
-import org.opensearch.common.io.stream.StreamInput
+import org.opensearch.core.common.io.stream.StreamInput
 import org.opensearch.test.OpenSearchTestCase
 import java.time.Instant
diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/model/DocLevelMonitorInputTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/model/DocLevelMonitorInputTests.kt
deleted file mode 100644
index 4c6f1825d..000000000
--- a/alerting/src/test/kotlin/org/opensearch/alerting/model/DocLevelMonitorInputTests.kt
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: 
Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.alerting.core.model.DocLevelMonitorInput -import org.opensearch.alerting.core.model.DocLevelQuery -import org.opensearch.alerting.opensearchapi.string -import org.opensearch.alerting.randomDocLevelMonitorInput -import org.opensearch.alerting.randomDocLevelQuery -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentType -import org.opensearch.test.OpenSearchTestCase -import java.lang.IllegalArgumentException - -class DocLevelMonitorInputTests : OpenSearchTestCase() { - fun `test DocLevelQuery asTemplateArgs`() { - // GIVEN - val query = randomDocLevelQuery() - - // WHEN - val templateArgs = query.asTemplateArg() - - // THEN - assertEquals("Template args 'id' field does not match:", templateArgs[DocLevelQuery.QUERY_ID_FIELD], query.id) - assertEquals("Template args 'query' field does not match:", templateArgs[DocLevelQuery.QUERY_FIELD], query.query) - assertEquals("Template args 'name' field does not match:", templateArgs[DocLevelQuery.NAME_FIELD], query.name) - assertEquals("Template args 'tags' field does not match:", templateArgs[DocLevelQuery.TAGS_FIELD], query.tags) - } - - fun `test create Doc Level Query with invalid characters for name`() { - val badString = "query with space" - try { - randomDocLevelQuery(name = badString) - fail("Expecting an illegal argument exception") - } catch (e: IllegalArgumentException) { - assertEquals( - "They query name or tag, $badString, contains an invalid character: [' ','[',']','{','}','(',')']", - e.message - ) - } - } - - @Throws(IllegalArgumentException::class) - fun `test create Doc Level Query with invalid characters for tags`() { - val badString = "[(){}]" - try { - randomDocLevelQuery(tags = listOf(badString)) - fail("Expecting an illegal argument exception") - } catch (e: IllegalArgumentException) { - assertEquals( - "They query name or tag, $badString, contains an invalid character: [' ','[',']','{','}','(',')']", - e.message - ) - } - } - - fun `test DocLevelMonitorInput asTemplateArgs`() { - // GIVEN - val input = randomDocLevelMonitorInput() - - // test - val inputString = input.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS).string() - // assertEquals("test", inputString) - // test end - // WHEN - val templateArgs = input.asTemplateArg() - - // THEN - assertEquals( - "Template args 'description' field does not match:", - templateArgs[DocLevelMonitorInput.DESCRIPTION_FIELD], - input.description - ) - assertEquals( - "Template args 'indices' field does not match:", - templateArgs[DocLevelMonitorInput.INDICES_FIELD], - input.indices - ) - assertEquals( - "Template args 'queries' field does not contain the expected number of queries:", - input.queries.size, - (templateArgs[DocLevelMonitorInput.QUERIES_FIELD] as List<*>).size - ) - input.queries.forEach { - assertTrue( - "Template args 'queries' field does not match:", - (templateArgs[DocLevelMonitorInput.QUERIES_FIELD] as List<*>).contains(it.asTemplateArg()) - ) - } - } -} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/model/FindingTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/model/FindingTests.kt index 5078beb2d..f77ca3ddc 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/model/FindingTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/model/FindingTests.kt @@ -6,6 +6,7 @@ package org.opensearch.alerting.model import 
org.opensearch.alerting.randomFinding +import org.opensearch.commons.alerting.model.Finding import org.opensearch.test.OpenSearchTestCase class FindingTests : OpenSearchTestCase() { diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/model/WriteableTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/model/WriteableTests.kt index 9e01cd09b..6ef15f8d8 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/model/WriteableTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/model/WriteableTests.kt @@ -5,120 +5,26 @@ package org.opensearch.alerting.model -import org.opensearch.alerting.core.model.SearchInput -import org.opensearch.alerting.model.action.Action -import org.opensearch.alerting.model.action.ActionExecutionPolicy -import org.opensearch.alerting.model.action.Throttle import org.opensearch.alerting.model.destination.email.EmailAccount import org.opensearch.alerting.model.destination.email.EmailGroup -import org.opensearch.alerting.randomAction -import org.opensearch.alerting.randomActionExecutionPolicy import org.opensearch.alerting.randomActionRunResult import org.opensearch.alerting.randomBucketLevelMonitorRunResult -import org.opensearch.alerting.randomBucketLevelTrigger import org.opensearch.alerting.randomBucketLevelTriggerRunResult import org.opensearch.alerting.randomDocumentLevelMonitorRunResult -import org.opensearch.alerting.randomDocumentLevelTrigger import org.opensearch.alerting.randomDocumentLevelTriggerRunResult import org.opensearch.alerting.randomEmailAccount import org.opensearch.alerting.randomEmailGroup import org.opensearch.alerting.randomInputRunResults -import org.opensearch.alerting.randomQueryLevelMonitor import org.opensearch.alerting.randomQueryLevelMonitorRunResult -import org.opensearch.alerting.randomQueryLevelTrigger import org.opensearch.alerting.randomQueryLevelTriggerRunResult -import org.opensearch.alerting.randomThrottle -import org.opensearch.alerting.randomUser -import org.opensearch.alerting.randomUserEmpty import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.commons.authuser.User +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.search.builder.SearchSourceBuilder import org.opensearch.test.OpenSearchTestCase class WriteableTests : OpenSearchTestCase() { - fun `test throttle as stream`() { - val throttle = randomThrottle() - val out = BytesStreamOutput() - throttle.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newThrottle = Throttle(sin) - assertEquals("Round tripping Throttle doesn't work", throttle, newThrottle) - } - - fun `test action as stream`() { - val action = randomAction() - val out = BytesStreamOutput() - action.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newAction = Action(sin) - assertEquals("Round tripping Action doesn't work", action, newAction) - } - - fun `test action as stream with null subject template`() { - val action = randomAction().copy(subjectTemplate = null) - val out = BytesStreamOutput() - action.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newAction = Action(sin) - assertEquals("Round tripping Action doesn't work", action, newAction) - } - - fun `test action as stream with null throttle`() { - val action = randomAction().copy(throttle = null) - val out = BytesStreamOutput() - action.writeTo(out) - val sin = 
StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newAction = Action(sin) - assertEquals("Round tripping Action doesn't work", action, newAction) - } - - fun `test action as stream with throttled enabled and null throttle`() { - val action = randomAction().copy(throttle = null).copy(throttleEnabled = true) - val out = BytesStreamOutput() - action.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newAction = Action(sin) - assertEquals("Round tripping Action doesn't work", action, newAction) - } - - fun `test query-level monitor as stream`() { - val monitor = randomQueryLevelMonitor().copy(inputs = listOf(SearchInput(emptyList(), SearchSourceBuilder()))) - val out = BytesStreamOutput() - monitor.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newMonitor = Monitor(sin) - assertEquals("Round tripping QueryLevelMonitor doesn't work", monitor, newMonitor) - } - - fun `test query-level trigger as stream`() { - val trigger = randomQueryLevelTrigger() - val out = BytesStreamOutput() - trigger.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newTrigger = QueryLevelTrigger.readFrom(sin) - assertEquals("Round tripping QueryLevelTrigger doesn't work", trigger, newTrigger) - } - - fun `test bucket-level trigger as stream`() { - val trigger = randomBucketLevelTrigger() - val out = BytesStreamOutput() - trigger.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newTrigger = BucketLevelTrigger.readFrom(sin) - assertEquals("Round tripping BucketLevelTrigger doesn't work", trigger, newTrigger) - } - - fun `test doc-level trigger as stream`() { - val trigger = randomDocumentLevelTrigger() - val out = BytesStreamOutput() - trigger.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newTrigger = DocumentLevelTrigger.readFrom(sin) - assertEquals("Round tripping DocumentLevelTrigger doesn't work", trigger, newTrigger) - } - fun `test actionrunresult as stream`() { val actionRunResult = randomActionRunResult() val out = BytesStreamOutput() @@ -134,7 +40,10 @@ class WriteableTests : OpenSearchTestCase() { runResult.writeTo(out) val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) val newRunResult = QueryLevelTriggerRunResult(sin) - assertEquals("Round tripping ActionRunResult doesn't work", runResult, newRunResult) + assertEquals(runResult.triggerName, newRunResult.triggerName) + assertEquals(runResult.triggered, newRunResult.triggered) + assertEquals(runResult.error, newRunResult.error) + assertEquals(runResult.actionResults, newRunResult.actionResults) } fun `test bucket-level triggerrunresult as stream`() { @@ -200,24 +109,6 @@ class WriteableTests : OpenSearchTestCase() { assertEquals("Round tripping MonitorRunResult doesn't work", input, newInput) } - fun `test user as stream`() { - val user = randomUser() - val out = BytesStreamOutput() - user.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newUser = User(sin) - assertEquals("Round tripping User doesn't work", user, newUser) - } - - fun `test empty user as stream`() { - val user = randomUserEmpty() - val out = BytesStreamOutput() - user.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newUser = User(sin) - assertEquals("Round tripping User doesn't work", user, newUser) - } - fun `test emailaccount as stream`() { val emailAccount = randomEmailAccount() val out = BytesStreamOutput() @@ -235,13 +126,4 @@ class WriteableTests : 
OpenSearchTestCase() { val newEmailGroup = EmailGroup.readFrom(sin) assertEquals("Round tripping EmailGroup doesn't work", emailGroup, newEmailGroup) } - - fun `test action execution policy as stream`() { - val actionExecutionPolicy = randomActionExecutionPolicy() - val out = BytesStreamOutput() - actionExecutionPolicy.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newActionExecutionPolicy = ActionExecutionPolicy.readFrom(sin) - assertEquals("Round tripping ActionExecutionPolicy doesn't work", actionExecutionPolicy, newActionExecutionPolicy) - } } diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/model/XContentTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/model/XContentTests.kt index 17e772890..7d07af331 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/model/XContentTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/model/XContentTests.kt @@ -6,175 +6,23 @@ package org.opensearch.alerting.model import org.opensearch.alerting.builder -import org.opensearch.alerting.core.model.SearchInput -import org.opensearch.alerting.model.action.Action -import org.opensearch.alerting.model.action.ActionExecutionPolicy -import org.opensearch.alerting.model.action.PerExecutionActionScope -import org.opensearch.alerting.model.action.Throttle import org.opensearch.alerting.model.destination.email.EmailAccount import org.opensearch.alerting.model.destination.email.EmailGroup -import org.opensearch.alerting.opensearchapi.string import org.opensearch.alerting.parser -import org.opensearch.alerting.randomAction -import org.opensearch.alerting.randomActionExecutionPolicy import org.opensearch.alerting.randomActionExecutionResult -import org.opensearch.alerting.randomActionWithPolicy import org.opensearch.alerting.randomAlert -import org.opensearch.alerting.randomBucketLevelMonitor -import org.opensearch.alerting.randomBucketLevelTrigger import org.opensearch.alerting.randomEmailAccount import org.opensearch.alerting.randomEmailGroup -import org.opensearch.alerting.randomQueryLevelMonitor -import org.opensearch.alerting.randomQueryLevelMonitorWithoutUser -import org.opensearch.alerting.randomQueryLevelTrigger -import org.opensearch.alerting.randomThrottle -import org.opensearch.alerting.randomUser -import org.opensearch.alerting.randomUserEmpty import org.opensearch.alerting.toJsonString -import org.opensearch.alerting.toJsonStringWithUser -import org.opensearch.common.xcontent.ToXContent import org.opensearch.common.xcontent.XContentFactory -import org.opensearch.commons.authuser.User -import org.opensearch.index.query.QueryBuilders -import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.commons.alerting.model.ActionExecutionResult +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.util.string +import org.opensearch.core.xcontent.ToXContent import org.opensearch.test.OpenSearchTestCase -import java.time.temporal.ChronoUnit -import kotlin.test.assertFailsWith class XContentTests : OpenSearchTestCase() { - fun `test action parsing`() { - val action = randomAction() - val actionString = action.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() - val parsedAction = Action.parse(parser(actionString)) - assertEquals("Round tripping Action doesn't work", action, parsedAction) - } - - fun `test action parsing with null subject template`() { - val action = randomAction().copy(subjectTemplate = null) - val actionString = action.toXContent(builder(), 
ToXContent.EMPTY_PARAMS).string() - val parsedAction = Action.parse(parser(actionString)) - assertEquals("Round tripping Action doesn't work", action, parsedAction) - } - - fun `test action parsing with null throttle`() { - val action = randomAction().copy(throttle = null) - val actionString = action.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() - val parsedAction = Action.parse(parser(actionString)) - assertEquals("Round tripping Action doesn't work", action, parsedAction) - } - - fun `test action parsing with throttled enabled and null throttle`() { - val action = randomAction().copy(throttle = null).copy(throttleEnabled = true) - val actionString = action.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() - assertFailsWith<IllegalArgumentException>("Action throttle enabled but not set throttle value") { - Action.parse(parser(actionString)) - } - } - - fun `test action with per execution scope does not support throttling`() { - try { - randomActionWithPolicy().copy( - throttleEnabled = true, - throttle = Throttle(value = 5, unit = ChronoUnit.MINUTES), - actionExecutionPolicy = ActionExecutionPolicy(PerExecutionActionScope()) - ) - fail("Creating an action with per execution scope and throttle enabled did not fail.") - } catch (ignored: IllegalArgumentException) { - } - } - - fun `test throttle parsing`() { - val throttle = randomThrottle() - val throttleString = throttle.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() - val parsedThrottle = Throttle.parse(parser(throttleString)) - assertEquals("Round tripping Monitor doesn't work", throttle, parsedThrottle) - } - - fun `test throttle parsing with wrong unit`() { - val throttle = randomThrottle() - val throttleString = throttle.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() - val wrongThrottleString = throttleString.replace("MINUTES", "wrongunit") - - assertFailsWith<IllegalArgumentException>("Only support MINUTES throttle unit") { Throttle.parse(parser(wrongThrottleString)) } - } - - fun `test throttle parsing with negative value`() { - val throttle = randomThrottle().copy(value = -1) - val throttleString = throttle.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() - - assertFailsWith<IllegalArgumentException>("Can only set positive throttle period") { Throttle.parse(parser(throttleString)) } - } - - fun `test query-level monitor parsing`() { - val monitor = randomQueryLevelMonitor() - - val monitorString = monitor.toJsonStringWithUser() - val parsedMonitor = Monitor.parse(parser(monitorString)) - assertEquals("Round tripping QueryLevelMonitor doesn't work", monitor, parsedMonitor) - } - - fun `test monitor parsing with no name`() { - val monitorStringWithoutName = """ - { - "type": "monitor", - "enabled": false, - "schedule": { - "period": { - "interval": 1, - "unit": "MINUTES" - } - }, - "inputs": [], - "triggers": [] - } - """.trimIndent() - - assertFailsWith<IllegalArgumentException>("Monitor name is null") { Monitor.parse(parser(monitorStringWithoutName)) } - } - - fun `test monitor parsing with no schedule`() { - val monitorStringWithoutSchedule = """ - { - "type": "monitor", - "name": "asdf", - "enabled": false, - "inputs": [], - "triggers": [] - } - """.trimIndent() - - assertFailsWith<IllegalArgumentException>("Monitor schedule is null") { - Monitor.parse(parser(monitorStringWithoutSchedule)) - } - } - - fun `test bucket-level monitor parsing`() { - val monitor = randomBucketLevelMonitor() - - val monitorString = monitor.toJsonStringWithUser() - val parsedMonitor = Monitor.parse(parser(monitorString)) - assertEquals("Round tripping BucketLevelMonitor doesn't work", monitor, parsedMonitor) - } - - fun `test query-level 
trigger parsing`() { - val trigger = randomQueryLevelTrigger() - - val triggerString = trigger.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() - val parsedTrigger = Trigger.parse(parser(triggerString)) - - assertEquals("Round tripping QueryLevelTrigger doesn't work", trigger, parsedTrigger) - } - - fun `test bucket-level trigger parsing`() { - val trigger = randomBucketLevelTrigger() - - val triggerString = trigger.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() - val parsedTrigger = Trigger.parse(parser(triggerString)) - - assertEquals("Round tripping BucketLevelTrigger doesn't work", trigger, parsedTrigger) - } - fun `test alert parsing`() { val alert = randomAlert() @@ -215,41 +63,6 @@ class XContentTests : OpenSearchTestCase() { assertEquals("Round tripping alert doesn't work", actionExecutionResult, parsedActionExecutionResultString) } - fun `test creating a monitor with duplicate trigger ids fails`() { - try { - val repeatedTrigger = randomQueryLevelTrigger() - randomQueryLevelMonitor().copy(triggers = listOf(repeatedTrigger, repeatedTrigger)) - fail("Creating a monitor with duplicate triggers did not fail.") - } catch (ignored: IllegalArgumentException) { - } - } - - fun `test user parsing`() { - val user = randomUser() - val userString = user.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() - val parsedUser = User.parse(parser(userString)) - assertEquals("Round tripping user doesn't work", user, parsedUser) - } - - fun `test empty user parsing`() { - val user = randomUserEmpty() - val userString = user.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() - - val parsedUser = User.parse(parser(userString)) - assertEquals("Round tripping user doesn't work", user, parsedUser) - assertEquals("", parsedUser.name) - assertEquals(0, parsedUser.roles.size) - } - - fun `test query-level monitor parsing without user`() { - val monitor = randomQueryLevelMonitorWithoutUser() - - val monitorString = monitor.toJsonString() - val parsedMonitor = Monitor.parse(parser(monitorString)) - assertEquals("Round tripping QueryLevelMonitor doesn't work", monitor, parsedMonitor) - assertNull(parsedMonitor.user) - } - fun `test email account parsing`() { val emailAccount = randomEmailAccount() @@ -266,124 +79,14 @@ class XContentTests : OpenSearchTestCase() { assertEquals("Round tripping EmailGroup doesn't work", emailGroup, parsedEmailGroup) } - fun `test old monitor format parsing`() { - val monitorString = """ - { - "type": "monitor", - "schema_version": 3, - "name": "asdf", - "user": { - "name": "admin123", - "backend_roles": [], - "roles": [ - "all_access", - "security_manager" - ], - "custom_attribute_names": [], - "user_requested_tenant": null - }, - "enabled": true, - "enabled_time": 1613530078244, - "schedule": { - "period": { - "interval": 1, - "unit": "MINUTES" - } - }, - "inputs": [ - { - "search": { - "indices": [ - "test_index" - ], - "query": { - "size": 0, - "query": { - "bool": { - "filter": [ - { - "range": { - "order_date": { - "from": "{{period_end}}||-1h", - "to": "{{period_end}}", - "include_lower": true, - "include_upper": true, - "format": "epoch_millis", - "boost": 1.0 - } - } - } - ], - "adjust_pure_negative": true, - "boost": 1.0 - } - }, - "aggregations": {} - } - } - } - ], - "triggers": [ - { - "id": "e_sc0XcB98Q42rHjTh4K", - "name": "abc", - "severity": "1", - "condition": { - "script": { - "source": "ctx.results[0].hits.total.value > 100000", - "lang": "painless" - } - }, - "actions": [] - } - ], - "last_update_time": 1614121489719 - } - 
""".trimIndent() - val parsedMonitor = Monitor.parse(parser(monitorString)) - assertEquals("Incorrect monitor type", Monitor.MonitorType.QUERY_LEVEL_MONITOR, parsedMonitor.monitorType) - assertEquals("Incorrect trigger count", 1, parsedMonitor.triggers.size) - val trigger = parsedMonitor.triggers.first() - assertTrue("Incorrect trigger type", trigger is QueryLevelTrigger) - assertEquals("Incorrect name for parsed trigger", "abc", trigger.name) - } - - fun `test creating an query-level monitor with invalid trigger type fails`() { - try { - val bucketLevelTrigger = randomBucketLevelTrigger() - randomQueryLevelMonitor().copy(triggers = listOf(bucketLevelTrigger)) - fail("Creating a query-level monitor with bucket-level triggers did not fail.") - } catch (ignored: IllegalArgumentException) { - } - } - - fun `test creating an bucket-level monitor with invalid trigger type fails`() { - try { - val queryLevelTrigger = randomQueryLevelTrigger() - randomBucketLevelMonitor().copy(triggers = listOf(queryLevelTrigger)) - fail("Creating a bucket-level monitor with query-level triggers did not fail.") - } catch (ignored: IllegalArgumentException) { - } - } - - fun `test creating an bucket-level monitor with invalid input fails`() { - try { - val invalidInput = SearchInput(emptyList(), SearchSourceBuilder().query(QueryBuilders.matchAllQuery())) - randomBucketLevelMonitor().copy(inputs = listOf(invalidInput)) - fail("Creating an bucket-level monitor with an invalid input did not fail.") - } catch (ignored: IllegalArgumentException) { - } - } - - fun `test action execution policy`() { - val actionExecutionPolicy = randomActionExecutionPolicy() - val actionExecutionPolicyString = actionExecutionPolicy.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() - val parsedActionExecutionPolicy = ActionExecutionPolicy.parse(parser(actionExecutionPolicyString)) - assertEquals("Round tripping ActionExecutionPolicy doesn't work", actionExecutionPolicy, parsedActionExecutionPolicy) - } - fun `test MonitorMetadata`() { - val monitorMetadata = MonitorMetadata("monitorId-metadata", "monitorId", emptyList(), emptyMap()) + val monitorMetadata = MonitorMetadata( + id = "monitorId-metadata", + monitorId = "monitorId", + lastActionExecutionTimes = emptyList(), + lastRunContext = emptyMap(), + sourceToQueryIndexMapping = mutableMapOf() + ) val monitorMetadataString = monitorMetadata.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS).string() val parsedMonitorMetadata = MonitorMetadata.parse(parser(monitorMetadataString)) assertEquals("Round tripping MonitorMetadata doesn't work", monitorMetadata, parsedMonitorMetadata) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/EmailAccountRestApiIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/EmailAccountRestApiIT.kt index fe7f1a22c..c86270f5b 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/EmailAccountRestApiIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/EmailAccountRestApiIT.kt @@ -5,8 +5,8 @@ package org.opensearch.alerting.resthandler -import org.apache.http.entity.ContentType -import org.apache.http.nio.entity.NStringEntity +import org.apache.hc.core5.http.ContentType +import org.apache.hc.core5.http.io.entity.StringEntity import org.opensearch.alerting.AlertingPlugin.Companion.EMAIL_ACCOUNT_BASE_URI import org.opensearch.alerting.AlertingRestTestCase import org.opensearch.alerting.makeRequest @@ -14,8 +14,8 @@ import 
org.opensearch.alerting.model.destination.email.EmailAccount import org.opensearch.alerting.randomEmailAccount import org.opensearch.client.ResponseException import org.opensearch.common.xcontent.XContentType +import org.opensearch.core.rest.RestStatus import org.opensearch.index.query.QueryBuilders -import org.opensearch.rest.RestStatus import org.opensearch.search.builder.SearchSourceBuilder import org.opensearch.test.OpenSearchTestCase import org.opensearch.test.junit.annotations.TestLogging @@ -110,7 +110,7 @@ class EmailAccountRestApiIT : AlertingRestTestCase() { "GET", "$EMAIL_ACCOUNT_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Search email account failed", RestStatus.OK, searchResponse.restStatus()) val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) @@ -127,7 +127,7 @@ class EmailAccountRestApiIT : AlertingRestTestCase() { "POST", "$EMAIL_ACCOUNT_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Search email account failed", RestStatus.OK, searchResponse.restStatus()) val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) @@ -151,7 +151,7 @@ class EmailAccountRestApiIT : AlertingRestTestCase() { "GET", "$EMAIL_ACCOUNT_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Search email account failed", RestStatus.OK, searchResponse.restStatus()) val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) @@ -170,7 +170,7 @@ class EmailAccountRestApiIT : AlertingRestTestCase() { "GET", "$EMAIL_ACCOUNT_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) fail("Expected 403 Method FORBIDDEN response") } catch (e: ResponseException) { diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/EmailGroupRestApiIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/EmailGroupRestApiIT.kt index 4fd9f37af..c60a09d6b 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/EmailGroupRestApiIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/EmailGroupRestApiIT.kt @@ -5,8 +5,8 @@ package org.opensearch.alerting.resthandler -import org.apache.http.entity.ContentType -import org.apache.http.nio.entity.NStringEntity +import org.apache.hc.core5.http.ContentType +import org.apache.hc.core5.http.io.entity.StringEntity import org.opensearch.alerting.AlertingPlugin.Companion.EMAIL_GROUP_BASE_URI import org.opensearch.alerting.AlertingRestTestCase import org.opensearch.alerting.makeRequest @@ -15,8 +15,8 @@ import org.opensearch.alerting.model.destination.email.EmailGroup import org.opensearch.alerting.randomEmailGroup import org.opensearch.client.ResponseException import org.opensearch.common.xcontent.XContentType +import org.opensearch.core.rest.RestStatus import org.opensearch.index.query.QueryBuilders -import org.opensearch.rest.RestStatus import org.opensearch.search.builder.SearchSourceBuilder import org.opensearch.test.OpenSearchTestCase import org.opensearch.test.junit.annotations.TestLogging @@ -103,7 +103,7 @@ class EmailGroupRestApiIT : AlertingRestTestCase() { "GET", "$EMAIL_GROUP_BASE_URI/_search", emptyMap(), - NStringEntity(search, 
ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Search email group failed", RestStatus.OK, searchResponse.restStatus()) val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) @@ -120,7 +120,7 @@ class EmailGroupRestApiIT : AlertingRestTestCase() { "POST", "$EMAIL_GROUP_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Search email group failed", RestStatus.OK, searchResponse.restStatus()) val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) @@ -144,7 +144,7 @@ class EmailGroupRestApiIT : AlertingRestTestCase() { "GET", "$EMAIL_GROUP_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Search email group failed", RestStatus.OK, searchResponse.restStatus()) val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) @@ -163,7 +163,7 @@ class EmailGroupRestApiIT : AlertingRestTestCase() { "GET", "$EMAIL_GROUP_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) fail("Expected 403 Method FORBIDDEN response") } catch (e: ResponseException) { diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/FindingsRestApiIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/FindingsRestApiIT.kt index ebaf45a41..bc9f1261c 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/FindingsRestApiIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/FindingsRestApiIT.kt @@ -7,10 +7,10 @@ package org.opensearch.alerting.resthandler import org.opensearch.alerting.ALWAYS_RUN import org.opensearch.alerting.AlertingRestTestCase -import org.opensearch.alerting.core.model.DocLevelMonitorInput -import org.opensearch.alerting.core.model.DocLevelQuery import org.opensearch.alerting.randomDocumentLevelMonitor import org.opensearch.alerting.randomDocumentLevelTrigger +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery import org.opensearch.test.junit.annotations.TestLogging @TestLogging("level:DEBUG", reason = "Debug for tests.") @@ -19,7 +19,7 @@ class FindingsRestApiIT : AlertingRestTestCase() { fun `test find Finding where doc is not retrieved`() { val testIndex = createTestIndex() - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) @@ -31,6 +31,33 @@ class FindingsRestApiIT : AlertingRestTestCase() { assertFalse(response.findings[0].documents[0].found) } + fun `test find Finding where source docData is null`() { + val testIndex = createTestIndex() + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_field" : "us-west-2" + }""" + indexDoc(testIndex, "someId", testDoc) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", 
listOf(testIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + executeMonitor(trueMonitor.id, mapOf(Pair("dryrun", "true"))) + + createFinding(matchingDocIds = listOf("someId"), index = testIndex) + val responseBeforeDelete = searchFindings() + assertEquals(1, responseBeforeDelete.totalFindings) + assertEquals(1, responseBeforeDelete.findings[0].documents.size) + assertTrue(responseBeforeDelete.findings[0].documents[0].found) + + deleteDoc(testIndex, "someId") + val responseAfterDelete = searchFindings() + assertEquals(1, responseAfterDelete.totalFindings) + assertEquals(1, responseAfterDelete.findings[0].documents.size) + assertFalse(responseAfterDelete.findings[0].documents[0].found) + } + fun `test find Finding where doc is retrieved`() { val testIndex = createTestIndex() val testDoc = """{ @@ -44,7 +71,7 @@ class FindingsRestApiIT : AlertingRestTestCase() { }""" indexDoc(testIndex, "someId2", testDoc2) - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) @@ -84,7 +111,7 @@ class FindingsRestApiIT : AlertingRestTestCase() { }""" indexDoc(testIndex, "someId2", testDoc2) - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) @@ -115,7 +142,7 @@ class FindingsRestApiIT : AlertingRestTestCase() { }""" indexDoc(testIndex, "someId2", testDoc2) - val docLevelQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "realQuery", tags = listOf("sigma")) + val docLevelQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "realQuery", fields = listOf(), tags = listOf("sigma")) val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docLevelQuery)) val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) @@ -150,7 +177,7 @@ class FindingsRestApiIT : AlertingRestTestCase() { }""" indexDoc(testIndex, "someId2", testDoc2) - val docLevelQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "realQuery", tags = listOf("sigma")) + val docLevelQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "realQuery", fields = listOf(), tags = listOf("sigma")) val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docLevelQuery)) val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) @@ -185,7 +212,7 @@ class FindingsRestApiIT : AlertingRestTestCase() { }""" indexDoc(testIndex, "someId2", testDoc2) - val docLevelQuery = 
DocLevelQuery(query = "test_field:\"us-west-2\"", name = "realQuery", tags = listOf("sigma")) + val docLevelQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "realQuery", fields = listOf(), tags = listOf("sigma")) val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docLevelQuery)) val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt index 66804bfb1..25e8d319d 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt @@ -4,10 +4,10 @@ */ package org.opensearch.alerting.resthandler -import org.apache.http.HttpHeaders -import org.apache.http.entity.ContentType -import org.apache.http.message.BasicHeader -import org.apache.http.nio.entity.NStringEntity +import org.apache.hc.core5.http.ContentType +import org.apache.hc.core5.http.HttpHeaders +import org.apache.hc.core5.http.io.entity.StringEntity +import org.apache.hc.core5.http.message.BasicHeader import org.opensearch.alerting.ALERTING_BASE_URI import org.opensearch.alerting.ALWAYS_RUN import org.opensearch.alerting.ANOMALY_DETECTOR_INDEX @@ -15,17 +15,8 @@ import org.opensearch.alerting.AlertingRestTestCase import org.opensearch.alerting.LEGACY_OPENDISTRO_ALERTING_BASE_URI import org.opensearch.alerting.alerts.AlertIndices import org.opensearch.alerting.anomalyDetectorIndexMapping -import org.opensearch.alerting.core.model.CronSchedule -import org.opensearch.alerting.core.model.DocLevelMonitorInput -import org.opensearch.alerting.core.model.DocLevelQuery -import org.opensearch.alerting.core.model.ScheduledJob -import org.opensearch.alerting.core.model.SearchInput import org.opensearch.alerting.core.settings.ScheduledJobSettings import org.opensearch.alerting.makeRequest -import org.opensearch.alerting.model.Alert -import org.opensearch.alerting.model.DocumentLevelTrigger -import org.opensearch.alerting.model.Monitor -import org.opensearch.alerting.model.QueryLevelTrigger import org.opensearch.alerting.model.destination.Chime import org.opensearch.alerting.model.destination.Destination import org.opensearch.alerting.randomADMonitor @@ -33,6 +24,7 @@ import org.opensearch.alerting.randomAction import org.opensearch.alerting.randomAlert import org.opensearch.alerting.randomAnomalyDetector import org.opensearch.alerting.randomAnomalyDetectorWithUser +import org.opensearch.alerting.randomBucketLevelMonitor import org.opensearch.alerting.randomBucketLevelTrigger import org.opensearch.alerting.randomDocumentLevelMonitor import org.opensearch.alerting.randomDocumentLevelTrigger @@ -45,21 +37,33 @@ import org.opensearch.alerting.toJsonString import org.opensearch.alerting.util.DestinationType import org.opensearch.client.ResponseException import org.opensearch.client.WarningFailureException -import org.opensearch.common.bytes.BytesReference import org.opensearch.common.unit.TimeValue -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.CronSchedule +import 
org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.DocumentLevelTrigger +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.QueryLevelTrigger +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.core.common.bytes.BytesReference +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.index.query.QueryBuilders -import org.opensearch.rest.RestStatus import org.opensearch.script.Script +import org.opensearch.search.aggregations.AggregationBuilders import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.search.sort.SortOrder import org.opensearch.test.OpenSearchTestCase import org.opensearch.test.junit.annotations.TestLogging import org.opensearch.test.rest.OpenSearchRestTestCase import java.time.Instant import java.time.ZoneId import java.time.temporal.ChronoUnit +import java.util.concurrent.TimeUnit @TestLogging("level:DEBUG", reason = "Debug for tests.") @Suppress("UNCHECKED_CAST") @@ -107,6 +111,21 @@ class MonitorRestApiIT : AlertingRestTestCase() { assertEquals("Incorrect Location header", "$ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) } + @Throws(Exception::class) + fun `test creating a bucket monitor`() { + val monitor = randomBucketLevelMonitor() + + val createResponse = client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + + assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus()) + val responseBody = createResponse.asMap() + val createdId = responseBody["_id"] as String + val createdVersion = responseBody["_version"] as Int + assertNotEquals("response is missing Id", Monitor.NO_ID, createdId) + assertTrue("incorrect version", createdVersion > 0) + assertEquals("Incorrect Location header", "$ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) + } + fun `test creating a monitor with legacy ODFE`() { val monitor = randomQueryLevelMonitor() val createResponse = client().makeRequest("POST", LEGACY_OPENDISTRO_ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) @@ -431,7 +450,7 @@ class MonitorRestApiIT : AlertingRestTestCase() { val searchResponse = client().makeRequest( "GET", "$ALERTING_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) @@ -447,7 +466,7 @@ class MonitorRestApiIT : AlertingRestTestCase() { val searchResponse = client().makeRequest( "POST", "$ALERTING_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) @@ -470,7 +489,7 @@ class MonitorRestApiIT : AlertingRestTestCase() { "GET", "$ALERTING_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Search monitor failed", RestStatus.OK, 
searchResponse.restStatus()) val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) @@ -487,7 +506,7 @@ class MonitorRestApiIT : AlertingRestTestCase() { "GET", "$ALERTING_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON), + StringEntity(search, ContentType.APPLICATION_JSON), header ) assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) @@ -513,7 +532,7 @@ class MonitorRestApiIT : AlertingRestTestCase() { "GET", "$ALERTING_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) @@ -792,7 +811,11 @@ class MonitorRestApiIT : AlertingRestTestCase() { assertEquals("Delete request not successful", RestStatus.OK, deleteResponse.restStatus()) // Wait 5 seconds for event to be processed and alerts moved - Thread.sleep(5000) + OpenSearchTestCase.waitUntil({ + val alerts = searchAlerts(monitor) + val historyAlerts = searchAlerts(monitor, AlertIndices.ALERT_HISTORY_WRITE_INDEX) + return@waitUntil (alerts.isEmpty() && historyAlerts.size == 1) + }, 5, TimeUnit.SECONDS) val alerts = searchAlerts(monitor) assertEquals("Active alert was not deleted", 0, alerts.size) @@ -806,6 +829,38 @@ class MonitorRestApiIT : AlertingRestTestCase() { ) } + fun `test delete trigger moves alerts then try to search alert by monitorId to find alert in history index`() { + client().updateSettings(ScheduledJobSettings.SWEEPER_ENABLED.key, true) + putAlertMappings() + val trigger = randomQueryLevelTrigger() + val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger))) + val alert = createAlert(randomAlert(monitor).copy(triggerId = trigger.id, state = Alert.State.ACTIVE)) + refreshIndex("*") + val updatedMonitor = monitor.copy(triggers = emptyList()) + val updateResponse = client().makeRequest( + "PUT", + "$ALERTING_BASE_URI/${monitor.id}", + emptyMap(), + updatedMonitor.toHttpEntity() + ) + assertEquals("Update request not successful", RestStatus.OK, updateResponse.restStatus()) + + // Wait 5 seconds for event to be processed and alerts moved + OpenSearchTestCase.waitUntil({ + return@waitUntil false + }, 5, TimeUnit.SECONDS) + + val alerts = searchAlerts(monitor) + assertEquals("Active alert was not deleted", 0, alerts.size) + + // Find alert by id and make sure it checks the history of alerts as well + val inputMap = HashMap<String, Any>() + inputMap["monitorId"] = monitor.id + val responseMap = getAlerts(inputMap).asMap() + + assertEquals(1, responseMap["totalAlerts"]) + } + fun `test delete trigger moves alerts`() { client().updateSettings(ScheduledJobSettings.SWEEPER_ENABLED.key, true) putAlertMappings() @@ -821,7 +876,11 @@ class MonitorRestApiIT : AlertingRestTestCase() { assertEquals("Update request not successful", RestStatus.OK, updateResponse.restStatus()) // Wait 5 seconds for event to be processed and alerts moved - Thread.sleep(5000) + OpenSearchTestCase.waitUntil({ + val alerts = searchAlerts(monitor) + val historyAlerts = searchAlerts(monitor, AlertIndices.ALERT_HISTORY_WRITE_INDEX) + return@waitUntil (alerts.isEmpty() && historyAlerts.size == 1) + }, 5, TimeUnit.SECONDS) val alerts = searchAlerts(monitor) assertEquals("Active alert was not deleted", 0, alerts.size) @@ -850,15 +909,27 @@ class MonitorRestApiIT : AlertingRestTestCase() { updatedMonitor.toHttpEntity() ) assertEquals("Update request not successful", RestStatus.OK, 
updateResponse.restStatus()) - - // Wait 5 seconds for event to be processed and alerts moved - Thread.sleep(5000) - + // Wait until postIndex hook is executed due to monitor update + waitUntil({ + val alerts = searchAlerts(monitor) + if (alerts.size == 1) { + return@waitUntil true + } + return@waitUntil false + }, 60, TimeUnit.SECONDS) val alerts = searchAlerts(monitor) // We have two alerts from above, 1 for each trigger, there should be only 1 left in active index assertEquals("One alert should be in active index", 1, alerts.size) assertEquals("Wrong alert in active index", alertKeep.toJsonString(), alerts.single().toJsonString()) + waitUntil({ + val alerts = searchAlerts(monitor, AlertIndices.ALERT_HISTORY_WRITE_INDEX) + if (alerts.size == 1) { + return@waitUntil true + } + return@waitUntil false + }, 60, TimeUnit.SECONDS) + val historyAlerts = searchAlerts(monitor, AlertIndices.ALERT_HISTORY_WRITE_INDEX) // Only alertDelete should have been moved to history index assertEquals("One alert should be in history index", 1, historyAlerts.size) @@ -895,13 +966,21 @@ class MonitorRestApiIT : AlertingRestTestCase() { fun `test monitor stats when disabling and re-enabling scheduled jobs with existing monitor`() { // Enable Monitor jobs + enableScheduledJob() val monitorId = createMonitor(randomQueryLevelMonitor(enabled = true), refresh = true).id + if (isMultiNode) OpenSearchTestCase.waitUntil({ + return@waitUntil false + }, 2, TimeUnit.SECONDS) var alertingStats = getAlertingStats() assertAlertingStatsSweeperEnabled(alertingStats, true) assertEquals("Scheduled job index does not exist", true, alertingStats["scheduled_job_index_exists"]) - assertEquals("Scheduled job index is not yellow", "yellow", alertingStats["scheduled_job_index_status"]) + if (isMultiNode) { + assertEquals("Scheduled job index is not green", "green", alertingStats["scheduled_job_index_status"]) + } else { + assertEquals("Scheduled job index is not yellow", "yellow", alertingStats["scheduled_job_index_status"]) + } assertEquals("Nodes are not on schedule", numberOfNodes, alertingStats["nodes_on_schedule"]) val _nodes = alertingStats["_nodes"] as Map<String, Any> @@ -926,7 +1005,9 @@ class MonitorRestApiIT : AlertingRestTestCase() { enableScheduledJob() // Sleep briefly so sweep can reschedule the Monitor - Thread.sleep(2000) + OpenSearchTestCase.waitUntil({ + return@waitUntil false + }, 2, TimeUnit.SECONDS) alertingStats = getAlertingStats() assertAlertingStatsSweeperEnabled(alertingStats, true) @@ -949,13 +1030,21 @@ class MonitorRestApiIT : AlertingRestTestCase() { fun `test monitor stats jobs`() { // Enable the Monitor plugin. 
+ enableScheduledJob() createRandomMonitor(refresh = true) + if (isMultiNode) OpenSearchTestCase.waitUntil({ + return@waitUntil false + }, 2, TimeUnit.SECONDS) val responseMap = getAlertingStats() assertAlertingStatsSweeperEnabled(responseMap, true) assertEquals("Scheduled job index does not exist", true, responseMap["scheduled_job_index_exists"]) - assertEquals("Scheduled job index is not yellow", "yellow", responseMap["scheduled_job_index_status"]) + if (isMultiNode) { + assertEquals("Scheduled job index is not green", "green", responseMap["scheduled_job_index_status"]) + } else { + assertEquals("Scheduled job index is not yellow", "yellow", responseMap["scheduled_job_index_status"]) + } assertEquals("Nodes are not on schedule", numberOfNodes, responseMap["nodes_on_schedule"]) val _nodes = responseMap["_nodes"] as Map<String, Any> @@ -980,10 +1069,17 @@ class MonitorRestApiIT : AlertingRestTestCase() { enableScheduledJob() createRandomMonitor(refresh = true) + if (isMultiNode) OpenSearchTestCase.waitUntil({ + return@waitUntil false + }, 2, TimeUnit.SECONDS) val responseMap = getAlertingStats("/jobs_info") assertAlertingStatsSweeperEnabled(responseMap, true) assertEquals("Scheduled job index does not exist", true, responseMap["scheduled_job_index_exists"]) - assertEquals("Scheduled job index is not yellow", "yellow", responseMap["scheduled_job_index_status"]) + if (isMultiNode) { + assertEquals("Scheduled job index is not green", "green", responseMap["scheduled_job_index_status"]) + } else { + assertEquals("Scheduled job index is not yellow", "yellow", responseMap["scheduled_job_index_status"]) + } assertEquals("Nodes not on schedule", numberOfNodes, responseMap["nodes_on_schedule"]) val _nodes = responseMap["_nodes"] as Map<String, Any> @@ -1043,7 +1139,7 @@ class MonitorRestApiIT : AlertingRestTestCase() { "GET", "$ALERTING_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) @@ -1068,7 +1164,7 @@ class MonitorRestApiIT : AlertingRestTestCase() { "GET", "$ALERTING_BASE_URI/_search", params, - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) } catch (e: ResponseException) { assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) @@ -1078,7 +1174,7 @@ @Throws(Exception::class) fun `test creating a document monitor`() { val testIndex = createTestIndex() - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) @@ -1099,7 +1195,7 @@ @Throws(Exception::class) fun `test getting a document level monitor`() { val testIndex = createTestIndex() - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) @@ -1115,7 +1211,7 @@ class MonitorRestApiIT : 
AlertingRestTestCase() { @Throws(Exception::class) fun `test updating conditions for a doc-level monitor`() { val testIndex = createTestIndex() - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) @@ -1146,7 +1242,7 @@ class MonitorRestApiIT : AlertingRestTestCase() { @Throws(Exception::class) fun `test deleting a document level monitor`() { val testIndex = createTestIndex() - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3") + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) @@ -1189,6 +1285,118 @@ class MonitorRestApiIT : AlertingRestTestCase() { } } + /** + * This use case is needed by the frontend plugin for displaying alert counts on the Monitors list page. + * https://github.com/opensearch-project/alerting-dashboards-plugin/blob/main/server/services/MonitorService.js#L235 + */ + fun `test get acknowledged, active, error, and ignored alerts counts`() { + putAlertMappings() + val monitorAlertCounts = hashMapOf<String, HashMap<String, Int>>() + val numMonitors = randomIntBetween(1, 10) + repeat(numMonitors) { + val monitor = createRandomMonitor(refresh = true) + + val numAcknowledgedAlerts = randomIntBetween(1, 10) + val numActiveAlerts = randomIntBetween(1, 10) + var numCompletedAlerts = randomIntBetween(1, 10) + val numErrorAlerts = randomIntBetween(1, 10) + val numIgnoredAlerts = randomIntBetween(1, numCompletedAlerts) + numCompletedAlerts -= numIgnoredAlerts + + val alertCounts = hashMapOf( + Alert.State.ACKNOWLEDGED.name to numAcknowledgedAlerts, + Alert.State.ACTIVE.name to numActiveAlerts, + Alert.State.COMPLETED.name to numCompletedAlerts, + Alert.State.ERROR.name to numErrorAlerts, + "IGNORED" to numIgnoredAlerts + ) + monitorAlertCounts[monitor.id] = alertCounts + + repeat(numAcknowledgedAlerts) { + createAlert(randomAlert(monitor).copy(acknowledgedTime = Instant.now(), state = Alert.State.ACKNOWLEDGED)) + } + repeat(numActiveAlerts) { + createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) + } + repeat(numCompletedAlerts) { + createAlert(randomAlert(monitor).copy(acknowledgedTime = Instant.now(), state = Alert.State.COMPLETED)) + } + repeat(numErrorAlerts) { + createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) + } + repeat(numIgnoredAlerts) { + createAlert(randomAlert(monitor).copy(acknowledgedTime = null, state = Alert.State.COMPLETED)) + } + } + + val sourceBuilder = SearchSourceBuilder() + .size(0) + .query(QueryBuilders.termsQuery("monitor_id", monitorAlertCounts.keys)) + .aggregation( + AggregationBuilders + .terms("uniq_monitor_ids").field("monitor_id") + .subAggregation(AggregationBuilders.filter("active", QueryBuilders.termQuery("state", "ACTIVE"))) + .subAggregation(AggregationBuilders.filter("acknowledged", QueryBuilders.termQuery("state", "ACKNOWLEDGED"))) + .subAggregation(AggregationBuilders.filter("errors", QueryBuilders.termQuery("state", "ERROR"))) + .subAggregation( + AggregationBuilders.filter( + "ignored", + QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery("state", "COMPLETED")) + .mustNot(QueryBuilders.existsQuery("acknowledged_time")) + ) + 
) + .subAggregation(AggregationBuilders.max("last_notification_time").field("last_notification_time")) + .subAggregation( + AggregationBuilders.topHits("latest_alert") + .size(1) + .sort("start_time", SortOrder.DESC) + .fetchSource(arrayOf("last_notification_time", "trigger_name"), null) + ) + ) + + val searchResponse = client().makeRequest( + "GET", + "$ALERTING_BASE_URI/_search", + hashMapOf("index" to AlertIndices.ALL_ALERT_INDEX_PATTERN), + StringEntity(sourceBuilder.toString(), ContentType.APPLICATION_JSON) + ) + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content).map() + val aggregations = (xcp["aggregations"]!! as Map<String, Map<String, Any>>) + val uniqMonitorIds = aggregations["uniq_monitor_ids"]!! + val buckets = uniqMonitorIds["buckets"]!! as ArrayList<Map<String, Any>> + + assertEquals("Incorrect number of monitors returned", monitorAlertCounts.keys.size, buckets.size) + buckets.forEach { bucket -> + val id = bucket["key"]!! + val monitorCounts = monitorAlertCounts[id]!! + + val acknowledged = (bucket["acknowledged"]!! as Map<String, Any>)["doc_count"]!! + assertEquals( + "Incorrect ${Alert.State.ACKNOWLEDGED} count returned for monitor $id", + monitorCounts[Alert.State.ACKNOWLEDGED.name], acknowledged + ) + + val active = (bucket["active"]!! as Map<String, Any>)["doc_count"]!! + assertEquals( + "Incorrect ${Alert.State.ACTIVE} count returned for monitor $id", + monitorCounts[Alert.State.ACTIVE.name], active + ) + + val errors = (bucket["errors"]!! as Map<String, Any>)["doc_count"]!! + assertEquals( + "Incorrect ${Alert.State.ERROR} count returned for monitor $id", + monitorCounts[Alert.State.ERROR.name], errors + ) + + val ignored = (bucket["ignored"]!! as Map<String, Any>)["doc_count"]!! + assertEquals( + "Incorrect IGNORED count returned for monitor $id", + monitorCounts["IGNORED"], ignored + ) + } + } + private fun validateAlertingStatsNodeResponse(nodesResponse: Map<String, Any>) { assertEquals("Incorrect number of nodes", numberOfNodes, nodesResponse["total"]) assertEquals("Failed nodes found during monitor stats call", 0, nodesResponse["failed"]) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureDestinationRestApiIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureDestinationRestApiIT.kt index 598b4f98c..1cf20bf47 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureDestinationRestApiIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureDestinationRestApiIT.kt @@ -5,8 +5,8 @@ package org.opensearch.alerting.resthandler -import org.apache.http.HttpHeaders -import org.apache.http.message.BasicHeader +import org.apache.hc.core5.http.HttpHeaders +import org.apache.hc.core5.http.message.BasicHeader import org.junit.After import org.junit.Before import org.junit.BeforeClass @@ -24,7 +24,7 @@ import org.opensearch.alerting.randomUser import org.opensearch.alerting.util.DestinationType import org.opensearch.client.RestClient import org.opensearch.commons.rest.SecureRestClientBuilder -import org.opensearch.rest.RestStatus +import org.opensearch.core.rest.RestStatus import org.opensearch.test.junit.annotations.TestLogging import java.time.Instant @@ -41,15 +41,18 @@ class SecureDestinationRestApiIT : AlertingRestTestCase() { } } - val user = "userOne" + val user = "userA" var userClient: RestClient? 
= null @Before fun create() { if (userClient == null) { - createUser(user, user, arrayOf()) - userClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), user, user).setSocketTimeout(60000).build() + createUser(user, arrayOf()) + userClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), user, password) + .setSocketTimeout(60000) + .setConnectionRequestTimeout(180000) + .build() } } @@ -139,7 +142,7 @@ class SecureDestinationRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_GET_DESTINATION_ACCESS) ) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt index 74bb75ff7..25c27b861 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt @@ -5,14 +5,15 @@ package org.opensearch.alerting.resthandler -import org.apache.http.HttpHeaders -import org.apache.http.entity.ContentType -import org.apache.http.entity.StringEntity -import org.apache.http.message.BasicHeader +import org.apache.hc.core5.http.ContentType +import org.apache.hc.core5.http.HttpHeaders +import org.apache.hc.core5.http.io.entity.StringEntity +import org.apache.hc.core5.http.message.BasicHeader import org.junit.After import org.junit.Before import org.junit.BeforeClass import org.opensearch.alerting.ALERTING_GET_EMAIL_ACCOUNT_ACCESS +import org.opensearch.alerting.ALERTING_NO_ACCESS_ROLE import org.opensearch.alerting.ALERTING_SEARCH_EMAIL_ACCOUNT_ACCESS import org.opensearch.alerting.AlertingPlugin import org.opensearch.alerting.AlertingRestTestCase @@ -20,9 +21,10 @@ import org.opensearch.alerting.TEST_HR_BACKEND_ROLE import org.opensearch.alerting.TEST_HR_INDEX import org.opensearch.alerting.TEST_HR_ROLE import org.opensearch.alerting.makeRequest +import org.opensearch.client.ResponseException import org.opensearch.client.RestClient import org.opensearch.commons.rest.SecureRestClientBuilder -import org.opensearch.rest.RestStatus +import org.opensearch.core.rest.RestStatus val SEARCH_EMAIL_ACCOUNT_DSL = """ { @@ -50,15 +52,18 @@ class SecureEmailAccountRestApiIT : AlertingRestTestCase() { } } - val user = "userOne" + val user = "userB" var userClient: RestClient? 
= null @Before fun create() { if (userClient == null) { - createUser(user, user, arrayOf()) - userClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), user, user).setSocketTimeout(60000).build() + createUser(user, arrayOf()) + userClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), user, password) + .setSocketTimeout(60000) + .setConnectionRequestTimeout(180000) + .build() } } @@ -76,7 +81,7 @@ class SecureEmailAccountRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_GET_EMAIL_ACCOUNT_ACCESS) ) @@ -105,7 +110,7 @@ class SecureEmailAccountRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_SEARCH_EMAIL_ACCOUNT_ACCESS) ) @@ -126,18 +131,16 @@ class SecureEmailAccountRestApiIT : AlertingRestTestCase() { /* TODO: https://github.com/opensearch-project/alerting/issues/300 - + */ fun `test get email accounts with an user without get email account role`() { createUserWithTestDataAndCustomRole( user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) ) - val emailAccount = createRandomEmailAccountWithGivenName(true, randomAlphaOfLength(5)) - try { userClient?.makeRequest( "GET", @@ -155,19 +158,15 @@ class SecureEmailAccountRestApiIT : AlertingRestTestCase() { deleteRoleAndRoleMapping(TEST_HR_ROLE) } } - fun `test search email accounts with an user without search email account role`() { - createUserWithTestDataAndCustomRole( user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) ) - createRandomEmailAccountWithGivenName(true, randomAlphaOfLength(5)) - try { userClient?.makeRequest( "POST", @@ -182,6 +181,4 @@ class SecureEmailAccountRestApiIT : AlertingRestTestCase() { deleteRoleAndRoleMapping(TEST_HR_ROLE) } } - - */ } diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt index 72fb317e1..614b9d5a6 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt @@ -5,10 +5,10 @@ package org.opensearch.alerting.resthandler -import org.apache.http.HttpHeaders -import org.apache.http.entity.ContentType -import org.apache.http.entity.StringEntity -import org.apache.http.message.BasicHeader +import org.apache.hc.core5.http.ContentType +import org.apache.hc.core5.http.HttpHeaders +import org.apache.hc.core5.http.io.entity.StringEntity +import org.apache.hc.core5.http.message.BasicHeader import org.junit.After import org.junit.Before import org.junit.BeforeClass @@ -22,7 +22,7 @@ import org.opensearch.alerting.TEST_HR_ROLE import org.opensearch.alerting.makeRequest import org.opensearch.client.RestClient import org.opensearch.commons.rest.SecureRestClientBuilder -import org.opensearch.rest.RestStatus +import org.opensearch.core.rest.RestStatus import org.opensearch.test.junit.annotations.TestLogging val SEARCH_EMAIL_GROUP_DSL = """ @@ -52,15 +52,18 @@ class SecureEmailGroupsRestApiIT : AlertingRestTestCase() { } } - val user = "userOne" + val user = "userC" var 
userClient: RestClient? = null @Before fun create() { if (userClient == null) { - createUser(user, user, arrayOf()) - userClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), user, user).setSocketTimeout(60000).build() + createUser(user, arrayOf()) + userClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), user, password) + .setSocketTimeout(60000) + .setConnectionRequestTimeout(180000) + .build() } } @@ -78,7 +81,7 @@ class SecureEmailGroupsRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_GET_EMAIL_GROUP_ACCESS) ) @@ -105,7 +108,7 @@ class SecureEmailGroupsRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_SEARCH_EMAIL_GROUP_ACCESS) ) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureMonitorRestApiIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureMonitorRestApiIT.kt index 151be8d4d..4d4f17617 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureMonitorRestApiIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureMonitorRestApiIT.kt @@ -5,10 +5,10 @@ package org.opensearch.alerting.resthandler -import org.apache.http.HttpHeaders -import org.apache.http.entity.ContentType -import org.apache.http.message.BasicHeader -import org.apache.http.nio.entity.NStringEntity +import org.apache.hc.core5.http.ContentType +import org.apache.hc.core5.http.HttpHeaders +import org.apache.hc.core5.http.io.entity.StringEntity +import org.apache.hc.core5.http.message.BasicHeader import org.junit.After import org.junit.Before import org.junit.BeforeClass @@ -20,35 +20,48 @@ import org.opensearch.alerting.ALERTING_FULL_ACCESS_ROLE import org.opensearch.alerting.ALERTING_GET_ALERTS_ACCESS import org.opensearch.alerting.ALERTING_GET_MONITOR_ACCESS import org.opensearch.alerting.ALERTING_INDEX_MONITOR_ACCESS +import org.opensearch.alerting.ALERTING_NO_ACCESS_ROLE +import org.opensearch.alerting.ALERTING_READ_ONLY_ACCESS import org.opensearch.alerting.ALERTING_SEARCH_MONITOR_ONLY_ACCESS import org.opensearch.alerting.ALL_ACCESS_ROLE import org.opensearch.alerting.ALWAYS_RUN import org.opensearch.alerting.AlertingRestTestCase import org.opensearch.alerting.DRYRUN_MONITOR +import org.opensearch.alerting.READALL_AND_MONITOR_ROLE +import org.opensearch.alerting.TERM_DLS_QUERY import org.opensearch.alerting.TEST_HR_BACKEND_ROLE import org.opensearch.alerting.TEST_HR_INDEX import org.opensearch.alerting.TEST_HR_ROLE import org.opensearch.alerting.TEST_NON_HR_INDEX import org.opensearch.alerting.assertUserNull -import org.opensearch.alerting.core.model.SearchInput import org.opensearch.alerting.makeRequest -import org.opensearch.alerting.model.Alert import org.opensearch.alerting.randomAction import org.opensearch.alerting.randomAlert +import org.opensearch.alerting.randomBucketLevelMonitor +import org.opensearch.alerting.randomBucketLevelTrigger +import org.opensearch.alerting.randomDocumentLevelMonitor import org.opensearch.alerting.randomQueryLevelMonitor import org.opensearch.alerting.randomQueryLevelTrigger import org.opensearch.alerting.randomTemplateScript import org.opensearch.client.Response import org.opensearch.client.ResponseException import org.opensearch.client.RestClient +import org.opensearch.common.settings.Settings import 
org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.XContentType import org.opensearch.common.xcontent.json.JsonXContent +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.SearchInput import org.opensearch.commons.authuser.User import org.opensearch.commons.rest.SecureRestClientBuilder +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry import org.opensearch.index.query.QueryBuilders -import org.opensearch.rest.RestStatus +import org.opensearch.script.Script +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder +import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder import org.opensearch.search.builder.SearchSourceBuilder import org.opensearch.test.junit.annotations.TestLogging @@ -65,15 +78,18 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { } } - val user = "userOne" + val user = "userD" var userClient: RestClient? = null @Before fun create() { if (userClient == null) { - createUser(user, user, arrayOf()) - userClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), user, user).setSocketTimeout(60000).build() + createUser(user, arrayOf()) + userClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), user, password) + .setSocketTimeout(60000) + .setConnectionRequestTimeout(180000) + .build() } } @@ -85,11 +101,15 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { } // Create Monitor related security tests - fun `test create monitor with an user with alerting role`() { - createUserWithTestData(user, TEST_HR_INDEX, TEST_HR_ROLE, TEST_HR_BACKEND_ROLE) - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf(user)) + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_INDEX_MONITOR_ACCESS) + ) try { // randomMonitor has a dummy user, api ignores the User passed as part of monitor, it picks user info from the logged-in user. 
val monitor = randomQueryLevelMonitor().copy( @@ -110,13 +130,14 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { /* TODO: https://github.com/opensearch-project/alerting/issues/300 + */ fun `test create monitor with an user without alerting role`() { createUserWithTestDataAndCustomRole( user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) ) try { @@ -138,13 +159,9 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { fun `test create monitor with an user with read-only role`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, - getClusterPermissionsFromCustomRole(ALERTING_READ_ONLY_ACCESS) - ) + createUserWithTestData(user, TEST_HR_INDEX, TEST_HR_ROLE, TEST_HR_BACKEND_ROLE) + createUserRolesMapping(ALERTING_READ_ONLY_ACCESS, arrayOf(user)) + try { val monitor = randomQueryLevelMonitor().copy( inputs = listOf( @@ -159,9 +176,9 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) } finally { deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteRoleMapping(ALERTING_READ_ONLY_ACCESS) } } - */ fun `test query monitors with an user with only search monitor cluster permission`() { @@ -169,7 +186,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_SEARCH_MONITOR_ONLY_ACCESS) ) val monitor = createRandomMonitor(true) @@ -178,7 +195,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { val searchResponse = client().makeRequest( "GET", "$ALERTING_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) @@ -186,17 +203,19 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { val hits = xcp.map()["hits"]!! 
as Map<String, Map<String, Any>> val numberDocsFound = hits["total"]?.get("value") assertEquals("Monitor not found during search", 1, numberDocsFound) + deleteRoleAndRoleMapping(TEST_HR_ROLE) } /* TODO: https://github.com/opensearch-project/alerting/issues/300 + */ fun `test query monitors with an user without search monitor cluster permission`() { createUserWithTestDataAndCustomRole( user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) ) try { @@ -215,7 +234,6 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { deleteRoleAndRoleMapping(TEST_HR_ROLE) } } - */ fun `test create monitor with an user without index read role`() { @@ -223,7 +241,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_INDEX_MONITOR_ACCESS) ) try { @@ -235,6 +253,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { ) ) val createResponse = userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + Thread.sleep(30000) assertEquals("Create monitor failed", RestStatus.CREATED, createResponse?.restStatus()) fail("Expected 403 Method FORBIDDEN response") } catch (e: ResponseException) { @@ -257,7 +276,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) ) @@ -278,12 +297,13 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { /* TODO: https://github.com/opensearch-project/alerting/issues/300 + */ fun `test get monitor with an user without get monitor role`() { createUserWithTestDataAndCustomRole( user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) ) @@ -303,7 +323,6 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { deleteRoleAndRoleMapping(TEST_HR_ROLE) } } - */ fun getDocs(response: Response?): Any?
{ val hits = createParser( @@ -349,6 +368,528 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { assertFalse("The monitor was not disabled", updatedMonitor.enabled) } + fun `test create monitor with enable filter by with a user have access and without role has no access`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) + assertNotNull("The monitor was not created", createdMonitor) + + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + + // getUser should have access to the monitor + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role2"), + getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) + ) + val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) + .setSocketTimeout(60000) + .setConnectionRequestTimeout(180000) + .build() + + val getMonitorResponse = getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) + + // Remove backend role and ensure no access is granted after + patchUserBackendRoles(getUser, arrayOf("role1")) + try { + getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteUser(getUser) + getUserClient?.close() + } + } + + fun `test create monitor with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + try { + createMonitorWithClient(userClient!!, monitor = monitor, listOf()) + fail("Expected exception since a non-admin user is trying to create a monitor with no backend roles") + } catch (e: ResponseException) { + assertEquals("Create monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test create monitor as admin with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + 
listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(client(), monitor = monitor, listOf()) + assertNotNull("The monitor was not created", createdMonitor) + + try { + userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test create monitor with enable filter by with roles user has no access and throw exception`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + try { + createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role1", "role2")) + fail("Expected create monitor to fail as user does not have role1 backend role") + } catch (e: ResponseException) { + assertEquals("Create monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test create monitor as admin with enable filter by with a user have access and without role has no access`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + val createdMonitor = createMonitorWithClient(client(), monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role1", "role2")) + assertNotNull("The monitor was not created", createdMonitor) + + // user should have access to the admin monitor + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) + ) + + val getMonitorResponse = userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) + + // Remove good backend role and ensure no access is granted after + patchUserBackendRoles(user, arrayOf("role5")) + try { + userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test update monitor with enable filter by with removing a permission`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter 
by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) + assertNotNull("The monitor was not created", createdMonitor) + + // getUser should have access to the monitor + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role2"), + getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) + ) + val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) + .setSocketTimeout(60000) + .setConnectionRequestTimeout(180000) + .build() + + val getMonitorResponse = getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) + + // Remove backend role from monitor + val updatedMonitor = updateMonitorWithClient(userClient!!, createdMonitor, listOf(TEST_HR_BACKEND_ROLE)) + + // getUser should no longer have access + try { + getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${updatedMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteUser(getUser) + getUserClient?.close() + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update monitor with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf("role2")) + assertNotNull("The monitor was not created", createdMonitor) + + try { + updateMonitorWithClient(userClient!!, createdMonitor, listOf()) + } catch (e: ResponseException) { + assertEquals("Update monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update monitor as admin with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(client(), monitor = monitor, listOf(TEST_HR_BACKEND_ROLE)) + assertNotNull("The monitor was not created", createdMonitor) + + val getMonitorResponse = 
userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) + + val updatedMonitor = updateMonitorWithClient(client(), createdMonitor, listOf()) + + try { + userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${updatedMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update monitor with enable filter by with updating with a permission user has no access to and throw exception`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) + assertNotNull("The monitor was not created", createdMonitor) + + // getUser should have access to the monitor + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role2"), + getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) + ) + val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) + .setSocketTimeout(60000) + .setConnectionRequestTimeout(180000) + .build() + + val getMonitorResponse = getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) + + try { + updateMonitorWithClient(userClient!!, createdMonitor, listOf(TEST_HR_BACKEND_ROLE, "role1")) + fail("Expected update monitor to fail as user doesn't have access to role1") + } catch (e: ResponseException) { + assertEquals("Update monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteUser(getUser) + getUserClient?.close() + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update monitor as another user with enable filter by with removing a permission and adding permission`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE)) + assertNotNull("The monitor was not created", createdMonitor) + + // Remove backend role from monitor with new user and add role5 + val updateUser = 
"updateUser" + createUserWithRoles( + updateUser, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role5"), + false + ) + + val updateUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), updateUser, password) + .setSocketTimeout(60000) + .setConnectionRequestTimeout(180000) + .build() + val updatedMonitor = updateMonitorWithClient(updateUserClient, createdMonitor, listOf("role5")) + + // old user should no longer have access + try { + userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${updatedMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteUser(updateUser) + updateUserClient?.close() + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update monitor as admin with enable filter by with removing a permission`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) + assertNotNull("The monitor was not created", createdMonitor) + + // getUser should have access to the monitor + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role1", "role2"), + getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) + ) + val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) + .setSocketTimeout(60000) + .setConnectionRequestTimeout(180000) + .build() + + val getMonitorResponse = getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) + + // Remove backend role from monitor + val updatedMonitor = updateMonitorWithClient(client(), createdMonitor, listOf("role4")) + + // original user should no longer have access + try { + userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${updatedMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + + // get user should no longer have access + try { + getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${updatedMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteUser(getUser) + getUserClient?.close() + 
} + } + fun `test delete monitor with disable filter by`() { disableFilterBy() val monitor = randomQueryLevelMonitor(enabled = true) @@ -366,7 +907,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { "POST", "$ALERTING_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) @@ -400,7 +941,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { "POST", "$ALERTING_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) @@ -414,8 +955,8 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { /* TODO: https://github.com/opensearch-project/alerting/issues/300 + */ fun `test query monitors with disable filter by`() { - disableFilterBy() // creates monitor as "admin" user. @@ -427,7 +968,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { "POST", "$ALERTING_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) assertEquals("Monitor not found during search", 1, getDocs(adminSearchResponse)) @@ -437,26 +978,31 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { userClient?.makeRequest( "POST", "$ALERTING_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) fail("Expected 403 FORBIDDEN response") - } catch (e: AssertionError) { - assertEquals("Unexpected status", "Expected 403 FORBIDDEN response", e.message) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) } - // add alerting roles and search as userOne - must return 1 docs - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf(user)) + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_SEARCH_MONITOR_ONLY_ACCESS) + ) try { val userOneSearchResponse = userClient?.makeRequest( "POST", "$ALERTING_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Search monitor failed", RestStatus.OK, userOneSearchResponse?.restStatus()) assertEquals("Monitor not found during search", 1, getDocs(userOneSearchResponse)) } finally { - deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) + deleteRoleAndRoleMapping(TEST_HR_ROLE) } } @@ -473,7 +1019,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { "POST", "$ALERTING_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) assertEquals("Monitor not found during search", 1, getDocs(adminSearchResponse)) @@ -483,11 +1029,11 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { userClient?.makeRequest( "POST", "$ALERTING_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) fail("Expected 403 FORBIDDEN response") - } catch (e: AssertionError) { - 
assertEquals("Unexpected status", "Expected 403 FORBIDDEN response", e.message) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) } // add alerting roles and search as userOne - must return 0 docs @@ -497,7 +1043,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { "POST", "$ALERTING_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Search monitor failed", RestStatus.OK, userOneSearchResponse?.restStatus()) assertEquals("Monitor not found during search", 0, getDocs(userOneSearchResponse)) @@ -506,14 +1052,12 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { } } - */ - fun `test execute monitor with an user with execute monitor access`() { createUserWithTestDataAndCustomRole( user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_EXECUTE_MONITOR_ACCESS) ) @@ -533,12 +1077,13 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { /* TODO: https://github.com/opensearch-project/alerting/issues/300 + */ fun `test execute monitor with an user without execute monitor access`() { createUserWithTestDataAndCustomRole( user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) ) @@ -558,14 +1103,13 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { deleteRoleAndRoleMapping(TEST_HR_ROLE) } } - */ fun `test delete monitor with an user with delete monitor access`() { createUserWithTestDataAndCustomRole( user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_DELETE_MONITOR_ACCESS) ) @@ -587,12 +1131,13 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { /* TODO: https://github.com/opensearch-project/alerting/issues/300 + */ fun `test delete monitor with an user without delete monitor access`() { createUserWithTestDataAndCustomRole( user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) ) @@ -636,8 +1181,8 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { try { getAlerts(userClient as RestClient, inputMap).asMap() fail("Expected 403 FORBIDDEN response") - } catch (e: AssertionError) { - assertEquals("Unexpected status", "Expected 403 FORBIDDEN response", e.message) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) } // add alerting roles and search as userOne - must return 0 docs @@ -673,10 +1218,9 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { try { getAlerts(userClient as RestClient, inputMap).asMap() fail("Expected 403 FORBIDDEN response") - } catch (e: AssertionError) { - assertEquals("Unexpected status", "Expected 403 FORBIDDEN response", e.message) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) } - // add alerting roles and search as userOne - must return 0 docs createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf(user)) try { @@ -687,7 +1231,49 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { } } - */ + fun `test query all alerts in all states with filter by1`() { + enableFilterBy() + putAlertMappings() + val adminUser = User(ADMIN, listOf(ADMIN), listOf(ALL_ACCESS_ROLE), 
listOf()) + var monitor = createRandomMonitor(refresh = true).copy(user = adminUser) + createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) + createAlert(randomAlert(monitor).copy(state = Alert.State.COMPLETED)) + createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) + createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) + randomAlert(monitor).copy(id = "foobar") + + val inputMap = HashMap<String, Any>() + inputMap["missing"] = "_last" + inputMap["monitorId"] = monitor.id + + // search as "admin" - must get 4 docs + val adminResponseMap = getAlerts(client(), inputMap).asMap() + assertEquals(4, adminResponseMap["totalAlerts"]) + + // search as userOne without alerting roles - must return 403 Forbidden + try { + getAlerts(userClient as RestClient, inputMap).asMap() + fail("Expected 403 FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } +// createUserWithTestDataAndCustomRole( +// user, +// TEST_HR_INDEX, +// TEST_HR_ROLE, +// listOf(ADMIN), +// getClusterPermissionsFromCustomRole(ALERTING_INDEX_MONITOR_ACCESS) +// ) + createUserWithRoles(user, listOf(ALERTING_FULL_ACCESS_ROLE), listOf(ADMIN), false) + // add alerting roles and search as userOne - must return 4 docs +// createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf(user)) + try { + val responseMap = getAlerts(userClient as RestClient, inputMap).asMap() + assertEquals(4, responseMap["totalAlerts"]) + } finally { + deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) + } + } fun `test get alerts with an user with get alerts role`() { @@ -712,7 +1298,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { user, TEST_HR_INDEX, TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + listOf(TEST_HR_BACKEND_ROLE), getClusterPermissionsFromCustomRole(ALERTING_GET_ALERTS_ACCESS) ) try { @@ -746,6 +1332,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { try { val response = executeMonitor(userClient as RestClient, modifiedMonitor, params = DRYRUN_MONITOR) + Thread.sleep(20000) val output = entityAsMap(response) val inputResults = output.stringMap("input_results") assertTrue("Missing monitor error message", (inputResults?.get("error") as String).isNotEmpty()) @@ -784,7 +1371,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { "POST", "$ALERTING_BASE_URI/_search", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) assertEquals("Monitor not found during search", 1, getDocs(adminSearchResponse)) @@ -795,7 +1382,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { "GET", "$ALERTING_BASE_URI/$id", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Get monitor failed", RestStatus.OK, adminGetResponse.restStatus()) @@ -804,26 +1391,28 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { "DELETE", "$ALERTING_BASE_URI/$id", emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) + StringEntity(search, ContentType.APPLICATION_JSON) ) assertEquals("Delete monitor failed", RestStatus.OK, adminDeleteResponse.restStatus()) } finally { deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) } } /* TODO: https://github.com/opensearch-project/alerting/issues/300 + */ fun `test execute query-level monitor with user having partial index
permissions`() { - - createUserWithDocLevelSecurityTestDataAndCustomRole( - user, - TEST_HR_INDEX, + createUser(user, arrayOf(TEST_HR_BACKEND_ROLE)) + createTestIndex(TEST_HR_INDEX) + createIndexRoleWithDocLevelSecurity( TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + TEST_HR_INDEX, TERM_DLS_QUERY, - getClusterPermissionsFromCustomRole(ALERTING_FULL_ACCESS_ROLE) + getClusterPermissionsFromCustomRole(ALERTING_INDEX_MONITOR_ACCESS) ) + createUserRolesMapping(TEST_HR_ROLE, arrayOf(user)) // Add a doc that is accessible to the user indexDoc( @@ -831,7 +1420,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { """ { "test_field": "a", - "accessible": true + "accessible": true } """.trimIndent() ) @@ -850,7 +1439,7 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { val input = SearchInput(indices = listOf(TEST_HR_INDEX), query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery())) val triggerScript = """ // make sure there is exactly one hit - return ctx.results[0].hits.hits.size() == 1 + return ctx.results[0].hits.hits.size() == 1 """.trimIndent() val trigger = randomQueryLevelTrigger(condition = Script(triggerScript)).copy(actions = listOf()) @@ -869,15 +1458,15 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { } fun `test execute bucket-level monitor with user having partial index permissions`() { - - createUserWithDocLevelSecurityTestDataAndCustomRole( - user, - TEST_HR_INDEX, + createUser(user, arrayOf(TEST_HR_BACKEND_ROLE)) + createTestIndex(TEST_HR_INDEX) + createIndexRoleWithDocLevelSecurity( TEST_HR_ROLE, - TEST_HR_BACKEND_ROLE, + TEST_HR_INDEX, TERM_DLS_QUERY, - getClusterPermissionsFromCustomRole(ALERTING_FULL_ACCESS_ROLE) + getClusterPermissionsFromCustomRole(ALERTING_INDEX_MONITOR_ACCESS) ) + createUserRolesMapping(TEST_HR_ROLE, arrayOf(user)) // Add a doc that is accessible to the user indexDoc( @@ -937,5 +1526,66 @@ class SecureMonitorRestApiIT : AlertingRestTestCase() { deleteRoleAndRoleMapping(TEST_HR_ROLE) } } + + /** + * We want to verify that user roles/permissions do not affect clean up of monitors during partial monitor creation failure */ + fun `test create monitor failure clean up with a user without delete monitor access`() { + enableFilterBy() + createUser(user, listOf(TEST_HR_BACKEND_ROLE, "role2").toTypedArray()) + createTestIndex(TEST_HR_INDEX) + createCustomIndexRole( + ALERTING_INDEX_MONITOR_ACCESS, + TEST_HR_INDEX, + getClusterPermissionsFromCustomRole(ALERTING_INDEX_MONITOR_ACCESS) + ) + createUserWithRoles( + user, + listOf(ALERTING_INDEX_MONITOR_ACCESS, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + val docLevelQueryIndex = ".opensearch-alerting-queries-000001" + createIndex( + docLevelQueryIndex, Settings.EMPTY, + """ + "properties" : { + "query": { + "type": "percolator_ext" + }, + "monitor_id": { + "type": "text" + }, + "index": { + "type": "text" + } + } + } + """.trimIndent(), + ".opensearch-alerting-queries" + ) + closeIndex(docLevelQueryIndex) // close index to simulate doc level query indexing failure + try { + val monitor = randomDocumentLevelMonitor( + withMetadata = false, + triggers = listOf(), + inputs = listOf(DocLevelMonitorInput("description", listOf(TEST_HR_INDEX), emptyList())) + ) + userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + fail("Monitor creation should have failed due to error in indexing doc level queries") + } catch (e: ResponseException) { + val search = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).size(10).toString() + 
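+ // creation failed as expected; search the alerting config index to verify the partially created monitor was rolled back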
val searchResponse = client().makeRequest( + "GET", "$ALERTING_BASE_URI/_search", + emptyMap(), + StringEntity(search, ContentType.APPLICATION_JSON) + ) + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"]!! as Map<String, Map<String, Any>> + val numberDocsFound = hits["total"]?.get("value") + assertEquals("Monitors found. Clean up unsuccessful", 0, numberDocsFound) + } finally { + deleteRoleAndRoleMapping(ALERTING_INDEX_MONITOR_ACCESS) + } + } } diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureWorkflowRestApiIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureWorkflowRestApiIT.kt new file mode 100644 index 000000000..25b49d3dd --- /dev/null +++ b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/SecureWorkflowRestApiIT.kt @@ -0,0 +1,1424 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.hc.core5.http.ContentType +import org.apache.hc.core5.http.HttpHeaders +import org.apache.hc.core5.http.io.entity.StringEntity +import org.apache.hc.core5.http.message.BasicHeader +import org.apache.lucene.tests.util.LuceneTestCase.AwaitsFix +import org.junit.After +import org.junit.Before +import org.junit.BeforeClass +import org.opensearch.alerting.ALERTING_BASE_URI +import org.opensearch.alerting.ALERTING_DELETE_WORKFLOW_ACCESS +import org.opensearch.alerting.ALERTING_EXECUTE_WORKFLOW_ACCESS +import org.opensearch.alerting.ALERTING_FULL_ACCESS_ROLE +import org.opensearch.alerting.ALERTING_GET_WORKFLOW_ACCESS +import org.opensearch.alerting.ALERTING_INDEX_MONITOR_ACCESS +import org.opensearch.alerting.ALERTING_INDEX_WORKFLOW_ACCESS +import org.opensearch.alerting.ALERTING_NO_ACCESS_ROLE +import org.opensearch.alerting.ALERTING_READ_ONLY_ACCESS +import org.opensearch.alerting.ALWAYS_RUN +import org.opensearch.alerting.AlertingRestTestCase +import org.opensearch.alerting.READALL_AND_MONITOR_ROLE +import org.opensearch.alerting.TERM_DLS_QUERY +import org.opensearch.alerting.TEST_HR_BACKEND_ROLE +import org.opensearch.alerting.TEST_HR_INDEX +import org.opensearch.alerting.TEST_HR_ROLE +import org.opensearch.alerting.TEST_NON_HR_INDEX +import org.opensearch.alerting.WORKFLOW_ALERTING_BASE_URI +import org.opensearch.alerting.assertUserNull +import org.opensearch.alerting.makeRequest +import org.opensearch.alerting.randomBucketLevelMonitor +import org.opensearch.alerting.randomBucketLevelTrigger +import org.opensearch.alerting.randomDocLevelQuery +import org.opensearch.alerting.randomDocumentLevelMonitor +import org.opensearch.alerting.randomDocumentLevelTrigger +import org.opensearch.alerting.randomQueryLevelMonitor +import org.opensearch.alerting.randomWorkflow +import org.opensearch.client.Response +import org.opensearch.client.ResponseException +import org.opensearch.client.RestClient +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentType +import org.opensearch.common.xcontent.json.JsonXContent +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder +import org.opensearch.commons.alerting.model.DataSources +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.rest.SecureRestClientBuilder +import org.opensearch.core.rest.RestStatus
+import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.index.query.QueryBuilders +import org.opensearch.script.Script +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder +import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.test.junit.annotations.TestLogging +import java.time.Instant + +// TODO investigate flaky nature of tests. not reproducible in local but fails in jenkins CI +@AwaitsFix(bugUrl = "https://github.com/opensearch-project/alerting/issues/1246") +@TestLogging("level:DEBUG", reason = "Debug for tests.") +@Suppress("UNCHECKED_CAST") +class SecureWorkflowRestApiIT : AlertingRestTestCase() { + + companion object { + + @BeforeClass + @JvmStatic + fun setup() { + // things to execute once and keep around for the class + org.junit.Assume.assumeTrue(System.getProperty("security", "false")!!.toBoolean()) + } + } + + val user = "userD" + var userClient: RestClient? = null + + @Before + fun create() { + if (userClient == null) { + createUser(user, arrayOf()) + userClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), user, password).setSocketTimeout(60000).build() + } + } + + @After + fun cleanup() { + userClient?.close() + deleteUser(user) + } + + // Create Workflow related security tests + fun `test create workflow with an user with alerting role`() { + val clusterPermissions = listOf( + getClusterPermissionsFromCustomRole(ALERTING_INDEX_WORKFLOW_ACCESS) + ) + + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + clusterPermissions + ) + try { + val monitor = createMonitor( + randomQueryLevelMonitor( + inputs = listOf(SearchInput(listOf(TEST_HR_INDEX), SearchSourceBuilder().query(QueryBuilders.matchAllQuery()))), + ), + true + ) + + val workflow = randomWorkflow( + monitorIds = listOf(monitor.id) + ) + + val createResponse = userClient?.makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) + assertEquals("Create workflow failed", RestStatus.CREATED, createResponse?.restStatus()) + + assertUserNull(createResponse?.asMap()!!["workflow"] as HashMap<String, Any>) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test create workflow with an user without alerting role`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) + ) + try { + val monitor = createRandomMonitor(true) + + val workflow = randomWorkflow( + monitorIds = listOf(monitor.id) + ) + + userClient?.makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test create workflow with an user with read-only role`() { + createUserWithTestData(user, TEST_HR_INDEX, TEST_HR_ROLE, TEST_HR_BACKEND_ROLE) + createUserRolesMapping(ALERTING_READ_ONLY_ACCESS, arrayOf(user)) + + try { + val monitor = createRandomMonitor(true) + val workflow = randomWorkflow( + monitorIds = listOf(monitor.id) + ) + userClient?.makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) + fail("Expected 403 Method FORBIDDEN response") + } catch (e:
ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteRoleMapping(ALERTING_READ_ONLY_ACCESS) + } + } + + fun `test create workflow with delegate with an user without index read role`() { + createTestIndex(TEST_NON_HR_INDEX) + val clusterPermissions = listOf( + getClusterPermissionsFromCustomRole(ALERTING_INDEX_WORKFLOW_ACCESS) + ) + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + clusterPermissions + ) + try { + val query = randomDocLevelQuery(tags = listOf()) + val triggers = listOf(randomDocumentLevelTrigger(condition = Script("query[id=\"${query.id}\"]"))) + + val monitor = createMonitor( + randomDocumentLevelMonitor( + inputs = listOf( + DocLevelMonitorInput( + indices = listOf(TEST_NON_HR_INDEX), + queries = listOf(query) + ) + ), + triggers = triggers + ), + true + ) + + val workflow = randomWorkflow( + monitorIds = listOf(monitor.id) + ) + + userClient?.makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteIndex(TEST_NON_HR_INDEX) + } + } + + fun `test create workflow with disable filter by`() { + disableFilterBy() + val monitor = createRandomMonitor(true) + val workflow = randomWorkflow( + monitorIds = listOf(monitor.id) + ) + val createResponse = client().makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) + assertEquals("Create workflow failed", RestStatus.CREATED, createResponse.restStatus()) + assertUserNull(createResponse.asMap()["workflow"] as HashMap<String, Any>) + } + + fun `test get workflow with an user with get workflow role`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) + ) + + val monitor = createRandomMonitor(true) + val workflow = createWorkflow(randomWorkflow(monitorIds = listOf(monitor.id))) + + try { + val getWorkflowResponse = userClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get workflow failed", RestStatus.OK, getWorkflowResponse?.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + /* + TODO: https://github.com/opensearch-project/alerting/issues/300 + */ + fun `test get workflow with an user without get monitor role`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) + ) + + val monitor = createRandomMonitor(true) + val workflow = createWorkflow(randomWorkflow(monitorIds = listOf(monitor.id))) + + try { + userClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun getDocs(response: Response?): Any? { + val hits = createParser( + XContentType.JSON.xContent(), + response?.entity?.content + ).map()["hits"]!!
as Map<String, Map<String, Any>> + return hits["total"]?.get("value") + } + + // Update Workflow related security tests + fun `test update workflow with disable filter by`() { + disableFilterBy() + + val createdMonitor = createMonitor(monitor = randomQueryLevelMonitor(enabled = true)) + val createdWorkflow = createWorkflow( + randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true, enabledTime = Instant.now()) + ) + + assertNotNull("The workflow was not created", createdWorkflow) + assertTrue("The workflow was not enabled", createdWorkflow.enabled) + + val workflowV2 = createdWorkflow.copy(enabled = false, enabledTime = null) + val updatedWorkflow = updateWorkflow(workflowV2) + + assertFalse("The workflow was not disabled", updatedWorkflow.enabled) + } + + fun `test update workflow with enable filter by`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + + val createdMonitor = createMonitorWithClient( + client = client(), + monitor = randomQueryLevelMonitor(enabled = true), + rbacRoles = listOf("admin") + ) + val createdWorkflow = createWorkflow( + randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true, enabledTime = Instant.now()) + ) + + assertNotNull("The workflow was not created", createdWorkflow) + assertTrue("The workflow was not enabled", createdWorkflow.enabled) + + val workflowV2 = createdWorkflow.copy(enabled = false, enabledTime = null) + val updatedWorkflow = updateWorkflow(workflow = workflowV2) + + assertFalse("The workflow was not disabled", updatedWorkflow.enabled) + } + + fun `test create workflow with enable filter by with a user have access and without role has no access`() { + enableFilterBy() + if (!isHttps()) { + return + } + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient( + userClient!!, + monitor = randomQueryLevelMonitor(enabled = true), + listOf(TEST_HR_BACKEND_ROLE, "role2") + ) + + assertNotNull("The monitor was not created", createdMonitor) + + val createdWorkflow = createWorkflowWithClient( + userClient!!, + workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true), + listOf(TEST_HR_BACKEND_ROLE, "role2") + ) + assertNotNull("The workflow was not created", createdWorkflow) + + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + + // getUser should have access to the monitor + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role2"), + getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) + ) + val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) + .setSocketTimeout(60000).build() + + val getWorkflowResponse = getUserClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get workflow failed", RestStatus.OK, getWorkflowResponse?.restStatus()) + + // Remove backend role and ensure no access is granted after + patchUserBackendRoles(getUser, arrayOf("role1")) + try { + getUserClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) +
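+ // the GET above must throw now that the user's matching backend role was removed; reaching fail() means access was not revoked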
fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) + deleteUser(getUser) + getUserClient?.close() + } + } + + fun `test create workflow with enable filter by with a user with a backend role doesn't have access to monitor`() { + enableFilterBy() + if (!isHttps()) { + return + } + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient( + userClient!!, + monitor = randomQueryLevelMonitor(enabled = true), + listOf("role2") + ) + + assertNotNull("The monitor was not created", createdMonitor) + + val userWithDifferentRole = "role3User" + + createUserWithRoles( + userWithDifferentRole, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role3"), + false + ) + + val userWithDifferentRoleClient = SecureRestClientBuilder( + clusterHosts.toTypedArray(), isHttps(), userWithDifferentRole, password + ) + .setSocketTimeout(60000).build() + + try { + createWorkflowWithClient( + userWithDifferentRoleClient!!, + workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true), + listOf("role3") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Create workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) + deleteUser(userWithDifferentRole) + userWithDifferentRoleClient?.close() + } + } + + fun `test create workflow with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = createMonitor(randomQueryLevelMonitor(enabled = true)) + + val workflow = randomWorkflow(monitorIds = listOf(monitor.id)) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + try { + createWorkflowWithClient(userClient!!, workflow, listOf()) + fail("Expected exception since a non-admin user is trying to create a workflow with no backend roles") + } catch (e: ResponseException) { + assertEquals("Create workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test create workflow as admin with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitor(monitor = monitor) + val createdWorkflow = createWorkflow(randomWorkflow(monitorIds = listOf(createdMonitor.id))) + assertNotNull("The workflow was not created", createdWorkflow) + + try { + + userClient?.makeRequest( + "GET", + 
"$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test create workflow with enable filter by with roles user has no access and throw exception`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = createMonitor(randomQueryLevelMonitor(enabled = true)) + val workflow = randomWorkflow(monitorIds = listOf(monitor.id)) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + try { + createWorkflowWithClient(userClient!!, workflow = workflow, listOf(TEST_HR_BACKEND_ROLE, "role1", "role2")) + fail("Expected create workflow to fail as user does not have role1 backend role") + } catch (e: ResponseException) { + assertEquals("Create workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test create workflow as admin with enable filter by with a user have access and without role has no access`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + val createdMonitor = createMonitorWithClient(client(), monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role1", "role2")) + val createdWorkflow = createWorkflowWithClient( + client(), + randomWorkflow(monitorIds = listOf(createdMonitor.id)), + listOf(TEST_HR_BACKEND_ROLE, "role1", "role2") + ) + assertNotNull("The workflow was not created", createdWorkflow) + + // user should have access to the admin monitor + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) + ) + + val getWorkflowResponse = userClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get workflow failed", RestStatus.OK, getWorkflowResponse?.restStatus()) + + // Remove good backend role and ensure no access is granted after + patchUserBackendRoles(user, arrayOf("role5")) + try { + userClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test update workflow with enable filter by with removing a permission`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + 
} + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, randomQueryLevelMonitor(), listOf(TEST_HR_BACKEND_ROLE, "role2")) + val createdWorkflow = createWorkflowWithClient( + client = userClient!!, workflow = randomWorkflow(enabled = true, monitorIds = listOf(createdMonitor.id)), + rbacRoles = listOf(TEST_HR_BACKEND_ROLE, "role2") + ) + assertNotNull("The workflow was not created", createdWorkflow) + + // getUser should have access to the monitor + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role2"), + getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) + ) + val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) + .setSocketTimeout(60000).build() + + val getWorkflowResponse = getUserClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get workflow failed", RestStatus.OK, getWorkflowResponse?.restStatus()) + + // Remove backend role from workflow + val updatedWorkflow = updateWorkflowWithClient(userClient!!, createdWorkflow, listOf(TEST_HR_BACKEND_ROLE)) + + // getUser should no longer have access + try { + getUserClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${updatedWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteUser(getUser) + getUserClient?.close() + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update workflow with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf("role2")) + assertNotNull("The monitor was not created", createdMonitor) + + val createdWorkflow = createWorkflowWithClient( + userClient!!, + randomWorkflow(monitorIds = listOf(createdMonitor.id)), + listOf("role2") + ) + + assertNotNull("The workflow was not created", createdWorkflow) + + try { + updateWorkflowWithClient(userClient!!, createdWorkflow, listOf()) + } catch (e: ResponseException) { + assertEquals("Update workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update workflow as admin with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + val
createdMonitorResponse = createMonitor(monitor, true) + assertNotNull("The monitor was not created", createdMonitorResponse) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + val workflow = randomWorkflow( + monitorIds = listOf(createdMonitorResponse.id) + ) + + val createdWorkflow = createWorkflowWithClient( + client(), + workflow = workflow, + rbacRoles = listOf(TEST_HR_BACKEND_ROLE) + ) + + assertNotNull("The workflow was not created", createdWorkflow) + + val getWorkflowResponse = userClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get workflow failed", RestStatus.OK, getWorkflowResponse?.restStatus()) + + val updatedWorkflow = updateWorkflowWithClient(client(), createdWorkflow, listOf()) + + try { + userClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${updatedWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update workflow with enable filter by with updating with a permission user has no access to and throw exception`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) + assertNotNull("The monitor was not created", createdMonitor) + + val createdWorkflow = createWorkflowWithClient( + userClient!!, + workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id)), listOf(TEST_HR_BACKEND_ROLE, "role2") + ) + + assertNotNull("The workflow was not created", createdWorkflow) + + // getUser should have access to the monitor + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role2"), + getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) + ) + val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) + .setSocketTimeout(60000).build() + + val getWorkflowResponse = getUserClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get workflow failed", RestStatus.OK, getWorkflowResponse?.restStatus()) + + try { + updateWorkflowWithClient(userClient!!, createdWorkflow, listOf(TEST_HR_BACKEND_ROLE, "role1")) + fail("Expected update workflow to fail as user doesn't have access to role1") + } catch (e: ResponseException) { + assertEquals("Update workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteUser(getUser) + getUserClient?.close() + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + 
createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update workflow as another user with enable filter by with removing a permission and adding permission`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE)) + assertNotNull("The monitor was not created", createdMonitor) + + val createdWorkflow = createWorkflowWithClient( + userClient!!, + workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true) + ) + + assertNotNull("The workflow was not created", createdWorkflow) + + // Remove backend role from workflow with new user and add role5 + val updateUser = "updateUser" + createUserWithRoles( + updateUser, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role5"), + false + ) + + val updateUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), updateUser, password) + .setSocketTimeout(60000).build() + val updatedWorkflow = updateWorkflowWithClient(updateUserClient, createdWorkflow, listOf("role5")) + + // old user should no longer have access + try { + userClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${updatedWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteUser(updateUser) + updateUserClient?.close() + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update workflow as admin with enable filter by with removing a permission`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) + assertNotNull("The monitor was not created", createdMonitor) + + val createdWorkflow = createWorkflowWithClient( + userClient!!, + workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id)), + listOf(TEST_HR_BACKEND_ROLE, "role2") + ) + assertNotNull("The workflow was not created", createdWorkflow) + + // getUser should have access to the monitor + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role1", "role2"), + getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) + ) + val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) + .setSocketTimeout(60000).build() + + val getWorkflowResponse = getUserClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + null, + 
BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get workflow failed", RestStatus.OK, getWorkflowResponse?.restStatus()) + + // Remove backend role from workflow + val updatedWorkflow = updateWorkflowWithClient(client(), createdWorkflow, listOf("role4")) + + // original user should no longer have access + try { + userClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${updatedWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + + // get user should no longer have access + try { + getUserClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${updatedWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteUser(getUser) + getUserClient?.close() + } + } + + fun `test delete workflow with disable filter by`() { + disableFilterBy() + val monitor = randomQueryLevelMonitor(enabled = true) + + val createdMonitor = createMonitor(monitor = monitor) + val createdWorkflow = createWorkflow(workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true)) + + assertNotNull("The workflow was not created", createdWorkflow) + assertTrue("The workflow was not enabled", createdWorkflow.enabled) + + deleteWorkflow(workflow = createdWorkflow, deleteDelegates = true) + + val searchMonitor = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", createdMonitor.id)).toString() + // Verify that the delegate monitors are deleted + // search as "admin" - must get 0 docs + val adminMonitorSearchResponse = client().makeRequest( + "POST", + "$ALERTING_BASE_URI/_search", + emptyMap(), + StringEntity(searchMonitor, ContentType.APPLICATION_JSON) + ) + assertEquals("Search monitor failed", RestStatus.OK, adminMonitorSearchResponse.restStatus()) + + val adminMonitorHits = createParser( + XContentType.JSON.xContent(), + adminMonitorSearchResponse.entity.content + ).map()["hits"]!!
as Map<String, Map<String, Any>> + val adminMonitorDocsFound = adminMonitorHits["total"]?.get("value") + assertEquals("Monitor found during search", 0, adminMonitorDocsFound) + + // Verify workflow deletion + try { + client().makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + emptyMap(), + null + ) + fail("Workflow found during search") + } catch (e: ResponseException) { + assertEquals("Get workflow failed", RestStatus.NOT_FOUND.status, e.response.statusLine.statusCode) + } + } + + fun `test delete workflow with enable filter by`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val createdMonitor = createMonitorWithClient( + monitor = randomQueryLevelMonitor(), + client = client(), + rbacRoles = listOf("admin") + ) + + assertNotNull("The monitor was not created", createdMonitor) + + val createdWorkflow = createWorkflow(workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true)) + assertNotNull("The workflow was not created", createdWorkflow) + assertTrue("The workflow was not enabled", createdWorkflow.enabled) + + deleteWorkflow(workflow = createdWorkflow, true) + + // Verify underlying delegates deletion + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", createdMonitor.id)).toString() + // search as "admin" - must get 0 docs + val adminSearchResponse = client().makeRequest( + "POST", + "$ALERTING_BASE_URI/_search", + emptyMap(), + StringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) + + val adminHits = createParser( + XContentType.JSON.xContent(), + adminSearchResponse.entity.content + ).map()["hits"]!!
as Map<String, Map<String, Any>> + val adminDocsFound = adminHits["total"]?.get("value") + assertEquals("Monitor found during search", 0, adminDocsFound) + + // Verify workflow deletion + try { + client().makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + emptyMap(), + null + ) + fail("Workflow found during search") + } catch (e: ResponseException) { + assertEquals("Get workflow failed", RestStatus.NOT_FOUND.status, e.response.statusLine.statusCode) + } + } + + fun `test delete workflow with enable filter with user that doesn't have delete_monitor cluster privilege failed`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val deleteUser = "deleteUser" + createUserWithTestDataAndCustomRole( + deleteUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role1", "role3"), + listOf( + getClusterPermissionsFromCustomRole(ALERTING_DELETE_WORKFLOW_ACCESS), + getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) + ) + ) + val deleteUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), deleteUser, password) + .setSocketTimeout(60000).build() + + try { + val createdMonitor = createMonitorWithClient(userClient!!, monitor = randomQueryLevelMonitor()) + + assertNotNull("The monitor was not created", createdMonitor) + + val createdWorkflow = createWorkflowWithClient( + client = userClient!!, + workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true) + ) + assertNotNull("The workflow was not created", createdWorkflow) + assertTrue("The workflow was not enabled", createdWorkflow.enabled) + + try { + deleteWorkflowWithClient(deleteUserClient, workflow = createdWorkflow, true) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Delete workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } + patchUserBackendRoles(deleteUser, arrayOf("role2")) + + val response = deleteWorkflowWithClient(deleteUserClient!!, workflow = createdWorkflow, true) + assertEquals("Delete workflow failed", RestStatus.OK, response?.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) + deleteUser(deleteUser) + deleteUserClient?.close() + } + } + + fun `test execute workflow with an user with execute workflow access`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_EXECUTE_WORKFLOW_ACCESS) + ) + + val monitor = createRandomMonitor(true) + val workflow = createRandomWorkflow(listOf(monitor.id), true) + + try { + val executeWorkflowResponse = userClient?.makeRequest( + "POST", + "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}/_execute", + mutableMapOf() + ) + assertEquals("Executing workflow failed", RestStatus.OK, executeWorkflowResponse?.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test execute workflow with an user without execute workflow access`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) + ) + + val monitor = createRandomMonitor(true) + val workflow =
createRandomWorkflow(listOf(monitor.id), true) + + try { + userClient?.makeRequest( + "POST", + "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}/_execute", + mutableMapOf() + ) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Execute workflow failed", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test delete workflow with an user with delete workflow access`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_DELETE_WORKFLOW_ACCESS) + ) + + val monitor = createRandomMonitor(true) + val workflow = createRandomWorkflow(monitorIds = listOf(monitor.id)) + val refresh = true + + try { + val deleteWorkflowResponse = userClient?.makeRequest( + "DELETE", + "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}?refresh=$refresh", + emptyMap(), + monitor.toHttpEntity() + ) + assertEquals("DELETE workflow failed", RestStatus.OK, deleteWorkflowResponse?.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test delete workflow with deleting delegates with an user with delete workflow access`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_DELETE_WORKFLOW_ACCESS) + ) + + val monitor = createRandomMonitor(true) + val workflow = createRandomWorkflow(monitorIds = listOf(monitor.id)) + + try { + val deleteWorkflowResponse = deleteWorkflowWithClient( + userClient!!, + workflow, + deleteDelegates = true, + refresh = true + ) + assertEquals("DELETE workflow failed", RestStatus.OK, deleteWorkflowResponse?.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + // Verify delegate deletion + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() + // search as "admin" - must get 0 docs + val adminSearchResponse = client().makeRequest( + "POST", + "$ALERTING_BASE_URI/_search", + emptyMap(), + StringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) + + val adminHits = createParser( + XContentType.JSON.xContent(), + adminSearchResponse.entity.content + ).map()["hits"]!! 
as Map<String, Map<String, Any>> + val adminDocsFound = adminHits["total"]?.get("value") + assertEquals("Monitor found during search", 0, adminDocsFound) + } + + fun `test delete workflow with an user without delete monitor access`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) + ) + + val monitor = createRandomMonitor(true) + val workflow = createRandomWorkflow(monitorIds = listOf(monitor.id)) + + try { + userClient?.makeRequest( + "DELETE", + "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}?refresh=true", + emptyMap(), + monitor.toHttpEntity() + ) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("DELETE workflow failed", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test admin all access with enable filter by`() { + enableFilterBy() + createUserWithTestData(user, TEST_HR_INDEX, TEST_HR_ROLE, TEST_HR_BACKEND_ROLE) + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf(user)) + try { + // randomMonitor has a dummy user; the API ignores the User passed as part of the monitor and picks user info from the logged-in user. + val monitor = randomQueryLevelMonitor().copy( + inputs = listOf( + SearchInput( + indices = listOf(TEST_HR_INDEX), + query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) + ) + ) + ) + + val createResponse = userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + assertEquals("Create monitor failed", RestStatus.CREATED, createResponse?.restStatus()) + val monitorJson = JsonXContent.jsonXContent.createParser( + NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, + createResponse?.entity?.content + ).map() + val monitorId = monitorJson["_id"] as String + + val workflow = randomWorkflow(monitorIds = listOf(monitorId)) + val createWorkflowResponse = userClient?.makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) + assertEquals("Create workflow failed", RestStatus.CREATED, createWorkflowResponse?.restStatus()) + + val workflowJson = JsonXContent.jsonXContent.createParser( + NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, + createWorkflowResponse?.entity?.content + ).map() + + val id: String = workflowJson["_id"] as String + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", id)).toString() + + // get as "admin" - must get 1 doc + val adminGetResponse = client().makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/$id", + emptyMap(), + StringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Get workflow failed", RestStatus.OK, adminGetResponse.restStatus()) + + // delete as "admin" + val adminDeleteResponse = client().makeRequest( + "DELETE", + "$WORKFLOW_ALERTING_BASE_URI/$id", + emptyMap(), + StringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Delete workflow failed", RestStatus.OK, adminDeleteResponse.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) + } + } + + fun `test execute workflow with bucket-level and doc-level chained monitors with user having partial index permissions`() { + createUser(user, arrayOf(TEST_HR_BACKEND_ROLE)) + createTestIndex(TEST_HR_INDEX) + + createIndexRoleWithDocLevelSecurity( + TEST_HR_ROLE, + TEST_HR_INDEX, + TERM_DLS_QUERY, + listOf(ALERTING_INDEX_WORKFLOW_ACCESS, ALERTING_INDEX_MONITOR_ACCESS) +
) + createUserRolesMapping(TEST_HR_ROLE, arrayOf(user)) + + // Add a doc that is accessible to the user + indexDoc( + TEST_HR_INDEX, + "1", + """ + { + "test_field": "a", + "accessible": true + } + """.trimIndent() + ) + + // Add a second doc that is not accessible to the user + indexDoc( + TEST_HR_INDEX, + "2", + """ + { + "test_field": "b", + "accessible": false + } + """.trimIndent() + ) + + indexDoc( + TEST_HR_INDEX, + "3", + """ + { + "test_field": "c", + "accessible": true + } + """.trimIndent() + ) + + val compositeSources = listOf( + TermsValuesSourceBuilder("test_field").field("test_field") + ) + val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) + val input = SearchInput( + indices = listOf(TEST_HR_INDEX), + query = SearchSourceBuilder().size(0).query(QueryBuilders.matchAllQuery()).aggregation(compositeAgg) + ) + val triggerScript = """ + params.docCount > 0 + """.trimIndent() + + var trigger = randomBucketLevelTrigger() + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "composite_agg", + filter = null + ), + actions = listOf() + ) + val bucketMonitor = createMonitorWithClient( + userClient!!, + randomBucketLevelMonitor( + inputs = listOf(input), + enabled = false, + triggers = listOf(trigger), + dataSources = DataSources(findingsEnabled = true) + ) + ) + assertNotNull("The bucket monitor was not created", bucketMonitor) + + val docQuery1 = DocLevelQuery(query = "test_field:\"a\"", name = "3", fields = listOf()) + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(DocLevelMonitorInput("description", listOf(TEST_HR_INDEX), listOf(docQuery1))), + triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)) + ) + val docMonitor = createMonitorWithClient(userClient!!, monitor1)!! 
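+ // Both delegate monitors above were created by the DLS-restricted user, so they should execute with that user's document-level security filter applied; the assertions below expect zero alerts for both monitors under these conditions.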
+ assertNotNull("The doc level monitor was not created", docMonitor) + + val workflow = randomWorkflow(monitorIds = listOf(bucketMonitor.id, docMonitor.id)) + val workflowResponse = createWorkflowWithClient(userClient!!, workflow) + assertNotNull("The workflow was not created", workflowResponse) + + try { + executeWorkflow(workflowId = workflowResponse.id) + val bucketAlerts = searchAlerts(bucketMonitor) + assertEquals("Incorrect number of alerts", 0, bucketAlerts.size) + + val docAlerts = searchAlerts(docMonitor) + assertEquals("Incorrect number of alerts", 0, docAlerts.size) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } +} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/WorkflowRestApiIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/WorkflowRestApiIT.kt new file mode 100644 index 000000000..cf48720af --- /dev/null +++ b/alerting/src/test/kotlin/org/opensearch/alerting/resthandler/WorkflowRestApiIT.kt @@ -0,0 +1,1193 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.junit.Assert +import org.opensearch.alerting.ALWAYS_RUN +import org.opensearch.alerting.AlertingRestTestCase +import org.opensearch.alerting.WORKFLOW_ALERTING_BASE_URI +import org.opensearch.alerting.makeRequest +import org.opensearch.alerting.randomBucketLevelMonitor +import org.opensearch.alerting.randomChainedAlertTrigger +import org.opensearch.alerting.randomDocumentLevelMonitor +import org.opensearch.alerting.randomDocumentLevelTrigger +import org.opensearch.alerting.randomQueryLevelMonitor +import org.opensearch.alerting.randomQueryLevelTrigger +import org.opensearch.alerting.randomUser +import org.opensearch.alerting.randomWorkflow +import org.opensearch.alerting.randomWorkflowWithDelegates +import org.opensearch.client.ResponseException +import org.opensearch.commons.alerting.model.ChainedAlertTrigger +import org.opensearch.commons.alerting.model.ChainedMonitorFindings +import org.opensearch.commons.alerting.model.CompositeInput +import org.opensearch.commons.alerting.model.Delegate +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.IntervalSchedule +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.core.rest.RestStatus +import org.opensearch.index.query.QueryBuilders +import org.opensearch.script.Script +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.test.OpenSearchTestCase +import org.opensearch.test.junit.annotations.TestLogging +import java.time.Instant +import java.time.temporal.ChronoUnit +import java.util.Collections +import java.util.Locale +import java.util.UUID +import java.util.concurrent.TimeUnit + +@TestLogging("level:DEBUG", reason = "Debug for tests.") +@Suppress("UNCHECKED_CAST") +class WorkflowRestApiIT : AlertingRestTestCase() { + + fun `test create workflow success`() { + val index = createTestIndex() + val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery1) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor 
= randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val monitorResponse = createMonitor(monitor) + + val workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + + val createResponse = client().makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) + + assertEquals("Create workflow failed", RestStatus.CREATED, createResponse.restStatus()) + + val responseBody = createResponse.asMap() + val createdId = responseBody["_id"] as String + val createdVersion = responseBody["_version"] as Int + + assertNotEquals("response is missing Id", Workflow.NO_ID, createdId) + assertTrue("incorrect version", createdVersion > 0) + assertEquals("Incorrect Location header", "$WORKFLOW_ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) + } + + fun `test create workflow with different monitor types success`() { + val index = createTestIndex() + val docQuery = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val docLevelMonitorResponse = createMonitor(monitor) + + val bucketLevelMonitor = randomBucketLevelMonitor( + inputs = listOf( + SearchInput( + listOf(index), + SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) + .aggregation(TermsAggregationBuilder("test_agg").field("test_field")) + ) + ) + ) + val bucketLevelMonitorResponse = createMonitor(bucketLevelMonitor) + + val workflow = randomWorkflow( + monitorIds = listOf(docLevelMonitorResponse.id, bucketLevelMonitorResponse.id), + triggers = listOf( + randomChainedAlertTrigger(condition = Script("trigger1")), + randomChainedAlertTrigger(condition = Script("trigger2")) + ) + ) + + val createResponse = client().makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) + + assertEquals("Create workflow failed", RestStatus.CREATED, createResponse.restStatus()) + + val responseBody = createResponse.asMap() + val createdId = responseBody["_id"] as String + val createdVersion = responseBody["_version"] as Int + + assertNotEquals("response is missing Id", Workflow.NO_ID, createdId) + assertTrue("incorrect version", createdVersion > 0) + assertEquals("Incorrect Location header", "$WORKFLOW_ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) + + val workflowById = getWorkflow(createdId) + assertNotNull(workflowById) + + // Verify workflow + assertNotEquals("response is missing Id", Monitor.NO_ID, workflowById.id) + assertTrue("incorrect version", workflowById.version > 0) + assertEquals("Workflow name not correct", workflow.name, workflowById.name) + assertEquals("Workflow owner not correct", workflow.owner, workflowById.owner) + assertEquals("Workflow input not correct", workflow.inputs, workflowById.inputs) + + // Delegate verification + @Suppress("UNCHECKED_CAST") + val delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order } + assertEquals("Delegates size not correct", 2, delegates.size) + + val delegate1 = delegates[0] + assertNotNull(delegate1) + assertEquals("Delegate1 order not correct", 1, delegate1.order) + assertEquals("Delegate1 id not correct", docLevelMonitorResponse.id, delegate1.monitorId) + + val delegate2 = delegates[1] + assertNotNull(delegate2) +
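+ // delegate2 (the bucket-level monitor) is expected to chain on delegate1's findings; the next assertions verify its order and its chained-findings link.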
assertEquals("Delegate2 order not correct", 2, delegate2.order) + assertEquals("Delegate2 id not correct", bucketLevelMonitorResponse.id, delegate2.monitorId) + assertEquals( + "Delegate2 Chained finding not correct", docLevelMonitorResponse.id, delegate2.chainedMonitorFindings!!.monitorId + ) + + assertEquals(workflowById.triggers.size, 2) + assertTrue(workflowById.triggers[0] is ChainedAlertTrigger) + assertTrue(workflowById.triggers[1] is ChainedAlertTrigger) + assertTrue((workflowById.triggers[0] as ChainedAlertTrigger).condition == Script("trigger1")) + assertTrue((workflowById.triggers[1] as ChainedAlertTrigger).condition == Script("trigger2")) + } + + fun `test create workflow without delegate failure`() { + val workflow = randomWorkflow( + monitorIds = Collections.emptyList() + ) + try { + createWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Delegates list can not be empty.") + ) + } + } + } + + fun `test create workflow duplicate delegate failure`() { + val workflow = randomWorkflow( + monitorIds = listOf("1", "1", "2") + ) + try { + createWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Duplicate delegates not allowed") + ) + } + } + } + + fun `test create workflow delegate monitor doesn't exist failure`() { + val index = createTestIndex() + val docQuery = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val docLevelMonitorResponse = createMonitor(monitor) + + val workflow = randomWorkflow( + monitorIds = listOf("-1", docLevelMonitorResponse.id) + ) + try { + createWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("are not valid monitor ids") + ) + } + } + } + + fun `test create workflow sequence order not correct failure`() { + val delegates = listOf( + Delegate(1, "monitor-1"), + Delegate(1, "monitor-2"), + Delegate(2, "monitor-3") + ) + val workflow = randomWorkflowWithDelegates( + delegates = delegates + ) + try { + createWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Sequence ordering of delegate monitor shouldn't contain duplicate order values") + ) + } + } + } + + fun `test create workflow chained findings monitor not in sequence failure`() { + val delegates = listOf( + Delegate(1, "monitor-1"), + Delegate(2, "monitor-2", ChainedMonitorFindings("monitor-1")), + Delegate(3, "monitor-3", ChainedMonitorFindings("monitor-x")) + ) + val workflow = randomWorkflowWithDelegates( + delegates = delegates + ) + + try { + createWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", 
RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Chained Findings Monitor monitor-x doesn't exist in sequence") + ) + } + } + } + + fun `test create workflow chained findings order not correct failure`() { + val delegates = listOf( + Delegate(1, "monitor-1"), + Delegate(3, "monitor-2", ChainedMonitorFindings("monitor-1")), + Delegate(2, "monitor-3", ChainedMonitorFindings("monitor-2")) + ) + val workflow = randomWorkflowWithDelegates( + delegates = delegates + ) + + try { + createWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Chained Findings Monitor monitor-2 should be executed before monitor monitor-3") + ) + } + } + } + + fun `test create workflow when monitor index not initialized failure`() { + val delegates = listOf( + Delegate(1, "monitor-1") + ) + val workflow = randomWorkflowWithDelegates( + delegates = delegates + ) + + try { + createWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.NOT_FOUND, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Monitors not found") + ) + } + } + } + + fun `test create workflow delegate and chained finding monitor different indices failure`() { + val index = randomAlphaOfLength(10).lowercase(Locale.ROOT) + createTestIndex(index) + + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val docMonitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val docMonitorResponse = createMonitor(docMonitor) + + val index1 = "$index-1" + createTestIndex(index1) + + val docLevelInput1 = DocLevelMonitorInput( + "description", listOf(index1), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + + val docMonitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger) + ) + val docMonitorResponse1 = createMonitor(docMonitor1) + + val workflow = randomWorkflow( + monitorIds = listOf(docMonitorResponse1.id, docMonitorResponse.id) + ) + try { + createWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("doesn't query all of chained findings monitor's indices") + ) + } + } + } + + fun `test create workflow query monitor chained findings monitor failure`() { + val index = createTestIndex() + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val docMonitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val docMonitorResponse = createMonitor(docMonitor) + + val queryMonitor = randomQueryLevelMonitor() + val queryMonitorResponse = createMonitor(queryMonitor) + + val workflow = randomWorkflow( + monitorIds = 
listOf(queryMonitorResponse.id, docMonitorResponse.id) + ) + try { + createWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Query level monitor can't be part of chained findings") + ) + } + } + } + + fun `test create workflow with 26 delegates failure`() { + val monitorsIds = mutableListOf<String>() + for (i in 0..25) { + monitorsIds.add(UUID.randomUUID().toString()) + } + val workflow = randomWorkflow( + monitorIds = monitorsIds + ) + try { + createWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Delegates list can not be larger then 25.") + ) + } + } + } + + fun `test update workflow add monitor success`() { + val index = createTestIndex() + val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery1) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val monitorResponse = createMonitor(monitor) + + val workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + + val createResponse = client().makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) + + assertEquals("Create workflow failed", RestStatus.CREATED, createResponse.restStatus()) + + val responseBody = createResponse.asMap() + val createdId = responseBody["_id"] as String + val createdVersion = responseBody["_version"] as Int + + assertNotEquals("response is missing Id", Workflow.NO_ID, createdId) + assertTrue("incorrect version", createdVersion > 0) + assertEquals("Incorrect Location header", "$WORKFLOW_ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) + + val monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + + val monitorResponse2 = createMonitor(monitor2) + + val updatedWorkflow = randomWorkflow( + id = createdId, + monitorIds = listOf(monitorResponse.id, monitorResponse2.id) + ) + + val updateResponse = client().makeRequest("PUT", updatedWorkflow.relativeUrl(), emptyMap(), updatedWorkflow.toHttpEntity()) + + assertEquals("Update workflow failed", RestStatus.OK, updateResponse.restStatus()) + + val updateResponseBody = updateResponse.asMap() + val updatedId = updateResponseBody["_id"] as String + val updatedVersion = updateResponseBody["_version"] as Int + + assertNotEquals("response is missing Id", Workflow.NO_ID, updatedId) + assertTrue("incorrect version", updatedVersion > 0) + + val workflowById = getWorkflow(updatedId) + assertNotNull(workflowById) + // Delegate verification + @Suppress("UNCHECKED_CAST") + val delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order } + assertEquals("Delegates size not correct", 2, delegates.size) + + val delegate1 = delegates[0] + assertNotNull(delegate1) + assertEquals("Delegate1 order not correct", 1, delegate1.order) + assertEquals("Delegate1 id not correct", monitorResponse.id, delegate1.monitorId) + + val delegate2 = delegates[1] + assertNotNull(delegate2) + assertEquals("Delegate2 order not correct", 2,
delegate2.order) + assertEquals("Delegate2 id not correct", monitorResponse2.id, delegate2.monitorId) + assertEquals( + "Delegate2 Chained finding not correct", monitorResponse.id, delegate2.chainedMonitorFindings!!.monitorId + ) + } + + fun `test update workflow remove monitor success`() { + val index = createTestIndex() + val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery1) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val monitorResponse = createMonitor(monitor) + + val monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + + val monitorResponse2 = createMonitor(monitor2) + + val workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id, monitorResponse2.id) + ) + + val createResponse = client().makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) + + assertEquals("Create workflow failed", RestStatus.CREATED, createResponse.restStatus()) + + val responseBody = createResponse.asMap() + val createdId = responseBody["_id"] as String + val createdVersion = responseBody["_version"] as Int + + assertNotEquals("response is missing Id", Workflow.NO_ID, createdId) + assertTrue("incorrect version", createdVersion > 0) + assertEquals("Incorrect Location header", "$WORKFLOW_ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) + + var workflowById = getWorkflow(createdId) + assertNotNull(workflowById) + // Delegate verification + @Suppress("UNCHECKED_CAST") + var delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order } + assertEquals("Delegates size not correct", 2, delegates.size) + + val updatedWorkflow = randomWorkflow( + id = createdId, + monitorIds = listOf(monitorResponse.id) + ) + + val updateResponse = client().makeRequest("PUT", updatedWorkflow.relativeUrl(), emptyMap(), updatedWorkflow.toHttpEntity()) + + assertEquals("Update workflow failed", RestStatus.OK, updateResponse.restStatus()) + + val updateResponseBody = updateResponse.asMap() + val updatedId = updateResponseBody["_id"] as String + val updatedVersion = updateResponseBody["_version"] as Int + + assertNotEquals("response is missing Id", Workflow.NO_ID, updatedId) + assertTrue("incorrect version", updatedVersion > 0) + + workflowById = getWorkflow(updatedId) + assertNotNull(workflowById) + // Delegate verification + @Suppress("UNCHECKED_CAST") + delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order } + assertEquals("Delegates size not correct", 1, delegates.size) + + val delegate1 = delegates[0] + assertNotNull(delegate1) + assertEquals("Delegate1 order not correct", 1, delegate1.order) + assertEquals("Delegate1 id not correct", monitorResponse.id, delegate1.monitorId) + } + + fun `test update workflow change order of delegate monitors`() { + val index = createTestIndex() + val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery1) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + + val monitor2 = randomDocumentLevelMonitor( + inputs =
listOf(docLevelInput), + triggers = listOf(trigger) + ) + + val monitorResponse1 = createMonitor(monitor1) + val monitorResponse2 = createMonitor(monitor2) + + val workflow = randomWorkflow( + monitorIds = listOf(monitorResponse1.id, monitorResponse2.id) + ) + + val workflowResponse = createWorkflow(workflow) + assertNotNull("Workflow creation failed", workflowResponse) + assertNotNull(workflow) + assertNotEquals("response is missing Id", Monitor.NO_ID, workflowResponse.id) + + var workflowById = getWorkflow(workflowResponse.id) + assertNotNull(workflowById) + + val updatedWorkflowResponse = updateWorkflow( + randomWorkflow( + id = workflowById.id, + monitorIds = listOf(monitorResponse2.id, monitorResponse1.id) + ) + ) + + assertNotNull("Workflow update failed", updatedWorkflowResponse) + assertNotNull(updatedWorkflowResponse) + assertEquals( + "Workflow id changed", + workflowResponse.id, + updatedWorkflowResponse.id + ) + assertTrue("incorrect version", updatedWorkflowResponse.version > 0) + + workflowById = getWorkflow(updatedWorkflowResponse.id) + + // Verify workflow + assertNotEquals("response is missing Id", Monitor.NO_ID, workflowById.id) + assertTrue("incorrect version", workflowById.version > 0) + assertEquals( + "Workflow name not correct", + updatedWorkflowResponse.name, + workflowById.name + ) + assertEquals( + "Workflow owner not correct", + updatedWorkflowResponse.owner, + workflowById.owner + ) + assertEquals( + "Workflow input not correct", + updatedWorkflowResponse.inputs, + workflowById.inputs + ) + + // Delegate verification + @Suppress("UNCHECKED_CAST") + val delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order } + assertEquals("Delegates size not correct", 2, delegates.size) + + val delegate1 = delegates[0] + assertNotNull(delegate1) + assertEquals("Delegate1 order not correct", 1, delegate1.order) + assertEquals("Delegate1 id not correct", monitorResponse2.id, delegate1.monitorId) + + val delegate2 = delegates[1] + assertNotNull(delegate2) + assertEquals("Delegate2 order not correct", 2, delegate2.order) + assertEquals("Delegate2 id not correct", monitorResponse1.id, delegate2.monitorId) + assertEquals( + "Delegate2 Chained finding not correct", monitorResponse2.id, delegate2.chainedMonitorFindings!!.monitorId + ) + } + + fun `test update workflow doesn't exist failure`() { + val index = createTestIndex() + val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery1) + ) + val monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)) + ) + + val monitorResponse1 = createMonitor(monitor1) + + val workflow = randomWorkflow( + monitorIds = listOf(monitorResponse1.id) + ) + val workflowResponse = createWorkflow(workflow) + assertNotNull("Workflow creation failed", workflowResponse) + + try { + updateWorkflow(workflow.copy(id = "testId")) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.NOT_FOUND, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning GetWorkflow Action error ", + it.contains("Workflow with testId is not found") + ) + } + } + val updatedWorkflow = updateWorkflow(workflowResponse.copy(enabled = true, enabledTime = Instant.now())) + assertNotNull(updatedWorkflow) + val getWorkflow = getWorkflow(workflowId = updatedWorkflow.id) +
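+ // The update above re-indexed the workflow with enabled = true, so the re-fetched copy should report it as enabled.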
assertTrue(getWorkflow.enabled) + } + + fun `test update workflow duplicate delegate failure`() { + val index = createTestIndex() + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + + val monitorResponse = createMonitor(monitor) + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + + val workflowResponse = createWorkflow(workflow) + assertNotNull("Workflow creation failed", workflowResponse) + + workflow = randomWorkflow( + id = workflowResponse.id, + monitorIds = listOf("1", "1", "2") + ) + try { + updateWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Duplicate delegates not allowed") + ) + } + } + } + + fun `test update workflow delegate monitor doesn't exist failure`() { + val index = createTestIndex() + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val monitorResponse = createMonitor(monitor) + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + val workflowResponse = createWorkflow(workflow) + assertNotNull("Workflow creation failed", workflowResponse) + + workflow = randomWorkflow( + id = workflowResponse.id, + monitorIds = listOf("-1", monitorResponse.id) + ) + + try { + updateWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("are not valid monitor ids") + ) + } + } + } + + fun `test update workflow sequence order not correct failure`() { + val index = createTestIndex() + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val monitorResponse = createMonitor(monitor) + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + val workflowResponse = createWorkflow(workflow) + assertNotNull("Workflow creation failed", workflowResponse) + + val delegates = listOf( + Delegate(1, "monitor-1"), + Delegate(1, "monitor-2"), + Delegate(2, "monitor-3") + ) + workflow = randomWorkflowWithDelegates( + id = workflowResponse.id, + delegates = delegates + ) + try { + updateWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Sequence ordering of delegate monitor shouldn't contain duplicate order values") + ) + } + } + } + + fun `test update workflow chained findings monitor not 
in sequence failure`() { + val index = createTestIndex() + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val monitorResponse = createMonitor(monitor) + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + val workflowResponse = createWorkflow(workflow) + assertNotNull("Workflow creation failed", workflowResponse) + + val delegates = listOf( + Delegate(1, "monitor-1"), + Delegate(2, "monitor-2", ChainedMonitorFindings("monitor-1")), + Delegate(3, "monitor-3", ChainedMonitorFindings("monitor-x")) + ) + workflow = randomWorkflowWithDelegates( + id = workflowResponse.id, + delegates = delegates + ) + + try { + updateWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Chained Findings Monitor monitor-x doesn't exist in sequence") + ) + } + } + } + + fun `test update workflow chained findings order not correct failure`() { + val index = createTestIndex() + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val monitorResponse = createMonitor(monitor) + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + val workflowResponse = createWorkflow(workflow) + assertNotNull("Workflow creation failed", workflowResponse) + + val delegates = listOf( + Delegate(1, "monitor-1"), + Delegate(3, "monitor-2", ChainedMonitorFindings("monitor-1")), + Delegate(2, "monitor-3", ChainedMonitorFindings("monitor-2")) + ) + workflow = randomWorkflowWithDelegates( + id = workflowResponse.id, + delegates = delegates + ) + + try { + updateWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Chained Findings Monitor monitor-2 should be executed before monitor monitor-3") + ) + } + } + } + + @Throws(Exception::class) + fun `test getting a workflow`() { + val query = randomQueryLevelMonitor() + val monitor = createMonitor(query) + val storedMonitor = getMonitor(monitor.id) + + assertEquals("Indexed and retrieved monitor differ", monitor, storedMonitor) + + val workflow = createRandomWorkflow(monitorIds = listOf(monitor.id)) + + val storedWorkflow = getWorkflow(workflow.id) + + assertEquals("Indexed and retrieved workflow differ", workflow.id, storedWorkflow.id) + val delegates = (storedWorkflow.inputs[0] as CompositeInput).sequence.delegates + assertEquals("Delegate list not correct", 1, delegates.size) + assertEquals("Delegate order id not correct", 1, delegates[0].order) + assertEquals("Delegate id list not correct", monitor.id, delegates[0].monitorId) + } + + @Throws(Exception::class) + fun `test getting a workflow that doesn't exist`() { + try { + getWorkflow(randomAlphaOfLength(20)) + fail("expected 
response exception") + } catch (e: ResponseException) { + assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) + } + } + + fun `test delete workflow`() { + val query = randomQueryLevelMonitor() + val monitor = createMonitor(query) + + val workflowRequest = randomWorkflow( + monitorIds = listOf(monitor.id) + ) + val workflowResponse = createWorkflow(workflowRequest) + val workflowId = workflowResponse.id + val getWorkflowResponse = getWorkflow(workflowResponse.id) + + assertNotNull(getWorkflowResponse) + assertEquals(workflowId, getWorkflowResponse.id) + + client().makeRequest("DELETE", getWorkflowResponse.relativeUrl()) + + // Verify that the workflow is deleted + try { + getWorkflow(workflowId) + } catch (e: ResponseException) { + assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning GetWorkflow Action error ", + it.contains("Workflow not found.") + ) + } + } + } + + fun `test delete workflow delete delegate monitors`() { + val query = randomQueryLevelMonitor() + val monitor = createMonitor(query) + + val workflowRequest = randomWorkflow( + monitorIds = listOf(monitor.id) + ) + val workflowResponse = createWorkflow(workflowRequest) + val workflowId = workflowResponse.id + val getWorkflowResponse = getWorkflow(workflowResponse.id) + + assertNotNull(getWorkflowResponse) + assertEquals(workflowId, getWorkflowResponse.id) + + client().makeRequest("DELETE", getWorkflowResponse.relativeUrl().plus("?deleteDelegateMonitors=true")) + + // Verify that the workflow is deleted + try { + getWorkflow(workflowId) + } catch (e: ResponseException) { + assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning GetWorkflow Action error ", + it.contains("Workflow not found.") + ) + } + } + + // Verify that delegate monitor is deleted + try { + getMonitor(monitor.id) + } catch (e: ResponseException) { + assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning GetWorkflow Action error ", + it.contains("Monitor not found.") + ) + } + } + } + + fun `test delete workflow preserve delegate monitors`() { + val query = randomQueryLevelMonitor() + val monitor = createMonitor(query) + + val workflowRequest = randomWorkflow( + monitorIds = listOf(monitor.id) + ) + val workflowResponse = createWorkflow(workflowRequest) + val workflowId = workflowResponse.id + val getWorkflowResponse = getWorkflow(workflowResponse.id) + + assertNotNull(getWorkflowResponse) + assertEquals(workflowId, getWorkflowResponse.id) + + client().makeRequest("DELETE", getWorkflowResponse.relativeUrl().plus("?deleteDelegateMonitors=false")) + + // Verify that the workflow is deleted + try { + getWorkflow(workflowId) + } catch (e: ResponseException) { + assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning GetWorkflow Action error ", + it.contains("Workflow not found.") + ) + } + } + + // Verify that delegate monitor is not deleted + val delegateMonitor = getMonitor(monitor.id) + assertNotNull(delegateMonitor) + } + + @Throws(Exception::class) + fun `test deleting a workflow that doesn't exist`() { + try { + client().makeRequest("DELETE", "$WORKFLOW_ALERTING_BASE_URI/foobarbaz") + fail("expected 404 ResponseException") + } catch (e: ResponseException) { + assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) + } + } + + fun `test chained alerts and audit alerts for workflows with query 
level monitor`() { + val index = createTestIndex() + val docQuery1 = DocLevelQuery(query = "test_field:\"test_value_1\"", name = "3", fields = listOf()) + val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger1), + enabled = false + ) + val monitorResponse = createMonitor(monitor1)!! + var monitor2 = randomQueryLevelMonitor( + triggers = listOf(randomQueryLevelTrigger(condition = Script("return true"))), + enabled = false + ) + + val monitorResponse2 = createMonitor(monitor2)!! + val andTrigger = randomChainedAlertTrigger( + name = "1And2", + condition = Script("monitor[id=${monitorResponse.id}] && monitor[id=${monitorResponse2.id}]") + ) + + val workflow = Workflow( + id = "", + version = 2, + name = "test", + enabled = false, + schedule = IntervalSchedule(5, ChronoUnit.MINUTES), + lastUpdateTime = Instant.now(), + enabledTime = null, + workflowType = Workflow.WorkflowType.COMPOSITE, + user = randomUser(), + schemaVersion = -1, + inputs = listOf( + CompositeInput( + org.opensearch.commons.alerting.model.Sequence( + delegates = listOf( + Delegate(1, monitorResponse.id), + Delegate(2, monitorResponse2.id) + ) + ) + ) + ), + owner = "alerting", + triggers = listOf(andTrigger) + ) + val workflowById = createWorkflow(workflow) + assertNotNull(workflowById) + val workflowId = workflowById.id + + insertSampleTimeSerializedData( + index, + listOf( + "test_value_1" + ) + ) + val searchMonitorResponse = searchMonitors() + logger.error(searchMonitorResponse) + val jobsList = searchMonitorResponse.hits.toList() + var numMonitors = 0 + var numWorkflows = 0 + jobsList.forEach { + val map = it.sourceAsMap + if (map["type"] == "workflow") numWorkflows++ + else if (map["type"] == "monitor") numMonitors++ + } + Assert.assertEquals(numMonitors, 2) + Assert.assertEquals(numWorkflows, 1) + val response = executeWorkflow(workflowId = workflowId, params = emptyMap()) + val executeWorkflowResponse = entityAsMap(response) + logger.info(executeWorkflowResponse) + val executionId = executeWorkflowResponse["execution_id"] + Assert.assertTrue(executeWorkflowResponse.containsKey("trigger_results")) + val workflowTriggerResults = executeWorkflowResponse["trigger_results"] as Map + assertEquals(workflowTriggerResults.size, 1) + assertTrue( + (workflowTriggerResults[andTrigger.id] as Map)["triggered"] as Boolean + ) + val res = getWorkflowAlerts(workflowId = workflowId, getAssociatedAlerts = true) + val getWorkflowAlerts = entityAsMap(res) + Assert.assertTrue(getWorkflowAlerts.containsKey("alerts")) + Assert.assertTrue(getWorkflowAlerts.containsKey("associatedAlerts")) + val alerts = getWorkflowAlerts["alerts"] as List> + assertEquals(alerts.size, 1) + Assert.assertEquals(alerts[0]["execution_id"], executionId) + Assert.assertEquals(alerts[0]["workflow_id"], workflowId) + Assert.assertEquals(alerts[0]["monitor_id"], "") + val associatedAlerts = getWorkflowAlerts["associatedAlerts"] as List> + assertEquals(associatedAlerts.size, 2) + + val res1 = getWorkflowAlerts(workflowId = workflowId, alertId = alerts[0]["id"].toString(), getAssociatedAlerts = true) + val getWorkflowAlerts1 = entityAsMap(res1) + Assert.assertTrue(getWorkflowAlerts1.containsKey("alerts")) + Assert.assertTrue(getWorkflowAlerts1.containsKey("associatedAlerts")) + val alerts1 = getWorkflowAlerts1["alerts"] as List> + assertEquals(alerts1.size, 1) 
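+ // Fetching workflow alerts filtered by the chained alert's id should return that
+ // same single chained alert; the assertions below re-check its execution_id and
+ // workflow_id and confirm both delegate monitors' audit alerts remain associated.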
+ Assert.assertEquals(alerts1[0]["execution_id"], executionId) + Assert.assertEquals(alerts1[0]["workflow_id"], workflowId) + Assert.assertEquals(alerts1[0]["monitor_id"], "") + val associatedAlerts1 = getWorkflowAlerts1["associatedAlerts"] as List> + assertEquals(associatedAlerts1.size, 2) + + val getAlertsRes = getAlerts() + val getAlertsMap = getAlertsRes.asMap() + Assert.assertTrue(getAlertsMap.containsKey("alerts")) + val getAlertsAlerts = (getAlertsMap["alerts"] as ArrayList>) + assertEquals(getAlertsAlerts.size, 1) + Assert.assertEquals(getAlertsAlerts[0]["execution_id"], executionId) + Assert.assertEquals(getAlertsAlerts[0]["workflow_id"], workflowId) + Assert.assertEquals(getAlertsAlerts[0]["monitor_id"], "") + Assert.assertEquals(getAlertsAlerts[0]["id"], alerts1[0]["id"]) + + val ackRes = acknowledgeChainedAlerts(workflowId, alerts1[0]["id"].toString()) + val acknowledgeChainedAlertsResponse = entityAsMap(ackRes) + val acknowledged = acknowledgeChainedAlertsResponse["success"] as List + Assert.assertEquals(acknowledged[0], alerts1[0]["id"]) + } + + fun `test run workflow as scheduled job success`() { + val index = createTestIndex() + val docQuery1 = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery1) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + enabled = false + ) + val monitorResponse = createMonitor(monitor) + + val workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id), + enabled = true, + schedule = IntervalSchedule(1, ChronoUnit.MINUTES) + ) + + val createResponse = client().makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) + + assertEquals("Create workflow failed", RestStatus.CREATED, createResponse.restStatus()) + + val responseBody = createResponse.asMap() + val createdId = responseBody["_id"] as String + val createdVersion = responseBody["_version"] as Int + + assertNotEquals("response is missing Id", Workflow.NO_ID, createdId) + assertTrue("incorrect version", createdVersion > 0) + assertEquals("Incorrect Location header", "$WORKFLOW_ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) + + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_field" : "us-west-2" + }""" + + indexDoc(index, "1", testDoc) + OpenSearchTestCase.waitUntil({ + val findings = searchFindings(monitor.copy(id = monitorResponse.id)) + return@waitUntil (findings.size == 1) + }, 80, TimeUnit.SECONDS) + + val findings = searchFindings(monitor.copy(id = monitorResponse.id)) + assertEquals("Findings saved for test monitor", 1, findings.size) + } +} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/transport/AlertingSingleNodeTestCase.kt b/alerting/src/test/kotlin/org/opensearch/alerting/transport/AlertingSingleNodeTestCase.kt new file mode 100644 index 000000000..526adaa34 --- /dev/null +++ b/alerting/src/test/kotlin/org/opensearch/alerting/transport/AlertingSingleNodeTestCase.kt @@ -0,0 +1,503 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope +import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest +import 
org.opensearch.action.admin.indices.get.GetIndexRequest +import org.opensearch.action.admin.indices.get.GetIndexRequestBuilder +import org.opensearch.action.admin.indices.get.GetIndexResponse +import org.opensearch.action.admin.indices.refresh.RefreshAction +import org.opensearch.action.admin.indices.refresh.RefreshRequest +import org.opensearch.action.support.IndicesOptions +import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.action.ExecuteMonitorAction +import org.opensearch.alerting.action.ExecuteMonitorRequest +import org.opensearch.alerting.action.ExecuteMonitorResponse +import org.opensearch.alerting.action.ExecuteWorkflowAction +import org.opensearch.alerting.action.ExecuteWorkflowRequest +import org.opensearch.alerting.action.ExecuteWorkflowResponse +import org.opensearch.alerting.alerts.AlertIndices +import org.opensearch.alerting.model.MonitorMetadata +import org.opensearch.alerting.model.WorkflowMetadata +import org.opensearch.common.settings.Settings +import org.opensearch.common.unit.TimeValue +import org.opensearch.common.xcontent.XContentFactory +import org.opensearch.common.xcontent.XContentType +import org.opensearch.common.xcontent.json.JsonXContent +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.DeleteMonitorRequest +import org.opensearch.commons.alerting.action.DeleteWorkflowRequest +import org.opensearch.commons.alerting.action.GetFindingsRequest +import org.opensearch.commons.alerting.action.GetFindingsResponse +import org.opensearch.commons.alerting.action.GetMonitorRequest +import org.opensearch.commons.alerting.action.GetWorkflowAlertsRequest +import org.opensearch.commons.alerting.action.GetWorkflowAlertsResponse +import org.opensearch.commons.alerting.action.GetWorkflowRequest +import org.opensearch.commons.alerting.action.GetWorkflowResponse +import org.opensearch.commons.alerting.action.IndexMonitorRequest +import org.opensearch.commons.alerting.action.IndexMonitorResponse +import org.opensearch.commons.alerting.action.IndexWorkflowRequest +import org.opensearch.commons.alerting.action.IndexWorkflowResponse +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.Finding +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.Table +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.index.IndexService +import org.opensearch.index.query.BoolQueryBuilder +import org.opensearch.index.query.TermQueryBuilder +import org.opensearch.index.reindex.ReindexModulePlugin +import org.opensearch.index.seqno.SequenceNumbers +import org.opensearch.join.ParentJoinModulePlugin +import org.opensearch.painless.PainlessModulePlugin +import org.opensearch.plugins.Plugin +import org.opensearch.rest.RestRequest +import org.opensearch.script.mustache.MustacheModulePlugin +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.search.fetch.subphase.FetchSourceContext +import org.opensearch.test.OpenSearchSingleNodeTestCase +import java.time.Instant +import java.time.ZonedDateTime +import java.time.format.DateTimeFormatter +import java.time.temporal.ChronoUnit +import java.util.Locale +import java.util.concurrent.TimeUnit + +/** + * A test that keep a 
singleton node started for all tests that can be used to get + * references to Guice injectors in unit tests. + */ +@ThreadLeakScope(ThreadLeakScope.Scope.NONE) +abstract class AlertingSingleNodeTestCase : OpenSearchSingleNodeTestCase() { + + protected val index: String = randomAlphaOfLength(10).lowercase(Locale.ROOT) + + override fun setUp() { + super.setUp() + createTestIndex() + } + + protected fun getAllIndicesFromPattern(pattern: String): List { + val getIndexResponse = ( + client().admin().indices().prepareGetIndex() + .setIndices(pattern) as GetIndexRequestBuilder + ).get() as GetIndexResponse + getIndexResponse + return getIndexResponse.indices().toList() + } + + protected fun executeMonitor(monitor: Monitor, id: String?, dryRun: Boolean = true): ExecuteMonitorResponse? { + val request = ExecuteMonitorRequest(dryRun, TimeValue(Instant.now().toEpochMilli()), id, monitor) + return client().execute(ExecuteMonitorAction.INSTANCE, request).get() + } + + protected fun insertSampleTimeSerializedData(index: String, data: List) { + data.forEachIndexed { i, value -> + val twoMinsAgo = ZonedDateTime.now().minus(2, ChronoUnit.MINUTES).truncatedTo(ChronoUnit.MILLIS) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(twoMinsAgo) + val testDoc = """ + { + "test_strict_date_time": "$testTime", + "test_field_1": "$value", + "number": "$i" + } + """.trimIndent() + // Indexing documents with deterministic doc id to allow for easy selected deletion during testing + indexDoc(index, (i + 1).toString(), testDoc) + } + } + + @Suppress("UNCHECKED_CAST") + fun Map.stringMap(key: String): Map? { + val map = this as Map> + return map[key] + } + + /** A test index that can be used across tests. Feel free to add new fields but don't remove any. */ + protected fun createTestIndex() { + val mapping = XContentFactory.jsonBuilder() + mapping.startObject() + .startObject("properties") + .startObject("test_strict_date_time") + .field("type", "date") + .field("format", "strict_date_time") + .endObject() + .startObject("test_field_1") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + + createIndex( + index, Settings.EMPTY, mapping + ) + } + + protected fun createTestIndex(index: String) { + val mapping = XContentFactory.jsonBuilder() + mapping.startObject() + .startObject("properties") + .startObject("test_strict_date_time") + .field("type", "date") + .field("format", "strict_date_time") + .endObject() + .startObject("test_field_1") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + + createIndex( + index, Settings.EMPTY, mapping + ) + } + + private fun createIndex( + index: String?, + settings: Settings?, + mappings: XContentBuilder?, + ): IndexService? 
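+ // Builds a CreateIndexRequestBuilder with the given settings, attaches the mapping
+ // only when one is provided, and delegates to the test case's createIndex overload.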
{ + val createIndexRequestBuilder = client().admin().indices().prepareCreate(index).setSettings(settings) + if (mappings != null) { + createIndexRequestBuilder.setMapping(mappings) + } + return this.createIndex(index, createIndexRequestBuilder) + } + + protected fun indexDoc(index: String, id: String, doc: String) { + client().prepareIndex(index).setId(id) + .setSource(doc, XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get() + } + + protected fun assertIndexExists(index: String) { + val getIndexResponse = + client().admin().indices().getIndex( + GetIndexRequest().indices(index).indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_HIDDEN) + ).get() + assertTrue(getIndexResponse.indices.size > 0) + } + + protected fun assertIndexNotExists(index: String) { + val getIndexResponse = + client().admin().indices().getIndex( + GetIndexRequest().indices(index).indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_HIDDEN) + ).get() + assertFalse(getIndexResponse.indices.size > 0) + } + + protected fun assertAliasNotExists(alias: String) { + val aliasesResponse = client().admin().indices().getAliases(GetAliasesRequest()).get() + val foundAlias = aliasesResponse.aliases.values.forEach { + it.forEach { it1 -> + if (it1.alias == alias) { + fail("alias exists, but it shouldn't") + } + } + } + } + + protected fun assertAliasExists(alias: String) { + val aliasesResponse = client().admin().indices().getAliases(GetAliasesRequest()).get() + val foundAlias = aliasesResponse.aliases.values.forEach { + it.forEach { it1 -> + if (it1.alias == alias) { + return + } + } + } + fail("alias doesn't exists, but it should") + } + + protected fun createMonitor(monitor: Monitor): IndexMonitorResponse? { + val request = IndexMonitorRequest( + monitorId = Monitor.NO_ID, + seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO, + primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM, + refreshPolicy = WriteRequest.RefreshPolicy.parse("true"), + method = RestRequest.Method.POST, + monitor = monitor + ) + return client().execute(AlertingActions.INDEX_MONITOR_ACTION_TYPE, request).actionGet() + } + + protected fun updateMonitor(monitor: Monitor, monitorId: String): IndexMonitorResponse? { + val request = IndexMonitorRequest( + monitorId = monitorId, + seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO, + primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM, + refreshPolicy = WriteRequest.RefreshPolicy.parse("true"), + method = RestRequest.Method.PUT, + monitor = monitor + ) + return client().execute(AlertingActions.INDEX_MONITOR_ACTION_TYPE, request).actionGet() + } + + protected fun deleteMonitor(monitorId: String): Boolean { + client().execute( + AlertingActions.DELETE_MONITOR_ACTION_TYPE, DeleteMonitorRequest(monitorId, WriteRequest.RefreshPolicy.IMMEDIATE) + ).get() + return true + } + + protected fun searchAlerts( + monitorId: String, + indices: String = AlertIndices.ALERT_INDEX, + refresh: Boolean = true, + executionId: String? 
= null, + ): List { + try { + if (refresh) refreshIndex(indices) + } catch (e: Exception) { + logger.warn("Could not refresh index $indices because: ${e.message}") + return emptyList() + } + val ssb = SearchSourceBuilder() + ssb.version(true) + val bqb = BoolQueryBuilder() + bqb.must(TermQueryBuilder(Alert.MONITOR_ID_FIELD, monitorId)) + if (executionId.isNullOrEmpty() == false) { + bqb.must(TermQueryBuilder(Alert.EXECUTION_ID_FIELD, executionId)) + } + ssb.query(bqb) + val searchResponse = client().prepareSearch(indices).setRouting(monitorId).setSource(ssb).get() + + return searchResponse.hits.hits.map { + val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() } + Alert.parse(xcp, it.id, it.version) + } + } + + protected fun getWorkflowAlerts( + workflowId: String, + getAssociatedAlerts: Boolean? = true, + alertState: Alert.State? = Alert.State.ACTIVE, + alertIndex: String? = "", + associatedAlertsIndex: String? = "", + alertIds: List? = emptyList(), + table: Table? = Table("asc", "monitor_id", null, 100, 0, null), + ): GetWorkflowAlertsResponse { + return client().execute( + AlertingActions.GET_WORKFLOW_ALERTS_ACTION_TYPE, + GetWorkflowAlertsRequest( + table = table!!, + severityLevel = "ALL", + alertState = alertState!!.name, + alertIndex = alertIndex, + associatedAlertsIndex = associatedAlertsIndex, + monitorIds = emptyList(), + workflowIds = listOf(workflowId), + alertIds = alertIds, + getAssociatedAlerts = getAssociatedAlerts!! + ) + ).get() + } + + protected fun refreshIndex(index: String) { + client().execute(RefreshAction.INSTANCE, RefreshRequest(index)).get() + } + + protected fun searchFindings( + id: String, + indices: String = AlertIndices.ALL_FINDING_INDEX_PATTERN, + refresh: Boolean = true, + ): List { + if (refresh) refreshIndex(indices) + + val ssb = SearchSourceBuilder() + ssb.version(true) + ssb.query(TermQueryBuilder(Alert.MONITOR_ID_FIELD, id)) + val searchResponse = client().prepareSearch(indices).setRouting(id).setSource(ssb).get() + + return searchResponse.hits.hits.map { + val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() } + Finding.parse(xcp) + }.filter { finding -> finding.monitorId == id } + } + + protected fun getFindings( + findingId: String, + monitorId: String?, + findingIndexName: String?, + ): List { + + val getFindingsRequest = GetFindingsRequest( + findingId, + Table("asc", "monitor_id", null, 100, 0, null), + monitorId, + findingIndexName + ) + val getFindingsResponse: GetFindingsResponse = client().execute(AlertingActions.GET_FINDINGS_ACTION_TYPE, getFindingsRequest).get() + + return getFindingsResponse.findings.map { it.finding }.toList() + } + + protected fun getMonitorResponse( + monitorId: String, + version: Long = 1L, + fetchSourceContext: FetchSourceContext = FetchSourceContext.FETCH_SOURCE, + ) = client().execute( + AlertingActions.GET_MONITOR_ACTION_TYPE, + GetMonitorRequest(monitorId, version, RestRequest.Method.GET, fetchSourceContext) + ).get() + + override fun getPlugins(): List> { + return listOf( + AlertingPlugin::class.java, + ReindexModulePlugin::class.java, + MustacheModulePlugin::class.java, + PainlessModulePlugin::class.java, + ParentJoinModulePlugin::class.java + ) + } + + protected fun deleteIndex(index: String) { + val response = client().admin().indices().delete(DeleteIndexRequest(index)).get() + assertTrue("Unable to delete index", response.isAcknowledged()) + } + + override fun resetNodeAfterTest(): Boolean { + return false + } + + // merged 
WorkflowSingleNodeTestCase with this class as we are seeing test setup failures + // when multiple test classes implement AlertingSingleNodeTestCase or its child class + protected fun searchWorkflow( + id: String, + indices: String = ScheduledJob.SCHEDULED_JOBS_INDEX, + refresh: Boolean = true, + ): Workflow? { + try { + if (refresh) refreshIndex(indices) + } catch (e: Exception) { + logger.warn("Could not refresh index $indices because: ${e.message}") + return null + } + val ssb = SearchSourceBuilder() + ssb.version(true) + ssb.query(TermQueryBuilder("_id", id)) + val searchResponse = client().prepareSearch(indices).setRouting(id).setSource(ssb).get() + + return searchResponse.hits.hits.map { it -> + val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() } + lateinit var workflow: Workflow + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + xcp.nextToken() + when (xcp.currentName()) { + "workflow" -> workflow = Workflow.parse(xcp) + } + } + workflow.copy(id = it.id, version = it.version) + }.first() + } + + protected fun searchWorkflowMetadata( + id: String, + indices: String = ScheduledJob.SCHEDULED_JOBS_INDEX, + refresh: Boolean = true, + ): WorkflowMetadata? { + try { + if (refresh) refreshIndex(indices) + } catch (e: Exception) { + logger.warn("Could not refresh index $indices because: ${e.message}") + return null + } + val ssb = SearchSourceBuilder() + ssb.version(true) + ssb.query(TermQueryBuilder("workflow_metadata.workflow_id", id)) + val searchResponse = client().prepareSearch(indices).setRouting(id).setSource(ssb).get() + + return searchResponse.hits.hits.map { it -> + val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() } + lateinit var workflowMetadata: WorkflowMetadata + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + xcp.nextToken() + when (xcp.currentName()) { + "workflow_metadata" -> workflowMetadata = WorkflowMetadata.parse(xcp) + } + } + workflowMetadata.copy(id = it.id) + }.first() + } + + protected fun searchMonitorMetadata( + id: String, + indices: String = ScheduledJob.SCHEDULED_JOBS_INDEX, + refresh: Boolean = true, + ): MonitorMetadata? { + try { + if (refresh) refreshIndex(indices) + } catch (e: Exception) { + logger.warn("Could not refresh index $indices because: ${e.message}") + return null + } + val ssb = SearchSourceBuilder() + ssb.version(true) + ssb.query(TermQueryBuilder("_id", id)) + val searchResponse = client().prepareSearch(indices).setRouting(id).setSource(ssb).get() + + return searchResponse.hits.hits.map { it -> + val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() } + lateinit var monitorMetadata: MonitorMetadata + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + xcp.nextToken() + when (xcp.currentName()) { + "metadata" -> monitorMetadata = MonitorMetadata.parse(xcp) + } + } + monitorMetadata.copy(id = it.id) + }.first() + } + + protected fun upsertWorkflow( + workflow: Workflow, + id: String = Workflow.NO_ID, + method: RestRequest.Method = RestRequest.Method.POST, + ): IndexWorkflowResponse? 
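+ // Wraps the workflow in an IndexWorkflowRequest (unassigned seqNo/primary term,
+ // refresh policy parsed from "true") and runs the INDEX_WORKFLOW transport action;
+ // callers pass POST to create a new workflow and PUT with an existing id to update one.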
{ + val request = IndexWorkflowRequest( + workflowId = id, + seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO, + primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM, + refreshPolicy = WriteRequest.RefreshPolicy.parse("true"), + method = method, + workflow = workflow + ) + + return client().execute(AlertingActions.INDEX_WORKFLOW_ACTION_TYPE, request).actionGet() + } + + protected fun getWorkflowById(id: String): GetWorkflowResponse { + return client().execute( + AlertingActions.GET_WORKFLOW_ACTION_TYPE, + GetWorkflowRequest(id, RestRequest.Method.GET) + ).get() + } + + protected fun deleteWorkflow(workflowId: String, deleteDelegateMonitors: Boolean? = null) { + client().execute( + AlertingActions.DELETE_WORKFLOW_ACTION_TYPE, + DeleteWorkflowRequest(workflowId, deleteDelegateMonitors) + ).get() + } + + protected fun executeWorkflow(workflow: Workflow? = null, id: String? = null, dryRun: Boolean = true): ExecuteWorkflowResponse? { + val request = ExecuteWorkflowRequest(dryRun, TimeValue(Instant.now().toEpochMilli()), id, workflow) + return client().execute(ExecuteWorkflowAction.INSTANCE, request).get() + } + + override fun nodeSettings(): Settings { + return Settings.builder() + .put(super.nodeSettings()) + .put("opendistro.scheduled_jobs.sweeper.period", TimeValue(5, TimeUnit.SECONDS)) + .put("opendistro.scheduled_jobs.enabled", true) + .build() + } +} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/triggeraction/TriggerExpressionResolverTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/triggeraction/TriggerExpressionResolverTests.kt index 134073485..1dd19d9d1 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/triggeraction/TriggerExpressionResolverTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/triggeraction/TriggerExpressionResolverTests.kt @@ -6,8 +6,8 @@ package org.opensearch.alerting.triggeraction import org.junit.Assert -import org.opensearch.alerting.core.model.DocLevelQuery import org.opensearch.alerting.triggercondition.parsers.TriggerExpressionParser +import org.opensearch.commons.alerting.model.DocLevelQuery import org.opensearch.test.OpenSearchTestCase class TriggerExpressionResolverTests : OpenSearchTestCase() { @@ -16,8 +16,8 @@ class TriggerExpressionResolverTests : OpenSearchTestCase() { val eqString = "(query[name=sigma-123] && query[name=sigma-456])" val equation = TriggerExpressionParser(eqString).parse() val queryToDocIds = mutableMapOf>() - queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("1", "2", "3") - queryToDocIds[DocLevelQuery("", "sigma-456", "", emptyList())] = mutableSetOf("1", "2", "3") + queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") + queryToDocIds[DocLevelQuery("", "sigma-456", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] && ", equation.toString()) Assert.assertEquals(mutableSetOf("1", "2", "3"), equation.evaluate(queryToDocIds)) } @@ -26,8 +26,8 @@ class TriggerExpressionResolverTests : OpenSearchTestCase() { val eqString = "(query[name=sigma-123] && query[id=id1456])" val equation = TriggerExpressionParser(eqString).parse() val queryToDocIds = mutableMapOf>() - queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("6", "3", "7") - queryToDocIds[DocLevelQuery("id1456", "", "", emptyList())] = mutableSetOf("1", "2", "3") + queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("6", "3", "7") + 
queryToDocIds[DocLevelQuery("id1456", "", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") Assert.assertEquals("query[name=sigma-123] query[id=id1456] && ", equation.toString()) Assert.assertEquals(mutableSetOf("3"), equation.evaluate(queryToDocIds)) } @@ -36,8 +36,8 @@ class TriggerExpressionResolverTests : OpenSearchTestCase() { val eqString = "(query[name=sigma-123] && query[tag=sev2])" val equation = TriggerExpressionParser(eqString).parse() val queryToDocIds = mutableMapOf>() - queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("6", "8", "7") - queryToDocIds[DocLevelQuery("", "", "", mutableListOf("tag=sev2"))] = mutableSetOf("1", "2", "3") + queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("6", "8", "7") + queryToDocIds[DocLevelQuery("", "", listOf(), "", mutableListOf("tag=sev2"))] = mutableSetOf("1", "2", "3") Assert.assertEquals("query[name=sigma-123] query[tag=sev2] && ", equation.toString()) Assert.assertEquals(emptySet(), equation.evaluate(queryToDocIds)) } @@ -46,8 +46,8 @@ class TriggerExpressionResolverTests : OpenSearchTestCase() { val eqString = "(query[name=sigma-123] || query[name=sigma-456])" val equation = TriggerExpressionParser(eqString).parse() val queryToDocIds = mutableMapOf>() - queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("1", "2", "3") - queryToDocIds[DocLevelQuery("", "sigma-456", "", emptyList())] = mutableSetOf("1", "2", "3") + queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") + queryToDocIds[DocLevelQuery("", "sigma-456", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] || ", equation.toString()) Assert.assertEquals(mutableSetOf("1", "2", "3"), equation.evaluate(queryToDocIds)) } @@ -56,8 +56,8 @@ class TriggerExpressionResolverTests : OpenSearchTestCase() { val eqString = "(query[name=sigma-123] || query[id=id1456])" val equation = TriggerExpressionParser(eqString).parse() val queryToDocIds = mutableMapOf>() - queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("6", "3", "7") - queryToDocIds[DocLevelQuery("id1456", "", "", emptyList())] = mutableSetOf("1", "2", "3") + queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("6", "3", "7") + queryToDocIds[DocLevelQuery("id1456", "", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") Assert.assertEquals("query[name=sigma-123] query[id=id1456] || ", equation.toString()) Assert.assertEquals(mutableSetOf("6", "3", "7", "1", "2", "3"), equation.evaluate(queryToDocIds)) } @@ -66,8 +66,8 @@ class TriggerExpressionResolverTests : OpenSearchTestCase() { val eqString = "(query[name=sigma-123] || query[tag=sev2])" val equation = TriggerExpressionParser(eqString).parse() val queryToDocIds = mutableMapOf>() - queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("6", "8", "7") - queryToDocIds[DocLevelQuery("", "", "", mutableListOf("tag=sev2"))] = emptySet() + queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("6", "8", "7") + queryToDocIds[DocLevelQuery("", "", listOf(), "", mutableListOf("tag=sev2"))] = emptySet() Assert.assertEquals("query[name=sigma-123] query[tag=sev2] || ", equation.toString()) Assert.assertEquals(mutableSetOf("6", "8", "7"), equation.evaluate(queryToDocIds)) } @@ -76,8 +76,8 @@ class TriggerExpressionResolverTests : OpenSearchTestCase() { val 
eqString = "!(query[name=sigma-456])" val equation = TriggerExpressionParser(eqString).parse() val queryToDocIds = mutableMapOf>() - queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("1", "2", "3") - queryToDocIds[DocLevelQuery("", "sigma-456", "", emptyList())] = mutableSetOf("4", "5", "6") + queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") + queryToDocIds[DocLevelQuery("", "sigma-456", listOf(), "", emptyList())] = mutableSetOf("4", "5", "6") Assert.assertEquals("query[name=sigma-456] ! ", equation.toString()) Assert.assertEquals(mutableSetOf("1", "2", "3"), equation.evaluate(queryToDocIds)) } @@ -86,9 +86,9 @@ class TriggerExpressionResolverTests : OpenSearchTestCase() { val eqString = "(query[name=sigma-123] && !query[name=sigma-456])" val equation = TriggerExpressionParser(eqString).parse() val queryToDocIds = mutableMapOf>() - queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("1", "2", "3", "11") - queryToDocIds[DocLevelQuery("", "sigma-456", "", emptyList())] = mutableSetOf("3", "4", "5") - queryToDocIds[DocLevelQuery("id_new", "", "", emptyList())] = mutableSetOf("11", "12", "13") + queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3", "11") + queryToDocIds[DocLevelQuery("", "sigma-456", listOf(), "", emptyList())] = mutableSetOf("3", "4", "5") + queryToDocIds[DocLevelQuery("id_new", "", listOf(), "", emptyList())] = mutableSetOf("11", "12", "13") Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] ! && ", equation.toString()) Assert.assertEquals(mutableSetOf("1", "2", "11"), equation.evaluate(queryToDocIds)) } @@ -97,9 +97,9 @@ class TriggerExpressionResolverTests : OpenSearchTestCase() { val eqString = "(query[name=sigma-123] || !query[id=id1456])" val equation = TriggerExpressionParser(eqString).parse() val queryToDocIds = mutableMapOf>() - queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("6", "3", "7") - queryToDocIds[DocLevelQuery("id1456", "", "", emptyList())] = mutableSetOf("11", "12", "15") - queryToDocIds[DocLevelQuery("id_new", "", "", emptyList())] = mutableSetOf("11", "12", "13") + queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("6", "3", "7") + queryToDocIds[DocLevelQuery("id1456", "", listOf(), "", emptyList())] = mutableSetOf("11", "12", "15") + queryToDocIds[DocLevelQuery("id_new", "", listOf(), "", emptyList())] = mutableSetOf("11", "12", "13") Assert.assertEquals("query[name=sigma-123] query[id=id1456] ! 
|| ", equation.toString()) Assert.assertEquals(mutableSetOf("6", "3", "7", "13"), equation.evaluate(queryToDocIds)) } @@ -109,10 +109,10 @@ class TriggerExpressionResolverTests : OpenSearchTestCase() { val equation = TriggerExpressionParser(eqString).parse() val queryToDocIds = mutableMapOf>() - queryToDocIds[DocLevelQuery("", "sigma-123", "", emptyList())] = mutableSetOf("1", "2", "3") - queryToDocIds[DocLevelQuery("id_random1", "", "", mutableListOf("sev1"))] = mutableSetOf("2", "3", "4") - queryToDocIds[DocLevelQuery("", "sigma-789", "", emptyList())] = mutableSetOf("11", "12", "13") - queryToDocIds[DocLevelQuery("id-2aw34", "", "", emptyList())] = mutableSetOf("13", "14", "15") + queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") + queryToDocIds[DocLevelQuery("id_random1", "", listOf(), "", mutableListOf("sev1"))] = mutableSetOf("2", "3", "4") + queryToDocIds[DocLevelQuery("", "sigma-789", listOf(), "", emptyList())] = mutableSetOf("11", "12", "13") + queryToDocIds[DocLevelQuery("id-2aw34", "", listOf(), "", emptyList())] = mutableSetOf("13", "14", "15") Assert.assertEquals( "query[name=sigma-123] query[tag=sev1] && query[name=sigma-789] ! query[id=id-2aw34] || ! || ", diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/util/AggregationQueryRewriterTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/util/AggregationQueryRewriterTests.kt index 20ec8f983..65fd4d90b 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/util/AggregationQueryRewriterTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/util/AggregationQueryRewriterTests.kt @@ -8,17 +8,17 @@ package org.opensearch.alerting.util import org.junit.Assert import org.opensearch.action.search.SearchResponse import org.opensearch.alerting.model.InputRunResults -import org.opensearch.alerting.model.Trigger import org.opensearch.alerting.model.TriggerAfterKey import org.opensearch.alerting.randomBucketLevelTrigger import org.opensearch.alerting.randomBucketSelectorExtAggregationBuilder import org.opensearch.alerting.randomQueryLevelTrigger import org.opensearch.cluster.ClusterModule import org.opensearch.common.CheckedFunction -import org.opensearch.common.ParseField -import org.opensearch.common.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.XContentParser import org.opensearch.common.xcontent.json.JsonXContent +import org.opensearch.commons.alerting.model.Trigger +import org.opensearch.core.ParseField +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser import org.opensearch.search.aggregations.Aggregation import org.opensearch.search.aggregations.AggregationBuilder import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/util/AnomalyDetectionUtilsTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/util/AnomalyDetectionUtilsTests.kt index 931a24f35..2295c8b59 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/util/AnomalyDetectionUtilsTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/util/AnomalyDetectionUtilsTests.kt @@ -6,13 +6,13 @@ package org.opensearch.alerting.util import org.opensearch.alerting.ANOMALY_RESULT_INDEX -import org.opensearch.alerting.core.model.Input -import org.opensearch.alerting.core.model.SearchInput import org.opensearch.alerting.randomQueryLevelMonitor -import org.opensearch.common.io.stream.StreamOutput -import 
org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.commons.alerting.model.Input +import org.opensearch.commons.alerting.model.SearchInput import org.opensearch.commons.authuser.User +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder import org.opensearch.index.query.QueryBuilders import org.opensearch.search.builder.SearchSourceBuilder import org.opensearch.test.OpenSearchTestCase diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/util/IndexUtilsTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/util/IndexUtilsTests.kt index e4db20639..03f03abeb 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/util/IndexUtilsTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/util/IndexUtilsTests.kt @@ -54,7 +54,7 @@ class IndexUtilsTests : OpenSearchTestCase() { val indexContent = "{\"testIndex\":{\"settings\":{\"index\":{\"creation_date\":\"1558407515699\"," + "\"number_of_shards\":\"1\",\"number_of_replicas\":\"1\",\"uuid\":\"t-VBBW6aR6KpJ3XP5iISOA\"," + "\"version\":{\"created\":\"6040399\"},\"provided_name\":\"data_test\"}},\"mapping_version\":123," + - "\"settings_version\":123,\"mappings\":{\"_doc\":{\"properties\":{\"name\":{\"type\":\"keyword\"}}}}}}" + "\"settings_version\":123,\"aliases_version\":1,\"mappings\":{\"_doc\":{\"properties\":{\"name\":{\"type\":\"keyword\"}}}}}}" val newMapping = "{\"_meta\":{\"schema_version\":10},\"properties\":{\"name\":{\"type\":\"keyword\"}}}" val index: IndexMetadata = IndexMetadata.fromXContent(parser(indexContent)) @@ -66,7 +66,7 @@ class IndexUtilsTests : OpenSearchTestCase() { val indexContent = "{\"testIndex\":{\"settings\":{\"index\":{\"creation_date\":\"1558407515699\"," + "\"number_of_shards\":\"1\",\"number_of_replicas\":\"1\",\"uuid\":\"t-VBBW6aR6KpJ3XP5iISOA\"," + "\"version\":{\"created\":\"6040399\"},\"provided_name\":\"data_test\"}},\"mapping_version\":123," + - "\"settings_version\":123,\"mappings\":{\"_doc\":{\"_meta\":{\"schema_version\":1},\"properties\":" + + "\"settings_version\":123,\"aliases_version\":1,\"mappings\":{\"_doc\":{\"_meta\":{\"schema_version\":1},\"properties\":" + "{\"name\":{\"type\":\"keyword\"}}}}}}" val newMapping = "{\"_meta\":{\"schema_version\":10},\"properties\":{\"name\":{\"type\":\"keyword\"}}}" val index: IndexMetadata = IndexMetadata.fromXContent(parser(indexContent)) diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesWrappersIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesWrappersIT.kt new file mode 100644 index 000000000..9712b4213 --- /dev/null +++ b/alerting/src/test/kotlin/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesWrappersIT.kt @@ -0,0 +1,173 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util.clusterMetricsMonitorHelpers + +import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.randomClusterMetricsInput +import org.opensearch.alerting.util.clusterMetricsMonitorHelpers.CatIndicesResponseWrapper.Companion.WRAPPER_FIELD +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.ClusterMetricsInput +import org.opensearch.core.common.Strings +import org.opensearch.test.OpenSearchSingleNodeTestCase + +class 
CatIndicesWrappersIT : OpenSearchSingleNodeTestCase() { + private val path = ClusterMetricsInput.ClusterMetricType.CAT_INDICES.defaultPath + + fun `test CatIndicesRequestWrapper validate valid pathParams`() { + // GIVEN + val pathParams = "index1,index-name-2,index-3" + + // WHEN + val requestWrapper = CatIndicesRequestWrapper(pathParams = pathParams) + + // THEN + assertEquals(3, requestWrapper.clusterHealthRequest.indices().size) + assertEquals(3, requestWrapper.clusterStateRequest.indices().size) + assertEquals(3, requestWrapper.indexSettingsRequest.indices().size) + assertEquals(3, requestWrapper.indicesStatsRequest.indices().size) + } + + fun `test CatIndicesRequestWrapper validate without providing pathParams`() { + // GIVEN & WHEN + val requestWrapper = CatIndicesRequestWrapper() + + // THEN + assertNull(requestWrapper.clusterHealthRequest.indices()) + assertEquals(Strings.EMPTY_ARRAY, requestWrapper.clusterStateRequest.indices()) + assertEquals(Strings.EMPTY_ARRAY, requestWrapper.indexSettingsRequest.indices()) + assertNull(requestWrapper.indicesStatsRequest.indices()) + } + + fun `test CatIndicesRequestWrapper validate blank pathParams`() { + // GIVEN + val pathParams = " " + + // WHEN + val requestWrapper = CatIndicesRequestWrapper(pathParams = pathParams) + + // THEN + assertNull(requestWrapper.clusterHealthRequest.indices()) + assertEquals(Strings.EMPTY_ARRAY, requestWrapper.clusterStateRequest.indices()) + assertEquals(Strings.EMPTY_ARRAY, requestWrapper.indexSettingsRequest.indices()) + assertNull(requestWrapper.indicesStatsRequest.indices()) + } + + fun `test CatIndicesRequestWrapper validate empty pathParams`() { + // GIVEN + val pathParams = "" + + // WHEN + val requestWrapper = CatIndicesRequestWrapper(pathParams = pathParams) + + // THEN + assertNull(requestWrapper.clusterHealthRequest.indices()) + assertEquals(Strings.EMPTY_ARRAY, requestWrapper.clusterStateRequest.indices()) + assertEquals(Strings.EMPTY_ARRAY, requestWrapper.indexSettingsRequest.indices()) + assertNull(requestWrapper.indicesStatsRequest.indices()) + } + + fun `test CatIndicesRequestWrapper validate invalid pathParams`() { + // GIVEN + val pathParams = "_index1,index^2" + + // WHEN & THEN + assertThrows(IllegalArgumentException::class.java) { CatIndicesRequestWrapper(pathParams = pathParams) } + } + + suspend fun `test CatIndicesResponseWrapper returns with only indices in pathParams`() { + // GIVEN + val testIndices = (1..5).map { + "test-index${randomAlphaOfLength(10).lowercase()}" to randomIntBetween(1, 10) + }.toMap() + + testIndices.forEach { (indexName, docCount) -> + repeat(docCount) { + val docId = (it + 1).toString() + val docMessage = """ + { + "message": "$indexName doc num $docId" + } + """.trimIndent() + indexDoc(indexName, docId, docMessage) + } + } + + /* + Creating a subset of indices to use for the pathParams to test that all indices on the cluster ARE NOT returned. 
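+ Here subList(1, size - 1) keeps the middle three of the five test indices, so the first and last index created should be absent from the wrapper's response.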
+ */ + val pathParamsIndices = testIndices.keys.toList().subList(1, testIndices.size - 1) + val pathParams = pathParamsIndices.joinToString(",") + val input = randomClusterMetricsInput(path = path, pathParams = pathParams) + + // WHEN + val responseMap = (executeTransportAction(input, client())).toMap() + + // THEN + val shards = responseMap[WRAPPER_FIELD] as List> + val returnedIndices = + shards.map { (it[CatIndicesResponseWrapper.IndexInfo.INDEX_FIELD] as String) to it }.toMap() + + assertEquals(pathParamsIndices.size, returnedIndices.keys.size) + testIndices.forEach { (indexName, docCount) -> + if (pathParamsIndices.contains(indexName)) { + assertEquals( + indexName, + returnedIndices[indexName]?.get(CatIndicesResponseWrapper.IndexInfo.INDEX_FIELD) as String + ) + assertEquals( + docCount.toString(), + returnedIndices[indexName]?.get(CatIndicesResponseWrapper.IndexInfo.DOCS_COUNT_FIELD) as String + ) + } + } + } + + suspend fun `test CatIndicesResponseWrapper returns with all indices when empty pathParams`() { + // GIVEN + val testIndices = (1..5).map { + "test-index${randomAlphaOfLength(10).lowercase()}" to randomIntBetween(1, 10) + }.toMap() + + testIndices.forEach { (indexName, docCount) -> + repeat(docCount) { + val docId = (it + 1).toString() + val docMessage = """ + { + "message": "$indexName doc num $docId" + } + """.trimIndent() + indexDoc(indexName, docId, docMessage) + } + } + + val input = randomClusterMetricsInput(path = path) + + // WHEN + val responseMap = (executeTransportAction(input, client())).toMap() + + // THEN + val shards = responseMap[WRAPPER_FIELD] as List> + val returnedIndices = + shards.map { (it[CatIndicesResponseWrapper.IndexInfo.INDEX_FIELD] as String) to it }.toMap() + + assertEquals(testIndices.size, returnedIndices.keys.size) + testIndices.forEach { (indexName, docCount) -> + assertEquals( + indexName, + returnedIndices[indexName]?.get(CatIndicesResponseWrapper.IndexInfo.INDEX_FIELD) as String + ) + assertEquals( + docCount.toString(), + returnedIndices[indexName]?.get(CatIndicesResponseWrapper.IndexInfo.DOCS_COUNT_FIELD) as String + ) + } + } + + private fun indexDoc(index: String, id: String, doc: String) { + client().prepareIndex(index).setId(id) + .setSource(doc, XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get() + } +} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsWrappersIT.kt b/alerting/src/test/kotlin/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsWrappersIT.kt new file mode 100644 index 000000000..c8b5db561 --- /dev/null +++ b/alerting/src/test/kotlin/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsWrappersIT.kt @@ -0,0 +1,165 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util.clusterMetricsMonitorHelpers + +import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.randomClusterMetricsInput +import org.opensearch.alerting.util.clusterMetricsMonitorHelpers.CatShardsResponseWrapper.Companion.WRAPPER_FIELD +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.ClusterMetricsInput +import org.opensearch.core.common.Strings +import org.opensearch.test.OpenSearchSingleNodeTestCase + +class CatShardsWrappersIT : OpenSearchSingleNodeTestCase() { + private val path = ClusterMetricsInput.ClusterMetricType.CAT_SHARDS.defaultPath + + fun `test CatShardsRequestWrapper validate valid 
pathParams`() { + // GIVEN + val pathParams = "index1,index_2,index-3" + + // WHEN + val requestWrapper = CatShardsRequestWrapper(pathParams = pathParams) + + // THEN + assertEquals(3, requestWrapper.clusterStateRequest.indices().size) + assertEquals(3, requestWrapper.indicesStatsRequest.indices().size) + } + + fun `test CatShardsRequestWrapper validate without providing pathParams`() { + // GIVEN & WHEN + val requestWrapper = CatShardsRequestWrapper() + + // THEN + assertEquals(Strings.EMPTY_ARRAY, requestWrapper.clusterStateRequest.indices()) + assertNull(requestWrapper.indicesStatsRequest.indices()) + } + + fun `test CatShardsRequestWrapper validate blank pathParams`() { + // GIVEN + val pathParams = " " + + // WHEN + val requestWrapper = CatShardsRequestWrapper(pathParams = pathParams) + + // THEN + assertEquals(Strings.EMPTY_ARRAY, requestWrapper.clusterStateRequest.indices()) + assertNull(requestWrapper.indicesStatsRequest.indices()) + } + + fun `test CatShardsRequestWrapper validate empty pathParams`() { + // GIVEN + val pathParams = "" + + // WHEN + val requestWrapper = CatShardsRequestWrapper(pathParams = pathParams) + + // THEN + assertEquals(Strings.EMPTY_ARRAY, requestWrapper.clusterStateRequest.indices()) + assertNull(requestWrapper.indicesStatsRequest.indices()) + } + + fun `test CatShardsRequestWrapper validate invalid pathParams`() { + // GIVEN + val pathParams = "_index1,index^2" + + // WHEN & THEN + assertThrows(IllegalArgumentException::class.java) { CatShardsRequestWrapper(pathParams = pathParams) } + } + + suspend fun `test CatShardsResponseWrapper returns with only indices in pathParams`() { + // GIVEN + val testIndices = (1..5).map { + "test-index${randomAlphaOfLength(10).lowercase()}" to randomIntBetween(1, 10) + }.toMap() + + testIndices.forEach { (indexName, docCount) -> + repeat(docCount) { + val docId = (it + 1).toString() + val docMessage = """ + { + "message": "$indexName doc num $docId" + } + """.trimIndent() + indexDoc(indexName, docId, docMessage) + } + } + + /* + Creating a subset of indices to use for the pathParams to test that all indices on the cluster ARE NOT returned. 
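+ As above, subList(1, size - 1) selects the middle three of the five indices created, so the first and last should not appear in the response.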
+ */ + val pathParamsIndices = testIndices.keys.toList().subList(1, testIndices.size - 1) + val pathParams = pathParamsIndices.joinToString(",") + val input = randomClusterMetricsInput(path = path, pathParams = pathParams) + + // WHEN + val responseMap = (executeTransportAction(input, client())).toMap() + + // THEN + val shards = responseMap[WRAPPER_FIELD] as List> + val returnedIndices = + shards.map { (it[CatShardsResponseWrapper.ShardInfo.INDEX_FIELD] as String) to it }.toMap() + + assertEquals(pathParamsIndices.size, returnedIndices.keys.size) + testIndices.forEach { (indexName, docCount) -> + if (pathParamsIndices.contains(indexName)) { + assertEquals( + indexName, + returnedIndices[indexName]?.get(CatShardsResponseWrapper.ShardInfo.INDEX_FIELD) as String + ) + assertEquals( + docCount.toString(), + returnedIndices[indexName]?.get(CatShardsResponseWrapper.ShardInfo.DOCS_FIELD) as String + ) + } + } + } + + suspend fun `test CatShardsResponseWrapper returns with all indices when empty pathParams`() { + // GIVEN + val testIndices = (1..5).map { + "test-index${randomAlphaOfLength(10).lowercase()}" to randomIntBetween(1, 10) + }.toMap() + + testIndices.forEach { (indexName, docCount) -> + repeat(docCount) { + val docId = (it + 1).toString() + val docMessage = """ + { + "message": "$indexName doc num $docId" + } + """.trimIndent() + indexDoc(indexName, docId, docMessage) + } + } + + val input = randomClusterMetricsInput(path = path) + + // WHEN + val responseMap = (executeTransportAction(input, client())).toMap() + + // THEN + val shards = responseMap[WRAPPER_FIELD] as List> + val returnedIndices = + shards.map { (it[CatShardsResponseWrapper.ShardInfo.INDEX_FIELD] as String) to it }.toMap() + + assertEquals(testIndices.size, returnedIndices.keys.size) + testIndices.forEach { (indexName, docCount) -> + assertEquals( + indexName, + returnedIndices[indexName]?.get(CatShardsResponseWrapper.ShardInfo.INDEX_FIELD) as String + ) + assertEquals( + docCount.toString(), + returnedIndices[indexName]?.get(CatShardsResponseWrapper.ShardInfo.DOCS_FIELD) as String + ) + } + } + + private fun indexDoc(index: String, id: String, doc: String) { + client().prepareIndex(index).setId(id) + .setSource(doc, XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get() + } +} diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/util/SupportedClusterMetricsSettingsExtensionsTests.kt b/alerting/src/test/kotlin/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensionsTests.kt similarity index 98% rename from alerting/src/test/kotlin/org/opensearch/alerting/util/SupportedClusterMetricsSettingsExtensionsTests.kt rename to alerting/src/test/kotlin/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensionsTests.kt index bb59ff7d1..bfe5b8dce 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/util/SupportedClusterMetricsSettingsExtensionsTests.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensionsTests.kt @@ -3,7 +3,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -package org.opensearch.alerting.util +package org.opensearch.alerting.util.clusterMetricsMonitorHelpers import org.opensearch.test.OpenSearchTestCase diff --git a/alerting/src/test/kotlin/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilServiceIT.kt 
b/alerting/src/test/kotlin/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilServiceIT.kt index 8dd942de3..903eedb44 100644 --- a/alerting/src/test/kotlin/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilServiceIT.kt +++ b/alerting/src/test/kotlin/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilServiceIT.kt @@ -6,7 +6,6 @@ package org.opensearch.alerting.util.destinationmigration import org.opensearch.alerting.AlertingRestTestCase -import org.opensearch.alerting.core.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX import org.opensearch.alerting.makeRequest import org.opensearch.alerting.model.destination.Destination import org.opensearch.alerting.model.destination.email.Email @@ -15,12 +14,14 @@ import org.opensearch.alerting.model.destination.email.EmailEntry import org.opensearch.alerting.model.destination.email.EmailGroup import org.opensearch.alerting.model.destination.email.Recipient import org.opensearch.alerting.randomUser -import org.opensearch.alerting.toJsonString import org.opensearch.alerting.util.DestinationType import org.opensearch.client.ResponseException -import org.opensearch.rest.RestStatus +import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import org.opensearch.core.rest.RestStatus +import org.opensearch.test.OpenSearchTestCase import java.time.Instant import java.util.UUID +import java.util.concurrent.TimeUnit class DestinationMigrationUtilServiceIT : AlertingRestTestCase() { @@ -81,7 +82,9 @@ class DestinationMigrationUtilServiceIT : AlertingRestTestCase() { // Create cluster change event and wait for migration service to complete migrating data over client().updateSettings("indices.recovery.max_bytes_per_sec", "40mb") - Thread.sleep(120000) + OpenSearchTestCase.waitUntil({ + return@waitUntil false + }, 2, TimeUnit.MINUTES) for (id in ids) { val response = client().makeRequest( diff --git a/alerting/src/test/resources/sample.pem b/alerting/src/test/resources/sample.pem index 7ba92534e..a1fc20a77 100644 --- a/alerting/src/test/resources/sample.pem +++ b/alerting/src/test/resources/sample.pem @@ -1,28 +1,25 @@ -----BEGIN CERTIFICATE----- -MIIEyTCCA7GgAwIBAgIGAWLrc1O2MA0GCSqGSIb3DQEBCwUAMIGPMRMwEQYKCZIm -iZPyLGQBGRYDY29tMRcwFQYKCZImiZPyLGQBGRYHZXhhbXBsZTEZMBcGA1UECgwQ -RXhhbXBsZSBDb20gSW5jLjEhMB8GA1UECwwYRXhhbXBsZSBDb20gSW5jLiBSb290 -IENBMSEwHwYDVQQDDBhFeGFtcGxlIENvbSBJbmMuIFJvb3QgQ0EwHhcNMTgwNDIy -MDM0MzQ3WhcNMjgwNDE5MDM0MzQ3WjBeMRIwEAYKCZImiZPyLGQBGRYCZGUxDTAL -BgNVBAcMBHRlc3QxDTALBgNVBAoMBG5vZGUxDTALBgNVBAsMBG5vZGUxGzAZBgNV -BAMMEm5vZGUtMC5leGFtcGxlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAJa+f476vLB+AwK53biYByUwN+40D8jMIovGXm6wgT8+9Sbs899dDXgt -9CE1Beo65oP1+JUz4c7UHMrCY3ePiDt4cidHVzEQ2g0YoVrQWv0RedS/yx/DKhs8 -Pw1O715oftP53p/2ijD5DifFv1eKfkhFH+lwny/vMSNxellpl6NxJTiJVnQ9HYOL -gf2t971ITJHnAuuxUF48HcuNovW4rhtkXef8kaAN7cE3LU+A9T474ULNCKkEFPIl -ZAKN3iJNFdVsxrTU+CUBHzk73Do1cCkEvJZ0ZFjp0Z3y8wLY/gqWGfGVyA9l2CUq -eIZNf55PNPtGzOrvvONiui48vBKH1LsCAwEAAaOCAVkwggFVMIG8BgNVHSMEgbQw -gbGAFJI1DOAPHitF9k0583tfouYSl0BzoYGVpIGSMIGPMRMwEQYKCZImiZPyLGQB -GRYDY29tMRcwFQYKCZImiZPyLGQBGRYHZXhhbXBsZTEZMBcGA1UECgwQRXhhbXBs -ZSBDb20gSW5jLjEhMB8GA1UECwwYRXhhbXBsZSBDb20gSW5jLiBSb290IENBMSEw -HwYDVQQDDBhFeGFtcGxlIENvbSBJbmMuIFJvb3QgQ0GCAQEwHQYDVR0OBBYEFKyv -78ZmFjVKM9g7pMConYH7FVBHMAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/BAQDAgXg -MCAGA1UdJQEB/wQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjA1BgNVHREELjAsiAUq -AwQFBYISbm9kZS0wLmV4YW1wbGUuY29tgglsb2NhbGhvc3SHBH8AAAEwDQYJKoZI 
-hvcNAQELBQADggEBAIOKuyXsFfGv1hI/Lkpd/73QNqjqJdxQclX57GOMWNbOM5H0 -5/9AOIZ5JQsWULNKN77aHjLRr4owq2jGbpc/Z6kAd+eiatkcpnbtbGrhKpOtoEZy -8KuslwkeixpzLDNISSbkeLpXz4xJI1ETMN/VG8ZZP1bjzlHziHHDu0JNZ6TnNzKr -XzCGMCohFfem8vnKNnKUneMQMvXd3rzUaAgvtf7Hc2LTBlf4fZzZF1EkwdSXhaMA -1lkfHiqOBxtgeDLxCHESZ2fqgVqsWX+t3qHQfivcPW6txtDyrFPRdJOGhiMGzT/t -e/9kkAtQRgpTb3skYdIOOUOV0WGQ60kJlFhAzIs= +MIIEPDCCAySgAwIBAgIUZjrlDPP8azRDPZchA/XEsx0X2iIwDQYJKoZIhvcNAQEL +BQAwgY8xEzARBgoJkiaJk/IsZAEZFgNjb20xFzAVBgoJkiaJk/IsZAEZFgdleGFt +cGxlMRkwFwYDVQQKDBBFeGFtcGxlIENvbSBJbmMuMSEwHwYDVQQLDBhFeGFtcGxl +IENvbSBJbmMuIFJvb3QgQ0ExITAfBgNVBAMMGEV4YW1wbGUgQ29tIEluYy4gUm9v +dCBDQTAeFw0yMzA4MjkwNDIzMTJaFw0zMzA4MjYwNDIzMTJaMFcxCzAJBgNVBAYT +AmRlMQ0wCwYDVQQHDAR0ZXN0MQ0wCwYDVQQKDARub2RlMQ0wCwYDVQQLDARub2Rl +MRswGQYDVQQDDBJub2RlLTAuZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCm93kXteDQHMAvbUPNPW5pyRHKDD42XGWSgq0k1D29C/Ud +yL21HLzTJa49ZU2ldIkSKs9JqbkHdyK0o8MO6L8dotLoYbxDWbJFW8bp1w6tDTU0 +HGkn47XVu3EwbfrTENg3jFu+Oem6a/501SzITzJWtS0cn2dIFOBimTVpT/4Zv5qr +XA6Cp4biOmoTYWhi/qQl8d0IaADiqoZ1MvZbZ6x76qTrRAbg+UWkpTEXoH1xTc8n +dibR7+HP6OTqCKvo1NhE8uP4pY+fWd6b6l+KLo3IKpfTbAIJXIO+M67FLtWKtttD +ao94B069skzKk6FPgW/OZh6PRCD0oxOavV+ld2SjAgMBAAGjgcYwgcMwRwYDVR0R +BEAwPogFKgMEBQWCEm5vZGUtMC5leGFtcGxlLmNvbYIJbG9jYWxob3N0hxAAAAAA +AAAAAAAAAAAAAAABhwR/AAABMAsGA1UdDwQEAwIF4DAdBgNVHSUEFjAUBggrBgEF +BQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU0/qDQaY10jIo +wCjLUpz/HfQXyt8wHwYDVR0jBBgwFoAUF4ffoFrrZhKn1dD4uhJFPLcrAJwwDQYJ +KoZIhvcNAQELBQADggEBAD2hkndVih6TWxoe/oOW0i2Bq7ScNO/n7/yHWL04HJmR +MaHv/Xjc8zLFLgHuHaRvC02ikWIJyQf5xJt0Oqu2GVbqXH9PBGKuEP2kCsRRyU27 +zTclAzfQhqmKBTYQ/3lJ3GhRQvXIdYTe+t4aq78TCawp1nSN+vdH/1geG6QjMn5N +1FU8tovDd4x8Ib/0dv8RJx+n9gytI8n/giIaDCEbfLLpe4EkV5e5UNpOnRgJjjuy +vtZutc81TQnzBtkS9XuulovDE0qI+jQrKkKu8xgGLhgH0zxnPkKtUg2I3Aq6zl1L +zYkEOUF8Y25J6WeY88Yfnc0iigI+Pnz5NK8R9GL7TYo= -----END CERTIFICATE----- diff --git a/alerting/src/test/resources/test-kirk.jks b/alerting/src/test/resources/test-kirk.jks index 174dbda65..6dbc51e71 100644 Binary files a/alerting/src/test/resources/test-kirk.jks and b/alerting/src/test/resources/test-kirk.jks differ diff --git a/build-tools/merged-coverage.gradle b/build-tools/merged-coverage.gradle index c4e07ab75..a5bb24894 100644 --- a/build-tools/merged-coverage.gradle +++ b/build-tools/merged-coverage.gradle @@ -5,7 +5,7 @@ allprojects { plugins.withId('jacoco') { - jacoco.toolVersion = '0.8.7' + jacoco.toolVersion = '0.8.11' // For some reason this dependency isn't getting setup automatically by the jacoco plugin tasks.withType(JacocoReport) { dependsOn tasks.withType(Test) @@ -13,26 +13,18 @@ allprojects { } } -task jacocoMerge(type: JacocoMerge) { +task jacocoReport(type: JacocoReport, group: 'verification') { + description = 'Generates an aggregate report from all subprojects' gradle.projectsEvaluated { - subprojects.each { - jacocoMerge.dependsOn it.tasks.withType(JacocoReport) - jacocoMerge.executionData it.tasks.withType(JacocoReport).collect { it.executionData } + subprojects.each { + jacocoReport.dependsOn it.tasks.withType(JacocoReport) + jacocoReport.executionData it.tasks.withType(JacocoReport).collect { it.executionData } } } - doFirst { - executionData = files(executionData.findAll { it.exists() }) - } -} - -task jacocoReport(type: JacocoReport, group: 'verification') { - description = 'Generates an aggregate report from all subprojects' - dependsOn jacocoMerge - executionData jacocoMerge.destinationFile reports { - html.enabled = true // human readable - xml.enabled = true + 
html.required = true // human readable + xml.required = true } gradle.projectsEvaluated { diff --git a/build-tools/opensearchplugin-coverage.gradle b/build-tools/opensearchplugin-coverage.gradle index adea8414e..df2c0513b 100644 --- a/build-tools/opensearchplugin-coverage.gradle +++ b/build-tools/opensearchplugin-coverage.gradle @@ -61,8 +61,8 @@ jacocoTestReport { getSourceDirectories().from(sourceSets.main.allSource) getClassDirectories().from(sourceSets.main.output) reports { - html.enabled = true // human readable - xml.enabled = true // for coverlay + html.required = true // human readable + xml.required = true // for coverlay } } diff --git a/build-tools/pkgbuild.gradle b/build-tools/pkgbuild.gradle index 4ac3eb325..8a70c13e3 100644 --- a/build-tools/pkgbuild.gradle +++ b/build-tools/pkgbuild.gradle @@ -1,4 +1,4 @@ -apply plugin: 'nebula.ospackage' +apply plugin: 'com.netflix.nebula.ospackage' // This is afterEvaluate because the bundlePlugin ZIP task is updated afterEvaluate and changes the ZIP name to match the plugin name afterEvaluate { @@ -8,7 +8,7 @@ afterEvaluate { version = "${project.version}" - "-SNAPSHOT" into '/usr/share/opensearch/plugins' - from(zipTree(bundlePlugin.archivePath)) { + from(zipTree(bundlePlugin.archiveFile)) { into opensearchplugin.name } @@ -39,9 +39,8 @@ afterEvaluate { task renameRpm(type: Copy) { from("$buildDir/distributions") into("$buildDir/distributions") - include archiveName - rename archiveName, "${packageName}-${version}.rpm" - doLast { delete file("$buildDir/distributions/$archiveName") } + rename "$archiveFileName", "${packageName}-${archiveVersion}.rpm" + doLast { delete file("$buildDir/distributions/$archiveFileName") } } } @@ -52,9 +51,8 @@ afterEvaluate { task renameDeb(type: Copy) { from("$buildDir/distributions") into("$buildDir/distributions") - include archiveName - rename archiveName, "${packageName}-${version}.deb" - doLast { delete file("$buildDir/distributions/$archiveName") } + rename "$archiveFileName", "${packageName}-${archiveVersion}.deb" + doLast { delete file("$buildDir/distributions/$archiveFileName") } } } } \ No newline at end of file diff --git a/build.gradle b/build.gradle index 0e68e8f04..54c316d5c 100644 --- a/build.gradle +++ b/build.gradle @@ -7,10 +7,10 @@ buildscript { apply from: 'build-tools/repositories.gradle' ext { - opensearch_version = System.getProperty("opensearch.version", "2.1.0-SNAPSHOT") + opensearch_version = System.getProperty("opensearch.version", "3.0.0-SNAPSHOT") buildVersionQualifier = System.getProperty("build.version_qualifier", "") isSnapshot = "true" == System.getProperty("build.snapshot", "true") - // 2.1.0-SNAPSHOT -> 2.1.0.0-SNAPSHOT + // 3.0.0-SNAPSHOT -> 3.0.0.0-SNAPSHOT version_tokens = opensearch_version.tokenize('-') opensearch_build = version_tokens[0] + '.0' plugin_no_snapshot = opensearch_build @@ -23,7 +23,7 @@ buildscript { } opensearch_no_snapshot = opensearch_version.replace("-SNAPSHOT","") common_utils_version = System.getProperty("common_utils.version", opensearch_build) - kotlin_version = '1.6.10' + kotlin_version = '1.8.21' } repositories { @@ -39,7 +39,7 @@ buildscript { } plugins { - id 'nebula.ospackage' version "8.3.0" apply false + id 'com.netflix.nebula.ospackage' version "11.5.0" id "com.dorongold.task-tree" version "1.5" } @@ -48,7 +48,12 @@ apply plugin: 'jacoco' apply from: 'build-tools/merged-coverage.gradle' configurations { - ktlint + ktlint { + resolutionStrategy { + force "ch.qos.logback:logback-classic:1.3.14" + force "ch.qos.logback:logback-core:1.3.14" 
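The `ktlint` configuration here pins both logback artifacts to one version so the linter's transitive logback-classic and logback-core dependencies cannot drift apart. As a sketch only, assuming a hypothetical `build.gradle.kts` (this project uses the Groovy DSL), the same pin in Gradle's Kotlin DSL would look roughly like:

```kotlin
// Hypothetical build.gradle.kts equivalent of the Groovy block above.
configurations.named("ktlint") {
    resolutionStrategy {
        // force() overrides whatever versions ktlint's own POMs request,
        // keeping logback-classic and logback-core in lockstep.
        force("ch.qos.logback:logback-classic:1.3.14")
        force("ch.qos.logback:logback-core:1.3.14")
    }
}
```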
+ } + } } dependencies { @@ -88,8 +93,29 @@ allprojects { compileKotlin.kotlinOptions.jvmTarget = compileTestKotlin.kotlinOptions.jvmTarget = JavaVersion.VERSION_11 compileKotlin.dependsOn ktlint } + tasks.withType(AbstractArchiveTask).configureEach { + preserveFileTimestamps = false + reproducibleFileOrder = true + } } evaluationDependsOnChildren() check.dependsOn subprojects*.check + +// updateVersion: Task to auto increment to the next development iteration +task updateVersion { + onlyIf { System.getProperty('newVersion') } + doLast { + ext.newVersion = System.getProperty('newVersion') + println "Setting version to ${newVersion}." + // String tokenization to support -SNAPSHOT + ant.replaceregexp(match: opensearch_version.tokenize('-')[0], replace: newVersion.tokenize('-')[0], flags:'g', byline:true) { + fileset(dir: projectDir) { + // Include the required files that needs to be updated with new Version + include(name: "alerting/build.gradle") + } + } + ant.replaceregexp(file:'build.gradle', match: '"opensearch.version", "\\d.*"', replace: '"opensearch.version", "' + newVersion.tokenize('-')[0] + '-SNAPSHOT"', flags:'g', byline:true) + } +} \ No newline at end of file diff --git a/core/build.gradle b/core/build.gradle index bf239f8c6..b1ecf7eac 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -4,6 +4,7 @@ */ apply plugin: 'java' +apply plugin: 'opensearch.java-rest-test' apply plugin: 'org.jetbrains.kotlin.jvm' apply plugin: 'jacoco' @@ -14,8 +15,11 @@ dependencies { api "org.jetbrains.kotlin:kotlin-stdlib-jdk8:${kotlin_version}" implementation "com.cronutils:cron-utils:9.1.6" api "org.opensearch.client:opensearch-rest-client:${opensearch_version}" - implementation 'com.google.googlejavaformat:google-java-format:1.10.0' - api "org.opensearch:common-utils:${common_utils_version}" + implementation('com.google.googlejavaformat:google-java-format:1.10.0') { + exclude group: 'com.google.guava' + } + implementation 'com.google.guava:guava:32.0.1-jre' + api "org.opensearch:common-utils:${common_utils_version}@jar" implementation 'commons-validator:commons-validator:1.7' testImplementation "org.opensearch.test:framework:${opensearch_version}" diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/JobRunner.kt b/core/src/main/kotlin/org/opensearch/alerting/core/JobRunner.kt index 381b35309..c251c8c6a 100644 --- a/core/src/main/kotlin/org/opensearch/alerting/core/JobRunner.kt +++ b/core/src/main/kotlin/org/opensearch/alerting/core/JobRunner.kt @@ -5,7 +5,7 @@ package org.opensearch.alerting.core -import org.opensearch.alerting.core.model.ScheduledJob +import org.opensearch.commons.alerting.model.ScheduledJob import java.time.Instant interface JobRunner { diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/JobSweeper.kt b/core/src/main/kotlin/org/opensearch/alerting/core/JobSweeper.kt index 1a82cbc0f..b67f278b2 100644 --- a/core/src/main/kotlin/org/opensearch/alerting/core/JobSweeper.kt +++ b/core/src/main/kotlin/org/opensearch/alerting/core/JobSweeper.kt @@ -8,7 +8,6 @@ package org.opensearch.alerting.core import org.apache.logging.log4j.LogManager import org.opensearch.action.bulk.BackoffPolicy import org.opensearch.action.search.SearchRequest -import org.opensearch.alerting.core.model.ScheduledJob import org.opensearch.alerting.core.schedule.JobScheduler import org.opensearch.alerting.core.settings.ScheduledJobSettings.Companion.REQUEST_TIMEOUT import org.opensearch.alerting.core.settings.ScheduledJobSettings.Companion.SWEEPER_ENABLED @@ -24,26 +23,27 @@ import 
org.opensearch.cluster.ClusterStateListener import org.opensearch.cluster.routing.IndexShardRoutingTable import org.opensearch.cluster.routing.Murmur3HashFunction import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.Strings -import org.opensearch.common.bytes.BytesReference -import org.opensearch.common.component.LifecycleListener +import org.opensearch.common.lifecycle.LifecycleListener import org.opensearch.common.logging.Loggers import org.opensearch.common.lucene.uid.Versions import org.opensearch.common.settings.Settings import org.opensearch.common.unit.TimeValue import org.opensearch.common.util.concurrent.OpenSearchExecutors import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.core.common.Strings +import org.opensearch.core.common.bytes.BytesReference +import org.opensearch.core.index.shard.ShardId +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils import org.opensearch.index.engine.Engine import org.opensearch.index.query.BoolQueryBuilder import org.opensearch.index.query.QueryBuilders import org.opensearch.index.shard.IndexingOperationListener -import org.opensearch.index.shard.ShardId -import org.opensearch.rest.RestStatus import org.opensearch.search.builder.SearchSourceBuilder import org.opensearch.search.sort.FieldSortBuilder import org.opensearch.threadpool.Scheduler diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/JobSweeperMetrics.kt b/core/src/main/kotlin/org/opensearch/alerting/core/JobSweeperMetrics.kt index 1e2104b48..9a10586d1 100644 --- a/core/src/main/kotlin/org/opensearch/alerting/core/JobSweeperMetrics.kt +++ b/core/src/main/kotlin/org/opensearch/alerting/core/JobSweeperMetrics.kt @@ -5,12 +5,12 @@ package org.opensearch.alerting.core -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentFragment -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentFragment +import org.opensearch.core.xcontent.XContentBuilder data class JobSweeperMetrics(val lastFullSweepTimeMillis: Long, val fullSweepOnTime: Boolean) : ToXContentFragment, Writeable { diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/ScheduledJobIndices.kt b/core/src/main/kotlin/org/opensearch/alerting/core/ScheduledJobIndices.kt index 621e2361f..a71a7e64f 100644 --- a/core/src/main/kotlin/org/opensearch/alerting/core/ScheduledJobIndices.kt +++ b/core/src/main/kotlin/org/opensearch/alerting/core/ScheduledJobIndices.kt @@ -5,14 +5,14 @@ package org.opensearch.alerting.core -import org.opensearch.action.ActionListener import 
org.opensearch.action.admin.indices.create.CreateIndexRequest import org.opensearch.action.admin.indices.create.CreateIndexResponse -import org.opensearch.alerting.core.model.ScheduledJob import org.opensearch.client.AdminClient import org.opensearch.cluster.health.ClusterIndexHealth import org.opensearch.cluster.service.ClusterService import org.opensearch.common.settings.Settings +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.core.action.ActionListener /** * Initialize the OpenSearch components required to run [ScheduledJobs]. diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobStats.kt b/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobStats.kt index 93413d8f7..07792d553 100644 --- a/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobStats.kt +++ b/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobStats.kt @@ -10,11 +10,11 @@ import org.opensearch.alerting.core.JobSweeperMetrics import org.opensearch.alerting.core.resthandler.RestScheduledJobStatsHandler import org.opensearch.alerting.core.schedule.JobSchedulerMetrics import org.opensearch.cluster.node.DiscoveryNode -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentFragment -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentFragment +import org.opensearch.core.xcontent.XContentBuilder import java.util.Locale /** diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobsStatsAction.kt b/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobsStatsAction.kt index a333d5e90..698c6c44e 100644 --- a/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobsStatsAction.kt +++ b/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobsStatsAction.kt @@ -6,7 +6,7 @@ package org.opensearch.alerting.core.action.node import org.opensearch.action.ActionType -import org.opensearch.common.io.stream.Writeable +import org.opensearch.core.common.io.stream.Writeable class ScheduledJobsStatsAction : ActionType(NAME, reader) { companion object { diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobsStatsRequest.kt b/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobsStatsRequest.kt index b0d736c96..6a82e8204 100644 --- a/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobsStatsRequest.kt +++ b/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobsStatsRequest.kt @@ -6,8 +6,8 @@ package org.opensearch.alerting.core.action.node import org.opensearch.action.support.nodes.BaseNodesRequest -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import java.io.IOException /** diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobsStatsResponse.kt b/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobsStatsResponse.kt index 5f215ef4a..edfcc0cce 100644 --- 
a/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobsStatsResponse.kt +++ b/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobsStatsResponse.kt @@ -11,11 +11,11 @@ import org.opensearch.alerting.core.settings.LegacyOpenDistroScheduledJobSetting import org.opensearch.alerting.core.settings.ScheduledJobSettings import org.opensearch.cluster.ClusterName import org.opensearch.cluster.health.ClusterIndexHealth -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentFragment -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentFragment +import org.opensearch.core.xcontent.XContentBuilder /** * ScheduledJobsStatsResponse is a class that will contain all the response from each node. diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobsStatsTransportAction.kt b/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobsStatsTransportAction.kt index 39c8aad85..f2ed94623 100644 --- a/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobsStatsTransportAction.kt +++ b/core/src/main/kotlin/org/opensearch/alerting/core/action/node/ScheduledJobsStatsTransportAction.kt @@ -8,7 +8,6 @@ package org.opensearch.alerting.core.action.node import org.apache.logging.log4j.LogManager import org.opensearch.action.FailedNodeException import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.nodes.BaseNodeRequest import org.opensearch.action.support.nodes.TransportNodesAction import org.opensearch.alerting.core.JobSweeper import org.opensearch.alerting.core.JobSweeperMetrics @@ -18,9 +17,10 @@ import org.opensearch.alerting.core.schedule.JobSchedulerMetrics import org.opensearch.cluster.health.ClusterIndexHealth import org.opensearch.cluster.service.ClusterService import org.opensearch.common.inject.Inject -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput import org.opensearch.threadpool.ThreadPool +import org.opensearch.transport.TransportRequest import org.opensearch.transport.TransportService import java.io.IOException @@ -116,7 +116,7 @@ class ScheduledJobsStatsTransportAction : TransportNodesAction', '<', ' ') - -/** - * This is a data class for a URI type of input for Monitors specifically for local clusters. - */ -data class ClusterMetricsInput( - var path: String, - var pathParams: String = "", - var url: String -) : Input { - val clusterMetricType: ClusterMetricType - val constructedUri: URI - - // Verify parameters are valid during creation - init { - require(validateFields()) { - "The uri.api_type field, uri.path field, or uri.uri field must be defined." - } - - // Create an UrlValidator that only accepts "http" and "https" as valid scheme and allows local URLs. - val urlValidator = UrlValidator(arrayOf("http", "https"), UrlValidator.ALLOW_LOCAL_URLS) - - // Build url field by field if not provided as whole. 
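The `UrlValidator` configured just above comes from commons-validator (already a declared dependency of the core module); a minimal standalone sketch of its behavior, with hypothetical URLs:

```kotlin
import org.apache.commons.validator.routines.UrlValidator

fun main() {
    // Same configuration as in the init block above: only "http"/"https"
    // schemes are accepted, and ALLOW_LOCAL_URLS keeps hosts like
    // "localhost" from being rejected as invalid.
    val urlValidator = UrlValidator(arrayOf("http", "https"), UrlValidator.ALLOW_LOCAL_URLS)

    println(urlValidator.isValid("http://localhost:9200/_cluster/health")) // true
    println(urlValidator.isValid("ftp://localhost:9200/_cluster/health"))  // false: disallowed scheme
}
```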
- constructedUri = toConstructedUri() - - require(urlValidator.isValid(constructedUri.toString())) { - "Invalid URI constructed from the path and path_params inputs, or the url input." - } - - if (url.isNotEmpty() && validateFieldsNotEmpty()) - require(constructedUri == constructUrlFromInputs()) { - "The provided URL and URI fields form different URLs." - } - - require(constructedUri.host.lowercase() == SUPPORTED_HOST) { - "Only host '$SUPPORTED_HOST' is supported." - } - require(constructedUri.port == SUPPORTED_PORT) { - "Only port '$SUPPORTED_PORT' is supported." - } - - clusterMetricType = findApiType(constructedUri.path) - this.parseEmptyFields() - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString(), // path - sin.readString(), // path params - sin.readString() // url - ) - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return builder.startObject() - .startObject(URI_FIELD) - .field(API_TYPE_FIELD, clusterMetricType) - .field(PATH_FIELD, path) - .field(PATH_PARAMS_FIELD, pathParams) - .field(URL_FIELD, url) - .endObject() - .endObject() - } - - override fun name(): String { - return URI_FIELD - } - - override fun writeTo(out: StreamOutput) { - out.writeString(clusterMetricType.toString()) - out.writeString(path) - out.writeString(pathParams) - out.writeString(url) - } - - companion object { - const val SUPPORTED_SCHEME = "http" - const val SUPPORTED_HOST = "localhost" - const val SUPPORTED_PORT = 9200 - - const val API_TYPE_FIELD = "api_type" - const val PATH_FIELD = "path" - const val PATH_PARAMS_FIELD = "path_params" - const val URL_FIELD = "url" - const val URI_FIELD = "uri" - - val XCONTENT_REGISTRY = NamedXContentRegistry.Entry(Input::class.java, ParseField(URI_FIELD), CheckedFunction { parseInner(it) }) - - /** - * This parse function uses [XContentParser] to parse JSON input and store corresponding fields to create a [ClusterMetricsInput] object - */ - @JvmStatic @Throws(IOException::class) - fun parseInner(xcp: XContentParser): ClusterMetricsInput { - var path = "" - var pathParams = "" - var url = "" - - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - when (fieldName) { - PATH_FIELD -> path = xcp.text() - PATH_PARAMS_FIELD -> pathParams = xcp.text() - URL_FIELD -> url = xcp.text() - } - } - return ClusterMetricsInput(path, pathParams, url) - } - } - - /** - * Constructs the [URI] using either the provided [url], or the - * supported scheme, host, and port and provided [path]+[pathParams]. - * @return The [URI] constructed from [url] if it's defined; - * otherwise a [URI] constructed from the provided [URI] fields. - */ - private fun toConstructedUri(): URI { - return if (url.isEmpty()) { - constructUrlFromInputs() - } else { - URIBuilder(url).build() - } - } - - /** - * Isolates just the path parameters from the [ClusterMetricsInput] URI. - * @return The path parameters portion of the [ClusterMetricsInput] URI. - * @throws IllegalArgumentException if the [ClusterMetricType] requires path parameters, but none are supplied; - * or when path parameters are provided for an [ClusterMetricType] that does not use path parameters. 
- */ - fun parsePathParams(): String { - val path = this.constructedUri.path - val apiType = this.clusterMetricType - - var pathParams: String - if (this.pathParams.isNotEmpty()) { - pathParams = this.pathParams - } else { - val prependPath = if (apiType.supportsPathParams) apiType.prependPath else apiType.defaultPath - pathParams = path.removePrefix(prependPath) - pathParams = pathParams.removeSuffix(apiType.appendPath) - } - - if (pathParams.isNotEmpty()) { - pathParams = pathParams.trim('/') - ILLEGAL_PATH_PARAMETER_CHARACTERS.forEach { character -> - if (pathParams.contains(character)) - throw IllegalArgumentException( - "The provided path parameters contain invalid characters or spaces. Please omit: " + - "${ILLEGAL_PATH_PARAMETER_CHARACTERS.joinToString(" ")}" - ) - } - } - - if (apiType.requiresPathParams && pathParams.isEmpty()) - throw IllegalArgumentException("The API requires path parameters.") - if (!apiType.supportsPathParams && pathParams.isNotEmpty()) - throw IllegalArgumentException("The API does not use path parameters.") - - return pathParams - } - - /** - * Examines the path of a [ClusterMetricsInput] to determine which API is being called. - * @param uriPath The path to examine. - * @return The [ClusterMetricType] associated with the [ClusterMetricsInput] monitor. - * @throws IllegalArgumentException when the API to call cannot be determined from the URI. - */ - private fun findApiType(uriPath: String): ClusterMetricType { - var apiType = ClusterMetricType.BLANK - ClusterMetricType.values() - .filter { option -> option != ClusterMetricType.BLANK } - .forEach { option -> - if (uriPath.startsWith(option.prependPath) || uriPath.startsWith(option.defaultPath)) - apiType = option - } - if (apiType.isBlank()) - throw IllegalArgumentException("The API could not be determined from the provided URI.") - return apiType - } - - /** - * Constructs a [URI] from the supported scheme, host, and port, and the provided [path], and [pathParams]. - * @return The constructed [URI]. - */ - private fun constructUrlFromInputs(): URI { - val uriBuilder = URIBuilder() - .setScheme(SUPPORTED_SCHEME) - .setHost(SUPPORTED_HOST) - .setPort(SUPPORTED_PORT) - .setPath(path + pathParams) - return uriBuilder.build() - } - - /** - * If [url] field is empty, populates it with [constructedUri]. - * If [path] and [pathParams] are empty, populates them with values from [url]. - */ - private fun parseEmptyFields() { - if (pathParams.isEmpty()) - pathParams = this.parsePathParams() - if (path.isEmpty()) - path = if (pathParams.isEmpty()) clusterMetricType.defaultPath else clusterMetricType.prependPath - if (url.isEmpty()) - url = constructedUri.toString() - } - - /** - * Helper function to confirm at least [url], or required URI component fields are defined. - * @return TRUE if at least either [url] or the other components are provided; otherwise FALSE. - */ - private fun validateFields(): Boolean { - return url.isNotEmpty() || validateFieldsNotEmpty() - } - - /** - * Confirms that required URI component fields are defined. - * Only validating path for now, as that's the only required field. - * @return TRUE if all those fields are defined; otherwise FALSE. - */ - private fun validateFieldsNotEmpty(): Boolean { - return path.isNotEmpty() - } - - /** - * An enum class to quickly reference various supported API. 
- */ - enum class ClusterMetricType( - val defaultPath: String, - val prependPath: String, - val appendPath: String, - val supportsPathParams: Boolean, - val requiresPathParams: Boolean - ) { - BLANK("", "", "", false, false), - CAT_PENDING_TASKS( - "/_cat/pending_tasks", - "/_cat/pending_tasks", - "", - false, - false - ), - CAT_RECOVERY( - "/_cat/recovery", - "/_cat/recovery", - "", - true, - false - ), - CAT_SNAPSHOTS( - "/_cat/snapshots", - "/_cat/snapshots", - "", - true, - true - ), - CAT_TASKS( - "/_cat/tasks", - "/_cat/tasks", - "", - false, - false - ), - CLUSTER_HEALTH( - "/_cluster/health", - "/_cluster/health", - "", - true, - false - ), - CLUSTER_SETTINGS( - "/_cluster/settings", - "/_cluster/settings", - "", - false, - false - ), - CLUSTER_STATS( - "/_cluster/stats", - "/_cluster/stats", - "", - true, - false - ), - NODES_STATS( - "/_nodes/stats", - "/_nodes", - "", - false, - false - ); - - /** - * @return TRUE if the [ClusterMetricType] is [BLANK]; otherwise FALSE. - */ - fun isBlank(): Boolean { - return this === BLANK - } - } -} diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/model/DocLevelMonitorInput.kt b/core/src/main/kotlin/org/opensearch/alerting/core/model/DocLevelMonitorInput.kt deleted file mode 100644 index fbeba6007..000000000 --- a/core/src/main/kotlin/org/opensearch/alerting/core/model/DocLevelMonitorInput.kt +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.core.model - -import org.opensearch.common.CheckedFunction -import org.opensearch.common.ParseField -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken -import java.io.IOException - -data class DocLevelMonitorInput( - val description: String = NO_DESCRIPTION, - val indices: List<String>, - val queries: List<DocLevelQuery> -) : Input { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString(), // description - sin.readStringList(), // indices - sin.readList(::DocLevelQuery) // docLevelQueries - ) - - fun asTemplateArg(): Map<String, Any> { - return mapOf( - DESCRIPTION_FIELD to description, - INDICES_FIELD to indices, - QUERIES_FIELD to queries.map { it.asTemplateArg() } - ) - } - - override fun name(): String { - return DOC_LEVEL_INPUT_FIELD - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(description) - out.writeStringCollection(indices) - out.writeCollection(queries) - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .startObject(DOC_LEVEL_INPUT_FIELD) - .field(DESCRIPTION_FIELD, description) - .field(INDICES_FIELD, indices.toTypedArray()) - .field(QUERIES_FIELD, queries.toTypedArray()) - .endObject() - .endObject() - return builder - } - - companion object { - const val DESCRIPTION_FIELD = "description" - const val INDICES_FIELD = "indices" - const val DOC_LEVEL_INPUT_FIELD = "doc_level_input" - const val QUERIES_FIELD = "queries" - - const val NO_DESCRIPTION = "" - - val XCONTENT_REGISTRY = NamedXContentRegistry.Entry( - Input::class.java, - ParseField(DOC_LEVEL_INPUT_FIELD), CheckedFunction { parse(it) } - ) - -
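A rough usage sketch of the input type being removed here (index pattern and query string are hypothetical), showing the `doc_level_input` wrapper that `toXContent` produces:

```kotlin
// Hypothetical values; DocLevelQuery generates a UUID id when none is given.
val input = DocLevelMonitorInput(
    description = "error spike detector",
    indices = listOf("app-logs-*"),
    queries = listOf(DocLevelQuery(name = "sev1", query = "severity:1"))
)
// Serialized via toXContent, this renders roughly as:
// { "doc_level_input": {
//     "description": "error spike detector",
//     "indices": ["app-logs-*"],
//     "queries": [ { "id": "<uuid>", "name": "sev1",
//                    "query": "severity:1", "tags": [] } ] } }
```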
@JvmStatic @Throws(IOException::class) - fun parse(xcp: XContentParser): DocLevelMonitorInput { - var description: String = NO_DESCRIPTION - val indices: MutableList<String> = mutableListOf() - val docLevelQueries: MutableList<DocLevelQuery> = mutableListOf() - - ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - - when (fieldName) { - DESCRIPTION_FIELD -> description = xcp.text() - INDICES_FIELD -> { - ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { - indices.add(xcp.text()) - } - } - QUERIES_FIELD -> { - ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { - docLevelQueries.add(DocLevelQuery.parse(xcp)) - } - } - } - } - - return DocLevelMonitorInput(description = description, indices = indices, queries = docLevelQueries) - } - - @JvmStatic @Throws(IOException::class) - fun readFrom(sin: StreamInput): DocLevelMonitorInput { - return DocLevelMonitorInput(sin) - } - } -} diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/model/DocLevelQuery.kt b/core/src/main/kotlin/org/opensearch/alerting/core/model/DocLevelQuery.kt deleted file mode 100644 index 06d6c480b..000000000 --- a/core/src/main/kotlin/org/opensearch/alerting/core/model/DocLevelQuery.kt +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.core.model - -import com.google.common.collect.ImmutableList -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken -import java.io.IOException -import java.lang.IllegalArgumentException -import java.util.UUID - -data class DocLevelQuery( - val id: String = UUID.randomUUID().toString(), - val name: String, - val query: String, - val tags: List<String> = mutableListOf() -) : Writeable, ToXContentObject { - - init { - // Ensure the name and tags have valid characters - validateQuery(name) - for (tag in tags) { - validateQuery(tag) - } - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString(), // id - sin.readString(), // name - sin.readString(), // query - sin.readStringList() // tags - ) - - fun asTemplateArg(): Map<String, Any> { - return mapOf( - QUERY_ID_FIELD to id, - NAME_FIELD to name, - QUERY_FIELD to query, - TAGS_FIELD to tags - ) - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(id) - out.writeString(name) - out.writeString(query) - out.writeStringCollection(tags) - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .field(QUERY_ID_FIELD, id) - .field(NAME_FIELD, name) - .field(QUERY_FIELD, query) - .field(TAGS_FIELD, tags.toTypedArray()) - .endObject() - return builder - } - - companion object { - const val QUERY_ID_FIELD = "id" - const val NAME_FIELD = "name" - const val QUERY_FIELD = "query" - const val TAGS_FIELD = "tags" - const val NO_ID = "" -
val INVALID_CHARACTERS: ImmutableList<String> = ImmutableList.of(" ", "[", "]", "{", "}", "(", ")") - - @JvmStatic @Throws(IOException::class) - fun parse(xcp: XContentParser): DocLevelQuery { - var id: String = UUID.randomUUID().toString() - lateinit var query: String - lateinit var name: String - val tags: MutableList<String> = mutableListOf() - - ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - - when (fieldName) { - QUERY_ID_FIELD -> id = xcp.text() - NAME_FIELD -> { - name = xcp.text() - validateQuery(name) - } - QUERY_FIELD -> query = xcp.text() - TAGS_FIELD -> { - ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { - val tag = xcp.text() - validateQuery(tag) - tags.add(tag) - } - } - } - } - - return DocLevelQuery( - id = id, - name = name, - query = query, - tags = tags - ) - } - - @JvmStatic @Throws(IOException::class) - fun readFrom(sin: StreamInput): DocLevelQuery { - return DocLevelQuery(sin) - } - - // TODO: add test for this - private fun validateQuery(stringVal: String) { - for (inValidChar in INVALID_CHARACTERS) { - if (stringVal.contains(inValidChar)) { - throw IllegalArgumentException( - "The query name or tag, $stringVal, contains an invalid character: [' ','[',']','{','}','(',')']" - ) - } - } - } - } -} diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/model/Input.kt b/core/src/main/kotlin/org/opensearch/alerting/core/model/Input.kt deleted file mode 100644 index 06d351fb8..000000000 --- a/core/src/main/kotlin/org/opensearch/alerting/core/model/Input.kt +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.core.model - -import org.opensearch.alerting.core.model.ClusterMetricsInput.Companion.URI_FIELD -import org.opensearch.alerting.core.model.DocLevelMonitorInput.Companion.DOC_LEVEL_INPUT_FIELD -import org.opensearch.alerting.core.model.SearchInput.Companion.SEARCH_FIELD -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParser.Token -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken -import java.io.IOException - -interface Input : Writeable, ToXContentObject { - - enum class Type(val value: String) { - DOCUMENT_LEVEL_INPUT(DOC_LEVEL_INPUT_FIELD), - CLUSTER_METRICS_INPUT(URI_FIELD), - SEARCH_INPUT(SEARCH_FIELD); - - override fun toString(): String { - return value - } - } - - companion object { - - @Throws(IOException::class) - fun parse(xcp: XContentParser): Input { - ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp) - ensureExpectedToken(Token.FIELD_NAME, xcp.nextToken(), xcp) - ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp) - val input = if (xcp.currentName() == Type.SEARCH_INPUT.value) { - SearchInput.parseInner(xcp) - } else if (xcp.currentName() == Type.CLUSTER_METRICS_INPUT.value) { - ClusterMetricsInput.parseInner(xcp) - } else { - DocLevelMonitorInput.parse(xcp) - } - ensureExpectedToken(Token.END_OBJECT, xcp.nextToken(), xcp) - return input - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): Input { - return when (val type =
sin.readEnum(Input.Type::class.java)) { - Type.DOCUMENT_LEVEL_INPUT -> DocLevelMonitorInput(sin) - Type.CLUSTER_METRICS_INPUT -> ClusterMetricsInput(sin) - Type.SEARCH_INPUT -> SearchInput(sin) - // This shouldn't be reachable but ensuring exhaustiveness as Kotlin warns - // enum can be null in Java - else -> throw IllegalStateException("Unexpected input [$type] when reading Trigger") - } - } - } - - fun name(): String -} diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/model/Schedule.kt b/core/src/main/kotlin/org/opensearch/alerting/core/model/Schedule.kt deleted file mode 100644 index 7867dee07..000000000 --- a/core/src/main/kotlin/org/opensearch/alerting/core/model/Schedule.kt +++ /dev/null @@ -1,360 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.core.model - -import com.cronutils.model.CronType -import com.cronutils.model.definition.CronDefinitionBuilder -import com.cronutils.model.time.ExecutionTime -import com.cronutils.parser.CronParser -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken -import java.io.IOException -import java.time.DateTimeException -import java.time.Duration -import java.time.Instant -import java.time.ZoneId -import java.time.ZonedDateTime -import java.time.temporal.ChronoUnit -import java.time.zone.ZoneRulesException -import java.util.Locale - -sealed class Schedule : Writeable, ToXContentObject { - enum class TYPE { CRON, INTERVAL } - companion object { - const val CRON_FIELD = "cron" - const val EXPRESSION_FIELD = "expression" - const val TIMEZONE_FIELD = "timezone" - const val PERIOD_FIELD = "period" - const val INTERVAL_FIELD = "interval" - const val UNIT_FIELD = "unit" - - val cronParser = CronParser(CronDefinitionBuilder.instanceDefinitionFor(CronType.UNIX)) - - @JvmStatic @Throws(IOException::class) - fun parse(xcp: XContentParser): Schedule { - var expression: String? = null - var timezone: ZoneId? = null - var interval: Int? = null - var unit: ChronoUnit? = null - var schedule: Schedule? = null - var type: TYPE? = null - ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldname = xcp.currentName() - xcp.nextToken()
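Concretely, `Schedule.parse` accepts exactly one of two document shapes; a sketch of both (expression, timezone, interval, and unit values are hypothetical):

```kotlin
// A cron schedule: fires per the UNIX cron expression in the given timezone.
val cronJson = """{ "cron": { "expression": "0 * * * *", "timezone": "America/Los_Angeles" } }"""

// A period schedule: fires every <interval> <unit>.
val periodJson = """{ "period": { "interval": 5, "unit": "MINUTES" } }"""

// Supplying both "cron" and "period" in one document is rejected with
// "You can only specify one type of schedule."
```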
- // If the type field has already been set the customer has provided more than one type of schedule. - if (type != null) { - throw IllegalArgumentException("You can only specify one type of schedule.") - } - when (fieldname) { - CRON_FIELD -> { - type = TYPE.CRON - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val cronFieldName = xcp.currentName() - xcp.nextToken() - when (cronFieldName) { - EXPRESSION_FIELD -> expression = xcp.textOrNull() - TIMEZONE_FIELD -> timezone = getTimeZone(xcp.text()) - } - } - } - PERIOD_FIELD -> { - type = TYPE.INTERVAL - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val cronFieldName = xcp.currentName() - xcp.nextToken() - when (cronFieldName) { - INTERVAL_FIELD -> interval = xcp.intValue() - UNIT_FIELD -> unit = ChronoUnit.valueOf(xcp.text().uppercase(Locale.getDefault())) - } - } - } - else -> { - throw IllegalArgumentException("Invalid field: [$fieldname] found in schedule.") - } - } - } - if (type == TYPE.CRON) { - schedule = CronSchedule( - requireNotNull(expression) { "Expression in cron schedule is null." }, - requireNotNull(timezone) { "Timezone in cron schedule is null." } - ) - } else if (type == TYPE.INTERVAL) { - schedule = IntervalSchedule( - requireNotNull(interval) { "Interval in period schedule is null." }, - requireNotNull(unit) { "Unit in period schedule is null." } - ) - } - return requireNotNull(schedule) { "Schedule is null." } - } - - @JvmStatic @Throws(IllegalArgumentException::class) - private fun getTimeZone(timeZone: String): ZoneId { - try { - return ZoneId.of(timeZone) - } catch (zre: ZoneRulesException) { - throw IllegalArgumentException("Timezone $timeZone is not supported") - } catch (dte: DateTimeException) { - throw IllegalArgumentException("Timezone $timeZone is not supported") - } - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): Schedule { - val type = sin.readEnum(Schedule.TYPE::class.java) - if (type == Schedule.TYPE.CRON) - return CronSchedule(sin) - else - return IntervalSchedule(sin) - } - } - - /** - * @param enabledTime is used in IntervalSchedule to calculate next time to execute the schedule. - */ - abstract fun nextTimeToExecute(enabledTime: Instant): Duration? - - /** - * @param expectedPreviousExecutionTime is the calculated previous execution time that should always be correct, - * the first time this is called the value passed in is the enabledTime which acts as the expectedPreviousExecutionTime - */ - abstract fun getExpectedNextExecutionTime(enabledTime: Instant, expectedPreviousExecutionTime: Instant?): Instant? - - /** - * Returns the start and end time for this schedule starting at the given start time (if provided). - * If not, the start time is assumed to be the last time the Schedule would have executed (if it's a Cron schedule) - * or [Instant.now] if it's an interval schedule. - * - * If this is a schedule that runs only once this function will return [Instant.now] for both start and end time. - */ - abstract fun getPeriodStartingAt(startTime: Instant?): Pair<Instant, Instant> - - /** - * Returns the start and end time for this schedule ending at the given end time (if provided). - * If not, the end time is assumed to be the next time the Schedule would have executed (if it's a Cron schedule) - * or [Instant.now] if it's an interval schedule. - * - * If this is a schedule that runs only once this function will return [Instant.now] for both start and end time.
- */ - abstract fun getPeriodEndingAt(endTime: Instant?): Pair<Instant, Instant> - - abstract fun runningOnTime(lastExecutionTime: Instant?): Boolean -} - -/** - * @param testInstant Normally this should not be set; it is only used in unit tests to control time. - */ -data class CronSchedule( - val expression: String, - val timezone: ZoneId, - // visible for testing - @Transient val testInstant: Instant? = null -) : Schedule() { - @Transient - val executionTime: ExecutionTime = ExecutionTime.forCron(cronParser.parse(expression)) - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString(), // expression - sin.readZoneId() // timezone - ) - - companion object { - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): CronSchedule { - return CronSchedule(sin) - } - } - - /* - * @param enabledTime is not used in CronSchedule. - */ - override fun nextTimeToExecute(enabledTime: Instant): Duration? { - val zonedDateTime = ZonedDateTime.ofInstant(testInstant ?: Instant.now(), timezone) - val timeToNextExecution = executionTime.timeToNextExecution(zonedDateTime) - return timeToNextExecution.orElse(null) - } - - override fun getExpectedNextExecutionTime(enabledTime: Instant, expectedPreviousExecutionTime: Instant?): Instant? { - val zonedDateTime = ZonedDateTime.ofInstant(expectedPreviousExecutionTime ?: testInstant ?: Instant.now(), timezone) - val nextExecution = executionTime.nextExecution(zonedDateTime) - return nextExecution.orElse(null)?.toInstant() - } - - override fun getPeriodStartingAt(startTime: Instant?): Pair<Instant, Instant> { - val realStartTime = if (startTime != null) { - startTime - } else { - // Probably the first time we're running. Try to figure out the last execution time - val lastExecutionTime = executionTime.lastExecution(ZonedDateTime.now(timezone)) - // This shouldn't happen unless the cron is configured to run only once, which our current cron syntax doesn't support - if (!lastExecutionTime.isPresent) { - val currentTime = Instant.now() - return Pair(currentTime, currentTime) - } - lastExecutionTime.get().toInstant() - } - val zonedDateTime = ZonedDateTime.ofInstant(realStartTime, timezone) - val newEndTime = executionTime.nextExecution(zonedDateTime).orElse(null) - return Pair(realStartTime, newEndTime?.toInstant() ?: realStartTime) - } - - override fun getPeriodEndingAt(endTime: Instant?): Pair<Instant, Instant> { - val realEndTime = if (endTime != null) { - endTime - } else { - val nextExecutionTime = executionTime.nextExecution(ZonedDateTime.now(timezone)) - // This shouldn't happen unless the cron is configured to run only once which our current cron syntax doesn't support - if (!nextExecutionTime.isPresent) { - val currentTime = Instant.now() - return Pair(currentTime, currentTime) - } - nextExecutionTime.get().toInstant() - } - val zonedDateTime = ZonedDateTime.ofInstant(realEndTime, timezone) - val newStartTime = executionTime.lastExecution(zonedDateTime).orElse(null) - return Pair(newStartTime?.toInstant() ?: realEndTime, realEndTime) - } - - override fun runningOnTime(lastExecutionTime: Instant?): Boolean { - if (lastExecutionTime == null) { - return true - } - - val zonedDateTime = ZonedDateTime.ofInstant(testInstant ?: Instant.now(), timezone) - val expectedExecutionTime = executionTime.lastExecution(zonedDateTime) -
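The cron calculations in this class all delegate to cron-utils' `ExecutionTime` (the same classes imported at the top of this file); a small standalone sketch using a hypothetical hourly expression:

```kotlin
import com.cronutils.model.CronType
import com.cronutils.model.definition.CronDefinitionBuilder
import com.cronutils.model.time.ExecutionTime
import com.cronutils.parser.CronParser
import java.time.ZoneId
import java.time.ZonedDateTime

fun main() {
    // Same UNIX cron definition the Schedule companion builds its parser with.
    val parser = CronParser(CronDefinitionBuilder.instanceDefinitionFor(CronType.UNIX))
    val executionTime = ExecutionTime.forCron(parser.parse("0 * * * *")) // top of every hour

    val now = ZonedDateTime.now(ZoneId.of("UTC"))
    // Mirrors nextTimeToExecute: the Duration until the next firing.
    println(executionTime.timeToNextExecution(now).orElse(null))
    // Mirrors getExpectedNextExecutionTime: the Instant of the next firing.
    println(executionTime.nextExecution(now).map { it.toInstant() }.orElse(null))
}
```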
- if (!expectedExecutionTime.isPresent) { - // At this point we know lastExecutionTime is not null, this should never happen. - // If expected execution time is null, we shouldn't have executed the ScheduledJob. - return false - } - val actualExecutionTime = ZonedDateTime.ofInstant(lastExecutionTime, timezone) - - return ChronoUnit.SECONDS.between(expectedExecutionTime.get(), actualExecutionTime) == 0L - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .startObject(CRON_FIELD) - .field(EXPRESSION_FIELD, expression) - .field(TIMEZONE_FIELD, timezone.id) - .endObject() - .endObject() - return builder - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(expression) - out.writeZoneId(timezone) - } -} - -data class IntervalSchedule( - val interval: Int, - val unit: ChronoUnit, - // visible for testing - @Transient val testInstant: Instant? = null -) : Schedule() { - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readInt(), // interval - sin.readEnum(ChronoUnit::class.java) // unit - ) - companion object { - @Transient - private val SUPPORTED_UNIT = listOf(ChronoUnit.MINUTES, ChronoUnit.HOURS, ChronoUnit.DAYS) - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): IntervalSchedule { - return IntervalSchedule(sin) - } - } - - init { - if (!SUPPORTED_UNIT.contains(unit)) { - throw IllegalArgumentException("Unit $unit is not supported, expected $SUPPORTED_UNIT") - } - - if (interval <= 0) { - throw IllegalArgumentException("Interval is not allowed to be 0 or negative") - } - } - - @Transient - private val intervalInMills = Duration.of(interval.toLong(), unit).toMillis() - - override fun nextTimeToExecute(enabledTime: Instant): Duration? { - val enabledTimeEpochMillis = enabledTime.toEpochMilli() - - val currentTime = testInstant ?: Instant.now() - val delta = currentTime.toEpochMilli() - enabledTimeEpochMillis - // Remainder of the Delta time is how much we have already spent waiting. - // We need to subtract remainder of that time from the interval time to get remaining schedule time to wait. - val remainingScheduleTime = intervalInMills - delta.rem(intervalInMills) - return Duration.of(remainingScheduleTime, ChronoUnit.MILLIS) - } - - override fun getExpectedNextExecutionTime(enabledTime: Instant, expectedPreviousExecutionTime: Instant?): Instant? { - val expectedPreviousExecutionTimeEpochMillis = (expectedPreviousExecutionTime ?: enabledTime).toEpochMilli() - // We still need to calculate the delta even when using expectedPreviousExecutionTime because the initial value passed in - // is the enabledTime (which also happens with cluster/node restart) - val currentTime = testInstant ?: Instant.now() - val delta = currentTime.toEpochMilli() - expectedPreviousExecutionTimeEpochMillis
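The remainder arithmetic used by both interval methods is easier to see with concrete numbers (a sketch with hypothetical times):

```kotlin
import java.time.Duration
import java.time.temporal.ChronoUnit

fun main() {
    // A 10-minute interval schedule that was enabled 25 minutes ago.
    val intervalInMillis = Duration.of(10, ChronoUnit.MINUTES).toMillis() // 600000
    val delta = Duration.of(25, ChronoUnit.MINUTES).toMillis()            // 1500000

    // 25 minutes = 2 full intervals plus 5 minutes already spent waiting,
    // so the next execution is 10 - 5 = 5 minutes away.
    val remaining = intervalInMillis - delta.rem(intervalInMillis)
    println(Duration.ofMillis(remaining)) // PT5M
}
```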
- // Remainder of the Delta time is how much we have already spent waiting. - // We need to subtract remainder of that time from the interval time to get remaining schedule time to wait. - val remainingScheduleTime = intervalInMills - delta.rem(intervalInMills) - return Instant.ofEpochMilli(currentTime.toEpochMilli() + remainingScheduleTime) - } - - override fun getPeriodStartingAt(startTime: Instant?): Pair<Instant, Instant> { - val realStartTime = startTime ?: Instant.now() - val newEndTime = realStartTime.plusMillis(intervalInMills) - return Pair(realStartTime, newEndTime) - } - - override fun getPeriodEndingAt(endTime: Instant?): Pair<Instant, Instant> { - val realEndTime = endTime ?: Instant.now() - val newStartTime = realEndTime.minusMillis(intervalInMills) - return Pair(newStartTime, realEndTime) - } - - override fun runningOnTime(lastExecutionTime: Instant?): Boolean { - if (lastExecutionTime == null) { - return true - } - - // Make sure the lastExecutionTime is less than interval time. - val delta = ChronoUnit.MILLIS.between(lastExecutionTime, testInstant ?: Instant.now()) - return 0 < delta && delta < intervalInMills - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .startObject(PERIOD_FIELD) - .field(INTERVAL_FIELD, interval) - .field(UNIT_FIELD, unit.name) - .endObject() - .endObject() - return builder - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeInt(interval) - out.writeEnum(unit) - } -} diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/model/ScheduledJob.kt b/core/src/main/kotlin/org/opensearch/alerting/core/model/ScheduledJob.kt deleted file mode 100644 index fb595d9f0..000000000 --- a/core/src/main/kotlin/org/opensearch/alerting/core/model/ScheduledJob.kt +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.core.model - -import org.opensearch.alerting.core.model.ScheduledJob.Companion.NO_ID -import org.opensearch.alerting.core.model.ScheduledJob.Companion.NO_VERSION -import org.opensearch.alerting.core.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentObject -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParser.Token -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken -import java.io.IOException -import java.time.Instant - -/** - * A job that runs periodically in the ElasticSearch cluster. - * - * All implementations of [ScheduledJob]s are stored in the [SCHEDULED_JOBS_INDEX] index and are scheduled in a - * single global Scheduler running on each node. Each implementation should have its own separate APIs for writing, - * updating and deleting instances of that job type into the [SCHEDULED_JOBS_INDEX] index. The index is periodically - * scanned for updates which are then scheduled or unscheduled with the Scheduler. - * - * Like all documents in OpenSearch [ScheduledJob]s also have an [id] and a [version]. Jobs that have not been - * persisted in the cluster should use the special sentinel values [NO_ID] and [NO_VERSION] for these fields.
- */ -interface ScheduledJob : Writeable, ToXContentObject { - - fun toXContentWithType(builder: XContentBuilder): XContentBuilder = toXContent(builder, XCONTENT_WITH_TYPE) - - companion object { - /** The name of the ElasticSearch index in which we store jobs */ - const val SCHEDULED_JOBS_INDEX = ".opendistro-alerting-config" - const val DOC_LEVEL_QUERIES_INDEX = ".opensearch-alerting-queries" - - const val NO_ID = "" - - const val NO_VERSION = 1L - - private val XCONTENT_WITH_TYPE = ToXContent.MapParams(mapOf("with_type" to "true")) - - /** - * This function parses the job, delegating to the specific subtype parser registered in the [XContentParser.getXContentRegistry] - * at runtime. Each concrete job subclass is expected to register a parser in this registry. - * The Job's json representation is expected to be of the form: - * { "<job_type>" : { <job> } } - * - * If the job comes from an OpenSearch index its [id] and [version] can also be supplied. - */ - @Throws(IOException::class) - fun parse(xcp: XContentParser, id: String = NO_ID, version: Long = NO_VERSION): ScheduledJob { - ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp) - ensureExpectedToken(Token.FIELD_NAME, xcp.nextToken(), xcp) - ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp) - val job = xcp.namedObject(ScheduledJob::class.java, xcp.currentName(), null) - ensureExpectedToken(Token.END_OBJECT, xcp.nextToken(), xcp) - return job.fromDocument(id, version) - } - - /** - * This function parses the job, but expects the type to be passed in. This is for the specific - * use case in sweeper where we first want to check if the job is allowed to be swept before - * trying to fully parse it. If you need to parse a job, you most likely want to use - * the above parse function. - */ - @Throws(IOException::class) - fun parse(xcp: XContentParser, type: String, id: String = NO_ID, version: Long = NO_VERSION): ScheduledJob { - ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp) - val job = xcp.namedObject(ScheduledJob::class.java, type, null) - ensureExpectedToken(Token.END_OBJECT, xcp.nextToken(), xcp) - return job.fromDocument(id, version) - } - } - - /** The id of the job in the [SCHEDULED_JOBS_INDEX] or [NO_ID] if not persisted */ - val id: String - - /** The version of the job in the [SCHEDULED_JOBS_INDEX] or [NO_VERSION] if not persisted */ - val version: Long - - /** The name of the job */ - val name: String - - /** The type of the job */ - val type: String - - /** Controls whether the job will be scheduled or not */ - val enabled: Boolean - - /** The schedule for running the job */ - val schedule: Schedule - - /** The last time the job was updated */ - val lastUpdateTime: Instant - - /** The time the job was enabled */ - val enabledTime: Instant?
- - /** Copy constructor for persisted jobs */ - fun fromDocument(id: String, version: Long): ScheduledJob -} diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/model/SearchInput.kt b/core/src/main/kotlin/org/opensearch/alerting/core/model/SearchInput.kt deleted file mode 100644 index 6e2d075eb..000000000 --- a/core/src/main/kotlin/org/opensearch/alerting/core/model/SearchInput.kt +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.core.model - -import org.opensearch.common.CheckedFunction -import org.opensearch.common.ParseField -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParser.Token -import org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken -import org.opensearch.search.builder.SearchSourceBuilder -import java.io.IOException - -data class SearchInput(val indices: List<String>, val query: SearchSourceBuilder) : Input { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readStringList(), // indices - SearchSourceBuilder(sin) // query - ) - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return builder.startObject() - .startObject(SEARCH_FIELD) - .field(INDICES_FIELD, indices.toTypedArray()) - .field(QUERY_FIELD, query) - .endObject() - .endObject() - } - - override fun name(): String { - return SEARCH_FIELD - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeStringCollection(indices) - query.writeTo(out) - } - - companion object { - const val INDICES_FIELD = "indices" - const val QUERY_FIELD = "query" - const val SEARCH_FIELD = "search" - - val XCONTENT_REGISTRY = NamedXContentRegistry.Entry(Input::class.java, ParseField("search"), CheckedFunction { parseInner(it) }) - - @JvmStatic @Throws(IOException::class) - fun parseInner(xcp: XContentParser): SearchInput { - val indices = mutableListOf<String>() - lateinit var searchSourceBuilder: SearchSourceBuilder - - ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - when (fieldName) { - INDICES_FIELD -> { - ensureExpectedToken(Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != Token.END_ARRAY) { - indices.add(xcp.text()) - } - } - QUERY_FIELD -> { - searchSourceBuilder = SearchSourceBuilder.fromXContent(xcp, false) - } - } - } - - return SearchInput( - indices, - requireNotNull(searchSourceBuilder) { "SearchInput query is null" } - ) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): SearchInput { - return SearchInput(sin) - } - } -} diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/resthandler/RestScheduledJobStatsHandler.kt b/core/src/main/kotlin/org/opensearch/alerting/core/resthandler/RestScheduledJobStatsHandler.kt index 9835cd4f1..c79c260b9 100644 --- a/core/src/main/kotlin/org/opensearch/alerting/core/resthandler/RestScheduledJobStatsHandler.kt +++ b/core/src/main/kotlin/org/opensearch/alerting/core/resthandler/RestScheduledJobStatsHandler.kt @@ -8,8 +8,9 @@ package org.opensearch.alerting.core.resthandler
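These model deletions are migrations rather than removals: later hunks in this diff re-import the same types from common-utils (for example org.opensearch.commons.alerting.model.ScheduledJob in JobScheduler.kt). A minimal usage sketch against the common-utils copy, assuming it keeps the indices/query constructor shape shown above; the index pattern is hypothetical:

import org.opensearch.commons.alerting.model.SearchInput
import org.opensearch.index.query.QueryBuilders
import org.opensearch.search.builder.SearchSourceBuilder

// The same indices + query pair the deleted class modeled.
val input = SearchInput(
    indices = listOf("logs-*"),
    query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery())
)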
import org.opensearch.alerting.core.action.node.ScheduledJobsStatsAction import org.opensearch.alerting.core.action.node.ScheduledJobsStatsRequest import org.opensearch.client.node.NodeClient -import org.opensearch.common.Strings +import org.opensearch.core.common.Strings import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.BaseRestHandler.RestChannelConsumer import org.opensearch.rest.RestHandler import org.opensearch.rest.RestHandler.Route import org.opensearch.rest.RestRequest diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/schedule/JobScheduler.kt b/core/src/main/kotlin/org/opensearch/alerting/core/schedule/JobScheduler.kt index 07ab0dfdb..a4a729121 100644 --- a/core/src/main/kotlin/org/opensearch/alerting/core/schedule/JobScheduler.kt +++ b/core/src/main/kotlin/org/opensearch/alerting/core/schedule/JobScheduler.kt @@ -7,8 +7,8 @@ package org.opensearch.alerting.core.schedule import org.apache.logging.log4j.LogManager import org.opensearch.alerting.core.JobRunner -import org.opensearch.alerting.core.model.ScheduledJob import org.opensearch.common.unit.TimeValue +import org.opensearch.commons.alerting.model.ScheduledJob import org.opensearch.threadpool.Scheduler import org.opensearch.threadpool.ThreadPool import java.time.Duration diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/schedule/JobSchedulerMetrics.kt b/core/src/main/kotlin/org/opensearch/alerting/core/schedule/JobSchedulerMetrics.kt index 3213597dd..dff1ecd52 100644 --- a/core/src/main/kotlin/org/opensearch/alerting/core/schedule/JobSchedulerMetrics.kt +++ b/core/src/main/kotlin/org/opensearch/alerting/core/schedule/JobSchedulerMetrics.kt @@ -5,12 +5,12 @@ package org.opensearch.alerting.core.schedule -import org.opensearch.common.io.stream.StreamInput -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.io.stream.Writeable -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.ToXContentFragment -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentFragment +import org.opensearch.core.xcontent.XContentBuilder import java.time.Instant class JobSchedulerMetrics : ToXContentFragment, Writeable { diff --git a/core/src/main/kotlin/org/opensearch/alerting/core/settings/ScheduledJobSettings.kt b/core/src/main/kotlin/org/opensearch/alerting/core/settings/ScheduledJobSettings.kt index 19c8501c4..6bdb18bec 100644 --- a/core/src/main/kotlin/org/opensearch/alerting/core/settings/ScheduledJobSettings.kt +++ b/core/src/main/kotlin/org/opensearch/alerting/core/settings/ScheduledJobSettings.kt @@ -5,7 +5,6 @@ package org.opensearch.alerting.core.settings -import org.opensearch.alerting.core.model.ScheduledJob import org.opensearch.common.settings.Setting /** diff --git a/core/src/main/kotlin/org/opensearch/alerting/opensearchapi/OpenSearchExtensions.kt b/core/src/main/kotlin/org/opensearch/alerting/opensearchapi/OpenSearchExtensions.kt index a49181292..3e87f207f 100644 --- a/core/src/main/kotlin/org/opensearch/alerting/opensearchapi/OpenSearchExtensions.kt +++ b/core/src/main/kotlin/org/opensearch/alerting/opensearchapi/OpenSearchExtensions.kt @@ -11,32 +11,26 @@ import kotlinx.coroutines.delay import kotlinx.coroutines.withContext import org.apache.logging.log4j.Logger 
import org.opensearch.OpenSearchException -import org.opensearch.action.ActionListener import org.opensearch.action.bulk.BackoffPolicy import org.opensearch.action.search.SearchResponse import org.opensearch.action.search.ShardSearchFailure import org.opensearch.client.OpenSearchClient -import org.opensearch.common.bytes.BytesReference import org.opensearch.common.settings.Settings import org.opensearch.common.util.concurrent.ThreadContext -import org.opensearch.common.util.concurrent.ThreadContext.StoredContext -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentParserUtils import org.opensearch.common.xcontent.XContentType import org.opensearch.commons.InjectSecurity import org.opensearch.commons.authuser.User import org.opensearch.commons.notifications.NotificationsPluginInterface +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.rest.RestStatus.BAD_GATEWAY +import org.opensearch.core.rest.RestStatus.GATEWAY_TIMEOUT +import org.opensearch.core.rest.RestStatus.SERVICE_UNAVAILABLE +import org.opensearch.core.xcontent.ToXContent import org.opensearch.index.query.BoolQueryBuilder import org.opensearch.index.query.QueryBuilders -import org.opensearch.rest.RestStatus -import org.opensearch.rest.RestStatus.BAD_GATEWAY -import org.opensearch.rest.RestStatus.GATEWAY_TIMEOUT -import org.opensearch.rest.RestStatus.SERVICE_UNAVAILABLE import org.opensearch.search.builder.SearchSourceBuilder -import java.time.Instant import kotlin.coroutines.CoroutineContext import kotlin.coroutines.resume import kotlin.coroutines.resumeWithException @@ -142,43 +136,12 @@ fun SearchResponse.firstFailureOrNull(): ShardSearchFailure? { return shardFailures?.getOrNull(0) } -fun XContentParser.instant(): Instant? { - return when { - currentToken() == XContentParser.Token.VALUE_NULL -> null - currentToken().isValue -> Instant.ofEpochMilli(longValue()) - else -> { - XContentParserUtils.throwUnknownToken(currentToken(), tokenLocation) - null // unreachable - } - } -} - -fun XContentBuilder.optionalTimeField(name: String, instant: Instant?): XContentBuilder { - if (instant == null) { - return nullField(name) - } - // second name as readableName should be different than first name - return this.timeField(name, "${name}_in_millis", instant.toEpochMilli()) -} - -fun XContentBuilder.optionalUserField(name: String, user: User?): XContentBuilder { - if (user == null) { - return nullField(name) - } - return this.field(name, user) -} - fun addFilter(user: User, searchSourceBuilder: SearchSourceBuilder, fieldName: String) { val filterBackendRoles = QueryBuilders.termsQuery(fieldName, user.backendRoles) val queryBuilder = searchSourceBuilder.query() as BoolQueryBuilder searchSourceBuilder.query(queryBuilder.filter(filterBackendRoles)) } -/** - * Extension function for ES 6.3 and above that duplicates the ES 6.2 XContentBuilder.string() method. - */ -fun XContentBuilder.string(): String = BytesReference.bytes(this).utf8ToString() - /** * Converts [OpenSearchClient] methods that take a callback into a kotlin suspending function. * @@ -207,28 +170,13 @@ suspend fun NotificationsPluginInterface.suspendUntil(block: NotificationsPl }) } -/** - * Store a [ThreadContext] and restore a [ThreadContext] when the coroutine resumes on a different thread. 
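The suspendUntil helpers retained in this hunk follow the standard callback-to-coroutine bridge. A minimal sketch of the pattern, assuming the ActionListener-based client APIs; the diff elides the file's own bodies, which may differ in detail:

import kotlin.coroutines.resume
import kotlin.coroutines.resumeWithException
import kotlin.coroutines.suspendCoroutine
import org.opensearch.client.OpenSearchClient
import org.opensearch.core.action.ActionListener

// Suspends until the listener fires, turning callback-style client calls into
// ordinary suspending calls; failures surface as thrown exceptions.
suspend fun <C : OpenSearchClient, T> C.suspendUntil(block: C.(ActionListener<T>) -> Unit): T =
    suspendCoroutine { cont ->
        block(object : ActionListener<T> {
            override fun onResponse(response: T) = cont.resume(response)
            override fun onFailure(e: Exception) = cont.resumeWithException(e)
        })
    }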
- * - * @param threadContext - a [ThreadContext] instance - */ -class ElasticThreadContextElement(private val threadContext: ThreadContext) : ThreadContextElement<Unit> { - - companion object Key : CoroutineContext.Key<ElasticThreadContextElement> - private var context: StoredContext = threadContext.newStoredContext(true) - - override val key: CoroutineContext.Key<*> - get() = Key - - override fun restoreThreadContext(context: CoroutineContext, oldState: Unit) { - this.context = threadContext.stashContext() - } - - override fun updateThreadContext(context: CoroutineContext) = this.context.close() -} - -class InjectorContextElement(id: String, settings: Settings, threadContext: ThreadContext, private val roles: List<String>?) : - ThreadContextElement<Unit> { +class InjectorContextElement( + id: String, + settings: Settings, + threadContext: ThreadContext, + private val roles: List<String>?, + private val user: User? = null +) : ThreadContextElement<Unit> { companion object Key : CoroutineContext.Key<InjectorContextElement> override val key: CoroutineContext.Key<*> @@ -238,6 +186,8 @@ class InjectorContextElement(id: String, settings: Settings, threadContext: Thre override fun updateThreadContext(context: CoroutineContext) { rolesInjectorHelper.injectRoles(roles) + // Other plugins extract backend roles from this user info, so pass it along when calling their APIs + rolesInjectorHelper.injectUserInfo(user) } override fun restoreThreadContext(context: CoroutineContext, oldState: Unit) { diff --git a/core/src/main/resources/mappings/scheduled-jobs.json b/core/src/main/resources/mappings/scheduled-jobs.json index 9771686ff..2651c862e 100644 --- a/core/src/main/resources/mappings/scheduled-jobs.json +++ b/core/src/main/resources/mappings/scheduled-jobs.json @@ -1,6 +1,6 @@ { "_meta" : { - "schema_version": 5 + "schema_version": 8 }, "properties": { "monitor": { @@ -18,6 +18,15 @@ } } }, + "owner": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, "monitor_type": { "type": "keyword" }, @@ -118,6 +127,46 @@ } } }, + "data_sources": { + "properties": { + "alerts_index": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "findings_index": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "query_index": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "query_index_mapping": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } + }, "group_by_fields": { "type": "text", "fields": { "keyword": { "type": "keyword", "ignore_above": 256 } } }, @@ -250,6 +299,154 @@ } } }, + "workflow": { + "dynamic": "false", + "properties": { + "schema_version": { + "type": "integer" + }, + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "owner": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "workflow_type": { + "type": "keyword" + }, + "user": { + "properties": { + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "backend_roles": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "roles": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "custom_attribute_names": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + } + } + }, + "type": { + "type": "keyword" + }, + "enabled": { + "type":
"boolean" + }, + "audit_delegate_monitor_alerts": { + "type": "boolean" + }, + "enabled_time": { + "type": "date", + "format": "strict_date_time||epoch_millis" + }, + "last_update_time": { + "type": "date", + "format": "strict_date_time||epoch_millis" + }, + "schedule": { + "properties": { + "period": { + "properties": { + "interval": { + "type": "integer" + }, + "unit": { + "type": "keyword" + } + } + }, + "cron": { + "properties": { + "expression": { + "type": "text" + }, + "timezone": { + "type": "keyword" + } + } + } + } + }, + "inputs": { + "type": "nested", + "properties": { + "composite_input": { + "type": "nested", + "properties": { + "sequence": { + "properties": { + "delegates": { + "type": "nested", + "properties": { + "order": { + "type": "integer" + }, + "monitor_id": { + "type": "keyword" + }, + "chained_monitor_findings": { + "properties": { + "monitor_id": { + "type": "keyword" + } + } + } + } + } + } + } + } + } + } + }, + "group_by_fields": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } + }, "destination": { "dynamic": "false", "properties": { @@ -463,6 +660,33 @@ "last_run_context": { "type": "object", "enabled": false + }, + "source_to_query_index_mapping": { + "type": "object", + "enabled": false + } + } + }, + "workflow_metadata" : { + "properties": { + "workflow_id": { + "type": "keyword" + }, + "monitor_ids": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 1000 + } + } + }, + "latest_run_time": { + "type": "date", + "format": "strict_date_time||epoch_millis" + }, + "latest_execution_id": { + "type": "keyword" } } } diff --git a/core/src/main/resources/settings/doc-level-queries.json b/core/src/main/resources/settings/doc-level-queries.json new file mode 100644 index 000000000..c5cbfa445 --- /dev/null +++ b/core/src/main/resources/settings/doc-level-queries.json @@ -0,0 +1,10 @@ +{ + "index": { + "mapping": { + "total_fields": { + "limit": 10000 + } + }, + "hidden": true + } +} \ No newline at end of file diff --git a/core/src/test/kotlin/org/opensearch/alerting/core/WriteableTests.kt b/core/src/test/kotlin/org/opensearch/alerting/core/WriteableTests.kt index 32fd3d510..f48ffa370 100644 --- a/core/src/test/kotlin/org/opensearch/alerting/core/WriteableTests.kt +++ b/core/src/test/kotlin/org/opensearch/alerting/core/WriteableTests.kt @@ -9,7 +9,7 @@ import org.joda.time.DateTime import org.junit.Test import org.opensearch.alerting.core.schedule.JobSchedulerMetrics import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamInput import org.opensearch.test.OpenSearchTestCase.assertEquals class WriteableTests { diff --git a/core/src/test/kotlin/org/opensearch/alerting/core/XContentTests.kt b/core/src/test/kotlin/org/opensearch/alerting/core/XContentTests.kt deleted file mode 100644 index 610125469..000000000 --- a/core/src/test/kotlin/org/opensearch/alerting/core/XContentTests.kt +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.core - -import org.opensearch.alerting.core.model.Input -import org.opensearch.alerting.core.model.SearchInput -import org.opensearch.alerting.core.model.XContentTestBase -import org.opensearch.alerting.opensearchapi.string -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.index.query.QueryBuilders -import 
org.opensearch.search.builder.SearchSourceBuilder -import kotlin.test.Test -import kotlin.test.assertEquals - -class XContentTests : XContentTestBase { - - @Test - fun `test input parsing`() { - val input = randomInput() - - val inputString = input.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() - val parsedInput = Input.parse(parser(inputString)) - - assertEquals(input, parsedInput, "Round tripping input doesn't work") - } - - private fun randomInput(): Input { - return SearchInput( - indices = listOf("foo", "bar"), - query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) - ) - } -} diff --git a/core/src/test/kotlin/org/opensearch/alerting/core/model/ClusterMetricsInputTests.kt b/core/src/test/kotlin/org/opensearch/alerting/core/model/ClusterMetricsInputTests.kt deleted file mode 100644 index 50fa27ee1..000000000 --- a/core/src/test/kotlin/org/opensearch/alerting/core/model/ClusterMetricsInputTests.kt +++ /dev/null @@ -1,448 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.core.model - -import kotlin.test.Test -import kotlin.test.assertEquals -import kotlin.test.assertFailsWith - -class ClusterMetricsInputTests { - private var path = "/_cluster/health" - private var pathParams = "" - private var url = "" - - @Test - fun `test valid ClusterMetricsInput creation using HTTP URI component fields`() { - // GIVEN - val testUrl = "http://localhost:9200/_cluster/health" - - // WHEN - val clusterMetricsInput = ClusterMetricsInput(path, pathParams, url) - - // THEN - assertEquals(path, clusterMetricsInput.path) - assertEquals(pathParams, clusterMetricsInput.pathParams) - assertEquals(testUrl, clusterMetricsInput.url) - } - - @Test - fun `test valid ClusterMetricsInput creation using HTTP url field`() { - // GIVEN - path = "" - url = "http://localhost:9200/_cluster/health" - - // WHEN - val clusterMetricsInput = ClusterMetricsInput(path, pathParams, url) - - // THEN - assertEquals(url, clusterMetricsInput.url) - } - - @Test - fun `test valid ClusterMetricsInput creation using HTTPS url field`() { - // GIVEN - path = "" - url = "https://localhost:9200/_cluster/health" - - // WHEN - val clusterMetricsInput = ClusterMetricsInput(path, pathParams, url) - - // THEN - assertEquals(url, clusterMetricsInput.url) - } - - @Test - fun `test invalid path`() { - // GIVEN - path = "///" - - // WHEN + THEN - assertFailsWith<IllegalArgumentException>("Invalid URL.") { - ClusterMetricsInput(path, pathParams, url) - } - } - - @Test - fun `test invalid url`() { - // GIVEN - url = "///" - - // WHEN + THEN - assertFailsWith<IllegalArgumentException>("Invalid URL.") { - ClusterMetricsInput(path, pathParams, url) - } - } - - @Test - fun `test url field and URI component fields create equal URI`() { - // GIVEN - url = "http://localhost:9200/_cluster/health" - - // WHEN - val clusterMetricsInput = ClusterMetricsInput(path, pathParams, url) - - // THEN - assertEquals(path, clusterMetricsInput.path) - assertEquals(pathParams, clusterMetricsInput.pathParams) - assertEquals(url, clusterMetricsInput.url) - assertEquals(url, clusterMetricsInput.constructedUri.toString()) - } - - @Test - fun `test url field and URI component fields with path params create equal URI`() { - // GIVEN - path = "/_cluster/health/" - pathParams = "index1,index2,index3,index4,index5" - url = "http://localhost:9200/_cluster/health/index1,index2,index3,index4,index5" - - // WHEN - val clusterMetricsInput = ClusterMetricsInput(path, pathParams, url) - - // THEN - assertEquals(path, clusterMetricsInput.path) -
assertEquals(pathParams, clusterMetricsInput.pathParams) - assertEquals(url, clusterMetricsInput.url) - assertEquals(url, clusterMetricsInput.constructedUri.toString()) - } - - @Test - fun `test url field and URI component fields create different URI`() { - // GIVEN - url = "http://localhost:9200/_cluster/stats" - - // WHEN + THEN - assertFailsWith<IllegalArgumentException>("The provided URL and URI fields form different URLs.") { - ClusterMetricsInput(path, pathParams, url) - } - } - - @Test - fun `test url field and URI component fields with path params create different URI`() { - // GIVEN - pathParams = "index1,index2,index3,index4,index5" - url = "http://localhost:9200/_cluster/stats/index1,index2,index3,index4,index5" - - // WHEN + THEN - assertFailsWith<IllegalArgumentException>("The provided URL and URI fields form different URLs.") { - ClusterMetricsInput(path, pathParams, url) - } - } - - @Test - fun `test ClusterMetricsInput creation when all inputs are empty`() { - // GIVEN - path = "" - pathParams = "" - url = "" - - // WHEN + THEN - assertFailsWith<IllegalArgumentException>("The uri.api_type field, uri.path field, or uri.uri field must be defined.") { - ClusterMetricsInput(path, pathParams, url) - } - } - - @Test - fun `test ClusterMetricsInput creation when all inputs but path params are empty`() { - // GIVEN - path = "" - pathParams = "index1,index2,index3,index4,index5" - url = "" - - // WHEN + THEN - assertFailsWith<IllegalArgumentException>("The uri.api_type field, uri.path field, or uri.uri field must be defined.") { - ClusterMetricsInput(path, pathParams, url) - } - } - - @Test - fun `test invalid scheme in url field`() { - // GIVEN - path = "" - url = "invalidScheme://localhost:9200/_cluster/health" - - // WHEN + THEN - assertFailsWith<IllegalArgumentException>("Invalid URL.") { - ClusterMetricsInput(path, pathParams, url) - } - } - - @Test - fun `test invalid host in url field`() { - // GIVEN - path = "" - url = "http://127.0.0.1:9200/_cluster/health" - - // WHEN + THEN - assertFailsWith<IllegalArgumentException>("Only host '${ClusterMetricsInput.SUPPORTED_HOST}' is supported.") { - ClusterMetricsInput(path, pathParams, url) - } - } - - @Test - fun `test invalid port in url field`() { - // GIVEN - path = "" - url = "http://localhost:${ClusterMetricsInput.SUPPORTED_PORT + 1}/_cluster/health" - - // WHEN + THEN - assertFailsWith<IllegalArgumentException>("Only port '${ClusterMetricsInput.SUPPORTED_PORT}' is supported.") { - ClusterMetricsInput(path, pathParams, url) - } - } - - @Test - fun `test parsePathParams with no path params`() { - // GIVEN - val testUrl = "http://localhost:9200/_cluster/health" - val clusterMetricsInput = ClusterMetricsInput(path, pathParams, url) - - // WHEN - val params = clusterMetricsInput.parsePathParams() - - // THEN - assertEquals(pathParams, params) - assertEquals(testUrl, clusterMetricsInput.constructedUri.toString()) - } - - @Test - fun `test parsePathParams with path params as URI field`() { - // GIVEN - path = "/_cluster/health/" - pathParams = "index1,index2,index3,index4,index5" - val testUrl = "http://localhost:9200/_cluster/health/index1,index2,index3,index4,index5" - val clusterMetricsInput = ClusterMetricsInput(path, pathParams, url) - - // WHEN - val params = clusterMetricsInput.parsePathParams() - - // THEN - assertEquals(pathParams, params) - assertEquals(testUrl, clusterMetricsInput.constructedUri.toString()) - } - - @Test - fun `test parsePathParams with path params in url`() { - // GIVEN - path = "" - val testParams = "index1,index2,index3,index4,index5" - url = "http://localhost:9200/_cluster/health/index1,index2,index3,index4,index5" - val clusterMetricsInput = ClusterMetricsInput(path, pathParams,
url) - - // WHEN - val params = clusterMetricsInput.parsePathParams() - - // THEN - assertEquals(testParams, params) - assertEquals(url, clusterMetricsInput.constructedUri.toString()) - } - - @Test - fun `test parsePathParams with no path params for ApiType that requires path params`() { - // GIVEN - path = "/_cat/snapshots" - - // WHEN + THEN - assertFailsWith<IllegalArgumentException>("The API requires path parameters.") { - ClusterMetricsInput(path, pathParams, url) - } - } - - @Test - fun `test parsePathParams with path params for ApiType that doesn't support path params`() { - // GIVEN - path = "/_cluster/settings" - pathParams = "index1,index2,index3,index4,index5" - val clusterMetricsInput = ClusterMetricsInput(path, pathParams, url) - - // WHEN + THEN - assertFailsWith<IllegalArgumentException>("The API does not use path parameters.") { - clusterMetricsInput.parsePathParams() - } - } - - @Test - fun `test parsePathParams with path params containing illegal characters`() { - var testCount = 0 // Count each illegal character exercised so the final assertion can verify full coverage - ILLEGAL_PATH_PARAMETER_CHARACTERS.forEach { character -> - // GIVEN - pathParams = "index1,index2,$character,index4,index5" - val clusterMetricsInput = ClusterMetricsInput(path, pathParams, url) - - // WHEN + THEN - assertFailsWith<IllegalArgumentException>( - "The provided path parameters contain invalid characters or spaces. Please omit: " + - "${ILLEGAL_PATH_PARAMETER_CHARACTERS.joinToString(" ")}" - ) { - clusterMetricsInput.parsePathParams() - } - testCount++ - } - assertEquals(ILLEGAL_PATH_PARAMETER_CHARACTERS.size, testCount) - } - - @Test - fun `test ClusterMetricsInput correctly determines ApiType when path is provided as URI component`() { - var testCount = 1 // Start off with count of 1 to account for ApiType.BLANK - ClusterMetricsInput.ClusterMetricType.values() - .filter { enum -> enum != ClusterMetricsInput.ClusterMetricType.BLANK } - .forEach { testApiType -> - // GIVEN - path = testApiType.defaultPath - pathParams = if (testApiType.supportsPathParams) "index1,index2,index3,index4,index5" else "" - - // WHEN - val clusterMetricsInput = ClusterMetricsInput(path, pathParams, url) - - // THEN - assertEquals(testApiType, clusterMetricsInput.clusterMetricType) - testCount++ - } - assertEquals(ClusterMetricsInput.ClusterMetricType.values().size, testCount) - } - - @Test - fun `test ClusterMetricsInput correctly determines ApiType when path and path params are provided as URI components`() { - var testCount = 1 // Start off with count of 1 to account for ApiType.BLANK - ClusterMetricsInput.ClusterMetricType.values() - .filter { enum -> enum != ClusterMetricsInput.ClusterMetricType.BLANK } - .forEach { testApiType -> - // GIVEN - path = testApiType.defaultPath - pathParams = "index1,index2,index3,index4,index5" - - // WHEN - val clusterMetricsInput = ClusterMetricsInput(path, pathParams, url) - - // THEN - assertEquals(testApiType, clusterMetricsInput.clusterMetricType) - testCount++ - } - assertEquals(ClusterMetricsInput.ClusterMetricType.values().size, testCount) - } - - @Test - fun `test ClusterMetricsInput correctly determines ApiType when path is provided in URL field`() { - var testCount = 1 // Start off with count of 1 to account for ApiType.BLANK - ClusterMetricsInput.ClusterMetricType.values() - .filter { enum -> enum != ClusterMetricsInput.ClusterMetricType.BLANK } - .forEach { testApiType -> - // GIVEN - path = "" - pathParams = if (testApiType.supportsPathParams) "index1,index2,index3,index4,index5" else "" - url = "http://localhost:9200${testApiType.defaultPath}" - - // WHEN - val
clusterMetricsInput = ClusterMetricsInput(path, pathParams, url) - - // THEN - assertEquals(testApiType, clusterMetricsInput.clusterMetricType) - testCount++ - } - assertEquals(ClusterMetricsInput.ClusterMetricType.values().size, testCount) - } - - @Test - fun `test ClusterMetricsInput correctly determines ApiType when path and path params are provided in URL field`() { - var testCount = 1 // Start off with count of 1 to account for ApiType.BLANK - ClusterMetricsInput.ClusterMetricType.values() - .filter { enum -> enum != ClusterMetricsInput.ClusterMetricType.BLANK } - .forEach { testApiType -> - // GIVEN - path = "" - pathParams = if (testApiType.supportsPathParams) "/index1,index2,index3,index4,index5" else "" - url = "http://localhost:9200${testApiType.defaultPath}$pathParams" - - // WHEN - val clusterMetricsInput = ClusterMetricsInput(path, pathParams, url) - - // THEN - assertEquals(testApiType, clusterMetricsInput.clusterMetricType) - testCount++ - } - assertEquals(ClusterMetricsInput.ClusterMetricType.values().size, testCount) - } - - @Test - fun `test ClusterMetricsInput cannot determine ApiType when invalid path is provided as URI component`() { - // GIVEN - path = "/_cat/paws" - - // WHEN + THEN - assertFailsWith<IllegalArgumentException>("The API could not be determined from the provided URI.") { - ClusterMetricsInput(path, pathParams, url) - } - } - - @Test - fun `test ClusterMetricsInput cannot determine ApiType when invalid path and path params are provided as URI components`() { - // GIVEN - path = "/_cat/paws" - pathParams = "index1,index2,index3,index4,index5" - - // WHEN + THEN - assertFailsWith<IllegalArgumentException>("The API could not be determined from the provided URI.") { - ClusterMetricsInput(path, pathParams, url) - } - } - - @Test - fun `test ClusterMetricsInput cannot determine ApiType when invalid path is provided in URL`() { - // GIVEN - path = "" - url = "http://localhost:9200/_cat/paws" - - // WHEN + THEN - assertFailsWith<IllegalArgumentException>("The API could not be determined from the provided URI.") { - ClusterMetricsInput(path, pathParams, url) - } - } - - @Test - fun `test ClusterMetricsInput cannot determine ApiType when invalid path and path params are provided in URL`() { - // GIVEN - path = "" - url = "http://localhost:9200/_cat/paws/index1,index2,index3,index4,index5" - - // WHEN + THEN - assertFailsWith<IllegalArgumentException>("The API could not be determined from the provided URI.") { - ClusterMetricsInput(path, pathParams, url) - } - } - - @Test - fun `test parseEmptyFields populates empty path and path_params when url is provided`() { - // GIVEN - path = "" - pathParams = "" - val testPath = "/_cluster/health" - val testPathParams = "index1,index2,index3,index4,index5" - url = "http://localhost:9200$testPath$testPathParams" - - // WHEN - val clusterMetricsInput = ClusterMetricsInput(path, pathParams, url) - - // THEN - assertEquals(testPath, clusterMetricsInput.path) - assertEquals(testPathParams, clusterMetricsInput.pathParams) - assertEquals(url, clusterMetricsInput.url) - } - - @Test - fun `test parseEmptyFields populates empty url field when path and path_params are provided`() { - // GIVEN - path = "/_cluster/health/" - pathParams = "index1,index2,index3,index4,index5" - val testUrl = "http://localhost:9200$path$pathParams" - - // WHEN - val clusterMetricsInput = ClusterMetricsInput(path, pathParams, url) - - // THEN - assertEquals(path, clusterMetricsInput.path) - assertEquals(pathParams, clusterMetricsInput.pathParams) - assertEquals(testUrl, clusterMetricsInput.url) - } -} diff --git
a/core/src/test/kotlin/org/opensearch/alerting/core/model/MockScheduledJob.kt b/core/src/test/kotlin/org/opensearch/alerting/core/model/MockScheduledJob.kt index 26a2c18d2..08e3fb8c4 100644 --- a/core/src/test/kotlin/org/opensearch/alerting/core/model/MockScheduledJob.kt +++ b/core/src/test/kotlin/org/opensearch/alerting/core/model/MockScheduledJob.kt @@ -1,13 +1,10 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - package org.opensearch.alerting.core.model -import org.opensearch.common.io.stream.StreamOutput -import org.opensearch.common.xcontent.ToXContent -import org.opensearch.common.xcontent.XContentBuilder +import org.opensearch.commons.alerting.model.Schedule +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder import java.io.IOException import java.time.Instant diff --git a/core/src/test/kotlin/org/opensearch/alerting/core/model/ScheduleTest.kt b/core/src/test/kotlin/org/opensearch/alerting/core/model/ScheduleTest.kt deleted file mode 100644 index 604178d31..000000000 --- a/core/src/test/kotlin/org/opensearch/alerting/core/model/ScheduleTest.kt +++ /dev/null @@ -1,355 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.core.model - -import org.opensearch.alerting.opensearchapi.string -import org.opensearch.common.xcontent.ToXContent -import java.time.Clock -import java.time.Instant -import java.time.ZoneId -import java.time.ZonedDateTime -import java.time.temporal.ChronoUnit -import kotlin.test.Test -import kotlin.test.assertEquals -import kotlin.test.assertFailsWith -import kotlin.test.assertFalse -import kotlin.test.assertNotNull -import kotlin.test.assertTrue - -class ScheduleTest : XContentTestBase { - @Test - fun `test time zone conversion`() { - val cronExpression = "31 * * * *" // Run at minute 31. - // This is 2018-09-27 20:00:58 GMT, which converts to 30 min 58 s past the hour in IST - val testInstance = Instant.ofEpochSecond(1538164858L) - - val cronSchedule = CronSchedule(cronExpression, ZoneId.of("Asia/Kolkata"), testInstance) - val nextTimeToExecute = cronSchedule.nextTimeToExecute(Instant.now()) - assertNotNull(nextTimeToExecute, "There should be next execute time.") - assertEquals(2L, nextTimeToExecute.seconds, "Execute time should be 2 seconds") - } - - @Test - fun `test time zone`() { - val cronExpression = "0 11 * * 3" // Run at 11:00 on Wednesday.
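These deleted tests pin the schedule math down to the second. A minimal sketch of the same call pattern against the common-utils schedule classes this diff migrates to, assuming nextTimeToExecute keeps its Duration-returning contract:

import java.time.Instant
import java.time.ZoneId
import org.opensearch.commons.alerting.model.CronSchedule

fun main() {
    // Fire at minute 31 of every hour, evaluated in the Asia/Kolkata zone.
    val schedule = CronSchedule("31 * * * *", ZoneId.of("Asia/Kolkata"))
    val untilNextRun = schedule.nextTimeToExecute(Instant.now())
    println("next execution in ${untilNextRun?.seconds} seconds")
}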
- // This is 2018-09-26 01:59:58 GMT, which converts to Wednesday 10:59:58 JST - val testInstance = Instant.ofEpochSecond(1537927198L) - - val cronSchedule = CronSchedule(cronExpression, ZoneId.of("Asia/Tokyo"), testInstance) - val nextTimeToExecute = cronSchedule.nextTimeToExecute(Instant.now()) - assertNotNull(nextTimeToExecute, "There should be next execute time.") - assertEquals(2L, nextTimeToExecute.seconds, "Execute time should be 2 seconds") - } - - @Test - fun `test cron calculates next time to execute after restart`() { - val cronExpression = "* * * * *" - // This is 2018-09-26 01:59:58 GMT - val testInstance = Instant.ofEpochSecond(1537927198L) - // This enabled time represents GMT: Wednesday, September 19, 2018 3:19:51 AM - val enabledTimeInstance = Instant.ofEpochSecond(1537327191) - - val cronSchedule = CronSchedule(cronExpression, ZoneId.of("America/Los_Angeles"), testInstance) - // The nextTimeToExecute should be the minute after the test instance, not enabledTimeInstance, replicating a cluster restart - val nextTimeToExecute = cronSchedule.getExpectedNextExecutionTime(enabledTimeInstance, null) - assertNotNull(nextTimeToExecute, "There should be next execute time") - assertEquals( - testInstance.plusSeconds(2L), nextTimeToExecute, - "nextTimeToExecute should be 2 seconds after test instance" - ) - } - - @Test - fun `test cron calculates next time to execute using cached previous time`() { - val cronExpression = "* * * * *" - // This is 2018-09-26 01:59:58 GMT - val previousExecutionTimeInstance = Instant.ofEpochSecond(1537927198L) - // This enabled time represents GMT: Wednesday, September 19, 2018 3:19:51 AM - val enabledTimeInstance = Instant.ofEpochSecond(1537327191) - - val cronSchedule = CronSchedule(cronExpression, ZoneId.of("America/Los_Angeles")) - // The nextTimeToExecute should be the minute after the previous execution time instance, not enabledTimeInstance - val nextTimeToExecute = cronSchedule.getExpectedNextExecutionTime(enabledTimeInstance, previousExecutionTimeInstance) - assertNotNull(nextTimeToExecute, "There should be next execute time") - assertEquals( - previousExecutionTimeInstance.plusSeconds(2L), nextTimeToExecute, - "nextTimeToExecute should be 2 seconds after previous execution time" - ) - } - - @Test - fun `test interval calculates next time to execute using enabled time`() { - // This enabled time represents 2018-09-26 01:58:58 GMT - val enabledTimeInstance = Instant.ofEpochSecond(1537927138L) - // This is 2018-09-26 01:59:59 GMT, which is 61 seconds after enabledTime - val testInstance = Instant.ofEpochSecond(1537927199L) - - val intervalSchedule = IntervalSchedule(1, ChronoUnit.MINUTES, testInstance) - - // The nextTimeToExecute should be 120 seconds after the enabled time - val nextTimeToExecute = intervalSchedule.getExpectedNextExecutionTime(enabledTimeInstance, null) - assertNotNull(nextTimeToExecute, "There should be next execute time") - assertEquals( - enabledTimeInstance.plusSeconds(120L), nextTimeToExecute, - "nextTimeToExecute should be 120 seconds after enabled time" - ) - } - - @Test - fun `test interval calculates next time to execute using cached previous time`() { - // This is 2018-09-26 01:59:58 GMT - val previousExecutionTimeInstance = Instant.ofEpochSecond(1537927198L) - // This is 2018-09-26 02:00:00 GMT - val testInstance = Instant.ofEpochSecond(1537927200L) - // This enabled time represents 2018-09-26 01:58:58 GMT - val enabledTimeInstance = Instant.ofEpochSecond(1537927138L) - - val intervalSchedule =
IntervalSchedule(1, ChronoUnit.MINUTES, testInstance) - - // The nextTimeToExecute should be the minute after the previous execution time instance - val nextTimeToExecute = intervalSchedule.getExpectedNextExecutionTime(enabledTimeInstance, previousExecutionTimeInstance) - assertNotNull(nextTimeToExecute, "There should be next execute time") - assertEquals( - previousExecutionTimeInstance.plusSeconds(60L), nextTimeToExecute, - "nextTimeToExecute should be 60 seconds after previous execution time" - ) - } - - @Test - fun `test cron schedule round trip`() { - val cronExpression = "0 * * * *" - val cronSchedule = CronSchedule(cronExpression, ZoneId.of("Asia/Tokyo")) - - val scheduleString = cronSchedule.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() - val parsedSchedule = Schedule.parse(parser(scheduleString)) - - assertTrue(parsedSchedule is CronSchedule, "Parsed schedule is not a Cron Schedule type.") - assertEquals(cronSchedule, parsedSchedule, "Round tripping Cron Schedule doesn't work") - } - - @Test - fun `test interval schedule round trip`() { - val intervalSchedule = IntervalSchedule(1, ChronoUnit.MINUTES) - - val scheduleString = intervalSchedule.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() - val parsedSchedule = Schedule.parse(parser(scheduleString)) - assertTrue(parsedSchedule is IntervalSchedule, "Parsed schedule is not an Interval Schedule type.") - assertEquals(intervalSchedule, parsedSchedule, "Round tripping Interval Schedule doesn't work") - } - - @Test - fun `test cron invalid missing timezone`() { - val scheduleString = "{\"cron\":{\"expression\":\"0 * * * *\"}}" - assertFailsWith(IllegalArgumentException::class, "Expected IllegalArgumentException") { - Schedule.parse(parser(scheduleString)) - } - } - - @Test - fun `test cron invalid timezone rule`() { - val scheduleString = "{\"cron\":{\"expression\":\"0 * * * *\",\"timezone\":\"Going/Nowhere\"}}" - assertFailsWith(IllegalArgumentException::class, "Expected IllegalArgumentException") { - Schedule.parse(parser(scheduleString)) - } - } - - @Test - fun `test cron invalid timezone offset`() { - val scheduleString = "{\"cron\":{\"expression\":\"0 * * * *\",\"timezone\":\"+++9\"}}" - assertFailsWith(IllegalArgumentException::class, "Expected IllegalArgumentException") { - Schedule.parse(parser(scheduleString)) - } - } - - @Test - fun `test invalid type`() { - val scheduleString = "{\"foobarzzz\":{\"expression\":\"0 * * * *\",\"timezone\":\"+++9\"}}" - assertFailsWith(IllegalArgumentException::class, "Expected IllegalArgumentException") { - Schedule.parse(parser(scheduleString)) - } - } - - @Test - fun `test two types`() { - val scheduleString = "{\"cron\":{\"expression\":\"0 * * * *\",\"timezone\":\"Asia/Tokyo\"}, " + - "\"period\":{\"interval\":\"1\",\"unit\":\"Minutes\"}}" - assertFailsWith(IllegalArgumentException::class, "Expected IllegalArgumentException") { - Schedule.parse(parser(scheduleString)) - } - } - - @Test - fun `test invalid cron expression`() { - val scheduleString = "{\"cron\":{\"expression\":\"5 * 1 * * *\",\"timezone\":\"Asia/Tokyo\"}}" - assertFailsWith(IllegalArgumentException::class, "Expected IllegalArgumentException") { - Schedule.parse(parser(scheduleString)) - } - } - - @Test - fun `test interval period starting at`() { - val intervalSchedule = IntervalSchedule(1, ChronoUnit.MINUTES) - - val (periodStartTime, periodEndTime) = intervalSchedule.getPeriodStartingAt(null) - - assertEquals(periodStartTime, periodEndTime.minus(1, ChronoUnit.MINUTES), "Period didn't match interval") - - val
startTime = Instant.now() - // Kotlin has destructuring declarations but no destructuring assignments? Gee, thanks... - val (periodStartTime2, _) = intervalSchedule.getPeriodStartingAt(startTime) - assertEquals(startTime, periodStartTime2, "Period doesn't start at provided start time") - } - - @Test - fun `test interval period ending at`() { - val intervalSchedule = IntervalSchedule(1, ChronoUnit.MINUTES) - - val (periodStartTime, periodEndTime) = intervalSchedule.getPeriodEndingAt(null) - - assertEquals(periodStartTime, periodEndTime.minus(1, ChronoUnit.MINUTES), "Period didn't match interval") - - val endTime = Instant.now() - // destructuring declarations but no destructuring assignments? Gee, thanks... https://youtrack.jetbrains.com/issue/KT-11362 - val (_, periodEndTime2) = intervalSchedule.getPeriodEndingAt(endTime) - assertEquals(endTime, periodEndTime2, "Period doesn't end at provided end time") - } - - @Test - fun `test cron period starting at`() { - val cronSchedule = CronSchedule("0 * * * *", ZoneId.of("Asia/Tokyo")) - - val (startTime1, endTime) = cronSchedule.getPeriodStartingAt(null) - assertTrue(startTime1 <= Instant.now(), "startTime is in future; should be the last execution time") - assertTrue(cronSchedule.executionTime.isMatch(ZonedDateTime.ofInstant(endTime, ZoneId.of("Asia/Tokyo")))) - - val (startTime, _) = cronSchedule.getPeriodStartingAt(endTime) - assertEquals(startTime, endTime, "Subsequent period doesn't start at provided end time") - } - - @Test - fun `test cron period ending at`() { - val cronSchedule = CronSchedule("0 * * * *", ZoneId.of("Asia/Tokyo")) - - val (startTime, endTime1) = cronSchedule.getPeriodEndingAt(null) - assertTrue(endTime1 >= Instant.now(), "endTime is in past; should be the next execution time") - assertTrue(cronSchedule.executionTime.isMatch(ZonedDateTime.ofInstant(startTime, ZoneId.of("Asia/Tokyo")))) - - val (_, endTime2) = cronSchedule.getPeriodEndingAt(startTime) - assertEquals(endTime2, startTime, "Previous period doesn't end at provided start time") - } - - @Test - fun `cron job not running on time`() { - val cronSchedule = createTestCronSchedule() - - val lastExecutionTime = 1539715560L - assertFalse(cronSchedule.runningOnTime(Instant.ofEpochSecond(lastExecutionTime))) - } - - @Test - fun `cron job running on time`() { - val cronSchedule = createTestCronSchedule() - - val lastExecutionTime = 1539715620L - assertTrue(cronSchedule.runningOnTime(Instant.ofEpochSecond(lastExecutionTime))) - } - - @Test - fun `period job running exactly at interval`() { - val testInstance = Instant.ofEpochSecond(1539715678L) - val enabledTime = Instant.ofEpochSecond(1539615178L) - val intervalSchedule = IntervalSchedule(1, ChronoUnit.MINUTES, testInstance) - - val nextTimeToExecute = intervalSchedule.nextTimeToExecute(enabledTime) - assertNotNull(nextTimeToExecute, "There should be next execute time.") - assertEquals(60L, nextTimeToExecute.seconds, "Expected 60 seconds but was ${nextTimeToExecute.seconds}") - } - - @Test - fun `period job 3 minutes`() { - val testInstance = Instant.ofEpochSecond(1539615226L) - val enabledTime = Instant.ofEpochSecond(1539615144L) - val intervalSchedule = IntervalSchedule(3, ChronoUnit.MINUTES, testInstance) - - val nextTimeToExecute = intervalSchedule.nextTimeToExecute(enabledTime) - assertNotNull(nextTimeToExecute, "There should be next execute time.") - assertEquals(98L, nextTimeToExecute.seconds, "Expected 98 seconds but was ${nextTimeToExecute.seconds}") - } - - @Test - fun `period job running on time`() { - val
intervalSchedule = createTestIntervalSchedule() - - val lastExecutionTime = 1539715620L - assertTrue(intervalSchedule.runningOnTime(Instant.ofEpochSecond(lastExecutionTime))) - } - - @Test - fun `period job not running on time`() { - val intervalSchedule = createTestIntervalSchedule() - - val lastExecutionTime = 1539715560L - assertFalse(intervalSchedule.runningOnTime(Instant.ofEpochSecond(lastExecutionTime))) - } - - @Test - fun `period job test null last execution time`() { - val intervalSchedule = createTestIntervalSchedule() - - assertTrue(intervalSchedule.runningOnTime(null)) - } - - @Test - fun `execution time matches across different time zones`() { - val now = Instant.now() - val pdtClock = Clock.fixed(now, ZoneId.of("America/Los_Angeles")) - val utcClock = Clock.fixed(now, ZoneId.of("UTC")) - val pdtClockCronSchedule = CronSchedule("* * * * *", ZoneId.of("America/Los_Angeles")) - val utcClockCronSchedule = CronSchedule("* * * * *", ZoneId.of("UTC")) - val pdtNextExecution = pdtClockCronSchedule.getExpectedNextExecutionTime(pdtClock.instant(), null) - val utcNextExecution = utcClockCronSchedule.getExpectedNextExecutionTime(utcClock.instant(), null) - assertEquals(pdtNextExecution, utcNextExecution) - } - - private fun createTestIntervalSchedule(): IntervalSchedule { - val testInstance = Instant.ofEpochSecond(1539715678L) - val enabledTime = Instant.ofEpochSecond(1539615146L) - val intervalSchedule = IntervalSchedule(1, ChronoUnit.MINUTES, testInstance) - - val nextTimeToExecute = intervalSchedule.nextTimeToExecute(enabledTime) - assertNotNull(nextTimeToExecute, "There should be next execute time.") - assertEquals(28L, nextTimeToExecute.seconds, "Expected 28 seconds but was ${nextTimeToExecute.seconds}") - - return intervalSchedule - } - - private fun createTestCronSchedule(): CronSchedule { - val cronExpression = "* * * * *" - val testInstance = Instant.ofEpochSecond(1539715678L) - - val cronSchedule = CronSchedule(cronExpression, ZoneId.of("UTC"), testInstance) - val nextTimeToExecute = cronSchedule.nextTimeToExecute(Instant.now()) - assertNotNull(nextTimeToExecute, "There should be next execute time.") - assertEquals(2L, nextTimeToExecute.seconds, "Execute time should be 2 seconds") - - return cronSchedule - } - - @Test - fun `test invalid interval units`() { - assertFailsWith(IllegalArgumentException::class, "Expected IllegalArgumentException") { - IntervalSchedule(1, ChronoUnit.SECONDS) - } - - assertFailsWith(IllegalArgumentException::class, "Expected IllegalArgumentException") { - IntervalSchedule(1, ChronoUnit.MONTHS) - } - - assertFailsWith(IllegalArgumentException::class, "Expected IllegalArgumentException") { - IntervalSchedule(-1, ChronoUnit.MINUTES) - } - } -} diff --git a/core/src/test/kotlin/org/opensearch/alerting/core/model/XContentTestBase.kt b/core/src/test/kotlin/org/opensearch/alerting/core/model/XContentTestBase.kt deleted file mode 100644 index 4a4140954..000000000 --- a/core/src/test/kotlin/org/opensearch/alerting/core/model/XContentTestBase.kt +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.core.model - -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.NamedXContentRegistry -import org.opensearch.common.xcontent.XContentBuilder -import org.opensearch.common.xcontent.XContentParser -import org.opensearch.common.xcontent.XContentType -import
org.opensearch.search.SearchModule - -interface XContentTestBase { - fun builder(): XContentBuilder { - return XContentBuilder.builder(XContentType.JSON.xContent()) - } - - fun parser(xc: String): XContentParser { - val parser = XContentType.JSON.xContent().createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, xc) - parser.nextToken() - return parser - } - - fun xContentRegistry(): NamedXContentRegistry { - return NamedXContentRegistry( - listOf(SearchInput.XCONTENT_REGISTRY) + - SearchModule(Settings.EMPTY, emptyList()).namedXContents - ) - } -} diff --git a/core/src/test/kotlin/org/opensearch/alerting/core/schedule/JobSchedulerTest.kt b/core/src/test/kotlin/org/opensearch/alerting/core/schedule/JobSchedulerTest.kt index 4f673fbd4..a0453e935 100644 --- a/core/src/test/kotlin/org/opensearch/alerting/core/schedule/JobSchedulerTest.kt +++ b/core/src/test/kotlin/org/opensearch/alerting/core/schedule/JobSchedulerTest.kt @@ -6,10 +6,10 @@ package org.opensearch.alerting.core.schedule import org.junit.Before -import org.opensearch.alerting.core.model.CronSchedule -import org.opensearch.alerting.core.model.IntervalSchedule import org.opensearch.alerting.core.model.MockScheduledJob import org.opensearch.common.settings.Settings +import org.opensearch.commons.alerting.model.CronSchedule +import org.opensearch.commons.alerting.model.IntervalSchedule import org.opensearch.threadpool.ThreadPool import java.time.Instant import java.time.ZoneId diff --git a/core/src/test/kotlin/org/opensearch/alerting/core/schedule/MockJobRunner.kt b/core/src/test/kotlin/org/opensearch/alerting/core/schedule/MockJobRunner.kt index 6d7ff68da..15fe770b9 100644 --- a/core/src/test/kotlin/org/opensearch/alerting/core/schedule/MockJobRunner.kt +++ b/core/src/test/kotlin/org/opensearch/alerting/core/schedule/MockJobRunner.kt @@ -6,7 +6,7 @@ package org.opensearch.alerting.core.schedule import org.opensearch.alerting.core.JobRunner -import org.opensearch.alerting.core.model.ScheduledJob +import org.opensearch.commons.alerting.model.ScheduledJob import java.time.Instant class MockJobRunner : JobRunner { diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index 41d9927a4..d64cd4917 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index aa991fcea..3499ded5c 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -1,5 +1,6 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.4.2-bin.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.5-bin.zip +networkTimeout=10000 zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists diff --git a/gradlew b/gradlew index 1b6c78733..1aa94a426 100755 --- a/gradlew +++ b/gradlew @@ -55,7 +55,7 @@ # Darwin, MinGW, and NonStop. # # (3) This script is generated from the Groovy template -# https://github.com/gradle/gradle/blob/master/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt # within the Gradle project. # # You can find Gradle at https://github.com/gradle/gradle/. 
@@ -80,13 +80,11 @@ do esac done -APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit - -APP_NAME="Gradle" +# This is normally unused +# shellcheck disable=SC2034 APP_BASE_NAME=${0##*/} - -# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. -DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' +# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) +APP_HOME=$( cd "${APP_HOME:-./}" > /dev/null && pwd -P ) || exit # Use the maximum available, or set MAX_FD != -1 to use that value. MAX_FD=maximum @@ -133,22 +131,29 @@ location of your Java installation." fi else JAVACMD=java - which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + if ! command -v java >/dev/null 2>&1 + then + die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. Please set the JAVA_HOME variable in your environment to match the location of your Java installation." + fi fi # Increase the maximum file descriptors if we can. if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then case $MAX_FD in #( max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 MAX_FD=$( ulimit -H -n ) || warn "Could not query maximum file descriptor limit" esac case $MAX_FD in #( '' | soft) :;; #( *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 ulimit -n "$MAX_FD" || warn "Could not set maximum file descriptor limit to $MAX_FD" esac @@ -193,11 +198,15 @@ if "$cygwin" || "$msys" ; then done fi -# Collect all arguments for the java command; -# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of -# shell script including quotes and variable substitutions, so put them in -# double quotes to make sure that they get re-expanded; and -# * put everything else in single quotes, so that it's not re-expanded. + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Collect all arguments for the java command: +# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# and any embedded shellness will be escaped. +# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be +# treated as '${Hostname}' itself on the command line. set -- \ "-Dorg.gradle.appname=$APP_BASE_NAME" \ @@ -205,6 +214,12 @@ set -- \ org.gradle.wrapper.GradleWrapperMain \ "$@" +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" +fi + # Use "xargs" to parse quoted args. # # With -n1 it outputs one arg per line, with the quotes and backslashes removed. diff --git a/gradlew.bat b/gradlew.bat index ac1b06f93..6689b85be 100644 --- a/gradlew.bat +++ b/gradlew.bat @@ -14,7 +14,7 @@ @rem limitations under the License. @rem -@if "%DEBUG%" == "" @echo off +@if "%DEBUG%"=="" @echo off @rem ########################################################################## @rem @rem Gradle startup script for Windows @@ -25,7 +25,8 @@ if "%OS%"=="Windows_NT" setlocal set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. +if "%DIRNAME%"=="" set DIRNAME=. 
+@rem This is normally unused set APP_BASE_NAME=%~n0 set APP_HOME=%DIRNAME% @@ -40,7 +41,7 @@ if defined JAVA_HOME goto findJavaFromJavaHome set JAVA_EXE=java.exe %JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto execute +if %ERRORLEVEL% equ 0 goto execute echo. echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. @@ -75,13 +76,15 @@ set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar :end @rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd +if %ERRORLEVEL% equ 0 goto mainEnd :fail rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of rem the _cmd.exe /c_ return code! -if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 -exit /b 1 +set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% :mainEnd if "%OS%"=="Windows_NT" endlocal diff --git a/release-notes/opensearch-alerting.release-notes-2.1.0.0.md b/release-notes/opensearch-alerting.release-notes-2.1.0.0.md new file mode 100644 index 000000000..dae6e4800 --- /dev/null +++ b/release-notes/opensearch-alerting.release-notes-2.1.0.0.md @@ -0,0 +1,9 @@ +## Version 2.1.0.0 2022-07-06 + +Compatible with OpenSearch 2.1.0 + +### Maintenance +* Bumped version to 2.1.0, and gradle to 7.4.2. ([#475](https://github.com/opensearch-project/alerting/pull/475)) + +### Documentation +* Added 2.1 release notes. ([#485](https://github.com/opensearch-project/alerting/pull/485)) \ No newline at end of file diff --git a/release-notes/opensearch-alerting.release-notes-2.10.0.0.md b/release-notes/opensearch-alerting.release-notes-2.10.0.0.md new file mode 100644 index 000000000..6f1d47d76 --- /dev/null +++ b/release-notes/opensearch-alerting.release-notes-2.10.0.0.md @@ -0,0 +1,25 @@ +## Version 2.10.0.0 2023-09-06 +Compatible with OpenSearch 2.10.0 + +### Maintenance +* Increment version to 2.10.0-SNAPSHOT. ([#1018](https://github.com/opensearch-project/alerting/pull/1018)) +* exclude