From 22ab75ad57985322e1e7e594514e07f6fa390885 Mon Sep 17 00:00:00 2001 From: Tyler Ohlsen Date: Thu, 18 Jan 2024 09:09:38 -0800 Subject: [PATCH 1/3] Inject namedWriteableRegistry during ser/deser of SearchMonitorAction (#1382) Signed-off-by: Tyler Ohlsen --- alerting/bin/main/DUMMY-FILE | 1 + ....opensearch.painless.spi.PainlessExtension | 4 + .../org/opensearch/alerting/AlertService.kt | 892 +++ .../org/opensearch/alerting/AlertingPlugin.kt | 387 ++ .../alerting/BucketLevelMonitorRunner.kt | 503 ++ .../alerting/DocumentLevelMonitorRunner.kt | 804 +++ .../org/opensearch/alerting/InputService.kt | 226 + .../alerting/MonitorMetadataService.kt | 274 + .../org/opensearch/alerting/MonitorRunner.kt | 194 + .../alerting/MonitorRunnerExecutionContext.kt | 51 + .../alerting/MonitorRunnerService.kt | 353 + .../alerting/QueryLevelMonitorRunner.kt | 101 + .../org/opensearch/alerting/TriggerService.kt | 186 + .../alerting/WorkflowMetadataService.kt | 174 + .../opensearch/alerting/WorkflowService.kt | 144 + .../alerting/action/ExecuteMonitorAction.kt | 15 + .../alerting/action/ExecuteMonitorRequest.kt | 60 + .../alerting/action/ExecuteMonitorResponse.kt | 39 + .../alerting/action/ExecuteWorkflowAction.kt | 15 + .../alerting/action/ExecuteWorkflowRequest.kt | 70 + .../action/ExecuteWorkflowResponse.kt | 39 + .../alerting/action/GetDestinationsAction.kt | 15 + .../alerting/action/GetDestinationsRequest.kt | 61 + .../action/GetDestinationsResponse.kt | 66 + .../alerting/action/GetEmailAccountAction.kt | 15 + .../alerting/action/GetEmailAccountRequest.kt | 60 + .../action/GetEmailAccountResponse.kt | 86 + .../alerting/action/GetEmailGroupAction.kt | 15 + .../alerting/action/GetEmailGroupRequest.kt | 60 + .../alerting/action/GetEmailGroupResponse.kt | 86 + .../action/SearchEmailAccountAction.kt | 16 + .../alerting/action/SearchEmailGroupAction.kt | 16 + .../alerting/alerts/AlertIndices.kt | 588 ++ .../opensearch/alerting/alerts/AlertMover.kt | 250 + .../alerting/alerts/alert_mapping.json | 174 + .../alerting/alerts/finding_mapping.json | 71 + .../parsers/ChainedAlertExpressionParser.kt | 53 + .../ChainedAlertExpressionRPNBaseParser.kt | 114 + .../parsers/ExpressionParser.kt | 12 + .../resolvers/ChainedAlertRPNResolver.kt | 110 + .../ChainedAlertTriggerExpression.kt | 32 + .../resolvers/ChainedAlertTriggerResolver.kt | 11 + .../tokens/CAExpressionOperator.kt | 20 + .../tokens/CAExpressionToken.kt | 11 + .../tokens/ChainedAlertExpressionConstant.kt | 24 + .../tokens/ExpressionToken.kt | 8 + .../alerting/model/AlertingConfigAccessor.kt | 63 + .../model/BucketLevelTriggerRunResult.kt | 58 + .../model/ChainedAlertTriggerRunResult.kt | 69 + .../model/DocumentExecutionContext.kt | 14 + .../model/DocumentLevelTriggerRunResult.kt | 52 + .../alerting/model/MonitorMetadata.kt | 198 + .../alerting/model/MonitorRunResult.kt | 216 + .../model/QueryLevelTriggerRunResult.kt | 66 + .../alerting/model/TriggerRunResult.kt | 55 + .../alerting/model/WorkflowMetadata.kt | 105 + .../alerting/model/WorkflowRunResult.kt | 82 + .../alerting/model/destination/Chime.kt | 74 + .../model/destination/CustomWebhook.kt | 143 + .../alerting/model/destination/Destination.kt | 308 + .../model/destination/DestinationContext.kt | 17 + .../destination/DestinationContextFactory.kt | 80 + .../alerting/model/destination/SNS.kt | 63 + .../alerting/model/destination/Slack.kt | 74 + .../alerting/model/destination/email/Email.kt | 188 + .../model/destination/email/EmailAccount.kt | 175 + .../model/destination/email/EmailGroup.kt | 190 + 
.../alerting/org.opensearch.alerting.txt | 52 + .../resthandler/AsyncActionHandler.kt | 17 + .../resthandler/RestAcknowledgeAlertAction.kt | 92 + .../RestAcknowledgeChainedAlertsAction.kt | 82 + .../resthandler/RestDeleteMonitorAction.kt | 66 + .../resthandler/RestDeleteWorkflowAction.kt | 60 + .../resthandler/RestExecuteMonitorAction.kt | 77 + .../resthandler/RestExecuteWorkflowAction.kt | 59 + .../resthandler/RestGetAlertsAction.kt | 82 + .../resthandler/RestGetDestinationsAction.kt | 96 + .../resthandler/RestGetEmailAccountAction.kt | 68 + .../resthandler/RestGetEmailGroupAction.kt | 68 + .../resthandler/RestGetFindingsAction.kt | 67 + .../resthandler/RestGetMonitorAction.kt | 75 + .../resthandler/RestGetWorkflowAction.kt | 59 + .../RestGetWorkflowAlertsAction.kt | 92 + .../resthandler/RestIndexMonitorAction.kt | 174 + .../resthandler/RestIndexWorkflowAction.kt | 99 + .../RestSearchEmailAccountAction.kt | 108 + .../resthandler/RestSearchEmailGroupAction.kt | 109 + .../resthandler/RestSearchMonitorAction.kt | 136 + .../BucketLevelTriggerExecutionContext.kt | 51 + .../ChainedAlertTriggerExecutionContext.kt | 40 + .../DocumentLevelTriggerExecutionContext.kt | 44 + .../QueryLevelTriggerExecutionContext.kt | 50 + .../script/TriggerExecutionContext.kt | 43 + .../alerting/script/TriggerScript.kt | 46 + .../alerting/service/DeleteMonitorService.kt | 186 + .../alerting/settings/AlertingSettings.kt | 180 + .../alerting/settings/DestinationSettings.kt | 109 + .../LegacyOpenDistroAlertingSettings.kt | 157 + .../LegacyOpenDistroDestinationSettings.kt | 99 + .../SupportedClusterMetricsSettings.kt | 154 + .../settings/supported_json_payloads.json | 12 + .../transport/SecureTransportAction.kt | 139 + .../TransportAcknowledgeAlertAction.kt | 268 + .../TransportAcknowledgeChainedAlertAction.kt | 296 + .../transport/TransportDeleteMonitorAction.kt | 141 + .../TransportDeleteWorkflowAction.kt | 327 + .../TransportExecuteMonitorAction.kt | 161 + .../TransportExecuteWorkflowAction.kt | 131 + .../transport/TransportGetAlertsAction.kt | 273 + .../TransportGetDestinationsAction.kt | 171 + .../TransportGetEmailAccountAction.kt | 122 + .../transport/TransportGetEmailGroupAction.kt | 122 + .../transport/TransportGetFindingsAction.kt | 230 + .../transport/TransportGetMonitorAction.kt | 194 + .../transport/TransportGetWorkflowAction.kt | 148 + .../TransportGetWorkflowAlertsAction.kt | 274 + .../transport/TransportIndexMonitorAction.kt | 732 ++ .../transport/TransportIndexWorkflowAction.kt | 796 +++ .../TransportSearchEmailAccountAction.kt | 73 + .../TransportSearchEmailGroupAction.kt | 73 + .../transport/TransportSearchMonitorAction.kt | 128 + .../parsers/ExpressionParser.kt | 12 + .../parsers/TriggerExpressionParser.kt | 53 + .../parsers/TriggerExpressionRPNBaseParser.kt | 116 + .../resolvers/TriggerExpression.kt | 32 + .../resolvers/TriggerExpressionRPNResolver.kt | 103 + .../resolvers/TriggerExpressionResolver.kt | 12 + .../tokens/ExpressionToken.kt | 8 + .../tokens/TriggerExpressionConstant.kt | 26 + .../tokens/TriggerExpressionOperator.kt | 20 + .../tokens/TriggerExpressionToken.kt | 11 + .../alerting/util/AggregationQueryRewriter.kt | 114 + .../alerting/util/AlertingException.kt | 73 + .../opensearch/alerting/util/AlertingUtils.kt | 141 + .../alerting/util/AnomalyDetectionUtils.kt | 68 + .../alerting/util/DestinationType.kt | 18 + .../alerting/util/DocLevelMonitorQueries.kt | 608 ++ .../opensearch/alerting/util/IndexUtils.kt | 200 + .../alerting/util/RestHandlerUtils.kt | 29 + 
.../alerting/util/ScheduledJobUtils.kt | 70 + .../CatIndicesHelpers.kt | 859 +++ .../CatShardsHelpers.kt | 495 ++ ...pportedClusterMetricsSettingsExtensions.kt | 171 + .../DestinationConversionUtils.kt | 184 + .../DestinationMigrationCoordinator.kt | 106 + .../DestinationMigrationUtilService.kt | 228 + .../NotificationApiUtils.kt | 172 + .../workflow/CompositeWorkflowRunner.kt | 395 ++ .../alerting/workflow/WorkflowRunContext.kt | 15 + .../alerting/workflow/WorkflowRunner.kt | 199 + .../PercolateQueryBuilderExt$1.class | Bin 0 -> 2105 bytes .../PercolateQueryBuilderExt$2.class | Bin 0 -> 5688 bytes .../percolator/PercolateQueryBuilderExt.class | Bin 0 -> 33817 bytes .../PercolatorFieldMapperExt$Builder.class | Bin 0 -> 9395 bytes ...orFieldMapperExt$PercolatorFieldType.class | Bin 0 -> 11742 bytes .../PercolatorFieldMapperExt$TypeParser.class | Bin 0 -> 1964 bytes .../percolator/PercolatorFieldMapperExt.class | Bin 0 -> 17553 bytes .../percolator/PercolatorPluginExt.class | Bin 0 -> 4265 bytes alerting/bin/test/esnode-key.pem | 28 + alerting/bin/test/esnode.pem | 28 + alerting/bin/test/kirk-key.pem | 28 + alerting/bin/test/kirk.pem | 26 + .../org/opensearch/alerting/ADTestHelpers.kt | 508 ++ .../org/opensearch/alerting/AccessRoles.kt | 49 + .../opensearch/alerting/AlertServiceTests.kt | 256 + .../alerting/AlertingRestTestCase.kt | 1821 +++++ .../alerting/DocumentMonitorRunnerIT.kt | 1675 +++++ .../alerting/MonitorDataSourcesIT.kt | 5923 +++++++++++++++++ .../alerting/MonitorRunnerServiceIT.kt | 2093 ++++++ .../org/opensearch/alerting/MonitorTests.kt | 47 + .../opensearch/alerting/ODFERestTestCase.kt | 146 + .../org/opensearch/alerting/TestHelpers.kt | 797 +++ .../alerting/TriggerServiceTests.kt | 260 + .../action/ExecuteMonitorActionTests.kt | 15 + .../action/ExecuteMonitorRequestTests.kt | 47 + .../action/ExecuteMonitorResponseTests.kt | 42 + .../action/GetDestinationsActionTests.kt | 16 + .../action/GetDestinationsRequestTests.kt | 83 + .../action/GetDestinationsResponseTests.kt | 62 + .../action/GetEmailAccountActionTests.kt | 16 + .../action/GetEmailAccountRequestTests.kt | 45 + .../action/GetEmailAccountResponseTests.kt | 47 + .../action/GetEmailGroupActionTests.kt | 16 + .../action/GetEmailGroupRequestTests.kt | 45 + .../action/GetEmailGroupResponseTests.kt | 47 + .../action/GetFindingsRequestTests.kt | 41 + .../action/SearchEmailAccountActionTests.kt | 16 + .../action/SearchEmailGroupActionTests.kt | 16 + ...ucketSelectorExtAggregationBuilderTests.kt | 56 + .../BucketSelectorExtAggregatorTests.kt | 371 ++ .../alerting/alerts/AlertIndicesIT.kt | 371 ++ .../bwc/AlertingBackwardsCompatibilityIT.kt | 207 + .../ChainedAlertsExpressionParserTests.kt | 84 + .../ChainedAlertsExpressionResolveTests.kt | 118 + .../opensearch/alerting/model/AlertTests.kt | 62 + .../alerting/model/DestinationTests.kt | 310 + .../alerting/model/EmailAccountTests.kt | 61 + .../alerting/model/EmailGroupTests.kt | 60 + .../opensearch/alerting/model/FindingTests.kt | 40 + .../alerting/model/WriteableTests.kt | 126 + .../alerting/model/XContentTests.kt | 94 + .../resthandler/DestinationRestApiIT.kt | 189 + .../resthandler/EmailAccountRestApiIT.kt | 180 + .../resthandler/EmailGroupRestApiIT.kt | 173 + .../alerting/resthandler/FindingsRestApiIT.kt | 210 + .../alerting/resthandler/MonitorRestApiIT.kt | 1412 ++++ .../resthandler/SecureDestinationRestApiIT.kt | 159 + .../SecureEmailAccountRestApiIT.kt | 179 + .../resthandler/SecureEmailGroupsRestApiIT.kt | 128 + .../resthandler/SecureMonitorRestApiIT.kt | 1577 +++++ 
.../resthandler/SecureWorkflowRestApiIT.kt | 1421 ++++ .../alerting/resthandler/WorkflowRestApiIT.kt | 1188 ++++ .../settings/AlertingSettingsTests.kt | 189 + .../settings/DestinationSettingsTests.kt | 73 + .../transport/AlertingSingleNodeTestCase.kt | 503 ++ .../TriggerExpressionParserTests.kt | 76 + .../TriggerExpressionResolverTests.kt | 124 + .../util/AggregationQueryRewriterTests.kt | 335 + .../util/AnomalyDetectionUtilsTests.kt | 162 + .../alerting/util/IndexUtilsTests.kt | 91 + .../CatIndicesWrappersIT.kt | 173 + .../CatShardsWrappersIT.kt | 165 + ...edClusterMetricsSettingsExtensionsTests.kt | 122 + .../DestinationMigrationUtilServiceIT.kt | 104 + alerting/bin/test/plugin-security.policy | 8 + alerting/bin/test/root-ca.pem | 24 + alerting/bin/test/sample.pem | 25 + alerting/bin/test/test-kirk.jks | Bin 0 -> 4504 bytes .../transport/TransportSearchMonitorAction.kt | 7 +- core/bin/main/mappings/doc-level-queries.json | 16 + core/bin/main/mappings/scheduled-jobs.json | 694 ++ .../org/opensearch/alerting/core/JobRunner.kt | 17 + .../opensearch/alerting/core/JobSweeper.kt | 512 ++ .../alerting/core/JobSweeperMetrics.kt | 29 + .../alerting/core/ScheduledJobIndices.kt | 65 + .../core/action/node/ScheduledJobStats.kt | 88 + .../action/node/ScheduledJobsStatsAction.kt | 25 + .../action/node/ScheduledJobsStatsRequest.kt | 45 + .../action/node/ScheduledJobsStatsResponse.kt | 78 + .../node/ScheduledJobsStatsTransportAction.kt | 139 + .../RestScheduledJobStatsHandler.kt | 121 + .../alerting/core/schedule/JobScheduler.kt | 228 + .../core/schedule/JobSchedulerMetrics.kt | 48 + .../LegacyOpenDistroScheduledJobSettings.kt | 49 + .../core/settings/ScheduledJobSettings.kt | 51 + .../opensearchapi/OpenSearchExtensions.kt | 207 + core/bin/main/settings/doc-level-queries.json | 10 + .../alerting/core/WriteableTests.kt | 26 + .../alerting/core/model/MockScheduledJob.kt | 33 + .../core/schedule/JobSchedulerTest.kt | 190 + .../alerting/core/schedule/MockJobRunner.kt | 31 + 251 files changed, 48937 insertions(+), 3 deletions(-) create mode 100644 alerting/bin/main/DUMMY-FILE create mode 100644 alerting/bin/main/META-INF/services/org.opensearch.painless.spi.PainlessExtension create mode 100644 alerting/bin/main/org/opensearch/alerting/AlertService.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/AlertingPlugin.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/BucketLevelMonitorRunner.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/DocumentLevelMonitorRunner.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/InputService.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/MonitorMetadataService.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/MonitorRunner.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/MonitorRunnerExecutionContext.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/MonitorRunnerService.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/QueryLevelMonitorRunner.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/TriggerService.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/WorkflowMetadataService.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/WorkflowService.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorRequest.kt create mode 100644 
alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorResponse.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowRequest.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowResponse.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/action/GetDestinationsAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/action/GetDestinationsRequest.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/action/GetDestinationsResponse.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountRequest.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountResponse.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupRequest.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupResponse.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/action/SearchEmailAccountAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/action/SearchEmailGroupAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/alerts/AlertIndices.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/alerts/AlertMover.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/alerts/alert_mapping.json create mode 100644 alerting/bin/main/org/opensearch/alerting/alerts/finding_mapping.json create mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionParser.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionRPNBaseParser.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ExpressionParser.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertRPNResolver.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerExpression.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerResolver.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionOperator.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionToken.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/ChainedAlertExpressionConstant.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/ExpressionToken.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/AlertingConfigAccessor.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/BucketLevelTriggerRunResult.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/ChainedAlertTriggerRunResult.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/DocumentExecutionContext.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/DocumentLevelTriggerRunResult.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/MonitorMetadata.kt create mode 100644 
alerting/bin/main/org/opensearch/alerting/model/MonitorRunResult.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/QueryLevelTriggerRunResult.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/TriggerRunResult.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/WorkflowMetadata.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/WorkflowRunResult.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/Chime.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/CustomWebhook.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/Destination.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/DestinationContext.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/DestinationContextFactory.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/SNS.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/Slack.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/email/Email.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/email/EmailAccount.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/email/EmailGroup.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/org.opensearch.alerting.txt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/AsyncActionHandler.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestAcknowledgeAlertAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestAcknowledgeChainedAlertsAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestDeleteMonitorAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestDeleteWorkflowAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestExecuteMonitorAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestExecuteWorkflowAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestGetAlertsAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestGetDestinationsAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestGetEmailAccountAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestGetEmailGroupAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestGetFindingsAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestGetMonitorAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestGetWorkflowAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestGetWorkflowAlertsAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestIndexWorkflowAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchEmailAccountAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchEmailGroupAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt create mode 100644 
alerting/bin/main/org/opensearch/alerting/script/BucketLevelTriggerExecutionContext.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/script/ChainedAlertTriggerExecutionContext.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/script/DocumentLevelTriggerExecutionContext.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/script/QueryLevelTriggerExecutionContext.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/script/TriggerExecutionContext.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/script/TriggerScript.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/service/DeleteMonitorService.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/settings/AlertingSettings.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/settings/DestinationSettings.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/settings/LegacyOpenDistroAlertingSettings.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/settings/LegacyOpenDistroDestinationSettings.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/settings/SupportedClusterMetricsSettings.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/settings/supported_json_payloads.json create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/SecureTransportAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportAcknowledgeChainedAlertAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportDeleteWorkflowAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportExecuteMonitorAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportExecuteWorkflowAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportGetAlertsAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportGetDestinationsAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportGetEmailAccountAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportGetEmailGroupAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportGetFindingsAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportGetMonitorAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportGetWorkflowAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportGetWorkflowAlertsAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportIndexWorkflowAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportSearchEmailAccountAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportSearchEmailGroupAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/ExpressionParser.kt create mode 100644 
alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionParser.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionRPNBaseParser.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpression.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionRPNResolver.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionResolver.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/ExpressionToken.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionConstant.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionOperator.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionToken.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/util/AggregationQueryRewriter.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/util/AlertingException.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/util/AlertingUtils.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/util/AnomalyDetectionUtils.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/util/DestinationType.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/util/DocLevelMonitorQueries.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/util/IndexUtils.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/util/RestHandlerUtils.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/util/ScheduledJobUtils.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesHelpers.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsHelpers.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensions.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationConversionUtils.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationMigrationCoordinator.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilService.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/util/destinationmigration/NotificationApiUtils.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/workflow/CompositeWorkflowRunner.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/workflow/WorkflowRunContext.kt create mode 100644 alerting/bin/main/org/opensearch/alerting/workflow/WorkflowRunner.kt create mode 100644 alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt$1.class create mode 100644 alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt$2.class create mode 100644 alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt.class create mode 100644 alerting/bin/main/org/opensearch/percolator/PercolatorFieldMapperExt$Builder.class create mode 100644 alerting/bin/main/org/opensearch/percolator/PercolatorFieldMapperExt$PercolatorFieldType.class create mode 100644 alerting/bin/main/org/opensearch/percolator/PercolatorFieldMapperExt$TypeParser.class create mode 100644 
alerting/bin/main/org/opensearch/percolator/PercolatorFieldMapperExt.class create mode 100644 alerting/bin/main/org/opensearch/percolator/PercolatorPluginExt.class create mode 100644 alerting/bin/test/esnode-key.pem create mode 100644 alerting/bin/test/esnode.pem create mode 100644 alerting/bin/test/kirk-key.pem create mode 100644 alerting/bin/test/kirk.pem create mode 100644 alerting/bin/test/org/opensearch/alerting/ADTestHelpers.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/AccessRoles.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/AlertServiceTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/AlertingRestTestCase.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/DocumentMonitorRunnerIT.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/MonitorDataSourcesIT.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/MonitorRunnerServiceIT.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/MonitorTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/ODFERestTestCase.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/TestHelpers.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/TriggerServiceTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorActionTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorRequestTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorResponseTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetDestinationsActionTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetDestinationsRequestTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetDestinationsResponseTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountActionTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountRequestTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountResponseTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupActionTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupRequestTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupResponseTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetFindingsRequestTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/action/SearchEmailAccountActionTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/action/SearchEmailGroupActionTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregationBuilderTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregatorTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/alerts/AlertIndicesIT.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/bwc/AlertingBackwardsCompatibilityIT.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionParserTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionResolveTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/model/AlertTests.kt create mode 100644 
alerting/bin/test/org/opensearch/alerting/model/DestinationTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/model/EmailAccountTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/model/EmailGroupTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/model/FindingTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/model/WriteableTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/model/XContentTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/DestinationRestApiIT.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/EmailAccountRestApiIT.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/EmailGroupRestApiIT.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/FindingsRestApiIT.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/SecureDestinationRestApiIT.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/SecureMonitorRestApiIT.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/SecureWorkflowRestApiIT.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/WorkflowRestApiIT.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/settings/AlertingSettingsTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/settings/DestinationSettingsTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/transport/AlertingSingleNodeTestCase.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/triggeraction/TriggerExpressionParserTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/triggeraction/TriggerExpressionResolverTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/util/AggregationQueryRewriterTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/util/AnomalyDetectionUtilsTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/util/IndexUtilsTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesWrappersIT.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsWrappersIT.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensionsTests.kt create mode 100644 alerting/bin/test/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilServiceIT.kt create mode 100644 alerting/bin/test/plugin-security.policy create mode 100644 alerting/bin/test/root-ca.pem create mode 100644 alerting/bin/test/sample.pem create mode 100644 alerting/bin/test/test-kirk.jks create mode 100644 core/bin/main/mappings/doc-level-queries.json create mode 100644 core/bin/main/mappings/scheduled-jobs.json create mode 100644 core/bin/main/org/opensearch/alerting/core/JobRunner.kt create mode 100644 core/bin/main/org/opensearch/alerting/core/JobSweeper.kt create mode 100644 core/bin/main/org/opensearch/alerting/core/JobSweeperMetrics.kt create mode 100644 core/bin/main/org/opensearch/alerting/core/ScheduledJobIndices.kt create mode 100644 
core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobStats.kt create mode 100644 core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsAction.kt create mode 100644 core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsRequest.kt create mode 100644 core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsResponse.kt create mode 100644 core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsTransportAction.kt create mode 100644 core/bin/main/org/opensearch/alerting/core/resthandler/RestScheduledJobStatsHandler.kt create mode 100644 core/bin/main/org/opensearch/alerting/core/schedule/JobScheduler.kt create mode 100644 core/bin/main/org/opensearch/alerting/core/schedule/JobSchedulerMetrics.kt create mode 100644 core/bin/main/org/opensearch/alerting/core/settings/LegacyOpenDistroScheduledJobSettings.kt create mode 100644 core/bin/main/org/opensearch/alerting/core/settings/ScheduledJobSettings.kt create mode 100644 core/bin/main/org/opensearch/alerting/opensearchapi/OpenSearchExtensions.kt create mode 100644 core/bin/main/settings/doc-level-queries.json create mode 100644 core/bin/test/org/opensearch/alerting/core/WriteableTests.kt create mode 100644 core/bin/test/org/opensearch/alerting/core/model/MockScheduledJob.kt create mode 100644 core/bin/test/org/opensearch/alerting/core/schedule/JobSchedulerTest.kt create mode 100644 core/bin/test/org/opensearch/alerting/core/schedule/MockJobRunner.kt diff --git a/alerting/bin/main/DUMMY-FILE b/alerting/bin/main/DUMMY-FILE new file mode 100644 index 000000000..74623997f --- /dev/null +++ b/alerting/bin/main/DUMMY-FILE @@ -0,0 +1 @@ +THIS IS A DUMMY FILE \ No newline at end of file diff --git a/alerting/bin/main/META-INF/services/org.opensearch.painless.spi.PainlessExtension b/alerting/bin/main/META-INF/services/org.opensearch.painless.spi.PainlessExtension new file mode 100644 index 000000000..3a1412058 --- /dev/null +++ b/alerting/bin/main/META-INF/services/org.opensearch.painless.spi.PainlessExtension @@ -0,0 +1,4 @@ +# Copyright OpenSearch Contributors +# SPDX-License-Identifier: Apache-2.0 + +org.opensearch.alerting.AlertingPlugin \ No newline at end of file diff --git a/alerting/bin/main/org/opensearch/alerting/AlertService.kt b/alerting/bin/main/org/opensearch/alerting/AlertService.kt new file mode 100644 index 000000000..05e35c1b7 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/AlertService.kt @@ -0,0 +1,892 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import org.apache.logging.log4j.LogManager +import org.opensearch.ExceptionsHelper +import org.opensearch.action.DocWriteRequest +import org.opensearch.action.bulk.BackoffPolicy +import org.opensearch.action.bulk.BulkRequest +import org.opensearch.action.bulk.BulkResponse +import org.opensearch.action.delete.DeleteRequest +import org.opensearch.action.index.IndexRequest +import org.opensearch.action.index.IndexResponse +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.alerts.AlertIndices +import org.opensearch.alerting.model.ActionRunResult +import org.opensearch.alerting.model.ChainedAlertTriggerRunResult +import org.opensearch.alerting.model.QueryLevelTriggerRunResult +import org.opensearch.alerting.opensearchapi.firstFailureOrNull +import org.opensearch.alerting.opensearchapi.retry 
+import org.opensearch.alerting.opensearchapi.suspendUntil
+import org.opensearch.alerting.script.ChainedAlertTriggerExecutionContext
+import org.opensearch.alerting.script.DocumentLevelTriggerExecutionContext
+import org.opensearch.alerting.script.QueryLevelTriggerExecutionContext
+import org.opensearch.alerting.util.IndexUtils
+import org.opensearch.alerting.util.MAX_SEARCH_SIZE
+import org.opensearch.alerting.util.getBucketKeysHash
+import org.opensearch.alerting.workflow.WorkflowRunContext
+import org.opensearch.client.Client
+import org.opensearch.common.unit.TimeValue
+import org.opensearch.common.xcontent.LoggingDeprecationHandler
+import org.opensearch.common.xcontent.XContentFactory
+import org.opensearch.common.xcontent.XContentHelper
+import org.opensearch.common.xcontent.XContentType
+import org.opensearch.commons.alerting.alerts.AlertError
+import org.opensearch.commons.alerting.model.ActionExecutionResult
+import org.opensearch.commons.alerting.model.AggregationResultBucket
+import org.opensearch.commons.alerting.model.Alert
+import org.opensearch.commons.alerting.model.BucketLevelTrigger
+import org.opensearch.commons.alerting.model.DataSources
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.commons.alerting.model.NoOpTrigger
+import org.opensearch.commons.alerting.model.Trigger
+import org.opensearch.commons.alerting.model.Workflow
+import org.opensearch.commons.alerting.model.action.AlertCategory
+import org.opensearch.core.action.ActionListener
+import org.opensearch.core.common.bytes.BytesReference
+import org.opensearch.core.rest.RestStatus
+import org.opensearch.core.xcontent.NamedXContentRegistry
+import org.opensearch.core.xcontent.XContentParser
+import org.opensearch.core.xcontent.XContentParserUtils
+import org.opensearch.index.VersionType
+import org.opensearch.index.query.QueryBuilders
+import org.opensearch.index.reindex.BulkByScrollResponse
+import org.opensearch.index.reindex.DeleteByQueryAction
+import org.opensearch.index.reindex.DeleteByQueryRequestBuilder
+import org.opensearch.search.builder.SearchSourceBuilder
+import org.opensearch.search.sort.SortOrder
+import java.time.Instant
+import java.util.UUID
+import java.util.concurrent.TimeUnit
+import kotlin.coroutines.resume
+import kotlin.coroutines.resumeWithException
+import kotlin.coroutines.suspendCoroutine
+
+/** Service that handles CRUD operations for alerts */
+class AlertService(
+    val client: Client,
+    val xContentRegistry: NamedXContentRegistry,
+    val alertIndices: AlertIndices
+) {
+
+    companion object {
+        const val MAX_BUCKET_LEVEL_MONITOR_ALERT_SEARCH_COUNT = 500
+        const val ERROR_ALERT_ID_PREFIX = "error-alert"
+
+        val ALERTS_SEARCH_TIMEOUT = TimeValue(5, TimeUnit.MINUTES)
+    }
+
+    private val logger = LogManager.getLogger(AlertService::class.java)
+
+    suspend fun loadCurrentAlertsForWorkflow(workflow: Workflow, dataSources: DataSources): Map<Trigger, Alert?> {
+        val searchAlertsResponse: SearchResponse = searchAlerts(
+            workflow = workflow,
+            size = workflow.triggers.size * 2, // We expect there to be only a single in-progress alert so fetch 2 to check
+            dataSources = dataSources
+        )
+
+        val foundAlerts = searchAlertsResponse.hits.map { Alert.parse(contentParser(it.sourceRef), it.id, it.version) }
+            .groupBy { it.triggerId }
+        foundAlerts.values.forEach { alerts ->
+            if (alerts.size > 1) {
+                logger.warn("Found multiple alerts for same trigger: $alerts")
+            }
+        }
+
+        return workflow.triggers.associateWith { trigger ->
+            foundAlerts[trigger.id]?.firstOrNull()
+        }
+    }
+
+    suspend fun loadCurrentAlertsForQueryLevelMonitor(monitor: Monitor, workflowRunContext: WorkflowRunContext?): Map<Trigger, Alert?> {
+        val searchAlertsResponse: SearchResponse = searchAlerts(
+            monitor = monitor,
+            size = monitor.triggers.size * 2, // We expect there to be only a single in-progress alert so fetch 2 to check
+            workflowRunContext
+        )
+
+        val foundAlerts = searchAlertsResponse.hits.map { Alert.parse(contentParser(it.sourceRef), it.id, it.version) }
+            .groupBy { it.triggerId }
+        foundAlerts.values.forEach { alerts ->
+            if (alerts.size > 1) {
+                logger.warn("Found multiple alerts for same trigger: $alerts")
+            }
+        }
+
+        return monitor.triggers.associateWith { trigger ->
+            foundAlerts[trigger.id]?.firstOrNull()
+        }
+    }
+
+    suspend fun loadCurrentAlertsForBucketLevelMonitor(
+        monitor: Monitor,
+        workflowRunContext: WorkflowRunContext?,
+    ): Map<Trigger, MutableMap<String, Alert>> {
+        val searchAlertsResponse: SearchResponse = searchAlerts(
+            monitor = monitor,
+            // TODO: This should be limited based on a circuit breaker that limits Alerts
+            size = MAX_BUCKET_LEVEL_MONITOR_ALERT_SEARCH_COUNT,
+            workflowRunContext = workflowRunContext
+        )
+
+        val foundAlerts = searchAlertsResponse.hits.map { Alert.parse(contentParser(it.sourceRef), it.id, it.version) }
+            .groupBy { it.triggerId }
+
+        return monitor.triggers.associateWith { trigger ->
+            // Default to an empty map if there are no Alerts found for a Trigger to make Alert categorization logic easier
+            (
+                foundAlerts[trigger.id]?.mapNotNull { alert ->
+                    alert.aggregationResultBucket?.let { it.getBucketKeysHash() to alert }
+                }?.toMap()?.toMutableMap() ?: mutableMapOf()
+            )
+        }
+    }
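+
+    // The three load methods above all resolve a monitor's (or workflow's) triggers to their currently
+    // open alerts: workflow and query-level triggers map to at most one in-progress alert each, while
+    // bucket-level triggers map to every open alert for the trigger, keyed by aggregation bucket-keys hash.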
+
+    fun composeQueryLevelAlert(
+        ctx: QueryLevelTriggerExecutionContext,
+        result: QueryLevelTriggerRunResult,
+        alertError: AlertError?,
+        executionId: String,
+        workflowRunContext: WorkflowRunContext?
+    ): Alert? {
+        val currentTime = Instant.now()
+        val currentAlert = ctx.alert
+
+        val updatedActionExecutionResults = mutableListOf<ActionExecutionResult>()
+        val currentActionIds = mutableSetOf<String>()
+        if (currentAlert != null) {
+            // update current alert's action execution results
+            for (actionExecutionResult in currentAlert.actionExecutionResults) {
+                val actionId = actionExecutionResult.actionId
+                currentActionIds.add(actionId)
+                val actionRunResult = result.actionResults[actionId]
+                when {
+                    actionRunResult == null -> updatedActionExecutionResults.add(actionExecutionResult)
+                    actionRunResult.throttled ->
+                        updatedActionExecutionResults.add(
+                            actionExecutionResult.copy(
+                                throttledCount = actionExecutionResult.throttledCount + 1
+                            )
+                        )
+                    else -> updatedActionExecutionResults.add(actionExecutionResult.copy(lastExecutionTime = actionRunResult.executionTime))
+                }
+            }
+            // add any action execution results that do not yet exist in the current alert
+            updatedActionExecutionResults.addAll(
+                result.actionResults.filter { !currentActionIds.contains(it.key) }
+                    .map { ActionExecutionResult(it.key, it.value.executionTime, if (it.value.throttled) 1 else 0) }
+            )
+        } else {
+            updatedActionExecutionResults.addAll(
+                result.actionResults.map {
+                    ActionExecutionResult(it.key, it.value.executionTime, if (it.value.throttled) 1 else 0)
+                }
+            )
+        }
+
+        // Merge the alert's error message into the current alert's history
+        val updatedHistory = currentAlert?.errorHistory.update(alertError)
+        return if (alertError == null && !result.triggered) {
+            currentAlert?.copy(
+                state = Alert.State.COMPLETED,
+                endTime = currentTime,
+                errorMessage = null,
+                errorHistory = updatedHistory,
+                actionExecutionResults = updatedActionExecutionResults,
+                schemaVersion = IndexUtils.alertIndexSchemaVersion
+            )
+        } else if (alertError == null && currentAlert?.isAcknowledged() == true) {
+            null
+        } else if (currentAlert != null) {
+            val alertState = if (alertError == null) Alert.State.ACTIVE else Alert.State.ERROR
+            currentAlert.copy(
+                state = alertState,
+                lastNotificationTime = currentTime,
+                errorMessage = alertError?.message,
+                errorHistory = updatedHistory,
+                actionExecutionResults = updatedActionExecutionResults,
+                schemaVersion = IndexUtils.alertIndexSchemaVersion,
+            )
+        } else {
+            val alertState = if (workflowRunContext?.auditDelegateMonitorAlerts == true) {
+                Alert.State.AUDIT
+            } else if (alertError == null) Alert.State.ACTIVE
+            else Alert.State.ERROR
+            Alert(
+                monitor = ctx.monitor, trigger = ctx.trigger, startTime = currentTime,
+                lastNotificationTime = currentTime, state = alertState, errorMessage = alertError?.message,
+                errorHistory = updatedHistory, actionExecutionResults = updatedActionExecutionResults,
+                schemaVersion = IndexUtils.alertIndexSchemaVersion, executionId = executionId,
+                workflowId = workflowRunContext?.workflowId ?: ""
+            )
+        }
+    }
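+
+    // Taken together, the branches above encode the query-level alert lifecycle: a trigger that stops
+    // firing COMPLETEs the open alert, an ACKNOWLEDGED alert that fires again is left untouched (null is
+    // returned), an existing alert is refreshed as ACTIVE or ERROR, and a brand-new alert starts as
+    // ACTIVE, ERROR, or AUDIT (the latter when a workflow runs the monitor as an audited delegate).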
+
+    // TODO: clean this up so it follows the proper alert management for doc monitors
+    fun composeDocLevelAlert(
+        findings: List<String>,
+        relatedDocIds: List<String>,
+        ctx: DocumentLevelTriggerExecutionContext,
+        alertError: AlertError?,
+        executionId: String,
+        workflowRunContext: WorkflowRunContext?
+    ): Alert {
+        val currentTime = Instant.now()
+
+        val alertState = if (workflowRunContext?.auditDelegateMonitorAlerts == true) {
+            Alert.State.AUDIT
+        } else if (alertError == null) {
+            Alert.State.ACTIVE
+        } else {
+            Alert.State.ERROR
+        }
+        return Alert(
+            id = UUID.randomUUID().toString(), monitor = ctx.monitor, trigger = ctx.trigger, startTime = currentTime,
+            lastNotificationTime = currentTime, state = alertState, errorMessage = alertError?.message,
+            schemaVersion = IndexUtils.alertIndexSchemaVersion, findingIds = findings, relatedDocIds = relatedDocIds,
+            executionId = executionId, workflowId = workflowRunContext?.workflowId ?: ""
+        )
+    }
+
+    fun composeMonitorErrorAlert(
+        id: String,
+        monitor: Monitor,
+        alertError: AlertError,
+        executionId: String?,
+        workflowRunContext: WorkflowRunContext?
+    ): Alert {
+        val currentTime = Instant.now()
+        val alertState = if (workflowRunContext?.auditDelegateMonitorAlerts == true) {
+            Alert.State.AUDIT
+        } else {
+            Alert.State.ERROR
+        }
+        return Alert(
+            id = id, monitor = monitor, trigger = NoOpTrigger(), startTime = currentTime,
+            lastNotificationTime = currentTime, state = alertState, errorMessage = alertError.message,
+            schemaVersion = IndexUtils.alertIndexSchemaVersion, executionId = executionId, workflowId = workflowRunContext?.workflowId ?: ""
+        )
+    }
+
+    fun composeChainedAlert(
+        ctx: ChainedAlertTriggerExecutionContext,
+        executionId: String,
+        workflow: Workflow,
+        associatedAlertIds: List<String>,
+        result: ChainedAlertTriggerRunResult,
+        alertError: AlertError? = null,
+    ): Alert? {
+
+        val currentTime = Instant.now()
+        val currentAlert = ctx.alert
+
+        val updatedActionExecutionResults = mutableListOf<ActionExecutionResult>()
+        val currentActionIds = mutableSetOf<String>()
+        if (currentAlert != null) {
+            // update current alert's action execution results
+            for (actionExecutionResult in currentAlert.actionExecutionResults) {
+                val actionId = actionExecutionResult.actionId
+                currentActionIds.add(actionId)
+                val actionRunResult = result.actionResults[actionId]
+                when {
+                    actionRunResult == null -> updatedActionExecutionResults.add(actionExecutionResult)
+                    actionRunResult.throttled ->
+                        updatedActionExecutionResults.add(
+                            actionExecutionResult.copy(
+                                throttledCount = actionExecutionResult.throttledCount + 1
+                            )
+                        )
+
+                    else -> updatedActionExecutionResults.add(actionExecutionResult.copy(lastExecutionTime = actionRunResult.executionTime))
+                }
+            }
+            // add any action execution results that do not yet exist in the current alert
+            updatedActionExecutionResults.addAll(
+                result.actionResults.filter { !currentActionIds.contains(it.key) }
+                    .map { ActionExecutionResult(it.key, it.value.executionTime, if (it.value.throttled) 1 else 0) }
+            )
+        } else {
+            updatedActionExecutionResults.addAll(
+                result.actionResults.map {
+                    ActionExecutionResult(it.key, it.value.executionTime, if (it.value.throttled) 1 else 0)
+                }
+            )
+        }
+
+        // Merge the alert's error message into the current alert's history
+        val updatedHistory = currentAlert?.errorHistory.update(alertError)
+        return if (alertError == null && !result.triggered) {
+            currentAlert?.copy(
+                state = Alert.State.COMPLETED,
+                endTime = currentTime,
+                errorMessage = null,
+                errorHistory = updatedHistory,
+                actionExecutionResults = updatedActionExecutionResults,
+                schemaVersion = IndexUtils.alertIndexSchemaVersion
+            )
+        } else if (alertError == null && currentAlert?.isAcknowledged() == true) {
+            null
+        } else if (currentAlert != null) {
+            val alertState = Alert.State.ACTIVE
+            currentAlert.copy(
+                state = alertState,
+                lastNotificationTime = currentTime,
+                errorMessage = alertError?.message,
+                errorHistory = updatedHistory,
+                actionExecutionResults = updatedActionExecutionResults,
+                schemaVersion = IndexUtils.alertIndexSchemaVersion,
+            )
+        } else {
+            Alert(
+                startTime = Instant.now(),
+                lastNotificationTime = currentTime,
+                state = Alert.State.ACTIVE,
+                errorMessage = null, schemaVersion = IndexUtils.alertIndexSchemaVersion,
+                chainedAlertTrigger = ctx.trigger,
+                executionId = executionId,
+                workflow = workflow,
+                associatedAlertIds = associatedAlertIds
+            )
+        }
+    }
+
+    fun updateActionResultsForBucketLevelAlert(
+        currentAlert: Alert,
+        actionResults: Map<String, ActionRunResult>,
+        alertError: AlertError?
+    ): Alert {
+        val updatedActionExecutionResults = mutableListOf<ActionExecutionResult>()
+        val currentActionIds = mutableSetOf<String>()
+        // Update alert's existing action execution results
+        for (actionExecutionResult in currentAlert.actionExecutionResults) {
+            val actionId = actionExecutionResult.actionId
+            currentActionIds.add(actionId)
+            val actionRunResult = actionResults[actionId]
+            when {
+                actionRunResult == null -> updatedActionExecutionResults.add(actionExecutionResult)
+                actionRunResult.throttled ->
+                    updatedActionExecutionResults.add(
+                        actionExecutionResult.copy(
+                            throttledCount = actionExecutionResult.throttledCount + 1
+                        )
+                    )
+                else -> updatedActionExecutionResults.add(actionExecutionResult.copy(lastExecutionTime = actionRunResult.executionTime))
+            }
+        }
+
+        // Add action execution results not currently present in the alert
+        updatedActionExecutionResults.addAll(
+            actionResults.filter { !currentActionIds.contains(it.key) }
+                .map { ActionExecutionResult(it.key, it.value.executionTime, if (it.value.throttled) 1 else 0) }
+        )
+
+        val updatedErrorHistory = currentAlert.errorHistory.update(alertError)
+        return if (alertError == null) {
+            currentAlert.copy(errorHistory = updatedErrorHistory, actionExecutionResults = updatedActionExecutionResults)
+        } else {
+            currentAlert.copy(
+                state = Alert.State.ERROR,
+                errorMessage = alertError.message,
+                errorHistory = updatedErrorHistory,
+                actionExecutionResults = updatedActionExecutionResults
+            )
+        }
+    }
+
+    // TODO: Can change the parameters to use ctx: BucketLevelTriggerExecutionContext instead of monitor/trigger and
+    //  result: AggTriggerRunResult for aggResultBuckets
+    // TODO: Can refactor this method to use Sets instead which can cleanup some of the categorization logic (like getting completed alerts)
+    fun getCategorizedAlertsForBucketLevelMonitor(
+        monitor: Monitor,
+        trigger: BucketLevelTrigger,
+        currentAlerts: MutableMap<String, Alert>,
+        aggResultBuckets: List<AggregationResultBucket>,
+        findings: List<String>,
+        executionId: String,
+        workflowRunContext: WorkflowRunContext?
+    ): Map<AlertCategory, List<Alert>> {
+        val dedupedAlerts = mutableListOf<Alert>()
+        val newAlerts = mutableListOf<Alert>()
+        val currentTime = Instant.now()
+
+        aggResultBuckets.forEach { aggAlertBucket ->
+            val currentAlert = currentAlerts[aggAlertBucket.getBucketKeysHash()]
+            if (currentAlert != null) {
+                // De-duped Alert
+                dedupedAlerts.add(currentAlert.copy(aggregationResultBucket = aggAlertBucket))
+
+                // Remove de-duped Alert from currentAlerts since it is no longer a candidate for a potentially completed Alert
+                currentAlerts.remove(aggAlertBucket.getBucketKeysHash())
+            } else {
+                // New Alert
+                val alertState = if (workflowRunContext?.auditDelegateMonitorAlerts == true) {
+                    Alert.State.AUDIT
+                } else Alert.State.ACTIVE
+                val newAlert = Alert(
+                    monitor = monitor, trigger = trigger, startTime = currentTime,
+                    lastNotificationTime = currentTime, state = alertState, errorMessage = null,
+                    errorHistory = mutableListOf(), actionExecutionResults = mutableListOf(),
+                    schemaVersion = IndexUtils.alertIndexSchemaVersion, aggregationResultBucket = aggAlertBucket,
+                    findingIds = findings, executionId = executionId, workflowId = workflowRunContext?.workflowId ?: ""
+                )
+                newAlerts.add(newAlert)
+            }
+        }
+
+        return mapOf(
+            AlertCategory.DEDUPED to dedupedAlerts,
+            AlertCategory.NEW to newAlerts
+        )
+    }
+
+    fun convertToCompletedAlerts(currentAlerts: Map<String, Alert>?): List<Alert> {
+        val currentTime = Instant.now()
+        return currentAlerts?.map {
+            it.value.copy(
+                state = Alert.State.COMPLETED,
+                endTime = currentTime,
+                errorMessage = null,
+                schemaVersion = IndexUtils.alertIndexSchemaVersion
+            )
+        } ?: listOf()
+    }
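+
+    // The two methods above drive bucket-level alert categorization: buckets that still violate the
+    // trigger de-duplicate against open alerts (DEDUPED), unseen buckets create NEW alerts, and any
+    // entries left in currentAlerts afterwards no longer match a bucket, so the runner can pass them
+    // to convertToCompletedAlerts to close them out.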
+
+    suspend fun upsertMonitorErrorAlert(
+        monitor: Monitor,
+        errorMessage: String,
+        executionId: String?,
+        workflowRunContext: WorkflowRunContext?,
+    ) {
+        val newErrorAlertId = "$ERROR_ALERT_ID_PREFIX-${monitor.id}-${UUID.randomUUID()}"
+
+        val searchRequest = SearchRequest(monitor.dataSources.alertsIndex)
+            .source(
+                SearchSourceBuilder()
+                    .sort(Alert.START_TIME_FIELD, SortOrder.DESC)
+                    .query(
+                        QueryBuilders.boolQuery()
+                            .must(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitor.id))
+                            .must(QueryBuilders.termQuery(Alert.STATE_FIELD, Alert.State.ERROR.name))
+                    )
+            )
+        val searchResponse: SearchResponse = client.suspendUntil { search(searchRequest, it) }
+
+        var alert =
+            composeMonitorErrorAlert(newErrorAlertId, monitor, AlertError(Instant.now(), errorMessage), executionId, workflowRunContext)
+
+        if (searchResponse.hits.totalHits.value > 0L) {
+            if (searchResponse.hits.totalHits.value > 1L) {
+                logger.warn("There are [${searchResponse.hits.totalHits.value}] error alerts for monitor [${monitor.id}]")
+            }
+            // Deserialize first/latest Alert
+            val hit = searchResponse.hits.hits[0]
+            val xcp = contentParser(hit.sourceRef)
+            val existingErrorAlert = Alert.parse(xcp, hit.id, hit.version)
+
+            val currentTime = Instant.now()
+            alert = if (alert.errorMessage != existingErrorAlert.errorMessage) {
+                val newErrorHistory = existingErrorAlert.errorHistory.update(
+                    AlertError(existingErrorAlert.startTime, existingErrorAlert.errorMessage!!)
+                )
+                alert.copy(
+                    id = existingErrorAlert.id,
+                    errorHistory = newErrorHistory,
+                    startTime = currentTime,
+                    lastNotificationTime = currentTime
+                )
+            } else {
+                existingErrorAlert.copy(lastNotificationTime = currentTime)
+            }
+        }
+
+        val alertIndexRequest = IndexRequest(monitor.dataSources.alertsIndex)
+            .routing(alert.monitorId)
+            .source(alert.toXContentWithUser(XContentFactory.jsonBuilder()))
+            .opType(DocWriteRequest.OpType.INDEX)
+            .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+            .id(alert.id)
+
+        val indexResponse: IndexResponse = client.suspendUntil { index(alertIndexRequest, it) }
+        logger.debug("Monitor error Alert successfully upserted. Op result: ${indexResponse.result}")
+    }
+
+    suspend fun clearMonitorErrorAlert(monitor: Monitor) {
+        val currentTime = Instant.now()
+        try {
+            val searchRequest = SearchRequest(monitor.dataSources.alertsIndex)
+                .source(
+                    SearchSourceBuilder()
+                        .size(MAX_SEARCH_SIZE)
+                        .sort(Alert.START_TIME_FIELD, SortOrder.DESC)
+                        .query(
+                            QueryBuilders.boolQuery()
+                                .must(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitor.id))
+                                .must(QueryBuilders.termQuery(Alert.STATE_FIELD, Alert.State.ERROR.name))
+                        )
+                )
+            searchRequest.cancelAfterTimeInterval = ALERTS_SEARCH_TIMEOUT
+            val searchResponse: SearchResponse = client.suspendUntil { search(searchRequest, it) }
+            // If there's no error alert present, there's nothing to clear. We can stop here.
+            if (searchResponse.hits.totalHits.value == 0L) {
+                return
+            }
+
+            if (searchResponse.hits.totalHits.value > 1L) {
+                logger.warn("Found [${searchResponse.hits.totalHits.value}] error alerts for monitor [${monitor.id}] while clearing")
+            }
+
+            val indexRequests = mutableListOf<IndexRequest>()
+            searchResponse.hits.hits.forEach { hit ->
+                // Deserialize each error Alert and mark it cleared by setting its end time
+                val xcp = contentParser(hit.sourceRef)
+                val existingErrorAlert = Alert.parse(xcp, hit.id, hit.version)
+
+                val updatedAlert = existingErrorAlert.copy(
+                    endTime = currentTime
+                )
+
+                indexRequests += IndexRequest(monitor.dataSources.alertsIndex)
+                    .routing(monitor.id)
+                    .id(updatedAlert.id)
+                    .source(updatedAlert.toXContentWithUser(XContentFactory.jsonBuilder()))
+                    .opType(DocWriteRequest.OpType.INDEX)
+            }
+
+            val bulkResponse: BulkResponse = client.suspendUntil {
+                bulk(BulkRequest().add(indexRequests).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), it)
+            }
+            if (bulkResponse.hasFailures()) {
+                bulkResponse.items.forEach { item ->
+                    if (item.isFailed) {
+                        logger.debug("Failed clearing error alert ${item.id} of monitor [${monitor.id}]")
+                    }
+                }
+            } else {
+                logger.debug("[${bulkResponse.items.size}] Error Alerts successfully cleared. End time set to: $currentTime")
+            }
+        } catch (e: Exception) {
+            logger.error("Error clearing monitor error alerts for monitor [${monitor.id}]: ${ExceptionsHelper.detailedMessage(e)}")
+        }
+    }
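+
+    // Error-alert lifecycle: upsertMonitorErrorAlert creates or refreshes a single ERROR alert per
+    // monitor while runs keep failing, clearMonitorErrorAlert stamps an endTime on it after the next
+    // successful run, and moveClearedErrorAlertsToHistory (below) relocates cleared alerts to the
+    // history index.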
+                )
+            searchRequest.cancelAfterTimeInterval = ALERTS_SEARCH_TIMEOUT
+            val searchResponse: SearchResponse = client.suspendUntil { search(searchRequest, it) }
+
+            if (searchResponse.hits.totalHits.value == 0L) {
+                return
+            }
+
+            // Copy to history index
+
+            val copyRequests = mutableListOf<IndexRequest>()
+
+            searchResponse.hits.hits.forEach { hit ->
+
+                val xcp = contentParser(hit.sourceRef)
+                val alert = Alert.parse(xcp, hit.id, hit.version)
+
+                copyRequests.add(
+                    IndexRequest(alertHistoryIndex)
+                        .routing(alert.monitorId)
+                        .source(hit.sourceRef, XContentType.JSON)
+                        .version(hit.version)
+                        .versionType(VersionType.EXTERNAL_GTE)
+                        .id(hit.id)
+                        .timeout(MonitorRunnerService.monitorCtx.indexTimeout)
+                )
+            }
+
+            val bulkResponse: BulkResponse = client.suspendUntil {
+                bulk(BulkRequest().add(copyRequests).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), it)
+            }
+            if (bulkResponse.hasFailures()) {
+                bulkResponse.items.forEach { item ->
+                    if (item.isFailed) {
+                        logger.error("Failed copying error alert [${item.id}] to history index [$alertHistoryIndex]")
+                    }
+                }
+                return
+            }
+
+            // Delete from alertIndex
+
+            val alertIds = searchResponse.hits.hits.map { it.id }
+
+            val deleteResponse: BulkByScrollResponse = suspendCoroutine { cont ->
+                DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE)
+                    .source(alertIndex)
+                    .filter(QueryBuilders.termsQuery("_id", alertIds))
+                    .refresh(true)
+                    .timeout(ALERTS_SEARCH_TIMEOUT)
+                    .execute(
+                        object : ActionListener<BulkByScrollResponse> {
+                            override fun onResponse(response: BulkByScrollResponse) = cont.resume(response)
+                            override fun onFailure(t: Exception) = cont.resumeWithException(t)
+                        }
+                    )
+            }
+            deleteResponse.bulkFailures.forEach {
+                logger.error("Failed deleting alert while moving cleared alerts: [${it.id}] cause: [${it.cause}]")
+            }
+        } catch (e: Exception) {
+            logger.error("Failed moving cleared error alerts to history index: ${ExceptionsHelper.detailedMessage(e)}")
+        }
+    }
+
+    suspend fun saveAlerts(
+        dataSources: DataSources,
+        alerts: List<Alert>,
+        retryPolicy: BackoffPolicy,
+        allowUpdatingAcknowledgedAlert: Boolean = false,
+        routingId: String // routing is mandatory and set as the monitor id; for workflow chained alerts we pass the workflow id as routing
+    ) {
+        val alertsIndex = dataSources.alertsIndex
+        val alertsHistoryIndex = dataSources.alertsHistoryIndex
+
+        var requestsToRetry = alerts.flatMap { alert ->
+            // We don't want to set the version when saving alerts because the MonitorRunner has first priority when writing alerts.
+            // In the rare event that a user acknowledges an alert between when it's read and when it's written
+            // back, we're ok if that acknowledgement is lost. It's easier to get the user to retry than for the runner to
+            // spend time reloading the alert and writing it back.
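+            // Summary of the mapping below (one branch per state):
+            //   ACTIVE, ERROR -> index into the alerts index
+            //   ACKNOWLEDGED  -> index only when allowUpdatingAcknowledgedAlert (Bucket-Level de-duping), otherwise illegal
+            //   AUDIT         -> index into the history index when enabled, otherwise the alerts index
+            //   DELETED       -> always illegal here
+            //   COMPLETED     -> delete from the alerts index, plus a copy to the history index when enabled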
+            when (alert.state) {
+                Alert.State.ACTIVE, Alert.State.ERROR -> {
+                    listOf<DocWriteRequest<*>>(
+                        IndexRequest(alertsIndex)
+                            .routing(routingId)
+                            .source(alert.toXContentWithUser(XContentFactory.jsonBuilder()))
+                            .id(if (alert.id != Alert.NO_ID) alert.id else null)
+                    )
+                }
+                Alert.State.ACKNOWLEDGED -> {
+                    // Allow ACKNOWLEDGED Alerts to be updated for Bucket-Level Monitors since de-duped Alerts can be ACKNOWLEDGED
+                    // and updated by the MonitorRunner
+                    if (allowUpdatingAcknowledgedAlert) {
+                        listOf<DocWriteRequest<*>>(
+                            IndexRequest(alertsIndex)
+                                .routing(routingId)
+                                .source(alert.toXContentWithUser(XContentFactory.jsonBuilder()))
+                                .id(if (alert.id != Alert.NO_ID) alert.id else null)
+                        )
+                    } else {
+                        throw IllegalStateException("Unexpected attempt to save ${alert.state} alert: $alert")
+                    }
+                }
+                Alert.State.AUDIT -> {
+                    val index = if (alertIndices.isAlertHistoryEnabled()) {
+                        dataSources.alertsHistoryIndex
+                    } else dataSources.alertsIndex
+                    listOf<DocWriteRequest<*>>(
+                        IndexRequest(index)
+                            .routing(routingId)
+                            .source(alert.toXContentWithUser(XContentFactory.jsonBuilder()))
+                            .id(if (alert.id != Alert.NO_ID) alert.id else null)
+                    )
+                }
+                Alert.State.DELETED -> {
+                    throw IllegalStateException("Unexpected attempt to save ${alert.state} alert: $alert")
+                }
+                Alert.State.COMPLETED -> {
+                    listOfNotNull<DocWriteRequest<*>>(
+                        DeleteRequest(alertsIndex, alert.id)
+                            .routing(routingId),
+                        // Only add the completed alert to the history index if history is enabled
+                        if (alertIndices.isAlertHistoryEnabled()) {
+                            IndexRequest(alertsHistoryIndex)
+                                .routing(routingId)
+                                .source(alert.toXContentWithUser(XContentFactory.jsonBuilder()))
+                                .id(alert.id)
+                        } else null
+                    )
+                }
+            }
+        }
+
+        if (requestsToRetry.isEmpty()) return
+        // Retry Bulk requests if there was any 429 response
+        retryPolicy.retry(logger, listOf(RestStatus.TOO_MANY_REQUESTS)) {
+            val bulkRequest = BulkRequest().add(requestsToRetry).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+            val bulkResponse: BulkResponse = client.suspendUntil { client.bulk(bulkRequest, it) }
+            val failedResponses = (bulkResponse.items ?: arrayOf()).filter { it.isFailed }
+            requestsToRetry = failedResponses.filter { it.status() == RestStatus.TOO_MANY_REQUESTS }
+                .map { bulkRequest.requests()[it.itemId] as IndexRequest }
+
+            if (requestsToRetry.isNotEmpty()) {
+                val retryCause = failedResponses.first { it.status() == RestStatus.TOO_MANY_REQUESTS }.failure.cause
+                throw ExceptionsHelper.convertToOpenSearchException(retryCause)
+            }
+        }
+    }
+
+    /**
+     * This is a separate method created specifically for saving new Alerts during the Bucket-Level Monitor run.
+     * Alerts are saved in two batches during the execution of a Bucket-Level Monitor: once before the Actions are executed
+     * and once afterwards. This method saves Alerts to the monitor's alertIndex but returns the same Alerts with their document IDs.
+     *
+     * The Alerts are required with their indexed ID so that when the new Alerts are updated after the Action execution,
+     * the ID is available for the index request so that the existing Alert can be updated, instead of creating a duplicate Alert document.
+     */
+    suspend fun saveNewAlerts(dataSources: DataSources, alerts: List<Alert>, retryPolicy: BackoffPolicy): List<Alert> {
+        val savedAlerts = mutableListOf<Alert>()
+        var alertsBeingIndexed = alerts
+        var requestsToRetry: MutableList<IndexRequest> = alerts.map { alert ->
+            if (alert.state != Alert.State.ACTIVE && alert.state != Alert.State.AUDIT) {
+                throw IllegalStateException("Unexpected attempt to save new alert [$alert] with state [${alert.state}]")
+            }
+            if (alert.id != Alert.NO_ID) {
+                throw IllegalStateException("Unexpected attempt to save new alert [$alert] with an existing alert ID [${alert.id}]")
+            }
+            val alertIndex = if (alert.state == Alert.State.AUDIT && alertIndices.isAlertHistoryEnabled()) {
+                dataSources.alertsHistoryIndex
+            } else dataSources.alertsIndex
+            IndexRequest(alertIndex)
+                .routing(alert.monitorId)
+                .source(alert.toXContentWithUser(XContentFactory.jsonBuilder()))
+        }.toMutableList()
+
+        if (requestsToRetry.isEmpty()) return listOf()
+
+        // Retry Bulk requests if there was any 429 response.
+        // The responses of a bulk request will be in the same order as the individual requests.
+        // If the index request succeeded for an Alert, the document ID from the response is taken and saved in the Alert.
+        // If the index request is to be retried, the Alert is saved separately as well so that its relative ordering is maintained in
+        // relation to the index request in the retried bulk request for when it eventually succeeds.
+        retryPolicy.retry(logger, listOf(RestStatus.TOO_MANY_REQUESTS)) {
+            val bulkRequest = BulkRequest().add(requestsToRetry).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+            val bulkResponse: BulkResponse = client.suspendUntil { client.bulk(bulkRequest, it) }
+            // TODO: This is only used to retrieve the retryCause, could instead fetch it from the bulkResponse iteration below
+            val failedResponses = (bulkResponse.items ?: arrayOf()).filter { it.isFailed }
+
+            requestsToRetry = mutableListOf()
+            val alertsBeingRetried = mutableListOf<Alert>()
+            bulkResponse.items.forEach { item ->
+                if (item.isFailed) {
+                    // TODO: What if the failure cause was not TOO_MANY_REQUESTS, should these be saved and logged?
+                    if (item.status() == RestStatus.TOO_MANY_REQUESTS) {
+                        requestsToRetry.add(bulkRequest.requests()[item.itemId] as IndexRequest)
+                        alertsBeingRetried.add(alertsBeingIndexed[item.itemId])
+                    }
+                } else {
+                    // The ID of the BulkItemResponse in this case is the document ID resulting from the DocWriteRequest operation
+                    savedAlerts.add(alertsBeingIndexed[item.itemId].copy(id = item.id))
+                }
+            }
+
+            alertsBeingIndexed = alertsBeingRetried
+
+            if (requestsToRetry.isNotEmpty()) {
+                val retryCause = failedResponses.first { it.status() == RestStatus.TOO_MANY_REQUESTS }.failure.cause
+                throw ExceptionsHelper.convertToOpenSearchException(retryCause)
+            }
+        }
+
+        return savedAlerts
+    }
+
+    private fun contentParser(bytesReference: BytesReference): XContentParser {
+        val xcp = XContentHelper.createParser(
+            xContentRegistry,
+            LoggingDeprecationHandler.INSTANCE,
+            bytesReference,
+            XContentType.JSON
+        )
+        XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp)
+        return xcp
+    }
+
+    /**
+     * Searches for Alerts in the monitor's alertIndex.
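+     *
+     * As a rough sketch of the request shape built below (illustrative, with placeholder values):
+     *
+     *   POST /<alertsIndex>/_search?routing=<monitorId>
+     *   { "size": <size>, "query": { "bool": { "must": [ { "term": { "monitor_id": "<monitorId>" } } ] } } }
+     *
+     * with an additional workflow_id term appended when the monitor runs as a workflow delegate.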
+     *
+     * @param monitor The Monitor to get Alerts for
+     * @param size The number of search hits (Alerts) to return
+     */
+    private suspend fun searchAlerts(monitor: Monitor, size: Int, workflowRunContext: WorkflowRunContext?): SearchResponse {
+        val monitorId = monitor.id
+        val alertIndex = monitor.dataSources.alertsIndex
+
+        val queryBuilder = QueryBuilders.boolQuery()
+            .must(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitorId))
+        if (workflowRunContext != null) {
+            queryBuilder.must(QueryBuilders.termQuery(Alert.WORKFLOW_ID_FIELD, workflowRunContext.workflowId))
+        }
+        val searchSourceBuilder = SearchSourceBuilder()
+            .size(size)
+            .query(queryBuilder)
+
+        val searchRequest = SearchRequest(alertIndex)
+            .routing(monitorId)
+            .source(searchSourceBuilder)
+        val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) }
+        if (searchResponse.status() != RestStatus.OK) {
+            throw (searchResponse.firstFailureOrNull()?.cause ?: RuntimeException("Unknown error loading alerts"))
+        }
+
+        return searchResponse
+    }
+
+    /**
+     * Searches for ACTIVE/ACKNOWLEDGED chained alerts in the workflow's alertIndex.
+     *
+     * @param workflow The Workflow to get chained Alerts for
+     * @param size The number of search hits (Alerts) to return
+     */
+    private suspend fun searchAlerts(
+        workflow: Workflow,
+        size: Int,
+        dataSources: DataSources,
+    ): SearchResponse {
+        val workflowId = workflow.id
+        val alertIndex = dataSources.alertsIndex
+
+        val queryBuilder = QueryBuilders.boolQuery()
+            .must(QueryBuilders.termQuery(Alert.WORKFLOW_ID_FIELD, workflowId))
+            .must(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, ""))
+        val searchSourceBuilder = SearchSourceBuilder()
+            .size(size)
+            .query(queryBuilder)
+
+        val searchRequest = SearchRequest(alertIndex)
+            .routing(workflowId)
+            .source(searchSourceBuilder)
+        val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) }
+        if (searchResponse.status() != RestStatus.OK) {
+            throw (searchResponse.firstFailureOrNull()?.cause ?: RuntimeException("Unknown error loading alerts"))
+        }
+        return searchResponse
+    }
+
+    private fun List<AlertError>?.update(alertError: AlertError?): List<AlertError> {
+        return when {
+            this == null && alertError == null -> emptyList()
+            this != null && alertError == null -> this
+            this == null && alertError != null -> listOf(alertError)
+            this != null && alertError != null -> (listOf(alertError) + this).take(10)
+            else -> throw IllegalStateException("Unreachable code reached!")
+        }
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/AlertingPlugin.kt b/alerting/bin/main/org/opensearch/alerting/AlertingPlugin.kt
new file mode 100644
index 000000000..e0d73658f
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/AlertingPlugin.kt
@@ -0,0 +1,387 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting
+
+import org.opensearch.action.ActionRequest
+import org.opensearch.alerting.action.ExecuteMonitorAction
+import org.opensearch.alerting.action.ExecuteWorkflowAction
+import org.opensearch.alerting.action.GetDestinationsAction
+import org.opensearch.alerting.action.GetEmailAccountAction
+import org.opensearch.alerting.action.GetEmailGroupAction
+import org.opensearch.alerting.action.SearchEmailAccountAction
+import org.opensearch.alerting.action.SearchEmailGroupAction
+import org.opensearch.alerting.alerts.AlertIndices
+import org.opensearch.alerting.core.JobSweeper
+import org.opensearch.alerting.core.ScheduledJobIndices
+import 
org.opensearch.alerting.core.action.node.ScheduledJobsStatsAction +import org.opensearch.alerting.core.action.node.ScheduledJobsStatsTransportAction +import org.opensearch.alerting.core.resthandler.RestScheduledJobStatsHandler +import org.opensearch.alerting.core.schedule.JobScheduler +import org.opensearch.alerting.core.settings.LegacyOpenDistroScheduledJobSettings +import org.opensearch.alerting.core.settings.ScheduledJobSettings +import org.opensearch.alerting.resthandler.RestAcknowledgeAlertAction +import org.opensearch.alerting.resthandler.RestAcknowledgeChainedAlertAction +import org.opensearch.alerting.resthandler.RestDeleteMonitorAction +import org.opensearch.alerting.resthandler.RestDeleteWorkflowAction +import org.opensearch.alerting.resthandler.RestExecuteMonitorAction +import org.opensearch.alerting.resthandler.RestExecuteWorkflowAction +import org.opensearch.alerting.resthandler.RestGetAlertsAction +import org.opensearch.alerting.resthandler.RestGetDestinationsAction +import org.opensearch.alerting.resthandler.RestGetEmailAccountAction +import org.opensearch.alerting.resthandler.RestGetEmailGroupAction +import org.opensearch.alerting.resthandler.RestGetFindingsAction +import org.opensearch.alerting.resthandler.RestGetMonitorAction +import org.opensearch.alerting.resthandler.RestGetWorkflowAction +import org.opensearch.alerting.resthandler.RestGetWorkflowAlertsAction +import org.opensearch.alerting.resthandler.RestIndexMonitorAction +import org.opensearch.alerting.resthandler.RestIndexWorkflowAction +import org.opensearch.alerting.resthandler.RestSearchEmailAccountAction +import org.opensearch.alerting.resthandler.RestSearchEmailGroupAction +import org.opensearch.alerting.resthandler.RestSearchMonitorAction +import org.opensearch.alerting.script.TriggerScript +import org.opensearch.alerting.service.DeleteMonitorService +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.settings.DestinationSettings +import org.opensearch.alerting.settings.LegacyOpenDistroAlertingSettings +import org.opensearch.alerting.settings.LegacyOpenDistroDestinationSettings +import org.opensearch.alerting.transport.TransportAcknowledgeAlertAction +import org.opensearch.alerting.transport.TransportAcknowledgeChainedAlertAction +import org.opensearch.alerting.transport.TransportDeleteMonitorAction +import org.opensearch.alerting.transport.TransportDeleteWorkflowAction +import org.opensearch.alerting.transport.TransportExecuteMonitorAction +import org.opensearch.alerting.transport.TransportExecuteWorkflowAction +import org.opensearch.alerting.transport.TransportGetAlertsAction +import org.opensearch.alerting.transport.TransportGetDestinationsAction +import org.opensearch.alerting.transport.TransportGetEmailAccountAction +import org.opensearch.alerting.transport.TransportGetEmailGroupAction +import org.opensearch.alerting.transport.TransportGetFindingsSearchAction +import org.opensearch.alerting.transport.TransportGetMonitorAction +import org.opensearch.alerting.transport.TransportGetWorkflowAction +import org.opensearch.alerting.transport.TransportGetWorkflowAlertsAction +import org.opensearch.alerting.transport.TransportIndexMonitorAction +import org.opensearch.alerting.transport.TransportIndexWorkflowAction +import org.opensearch.alerting.transport.TransportSearchEmailAccountAction +import org.opensearch.alerting.transport.TransportSearchEmailGroupAction +import org.opensearch.alerting.transport.TransportSearchMonitorAction +import 
org.opensearch.alerting.util.DocLevelMonitorQueries +import org.opensearch.alerting.util.destinationmigration.DestinationMigrationCoordinator +import org.opensearch.client.Client +import org.opensearch.cluster.metadata.IndexNameExpressionResolver +import org.opensearch.cluster.node.DiscoveryNodes +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.settings.ClusterSettings +import org.opensearch.common.settings.IndexScopedSettings +import org.opensearch.common.settings.Setting +import org.opensearch.common.settings.Settings +import org.opensearch.common.settings.SettingsFilter +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder +import org.opensearch.commons.alerting.model.BucketLevelTrigger +import org.opensearch.commons.alerting.model.ChainedAlertTrigger +import org.opensearch.commons.alerting.model.ClusterMetricsInput +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocumentLevelTrigger +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.QueryLevelTrigger +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.core.action.ActionResponse +import org.opensearch.core.common.io.stream.NamedWriteableRegistry +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.env.Environment +import org.opensearch.env.NodeEnvironment +import org.opensearch.index.IndexModule +import org.opensearch.painless.spi.PainlessExtension +import org.opensearch.painless.spi.Whitelist +import org.opensearch.painless.spi.WhitelistLoader +import org.opensearch.percolator.PercolatorPluginExt +import org.opensearch.plugins.ActionPlugin +import org.opensearch.plugins.ReloadablePlugin +import org.opensearch.plugins.ScriptPlugin +import org.opensearch.plugins.SearchPlugin +import org.opensearch.repositories.RepositoriesService +import org.opensearch.rest.RestController +import org.opensearch.rest.RestHandler +import org.opensearch.script.ScriptContext +import org.opensearch.script.ScriptService +import org.opensearch.threadpool.ThreadPool +import org.opensearch.watcher.ResourceWatcherService +import java.util.function.Supplier + +/** + * Entry point of the OpenDistro for Elasticsearch alerting plugin + * This class initializes the [RestGetMonitorAction], [RestDeleteMonitorAction], [RestIndexMonitorAction] rest handlers. + * It also adds [Monitor.XCONTENT_REGISTRY], [SearchInput.XCONTENT_REGISTRY], [QueryLevelTrigger.XCONTENT_REGISTRY], + * [BucketLevelTrigger.XCONTENT_REGISTRY], [ClusterMetricsInput.XCONTENT_REGISTRY] to the [NamedXContentRegistry] so that we are able to deserialize the custom named objects. 
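+ * It also registers the plugin's transport actions, cluster settings, and the [TriggerScript] painless
+ * script context; these are wired up in [getActions], [getSettings], and [getContexts] below.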
+ */
+internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, ReloadablePlugin, SearchPlugin, PercolatorPluginExt() {
+
+    override fun getContextWhitelists(): Map<ScriptContext<*>, List<Whitelist>> {
+        val whitelist = WhitelistLoader.loadFromResourceFiles(javaClass, "org.opensearch.alerting.txt")
+        return mapOf(TriggerScript.CONTEXT to listOf(whitelist))
+    }
+
+    companion object {
+        @JvmField val OPEN_SEARCH_DASHBOARDS_USER_AGENT = "OpenSearch-Dashboards"
+
+        @JvmField val UI_METADATA_EXCLUDE = arrayOf("monitor.${Monitor.UI_METADATA_FIELD}")
+
+        @JvmField val MONITOR_BASE_URI = "/_plugins/_alerting/monitors"
+        @JvmField val WORKFLOW_BASE_URI = "/_plugins/_alerting/workflows"
+        @JvmField val DESTINATION_BASE_URI = "/_plugins/_alerting/destinations"
+
+        @JvmField val LEGACY_OPENDISTRO_MONITOR_BASE_URI = "/_opendistro/_alerting/monitors"
+
+        @JvmField val LEGACY_OPENDISTRO_DESTINATION_BASE_URI = "/_opendistro/_alerting/destinations"
+
+        @JvmField val EMAIL_ACCOUNT_BASE_URI = "$DESTINATION_BASE_URI/email_accounts"
+
+        @JvmField val EMAIL_GROUP_BASE_URI = "$DESTINATION_BASE_URI/email_groups"
+
+        @JvmField val LEGACY_OPENDISTRO_EMAIL_ACCOUNT_BASE_URI = "$LEGACY_OPENDISTRO_DESTINATION_BASE_URI/email_accounts"
+
+        @JvmField val LEGACY_OPENDISTRO_EMAIL_GROUP_BASE_URI = "$LEGACY_OPENDISTRO_DESTINATION_BASE_URI/email_groups"
+
+        @JvmField val FINDING_BASE_URI = "/_plugins/_alerting/findings"
+
+        @JvmField val ALERTING_JOB_TYPES = listOf("monitor", "workflow")
+    }
+
+    lateinit var runner: MonitorRunnerService
+    lateinit var scheduler: JobScheduler
+    lateinit var sweeper: JobSweeper
+    lateinit var scheduledJobIndices: ScheduledJobIndices
+    lateinit var docLevelMonitorQueries: DocLevelMonitorQueries
+    lateinit var threadPool: ThreadPool
+    lateinit var alertIndices: AlertIndices
+    lateinit var clusterService: ClusterService
+    lateinit var destinationMigrationCoordinator: DestinationMigrationCoordinator
+
+    override fun getRestHandlers(
+        settings: Settings,
+        restController: RestController,
+        clusterSettings: ClusterSettings,
+        indexScopedSettings: IndexScopedSettings,
+        settingsFilter: SettingsFilter,
+        indexNameExpressionResolver: IndexNameExpressionResolver?,
+        nodesInCluster: Supplier<DiscoveryNodes>
+    ): List<RestHandler> {
+        return listOf(
+            RestGetMonitorAction(),
+            RestDeleteMonitorAction(),
+            RestIndexMonitorAction(),
+            RestIndexWorkflowAction(),
+            RestSearchMonitorAction(settings, clusterService),
+            RestExecuteMonitorAction(),
+            RestExecuteWorkflowAction(),
+            RestAcknowledgeAlertAction(),
+            RestAcknowledgeChainedAlertAction(),
+            RestScheduledJobStatsHandler("_alerting"),
+            RestSearchEmailAccountAction(),
+            RestGetEmailAccountAction(),
+            RestSearchEmailGroupAction(),
+            RestGetEmailGroupAction(),
+            RestGetDestinationsAction(),
+            RestGetAlertsAction(),
+            RestGetWorkflowAlertsAction(),
+            RestGetFindingsAction(),
+            RestGetWorkflowAction(),
+            RestDeleteWorkflowAction()
+        )
+    }
+
+    override fun getActions(): List<ActionPlugin.ActionHandler<out ActionRequest, out ActionResponse>> {
+        return listOf(
+            ActionPlugin.ActionHandler(ScheduledJobsStatsAction.INSTANCE, ScheduledJobsStatsTransportAction::class.java),
+            ActionPlugin.ActionHandler(AlertingActions.INDEX_MONITOR_ACTION_TYPE, TransportIndexMonitorAction::class.java),
+            ActionPlugin.ActionHandler(AlertingActions.GET_MONITOR_ACTION_TYPE, TransportGetMonitorAction::class.java),
+            ActionPlugin.ActionHandler(ExecuteMonitorAction.INSTANCE, TransportExecuteMonitorAction::class.java),
+            ActionPlugin.ActionHandler(AlertingActions.SEARCH_MONITORS_ACTION_TYPE, TransportSearchMonitorAction::class.java),
+            ActionPlugin.ActionHandler(AlertingActions.DELETE_MONITOR_ACTION_TYPE, TransportDeleteMonitorAction::class.java),
+            ActionPlugin.ActionHandler(AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE, TransportAcknowledgeAlertAction::class.java),
+            ActionPlugin.ActionHandler(
+                AlertingActions.ACKNOWLEDGE_CHAINED_ALERTS_ACTION_TYPE, TransportAcknowledgeChainedAlertAction::class.java
+            ),
+            ActionPlugin.ActionHandler(GetEmailAccountAction.INSTANCE, TransportGetEmailAccountAction::class.java),
+            ActionPlugin.ActionHandler(SearchEmailAccountAction.INSTANCE, TransportSearchEmailAccountAction::class.java),
+            ActionPlugin.ActionHandler(GetEmailGroupAction.INSTANCE, TransportGetEmailGroupAction::class.java),
+            ActionPlugin.ActionHandler(SearchEmailGroupAction.INSTANCE, TransportSearchEmailGroupAction::class.java),
+            ActionPlugin.ActionHandler(GetDestinationsAction.INSTANCE, TransportGetDestinationsAction::class.java),
+            ActionPlugin.ActionHandler(AlertingActions.GET_ALERTS_ACTION_TYPE, TransportGetAlertsAction::class.java),
+            ActionPlugin.ActionHandler(AlertingActions.GET_WORKFLOW_ALERTS_ACTION_TYPE, TransportGetWorkflowAlertsAction::class.java),
+            ActionPlugin.ActionHandler(AlertingActions.GET_FINDINGS_ACTION_TYPE, TransportGetFindingsSearchAction::class.java),
+            ActionPlugin.ActionHandler(AlertingActions.INDEX_WORKFLOW_ACTION_TYPE, TransportIndexWorkflowAction::class.java),
+            ActionPlugin.ActionHandler(AlertingActions.GET_WORKFLOW_ACTION_TYPE, TransportGetWorkflowAction::class.java),
+            ActionPlugin.ActionHandler(AlertingActions.DELETE_WORKFLOW_ACTION_TYPE, TransportDeleteWorkflowAction::class.java),
+            ActionPlugin.ActionHandler(ExecuteWorkflowAction.INSTANCE, TransportExecuteWorkflowAction::class.java)
+        )
+    }
+
+    override fun getNamedXContent(): List<NamedXContentRegistry.Entry> {
+        return listOf(
+            Monitor.XCONTENT_REGISTRY,
+            SearchInput.XCONTENT_REGISTRY,
+            DocLevelMonitorInput.XCONTENT_REGISTRY,
+            QueryLevelTrigger.XCONTENT_REGISTRY,
+            BucketLevelTrigger.XCONTENT_REGISTRY,
+            ClusterMetricsInput.XCONTENT_REGISTRY,
+            DocumentLevelTrigger.XCONTENT_REGISTRY,
+            ChainedAlertTrigger.XCONTENT_REGISTRY,
+            Workflow.XCONTENT_REGISTRY
+        )
+    }
+
+    override fun createComponents(
+        client: Client,
+        clusterService: ClusterService,
+        threadPool: ThreadPool,
+        resourceWatcherService: ResourceWatcherService,
+        scriptService: ScriptService,
+        xContentRegistry: NamedXContentRegistry,
+        environment: Environment,
+        nodeEnvironment: NodeEnvironment,
+        namedWriteableRegistry: NamedWriteableRegistry,
+        indexNameExpressionResolver: IndexNameExpressionResolver,
+        repositoriesServiceSupplier: Supplier<RepositoriesService>
+    ): Collection<Any> {
+        // Need to figure out how to use the OpenSearch DI classes rather than handwiring things here.
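+        // Note: the components returned at the end of this method are placed into OpenSearch's
+        // dependency-injection container, which is what makes them injectable into the Transport*Action
+        // classes registered in getActions() above.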
+        val settings = environment.settings()
+        alertIndices = AlertIndices(settings, client, threadPool, clusterService)
+        runner = MonitorRunnerService
+            .registerClusterService(clusterService)
+            .registerClient(client)
+            .registerNamedXContentRegistry(xContentRegistry)
+            .registerindexNameExpressionResolver(indexNameExpressionResolver)
+            .registerScriptService(scriptService)
+            .registerSettings(settings)
+            .registerThreadPool(threadPool)
+            .registerAlertIndices(alertIndices)
+            .registerInputService(InputService(client, scriptService, namedWriteableRegistry, xContentRegistry, clusterService, settings))
+            .registerTriggerService(TriggerService(scriptService))
+            .registerAlertService(AlertService(client, xContentRegistry, alertIndices))
+            .registerDocLevelMonitorQueries(DocLevelMonitorQueries(client, clusterService))
+            .registerWorkflowService(WorkflowService(client, xContentRegistry))
+            .registerConsumers()
+            .registerDestinationSettings()
+        scheduledJobIndices = ScheduledJobIndices(client.admin(), clusterService)
+        docLevelMonitorQueries = DocLevelMonitorQueries(client, clusterService)
+        scheduler = JobScheduler(threadPool, runner)
+        sweeper = JobSweeper(environment.settings(), client, clusterService, threadPool, xContentRegistry, scheduler, ALERTING_JOB_TYPES)
+        destinationMigrationCoordinator = DestinationMigrationCoordinator(client, clusterService, threadPool, scheduledJobIndices)
+        this.threadPool = threadPool
+        this.clusterService = clusterService
+
+        MonitorMetadataService.initialize(
+            client,
+            clusterService,
+            xContentRegistry,
+            settings
+        )
+
+        WorkflowMetadataService.initialize(
+            client,
+            clusterService,
+            xContentRegistry,
+            settings
+        )
+
+        DeleteMonitorService.initialize(client)
+
+        return listOf(sweeper, scheduler, runner, scheduledJobIndices, docLevelMonitorQueries, destinationMigrationCoordinator)
+    }
+
+    override fun getSettings(): List<Setting<*>> {
+        return listOf(
+            ScheduledJobSettings.REQUEST_TIMEOUT,
+            ScheduledJobSettings.SWEEP_BACKOFF_MILLIS,
+            ScheduledJobSettings.SWEEP_BACKOFF_RETRY_COUNT,
+            ScheduledJobSettings.SWEEP_PERIOD,
+            ScheduledJobSettings.SWEEP_PAGE_SIZE,
+            ScheduledJobSettings.SWEEPER_ENABLED,
+            LegacyOpenDistroScheduledJobSettings.REQUEST_TIMEOUT,
+            LegacyOpenDistroScheduledJobSettings.SWEEP_BACKOFF_MILLIS,
+            LegacyOpenDistroScheduledJobSettings.SWEEP_BACKOFF_RETRY_COUNT,
+            LegacyOpenDistroScheduledJobSettings.SWEEP_PERIOD,
+            LegacyOpenDistroScheduledJobSettings.SWEEP_PAGE_SIZE,
+            LegacyOpenDistroScheduledJobSettings.SWEEPER_ENABLED,
+            AlertingSettings.INPUT_TIMEOUT,
+            AlertingSettings.INDEX_TIMEOUT,
+            AlertingSettings.BULK_TIMEOUT,
+            AlertingSettings.ALERT_BACKOFF_MILLIS,
+            AlertingSettings.ALERT_BACKOFF_COUNT,
+            AlertingSettings.MOVE_ALERTS_BACKOFF_MILLIS,
+            AlertingSettings.MOVE_ALERTS_BACKOFF_COUNT,
+            AlertingSettings.ALERT_HISTORY_ENABLED,
+            AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD,
+            AlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE,
+            AlertingSettings.ALERT_HISTORY_MAX_DOCS,
+            AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD,
+            AlertingSettings.ALERTING_MAX_MONITORS,
+            AlertingSettings.REQUEST_TIMEOUT,
+            AlertingSettings.MAX_ACTION_THROTTLE_VALUE,
+            AlertingSettings.FILTER_BY_BACKEND_ROLES,
+            AlertingSettings.MAX_ACTIONABLE_ALERT_COUNT,
+            LegacyOpenDistroAlertingSettings.INPUT_TIMEOUT,
+            LegacyOpenDistroAlertingSettings.INDEX_TIMEOUT,
+            LegacyOpenDistroAlertingSettings.BULK_TIMEOUT,
+            LegacyOpenDistroAlertingSettings.ALERT_BACKOFF_MILLIS,
+            LegacyOpenDistroAlertingSettings.ALERT_BACKOFF_COUNT,
+            LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_MILLIS,
+            LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_COUNT,
+            LegacyOpenDistroAlertingSettings.ALERT_HISTORY_ENABLED,
+            LegacyOpenDistroAlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD,
+            LegacyOpenDistroAlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE,
+            LegacyOpenDistroAlertingSettings.ALERT_HISTORY_MAX_DOCS,
+            LegacyOpenDistroAlertingSettings.ALERT_HISTORY_RETENTION_PERIOD,
+            LegacyOpenDistroAlertingSettings.ALERTING_MAX_MONITORS,
+            LegacyOpenDistroAlertingSettings.REQUEST_TIMEOUT,
+            LegacyOpenDistroAlertingSettings.MAX_ACTION_THROTTLE_VALUE,
+            LegacyOpenDistroAlertingSettings.FILTER_BY_BACKEND_ROLES,
+            DestinationSettings.EMAIL_USERNAME,
+            DestinationSettings.EMAIL_PASSWORD,
+            DestinationSettings.ALLOW_LIST,
+            DestinationSettings.HOST_DENY_LIST,
+            LegacyOpenDistroDestinationSettings.EMAIL_USERNAME,
+            LegacyOpenDistroDestinationSettings.EMAIL_PASSWORD,
+            LegacyOpenDistroDestinationSettings.ALLOW_LIST,
+            LegacyOpenDistroDestinationSettings.HOST_DENY_LIST,
+            AlertingSettings.FINDING_HISTORY_ENABLED,
+            AlertingSettings.FINDING_HISTORY_MAX_DOCS,
+            AlertingSettings.FINDING_HISTORY_INDEX_MAX_AGE,
+            AlertingSettings.FINDING_HISTORY_ROLLOVER_PERIOD,
+            AlertingSettings.FINDING_HISTORY_RETENTION_PERIOD
+        )
+    }
+
+    override fun onIndexModule(indexModule: IndexModule) {
+        if (indexModule.index.name == ScheduledJob.SCHEDULED_JOBS_INDEX) {
+            indexModule.addIndexOperationListener(sweeper)
+        }
+    }
+
+    override fun getContexts(): List<ScriptContext<*>> {
+        return listOf(TriggerScript.CONTEXT)
+    }
+
+    override fun reload(settings: Settings) {
+        runner.reloadDestinationSettings(settings)
+    }
+
+    override fun getPipelineAggregations(): List<SearchPlugin.PipelineAggregationSpec> {
+        return listOf(
+            SearchPlugin.PipelineAggregationSpec(
+                BucketSelectorExtAggregationBuilder.NAME,
+                { sin: StreamInput -> BucketSelectorExtAggregationBuilder(sin) },
+                { parser: XContentParser, agg_name: String ->
+                    BucketSelectorExtAggregationBuilder.parse(agg_name, parser)
+                }
+            )
+        )
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/BucketLevelMonitorRunner.kt b/alerting/bin/main/org/opensearch/alerting/BucketLevelMonitorRunner.kt
new file mode 100644
index 000000000..c8adc021c
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/BucketLevelMonitorRunner.kt
@@ -0,0 +1,503 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting
+
+import org.apache.logging.log4j.LogManager
+import org.opensearch.action.bulk.BulkRequest
+import org.opensearch.action.bulk.BulkResponse
+import org.opensearch.action.index.IndexRequest
+import org.opensearch.action.search.SearchRequest
+import org.opensearch.action.search.SearchResponse
+import org.opensearch.action.support.WriteRequest
+import org.opensearch.alerting.model.ActionRunResult
+import org.opensearch.alerting.model.BucketLevelTriggerRunResult
+import org.opensearch.alerting.model.InputRunResults
+import org.opensearch.alerting.model.MonitorRunResult
+import org.opensearch.alerting.opensearchapi.InjectorContextElement
+import org.opensearch.alerting.opensearchapi.retry
+import org.opensearch.alerting.opensearchapi.suspendUntil
+import org.opensearch.alerting.opensearchapi.withClosableContext
+import org.opensearch.alerting.script.BucketLevelTriggerExecutionContext
+import org.opensearch.alerting.util.defaultToPerExecutionAction
+import org.opensearch.alerting.util.getActionExecutionPolicy
+import org.opensearch.alerting.util.getBucketKeysHash
+import org.opensearch.alerting.util.getCombinedTriggerRunResult
+import org.opensearch.alerting.workflow.WorkflowRunContext
+import org.opensearch.common.xcontent.LoggingDeprecationHandler
+import org.opensearch.common.xcontent.XContentType
+import org.opensearch.commons.alerting.model.Alert
+import org.opensearch.commons.alerting.model.BucketLevelTrigger
+import org.opensearch.commons.alerting.model.Finding
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.commons.alerting.model.SearchInput
+import org.opensearch.commons.alerting.model.action.AlertCategory
+import org.opensearch.commons.alerting.model.action.PerAlertActionScope
+import org.opensearch.commons.alerting.model.action.PerExecutionActionScope
+import org.opensearch.commons.alerting.util.string
+import org.opensearch.core.rest.RestStatus
+import org.opensearch.core.xcontent.ToXContent
+import org.opensearch.core.xcontent.XContentBuilder
+import org.opensearch.index.query.BoolQueryBuilder
+import org.opensearch.index.query.QueryBuilders
+import org.opensearch.script.Script
+import org.opensearch.script.ScriptType
+import org.opensearch.script.TemplateScript
+import org.opensearch.search.aggregations.AggregatorFactories
+import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder
+import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder
+import org.opensearch.search.builder.SearchSourceBuilder
+import java.time.Instant
+import java.util.UUID
+
+object BucketLevelMonitorRunner : MonitorRunner() {
+    private val logger = LogManager.getLogger(javaClass)
+
+    override suspend fun runMonitor(
+        monitor: Monitor,
+        monitorCtx: MonitorRunnerExecutionContext,
+        periodStart: Instant,
+        periodEnd: Instant,
+        dryrun: Boolean,
+        workflowRunContext: WorkflowRunContext?,
+        executionId: String
+    ): MonitorRunResult<BucketLevelTriggerRunResult> {
+        val roles = MonitorRunnerService.getRolesForMonitor(monitor)
+        logger.debug("Running monitor: ${monitor.name} with roles: $roles Thread: ${Thread.currentThread().name}")
+
+        if (periodStart == periodEnd) {
+            logger.warn("Start and end time are the same: $periodStart. This monitor will probably only run once.")
+        }
+
+        var monitorResult = MonitorRunResult<BucketLevelTriggerRunResult>(monitor.name, periodStart, periodEnd)
+        val currentAlerts = try {
+            monitorCtx.alertIndices!!.createOrUpdateAlertIndex(monitor.dataSources)
+            monitorCtx.alertIndices!!.createOrUpdateInitialAlertHistoryIndex(monitor.dataSources)
+            if (monitor.dataSources.findingsEnabled == true) {
+                monitorCtx.alertIndices!!.createOrUpdateInitialFindingHistoryIndex(monitor.dataSources)
+            }
+            monitorCtx.alertService!!.loadCurrentAlertsForBucketLevelMonitor(monitor, workflowRunContext)
+        } catch (e: Exception) {
+            // We can't save ERROR alerts to the index here as we don't know if there are existing ACTIVE alerts
+            val id = if (monitor.id.trim().isEmpty()) "_na_" else monitor.id
+            logger.error("Error loading alerts for monitor: $id", e)
+            return monitorResult.copy(error = e)
+        }
+
+        /*
+         * Since the aggregation query can consist of multiple pages, each iteration of the do-while loop only has partial results
+         * from the runBucketLevelTrigger results whereas the currentAlerts has a complete view of existing Alerts. This means that
+         * it can be confirmed if an Alert is new or de-duped local to the do-while loop if a key appears or doesn't appear in
+         * the currentAlerts.
+         * However, it cannot be guaranteed that an existing Alert is COMPLETED until all pages have been
+         * iterated over (since a bucket that did not appear in one page of the aggregation results could appear in a later page).
+         *
+         * To solve for this, the currentAlerts will be acting as a list of "potentially completed alerts" throughout the execution.
+         * When categorizing the Alerts in each iteration, de-duped Alerts will be removed from the currentAlerts map
+         * (for the Trigger being executed) and the Alerts left in currentAlerts after all pages have been iterated through can
+         * be marked as COMPLETED since they were never de-duped.
+         *
+         * Meanwhile, the nextAlerts map will contain Alerts that will exist at the end of this Monitor execution. It is a compilation
+         * across Triggers because in the case of executing actions at a PER_EXECUTION frequency, all the Alerts are needed before executing
+         * Actions, which can only be done once all of the aggregation results (and Triggers, given the pagination logic) have been evaluated.
+         */
+        val triggerResults = mutableMapOf<String, BucketLevelTriggerRunResult>()
+        val triggerContexts = mutableMapOf<String, BucketLevelTriggerExecutionContext>()
+        val nextAlerts = mutableMapOf<String, MutableMap<AlertCategory, MutableList<Alert>>>()
+        var firstIteration = true
+        var firstPageOfInputResults = InputRunResults(listOf(), null)
+        do {
+            // TODO: Since a composite aggregation is being used for the input query, the total bucket count cannot be determined.
+            //  If a setting is imposed that limits buckets that can be processed for Bucket-Level Monitors, we'd need to iterate over
+            //  the buckets until we hit that threshold. In that case, we'd want to exit the execution without creating any alerts since the
+            //  buckets we iterate over before hitting the limit is not deterministic. Is there a better way to fail faster in this case?
+            withClosableContext(InjectorContextElement(monitor.id, monitorCtx.settings!!, monitorCtx.threadPool!!.threadContext, roles)) {
+                // Storing the first page of results in the case of pagination input results to prevent empty results
+                // in the final output of monitorResult which occurs when all pages have been exhausted.
+                // If it's favorable to return the last page, will need to check how to accomplish that with multiple aggregation paths
+                // with different page counts.
+                val inputResults = monitorCtx.inputService!!.collectInputResults(
+                    monitor,
+                    periodStart,
+                    periodEnd,
+                    monitorResult.inputResults,
+                    workflowRunContext
+                )
+                if (firstIteration) {
+                    firstPageOfInputResults = inputResults
+                    firstIteration = false
+                }
+                monitorResult = monitorResult.copy(inputResults = inputResults)
+            }
+
+            for (trigger in monitor.triggers) {
+                // The currentAlerts map is formed by iterating over the Monitor's Triggers as keys so null should not be returned here
+                val currentAlertsForTrigger = currentAlerts[trigger]!!
+                val triggerCtx = BucketLevelTriggerExecutionContext(monitor, trigger as BucketLevelTrigger, monitorResult)
+                triggerContexts[trigger.id] = triggerCtx
+                val triggerResult = monitorCtx.triggerService!!.runBucketLevelTrigger(monitor, trigger, triggerCtx)
+                triggerResults[trigger.id] = triggerResult.getCombinedTriggerRunResult(triggerResults[trigger.id])
+
+                /*
+                 * If an error was encountered when running the trigger, it means that something went wrong when parsing the input results
+                 * for the filtered buckets returned from the pipeline bucket selector injected into the input query.
+ * + * In this case, the returned aggregation result buckets are empty so the categorization of the Alerts that happens below + * should be skipped/invalidated since comparing the current Alerts to an empty result will lead the execution to believe + * that all Alerts have been COMPLETED. Not doing so would mean it would not be possible to propagate the error into the + * existing Alerts in a way the user can easily view them since they will have all been moved to the history index. + */ + if (triggerResults[trigger.id]?.error != null) continue + val findings = + if (monitor.triggers.size == 1 && monitor.dataSources.findingsEnabled == true) { + logger.debug("Creating bucket level findings") + createFindings( + triggerResult, + monitor, + monitorCtx, + periodStart, + periodEnd, + !dryrun && monitor.id != Monitor.NO_ID, + executionId + ) + } else { + emptyList() + } + // TODO: Should triggerResult's aggregationResultBucket be a list? If not, getCategorizedAlertsForBucketLevelMonitor can + // be refactored to use a map instead + val categorizedAlerts = monitorCtx.alertService!!.getCategorizedAlertsForBucketLevelMonitor( + monitor, + trigger, + currentAlertsForTrigger, + triggerResult.aggregationResultBuckets.values.toList(), + findings, + executionId, + workflowRunContext + ).toMutableMap() + val dedupedAlerts = categorizedAlerts.getOrDefault(AlertCategory.DEDUPED, emptyList()) + var newAlerts = categorizedAlerts.getOrDefault(AlertCategory.NEW, emptyList()) + + /* + * Index de-duped and new Alerts here (if it's not a test Monitor) so they are available at the time the Actions are executed. + * + * The new Alerts have to be returned and saved back with their indexed doc ID to prevent duplicate documents + * when the Alerts are updated again after Action execution. + * + * Note: Index operations can fail for various reasons (such as write blocks on cluster), in such a case, the Actions + * will still execute with the Alert information in the ctx but the Alerts may not be visible. + */ + if (!dryrun && monitor.id != Monitor.NO_ID) { + monitorCtx.alertService!!.saveAlerts( + monitor.dataSources, + dedupedAlerts, + monitorCtx.retryPolicy!!, + allowUpdatingAcknowledgedAlert = true, + monitor.id + ) + newAlerts = monitorCtx.alertService!!.saveNewAlerts(monitor.dataSources, newAlerts, monitorCtx.retryPolicy!!) + } + + // Store deduped and new Alerts to accumulate across pages + if (!nextAlerts.containsKey(trigger.id)) { + nextAlerts[trigger.id] = mutableMapOf( + AlertCategory.DEDUPED to mutableListOf(), + AlertCategory.NEW to mutableListOf(), + AlertCategory.COMPLETED to mutableListOf() + ) + } + nextAlerts[trigger.id]?.get(AlertCategory.DEDUPED)?.addAll(dedupedAlerts) + nextAlerts[trigger.id]?.get(AlertCategory.NEW)?.addAll(newAlerts) + } + } while (monitorResult.inputResults.afterKeysPresent()) + + // The completed Alerts are whatever are left in the currentAlerts. + // However, this operation will only be done if there was no trigger error, since otherwise the nextAlerts were not collected + // in favor of just using the currentAlerts as-is. 
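+        // Worked example (illustrative values): if currentAlerts for a trigger starts as {A, C} and page 1 of the
+        // aggregation returns buckets {A, B}, then A is de-duped (and removed from currentAlerts) and B is new. If no
+        // later page produces bucket C, its alert survives in currentAlerts and is converted to COMPLETED below.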
+        currentAlerts.forEach { (trigger, keysToAlertsMap) ->
+            if (triggerResults[trigger.id]?.error == null) {
+                nextAlerts[trigger.id]?.get(AlertCategory.COMPLETED)
+                    ?.addAll(monitorCtx.alertService!!.convertToCompletedAlerts(keysToAlertsMap))
+            }
+        }
+
+        for (trigger in monitor.triggers) {
+            val alertsToUpdate = mutableSetOf<Alert>()
+            val completedAlertsToUpdate = mutableSetOf<Alert>()
+            // Filter ACKNOWLEDGED Alerts from the deduped list so they do not have Actions executed for them.
+            // New Alerts are ignored since they cannot be acknowledged yet.
+            val dedupedAlerts = nextAlerts[trigger.id]?.get(AlertCategory.DEDUPED)
+                ?.filterNot { it.state == Alert.State.ACKNOWLEDGED }?.toMutableList()
+                ?: mutableListOf()
+            // Update nextAlerts so the filtered DEDUPED Alerts are reflected for PER_ALERT Action execution
+            nextAlerts[trigger.id]?.set(AlertCategory.DEDUPED, dedupedAlerts)
+            val newAlerts = nextAlerts[trigger.id]?.get(AlertCategory.NEW) ?: mutableListOf()
+            val completedAlerts = nextAlerts[trigger.id]?.get(AlertCategory.COMPLETED) ?: mutableListOf()
+
+            // Adding all the COMPLETED Alerts to a separate set and removing them if they get added
+            // to alertsToUpdate to ensure the Alert doc is updated at the end in either case
+            completedAlertsToUpdate.addAll(completedAlerts)
+
+            // All trigger contexts and results should be available at this point since all triggers were evaluated in the main do-while loop
+            val triggerCtx = triggerContexts[trigger.id]!!
+            val triggerResult = triggerResults[trigger.id]!!
+            val monitorOrTriggerError = monitorResult.error ?: triggerResult.error
+            val shouldDefaultToPerExecution = defaultToPerExecutionAction(
+                monitorCtx.maxActionableAlertCount,
+                monitorId = monitor.id,
+                triggerId = trigger.id,
+                totalActionableAlertCount = dedupedAlerts.size + newAlerts.size + completedAlerts.size,
+                monitorOrTriggerError = monitorOrTriggerError
+            )
+            for (action in trigger.actions) {
+                // ActionExecutionPolicy should not be null for Bucket-Level Monitors since it has a default config when not set explicitly
+                val actionExecutionScope = action.getActionExecutionPolicy(monitor)!!.actionExecutionScope
+                if (actionExecutionScope is PerAlertActionScope && !shouldDefaultToPerExecution) {
+                    for (alertCategory in actionExecutionScope.actionableAlerts) {
+                        val alertsToExecuteActionsFor = nextAlerts[trigger.id]?.get(alertCategory) ?: mutableListOf()
+                        for (alert in alertsToExecuteActionsFor) {
+                            val actionCtx = getActionContextForAlertCategory(
+                                alertCategory,
+                                alert,
+                                triggerCtx,
+                                monitorOrTriggerError
+                            )
+                            // AggregationResultBucket should not be null here
+                            val alertBucketKeysHash = alert.aggregationResultBucket!!.getBucketKeysHash()
+                            if (!triggerResult.actionResultsMap.containsKey(alertBucketKeysHash)) {
+                                triggerResult.actionResultsMap[alertBucketKeysHash] = mutableMapOf()
+                            }
+
+                            // Keeping the throttled response separate from runAction for now since
+                            // throttling is not supported for PER_EXECUTION
+                            val actionResult = if (MonitorRunnerService.isActionActionable(action, alert)) {
+                                this.runAction(action, actionCtx, monitorCtx, monitor, dryrun)
+                            } else {
+                                ActionRunResult(action.id, action.name, mapOf(), true, null, null)
+                            }
+
+                            triggerResult.actionResultsMap[alertBucketKeysHash]?.set(action.id, actionResult)
+                            alertsToUpdate.add(alert)
+                            // Remove the alert from completedAlertsToUpdate in case it is present there since
+                            // its update will be handled in the alertsToUpdate batch
+                            completedAlertsToUpdate.remove(alert)
+                        }
+                    }
+                } else if (actionExecutionScope is
PerExecutionActionScope || shouldDefaultToPerExecution) { + // If all categories of Alerts are empty, there is nothing to message on and we can skip the Action. + // If the error is not null, this is disregarded and the Action is executed anyway so the user can be notified. + if (monitorOrTriggerError == null && dedupedAlerts.isEmpty() && newAlerts.isEmpty() && completedAlerts.isEmpty()) { + continue + } + + val actionCtx = triggerCtx.copy( + dedupedAlerts = dedupedAlerts, + newAlerts = newAlerts, + completedAlerts = completedAlerts, + error = monitorResult.error ?: triggerResult.error + ) + val actionResult = this.runAction(action, actionCtx, monitorCtx, monitor, dryrun) + // If there was an error during trigger execution then the Alerts to be updated are the current Alerts since the state + // was not changed. Otherwise, the Alerts to be updated are the sum of the deduped, new and completed Alerts. + val alertsToIterate = if (monitorOrTriggerError == null) { + (dedupedAlerts + newAlerts + completedAlerts) + } else currentAlerts[trigger]?.map { it.value } ?: listOf() + // Save the Action run result for every Alert + for (alert in alertsToIterate) { + val alertBucketKeysHash = alert.aggregationResultBucket!!.getBucketKeysHash() + if (!triggerResult.actionResultsMap.containsKey(alertBucketKeysHash)) { + triggerResult.actionResultsMap[alertBucketKeysHash] = mutableMapOf() + } + triggerResult.actionResultsMap[alertBucketKeysHash]?.set(action.id, actionResult) + alertsToUpdate.add(alert) + // Remove the alert from completedAlertsToUpdate in case it is present there since + // its update will be handled in the alertsToUpdate batch + completedAlertsToUpdate.remove(alert) + } + } + } + + // Alerts are only added to alertsToUpdate after Action execution meaning the action results for it should be present + // in the actionResultsMap but returning a default value when accessing the map to be safe. + val updatedAlerts = alertsToUpdate.map { alert -> + val bucketKeysHash = alert.aggregationResultBucket!!.getBucketKeysHash() + val actionResults = triggerResult.actionResultsMap.getOrDefault(bucketKeysHash, emptyMap()) + monitorCtx.alertService!!.updateActionResultsForBucketLevelAlert( + alert.copy(lastNotificationTime = MonitorRunnerService.currentTime()), + actionResults, + // TODO: Update BucketLevelTriggerRunResult.alertError() to retrieve error based on the first failed Action + monitorResult.alertError() ?: triggerResult.alertError() + ) + } + + // Update Alerts with action execution results (if it's not a test Monitor). + // ACKNOWLEDGED Alerts should not be saved here since actions are not executed for them. 
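+            // At this point triggerResult.actionResultsMap is keyed by each alert's bucket-keys hash and then by
+            // action id, e.g. actionResultsMap["us-west-2"]["notify-ops"] = ActionRunResult(...); the hash and
+            // action id values here are purely illustrative.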
+            if (!dryrun && monitor.id != Monitor.NO_ID) {
+                monitorCtx.alertService!!.saveAlerts(
+                    monitor.dataSources, updatedAlerts, monitorCtx.retryPolicy!!, allowUpdatingAcknowledgedAlert = false,
+                    routingId = monitor.id
+                )
+                // Save any COMPLETED Alerts that were not covered in updatedAlerts
+                monitorCtx.alertService!!.saveAlerts(
+                    monitor.dataSources,
+                    completedAlertsToUpdate.toList(),
+                    monitorCtx.retryPolicy!!,
+                    allowUpdatingAcknowledgedAlert = false,
+                    monitor.id
+                )
+            }
+        }
+
+        return monitorResult.copy(inputResults = firstPageOfInputResults, triggerResults = triggerResults)
+    }
+
+    private suspend fun createFindings(
+        triggerResult: BucketLevelTriggerRunResult,
+        monitor: Monitor,
+        monitorCtx: MonitorRunnerExecutionContext,
+        periodStart: Instant,
+        periodEnd: Instant,
+        shouldCreateFinding: Boolean,
+        executionId: String,
+    ): List<String> {
+        monitor.inputs.forEach { input ->
+            if (input is SearchInput) {
+                val bucketValues: Set<String> = triggerResult.aggregationResultBuckets.keys
+                val query = input.query
+                var fieldName = ""
+
+                for (aggFactory in (query.aggregations() as AggregatorFactories.Builder).aggregatorFactories) {
+                    when (aggFactory) {
+                        is CompositeAggregationBuilder -> {
+                            var groupByFields = 0 // if the number of fields used to group by is > 1, we won't calculate findings
+                            val sources = aggFactory.sources()
+                            for (source in sources) {
+                                if (groupByFields > 0) {
+                                    logger.error("groupByFields > 0. Not generating findings for bucket level monitor ${monitor.id}")
+                                    return listOf()
+                                }
+                                groupByFields++
+                                fieldName = source.field()
+                            }
+                        }
+                        is TermsAggregationBuilder -> {
+                            fieldName = aggFactory.field()
+                        }
+                        else -> {
+                            logger.error(
+                                "Bucket level monitor findings supported only for composite and term aggs. Found [${aggFactory.type}]"
+                            )
+                            return listOf()
+                        }
+                    }
+                }
+                if (fieldName != "") {
+                    val searchParams = mapOf(
+                        "period_start" to periodStart.toEpochMilli(),
+                        "period_end" to periodEnd.toEpochMilli()
+                    )
+                    val searchSource = monitorCtx.scriptService!!.compile(
+                        Script(
+                            ScriptType.INLINE,
+                            Script.DEFAULT_TEMPLATE_LANG,
+                            query.toString(),
+                            searchParams
+                        ),
+                        TemplateScript.CONTEXT
+                    )
+                        .newInstance(searchParams)
+                        .execute()
+                    val sr = SearchRequest(*input.indices.toTypedArray())
+                    XContentType.JSON.xContent().createParser(monitorCtx.xContentRegistry, LoggingDeprecationHandler.INSTANCE, searchSource)
+                        .use {
+                            val source = SearchSourceBuilder.fromXContent(it)
+                            val queryBuilder = if (input.query.query() == null) BoolQueryBuilder()
+                            else QueryBuilders.boolQuery().must(source.query())
+                            queryBuilder.filter(QueryBuilders.termsQuery(fieldName, bucketValues))
+                            sr.source().query(queryBuilder)
+                        }
+                    val searchResponse: SearchResponse = monitorCtx.client!!.suspendUntil { monitorCtx.client!!.search(sr, it) }
+                    return createFindingPerIndex(searchResponse, monitor, monitorCtx, shouldCreateFinding, executionId)
+                } else {
+                    logger.error("Couldn't resolve the groupBy field. Not generating bucket level monitor findings for monitor ${monitor.id}")
+                }
+            }
+        }
+        return listOf()
+    }
+
+    private suspend fun createFindingPerIndex(
+        searchResponse: SearchResponse,
+        monitor: Monitor,
+        monitorCtx: MonitorRunnerExecutionContext,
+        shouldCreateFinding: Boolean,
+        workflowExecutionId: String? = null
+    ): List<String> {
+        val docIdsByIndexName: MutableMap<String, MutableList<String>> = mutableMapOf()
+        for (hit in searchResponse.hits.hits) {
+            val ids = docIdsByIndexName.getOrDefault(hit.index, mutableListOf())
+            ids.add(hit.id)
+            docIdsByIndexName[hit.index] = ids
+        }
+        val findings = mutableListOf<String>()
+        var requestsToRetry: MutableList<IndexRequest> = mutableListOf()
+        docIdsByIndexName.entries.forEach { it ->
+            run {
+                val finding = Finding(
+                    id = UUID.randomUUID().toString(),
+                    relatedDocIds = it.value,
+                    monitorId = monitor.id,
+                    monitorName = monitor.name,
+                    index = it.key,
+                    timestamp = Instant.now(),
+                    docLevelQueries = listOf(),
+                    executionId = workflowExecutionId
+                )
+
+                val findingStr = finding.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS).string()
+                logger.debug("Bucket level monitor ${monitor.id} Findings: $findingStr")
+                if (shouldCreateFinding) {
+                    logger.debug("Saving bucket level monitor findings for monitor ${monitor.id}")
+                    val indexRequest = IndexRequest(monitor.dataSources.findingsIndex)
+                        .source(findingStr, XContentType.JSON)
+                        .id(finding.id)
+                        .routing(finding.id)
+                    requestsToRetry.add(indexRequest)
+                }
+                findings.add(finding.id)
+            }
+        }
+        if (requestsToRetry.isEmpty()) return listOf()
+        // Track the finding ids alongside their index requests so that each retry round stays aligned
+        // with the subset of requests actually being retried
+        var findingsBeingIndexed = findings.toList()
+        monitorCtx.retryPolicy!!.retry(logger, listOf(RestStatus.TOO_MANY_REQUESTS)) {
+            val bulkRequest = BulkRequest().add(requestsToRetry).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+            val bulkResponse: BulkResponse = monitorCtx.client!!.suspendUntil { monitorCtx.client!!.bulk(bulkRequest, it) }
+            requestsToRetry = mutableListOf()
+            val findingsBeingRetried = mutableListOf<String>()
+            bulkResponse.items.forEach { item ->
+                if (item.isFailed) {
+                    if (item.status() == RestStatus.TOO_MANY_REQUESTS) {
+                        requestsToRetry.add(bulkRequest.requests()[item.itemId] as IndexRequest)
+                        findingsBeingRetried.add(findingsBeingIndexed[item.itemId])
+                    }
+                }
+            }
+            findingsBeingIndexed = findingsBeingRetried
+        }
+        return findings
+    }
+
+    private fun getActionContextForAlertCategory(
+        alertCategory: AlertCategory,
+        alert: Alert,
+        ctx: BucketLevelTriggerExecutionContext,
+        error: Exception?
+ ): BucketLevelTriggerExecutionContext { + return when (alertCategory) { + AlertCategory.DEDUPED -> + ctx.copy(dedupedAlerts = listOf(alert), newAlerts = emptyList(), completedAlerts = emptyList(), error = error) + AlertCategory.NEW -> + ctx.copy(dedupedAlerts = emptyList(), newAlerts = listOf(alert), completedAlerts = emptyList(), error = error) + AlertCategory.COMPLETED -> + ctx.copy(dedupedAlerts = emptyList(), newAlerts = emptyList(), completedAlerts = listOf(alert), error = error) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/DocumentLevelMonitorRunner.kt b/alerting/bin/main/org/opensearch/alerting/DocumentLevelMonitorRunner.kt new file mode 100644 index 000000000..1eb826622 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/DocumentLevelMonitorRunner.kt @@ -0,0 +1,804 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import org.apache.logging.log4j.LogManager +import org.opensearch.ExceptionsHelper +import org.opensearch.OpenSearchStatusException +import org.opensearch.action.index.IndexRequest +import org.opensearch.action.index.IndexResponse +import org.opensearch.action.search.SearchAction +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.model.DocumentExecutionContext +import org.opensearch.alerting.model.DocumentLevelTriggerRunResult +import org.opensearch.alerting.model.InputRunResults +import org.opensearch.alerting.model.MonitorMetadata +import org.opensearch.alerting.model.MonitorRunResult +import org.opensearch.alerting.model.userErrorMessage +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.script.DocumentLevelTriggerExecutionContext +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.IndexUtils +import org.opensearch.alerting.util.defaultToPerExecutionAction +import org.opensearch.alerting.util.getActionExecutionPolicy +import org.opensearch.alerting.workflow.WorkflowRunContext +import org.opensearch.client.Client +import org.opensearch.client.node.NodeClient +import org.opensearch.cluster.metadata.IndexMetadata +import org.opensearch.cluster.routing.Preference +import org.opensearch.cluster.routing.ShardRouting +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.xcontent.XContentFactory +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.AlertingPluginInterface +import org.opensearch.commons.alerting.action.PublishFindingsRequest +import org.opensearch.commons.alerting.action.SubscribeFindingsResponse +import org.opensearch.commons.alerting.model.ActionExecutionResult +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.DocumentLevelTrigger +import org.opensearch.commons.alerting.model.Finding +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.action.PerAlertActionScope +import org.opensearch.commons.alerting.util.string +import org.opensearch.core.action.ActionListener +import org.opensearch.core.common.bytes.BytesReference +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.ToXContent +import 
org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.index.IndexNotFoundException +import org.opensearch.index.query.BoolQueryBuilder +import org.opensearch.index.query.Operator +import org.opensearch.index.query.QueryBuilders +import org.opensearch.percolator.PercolateQueryBuilderExt +import org.opensearch.search.SearchHits +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.search.sort.SortOrder +import java.io.IOException +import java.time.Instant +import java.util.UUID +import kotlin.math.max + +object DocumentLevelMonitorRunner : MonitorRunner() { + private val logger = LogManager.getLogger(javaClass) + + override suspend fun runMonitor( + monitor: Monitor, + monitorCtx: MonitorRunnerExecutionContext, + periodStart: Instant, + periodEnd: Instant, + dryrun: Boolean, + workflowRunContext: WorkflowRunContext?, + executionId: String + ): MonitorRunResult { + logger.debug("Document-level-monitor is running ...") + val isTempMonitor = dryrun || monitor.id == Monitor.NO_ID + var monitorResult = MonitorRunResult(monitor.name, periodStart, periodEnd) + + try { + monitorCtx.alertIndices!!.createOrUpdateAlertIndex(monitor.dataSources) + monitorCtx.alertIndices!!.createOrUpdateInitialAlertHistoryIndex(monitor.dataSources) + monitorCtx.alertIndices!!.createOrUpdateInitialFindingHistoryIndex(monitor.dataSources) + } catch (e: Exception) { + val id = if (monitor.id.trim().isEmpty()) "_na_" else monitor.id + logger.error("Error setting up alerts and findings indices for monitor: $id", e) + monitorResult = monitorResult.copy(error = AlertingException.wrap(e)) + } + + try { + validate(monitor) + } catch (e: Exception) { + logger.error("Failed to start Document-level-monitor. Error: ${e.message}") + monitorResult = monitorResult.copy(error = AlertingException.wrap(e)) + } + + var (monitorMetadata, _) = MonitorMetadataService.getOrCreateMetadata( + monitor = monitor, + createWithRunContext = false, + skipIndex = isTempMonitor, + workflowRunContext?.workflowMetadataId + ) + + val docLevelMonitorInput = monitor.inputs[0] as DocLevelMonitorInput + + val queries: List = docLevelMonitorInput.queries + + val lastRunContext = if (monitorMetadata.lastRunContext.isNullOrEmpty()) mutableMapOf() + else monitorMetadata.lastRunContext.toMutableMap() as MutableMap> + + val updatedLastRunContext = lastRunContext.toMutableMap() + + val queryToDocIds = mutableMapOf>() + val inputRunResults = mutableMapOf>() + val docsToQueries = mutableMapOf>() + + try { + // Resolve all passed indices to concrete indices + val allConcreteIndices = IndexUtils.resolveAllIndices( + docLevelMonitorInput.indices, + monitorCtx.clusterService!!, + monitorCtx.indexNameExpressionResolver!! + ) + if (allConcreteIndices.isEmpty()) { + logger.error("indices not found-${docLevelMonitorInput.indices.joinToString(",")}") + throw IndexNotFoundException(docLevelMonitorInput.indices.joinToString(",")) + } + + monitorCtx.docLevelMonitorQueries!!.initDocLevelQueryIndex(monitor.dataSources) + monitorCtx.docLevelMonitorQueries!!.indexDocLevelQueries( + monitor = monitor, + monitorId = monitor.id, + monitorMetadata, + indexTimeout = monitorCtx.indexTimeout!! 
+ ) + + // cleanup old indices that are not monitored anymore from the same monitor + val runContextKeys = updatedLastRunContext.keys.toMutableSet() + for (ind in runContextKeys) { + if (!allConcreteIndices.contains(ind)) { + updatedLastRunContext.remove(ind) + } + } + + // Map of document ids per index when monitor is workflow delegate and has chained findings + val matchingDocIdsPerIndex = workflowRunContext?.matchingDocIdsPerIndex + + docLevelMonitorInput.indices.forEach { indexName -> + var concreteIndices = IndexUtils.resolveAllIndices( + listOf(indexName), + monitorCtx.clusterService!!, + monitorCtx.indexNameExpressionResolver!! + ) + var lastWriteIndex: String? = null + if (IndexUtils.isAlias(indexName, monitorCtx.clusterService!!.state()) || + IndexUtils.isDataStream(indexName, monitorCtx.clusterService!!.state()) + ) { + lastWriteIndex = concreteIndices.find { lastRunContext.containsKey(it) } + if (lastWriteIndex != null) { + val lastWriteIndexCreationDate = + IndexUtils.getCreationDateForIndex(lastWriteIndex, monitorCtx.clusterService!!.state()) + concreteIndices = IndexUtils.getNewestIndicesByCreationDate( + concreteIndices, + monitorCtx.clusterService!!.state(), + lastWriteIndexCreationDate + ) + } + } + val updatedIndexName = indexName.replace("*", "_") + val conflictingFields = monitorCtx.docLevelMonitorQueries!!.getAllConflictingFields( + monitorCtx.clusterService!!.state(), + concreteIndices + ) + + concreteIndices.forEach { concreteIndexName -> + // Prepare lastRunContext for each index + val indexLastRunContext = lastRunContext.getOrPut(concreteIndexName) { + val isIndexCreatedRecently = createdRecently( + monitor, + periodStart, + periodEnd, + monitorCtx.clusterService!!.state().metadata.index(concreteIndexName) + ) + MonitorMetadataService.createRunContextForIndex(concreteIndexName, isIndexCreatedRecently) + } + + // Prepare updatedLastRunContext for each index + val indexUpdatedRunContext = updateLastRunContext( + indexLastRunContext.toMutableMap(), + monitorCtx, + concreteIndexName + ) as MutableMap + if (IndexUtils.isAlias(indexName, monitorCtx.clusterService!!.state()) || + IndexUtils.isDataStream(indexName, monitorCtx.clusterService!!.state()) + ) { + if (concreteIndexName == IndexUtils.getWriteIndex(indexName, monitorCtx.clusterService!!.state())) { + updatedLastRunContext.remove(lastWriteIndex) + updatedLastRunContext[concreteIndexName] = indexUpdatedRunContext + } + } else { + updatedLastRunContext[concreteIndexName] = indexUpdatedRunContext + } + + val count: Int = indexLastRunContext["shards_count"] as Int + for (i: Int in 0 until count) { + val shard = i.toString() + + // update lastRunContext if its a temp monitor as we only want to view the last bit of data then + // TODO: If dryrun, we should make it so we limit the search as this could still potentially give us lots of data + if (isTempMonitor) { + indexLastRunContext[shard] = max(-1, (indexUpdatedRunContext[shard] as String).toInt() - 10) + } + } + + // Prepare DocumentExecutionContext for each index + val docExecutionContext = DocumentExecutionContext(queries, indexLastRunContext, indexUpdatedRunContext) + + val matchingDocs = getMatchingDocs( + monitor, + monitorCtx, + docExecutionContext, + updatedIndexName, + concreteIndexName, + conflictingFields.toList(), + matchingDocIdsPerIndex?.get(concreteIndexName) + ) + + if (matchingDocs.isNotEmpty()) { + val matchedQueriesForDocs = getMatchedQueries( + monitorCtx, + matchingDocs.map { it.second }, + monitor, + monitorMetadata, + updatedIndexName, + 
concreteIndexName + ) + + matchedQueriesForDocs.forEach { hit -> + val id = hit.id + .replace("_${updatedIndexName}_${monitor.id}", "") + .replace("_${concreteIndexName}_${monitor.id}", "") + + val docIndices = hit.field("_percolator_document_slot").values.map { it.toString().toInt() } + docIndices.forEach { idx -> + val docIndex = "${matchingDocs[idx].first}|$concreteIndexName" + inputRunResults.getOrPut(id) { mutableSetOf() }.add(docIndex) + docsToQueries.getOrPut(docIndex) { mutableListOf() }.add(id) + } + } + } + } + } + monitorResult = monitorResult.copy(inputResults = InputRunResults(listOf(inputRunResults))) + + /* + populate the map queryToDocIds with pairs of + this fixes the issue of passing id, name, tags fields of DocLevelQuery object correctly to TriggerExpressionParser + */ + queries.forEach { + if (inputRunResults.containsKey(it.id)) { + queryToDocIds[it] = inputRunResults[it.id]!! + } + } + + val idQueryMap: Map = queries.associateBy { it.id } + + val triggerResults = mutableMapOf() + // If there are no triggers defined, we still want to generate findings + if (monitor.triggers.isEmpty()) { + if (dryrun == false && monitor.id != Monitor.NO_ID) { + docsToQueries.forEach { + val triggeredQueries = it.value.map { queryId -> idQueryMap[queryId]!! } + createFindings(monitor, monitorCtx, triggeredQueries, it.key, true) + } + } + } else { + monitor.triggers.forEach { + triggerResults[it.id] = runForEachDocTrigger( + monitorCtx, + monitorResult, + it as DocumentLevelTrigger, + monitor, + idQueryMap, + docsToQueries, + queryToDocIds, + dryrun, + executionId = executionId, + workflowRunContext = workflowRunContext + ) + } + } + // Don't update monitor if this is a test monitor + if (!isTempMonitor) { + // If any error happened during trigger execution, upsert monitor error alert + val errorMessage = constructErrorMessageFromTriggerResults(triggerResults = triggerResults) + if (errorMessage.isNotEmpty()) { + monitorCtx.alertService!!.upsertMonitorErrorAlert( + monitor = monitor, + errorMessage = errorMessage, + executionId = executionId, + workflowRunContext + ) + } else { + onSuccessfulMonitorRun(monitorCtx, monitor) + } + + MonitorMetadataService.upsertMetadata( + monitorMetadata.copy(lastRunContext = updatedLastRunContext), + true + ) + } + + // TODO: Update the Document as part of the Trigger and return back the trigger action result + return monitorResult.copy(triggerResults = triggerResults) + } catch (e: Exception) { + val errorMessage = ExceptionsHelper.detailedMessage(e) + monitorCtx.alertService!!.upsertMonitorErrorAlert(monitor, errorMessage, executionId, workflowRunContext) + logger.error("Failed running Document-level-monitor ${monitor.name}", e) + val alertingException = AlertingException( + errorMessage, + RestStatus.INTERNAL_SERVER_ERROR, + e + ) + return monitorResult.copy(error = alertingException, inputResults = InputRunResults(emptyList(), alertingException)) + } + } + + private suspend fun onSuccessfulMonitorRun(monitorCtx: MonitorRunnerExecutionContext, monitor: Monitor) { + monitorCtx.alertService!!.clearMonitorErrorAlert(monitor) + if (monitor.dataSources.alertsHistoryIndex != null) { + monitorCtx.alertService!!.moveClearedErrorAlertsToHistory( + monitor.id, + monitor.dataSources.alertsIndex, + monitor.dataSources.alertsHistoryIndex!! + ) + } + } + + private fun constructErrorMessageFromTriggerResults( + triggerResults: MutableMap? 
= null + ): String { + var errorMessage = "" + if (triggerResults != null) { + val triggersErrorBuilder = StringBuilder() + triggerResults.forEach { + if (it.value.error != null) { + triggersErrorBuilder.append("[${it.key}]: [${it.value.error!!.userErrorMessage()}]").append(" | ") + } + } + if (triggersErrorBuilder.isNotEmpty()) { + errorMessage = "Trigger errors: $triggersErrorBuilder" + } + } + return errorMessage + } + + private suspend fun runForEachDocTrigger( + monitorCtx: MonitorRunnerExecutionContext, + monitorResult: MonitorRunResult, + trigger: DocumentLevelTrigger, + monitor: Monitor, + idQueryMap: Map, + docsToQueries: Map>, + queryToDocIds: Map>, + dryrun: Boolean, + workflowRunContext: WorkflowRunContext?, + executionId: String + ): DocumentLevelTriggerRunResult { + val triggerCtx = DocumentLevelTriggerExecutionContext(monitor, trigger) + val triggerResult = monitorCtx.triggerService!!.runDocLevelTrigger(monitor, trigger, queryToDocIds) + + val findings = mutableListOf() + val findingDocPairs = mutableListOf>() + + // TODO: Implement throttling for findings + docsToQueries.forEach { + val triggeredQueries = it.value.map { queryId -> idQueryMap[queryId]!! } + val findingId = createFindings( + monitor, + monitorCtx, + triggeredQueries, + it.key, + !dryrun && monitor.id != Monitor.NO_ID, + executionId + ) + findings.add(findingId) + + if (triggerResult.triggeredDocs.contains(it.key)) { + findingDocPairs.add(Pair(findingId, it.key)) + } + } + + val actionCtx = triggerCtx.copy( + triggeredDocs = triggerResult.triggeredDocs, + relatedFindings = findings, + error = monitorResult.error ?: triggerResult.error + ) + + val alerts = mutableListOf() + findingDocPairs.forEach { + val alert = monitorCtx.alertService!!.composeDocLevelAlert( + listOf(it.first), + listOf(it.second), + triggerCtx, + monitorResult.alertError() ?: triggerResult.alertError(), + executionId = executionId, + workflorwRunContext = workflowRunContext + ) + alerts.add(alert) + } + + val shouldDefaultToPerExecution = defaultToPerExecutionAction( + monitorCtx.maxActionableAlertCount, + monitorId = monitor.id, + triggerId = trigger.id, + totalActionableAlertCount = alerts.size, + monitorOrTriggerError = actionCtx.error + ) + + for (action in trigger.actions) { + val actionExecutionScope = action.getActionExecutionPolicy(monitor)!!.actionExecutionScope + if (actionExecutionScope is PerAlertActionScope && !shouldDefaultToPerExecution) { + for (alert in alerts) { + val actionResults = this.runAction(action, actionCtx.copy(alerts = listOf(alert)), monitorCtx, monitor, dryrun) + triggerResult.actionResultsMap.getOrPut(alert.id) { mutableMapOf() } + triggerResult.actionResultsMap[alert.id]?.set(action.id, actionResults) + } + } else if (alerts.isNotEmpty()) { + val actionResults = this.runAction(action, actionCtx.copy(alerts = alerts), monitorCtx, monitor, dryrun) + for (alert in alerts) { + triggerResult.actionResultsMap.getOrPut(alert.id) { mutableMapOf() } + triggerResult.actionResultsMap[alert.id]?.set(action.id, actionResults) + } + } + } + + // Alerts are saved after the actions since if there are failures in the actions, they can be stated in the alert + if (!dryrun && monitor.id != Monitor.NO_ID) { + val updatedAlerts = alerts.map { alert -> + val actionResults = triggerResult.actionResultsMap.getOrDefault(alert.id, emptyMap()) + val actionExecutionResults = actionResults.values.map { actionRunResult -> + ActionExecutionResult(actionRunResult.actionId, actionRunResult.executionTime, if (actionRunResult.throttled) 1 else 
0) + } + alert.copy(actionExecutionResults = actionExecutionResults) + } + + monitorCtx.retryPolicy?.let { + monitorCtx.alertService!!.saveAlerts( + monitor.dataSources, + updatedAlerts, + it, + routingId = monitor.id + ) + } + } + return triggerResult + } + + private suspend fun createFindings( + monitor: Monitor, + monitorCtx: MonitorRunnerExecutionContext, + docLevelQueries: List, + matchingDocId: String, + shouldCreateFinding: Boolean, + workflowExecutionId: String? = null, + ): String { + // Before the "|" is the doc id and after the "|" is the index + val docIndex = matchingDocId.split("|") + + val finding = Finding( + id = UUID.randomUUID().toString(), + relatedDocIds = listOf(docIndex[0]), + correlatedDocIds = listOf(docIndex[0]), + monitorId = monitor.id, + monitorName = monitor.name, + index = docIndex[1], + docLevelQueries = docLevelQueries, + timestamp = Instant.now(), + executionId = workflowExecutionId + ) + + val findingStr = finding.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS).string() + logger.debug("Findings: $findingStr") + + if (shouldCreateFinding) { + val indexRequest = IndexRequest(monitor.dataSources.findingsIndex) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .source(findingStr, XContentType.JSON) + .id(finding.id) + .routing(finding.id) + + monitorCtx.client!!.suspendUntil { + monitorCtx.client!!.index(indexRequest, it) + } + } + + try { + publishFinding(monitor, monitorCtx, finding) + } catch (e: Exception) { + // suppress exception + logger.error("Optional finding callback failed", e) + } + return finding.id + } + + private fun publishFinding( + monitor: Monitor, + monitorCtx: MonitorRunnerExecutionContext, + finding: Finding + ) { + val publishFindingsRequest = PublishFindingsRequest(monitor.id, finding) + AlertingPluginInterface.publishFinding( + monitorCtx.client!! 
as NodeClient,
+            publishFindingsRequest,
+            object : ActionListener<SubscribeFindingsResponse> {
+                override fun onResponse(response: SubscribeFindingsResponse) {}
+
+                override fun onFailure(e: Exception) {}
+            }
+        )
+    }
+
+    private suspend fun updateLastRunContext(
+        lastRunContext: Map<String, Any>,
+        monitorCtx: MonitorRunnerExecutionContext,
+        index: String
+    ): Map<String, Any> {
+        val count: Int = getShardsCount(monitorCtx.clusterService!!, index)
+        val updatedLastRunContext = lastRunContext.toMutableMap()
+        for (i: Int in 0 until count) {
+            val shard = i.toString()
+            val maxSeqNo: Long = getMaxSeqNo(monitorCtx.client!!, index, shard)
+            updatedLastRunContext[shard] = maxSeqNo.toString()
+        }
+        return updatedLastRunContext
+    }
+
+    private fun validate(monitor: Monitor) {
+        if (monitor.inputs.size > 1) {
+            throw IOException("Only one input is supported with document-level-monitor.")
+        }
+
+        if (monitor.inputs[0].name() != DocLevelMonitorInput.DOC_LEVEL_INPUT_FIELD) {
+            throw IOException("Invalid input with document-level-monitor.")
+        }
+
+        if ((monitor.inputs[0] as DocLevelMonitorInput).indices.isEmpty()) {
+            throw IllegalArgumentException("DocLevelMonitorInput has no indices")
+        }
+    }
+
+    // Checks whether the index was created after the last execution run (or after the monitor was
+    // last updated) so that a newly created index is monitored from its beginning
+    private fun createdRecently(
+        monitor: Monitor,
+        periodStart: Instant,
+        periodEnd: Instant,
+        indexMetadata: IndexMetadata
+    ): Boolean {
+        val lastExecutionTime = if (periodStart == periodEnd) monitor.lastUpdateTime else periodStart
+        val indexCreationDate = indexMetadata.settings.get("index.creation_date")?.toLong() ?: 0L
+        return indexCreationDate > lastExecutionTime.toEpochMilli()
+    }
+
+    /**
+     * Get the current max seq_no of the shard. We find it by searching for the latest document
+     * in the primary shard.
+     */
+    private suspend fun getMaxSeqNo(client: Client, index: String, shard: String): Long {
+        val request: SearchRequest = SearchRequest()
+            .indices(index)
+            .preference("_shards:$shard")
+            .source(
+                SearchSourceBuilder()
+                    .version(true)
+                    .sort("_seq_no", SortOrder.DESC)
+                    .seqNoAndPrimaryTerm(true)
+                    .query(QueryBuilders.matchAllQuery())
+                    .size(1)
+            )
+        val response: SearchResponse = client.suspendUntil { client.search(request, it) }
+        if (response.status() != RestStatus.OK) {
+            throw IOException("Failed to get max seq no for shard: $shard")
+        }
+        if (response.hits.hits.isEmpty()) {
+            return -1L
+        }
+
+        return response.hits.hits[0].seqNo
+    }
+
+    private fun getShardsCount(clusterService: ClusterService, index: String): Int {
+        val allShards: List<ShardRouting> = clusterService.state().routingTable().allShards(index)
+        return allShards.filter { it.primary() }.size
+    }
+
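+    // Illustrative only: shape of the per-index run context maintained by updateLastRunContext
+    // above. One entry per primary shard id holds the highest _seq_no seen (stored as a string),
+    // alongside bookkeeping keys written when the context is first created
+    // (see MonitorMetadataService.createRunContextForIndex). The values below are hypothetical:
+    private fun exampleLastRunContext(): Map<String, Any> = mapOf(
+        "index" to "my_log_index",
+        "shards_count" to 2,
+        "0" to "41", // shard 0: last processed _seq_no
+        "1" to "17" // shard 1: last processed _seq_no
+    )
+
+    private suspend fun getMatchingDocs(
+        monitor: Monitor,
+        monitorCtx: MonitorRunnerExecutionContext,
+        docExecutionCtx: DocumentExecutionContext,
+        index: String,
+        concreteIndex: String,
+        conflictingFields: List<String>,
+        docIds: List<String>?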
= null + ): List> { + val count: Int = docExecutionCtx.updatedLastRunContext["shards_count"] as Int + val matchingDocs = mutableListOf>() + for (i: Int in 0 until count) { + val shard = i.toString() + try { + val maxSeqNo: Long = docExecutionCtx.updatedLastRunContext[shard].toString().toLong() + val prevSeqNo = docExecutionCtx.lastRunContext[shard].toString().toLongOrNull() + + val hits: SearchHits = searchShard( + monitorCtx, + concreteIndex, + shard, + prevSeqNo, + maxSeqNo, + null, + docIds + ) + + if (hits.hits.isNotEmpty()) { + matchingDocs.addAll(getAllDocs(hits, index, concreteIndex, monitor.id, conflictingFields)) + } + } catch (e: Exception) { + logger.warn("Failed to run for shard $shard. Error: ${e.message}") + } + } + return matchingDocs + } + + private suspend fun searchShard( + monitorCtx: MonitorRunnerExecutionContext, + index: String, + shard: String, + prevSeqNo: Long?, + maxSeqNo: Long, + query: String?, + docIds: List? = null + ): SearchHits { + if (prevSeqNo?.equals(maxSeqNo) == true && maxSeqNo != 0L) { + return SearchHits.empty() + } + val boolQueryBuilder = BoolQueryBuilder() + boolQueryBuilder.filter(QueryBuilders.rangeQuery("_seq_no").gt(prevSeqNo).lte(maxSeqNo)) + + if (query != null) { + boolQueryBuilder.must(QueryBuilders.queryStringQuery(query)) + } + + if (!docIds.isNullOrEmpty()) { + boolQueryBuilder.filter(QueryBuilders.termsQuery("_id", docIds)) + } + + val request: SearchRequest = SearchRequest() + .indices(index) + .preference("_shards:$shard") + .source( + SearchSourceBuilder() + .version(true) + .query(boolQueryBuilder) + .size(10000) // fixme: make this configurable. + ) + .preference(Preference.PRIMARY_FIRST.type()) + val response: SearchResponse = monitorCtx.client!!.suspendUntil { monitorCtx.client!!.search(request, it) } + if (response.status() !== RestStatus.OK) { + throw IOException("Failed to search shard: $shard") + } + return response.hits + } + + private suspend fun getMatchedQueries( + monitorCtx: MonitorRunnerExecutionContext, + docs: List, + monitor: Monitor, + monitorMetadata: MonitorMetadata, + index: String, + concreteIndex: String + ): SearchHits { + val boolQueryBuilder = BoolQueryBuilder().must(QueryBuilders.matchQuery("index", index).operator(Operator.AND)) + + val percolateQueryBuilder = PercolateQueryBuilderExt("query", docs, XContentType.JSON) + if (monitor.id.isNotEmpty()) { + boolQueryBuilder.must(QueryBuilders.matchQuery("monitor_id", monitor.id).operator(Operator.AND)) + } + boolQueryBuilder.filter(percolateQueryBuilder) + + val queryIndex = monitorMetadata.sourceToQueryIndexMapping[index + monitor.id] + if (queryIndex == null) { + val message = "Failed to resolve concrete queryIndex from sourceIndex during monitor execution!" 
+ + " sourceIndex:$concreteIndex queryIndex:${monitor.dataSources.queryIndex}" + logger.error(message) + throw AlertingException.wrap( + OpenSearchStatusException(message, RestStatus.INTERNAL_SERVER_ERROR) + ) + } + val searchRequest = SearchRequest(queryIndex).preference(Preference.PRIMARY_FIRST.type()) + val searchSourceBuilder = SearchSourceBuilder() + searchSourceBuilder.query(boolQueryBuilder) + searchRequest.source(searchSourceBuilder) + + var response: SearchResponse + try { + response = monitorCtx.client!!.suspendUntil { + monitorCtx.client!!.execute(SearchAction.INSTANCE, searchRequest, it) + } + } catch (e: Exception) { + throw IllegalStateException( + "Failed to run percolate search for sourceIndex [$index] and queryIndex [$queryIndex] for ${docs.size} document(s)", e + ) + } + + if (response.status() !== RestStatus.OK) { + throw IOException("Failed to search percolate index: $queryIndex") + } + return response.hits + } + + private fun getAllDocs( + hits: SearchHits, + index: String, + concreteIndex: String, + monitorId: String, + conflictingFields: List + ): List> { + return hits.map { hit -> + val sourceMap = hit.sourceAsMap + + transformDocumentFieldNames( + sourceMap, + conflictingFields, + "_${index}_$monitorId", + "_${concreteIndex}_$monitorId", + "" + ) + + var xContentBuilder = XContentFactory.jsonBuilder().map(sourceMap) + + val sourceRef = BytesReference.bytes(xContentBuilder) + + logger.debug("Document [${hit.id}] payload after transform: ", sourceRef.utf8ToString()) + + Pair(hit.id, sourceRef) + } + } + + /** + * Traverses document fields in leaves recursively and appends [fieldNameSuffixIndex] to field names with same names + * but different mappings & [fieldNameSuffixPattern] to field names which have unique names. + * + * Example for index name is my_log_index and Monitor ID is TReewWdsf2gdJFV: + * { { + * "a": { "a": { + * "b": 1234 ----> "b_my_log_index_TReewWdsf2gdJFV": 1234 + * } } + * } + * + * @param jsonAsMap Input JSON (as Map) + * @param fieldNameSuffix Field suffix which is appended to existing field name + */ + private fun transformDocumentFieldNames( + jsonAsMap: MutableMap, + conflictingFields: List, + fieldNameSuffixPattern: String, + fieldNameSuffixIndex: String, + fieldNamePrefix: String + ) { + val tempMap = mutableMapOf() + val it: MutableIterator> = jsonAsMap.entries.iterator() + while (it.hasNext()) { + val entry = it.next() + if (entry.value is Map<*, *>) { + transformDocumentFieldNames( + entry.value as MutableMap, + conflictingFields, + fieldNameSuffixPattern, + fieldNameSuffixIndex, + if (fieldNamePrefix == "") entry.key else "$fieldNamePrefix.${entry.key}" + ) + } else if (!entry.key.endsWith(fieldNameSuffixPattern) && !entry.key.endsWith(fieldNameSuffixIndex)) { + var alreadyReplaced = false + conflictingFields.forEach { conflictingField -> + if (conflictingField == "$fieldNamePrefix.${entry.key}" || (fieldNamePrefix == "" && conflictingField == entry.key)) { + tempMap["${entry.key}$fieldNameSuffixIndex"] = entry.value + it.remove() + alreadyReplaced = true + } + } + if (!alreadyReplaced) { + tempMap["${entry.key}$fieldNameSuffixPattern"] = entry.value + it.remove() + } + } + } + jsonAsMap.putAll(tempMap) + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/InputService.kt b/alerting/bin/main/org/opensearch/alerting/InputService.kt new file mode 100644 index 000000000..b31e21d5f --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/InputService.kt @@ -0,0 +1,226 @@ +/* + * Copyright OpenSearch Contributors + * 
SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import org.apache.logging.log4j.LogManager +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.alerting.model.InputRunResults +import org.opensearch.alerting.model.TriggerAfterKey +import org.opensearch.alerting.opensearchapi.convertToMap +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.util.AggregationQueryRewriter +import org.opensearch.alerting.util.addUserBackendRolesFilter +import org.opensearch.alerting.util.clusterMetricsMonitorHelpers.executeTransportAction +import org.opensearch.alerting.util.clusterMetricsMonitorHelpers.toMap +import org.opensearch.alerting.util.getRoleFilterEnabled +import org.opensearch.alerting.workflow.WorkflowRunContext +import org.opensearch.client.Client +import org.opensearch.cluster.routing.Preference +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.io.stream.BytesStreamOutput +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.ClusterMetricsInput +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput +import org.opensearch.core.common.io.stream.NamedWriteableRegistry +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.index.query.BoolQueryBuilder +import org.opensearch.index.query.MatchQueryBuilder +import org.opensearch.index.query.QueryBuilder +import org.opensearch.index.query.QueryBuilders +import org.opensearch.index.query.TermsQueryBuilder +import org.opensearch.script.Script +import org.opensearch.script.ScriptService +import org.opensearch.script.ScriptType +import org.opensearch.script.TemplateScript +import org.opensearch.search.builder.SearchSourceBuilder +import java.time.Instant + +/** Service that handles the collection of input results for Monitor executions */ +class InputService( + val client: Client, + val scriptService: ScriptService, + val namedWriteableRegistry: NamedWriteableRegistry, + val xContentRegistry: NamedXContentRegistry, + val clusterService: ClusterService, + val settings: Settings +) { + + private val logger = LogManager.getLogger(InputService::class.java) + + suspend fun collectInputResults( + monitor: Monitor, + periodStart: Instant, + periodEnd: Instant, + prevResult: InputRunResults? = null, + workflowRunContext: WorkflowRunContext? 
= null + ): InputRunResults { + return try { + val results = mutableListOf>() + val aggTriggerAfterKey: MutableMap = mutableMapOf() + + // If monitor execution is triggered from a workflow + val matchingDocIdsPerIndex = workflowRunContext?.matchingDocIdsPerIndex + + // TODO: If/when multiple input queries are supported for Bucket-Level Monitor execution, aggTriggerAfterKeys will + // need to be updated to account for it + monitor.inputs.forEach { input -> + when (input) { + is SearchInput -> { + // TODO: Figure out a way to use SearchTemplateRequest without bringing in the entire TransportClient + val searchParams = mapOf( + "period_start" to periodStart.toEpochMilli(), + "period_end" to periodEnd.toEpochMilli() + ) + + // Deep copying query before passing it to rewriteQuery since otherwise, the monitor.input is modified directly + // which causes a strange bug where the rewritten query persists on the Monitor across executions + val rewrittenQuery = AggregationQueryRewriter.rewriteQuery(deepCopyQuery(input.query), prevResult, monitor.triggers) + + // Rewrite query to consider the doc ids per given index + if (chainedFindingExist(matchingDocIdsPerIndex) && rewrittenQuery.query() != null) { + val updatedSourceQuery = updateInputQueryWithFindingDocIds(rewrittenQuery.query(), matchingDocIdsPerIndex!!) + rewrittenQuery.query(updatedSourceQuery) + } + + val searchSource = scriptService.compile( + Script( + ScriptType.INLINE, Script.DEFAULT_TEMPLATE_LANG, + rewrittenQuery.toString(), searchParams + ), + TemplateScript.CONTEXT + ) + .newInstance(searchParams) + .execute() + + val searchRequest = SearchRequest() + .indices(*input.indices.toTypedArray()) + .preference(Preference.PRIMARY_FIRST.type()) + XContentType.JSON.xContent().createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, searchSource).use { + searchRequest.source(SearchSourceBuilder.fromXContent(it)) + } + val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } + aggTriggerAfterKey += AggregationQueryRewriter.getAfterKeysFromSearchResponse( + searchResponse, + monitor.triggers, + prevResult?.aggTriggersAfterKey + ) + results += searchResponse.convertToMap() + } + is ClusterMetricsInput -> { + logger.debug("ClusterMetricsInput clusterMetricType: ${input.clusterMetricType}") + val response = executeTransportAction(input, client) + results += response.toMap() + } + else -> { + throw IllegalArgumentException("Unsupported input type: ${input.name()}.") + } + } + } + InputRunResults(results.toList(), aggTriggersAfterKey = aggTriggerAfterKey) + } catch (e: Exception) { + logger.info("Error collecting inputs for monitor: ${monitor.id}", e) + InputRunResults(emptyList(), e) + } + } + + /** + * Extends the given query builder with query that filters the given indices with the given doc ids per index + * Used whenever we want to select the documents that were found in chained delegate execution of the current workflow run + * + * @param query Original bucket monitor query + * @param matchingDocIdsPerIndex Map of finding doc ids grouped by index + */ + private fun updateInputQueryWithFindingDocIds( + query: QueryBuilder, + matchingDocIdsPerIndex: Map>, + ): QueryBuilder { + val queryBuilder = QueryBuilders.boolQuery().must(query) + val shouldQuery = QueryBuilders.boolQuery() + + matchingDocIdsPerIndex.forEach { entry -> + shouldQuery + .should() + .add( + BoolQueryBuilder() + .must(MatchQueryBuilder("_index", entry.key)) + .must(TermsQueryBuilder("_id", entry.value)) + ) + } + return 
queryBuilder.must(shouldQuery)
+    }
+
+    private fun chainedFindingExist(indexToDocIds: Map<String, List<String>>?) =
+        !indexToDocIds.isNullOrEmpty()
+
+    private fun deepCopyQuery(query: SearchSourceBuilder): SearchSourceBuilder {
+        val out = BytesStreamOutput()
+        query.writeTo(out)
+        val sin = NamedWriteableAwareStreamInput(out.bytes().streamInput(), namedWriteableRegistry)
+        return SearchSourceBuilder(sin)
+    }
+
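+    // Illustrative sketch (hypothetical, not part of this change) of why deepCopyQuery wires in
+    // the NamedWriteableRegistry: SearchSourceBuilder serializes its nested QueryBuilders as named
+    // writeables, and deserialization can only resolve their readers through the registry.
+    private fun deepCopyQueryExample() {
+        val original = SearchSourceBuilder().query(QueryBuilders.matchAllQuery())
+        val out = BytesStreamOutput()
+        original.writeTo(out)
+        // A plain StreamInput could not resolve the named writeables (e.g. the query); the
+        // registry-aware wrapper lets SearchSourceBuilder reconstruct them.
+        val sin = NamedWriteableAwareStreamInput(out.bytes().streamInput(), namedWriteableRegistry)
+        val copy = SearchSourceBuilder(sin)
+        logger.debug("Deep-copied query: {}", copy)
+    }
+
+    /**
+     * We moved the anomaly result index to the system index list, so regular users can no longer
+     * query it directly. This method stashes the current thread context to pass the security check,
+     * allowing the monitor job to access the anomaly result index, and adds a monitor-user role
+     * filter to the search query so that only documents the monitor user can access are returned.
+     *
+     * On the Alerting Kibana UI, monitor users can only see detectors to which they have read
+     * access, so they cannot create a monitor on another user's detector that they cannot read.
+     * Even if they know another user's detector id and use it to create a monitor, this method
+     * will only return anomaly results they are allowed to read.
+     */
+    suspend fun collectInputResultsForADMonitor(monitor: Monitor, periodStart: Instant, periodEnd: Instant): InputRunResults {
+        return try {
+            val results = mutableListOf<Map<String, Any>>()
+            val input = monitor.inputs[0] as SearchInput
+
+            val searchParams = mapOf("period_start" to periodStart.toEpochMilli(), "period_end" to periodEnd.toEpochMilli())
+            val searchSource = scriptService.compile(
+                Script(
+                    ScriptType.INLINE, Script.DEFAULT_TEMPLATE_LANG,
+                    input.query.toString(), searchParams
+                ),
+                TemplateScript.CONTEXT
+            )
+                .newInstance(searchParams)
+                .execute()
+
+            val searchRequest = SearchRequest()
+                .indices(*input.indices.toTypedArray())
+                .preference(Preference.PRIMARY_FIRST.type())
+            XContentType.JSON.xContent().createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, searchSource).use {
+                searchRequest.source(SearchSourceBuilder.fromXContent(it))
+            }
+
+            // Add user role filter for AD result
+            client.threadPool().threadContext.stashContext().use {
+                // Possible long term solution:
+                // 1. Use a secure rest client to send a request to the AD search result API. If there is no
+                // permission exception, the user has read access on AD results, and there is no need to add
+                // the user role filter when querying AD results if the AD backend role filter is disabled.
+                // 2. The Security plugin could provide a transport action to verify whether the user has
+                // permission to search AD results. The monitor runner would send that transport request to
+                // check permission first. If the security plugin answers yes, the user can query AD results.
+                // If the AD role filter is enabled, we add the user role filter to protect data at the user
+                // role level; otherwise, the user can query any AD result.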
+ if (getRoleFilterEnabled(clusterService, settings, "plugins.anomaly_detection.filter_by_backend_roles")) { + addUserBackendRolesFilter(monitor.user, searchRequest.source()) + } + val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } + results += searchResponse.convertToMap() + } + InputRunResults(results.toList()) + } catch (e: Exception) { + logger.info("Error collecting anomaly result inputs for monitor: ${monitor.id}", e) + InputRunResults(emptyList(), e) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/MonitorMetadataService.kt b/alerting/bin/main/org/opensearch/alerting/MonitorMetadataService.kt new file mode 100644 index 000000000..c1a5c9aea --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/MonitorMetadataService.kt @@ -0,0 +1,274 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import kotlinx.coroutines.CoroutineName +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.SupervisorJob +import org.apache.logging.log4j.LogManager +import org.opensearch.ExceptionsHelper +import org.opensearch.OpenSearchSecurityException +import org.opensearch.action.DocWriteRequest +import org.opensearch.action.DocWriteResponse +import org.opensearch.action.admin.indices.get.GetIndexRequest +import org.opensearch.action.admin.indices.get.GetIndexResponse +import org.opensearch.action.admin.indices.stats.IndicesStatsAction +import org.opensearch.action.admin.indices.stats.IndicesStatsRequest +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse +import org.opensearch.action.get.GetRequest +import org.opensearch.action.get.GetResponse +import org.opensearch.action.index.IndexRequest +import org.opensearch.action.index.IndexResponse +import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.model.MonitorMetadata +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.IndexUtils +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.settings.Settings +import org.opensearch.common.unit.TimeValue +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentFactory +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import org.opensearch.index.seqno.SequenceNumbers +import org.opensearch.transport.RemoteTransportException + +private val log = LogManager.getLogger(MonitorMetadataService::class.java) + +object MonitorMetadataService : + CoroutineScope by CoroutineScope(SupervisorJob() + Dispatchers.Default + CoroutineName("MonitorMetadataService")) { + + private lateinit var client: Client + private lateinit var xContentRegistry: NamedXContentRegistry + private lateinit var clusterService: ClusterService + private 
lateinit var settings: Settings
+
+    @Volatile
+    private lateinit var indexTimeout: TimeValue
+
+    fun initialize(
+        client: Client,
+        clusterService: ClusterService,
+        xContentRegistry: NamedXContentRegistry,
+        settings: Settings,
+    ) {
+        this.clusterService = clusterService
+        this.client = client
+        this.xContentRegistry = xContentRegistry
+        this.settings = settings
+        this.indexTimeout = AlertingSettings.INDEX_TIMEOUT.get(settings)
+        this.clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.INDEX_TIMEOUT) { indexTimeout = it }
+    }
+
+    @Suppress("ComplexMethod", "ReturnCount")
+    suspend fun upsertMetadata(metadata: MonitorMetadata, updating: Boolean): MonitorMetadata {
+        try {
+            val indexRequest = IndexRequest(ScheduledJob.SCHEDULED_JOBS_INDEX)
+                .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+                .source(metadata.toXContent(XContentFactory.jsonBuilder(), ToXContent.MapParams(mapOf("with_type" to "true"))))
+                .id(metadata.id)
+                .routing(metadata.monitorId)
+                .setIfSeqNo(metadata.seqNo)
+                .setIfPrimaryTerm(metadata.primaryTerm)
+                .timeout(indexTimeout)
+
+            if (updating) {
+                indexRequest.id(metadata.id).setIfSeqNo(metadata.seqNo).setIfPrimaryTerm(metadata.primaryTerm)
+            } else {
+                indexRequest.opType(DocWriteRequest.OpType.CREATE)
+            }
+            val response: IndexResponse = client.suspendUntil { index(indexRequest, it) }
+            when (response.result) {
+                DocWriteResponse.Result.DELETED, DocWriteResponse.Result.NOOP, DocWriteResponse.Result.NOT_FOUND, null -> {
+                    val failureReason = "The upsert metadata call failed with a ${response.result?.lowercase} result"
+                    log.error(failureReason)
+                    throw AlertingException(failureReason, RestStatus.INTERNAL_SERVER_ERROR, IllegalStateException(failureReason))
+                }
+                DocWriteResponse.Result.CREATED, DocWriteResponse.Result.UPDATED -> {
+                    log.debug("Successfully upserted MonitorMetadata:${metadata.id}")
+                }
+            }
+            return metadata.copy(
+                seqNo = response.seqNo,
+                primaryTerm = response.primaryTerm
+            )
+        } catch (e: Exception) {
+            throw AlertingException.wrap(e)
+        }
+    }
+
+    /**
+     * Document monitors keep the context of their last run. Since one monitor can be part of
+     * multiple workflows, we need to be sure that the execution of the current workflow
+     * doesn't interfere with other workflows that depend on the same monitor.
+     */
+    suspend fun getOrCreateMetadata(
+        monitor: Monitor,
+        createWithRunContext: Boolean = true,
+        skipIndex: Boolean = false,
+        workflowMetadataId: String? = null
+    ): Pair<MonitorMetadata, Boolean> {
+        try {
+            val created = true
+            val metadata = getMetadata(monitor, workflowMetadataId)
+            return if (metadata != null) {
+                metadata to !created
+            } else {
+                val newMetadata = createNewMetadata(monitor, createWithRunContext = createWithRunContext, workflowMetadataId)
+                if (skipIndex) {
+                    newMetadata to created
+                } else {
+                    upsertMetadata(newMetadata, updating = false) to created
+                }
+            }
+        } catch (e: Exception) {
+            throw AlertingException.wrap(e)
+        }
+    }
+
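+    // Hypothetical call-site sketch (not part of this change): the Boolean in the returned Pair
+    // flags whether the metadata was newly created rather than fetched.
+    private suspend fun exampleEnsureMetadata(monitor: Monitor): MonitorMetadata {
+        val (metadata, created) = getOrCreateMetadata(monitor)
+        if (created) log.debug("Created new metadata for monitor ${monitor.id}")
+        return metadata
+    }
+
+    suspend fun getMetadata(monitor: Monitor, workflowMetadataId: String? = null): MonitorMetadata?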
{ + try { + val metadataId = MonitorMetadata.getId(monitor, workflowMetadataId) + val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, metadataId).routing(monitor.id) + + val getResponse: GetResponse = client.suspendUntil { get(getRequest, it) } + return if (getResponse.isExists) { + val xcp = XContentHelper.createParser( + xContentRegistry, + LoggingDeprecationHandler.INSTANCE, + getResponse.sourceAsBytesRef, + XContentType.JSON + ) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + MonitorMetadata.parse(xcp, getResponse.id, getResponse.seqNo, getResponse.primaryTerm) + } else { + null + } + } catch (e: Exception) { + if (e.message?.contains("no such index") == true) { + return null + } else { + throw AlertingException.wrap(e) + } + } + } + + suspend fun recreateRunContext(metadata: MonitorMetadata, monitor: Monitor): MonitorMetadata { + try { + val monitorIndex = if (monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { + (monitor.inputs[0] as DocLevelMonitorInput).indices[0] + } else null + val runContext = if (monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { + createFullRunContext(monitorIndex, metadata.lastRunContext as MutableMap>) + } else null + return if (runContext != null) { + metadata.copy( + lastRunContext = runContext + ) + } else { + metadata + } + } catch (e: Exception) { + throw AlertingException.wrap(e) + } + } + + private suspend fun createNewMetadata( + monitor: Monitor, + createWithRunContext: Boolean, + workflowMetadataId: String? = null, + ): MonitorMetadata { + val monitorIndex = if (monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) + (monitor.inputs[0] as DocLevelMonitorInput).indices[0] + else null + val runContext = if (monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR && createWithRunContext) + createFullRunContext(monitorIndex) + else emptyMap() + return MonitorMetadata( + id = MonitorMetadata.getId(monitor, workflowMetadataId), + seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO, + primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM, + monitorId = monitor.id, + lastActionExecutionTimes = emptyList(), + lastRunContext = runContext, + sourceToQueryIndexMapping = mutableMapOf() + ) + } + + suspend fun createFullRunContext( + index: String?, + existingRunContext: MutableMap>? 
= null, + ): MutableMap> { + val lastRunContext = existingRunContext?.toMutableMap() ?: mutableMapOf() + try { + if (index == null) return mutableMapOf() + + val indices = mutableListOf() + if (IndexUtils.isAlias(index, clusterService.state()) || + IndexUtils.isDataStream(index, clusterService.state()) + ) { + IndexUtils.getWriteIndex(index, clusterService.state())?.let { indices.add(it) } + } else { + val getIndexRequest = GetIndexRequest().indices(index) + val getIndexResponse: GetIndexResponse = client.suspendUntil { + client.admin().indices().getIndex(getIndexRequest, it) + } + indices.addAll(getIndexResponse.indices()) + } + + indices.forEach { indexName -> + if (!lastRunContext.containsKey(indexName)) { + lastRunContext[indexName] = createRunContextForIndex(indexName) + } + } + } catch (e: RemoteTransportException) { + val unwrappedException = ExceptionsHelper.unwrapCause(e) as Exception + throw AlertingException("Failed fetching index stats", RestStatus.INTERNAL_SERVER_ERROR, unwrappedException) + } catch (e: OpenSearchSecurityException) { + throw AlertingException( + "Failed fetching index stats - missing required index permissions: ${e.localizedMessage}", + RestStatus.INTERNAL_SERVER_ERROR, + e + ) + } catch (e: Exception) { + throw AlertingException("Failed fetching index stats", RestStatus.INTERNAL_SERVER_ERROR, e) + } + return lastRunContext + } + + suspend fun createRunContextForIndex(index: String, createdRecently: Boolean = false): MutableMap { + val request = IndicesStatsRequest().indices(index).clear() + val response: IndicesStatsResponse = client.suspendUntil { execute(IndicesStatsAction.INSTANCE, request, it) } + if (response.status != RestStatus.OK) { + val errorMessage = "Failed fetching index stats for index:$index" + throw AlertingException(errorMessage, RestStatus.INTERNAL_SERVER_ERROR, IllegalStateException(errorMessage)) + } + val shards = response.shards.filter { it.shardRouting.primary() && it.shardRouting.active() } + val lastRunContext = HashMap() + lastRunContext["index"] = index + val count = shards.size + lastRunContext["shards_count"] = count + + for (shard in shards) { + lastRunContext[shard.shardRouting.id.toString()] = + if (createdRecently) -1L + else shard.seqNoStats?.globalCheckpoint ?: SequenceNumbers.UNASSIGNED_SEQ_NO + } + return lastRunContext + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/MonitorRunner.kt b/alerting/bin/main/org/opensearch/alerting/MonitorRunner.kt new file mode 100644 index 000000000..3340cac43 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/MonitorRunner.kt @@ -0,0 +1,194 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import org.opensearch.OpenSearchSecurityException +import org.opensearch.alerting.action.GetDestinationsAction +import org.opensearch.alerting.action.GetDestinationsRequest +import org.opensearch.alerting.action.GetDestinationsResponse +import org.opensearch.alerting.model.ActionRunResult +import org.opensearch.alerting.model.MonitorRunResult +import org.opensearch.alerting.model.destination.Destination +import org.opensearch.alerting.opensearchapi.InjectorContextElement +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.opensearchapi.withClosableContext +import org.opensearch.alerting.script.QueryLevelTriggerExecutionContext +import org.opensearch.alerting.script.TriggerExecutionContext +import 
org.opensearch.alerting.util.destinationmigration.NotificationActionConfigs +import org.opensearch.alerting.util.destinationmigration.NotificationApiUtils.Companion.getNotificationConfigInfo +import org.opensearch.alerting.util.destinationmigration.getTitle +import org.opensearch.alerting.util.destinationmigration.publishLegacyNotification +import org.opensearch.alerting.util.destinationmigration.sendNotification +import org.opensearch.alerting.util.isAllowed +import org.opensearch.alerting.util.isTestAction +import org.opensearch.alerting.workflow.WorkflowRunContext +import org.opensearch.client.node.NodeClient +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.Table +import org.opensearch.commons.alerting.model.action.Action +import org.opensearch.commons.notifications.model.NotificationConfigInfo +import org.opensearch.core.common.Strings +import java.time.Instant + +abstract class MonitorRunner { + + abstract suspend fun runMonitor( + monitor: Monitor, + monitorCtx: MonitorRunnerExecutionContext, + periodStart: Instant, + periodEnd: Instant, + dryRun: Boolean, + workflowRunContext: WorkflowRunContext? = null, + executionId: String + ): MonitorRunResult<*> + + suspend fun runAction( + action: Action, + ctx: TriggerExecutionContext, + monitorCtx: MonitorRunnerExecutionContext, + monitor: Monitor, + dryrun: Boolean + ): ActionRunResult { + return try { + if (ctx is QueryLevelTriggerExecutionContext && !MonitorRunnerService.isActionActionable(action, ctx.alert)) { + return ActionRunResult(action.id, action.name, mapOf(), true, null, null) + } + val actionOutput = mutableMapOf() + actionOutput[Action.SUBJECT] = if (action.subjectTemplate != null) { + MonitorRunnerService.compileTemplate(action.subjectTemplate!!, ctx) + } else "" + actionOutput[Action.MESSAGE] = MonitorRunnerService.compileTemplate(action.messageTemplate, ctx) + if (Strings.isNullOrEmpty(actionOutput[Action.MESSAGE])) { + throw IllegalStateException("Message content missing in the Destination with id: ${action.destinationId}") + } + if (!dryrun) { + val client = monitorCtx.client + client!!.threadPool().threadContext.stashContext().use { + withClosableContext( + InjectorContextElement( + monitor.id, + monitorCtx.settings!!, + monitorCtx.threadPool!!.threadContext, + monitor.user?.roles, + monitor.user + ) + ) { + actionOutput[Action.MESSAGE_ID] = getConfigAndSendNotification( + action, + monitorCtx, + actionOutput[Action.SUBJECT], + actionOutput[Action.MESSAGE]!! 
+ ) + } + } + } + ActionRunResult(action.id, action.name, actionOutput, false, MonitorRunnerService.currentTime(), null) + } catch (e: Exception) { + ActionRunResult(action.id, action.name, mapOf(), false, MonitorRunnerService.currentTime(), e) + } + } + + protected suspend fun getConfigAndSendNotification( + action: Action, + monitorCtx: MonitorRunnerExecutionContext, + subject: String?, + message: String + ): String { + val config = getConfigForNotificationAction(action, monitorCtx) + if (config.destination == null && config.channel == null) { + throw IllegalStateException("Unable to find a Notification Channel or Destination config with id [${action.destinationId}]") + } + + // Adding a check on TEST_ACTION Destination type here to avoid supporting it as a LegacyBaseMessage type + // just for Alerting integration tests + if (config.destination?.isTestAction() == true) { + return "test action" + } + + if (config.destination?.isAllowed(monitorCtx.allowList) == false) { + throw IllegalStateException( + "Monitor contains a Destination type that is not allowed: ${config.destination.type}" + ) + } + + var actionResponseContent = "" + actionResponseContent = config.channel + ?.sendNotification( + monitorCtx.client!!, + config.channel.getTitle(subject), + message + ) ?: actionResponseContent + + actionResponseContent = config.destination + ?.buildLegacyBaseMessage(subject, message, monitorCtx.destinationContextFactory!!.getDestinationContext(config.destination)) + ?.publishLegacyNotification(monitorCtx.client!!) + ?: actionResponseContent + + return actionResponseContent + } + + /** + * The "destination" ID referenced in a Monitor Action could either be a Notification config or a Destination config + * depending on whether the background migration process has already migrated it from a Destination to a Notification config. + * + * To cover both of these cases, the Notification config will take precedence and if it is not found, the Destination will be retrieved. + */ + private suspend fun getConfigForNotificationAction( + action: Action, + monitorCtx: MonitorRunnerExecutionContext + ): NotificationActionConfigs { + var destination: Destination? = null + var notificationPermissionException: Exception? = null + + var channel: NotificationConfigInfo? 
= null + try { + channel = getNotificationConfigInfo(monitorCtx.client as NodeClient, action.destinationId) + } catch (e: OpenSearchSecurityException) { + notificationPermissionException = e + } + + // If the channel was not found, try to retrieve the Destination + if (channel == null) { + destination = try { + val table = Table( + "asc", + "destination.name.keyword", + null, + 1, + 0, + null + ) + val getDestinationsRequest = GetDestinationsRequest( + action.destinationId, + 0L, + null, + table, + "ALL" + ) + + val getDestinationsResponse: GetDestinationsResponse = monitorCtx.client!!.suspendUntil { + monitorCtx.client!!.execute(GetDestinationsAction.INSTANCE, getDestinationsRequest, it) + } + getDestinationsResponse.destinations.firstOrNull() + } catch (e: IllegalStateException) { + // Catching the exception thrown when the Destination was not found so the NotificationActionConfigs object can be returned + null + } catch (e: OpenSearchSecurityException) { + if (notificationPermissionException != null) { + throw notificationPermissionException + } else { + throw e + } + } + + if (destination == null && notificationPermissionException != null) { + throw notificationPermissionException + } + } + + return NotificationActionConfigs(destination, channel) + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/MonitorRunnerExecutionContext.kt b/alerting/bin/main/org/opensearch/alerting/MonitorRunnerExecutionContext.kt new file mode 100644 index 000000000..41a26bb79 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/MonitorRunnerExecutionContext.kt @@ -0,0 +1,51 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import org.opensearch.action.bulk.BackoffPolicy +import org.opensearch.alerting.alerts.AlertIndices +import org.opensearch.alerting.model.destination.DestinationContextFactory +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.settings.DestinationSettings +import org.opensearch.alerting.settings.LegacyOpenDistroDestinationSettings +import org.opensearch.alerting.util.DocLevelMonitorQueries +import org.opensearch.client.Client +import org.opensearch.cluster.metadata.IndexNameExpressionResolver +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.settings.Settings +import org.opensearch.common.unit.TimeValue +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.script.ScriptService +import org.opensearch.threadpool.ThreadPool + +data class MonitorRunnerExecutionContext( + + var clusterService: ClusterService? = null, + var client: Client? = null, + var xContentRegistry: NamedXContentRegistry? = null, + var indexNameExpressionResolver: IndexNameExpressionResolver? = null, + var scriptService: ScriptService? = null, + var settings: Settings? = null, + var threadPool: ThreadPool? = null, + var alertIndices: AlertIndices? = null, + var inputService: InputService? = null, + var triggerService: TriggerService? = null, + var alertService: AlertService? = null, + var docLevelMonitorQueries: DocLevelMonitorQueries? = null, + var workflowService: WorkflowService? = null, + + @Volatile var retryPolicy: BackoffPolicy? = null, + @Volatile var moveAlertsRetryPolicy: BackoffPolicy? = null, + + @Volatile var allowList: List = DestinationSettings.ALLOW_LIST_NONE, + @Volatile var hostDenyList: List = LegacyOpenDistroDestinationSettings.HOST_DENY_LIST_NONE, + + @Volatile var destinationSettings: Map? 
= null, + @Volatile var destinationContextFactory: DestinationContextFactory? = null, + + @Volatile var maxActionableAlertCount: Long = AlertingSettings.DEFAULT_MAX_ACTIONABLE_ALERT_COUNT, + @Volatile var indexTimeout: TimeValue? = null +) diff --git a/alerting/bin/main/org/opensearch/alerting/MonitorRunnerService.kt b/alerting/bin/main/org/opensearch/alerting/MonitorRunnerService.kt new file mode 100644 index 000000000..ca223f7a0 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/MonitorRunnerService.kt @@ -0,0 +1,353 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.Job +import kotlinx.coroutines.SupervisorJob +import kotlinx.coroutines.launch +import org.apache.logging.log4j.LogManager +import org.opensearch.action.bulk.BackoffPolicy +import org.opensearch.action.support.master.AcknowledgedResponse +import org.opensearch.alerting.alerts.AlertIndices +import org.opensearch.alerting.alerts.AlertMover.Companion.moveAlerts +import org.opensearch.alerting.core.JobRunner +import org.opensearch.alerting.core.ScheduledJobIndices +import org.opensearch.alerting.model.MonitorRunResult +import org.opensearch.alerting.model.WorkflowRunResult +import org.opensearch.alerting.model.destination.DestinationContextFactory +import org.opensearch.alerting.opensearchapi.retry +import org.opensearch.alerting.script.TriggerExecutionContext +import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_BACKOFF_COUNT +import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_BACKOFF_MILLIS +import org.opensearch.alerting.settings.AlertingSettings.Companion.INDEX_TIMEOUT +import org.opensearch.alerting.settings.AlertingSettings.Companion.MAX_ACTIONABLE_ALERT_COUNT +import org.opensearch.alerting.settings.AlertingSettings.Companion.MOVE_ALERTS_BACKOFF_COUNT +import org.opensearch.alerting.settings.AlertingSettings.Companion.MOVE_ALERTS_BACKOFF_MILLIS +import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST +import org.opensearch.alerting.settings.DestinationSettings.Companion.HOST_DENY_LIST +import org.opensearch.alerting.settings.DestinationSettings.Companion.loadDestinationSettings +import org.opensearch.alerting.util.DocLevelMonitorQueries +import org.opensearch.alerting.util.IndexUtils +import org.opensearch.alerting.util.isDocLevelMonitor +import org.opensearch.alerting.workflow.CompositeWorkflowRunner +import org.opensearch.client.Client +import org.opensearch.cluster.metadata.IndexNameExpressionResolver +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.lifecycle.AbstractLifecycleComponent +import org.opensearch.common.settings.Settings +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.commons.alerting.model.action.Action +import org.opensearch.commons.alerting.util.isBucketLevelMonitor +import org.opensearch.core.action.ActionListener +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.script.Script +import org.opensearch.script.ScriptService +import org.opensearch.script.TemplateScript +import org.opensearch.threadpool.ThreadPool +import java.time.Instant +import java.time.LocalDateTime +import 
java.time.ZoneOffset +import java.util.UUID +import kotlin.coroutines.CoroutineContext + +object MonitorRunnerService : JobRunner, CoroutineScope, AbstractLifecycleComponent() { + + private val logger = LogManager.getLogger(javaClass) + + var monitorCtx: MonitorRunnerExecutionContext = MonitorRunnerExecutionContext() + private lateinit var runnerSupervisor: Job + override val coroutineContext: CoroutineContext + get() = Dispatchers.Default + runnerSupervisor + + fun registerClusterService(clusterService: ClusterService): MonitorRunnerService { + this.monitorCtx.clusterService = clusterService + return this + } + + fun registerClient(client: Client): MonitorRunnerService { + this.monitorCtx.client = client + return this + } + + fun registerNamedXContentRegistry(xContentRegistry: NamedXContentRegistry): MonitorRunnerService { + this.monitorCtx.xContentRegistry = xContentRegistry + return this + } + + fun registerindexNameExpressionResolver(indexNameExpressionResolver: IndexNameExpressionResolver): MonitorRunnerService { + this.monitorCtx.indexNameExpressionResolver = indexNameExpressionResolver + return this + } + + fun registerScriptService(scriptService: ScriptService): MonitorRunnerService { + this.monitorCtx.scriptService = scriptService + return this + } + + fun registerSettings(settings: Settings): MonitorRunnerService { + this.monitorCtx.settings = settings + return this + } + + fun registerThreadPool(threadPool: ThreadPool): MonitorRunnerService { + this.monitorCtx.threadPool = threadPool + return this + } + + fun registerAlertIndices(alertIndices: AlertIndices): MonitorRunnerService { + this.monitorCtx.alertIndices = alertIndices + return this + } + + fun registerInputService(inputService: InputService): MonitorRunnerService { + this.monitorCtx.inputService = inputService + return this + } + + fun registerTriggerService(triggerService: TriggerService): MonitorRunnerService { + this.monitorCtx.triggerService = triggerService + return this + } + + fun registerAlertService(alertService: AlertService): MonitorRunnerService { + this.monitorCtx.alertService = alertService + return this + } + + fun registerDocLevelMonitorQueries(docLevelMonitorQueries: DocLevelMonitorQueries): MonitorRunnerService { + this.monitorCtx.docLevelMonitorQueries = docLevelMonitorQueries + return this + } + + fun registerWorkflowService(workflowService: WorkflowService): MonitorRunnerService { + this.monitorCtx.workflowService = workflowService + return this + } + + // Must be called after registerClusterService and registerSettings in AlertingPlugin + fun registerConsumers(): MonitorRunnerService { + monitorCtx.retryPolicy = BackoffPolicy.constantBackoff( + ALERT_BACKOFF_MILLIS.get(monitorCtx.settings), + ALERT_BACKOFF_COUNT.get(monitorCtx.settings) + ) + monitorCtx.clusterService!!.clusterSettings.addSettingsUpdateConsumer(ALERT_BACKOFF_MILLIS, ALERT_BACKOFF_COUNT) { millis, count -> + monitorCtx.retryPolicy = BackoffPolicy.constantBackoff(millis, count) + } + + monitorCtx.moveAlertsRetryPolicy = + BackoffPolicy.exponentialBackoff( + MOVE_ALERTS_BACKOFF_MILLIS.get(monitorCtx.settings), + MOVE_ALERTS_BACKOFF_COUNT.get(monitorCtx.settings) + ) + monitorCtx.clusterService!!.clusterSettings.addSettingsUpdateConsumer( + MOVE_ALERTS_BACKOFF_MILLIS, + MOVE_ALERTS_BACKOFF_COUNT + ) { millis, count -> + monitorCtx.moveAlertsRetryPolicy = BackoffPolicy.exponentialBackoff(millis, count) + } + + monitorCtx.allowList = ALLOW_LIST.get(monitorCtx.settings) + 
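// Each dynamic setting below follows the same pattern as the retry policies above: read the
+ // current value once at registration, then subscribe a consumer so the cached copy is
+ // refreshed whenever the cluster setting is updated.
+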
monitorCtx.clusterService!!.clusterSettings.addSettingsUpdateConsumer(ALLOW_LIST) { + monitorCtx.allowList = it + } + + // Host deny list is not a dynamic setting so no consumer is registered but the variable is set here + monitorCtx.hostDenyList = HOST_DENY_LIST.get(monitorCtx.settings) + + monitorCtx.maxActionableAlertCount = MAX_ACTIONABLE_ALERT_COUNT.get(monitorCtx.settings) + monitorCtx.clusterService!!.clusterSettings.addSettingsUpdateConsumer(MAX_ACTIONABLE_ALERT_COUNT) { + monitorCtx.maxActionableAlertCount = it + } + + monitorCtx.indexTimeout = INDEX_TIMEOUT.get(monitorCtx.settings) + + return this + } + + // To be safe, call this last as it depends on a number of other components being registered beforehand (client, settings, etc.) + fun registerDestinationSettings(): MonitorRunnerService { + monitorCtx.destinationSettings = loadDestinationSettings(monitorCtx.settings!!) + monitorCtx.destinationContextFactory = + DestinationContextFactory(monitorCtx.client!!, monitorCtx.xContentRegistry!!, monitorCtx.destinationSettings!!) + return this + } + + // Updates destination settings when the reload API is called so that new keystore values are visible + fun reloadDestinationSettings(settings: Settings) { + monitorCtx.destinationSettings = loadDestinationSettings(settings) + + // Update destinationContextFactory as well since destinationSettings has been updated + monitorCtx.destinationContextFactory!!.updateDestinationSettings(monitorCtx.destinationSettings!!) + } + + override fun doStart() { + runnerSupervisor = SupervisorJob() + } + + override fun doStop() { + runnerSupervisor.cancel() + } + + override fun doClose() {} + + override fun postIndex(job: ScheduledJob) { + if (job is Monitor) { + launch { + try { + monitorCtx.moveAlertsRetryPolicy!!.retry(logger) { + if (monitorCtx.alertIndices!!.isAlertInitialized(job.dataSources)) { + moveAlerts(monitorCtx.client!!, job.id, job) + } + } + } catch (e: Exception) { + logger.error("Failed to move active alerts for monitor [${job.id}].", e) + } + } + } else if (job is Workflow) { + launch { + try { + monitorCtx.moveAlertsRetryPolicy!!.retry(logger) { + moveAlerts(monitorCtx.client!!, job.id, job, monitorCtx) + } + } catch (e: Exception) { + logger.error("Failed to move active alerts for monitor [${job.id}].", e) + } + } + } else { + throw IllegalArgumentException("Invalid job type") + } + } + + override fun postDelete(jobId: String) { + launch { + try { + monitorCtx.moveAlertsRetryPolicy!!.retry(logger) { + moveAlerts(monitorCtx.client!!, jobId, null, monitorCtx) + } + } catch (e: Exception) { + logger.error("Failed to move active alerts for workflow [$jobId]. 
The job may be a monitor.", e)
+            }
+            try {
+                monitorCtx.moveAlertsRetryPolicy!!.retry(logger) {
+                    if (monitorCtx.alertIndices!!.isAlertInitialized()) {
+                        moveAlerts(monitorCtx.client!!, jobId, null)
+                    }
+                }
+            } catch (e: Exception) {
+                logger.error("Failed to move active alerts for monitor [$jobId].", e)
+            }
+        }
+    }
+
+    override fun runJob(job: ScheduledJob, periodStart: Instant, periodEnd: Instant) {
+        when (job) {
+            is Workflow -> {
+                launch {
+                    runJob(job, periodStart, periodEnd, false)
+                }
+            }
+            is Monitor -> {
+                launch {
+                    runJob(job, periodStart, periodEnd, false)
+                }
+            }
+            else -> {
+                throw IllegalArgumentException("Invalid job type")
+            }
+        }
+    }
+
+    suspend fun runJob(workflow: Workflow, periodStart: Instant, periodEnd: Instant, dryrun: Boolean): WorkflowRunResult {
+        return CompositeWorkflowRunner.runWorkflow(workflow, monitorCtx, periodStart, periodEnd, dryrun)
+    }
+
+    suspend fun runJob(job: ScheduledJob, periodStart: Instant, periodEnd: Instant, dryrun: Boolean): MonitorRunResult<*> {
+        // Updating the scheduled job index at the start of monitor execution covers the case where there was an upgrade
+        // and the schema mapping has not been updated yet.
+        if (!IndexUtils.scheduledJobIndexUpdated && monitorCtx.clusterService != null && monitorCtx.client != null) {
+            IndexUtils.updateIndexMapping(
+                ScheduledJob.SCHEDULED_JOBS_INDEX,
+                ScheduledJobIndices.scheduledJobMappings(), monitorCtx.clusterService!!.state(), monitorCtx.client!!.admin().indices(),
+                object : ActionListener<AcknowledgedResponse> {
+                    override fun onResponse(response: AcknowledgedResponse) {
+                    }
+
+                    override fun onFailure(t: Exception) {
+                        logger.error("Failed to update config index schema", t)
+                    }
+                }
+            )
+        }
+
+        if (job is Workflow) {
+            logger.info("Executing scheduled workflow - id: ${job.id}, periodStart: $periodStart, periodEnd: $periodEnd, dryrun: $dryrun")
+            CompositeWorkflowRunner.runWorkflow(workflow = job, monitorCtx, periodStart, periodEnd, dryrun)
+        }
+        val monitor = job as Monitor
+        val executionId = "${monitor.id}_${LocalDateTime.now(ZoneOffset.UTC)}_${UUID.randomUUID()}"
+        logger.info(
+            "Executing scheduled monitor - id: ${monitor.id}, type: ${monitor.monitorType.name}, periodStart: $periodStart, " +
+                "periodEnd: $periodEnd, dryrun: $dryrun, executionId: $executionId"
+        )
+        val runResult = if (monitor.isBucketLevelMonitor()) {
+            BucketLevelMonitorRunner.runMonitor(monitor, monitorCtx, periodStart, periodEnd, dryrun, executionId = executionId)
+        } else if (monitor.isDocLevelMonitor()) {
+            DocumentLevelMonitorRunner.runMonitor(monitor, monitorCtx, periodStart, periodEnd, dryrun, executionId = executionId)
+        } else {
+            QueryLevelMonitorRunner.runMonitor(monitor, monitorCtx, periodStart, periodEnd, dryrun, executionId = executionId)
+        }
+        return runResult
+    }
+
+    // TODO: See if we can move the methods below (or a few of them) to a common utils class
+    internal fun getRolesForMonitor(monitor: Monitor): List<String> {
+        /*
+         * We need to handle 3 cases:
+         * 1. Monitors created by older versions and never updated. These monitors won't have User details in the
+         *    monitor object. `monitor.user` will be null. Insert `all_access, AmazonES_all_access` role.
+         * 2. Monitors created when the security plugin is disabled. These will have an empty User object
+         *    (`monitor.user.name` and `monitor.user.roles` are empty).
+         * 3. Monitors created when the security plugin is enabled. These will have a User object.
+         */
+        return if (monitor.user == null) {
+            // fixme: discuss and remove hardcoded to settings?
+            // TODO: Remove "AmazonES_all_access" role?
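+            // The empty settings key is not expected to resolve, so getAsList returns the default
+            // list below, effectively hardcoding the legacy fallback roles noted above.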
+            monitorCtx.settings!!.getAsList("", listOf("all_access", "AmazonES_all_access"))
+        } else {
+            monitor.user!!.roles
+        }
+    }
+
+    // TODO: Can this be updated to just use 'Instant.now()'?
+    //  'threadPool.absoluteTimeInMillis()' refers to a cached value of System.currentTimeMillis() that by default updates every 200ms
+    internal fun currentTime() = Instant.ofEpochMilli(monitorCtx.threadPool!!.absoluteTimeInMillis())
+
+    internal fun isActionActionable(action: Action, alert: Alert?): Boolean {
+        if (alert != null && alert.state == Alert.State.AUDIT)
+            return false
+        if (alert == null || action.throttle == null) {
+            return true
+        }
+        if (action.throttleEnabled) {
+            val result = alert.actionExecutionResults.firstOrNull { r -> r.actionId == action.id }
+            val lastExecutionTime: Instant? = result?.lastExecutionTime
+            val throttledTimeBound = currentTime().minus(action.throttle!!.value.toLong(), action.throttle!!.unit)
+            return (lastExecutionTime == null || lastExecutionTime.isBefore(throttledTimeBound))
+        }
+        return true
+    }
+
+    internal fun compileTemplate(template: Script, ctx: TriggerExecutionContext): String {
+        return monitorCtx.scriptService!!.compile(template, TemplateScript.CONTEXT)
+            .newInstance(template.params + mapOf("ctx" to ctx.asTemplateArg()))
+            .execute()
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/QueryLevelMonitorRunner.kt b/alerting/bin/main/org/opensearch/alerting/QueryLevelMonitorRunner.kt
new file mode 100644
index 000000000..691071517
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/QueryLevelMonitorRunner.kt
@@ -0,0 +1,101 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting
+
+import org.apache.logging.log4j.LogManager
+import org.opensearch.alerting.model.MonitorRunResult
+import org.opensearch.alerting.model.QueryLevelTriggerRunResult
+import org.opensearch.alerting.opensearchapi.InjectorContextElement
+import org.opensearch.alerting.opensearchapi.withClosableContext
+import org.opensearch.alerting.script.QueryLevelTriggerExecutionContext
+import org.opensearch.alerting.util.isADMonitor
+import org.opensearch.alerting.workflow.WorkflowRunContext
+import org.opensearch.commons.alerting.model.Alert
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.commons.alerting.model.QueryLevelTrigger
+import java.time.Instant
+
+object QueryLevelMonitorRunner : MonitorRunner() {
+    private val logger = LogManager.getLogger(javaClass)
+
+    override suspend fun runMonitor(
+        monitor: Monitor,
+        monitorCtx: MonitorRunnerExecutionContext,
+        periodStart: Instant,
+        periodEnd: Instant,
+        dryrun: Boolean,
+        workflowRunContext: WorkflowRunContext?,
+        executionId: String
+    ): MonitorRunResult<QueryLevelTriggerRunResult> {
+        val roles = MonitorRunnerService.getRolesForMonitor(monitor)
+        logger.debug("Running monitor: ${monitor.name} with roles: $roles Thread: ${Thread.currentThread().name}")
+
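+        // An empty execution window (periodStart == periodEnd) is usually a scheduling anomaly, so it is logged rather than treated as an error.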
+        if (periodStart == periodEnd) {
+            logger.warn("Start and end time are the same: $periodStart. This monitor will probably only run once.")
+        }
+
+        var monitorResult = MonitorRunResult<QueryLevelTriggerRunResult>(monitor.name, periodStart, periodEnd)
+        val currentAlerts = try {
+            monitorCtx.alertIndices!!.createOrUpdateAlertIndex(monitor.dataSources)
+            monitorCtx.alertIndices!!.createOrUpdateInitialAlertHistoryIndex(monitor.dataSources)
+            monitorCtx.alertService!!.loadCurrentAlertsForQueryLevelMonitor(monitor, workflowRunContext)
+        } catch (e: Exception) {
+            // We can't save ERROR alerts to the index here as we don't know if there are existing ACTIVE alerts
+            val id = if (monitor.id.trim().isEmpty()) "_na_" else monitor.id
+            logger.error("Error loading alerts for monitor: $id", e)
+            return monitorResult.copy(error = e)
+        }
+        if (!isADMonitor(monitor)) {
+            withClosableContext(InjectorContextElement(monitor.id, monitorCtx.settings!!, monitorCtx.threadPool!!.threadContext, roles)) {
+                monitorResult = monitorResult.copy(
+                    inputResults = monitorCtx.inputService!!.collectInputResults(monitor, periodStart, periodEnd, null, workflowRunContext)
+                )
+            }
+        } else {
+            monitorResult = monitorResult.copy(
+                inputResults = monitorCtx.inputService!!.collectInputResultsForADMonitor(monitor, periodStart, periodEnd)
+            )
+        }
+
+        val updatedAlerts = mutableListOf<Alert>()
+        val triggerResults = mutableMapOf<String, QueryLevelTriggerRunResult>()
+        for (trigger in monitor.triggers) {
+            val currentAlert = currentAlerts[trigger]
+            val triggerCtx = QueryLevelTriggerExecutionContext(monitor, trigger as QueryLevelTrigger, monitorResult, currentAlert)
+            val triggerResult = monitorCtx.triggerService!!.runQueryLevelTrigger(monitor, trigger, triggerCtx)
+            triggerResults[trigger.id] = triggerResult
+
+            if (monitorCtx.triggerService!!.isQueryLevelTriggerActionable(triggerCtx, triggerResult, workflowRunContext)) {
+                val actionCtx = triggerCtx.copy(error = monitorResult.error ?: triggerResult.error)
+                for (action in trigger.actions) {
+                    triggerResult.actionResults[action.id] = this.runAction(action, actionCtx, monitorCtx, monitor, dryrun)
+                }
+            }
+
+            val updatedAlert = monitorCtx.alertService!!.composeQueryLevelAlert(
+                triggerCtx,
+                triggerResult,
+                monitorResult.alertError() ?: triggerResult.alertError(),
+                executionId,
+                workflowRunContext
+            )
+            if (updatedAlert != null) updatedAlerts += updatedAlert
+        }
+
+        // Don't save alerts if this is a test monitor
+        if (!dryrun && monitor.id != Monitor.NO_ID) {
+            monitorCtx.retryPolicy?.let {
+                monitorCtx.alertService!!.saveAlerts(
+                    monitor.dataSources,
+                    updatedAlerts,
+                    it,
+                    routingId = monitor.id
+                )
+            }
+        }
+        return monitorResult.copy(triggerResults = triggerResults)
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/TriggerService.kt b/alerting/bin/main/org/opensearch/alerting/TriggerService.kt
new file mode 100644
index 000000000..f2356eddf
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/TriggerService.kt
@@ -0,0 +1,186 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting
+
+import org.apache.logging.log4j.LogManager
+import org.opensearch.alerting.chainedAlertCondition.parsers.ChainedAlertExpressionParser
+import org.opensearch.alerting.model.BucketLevelTriggerRunResult
+import org.opensearch.alerting.model.ChainedAlertTriggerRunResult
+import org.opensearch.alerting.model.DocumentLevelTriggerRunResult
+import org.opensearch.alerting.model.QueryLevelTriggerRunResult
+import org.opensearch.alerting.script.BucketLevelTriggerExecutionContext
+import org.opensearch.alerting.script.ChainedAlertTriggerExecutionContext
+import
org.opensearch.alerting.script.QueryLevelTriggerExecutionContext +import org.opensearch.alerting.script.TriggerScript +import org.opensearch.alerting.triggercondition.parsers.TriggerExpressionParser +import org.opensearch.alerting.util.getBucketKeysHash +import org.opensearch.alerting.workflow.WorkflowRunContext +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorIndices.Fields.BUCKET_INDICES +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorIndices.Fields.PARENT_BUCKET_PATH +import org.opensearch.commons.alerting.model.AggregationResultBucket +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.BucketLevelTrigger +import org.opensearch.commons.alerting.model.ChainedAlertTrigger +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.DocumentLevelTrigger +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.QueryLevelTrigger +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.script.Script +import org.opensearch.script.ScriptService +import org.opensearch.search.aggregations.Aggregation +import org.opensearch.search.aggregations.Aggregations +import org.opensearch.search.aggregations.support.AggregationPath + +/** Service that handles executing Triggers */ +class TriggerService(val scriptService: ScriptService) { + + private val logger = LogManager.getLogger(TriggerService::class.java) + private val ALWAYS_RUN = Script("return true") + private val NEVER_RUN = Script("return false") + + fun isQueryLevelTriggerActionable( + ctx: QueryLevelTriggerExecutionContext, + result: QueryLevelTriggerRunResult, + workflowRunContext: WorkflowRunContext?, + ): Boolean { + if (workflowRunContext?.auditDelegateMonitorAlerts == true) return false + // Suppress actions if the current alert is acknowledged and there are no errors. + val suppress = ctx.alert?.state == Alert.State.ACKNOWLEDGED && result.error == null && ctx.error == null + return result.triggered && !suppress + } + + fun isChainedAlertTriggerActionable( + ctx: ChainedAlertTriggerExecutionContext, + result: ChainedAlertTriggerRunResult, + ): Boolean { + // Suppress actions if the current alert is acknowledged and there are no errors. 
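+        // Note: unlike the query-level check above, this variant takes no workflowRunContext, so there is no audit-delegate short-circuit for chained alert triggers.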
+        val suppress = ctx.alert?.state == Alert.State.ACKNOWLEDGED && result.error == null && ctx.error == null
+        return result.triggered && !suppress
+    }
+
+    fun runQueryLevelTrigger(
+        monitor: Monitor,
+        trigger: QueryLevelTrigger,
+        ctx: QueryLevelTriggerExecutionContext
+    ): QueryLevelTriggerRunResult {
+        return try {
+            val triggered = scriptService.compile(trigger.condition, TriggerScript.CONTEXT)
+                .newInstance(trigger.condition.params)
+                .execute(ctx)
+            QueryLevelTriggerRunResult(trigger.name, triggered, null)
+        } catch (e: Exception) {
+            logger.info("Error running script for monitor ${monitor.id}, trigger: ${trigger.id}", e)
+            // if the script fails we need to send an alert so set triggered = true
+            QueryLevelTriggerRunResult(trigger.name, true, e)
+        }
+    }
+
+    // TODO: improve performance and support match all and match any
+    fun runDocLevelTrigger(
+        monitor: Monitor,
+        trigger: DocumentLevelTrigger,
+        queryToDocIds: Map<DocLevelQuery, Set<String>>
+    ): DocumentLevelTriggerRunResult {
+        return try {
+            var triggeredDocs = mutableListOf<String>()
+
+            if (trigger.condition.idOrCode.equals(ALWAYS_RUN.idOrCode)) {
+                for (value in queryToDocIds.values) {
+                    triggeredDocs.addAll(value)
+                }
+            } else if (!trigger.condition.idOrCode.equals(NEVER_RUN.idOrCode)) {
+                triggeredDocs = TriggerExpressionParser(trigger.condition.idOrCode).parse()
+                    .evaluate(queryToDocIds).toMutableList()
+            }
+
+            DocumentLevelTriggerRunResult(trigger.name, triggeredDocs, null)
+        } catch (e: Exception) {
+            logger.info("Error running script for monitor ${monitor.id}, trigger: ${trigger.id}", e)
+            // if the expression fails to evaluate, return the error with an empty list of triggered docs
+            DocumentLevelTriggerRunResult(trigger.name, emptyList(), e)
+        }
+    }
+
+    fun runChainedAlertTrigger(
+        workflow: Workflow,
+        trigger: ChainedAlertTrigger,
+        alertGeneratingMonitors: Set<String>,
+        monitorIdToAlertIdsMap: Map<String, Set<String>>,
+    ): ChainedAlertTriggerRunResult {
+        val associatedAlertIds = mutableSetOf<String>()
+        return try {
+            val parsedTriggerCondition = ChainedAlertExpressionParser(trigger.condition.idOrCode).parse()
+            val evaluate = parsedTriggerCondition.evaluate(alertGeneratingMonitors)
+            if (evaluate) {
+                val monitorIdsInTriggerCondition = parsedTriggerCondition.getMonitorIds(parsedTriggerCondition)
+                monitorIdsInTriggerCondition.forEach { associatedAlertIds.addAll(monitorIdToAlertIdsMap.getOrDefault(it, emptySet())) }
+            }
+            ChainedAlertTriggerRunResult(trigger.name, triggered = evaluate, null, associatedAlertIds = associatedAlertIds)
+        } catch (e: Exception) {
+            logger.error("Error running chained alert trigger script for workflow ${workflow.id}, trigger: ${trigger.id}", e)
+            ChainedAlertTriggerRunResult(
+                triggerName = trigger.name,
+                triggered = false,
+                error = e,
+                associatedAlertIds = emptySet()
+            )
+        }
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    fun runBucketLevelTrigger(
+        monitor: Monitor,
+        trigger: BucketLevelTrigger,
+        ctx: BucketLevelTriggerExecutionContext
+    ): BucketLevelTriggerRunResult {
+        return try {
+            val bucketIndices =
+                ((ctx.results[0][Aggregations.AGGREGATIONS_FIELD] as HashMap<*, *>)[trigger.id] as HashMap<*, *>)[BUCKET_INDICES] as List<*>
+            val parentBucketPath = (
+                (ctx.results[0][Aggregations.AGGREGATIONS_FIELD] as HashMap<*, *>)
+                    .get(trigger.id) as HashMap<*, *>
+                )[PARENT_BUCKET_PATH] as String
+            val aggregationPath = AggregationPath.parse(parentBucketPath)
+            // TODO test this part by passing sub-aggregation path
+            var parentAgg = (ctx.results[0][Aggregations.AGGREGATIONS_FIELD] as HashMap<*, *>)
+            aggregationPath.pathElementsAsStringList.forEach { sub_agg ->
+                parentAgg = (parentAgg[sub_agg] as HashMap<*, *>)
+            }
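+            // After this walk parentAgg holds the aggregation addressed by parentBucketPath; its buckets are filtered below to just the indices the bucket selector flagged.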
+            val buckets = parentAgg[Aggregation.CommonFields.BUCKETS.preferredName] as List<*>
+            val selectedBuckets = mutableMapOf<String, AggregationResultBucket>()
+            for (bucketIndex in bucketIndices) {
+                val bucketDict = buckets[bucketIndex as Int] as Map<String, Any>
+                val bucketKeyValuesList = getBucketKeyValuesList(bucketDict)
+                val aggResultBucket = AggregationResultBucket(parentBucketPath, bucketKeyValuesList, bucketDict)
+                selectedBuckets[aggResultBucket.getBucketKeysHash()] = aggResultBucket
+            }
+            BucketLevelTriggerRunResult(trigger.name, null, selectedBuckets)
+        } catch (e: Exception) {
+            logger.info("Error running trigger [${trigger.id}] for monitor [${monitor.id}]", e)
+            BucketLevelTriggerRunResult(trigger.name, e, emptyMap())
+        }
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    private fun getBucketKeyValuesList(bucket: Map<String, Any>): List<String> {
+        val keyField = Aggregation.CommonFields.KEY.preferredName
+        val keyValuesList = mutableListOf<String>()
+        when {
+            bucket[keyField] is List<*> && bucket.containsKey(Aggregation.CommonFields.KEY_AS_STRING.preferredName) ->
+                keyValuesList.add(bucket[Aggregation.CommonFields.KEY_AS_STRING.preferredName] as String)
+            bucket[keyField] is String -> keyValuesList.add(bucket[keyField] as String)
+            // In the case where the key field is an Int
+            bucket[keyField] is Int -> keyValuesList.add(bucket[keyField].toString())
+            // In the case where the key field is an object with multiple values (such as a composite aggregation with more than one source)
+            // the values will be iterated through and converted into a string
+            bucket[keyField] is Map<*, *> -> (bucket[keyField] as Map<*, *>).values.forEach { keyValuesList.add(it.toString()) }
+            else -> throw IllegalArgumentException("Unexpected format for key in bucket [$bucket]")
+        }
+
+        return keyValuesList
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/WorkflowMetadataService.kt b/alerting/bin/main/org/opensearch/alerting/WorkflowMetadataService.kt
new file mode 100644
index 000000000..9dc4fbcdd
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/WorkflowMetadataService.kt
@@ -0,0 +1,174 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting
+
+import kotlinx.coroutines.CoroutineName
+import kotlinx.coroutines.CoroutineScope
+import kotlinx.coroutines.Dispatchers
+import kotlinx.coroutines.SupervisorJob
+import org.apache.logging.log4j.LogManager
+import org.opensearch.OpenSearchException
+import org.opensearch.action.DocWriteRequest
+import org.opensearch.action.DocWriteResponse
+import org.opensearch.action.get.GetRequest
+import org.opensearch.action.get.GetResponse
+import org.opensearch.action.index.IndexRequest
+import org.opensearch.action.index.IndexResponse
+import org.opensearch.action.support.WriteRequest
+import org.opensearch.alerting.model.WorkflowMetadata
+import org.opensearch.alerting.opensearchapi.suspendUntil
+import org.opensearch.alerting.settings.AlertingSettings
+import org.opensearch.alerting.util.AlertingException
+import org.opensearch.client.Client
+import org.opensearch.cluster.service.ClusterService
+import org.opensearch.common.settings.Settings
+import org.opensearch.common.unit.TimeValue
+import org.opensearch.common.xcontent.LoggingDeprecationHandler
+import org.opensearch.common.xcontent.XContentFactory
+import org.opensearch.common.xcontent.XContentHelper
+import org.opensearch.common.xcontent.XContentType
+import org.opensearch.commons.alerting.model.CompositeInput
+import org.opensearch.commons.alerting.model.ScheduledJob
+import org.opensearch.commons.alerting.model.Workflow
+import org.opensearch.core.rest.RestStatus
+import org.opensearch.core.xcontent.NamedXContentRegistry
+import org.opensearch.core.xcontent.ToXContent
+import org.opensearch.core.xcontent.XContentParser
+import org.opensearch.core.xcontent.XContentParserUtils
+import java.time.Instant
+import java.time.LocalDateTime
+import java.time.ZoneOffset
+import java.util.UUID
+
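+/**
+ * Singleton that reads and writes the workflow metadata documents (monitor ids, latest run time
+ * and latest execution id) stored alongside the jobs themselves in the scheduled jobs index.
+ */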
+object WorkflowMetadataService :
+    CoroutineScope by CoroutineScope(SupervisorJob() + Dispatchers.Default + CoroutineName("WorkflowMetadataService")) {
+    private val log = LogManager.getLogger(this::class.java)
+
+    private lateinit var client: Client
+    private lateinit var xContentRegistry: NamedXContentRegistry
+    private lateinit var clusterService: ClusterService
+    private lateinit var settings: Settings
+
+    @Volatile private lateinit var indexTimeout: TimeValue
+
+    fun initialize(
+        client: Client,
+        clusterService: ClusterService,
+        xContentRegistry: NamedXContentRegistry,
+        settings: Settings
+    ) {
+        this.clusterService = clusterService
+        this.client = client
+        this.xContentRegistry = xContentRegistry
+        this.settings = settings
+        this.indexTimeout = AlertingSettings.INDEX_TIMEOUT.get(settings)
+        this.clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.INDEX_TIMEOUT) { indexTimeout = it }
+    }
+
+    @Suppress("ComplexMethod", "ReturnCount")
+    suspend fun upsertWorkflowMetadata(metadata: WorkflowMetadata, updating: Boolean): WorkflowMetadata {
+        try {
+            val indexRequest = IndexRequest(ScheduledJob.SCHEDULED_JOBS_INDEX)
+                .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+                .source(metadata.toXContent(XContentFactory.jsonBuilder(), ToXContent.MapParams(mapOf("with_type" to "true"))))
+                .id(metadata.id)
+                .routing(metadata.workflowId)
+                .timeout(indexTimeout)
+
+            if (updating) {
+                indexRequest.id(metadata.id)
+            } else {
+                indexRequest.opType(DocWriteRequest.OpType.CREATE)
+            }
+            val response: IndexResponse = client.suspendUntil { index(indexRequest, it) }
+            when (response.result) {
+                DocWriteResponse.Result.DELETED, DocWriteResponse.Result.NOOP, DocWriteResponse.Result.NOT_FOUND, null -> {
+                    val failureReason = "The upsert metadata call failed with a ${response.result?.lowercase} result"
+                    log.error(failureReason)
+                    throw AlertingException(failureReason, RestStatus.INTERNAL_SERVER_ERROR, IllegalStateException(failureReason))
+                }
+                DocWriteResponse.Result.CREATED, DocWriteResponse.Result.UPDATED -> {
+                    log.debug("Successfully upserted WorkflowMetadata:${metadata.id}")
+                }
+            }
+            return metadata
+        } catch (e: Exception) {
+            // If updating is set to false and the metadata id already exists, a conflict exception will be thrown
+            if (e is OpenSearchException && e.status() == RestStatus.CONFLICT && !updating) {
+                log.debug(
+                    "Metadata with ${metadata.id} for workflow ${metadata.workflowId} already exists." +
+ + " Instead of creating new, updating existing metadata will be performed" + ) + return upsertWorkflowMetadata(metadata, true) + } + log.error("Error saving metadata", e) + throw AlertingException.wrap(e) + } + } + + suspend fun getOrCreateWorkflowMetadata( + workflow: Workflow, + skipIndex: Boolean = false, + executionId: String + ): Pair { + try { + val created = true + val metadata = getWorkflowMetadata(workflow) + return if (metadata != null) { + metadata to !created + } else { + val newMetadata = createNewWorkflowMetadata(workflow, executionId, skipIndex) + if (skipIndex) { + newMetadata to created + } else { + upsertWorkflowMetadata(newMetadata, updating = false) to created + } + } + } catch (e: Exception) { + throw AlertingException.wrap(e) + } + } + + private suspend fun getWorkflowMetadata(workflow: Workflow): WorkflowMetadata? { + try { + val metadataId = WorkflowMetadata.getId(workflow.id) + val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, metadataId).routing(workflow.id) + + val getResponse: GetResponse = client.suspendUntil { get(getRequest, it) } + return if (getResponse.isExists) { + val xcp = XContentHelper.createParser( + xContentRegistry, + LoggingDeprecationHandler.INSTANCE, + getResponse.sourceAsBytesRef, + XContentType.JSON + ) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + WorkflowMetadata.parse(xcp) + } else { + null + } + } catch (e: Exception) { + if (e.message?.contains("no such index") == true) { + return null + } else { + throw AlertingException.wrap(e) + } + } + } + + private fun createNewWorkflowMetadata(workflow: Workflow, executionId: String, isTempWorkflow: Boolean): WorkflowMetadata { + // In the case of temp workflow (ie. workflow is in dry-run) use timestampWithUUID-metadata format + // In the case of regular workflow execution, use the workflowId-metadata format + val id = if (isTempWorkflow) "${LocalDateTime.now(ZoneOffset.UTC)}${UUID.randomUUID()}" else workflow.id + return WorkflowMetadata( + id = WorkflowMetadata.getId(id), + workflowId = workflow.id, + monitorIds = (workflow.inputs[0] as CompositeInput).getMonitorIds(), + latestRunTime = Instant.now(), + latestExecutionId = executionId + ) + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/WorkflowService.kt b/alerting/bin/main/org/opensearch/alerting/WorkflowService.kt new file mode 100644 index 000000000..04bd64b8d --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/WorkflowService.kt @@ -0,0 +1,144 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import org.apache.logging.log4j.LogManager +import org.opensearch.OpenSearchException +import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequest +import org.opensearch.action.admin.indices.exists.indices.IndicesExistsResponse +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.util.AlertingException +import org.opensearch.client.Client +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.Finding +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser 
+import org.opensearch.core.xcontent.XContentParserUtils
+import org.opensearch.index.query.QueryBuilders
+import org.opensearch.search.builder.SearchSourceBuilder
+
+private val log = LogManager.getLogger(WorkflowService::class.java)
+
+/**
+ * Contains util methods used in workflow execution
+ */
+class WorkflowService(
+    val client: Client,
+    val xContentRegistry: NamedXContentRegistry,
+) {
+    /**
+     * Returns finding doc ids per index for the given workflow execution
+     * Used for pre-filtering the dataset in the case of creating a workflow with chained findings
+     *
+     * @param chainedMonitors Monitors that have previously executed
+     * @param workflowExecutionId Execution id of the current workflow
+     */
+    suspend fun getFindingDocIdsByExecutionId(chainedMonitors: List<Monitor>, workflowExecutionId: String): Map<String, List<String>> {
+        if (chainedMonitors.isEmpty())
+            return emptyMap()
+        val dataSources = chainedMonitors[0].dataSources
+        try {
+            val existsResponse: IndicesExistsResponse = client.admin().indices().suspendUntil {
+                exists(IndicesExistsRequest(dataSources.findingsIndex).local(true), it)
+            }
+            if (!existsResponse.isExists) return emptyMap()
+            // Search the findings index for matching monitor ids and the workflow execution id
+            val bqb = QueryBuilders.boolQuery()
+                .filter(
+                    QueryBuilders.termsQuery(
+                        Finding.MONITOR_ID_FIELD,
+                        chainedMonitors.map { it.id }
+                    )
+                )
+                .filter(QueryBuilders.termQuery(Finding.EXECUTION_ID_FIELD, workflowExecutionId))
+            val searchRequest = SearchRequest()
+                .source(
+                    SearchSourceBuilder()
+                        .query(bqb)
+                        .version(true)
+                        .seqNoAndPrimaryTerm(true)
+                )
+                .indices(dataSources.findingsIndex)
+            val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) }
+
+            // Get the findings docs
+            val findings = mutableListOf<Finding>()
+            for (hit in searchResponse.hits) {
+                val xcp = XContentType.JSON.xContent()
+                    .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, hit.sourceAsString)
+                XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp)
+                val finding = Finding.parse(xcp)
+                findings.add(finding)
+            }
+            // Based on the findings, collect the related document ids per index
+            val indexToRelatedDocIdsMap = mutableMapOf<String, MutableList<String>>()
+            for (finding in findings) {
+                indexToRelatedDocIdsMap.getOrPut(finding.index) { mutableListOf() }.addAll(finding.relatedDocIds)
+            }
+            return indexToRelatedDocIdsMap
+        } catch (t: Exception) {
+            log.error("Error getting finding doc ids: ${t.message}", t)
+            throw AlertingException.wrap(t)
+        }
+    }
+
+    /**
+     * Returns the list of monitors for the given ids
+     * Used in workflow execution in order to figure out the monitor type
+     *
+     * @param monitors List of monitor ids
+     * @param size Expected number of monitors
+     */
+    suspend fun getMonitorsById(monitors: List<String>, size: Int): List<Monitor> {
+        try {
+            val bqb = QueryBuilders.boolQuery().filter(QueryBuilders.termsQuery("_id", monitors))
+
+            val searchRequest = SearchRequest()
+                .source(
+                    SearchSourceBuilder()
+                        .query(bqb)
+                        .version(true)
+                        .seqNoAndPrimaryTerm(true)
+                        .size(size)
+                )
+                .indices(ScheduledJob.SCHEDULED_JOBS_INDEX)
+
+            val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) }
+            return parseMonitors(searchResponse)
+        } catch (e: Exception) {
+            log.error("Error getting monitors: ${e.message}", e)
+            throw AlertingException.wrap(e)
+        }
+    }
+
+    private fun parseMonitors(response: SearchResponse): List<Monitor> {
+        if (response.isTimedOut) {
+            log.error("Request for getting monitors timed out")
+            throw OpenSearchException("Cannot determine whether the ${ScheduledJob.SCHEDULED_JOBS_INDEX} index is healthy")
+        }
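+        // Each hit is parsed with the polymorphic ScheduledJob parser and downcast to Monitor; a non-monitor job in the results would fail the cast and abort parsing.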
+        val monitors = mutableListOf<Monitor>()
+        try {
+            for (hit in response.hits) {
+                XContentType.JSON.xContent().createParser(
+                    xContentRegistry,
+                    LoggingDeprecationHandler.INSTANCE, hit.sourceAsString
+                ).use { hitsParser ->
+                    val monitor = ScheduledJob.parse(hitsParser, hit.id, hit.version) as Monitor
+                    monitors.add(monitor)
+                }
+            }
+        } catch (e: Exception) {
+            log.error("Error parsing monitors: ${e.message}", e)
+            throw AlertingException.wrap(e)
+        }
+        return monitors
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorAction.kt
new file mode 100644
index 000000000..4cc869b37
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorAction.kt
@@ -0,0 +1,15 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.action
+
+import org.opensearch.action.ActionType
+
+class ExecuteMonitorAction private constructor() : ActionType<ExecuteMonitorResponse>(NAME, ::ExecuteMonitorResponse) {
+    companion object {
+        val INSTANCE = ExecuteMonitorAction()
+        const val NAME = "cluster:admin/opendistro/alerting/monitor/execute"
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorRequest.kt b/alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorRequest.kt
new file mode 100644
index 000000000..ecc504677
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorRequest.kt
@@ -0,0 +1,60 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.action
+
+import org.opensearch.action.ActionRequest
+import org.opensearch.action.ActionRequestValidationException
+import org.opensearch.common.unit.TimeValue
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.core.common.io.stream.StreamInput
+import org.opensearch.core.common.io.stream.StreamOutput
+import java.io.IOException
+
+class ExecuteMonitorRequest : ActionRequest {
+    val dryrun: Boolean
+    val requestEnd: TimeValue
+    val monitorId: String?
+    val monitor: Monitor?
+
+    constructor(
+        dryrun: Boolean,
+        requestEnd: TimeValue,
+        monitorId: String?,
+        monitor: Monitor?
+    ) : super() {
+        this.dryrun = dryrun
+        this.requestEnd = requestEnd
+        this.monitorId = monitorId
+        this.monitor = monitor
+    }
+
+    @Throws(IOException::class)
+    constructor(sin: StreamInput) : this(
+        sin.readBoolean(), // dryrun
+        sin.readTimeValue(), // requestEnd
+        sin.readOptionalString(), // monitorId
+        if (sin.readBoolean()) {
+            Monitor.readFrom(sin) // monitor
+        } else null
+    )
+
+    override fun validate(): ActionRequestValidationException?
{ + return null + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeBoolean(dryrun) + out.writeTimeValue(requestEnd) + out.writeOptionalString(monitorId) + if (monitor != null) { + out.writeBoolean(true) + monitor.writeTo(out) + } else { + out.writeBoolean(false) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorResponse.kt b/alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorResponse.kt new file mode 100644 index 000000000..8d7a7c25a --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorResponse.kt @@ -0,0 +1,39 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.alerting.model.MonitorRunResult +import org.opensearch.core.action.ActionResponse +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder +import java.io.IOException + +class ExecuteMonitorResponse : ActionResponse, ToXContentObject { + + val monitorRunResult: MonitorRunResult<*> + + constructor(monitorRunResult: MonitorRunResult<*>) : super() { + this.monitorRunResult = monitorRunResult + } + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + MonitorRunResult.readFrom(sin) // monitorRunResult + ) + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + monitorRunResult.writeTo(out) + } + + @Throws(IOException::class) + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return monitorRunResult.toXContent(builder, ToXContent.EMPTY_PARAMS) + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowAction.kt b/alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowAction.kt new file mode 100644 index 000000000..efed1087d --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowAction.kt @@ -0,0 +1,15 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.action.ActionType + +class ExecuteWorkflowAction private constructor() : ActionType(NAME, ::ExecuteWorkflowResponse) { + companion object { + val INSTANCE = ExecuteWorkflowAction() + const val NAME = "cluster:admin/opensearch/alerting/workflow/execute" + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowRequest.kt b/alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowRequest.kt new file mode 100644 index 000000000..3b3d48ed2 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowRequest.kt @@ -0,0 +1,70 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.action.ActionRequest +import org.opensearch.action.ActionRequestValidationException +import org.opensearch.action.ValidateActions +import org.opensearch.common.unit.TimeValue +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import java.io.IOException + +/** + * A class containing workflow details. 
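+ * Carries either the id of an existing workflow or an inline workflow definition, plus the dryrun
+ * flag and execution end time; validate() rejects requests where both workflowId and workflow are null.
+ *
+ * A minimal usage sketch (hypothetical id and timeout values):
+ *   val request = ExecuteWorkflowRequest(false, TimeValue.timeValueSeconds(30), "my-workflow-id", null)
+ *   client.execute(ExecuteWorkflowAction.INSTANCE, request, listener)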
+ */ +class ExecuteWorkflowRequest : ActionRequest { + val dryrun: Boolean + val requestEnd: TimeValue + val workflowId: String? + val workflow: Workflow? + + constructor( + dryrun: Boolean, + requestEnd: TimeValue, + workflowId: String?, + workflow: Workflow?, + ) : super() { + this.dryrun = dryrun + this.requestEnd = requestEnd + this.workflowId = workflowId + this.workflow = workflow + } + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + sin.readBoolean(), + sin.readTimeValue(), + sin.readOptionalString(), + if (sin.readBoolean()) { + Workflow.readFrom(sin) + } else null + ) + + override fun validate(): ActionRequestValidationException? { + var validationException: ActionRequestValidationException? = null + if (workflowId == null && workflow == null) { + validationException = ValidateActions.addValidationError( + "Both workflow and workflow id are missing", validationException + ) + } + return validationException + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeBoolean(dryrun) + out.writeTimeValue(requestEnd) + out.writeOptionalString(workflowId) + if (workflow != null) { + out.writeBoolean(true) + workflow.writeTo(out) + } else { + out.writeBoolean(false) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowResponse.kt b/alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowResponse.kt new file mode 100644 index 000000000..7312a9470 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowResponse.kt @@ -0,0 +1,39 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.alerting.model.WorkflowRunResult +import org.opensearch.core.action.ActionResponse +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder +import java.io.IOException + +class ExecuteWorkflowResponse : ActionResponse, ToXContentObject { + val workflowRunResult: WorkflowRunResult + constructor( + workflowRunResult: WorkflowRunResult + ) : super() { + this.workflowRunResult = workflowRunResult + } + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + WorkflowRunResult(sin) + ) + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + workflowRunResult.writeTo(out) + } + + @Throws(IOException::class) + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return workflowRunResult.toXContent(builder, params) + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/action/GetDestinationsAction.kt b/alerting/bin/main/org/opensearch/alerting/action/GetDestinationsAction.kt new file mode 100644 index 000000000..76adde8c6 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/action/GetDestinationsAction.kt @@ -0,0 +1,15 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.action.ActionType + +class GetDestinationsAction private constructor() : ActionType(NAME, ::GetDestinationsResponse) { + companion object { + val INSTANCE = GetDestinationsAction() + const val NAME = "cluster:admin/opendistro/alerting/destination/get" + } +} diff --git 
a/alerting/bin/main/org/opensearch/alerting/action/GetDestinationsRequest.kt b/alerting/bin/main/org/opensearch/alerting/action/GetDestinationsRequest.kt
new file mode 100644
index 000000000..92fae8247
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/action/GetDestinationsRequest.kt
@@ -0,0 +1,61 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.action
+
+import org.opensearch.action.ActionRequest
+import org.opensearch.action.ActionRequestValidationException
+import org.opensearch.commons.alerting.model.Table
+import org.opensearch.core.common.io.stream.StreamInput
+import org.opensearch.core.common.io.stream.StreamOutput
+import org.opensearch.search.fetch.subphase.FetchSourceContext
+import java.io.IOException
+
+class GetDestinationsRequest : ActionRequest {
+    val destinationId: String?
+    val version: Long
+    val srcContext: FetchSourceContext?
+    val table: Table
+    val destinationType: String
+
+    constructor(
+        destinationId: String?,
+        version: Long,
+        srcContext: FetchSourceContext?,
+        table: Table,
+        destinationType: String
+    ) : super() {
+        this.destinationId = destinationId
+        this.version = version
+        this.srcContext = srcContext
+        this.table = table
+        this.destinationType = destinationType
+    }
+
+    @Throws(IOException::class)
+    constructor(sin: StreamInput) : this(
+        destinationId = sin.readOptionalString(),
+        version = sin.readLong(),
+        srcContext = if (sin.readBoolean()) {
+            FetchSourceContext(sin)
+        } else null,
+        table = Table.readFrom(sin),
+        destinationType = sin.readString()
+    )
+
+    override fun validate(): ActionRequestValidationException? {
+        return null
+    }
+
+    @Throws(IOException::class)
+    override fun writeTo(out: StreamOutput) {
+        out.writeOptionalString(destinationId)
+        out.writeLong(version)
+        out.writeBoolean(srcContext != null)
+        srcContext?.writeTo(out)
+        table.writeTo(out)
+        out.writeString(destinationType)
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/action/GetDestinationsResponse.kt b/alerting/bin/main/org/opensearch/alerting/action/GetDestinationsResponse.kt
new file mode 100644
index 000000000..01d79a50e
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/action/GetDestinationsResponse.kt
@@ -0,0 +1,66 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.action
+
+import org.opensearch.alerting.model.destination.Destination
+import org.opensearch.core.action.ActionResponse
+import org.opensearch.core.common.io.stream.StreamInput
+import org.opensearch.core.common.io.stream.StreamOutput
+import org.opensearch.core.rest.RestStatus
+import org.opensearch.core.xcontent.ToXContent
+import org.opensearch.core.xcontent.ToXContentObject
+import org.opensearch.core.xcontent.XContentBuilder
+import java.io.IOException
+
+class GetDestinationsResponse : ActionResponse, ToXContentObject {
+    var status: RestStatus
+
+    // totalDestinations is not the same as the size of destinations: the index may hold 30 destinations while
+    // the request only asked for 5, so totalDestinations will be 30 but destinations will only contain 5
+    var totalDestinations: Int?
+    var destinations: List<Destination>
+
+    constructor(
+        status: RestStatus,
+        totalDestinations: Int?,
+        destinations: List<Destination>
+    ) : super() {
+        this.status = status
+        this.totalDestinations = totalDestinations
+        this.destinations = destinations
+    }
+
+    @Throws(IOException::class)
+    constructor(sin: StreamInput) {
+        this.status = sin.readEnum(RestStatus::class.java)
+        val destinations = mutableListOf<Destination>()
+        this.totalDestinations = sin.readOptionalInt()
+        val currentSize = sin.readInt()
+        for (i in 0 until currentSize) {
+            destinations.add(Destination.readFrom(sin))
+        }
+        this.destinations = destinations
+    }
+
+    @Throws(IOException::class)
+    override fun writeTo(out: StreamOutput) {
+        out.writeEnum(status)
+        out.writeOptionalInt(totalDestinations)
+        out.writeInt(destinations.size)
+        for (destination in destinations) {
+            destination.writeTo(out)
+        }
+    }
+
+    @Throws(IOException::class)
+    override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder {
+        builder.startObject()
+            .field("totalDestinations", totalDestinations)
+            .field("destinations", destinations)
+
+        return builder.endObject()
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountAction.kt b/alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountAction.kt
new file mode 100644
index 000000000..c16a28e17
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountAction.kt
@@ -0,0 +1,15 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.action
+
+import org.opensearch.action.ActionType
+
+class GetEmailAccountAction private constructor() : ActionType<GetEmailAccountResponse>(NAME, ::GetEmailAccountResponse) {
+    companion object {
+        val INSTANCE = GetEmailAccountAction()
+        const val NAME = "cluster:admin/opendistro/alerting/destination/email_account/get"
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountRequest.kt b/alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountRequest.kt
new file mode 100644
index 000000000..94b79726e
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountRequest.kt
@@ -0,0 +1,60 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.action
+
+import org.opensearch.action.ActionRequest
+import org.opensearch.action.ActionRequestValidationException
+import org.opensearch.core.common.io.stream.StreamInput
+import org.opensearch.core.common.io.stream.StreamOutput
+import org.opensearch.rest.RestRequest
+import org.opensearch.search.fetch.subphase.FetchSourceContext
+import java.io.IOException
+
+class GetEmailAccountRequest : ActionRequest {
+    val emailAccountID: String
+    val version: Long
+    val method: RestRequest.Method
+    val srcContext: FetchSourceContext?
+
+    constructor(
+        emailAccountID: String,
+        version: Long,
+        method: RestRequest.Method,
+        srcContext: FetchSourceContext?
+    ) : super() {
+        this.emailAccountID = emailAccountID
+        this.version = version
+        this.method = method
+        this.srcContext = srcContext
+    }
+
+    @Throws(IOException::class)
+    constructor(sin: StreamInput) : this(
+        sin.readString(), // emailAccountID
+        sin.readLong(), // version
+        sin.readEnum(RestRequest.Method::class.java), // method
+        if (sin.readBoolean()) {
+            FetchSourceContext(sin) // srcContext
+        } else null
+    )
+
+    override fun validate(): ActionRequestValidationException?
{ + return null + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeString(emailAccountID) + out.writeLong(version) + out.writeEnum(method) + if (srcContext != null) { + out.writeBoolean(true) + srcContext.writeTo(out) + } else { + out.writeBoolean(false) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountResponse.kt b/alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountResponse.kt new file mode 100644 index 000000000..6f26326dd --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountResponse.kt @@ -0,0 +1,86 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.alerting.model.destination.email.EmailAccount +import org.opensearch.commons.alerting.util.IndexUtils.Companion._ID +import org.opensearch.commons.alerting.util.IndexUtils.Companion._PRIMARY_TERM +import org.opensearch.commons.alerting.util.IndexUtils.Companion._SEQ_NO +import org.opensearch.commons.alerting.util.IndexUtils.Companion._VERSION +import org.opensearch.core.action.ActionResponse +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder +import java.io.IOException + +class GetEmailAccountResponse : ActionResponse, ToXContentObject { + var id: String + var version: Long + var seqNo: Long + var primaryTerm: Long + var status: RestStatus + var emailAccount: EmailAccount? + + constructor( + id: String, + version: Long, + seqNo: Long, + primaryTerm: Long, + status: RestStatus, + emailAccount: EmailAccount? 
+ ) : super() { + this.id = id + this.version = version + this.seqNo = seqNo + this.primaryTerm = primaryTerm + this.status = status + this.emailAccount = emailAccount + } + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + sin.readString(), // id + sin.readLong(), // version + sin.readLong(), // seqNo + sin.readLong(), // primaryTerm + sin.readEnum(RestStatus::class.java), // RestStatus + if (sin.readBoolean()) { + EmailAccount.readFrom(sin) // emailAccount + } else null + ) + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeString(id) + out.writeLong(version) + out.writeLong(seqNo) + out.writeLong(primaryTerm) + out.writeEnum(status) + if (emailAccount != null) { + out.writeBoolean(true) + emailAccount?.writeTo(out) + } else { + out.writeBoolean(false) + } + } + + @Throws(IOException::class) + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + .field(_ID, id) + .field(_VERSION, version) + .field(_SEQ_NO, seqNo) + .field(_PRIMARY_TERM, primaryTerm) + if (emailAccount != null) { + builder.field("email_account", emailAccount) + } + + return builder.endObject() + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupAction.kt b/alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupAction.kt new file mode 100644 index 000000000..a9793f156 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupAction.kt @@ -0,0 +1,15 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.action.ActionType + +class GetEmailGroupAction private constructor() : ActionType(NAME, ::GetEmailGroupResponse) { + companion object { + val INSTANCE = GetEmailGroupAction() + const val NAME = "cluster:admin/opendistro/alerting/destination/email_group/get" + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupRequest.kt b/alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupRequest.kt new file mode 100644 index 000000000..bb245b075 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupRequest.kt @@ -0,0 +1,60 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.action.ActionRequest +import org.opensearch.action.ActionRequestValidationException +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.rest.RestRequest +import org.opensearch.search.fetch.subphase.FetchSourceContext +import java.io.IOException + +class GetEmailGroupRequest : ActionRequest { + val emailGroupID: String + val version: Long + val method: RestRequest.Method + val srcContext: FetchSourceContext? + + constructor( + emailGroupID: String, + version: Long, + method: RestRequest.Method, + srcContext: FetchSourceContext? + ) : super() { + this.emailGroupID = emailGroupID + this.version = version + this.method = method + this.srcContext = srcContext + } + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + sin.readString(), // emailGroupID + sin.readLong(), // version + sin.readEnum(RestRequest.Method::class.java), // method + if (sin.readBoolean()) { + FetchSourceContext(sin) // srcContext + } else null + ) + + override fun validate(): ActionRequestValidationException? 
{ + return null + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeString(emailGroupID) + out.writeLong(version) + out.writeEnum(method) + if (srcContext != null) { + out.writeBoolean(true) + srcContext.writeTo(out) + } else { + out.writeBoolean(false) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupResponse.kt b/alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupResponse.kt new file mode 100644 index 000000000..c688033e4 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupResponse.kt @@ -0,0 +1,86 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.alerting.model.destination.email.EmailGroup +import org.opensearch.commons.alerting.util.IndexUtils.Companion._ID +import org.opensearch.commons.alerting.util.IndexUtils.Companion._PRIMARY_TERM +import org.opensearch.commons.alerting.util.IndexUtils.Companion._SEQ_NO +import org.opensearch.commons.alerting.util.IndexUtils.Companion._VERSION +import org.opensearch.core.action.ActionResponse +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentObject +import org.opensearch.core.xcontent.XContentBuilder +import java.io.IOException + +class GetEmailGroupResponse : ActionResponse, ToXContentObject { + var id: String + var version: Long + var seqNo: Long + var primaryTerm: Long + var status: RestStatus + var emailGroup: EmailGroup? + + constructor( + id: String, + version: Long, + seqNo: Long, + primaryTerm: Long, + status: RestStatus, + emailGroup: EmailGroup? 
+ ) : super() { + this.id = id + this.version = version + this.seqNo = seqNo + this.primaryTerm = primaryTerm + this.status = status + this.emailGroup = emailGroup + } + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + sin.readString(), // id + sin.readLong(), // version + sin.readLong(), // seqNo + sin.readLong(), // primaryTerm + sin.readEnum(RestStatus::class.java), // RestStatus + if (sin.readBoolean()) { + EmailGroup.readFrom(sin) // emailGroup + } else null + ) + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeString(id) + out.writeLong(version) + out.writeLong(seqNo) + out.writeLong(primaryTerm) + out.writeEnum(status) + if (emailGroup != null) { + out.writeBoolean(true) + emailGroup?.writeTo(out) + } else { + out.writeBoolean(false) + } + } + + @Throws(IOException::class) + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + .field(_ID, id) + .field(_VERSION, version) + .field(_SEQ_NO, seqNo) + .field(_PRIMARY_TERM, primaryTerm) + if (emailGroup != null) { + builder.field("email_group", emailGroup) + } + + return builder.endObject() + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/action/SearchEmailAccountAction.kt b/alerting/bin/main/org/opensearch/alerting/action/SearchEmailAccountAction.kt new file mode 100644 index 000000000..12cf21299 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/action/SearchEmailAccountAction.kt @@ -0,0 +1,16 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.action.ActionType +import org.opensearch.action.search.SearchResponse + +class SearchEmailAccountAction private constructor() : ActionType(NAME, ::SearchResponse) { + companion object { + val INSTANCE = SearchEmailAccountAction() + const val NAME = "cluster:admin/opendistro/alerting/destination/email_account/search" + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/action/SearchEmailGroupAction.kt b/alerting/bin/main/org/opensearch/alerting/action/SearchEmailGroupAction.kt new file mode 100644 index 000000000..da113c857 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/action/SearchEmailGroupAction.kt @@ -0,0 +1,16 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.action.ActionType +import org.opensearch.action.search.SearchResponse + +class SearchEmailGroupAction private constructor() : ActionType(NAME, ::SearchResponse) { + companion object { + val INSTANCE = SearchEmailGroupAction() + const val NAME = "cluster:admin/opendistro/alerting/destination/email_group/search" + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/alerts/AlertIndices.kt b/alerting/bin/main/org/opensearch/alerting/alerts/AlertIndices.kt new file mode 100644 index 000000000..bf8701c0f --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/alerts/AlertIndices.kt @@ -0,0 +1,588 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.alerts + +import org.apache.logging.log4j.LogManager +import org.opensearch.ExceptionsHelper +import org.opensearch.ResourceAlreadyExistsException +import org.opensearch.action.admin.cluster.state.ClusterStateRequest +import org.opensearch.action.admin.cluster.state.ClusterStateResponse +import 
org.opensearch.action.admin.indices.alias.Alias +import org.opensearch.action.admin.indices.create.CreateIndexRequest +import org.opensearch.action.admin.indices.create.CreateIndexResponse +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest +import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequest +import org.opensearch.action.admin.indices.exists.indices.IndicesExistsResponse +import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest +import org.opensearch.action.admin.indices.rollover.RolloverRequest +import org.opensearch.action.admin.indices.rollover.RolloverResponse +import org.opensearch.action.support.IndicesOptions +import org.opensearch.action.support.master.AcknowledgedResponse +import org.opensearch.alerting.alerts.AlertIndices.Companion.ALERT_HISTORY_WRITE_INDEX +import org.opensearch.alerting.alerts.AlertIndices.Companion.ALERT_INDEX +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_HISTORY_ENABLED +import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_HISTORY_INDEX_MAX_AGE +import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_HISTORY_MAX_DOCS +import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_HISTORY_RETENTION_PERIOD +import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_HISTORY_ROLLOVER_PERIOD +import org.opensearch.alerting.settings.AlertingSettings.Companion.FINDING_HISTORY_ENABLED +import org.opensearch.alerting.settings.AlertingSettings.Companion.FINDING_HISTORY_INDEX_MAX_AGE +import org.opensearch.alerting.settings.AlertingSettings.Companion.FINDING_HISTORY_MAX_DOCS +import org.opensearch.alerting.settings.AlertingSettings.Companion.FINDING_HISTORY_RETENTION_PERIOD +import org.opensearch.alerting.settings.AlertingSettings.Companion.FINDING_HISTORY_ROLLOVER_PERIOD +import org.opensearch.alerting.settings.AlertingSettings.Companion.REQUEST_TIMEOUT +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.IndexUtils +import org.opensearch.client.Client +import org.opensearch.cluster.ClusterChangedEvent +import org.opensearch.cluster.ClusterStateListener +import org.opensearch.cluster.metadata.IndexMetadata +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.settings.Settings +import org.opensearch.common.unit.TimeValue +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.DataSources +import org.opensearch.core.action.ActionListener +import org.opensearch.threadpool.Scheduler.Cancellable +import org.opensearch.threadpool.ThreadPool +import java.time.Instant + +/** + * Class to manage the creation and rollover of alert indices and alert history indices. In progress alerts are stored + * in [ALERT_INDEX]. Completed alerts are written to [ALERT_HISTORY_WRITE_INDEX] which is an alias that points at the + * current index to which completed alerts are written. [ALERT_HISTORY_WRITE_INDEX] is periodically rolled over to a new + * date based index. The frequency of rolling over indices is controlled by the `opendistro.alerting.alert_rollover_period` setting. + * + * These indexes are created when first used and are then rolled over every `alert_rollover_period`. The rollover is + * initiated on the cluster manager node to ensure only a single node tries to roll it over. 
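+ *
+ * For illustration, each scheduled rollover is roughly equivalent to the following
+ * (the alias and pattern mirror the companion-object constants below; the listener
+ * and exact thresholds are elided here and are illustrative only):
+ *
+ *     val request = RolloverRequest(ALERT_HISTORY_WRITE_INDEX, null)
+ *     request.createIndexRequest.index(ALERT_HISTORY_INDEX_PATTERN)
+ *     request.addMaxIndexDocsCondition(alertHistoryMaxDocs)
+ *     request.addMaxIndexAgeCondition(alertHistoryMaxAge)
+ *     client.admin().indices().rolloverIndex(request, actionListener)
+ *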
+ * Once we have curator functionality in Scheduled Jobs, we can migrate to using that to roll over the index.
+ */
+// TODO: refactor to make a generic version of this class for findings and alerts
+class AlertIndices(
+    settings: Settings,
+    private val client: Client,
+    private val threadPool: ThreadPool,
+    private val clusterService: ClusterService
+) : ClusterStateListener {
+
+    init {
+        clusterService.addListener(this)
+        clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_ENABLED) { alertHistoryEnabled = it }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_MAX_DOCS) { alertHistoryMaxDocs = it }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_INDEX_MAX_AGE) { alertHistoryMaxAge = it }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_ROLLOVER_PERIOD) {
+            alertHistoryRolloverPeriod = it
+            rescheduleAlertRollover()
+        }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_RETENTION_PERIOD) {
+            alertHistoryRetentionPeriod = it
+        }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(REQUEST_TIMEOUT) { requestTimeout = it }
+
+        clusterService.clusterSettings.addSettingsUpdateConsumer(FINDING_HISTORY_ENABLED) { findingHistoryEnabled = it }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(FINDING_HISTORY_MAX_DOCS) { findingHistoryMaxDocs = it }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(FINDING_HISTORY_INDEX_MAX_AGE) { findingHistoryMaxAge = it }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(FINDING_HISTORY_ROLLOVER_PERIOD) {
+            findingHistoryRolloverPeriod = it
+            rescheduleFindingRollover()
+        }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(FINDING_HISTORY_RETENTION_PERIOD) {
+            findingHistoryRetentionPeriod = it
+        }
+    }
+
+    companion object {
+
+        /** The index where in-progress alerts are stored. */
+        const val ALERT_INDEX = ".opendistro-alerting-alerts"
+
+        /** The alias of the index in which to write alert history */
+        const val ALERT_HISTORY_WRITE_INDEX = ".opendistro-alerting-alert-history-write"
+
+        /** The alias of the index in which to write findings */
+        const val FINDING_HISTORY_WRITE_INDEX = ".opensearch-alerting-finding-history-write"
+
+        /** The index name pattern referring to all alert history indices */
+        const val ALERT_HISTORY_ALL = ".opendistro-alerting-alert-history*"
+
+        /** The index name pattern referring to all finding history indices */
+        const val FINDING_HISTORY_ALL = ".opensearch-alerting-finding-history*"
+
+        /** The index name pattern to create alert history indices */
+        const val ALERT_HISTORY_INDEX_PATTERN = "<.opendistro-alerting-alert-history-{now/d}-1>"
+
+        /** The index name pattern to create finding history indices */
+        const val FINDING_HISTORY_INDEX_PATTERN = "<.opensearch-alerting-finding-history-{now/d}-1>"
+
+        /** The index name pattern to query all alerts, history and current alerts. */
+        const val ALL_ALERT_INDEX_PATTERN = ".opendistro-alerting-alert*"
+
+        /** The index name pattern to query all findings, history and current findings.
*/ + const val ALL_FINDING_INDEX_PATTERN = ".opensearch-alerting-finding*" + + @JvmStatic + fun alertMapping() = + AlertIndices::class.java.getResource("alert_mapping.json").readText() + + @JvmStatic + fun findingMapping() = + AlertIndices::class.java.getResource("finding_mapping.json").readText() + + private val logger = LogManager.getLogger(AlertIndices::class.java) + } + + @Volatile private var alertHistoryEnabled = AlertingSettings.ALERT_HISTORY_ENABLED.get(settings) + + @Volatile private var findingHistoryEnabled = AlertingSettings.FINDING_HISTORY_ENABLED.get(settings) + + @Volatile private var alertHistoryMaxDocs = AlertingSettings.ALERT_HISTORY_MAX_DOCS.get(settings) + + @Volatile private var findingHistoryMaxDocs = AlertingSettings.FINDING_HISTORY_MAX_DOCS.get(settings) + + @Volatile private var alertHistoryMaxAge = AlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE.get(settings) + + @Volatile private var findingHistoryMaxAge = AlertingSettings.FINDING_HISTORY_INDEX_MAX_AGE.get(settings) + + @Volatile private var alertHistoryRolloverPeriod = AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.get(settings) + + @Volatile private var findingHistoryRolloverPeriod = AlertingSettings.FINDING_HISTORY_ROLLOVER_PERIOD.get(settings) + + @Volatile private var alertHistoryRetentionPeriod = AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD.get(settings) + + @Volatile private var findingHistoryRetentionPeriod = AlertingSettings.FINDING_HISTORY_RETENTION_PERIOD.get(settings) + + @Volatile private var requestTimeout = AlertingSettings.REQUEST_TIMEOUT.get(settings) + + @Volatile private var isClusterManager = false + + // for JobsMonitor to report + var lastRolloverTime: TimeValue? = null + + private var alertHistoryIndexInitialized: Boolean = false + + private var findingHistoryIndexInitialized: Boolean = false + + private var alertIndexInitialized: Boolean = false + + private var scheduledAlertRollover: Cancellable? = null + + private var scheduledFindingRollover: Cancellable? = null + + fun onMaster() { + try { + // try to rollover immediately as we might be restarting the cluster + rolloverAlertHistoryIndex() + rolloverFindingHistoryIndex() + // schedule the next rollover for approx MAX_AGE later + scheduledAlertRollover = threadPool + .scheduleWithFixedDelay({ rolloverAndDeleteAlertHistoryIndices() }, alertHistoryRolloverPeriod, executorName()) + scheduledFindingRollover = threadPool + .scheduleWithFixedDelay({ rolloverAndDeleteFindingHistoryIndices() }, findingHistoryRolloverPeriod, executorName()) + } catch (e: Exception) { + // This should be run on cluster startup + logger.error( + "Error creating alert/finding indices. 
" + + "Alerts/Findings can't be recorded until master node is restarted.", + e + ) + } + } + + fun offMaster() { + scheduledAlertRollover?.cancel() + scheduledFindingRollover?.cancel() + } + + private fun executorName(): String { + return ThreadPool.Names.MANAGEMENT + } + + override fun clusterChanged(event: ClusterChangedEvent) { + // Instead of using a LocalNodeClusterManagerListener to track master changes, this service will + // track them here to avoid conditions where master listener events run after other + // listeners that depend on what happened in the master listener + if (this.isClusterManager != event.localNodeClusterManager()) { + this.isClusterManager = event.localNodeClusterManager() + if (this.isClusterManager) { + onMaster() + } else { + offMaster() + } + } + + // if the indexes have been deleted they need to be reinitialized + alertIndexInitialized = event.state().routingTable().hasIndex(ALERT_INDEX) + alertHistoryIndexInitialized = event.state().metadata().hasAlias(ALERT_HISTORY_WRITE_INDEX) + findingHistoryIndexInitialized = event.state().metadata().hasAlias(FINDING_HISTORY_WRITE_INDEX) + } + + private fun rescheduleAlertRollover() { + if (clusterService.state().nodes.isLocalNodeElectedMaster) { + scheduledAlertRollover?.cancel() + scheduledAlertRollover = threadPool + .scheduleWithFixedDelay({ rolloverAndDeleteAlertHistoryIndices() }, alertHistoryRolloverPeriod, executorName()) + } + } + + private fun rescheduleFindingRollover() { + if (clusterService.state().nodes.isLocalNodeElectedMaster) { + scheduledFindingRollover?.cancel() + scheduledFindingRollover = threadPool + .scheduleWithFixedDelay({ rolloverAndDeleteFindingHistoryIndices() }, findingHistoryRolloverPeriod, executorName()) + } + } + + fun isAlertInitialized(): Boolean { + return alertIndexInitialized && alertHistoryIndexInitialized + } + + fun isAlertInitialized(dataSources: DataSources): Boolean { + val alertsIndex = dataSources.alertsIndex + val alertsHistoryIndex = dataSources.alertsHistoryIndex + if (alertsIndex == ALERT_INDEX && alertsHistoryIndex == ALERT_HISTORY_WRITE_INDEX) { + return alertIndexInitialized && alertHistoryIndexInitialized + } + if ( + clusterService.state().metadata.indices.containsKey(alertsIndex) && + clusterService.state().metadata.hasAlias(alertsHistoryIndex) + ) { + return true + } + return false + } + + fun isAlertHistoryEnabled(): Boolean { + return alertHistoryEnabled + } + + fun isFindingHistoryEnabled(): Boolean = findingHistoryEnabled + + suspend fun createOrUpdateAlertIndex() { + if (!alertIndexInitialized) { + alertIndexInitialized = createIndex(ALERT_INDEX, alertMapping()) + if (alertIndexInitialized) IndexUtils.alertIndexUpdated() + } else { + if (!IndexUtils.alertIndexUpdated) updateIndexMapping(ALERT_INDEX, alertMapping()) + } + alertIndexInitialized + } + suspend fun createOrUpdateAlertIndex(dataSources: DataSources) { + if (dataSources.alertsIndex == ALERT_INDEX) { + return createOrUpdateAlertIndex() + } + val alertsIndex = dataSources.alertsIndex + if (!clusterService.state().routingTable().hasIndex(alertsIndex)) { + alertIndexInitialized = createIndex(alertsIndex!!, alertMapping()) + } else { + updateIndexMapping(alertsIndex!!, alertMapping()) + } + } + + suspend fun createOrUpdateInitialAlertHistoryIndex(dataSources: DataSources) { + if (dataSources.alertsIndex == ALERT_INDEX) { + return createOrUpdateInitialAlertHistoryIndex() + } + if (!clusterService.state().metadata.hasAlias(dataSources.alertsHistoryIndex)) { + createIndex( + 
dataSources.alertsHistoryIndexPattern ?: ALERT_HISTORY_INDEX_PATTERN, + alertMapping(), + dataSources.alertsHistoryIndex + ) + } else { + updateIndexMapping( + dataSources.alertsHistoryIndex ?: ALERT_HISTORY_WRITE_INDEX, + alertMapping(), + true + ) + } + } + suspend fun createOrUpdateInitialAlertHistoryIndex() { + if (!alertHistoryIndexInitialized) { + alertHistoryIndexInitialized = createIndex(ALERT_HISTORY_INDEX_PATTERN, alertMapping(), ALERT_HISTORY_WRITE_INDEX) + if (alertHistoryIndexInitialized) { + IndexUtils.lastUpdatedAlertHistoryIndex = IndexUtils.getIndexNameWithAlias( + clusterService.state(), + ALERT_HISTORY_WRITE_INDEX + ) + } + } else { + updateIndexMapping(ALERT_HISTORY_WRITE_INDEX, alertMapping(), true) + } + alertHistoryIndexInitialized + } + + suspend fun createOrUpdateInitialFindingHistoryIndex() { + if (!findingHistoryIndexInitialized) { + findingHistoryIndexInitialized = createIndex(FINDING_HISTORY_INDEX_PATTERN, findingMapping(), FINDING_HISTORY_WRITE_INDEX) + if (findingHistoryIndexInitialized) { + IndexUtils.lastUpdatedFindingHistoryIndex = IndexUtils.getIndexNameWithAlias( + clusterService.state(), + FINDING_HISTORY_WRITE_INDEX + ) + } + } else { + updateIndexMapping(FINDING_HISTORY_WRITE_INDEX, findingMapping(), true) + } + findingHistoryIndexInitialized + } + + suspend fun createOrUpdateInitialFindingHistoryIndex(dataSources: DataSources) { + if (dataSources.findingsIndex == FINDING_HISTORY_WRITE_INDEX) { + return createOrUpdateInitialFindingHistoryIndex() + } + val findingsIndex = dataSources.findingsIndex + val findingsIndexPattern = dataSources.findingsIndexPattern ?: FINDING_HISTORY_INDEX_PATTERN + if (!clusterService.state().metadata().hasAlias(findingsIndex)) { + createIndex( + findingsIndexPattern, + findingMapping(), + findingsIndex + ) + } else { + updateIndexMapping(findingsIndex, findingMapping(), true) + } + } + + private suspend fun createIndex(index: String, schemaMapping: String, alias: String? = null): Boolean { + // This should be a fast check of local cluster state. Should be exceedingly rare that the local cluster + // state does not contain the index and multiple nodes concurrently try to create the index. 
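+        // Aside: `suspendUntil` (from org.opensearch.alerting.opensearchapi) bridges the
+        // callback-based client into coroutines. As a sketch of the pattern (not the exact
+        // implementation in that package), it is roughly:
+        //
+        //     suspend fun <C, T> C.suspendUntil(block: C.(ActionListener<T>) -> Unit): T =
+        //         suspendCoroutine { cont ->
+        //             block(object : ActionListener<T> {
+        //                 override fun onResponse(response: T) = cont.resume(response)
+        //                 override fun onFailure(e: Exception) = cont.resumeWithException(e)
+        //             })
+        //         }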
+        // If that race does happen, the resulting error is handled by catching the ResourceAlreadyExistsException below.
+        val existsResponse: IndicesExistsResponse = client.admin().indices().suspendUntil {
+            exists(IndicesExistsRequest(index).local(true), it)
+        }
+        if (existsResponse.isExists) return true
+
+        val request = CreateIndexRequest(index)
+            .mapping(schemaMapping)
+            .settings(Settings.builder().put("index.hidden", true).build())
+
+        if (alias != null) request.alias(Alias(alias))
+        return try {
+            val createIndexResponse: CreateIndexResponse = client.admin().indices().suspendUntil { create(request, it) }
+            createIndexResponse.isAcknowledged
+        } catch (t: Exception) {
+            if (ExceptionsHelper.unwrapCause(t) is ResourceAlreadyExistsException) {
+                true
+            } else {
+                throw AlertingException.wrap(t)
+            }
+        }
+    }
+
+    private suspend fun updateIndexMapping(index: String, mapping: String, alias: Boolean = false) {
+        val clusterState = clusterService.state()
+        var targetIndex = index
+        if (alias) {
+            targetIndex = IndexUtils.getIndexNameWithAlias(clusterState, index)
+        }
+
+        // TODO: call getMapping and compare the actual mappings here instead of this check
+        if (targetIndex == IndexUtils.lastUpdatedAlertHistoryIndex || targetIndex == IndexUtils.lastUpdatedFindingHistoryIndex) {
+            return
+        }
+
+        val putMappingRequest: PutMappingRequest = PutMappingRequest(targetIndex)
+            .source(mapping, XContentType.JSON)
+        val updateResponse: AcknowledgedResponse = client.admin().indices().suspendUntil { putMapping(putMappingRequest, it) }
+        if (updateResponse.isAcknowledged) {
+            logger.info("Index mapping of $targetIndex is updated")
+            setIndexUpdateFlag(index, targetIndex)
+        } else {
+            logger.info("Failed to update index mapping of $targetIndex")
+        }
+    }
+
+    private fun setIndexUpdateFlag(index: String, targetIndex: String) {
+        when (index) {
+            ALERT_INDEX -> IndexUtils.alertIndexUpdated()
+            ALERT_HISTORY_WRITE_INDEX -> IndexUtils.lastUpdatedAlertHistoryIndex = targetIndex
+            FINDING_HISTORY_WRITE_INDEX -> IndexUtils.lastUpdatedFindingHistoryIndex = targetIndex
+        }
+    }
+
+    private fun rolloverAndDeleteAlertHistoryIndices() {
+        if (alertHistoryEnabled) rolloverAlertHistoryIndex()
+        deleteOldIndices("History", ALERT_HISTORY_ALL)
+    }
+
+    private fun rolloverAndDeleteFindingHistoryIndices() {
+        if (findingHistoryEnabled) rolloverFindingHistoryIndex()
+        deleteOldIndices("Finding", FINDING_HISTORY_ALL)
+    }
+
+    private fun rolloverIndex(
+        initialized: Boolean,
+        index: String,
+        pattern: String,
+        map: String,
+        docsCondition: Long,
+        ageCondition: TimeValue,
+        writeIndex: String
+    ) {
+        if (!initialized) {
+            return
+        }
+
+        // We have to pass null for newIndexName in order to get the rollover API to increment the index count.
+        val request = RolloverRequest(index, null)
+        request.createIndexRequest.index(pattern)
+            .mapping(map)
+            .settings(Settings.builder().put("index.hidden", true).build())
+        request.addMaxIndexDocsCondition(docsCondition)
+        request.addMaxIndexAgeCondition(ageCondition)
+        client.admin().indices().rolloverIndex(
+            request,
+            object : ActionListener<RolloverResponse> {
+                override fun onResponse(response: RolloverResponse) {
+                    if (!response.isRolledOver) {
+                        logger.info("$writeIndex not rolled over. Conditions were: ${response.conditionStatus}")
+                    } else {
+                        lastRolloverTime = TimeValue.timeValueMillis(threadPool.absoluteTimeInMillis())
+                    }
+                }
+                override fun onFailure(e: Exception) {
+                    logger.error("Rollover of $writeIndex failed.", e)
+                }
+            }
+        )
+    }
+
+    private fun rolloverAlertHistoryIndex() {
+        rolloverIndex(
+            alertHistoryIndexInitialized,
+            ALERT_HISTORY_WRITE_INDEX,
+            ALERT_HISTORY_INDEX_PATTERN,
+            alertMapping(),
+            alertHistoryMaxDocs,
+            alertHistoryMaxAge,
+            ALERT_HISTORY_WRITE_INDEX
+        )
+    }
+
+    private fun rolloverFindingHistoryIndex() {
+        rolloverIndex(
+            findingHistoryIndexInitialized,
+            FINDING_HISTORY_WRITE_INDEX,
+            FINDING_HISTORY_INDEX_PATTERN,
+            findingMapping(),
+            findingHistoryMaxDocs,
+            findingHistoryMaxAge,
+            FINDING_HISTORY_WRITE_INDEX
+        )
+    }
+
+    private fun deleteOldIndices(tag: String, indices: String) {
+        logger.info("deleteOldIndices")
+        val clusterStateRequest = ClusterStateRequest()
+            .clear()
+            .indices(indices)
+            .metadata(true)
+            .local(true)
+            .indicesOptions(IndicesOptions.strictExpand())
+        client.admin().cluster().state(
+            clusterStateRequest,
+            object : ActionListener<ClusterStateResponse> {
+                override fun onResponse(clusterStateResponse: ClusterStateResponse) {
+                    if (clusterStateResponse.state.metadata.indices.isNotEmpty()) {
+                        val indicesToDelete = getIndicesToDelete(clusterStateResponse)
+                        logger.info("Deleting old $tag indices viz $indicesToDelete")
+                        deleteAllOldHistoryIndices(indicesToDelete)
+                    } else {
+                        logger.info("No old $tag indices to delete")
+                    }
+                }
+                override fun onFailure(e: Exception) {
+                    logger.error("Error fetching cluster state", e)
+                }
+            }
+        )
+    }
+
+    private fun getIndicesToDelete(clusterStateResponse: ClusterStateResponse): List<String> {
+        val indicesToDelete = mutableListOf<String>()
+        for (entry in clusterStateResponse.state.metadata.indices) {
+            val indexMetaData = entry.value
+            getHistoryIndexToDelete(indexMetaData, alertHistoryRetentionPeriod.millis, ALERT_HISTORY_WRITE_INDEX, alertHistoryEnabled)
+                ?.let { indicesToDelete.add(it) }
+            getHistoryIndexToDelete(indexMetaData, findingHistoryRetentionPeriod.millis, FINDING_HISTORY_WRITE_INDEX, findingHistoryEnabled)
+                ?.let { indicesToDelete.add(it) }
+        }
+        return indicesToDelete
+    }
+
+    private fun getHistoryIndexToDelete(
+        indexMetadata: IndexMetadata,
+        retentionPeriodMillis: Long,
+        writeIndex: String,
+        historyEnabled: Boolean
+    ): String? {
+        val creationTime = indexMetadata.creationDate
+        if ((Instant.now().toEpochMilli() - creationTime) > retentionPeriodMillis) {
+            val alias = indexMetadata.aliases.entries.firstOrNull { writeIndex == it.value.alias }
+            if (alias != null) {
+                if (historyEnabled) {
+                    // If the index has the write alias and history is enabled, don't delete the index
+                    return null
+                } else if (writeIndex == ALERT_HISTORY_WRITE_INDEX) {
+                    // Otherwise reset alertHistoryIndexInitialized since the index will be deleted
+                    alertHistoryIndexInitialized = false
+                } else if (writeIndex == FINDING_HISTORY_WRITE_INDEX) {
+                    // Otherwise reset findingHistoryIndexInitialized since the index will be deleted
+                    findingHistoryIndexInitialized = false
+                }
+            }
+
+            return indexMetadata.index.name
+        }
+        return null
+    }
+
+    private fun deleteAllOldHistoryIndices(indicesToDelete: List<String>) {
+        if (indicesToDelete.isNotEmpty()) {
+            val deleteIndexRequest = DeleteIndexRequest(*indicesToDelete.toTypedArray())
+            client.admin().indices().delete(
+                deleteIndexRequest,
+                object : ActionListener<AcknowledgedResponse> {
+                    override fun onResponse(deleteIndicesResponse: AcknowledgedResponse) {
+                        if (!deleteIndicesResponse.isAcknowledged) {
+                            logger.error(
+                                "Could not delete one or more Alerting/Finding history indices: $indicesToDelete. Retrying one by one."
+                            )
+                            deleteOldHistoryIndex(indicesToDelete)
+                        }
+                    }
+                    override fun onFailure(e: Exception) {
+                        logger.error("Delete for Alerting/Finding history indices $indicesToDelete failed. Retrying one by one.")
+                        deleteOldHistoryIndex(indicesToDelete)
+                    }
+                }
+            )
+        }
+    }
+
+    private fun deleteOldHistoryIndex(indicesToDelete: List<String>) {
+        for (index in indicesToDelete) {
+            // Delete one index at a time so a single failure doesn't block the rest
+            val singleDeleteRequest = DeleteIndexRequest(index)
+            client.admin().indices().delete(
+                singleDeleteRequest,
+                object : ActionListener<AcknowledgedResponse> {
+                    override fun onResponse(acknowledgedResponse: AcknowledgedResponse?)
{ + if (acknowledgedResponse != null) { + if (!acknowledgedResponse.isAcknowledged) { + logger.error("Could not delete one or more Alerting/Finding history indices: $index") + } + } + } + override fun onFailure(e: Exception) { + logger.debug("Exception ${e.message} while deleting the index $index") + } + } + ) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/alerts/AlertMover.kt b/alerting/bin/main/org/opensearch/alerting/alerts/AlertMover.kt new file mode 100644 index 000000000..07b1a3a91 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/alerts/AlertMover.kt @@ -0,0 +1,250 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.alerts + +import org.apache.logging.log4j.LogManager +import org.opensearch.action.bulk.BulkRequest +import org.opensearch.action.bulk.BulkResponse +import org.opensearch.action.delete.DeleteRequest +import org.opensearch.action.get.GetRequest +import org.opensearch.action.get.GetResponse +import org.opensearch.action.index.IndexRequest +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.alerting.MonitorRunnerExecutionContext +import org.opensearch.alerting.alerts.AlertIndices.Companion.ALERT_HISTORY_WRITE_INDEX +import org.opensearch.alerting.alerts.AlertIndices.Companion.ALERT_INDEX +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.util.ScheduledJobUtils +import org.opensearch.client.Client +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentFactory +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.CompositeInput +import org.opensearch.commons.alerting.model.DataSources +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.core.common.bytes.BytesReference +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import org.opensearch.index.VersionType +import org.opensearch.index.query.QueryBuilders +import org.opensearch.search.builder.SearchSourceBuilder + +private val log = LogManager.getLogger(AlertMover::class.java) + +class AlertMover { + companion object { + /** + * Moves defunct active alerts to the alert history index when the corresponding monitor or trigger is deleted. + * + * The logic for moving alerts consists of: + * 1. Find active alerts: + * a. matching monitorId if no monitor is provided (postDelete) + * b. matching monitorId and no triggerIds if monitor is provided (postIndex) + * 2. Move alerts over to DataSources.alertsHistoryIndex as DELETED + * 3. Delete alerts from monitor's DataSources.alertsIndex + * 4. Schedule a retry if there were any failures + */ + suspend fun moveAlerts(client: Client, monitorId: String, monitor: Monitor?) 
{ + var alertIndex = monitor?.dataSources?.alertsIndex ?: ALERT_INDEX + var alertHistoryIndex = monitor?.dataSources?.alertsHistoryIndex ?: ALERT_HISTORY_WRITE_INDEX + + val boolQuery = QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitorId)) + + if (monitor != null) { + boolQuery.mustNot(QueryBuilders.termsQuery(Alert.TRIGGER_ID_FIELD, monitor.triggers.map { it.id })) + } + + val activeAlertsQuery = SearchSourceBuilder.searchSource() + .query(boolQuery) + .version(true) + + val activeAlertsRequest = SearchRequest(alertIndex) + .routing(monitorId) + .source(activeAlertsQuery) + val response: SearchResponse = client.suspendUntil { search(activeAlertsRequest, it) } + + // If no alerts are found, simply return + if (response.hits.totalHits?.value == 0L) return + val indexRequests = response.hits.map { hit -> + IndexRequest(alertHistoryIndex) + .routing(monitorId) + .source( + Alert.parse(alertContentParser(hit.sourceRef), hit.id, hit.version) + .copy(state = Alert.State.DELETED) + .toXContentWithUser(XContentFactory.jsonBuilder()) + ) + .version(hit.version) + .versionType(VersionType.EXTERNAL_GTE) + .id(hit.id) + } + val copyRequest = BulkRequest().add(indexRequests) + val copyResponse: BulkResponse = client.suspendUntil { bulk(copyRequest, it) } + + val deleteRequests = copyResponse.items.filterNot { it.isFailed }.map { + DeleteRequest(alertIndex, it.id) + .routing(monitorId) + .version(it.version) + .versionType(VersionType.EXTERNAL_GTE) + } + val deleteResponse: BulkResponse = client.suspendUntil { bulk(BulkRequest().add(deleteRequests), it) } + + if (copyResponse.hasFailures()) { + val retryCause = copyResponse.items.filter { it.isFailed } + .firstOrNull { it.status() == RestStatus.TOO_MANY_REQUESTS } + ?.failure?.cause + throw RuntimeException( + "Failed to copy alerts for [$monitorId, ${monitor?.triggers?.map { it.id }}]: " + + copyResponse.buildFailureMessage(), + retryCause + ) + } + if (deleteResponse.hasFailures()) { + val retryCause = deleteResponse.items.filter { it.isFailed } + .firstOrNull { it.status() == RestStatus.TOO_MANY_REQUESTS } + ?.failure?.cause + throw RuntimeException( + "Failed to delete alerts for [$monitorId, ${monitor?.triggers?.map { it.id }}]: " + + deleteResponse.buildFailureMessage(), + retryCause + ) + } + } + + private fun alertContentParser(bytesReference: BytesReference): XContentParser { + val xcp = XContentHelper.createParser( + NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, + bytesReference, XContentType.JSON + ) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + return xcp + } + + /** + * Moves defunct active alerts to the alert history index when the corresponding workflow or trigger is deleted. + * + * The logic for moving alerts consists of: + * 1. Find active alerts: + * a. matching workflowId if no workflow is provided (postDelete) + * b. matching workflowid and chained alert triggerIds if monitor is provided (postIndex) + * 2. Move alerts over to DataSources.alertsHistoryIndex as DELETED + * 3. Delete alerts from monitor's DataSources.alertsIndex + * 4. 
Schedule a retry if there were any failures + */ + suspend fun moveAlerts(client: Client, workflowId: String, workflow: Workflow?, monitorCtx: MonitorRunnerExecutionContext) { + var alertIndex = ALERT_INDEX + var alertHistoryIndex = ALERT_HISTORY_WRITE_INDEX + if (workflow != null) { + if ( + workflow.inputs.isNotEmpty() && workflow.inputs[0] is CompositeInput && + (workflow.inputs[0] as CompositeInput).sequence.delegates.isNotEmpty() + ) { + var i = 0 + val delegates = (workflow.inputs[0] as CompositeInput).sequence.delegates + try { + var getResponse: GetResponse? = null + while (i < delegates.size && (getResponse == null || getResponse.isExists == false)) { + getResponse = + client.suspendUntil { + client.get( + GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, delegates[i].monitorId), + it + ) + } + if (getResponse!!.isExists) { + val monitor = + ScheduledJobUtils.parseMonitorFromScheduledJobDocSource( + monitorCtx.xContentRegistry!!, + response = getResponse + ) + + alertIndex = monitor.dataSources.alertsIndex + alertHistoryIndex = + if (monitor.dataSources.alertsHistoryIndex == null) alertHistoryIndex + else monitor.dataSources.alertsHistoryIndex!! + } + i++ + } + } catch (e: Exception) { + log.error("Failed to get delegate monitor for workflow $workflowId. Assuming default alert indices", e) + } + } + } + val dataSources = DataSources().copy(alertsHistoryIndex = alertHistoryIndex, alertsIndex = alertIndex) + /** check if alert index is initialized **/ + if (monitorCtx.alertIndices!!.isAlertInitialized(dataSources) == false) + return + val boolQuery = QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery(Alert.WORKFLOW_ID_FIELD, workflowId)) + + if (workflow != null) { + boolQuery.mustNot(QueryBuilders.termsQuery(Alert.TRIGGER_ID_FIELD, workflow.triggers.map { it.id })) + } + + val activeAlertsQuery = SearchSourceBuilder.searchSource() + .query(boolQuery) + .version(true) + + val activeAlertsRequest = SearchRequest(alertIndex) + .routing(workflowId) + .source(activeAlertsQuery) + val response: SearchResponse = client.suspendUntil { search(activeAlertsRequest, it) } + + // If no alerts are found, simply return + if (response.hits.totalHits?.value == 0L) return + val indexRequests = response.hits.map { hit -> + IndexRequest(alertHistoryIndex) + .routing(workflowId) + .source( + Alert.parse(alertContentParser(hit.sourceRef), hit.id, hit.version) + .copy(state = Alert.State.DELETED) + .toXContentWithUser(XContentFactory.jsonBuilder()) + ) + .version(hit.version) + .versionType(VersionType.EXTERNAL_GTE) + .id(hit.id) + } + val copyRequest = BulkRequest().add(indexRequests) + val copyResponse: BulkResponse = client.suspendUntil { bulk(copyRequest, it) } + + val deleteRequests = copyResponse.items.filterNot { it.isFailed }.map { + DeleteRequest(alertIndex, it.id) + .routing(workflowId) + .version(it.version) + .versionType(VersionType.EXTERNAL_GTE) + } + val deleteResponse: BulkResponse = client.suspendUntil { bulk(BulkRequest().add(deleteRequests), it) } + + if (copyResponse.hasFailures()) { + val retryCause = copyResponse.items.filter { it.isFailed } + .firstOrNull { it.status() == RestStatus.TOO_MANY_REQUESTS } + ?.failure?.cause + throw RuntimeException( + "Failed to copy alerts for [$workflowId, ${workflow?.triggers?.map { it.id }}]: " + + copyResponse.buildFailureMessage(), + retryCause + ) + } + if (deleteResponse.hasFailures()) { + val retryCause = deleteResponse.items.filter { it.isFailed } + .firstOrNull { it.status() == RestStatus.TOO_MANY_REQUESTS } + ?.failure?.cause + throw 
RuntimeException( + "Failed to delete alerts for [$workflowId, ${workflow?.triggers?.map { it.id }}]: " + + deleteResponse.buildFailureMessage(), + retryCause + ) + } + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/alerts/alert_mapping.json b/alerting/bin/main/org/opensearch/alerting/alerts/alert_mapping.json new file mode 100644 index 000000000..53fb5b0a2 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/alerts/alert_mapping.json @@ -0,0 +1,174 @@ +{ + "dynamic": "strict", + "_routing": { + "required": true + }, + "_meta" : { + "schema_version": 5 + }, + "properties": { + "schema_version": { + "type": "integer" + }, + "monitor_id": { + "type": "keyword" + }, + "monitor_version": { + "type": "long" + }, + "id": { + "type": "keyword" + }, + "version": { + "type": "long" + }, + "severity": { + "type": "keyword" + }, + "monitor_name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "monitor_user": { + "properties": { + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "backend_roles": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "roles": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "custom_attribute_names": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + } + } + }, + "execution_id": { + "type": "keyword" + }, + "workflow_id": { + "type": "keyword" + }, + "workflow_name": { + "type": "keyword" + }, + "trigger_id": { + "type": "keyword" + }, + "trigger_name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "finding_ids": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "associated_alert_ids": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "related_doc_ids": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "state": { + "type": "keyword" + }, + "start_time": { + "type": "date" + }, + "last_notification_time": { + "type": "date" + }, + "acknowledged_time": { + "type": "date" + }, + "end_time": { + "type": "date" + }, + "error_message": { + "type": "text" + }, + "alert_history": { + "type": "nested", + "properties": { + "timestamp": { + "type": "date" + }, + "message": { + "type": "text" + } + } + }, + "action_execution_results": { + "type": "nested", + "properties": { + "action_id": { + "type": "keyword" + }, + "last_execution_time": { + "type": "date" + }, + "throttled_count": { + "type": "integer" + } + } + }, + "agg_alert_content": { + "dynamic": true, + "properties": { + "parent_bucket_path": { + "type": "text" + }, + "bucket_key": { + "type": "text" + } + } + } + } +} \ No newline at end of file diff --git a/alerting/bin/main/org/opensearch/alerting/alerts/finding_mapping.json b/alerting/bin/main/org/opensearch/alerting/alerts/finding_mapping.json new file mode 100644 index 000000000..d2ecc0907 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/alerts/finding_mapping.json @@ -0,0 +1,71 @@ +{ + "dynamic": "strict", + "_meta" : { + "schema_version": 4 + }, + "properties": { + "schema_version": { + "type": "integer" + }, + "related_doc_ids": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "monitor_id": { + "type": "keyword" + }, + "monitor_name": { + "type": "keyword" + }, + "id": { + "type": 
"keyword" + }, + "index": { + "type": "keyword" + }, + "queries" : { + "type": "nested", + "properties": { + "id": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "query": { + "type": "text" + }, + "tags": { + "type": "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "fields": { + "type": "text" + } + } + }, + "timestamp": { + "type": "long" + }, + "correlated_doc_ids": { + "type" : "text", + "analyzer": "whitespace", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "execution_id": { + "type": "keyword" + } + } +} \ No newline at end of file diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionParser.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionParser.kt new file mode 100644 index 000000000..999b9a977 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionParser.kt @@ -0,0 +1,53 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.chainedAlertCondition.parsers + +import org.opensearch.alerting.chainedAlertCondition.resolvers.ChainedAlertRPNResolver +import org.opensearch.alerting.chainedAlertCondition.tokens.CAExpressionOperator + +/** + * The postfix (Reverse Polish Notation) parser. + * Uses the Shunting-yard algorithm to parse a mathematical expression + * @param triggerExpression String containing the trigger expression for the monitor + */ +class ChainedAlertExpressionParser( + triggerExpression: String +) : ChainedAlertExpressionRPNBaseParser(triggerExpression) { + + override fun parse(): ChainedAlertRPNResolver { + val expression = expressionToParse.replace(" ", "") + + val splitters = ArrayList() + CAExpressionOperator.values().forEach { splitters.add(it.value) } + + val breaks = ArrayList().apply { add(expression) } + for (s in splitters) { + val a = ArrayList() + for (ind in 0 until breaks.size) { + breaks[ind].let { + if (it.length > 1) { + a.addAll(breakString(breaks[ind], s)) + } else a.add(it) + } + } + breaks.clear() + breaks.addAll(a) + } + + return ChainedAlertRPNResolver(convertInfixToPostfix(breaks)) + } + + private fun breakString(input: String, delimeter: String): ArrayList { + val tokens = input.split(delimeter) + val array = ArrayList() + for (t in tokens) { + array.add(t) + array.add(delimeter) + } + array.removeAt(array.size - 1) + return array + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionRPNBaseParser.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionRPNBaseParser.kt new file mode 100644 index 000000000..ff3c29db7 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionRPNBaseParser.kt @@ -0,0 +1,114 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.chainedAlertCondition.parsers + +import org.opensearch.alerting.chainedAlertCondition.tokens.CAExpressionOperator +import org.opensearch.alerting.chainedAlertCondition.tokens.CAExpressionToken +import org.opensearch.alerting.chainedAlertCondition.tokens.ChainedAlertExpressionConstant +import org.opensearch.alerting.chainedAlertCondition.tokens.ExpressionToken +import java.util.Stack + +/** + * This is the abstract base class which holds the trigger expression parsing logic; + * 
+ * using the Infix to Postfix, a.k.a. Reverse Polish Notation (RPN), approach.
+ * It uses the Shunting-Yard algorithm to parse the given trigger expression.
+ *
+ * @param expressionToParse Complete string containing the trigger expression
+ */
+abstract class ChainedAlertExpressionRPNBaseParser(
+    protected val expressionToParse: String
+) : ExpressionParser {
+    /**
+     * To perform the Infix-to-postfix conversion of the trigger expression
+     */
+    protected fun convertInfixToPostfix(expTokens: List<String>): ArrayList<ExpressionToken> {
+        val expTokenStack = Stack<ExpressionToken>()
+        val outputExpTokens = ArrayList<ExpressionToken>()
+
+        for (tokenString in expTokens) {
+            if (tokenString.isEmpty()) continue
+            when (val expToken = assignToken(tokenString)) {
+                is CAExpressionToken -> outputExpTokens.add(expToken)
+                is CAExpressionOperator -> {
+                    when (expToken) {
+                        CAExpressionOperator.PAR_LEFT -> expTokenStack.push(expToken)
+                        CAExpressionOperator.PAR_RIGHT -> {
+                            var topExpToken = expTokenStack.popExpTokenOrNull<CAExpressionOperator>()
+                            while (topExpToken != null && topExpToken != CAExpressionOperator.PAR_LEFT) {
+                                outputExpTokens.add(topExpToken)
+                                topExpToken = expTokenStack.popExpTokenOrNull<CAExpressionOperator>()
+                            }
+                            if (topExpToken != CAExpressionOperator.PAR_LEFT)
+                                throw java.lang.IllegalArgumentException("No matching left parenthesis.")
+                        }
+                        else -> {
+                            var op2 = expTokenStack.peekExpTokenOrNull<CAExpressionOperator>()
+                            while (op2 != null) {
+                                val c = expToken.precedence.compareTo(op2.precedence)
+                                if (c < 0 || !expToken.rightAssociative && c <= 0) {
+                                    outputExpTokens.add(expTokenStack.pop())
+                                } else {
+                                    break
+                                }
+                                op2 = expTokenStack.peekExpTokenOrNull<CAExpressionOperator>()
+                            }
+                            expTokenStack.push(expToken)
+                        }
+                    }
+                }
+            }
+        }
+
+        while (!expTokenStack.isEmpty()) {
+            expTokenStack.peekExpTokenOrNull<CAExpressionOperator>()?.let {
+                if (it == CAExpressionOperator.PAR_LEFT)
+                    throw java.lang.IllegalArgumentException("No matching right parenthesis.")
+            }
+            val top = expTokenStack.pop()
+            outputExpTokens.add(top)
+        }
+
+        return outputExpTokens
+    }
+
+    /**
+     * Looks up and maps the expression token that matches the string version of that expression unit
+     */
+    private fun assignToken(tokenString: String): ExpressionToken {
+
+        // Check for the "monitor" keyword in the trigger expression, such as in 'monitor[id="abc"]'
+        if (tokenString.startsWith(ChainedAlertExpressionConstant.ConstantType.MONITOR.ident))
+            return CAExpressionToken(tokenString)
+
+        // Check for operators in the trigger expression, such as in [&&, ||, !]
+        for (op in CAExpressionOperator.values()) {
+            if (op.value == tokenString) return op
+        }
+
+        // Check for any constants in the trigger expression, such as in ["id", "[", "]", "="]
+        for (con in ChainedAlertExpressionConstant.ConstantType.values()) {
+            if (tokenString == con.ident) return ChainedAlertExpressionConstant(con)
+        }
+
+        throw IllegalArgumentException("Error while processing the trigger expression '$tokenString'")
+    }
+
+    private inline fun <reified T> Stack<ExpressionToken>.popExpTokenOrNull(): T? {
+        return try {
+            pop() as T
+        } catch (e: java.lang.Exception) {
+            null
+        }
+    }
+
+    private inline fun <reified T> Stack<ExpressionToken>.peekExpTokenOrNull(): T? {
+        return try {
+            peek() as T
+        } catch (e: java.lang.Exception) {
+            null
+        }
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ExpressionParser.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ExpressionParser.kt
new file mode 100644
index 000000000..e2ece9d40
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ExpressionParser.kt
@@ -0,0 +1,12 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.chainedAlertCondition.parsers
+
+import org.opensearch.alerting.chainedAlertCondition.resolvers.ChainedAlertTriggerResolver
+
+interface ExpressionParser {
+    fun parse(): ChainedAlertTriggerResolver
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertRPNResolver.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertRPNResolver.kt
new file mode 100644
index 000000000..dfec9614f
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertRPNResolver.kt
@@ -0,0 +1,110 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.chainedAlertCondition.resolvers
+
+import org.opensearch.alerting.chainedAlertCondition.tokens.CAExpressionOperator
+import org.opensearch.alerting.chainedAlertCondition.tokens.CAExpressionToken
+import org.opensearch.alerting.chainedAlertCondition.tokens.ChainedAlertExpressionConstant
+import org.opensearch.alerting.chainedAlertCondition.tokens.ExpressionToken
+import java.util.Stack
+
+/**
+ * Solves the trigger expression using a Reverse Polish Notation (RPN) based solver
+ * @param polishNotation an array of expression tokens organized in the RPN order
+ */
+class ChainedAlertRPNResolver(
+    private val polishNotation: ArrayList<ExpressionToken>,
+) : ChainedAlertTriggerResolver {
+
+    private val eqString by lazy {
+        val stringBuilder = StringBuilder()
+        for (expToken in polishNotation) {
+            when (expToken) {
+                is CAExpressionToken -> stringBuilder.append(expToken.value)
+                is CAExpressionOperator -> stringBuilder.append(expToken.value)
+                is ChainedAlertExpressionConstant -> stringBuilder.append(expToken.type.ident)
+                else -> throw Exception()
+            }
+            stringBuilder.append(" ")
+        }
+        stringBuilder.toString()
+    }
+
+    override fun toString(): String = eqString
+
+    /**
+     * Evaluates the trigger expression provided in the form of the RPN token array.
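+     *
+     * For illustration (hypothetical monitor ids), the condition "monitor[id=m1] && monitor[id=m2]"
+     * is parsed into the RPN sequence [monitor[id=m1], monitor[id=m2], &&] and can be evaluated as:
+     *
+     *     val resolver = ChainedAlertExpressionParser("monitor[id=m1] && monitor[id=m2]").parse()
+     *     resolver.evaluate(setOf("m1"))        // false: m2 generated no alert
+     *     resolver.evaluate(setOf("m1", "m2"))  // true: both operands of && are satisfied
+     *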
+     * @param alertGeneratingMonitors the set of monitor ids that generated an alert during this execution
+     * @return the boolean result of the trigger condition
+     */
+    override fun evaluate(alertGeneratingMonitors: Set<String>): Boolean {
+        val tokenStack = Stack<Boolean>()
+        val res = true
+        for (expToken in polishNotation) {
+            when (expToken) {
+                is CAExpressionToken -> tokenStack.push(resolveMonitorExpression(expToken.value, alertGeneratingMonitors))
+                is CAExpressionOperator -> {
+                    val right = tokenStack.pop()
+                    val expr = when (expToken) {
+                        CAExpressionOperator.AND -> ChainedAlertTriggerExpression.And(tokenStack.pop(), right)
+                        CAExpressionOperator.OR -> ChainedAlertTriggerExpression.Or(tokenStack.pop(), right)
+                        CAExpressionOperator.NOT -> ChainedAlertTriggerExpression.Not(res, right)
+                        else -> throw IllegalArgumentException("No matching operator.")
+                    }
+                    tokenStack.push(expr.resolve())
+                }
+            }
+        }
+        return tokenStack.pop()
+    }
+
+    override fun getMonitorIds(parsedTriggerCondition: ChainedAlertRPNResolver): Set<String> {
+        val monitorIds = mutableSetOf<String>()
+        for (expToken in polishNotation) {
+            when (expToken) {
+                is CAExpressionToken -> {
+                    val monitorExpString = expToken.value
+                    if (!monitorExpString.startsWith(ChainedAlertExpressionConstant.ConstantType.MONITOR.ident))
+                        continue
+                    val token = monitorExpString.substringAfter(ChainedAlertExpressionConstant.ConstantType.BRACKET_LEFT.ident)
+                        .substringBefore(ChainedAlertExpressionConstant.ConstantType.BRACKET_RIGHT.ident)
+                    if (token.isEmpty()) continue
+                    val tokens = token.split(ChainedAlertExpressionConstant.ConstantType.EQUALS.ident)
+                    if (tokens.isEmpty() || tokens.size != 2) continue
+                    val identifier = tokens[0]
+                    val value = tokens[1]
+                    when (identifier) {
+                        ChainedAlertExpressionConstant.ConstantType.ID.ident -> {
+                            monitorIds.add(value)
+                        }
+                    }
+                }
+                is CAExpressionOperator -> {
+                    continue
+                }
+            }
+        }
+        return monitorIds
+    }
+
+    private fun resolveMonitorExpression(monitorExpString: String, alertGeneratingMonitors: Set<String>): Boolean {
+        if (!monitorExpString.startsWith(ChainedAlertExpressionConstant.ConstantType.MONITOR.ident)) return false
+        val token = monitorExpString.substringAfter(ChainedAlertExpressionConstant.ConstantType.BRACKET_LEFT.ident)
+            .substringBefore(ChainedAlertExpressionConstant.ConstantType.BRACKET_RIGHT.ident)
+        if (token.isEmpty()) return false
+
+        val tokens = token.split(ChainedAlertExpressionConstant.ConstantType.EQUALS.ident)
+        if (tokens.isEmpty() || tokens.size != 2) return false
+
+        val identifier = tokens[0]
+        val value = tokens[1]
+
+        return when (identifier) {
+            ChainedAlertExpressionConstant.ConstantType.ID.ident -> alertGeneratingMonitors.contains(value)
+            else -> false
+        }
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerExpression.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerExpression.kt
new file mode 100644
index 000000000..4b373d853
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerExpression.kt
@@ -0,0 +1,32 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.chainedAlertCondition.resolvers
+
+sealed class ChainedAlertTriggerExpression {
+
+    fun resolve(): Boolean = when (this) {
+        is And -> resolveAnd(boolean1, boolean2)
+        is Or -> resolveOr(boolean1, boolean2)
+        is Not -> resolveNot(result, boolean2)
+    }
+
+    private fun resolveAnd(boolean1: Boolean, boolean2: Boolean): Boolean {
+        return boolean1
&& boolean2 + } + + private fun resolveOr(boolean1: Boolean, boolean2: Boolean): Boolean { + return boolean1 || boolean2 + } + + private fun resolveNot(result: Boolean, boolean2: Boolean): Boolean { + return result && !boolean2 + } + + // Operators implemented as operator functions + class And(val boolean1: Boolean, val boolean2: Boolean) : ChainedAlertTriggerExpression() + class Or(val boolean1: Boolean, val boolean2: Boolean) : ChainedAlertTriggerExpression() + class Not(val result: Boolean, val boolean2: Boolean) : ChainedAlertTriggerExpression() +} diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerResolver.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerResolver.kt new file mode 100644 index 000000000..6f2ff2de0 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerResolver.kt @@ -0,0 +1,11 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.chainedAlertCondition.resolvers + +interface ChainedAlertTriggerResolver { + fun getMonitorIds(parsedTriggerCondition: ChainedAlertRPNResolver): Set + fun evaluate(alertGeneratingMonitors: Set): Boolean +} diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionOperator.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionOperator.kt new file mode 100644 index 000000000..084b6aa70 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionOperator.kt @@ -0,0 +1,20 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.chainedAlertCondition.tokens + +/** + * To define all the operators used in the trigger expression + */ +enum class CAExpressionOperator(val value: String, val precedence: Int, val rightAssociative: Boolean) : ExpressionToken { + + AND("&&", 2, false), + OR("||", 2, false), + + NOT("!", 3, true), + + PAR_LEFT("(", 1, false), + PAR_RIGHT(")", 1, false) +} diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionToken.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionToken.kt new file mode 100644 index 000000000..ddf439d3f --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionToken.kt @@ -0,0 +1,11 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.chainedAlertCondition.tokens + +/** + * To define the tokens in Trigger expression such as monitor[id=“id1"] or monitor[id=“id2"] and monitor[id=“id3"] + */ +internal data class CAExpressionToken(val value: String) : ExpressionToken diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/ChainedAlertExpressionConstant.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/ChainedAlertExpressionConstant.kt new file mode 100644 index 000000000..4b35bc4a8 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/ChainedAlertExpressionConstant.kt @@ -0,0 +1,24 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.chainedAlertCondition.tokens + +/** + * To define all the tokens which could be part of expression constant such as 
diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/ChainedAlertExpressionConstant.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/ChainedAlertExpressionConstant.kt
new file mode 100644
index 000000000..4b35bc4a8
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/ChainedAlertExpressionConstant.kt
@@ -0,0 +1,24 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.chainedAlertCondition.tokens
+
+/**
+ * Defines all the tokens which can form an expression constant such as
+ * monitor[id=monitor_id]
+ */
+class ChainedAlertExpressionConstant(val type: ConstantType) : ExpressionToken {
+
+    enum class ConstantType(val ident: String) {
+        MONITOR("monitor"),
+
+        ID("id"),
+
+        BRACKET_LEFT("["),
+        BRACKET_RIGHT("]"),
+
+        EQUALS("=")
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/ExpressionToken.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/ExpressionToken.kt
new file mode 100644
index 000000000..38efed313
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/ExpressionToken.kt
@@ -0,0 +1,8 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.chainedAlertCondition.tokens
+
+interface ExpressionToken
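The ConstantType idents above are the only pieces a monitor[id=...] operand is built from. As a quick worked example of how resolveMonitorExpression and getMonitorIds dissect such a token (the helper name is hypothetical):

```kotlin
// Splits "monitor[id=abc123]" into its ident parts: MONITOR prefix, brackets,
// then an id=value pair. Returns null for anything that doesn't match.
fun extractMonitorId(token: String): String? {
    if (!token.startsWith("monitor")) return null              // MONITOR ident
    val body = token.substringAfter("[").substringBefore("]")  // BRACKET_LEFT / BRACKET_RIGHT
    val parts = body.split("=")                                // EQUALS
    return if (parts.size == 2 && parts[0] == "id") parts[1] else null
}

fun main() {
    println(extractMonitorId("monitor[id=abc123]")) // abc123
    println(extractMonitorId("monitor[tag=sev1]"))  // null; only id is supported
}
```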
diff --git a/alerting/bin/main/org/opensearch/alerting/model/AlertingConfigAccessor.kt b/alerting/bin/main/org/opensearch/alerting/model/AlertingConfigAccessor.kt
new file mode 100644
index 000000000..2e2b24b19
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/model/AlertingConfigAccessor.kt
@@ -0,0 +1,58 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.model
+
+import kotlinx.coroutines.Dispatchers
+import kotlinx.coroutines.withContext
+import org.opensearch.action.get.GetRequest
+import org.opensearch.action.get.GetResponse
+import org.opensearch.alerting.model.destination.email.EmailAccount
+import org.opensearch.alerting.model.destination.email.EmailGroup
+import org.opensearch.alerting.opensearchapi.suspendUntil
+import org.opensearch.client.Client
+import org.opensearch.common.xcontent.LoggingDeprecationHandler
+import org.opensearch.common.xcontent.XContentHelper
+import org.opensearch.common.xcontent.XContentType
+import org.opensearch.commons.alerting.model.ScheduledJob
+import org.opensearch.core.common.bytes.BytesReference
+import org.opensearch.core.xcontent.NamedXContentRegistry
+
+/**
+ * An accessor class for retrieving documents from the Alerting config index.
+ */
+class AlertingConfigAccessor {
+    companion object {
+
+        suspend fun getEmailAccountInfo(client: Client, xContentRegistry: NamedXContentRegistry, emailAccountId: String): EmailAccount {
+            val source = getAlertingConfigDocumentSource(client, "Email account", emailAccountId)
+            return withContext(Dispatchers.IO) {
+                val xcp = XContentHelper.createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, source, XContentType.JSON)
+                EmailAccount.parseWithType(xcp)
+            }
+        }
+
+        suspend fun getEmailGroupInfo(client: Client, xContentRegistry: NamedXContentRegistry, emailGroupId: String): EmailGroup {
+            val source = getAlertingConfigDocumentSource(client, "Email group", emailGroupId)
+            return withContext(Dispatchers.IO) {
+                val xcp = XContentHelper.createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, source, XContentType.JSON)
+                EmailGroup.parseWithType(xcp)
+            }
+        }
+
+        private suspend fun getAlertingConfigDocumentSource(
+            client: Client,
+            type: String,
+            docId: String
+        ): BytesReference {
+            val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, docId).routing(docId)
+            val getResponse: GetResponse = client.suspendUntil { client.get(getRequest, it) }
+            if (!getResponse.isExists || getResponse.isSourceEmpty) {
+                throw IllegalStateException("$type document with id $docId not found or source is empty")
+            }
+            return getResponse.sourceAsBytesRef
+        }
+    }
+}
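AlertingConfigAccessor leans on the plugin's Client.suspendUntil extension to await the callback-style client.get inside a coroutine (note the document is fetched once and its source returned directly; a second GET for the same id would be redundant). A generic, self-contained sketch of that listener-to-coroutine bridge follows; the ActionListener interface here is a simplified stand-in, not OpenSearch's, and the signature is assumed for illustration.

```kotlin
import kotlinx.coroutines.runBlocking
import kotlinx.coroutines.suspendCancellableCoroutine
import kotlin.coroutines.resume
import kotlin.coroutines.resumeWithException

// Simplified stand-in for an OpenSearch-style async listener
interface ActionListener<T> {
    fun onResponse(response: T)
    fun onFailure(e: Exception)
}

// Bridges a callback API into a suspend call, in the spirit of Client.suspendUntil
suspend fun <T> suspendUntil(block: (ActionListener<T>) -> Unit): T =
    suspendCancellableCoroutine { cont ->
        block(object : ActionListener<T> {
            override fun onResponse(response: T) = cont.resume(response)
            override fun onFailure(e: Exception) = cont.resumeWithException(e)
        })
    }

fun main() = runBlocking {
    val source = suspendUntil<String> { it.onResponse("{ \"doc\": \"source\" }") }
    println(source) // the coroutine resumed with the listener's response
}
```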
diff --git a/alerting/bin/main/org/opensearch/alerting/model/BucketLevelTriggerRunResult.kt b/alerting/bin/main/org/opensearch/alerting/model/BucketLevelTriggerRunResult.kt
new file mode 100644
index 000000000..ffc302d98
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/model/BucketLevelTriggerRunResult.kt
@@ -0,0 +1,58 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.model
+
+import org.opensearch.commons.alerting.model.AggregationResultBucket
+import org.opensearch.core.common.io.stream.StreamInput
+import org.opensearch.core.common.io.stream.StreamOutput
+import org.opensearch.core.xcontent.ToXContent
+import org.opensearch.core.xcontent.XContentBuilder
+import java.io.IOException
+
+data class BucketLevelTriggerRunResult(
+    override var triggerName: String,
+    override var error: Exception? = null,
+    var aggregationResultBuckets: Map<String, AggregationResultBucket>,
+    var actionResultsMap: MutableMap<String, MutableMap<String, ActionRunResult>> = mutableMapOf()
+) : TriggerRunResult(triggerName, error) {
+
+    @Throws(IOException::class)
+    @Suppress("UNCHECKED_CAST")
+    constructor(sin: StreamInput) : this(
+        sin.readString(),
+        sin.readException() as Exception?, // error
+        sin.readMap(StreamInput::readString, ::AggregationResultBucket),
+        sin.readMap() as MutableMap<String, MutableMap<String, ActionRunResult>>
+    )
+
+    override fun internalXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder {
+        return builder
+            .field(AGG_RESULT_BUCKETS, aggregationResultBuckets)
+            .field(ACTIONS_RESULTS, actionResultsMap as Map<String, Any>)
+    }
+
+    @Throws(IOException::class)
+    @Suppress("UNCHECKED_CAST")
+    override fun writeTo(out: StreamOutput) {
+        super.writeTo(out)
+        out.writeMap(aggregationResultBuckets, StreamOutput::writeString) {
+                valueOut: StreamOutput, aggResultBucket: AggregationResultBucket ->
+            aggResultBucket.writeTo(valueOut)
+        }
+        out.writeMap(actionResultsMap as Map<String, Any>)
+    }
+
+    companion object {
+        const val AGG_RESULT_BUCKETS = "agg_result_buckets"
+        const val ACTIONS_RESULTS = "action_results"
+
+        @JvmStatic
+        @Throws(IOException::class)
+        fun readFrom(sin: StreamInput): TriggerRunResult {
+            return BucketLevelTriggerRunResult(sin)
+        }
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/model/ChainedAlertTriggerRunResult.kt b/alerting/bin/main/org/opensearch/alerting/model/ChainedAlertTriggerRunResult.kt
new file mode 100644
index 000000000..b95e533e9
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/model/ChainedAlertTriggerRunResult.kt
@@ -0,0 +1,69 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.model
+
+import org.opensearch.commons.alerting.alerts.AlertError
+import org.opensearch.core.common.io.stream.StreamInput
+import org.opensearch.core.common.io.stream.StreamOutput
+import org.opensearch.core.xcontent.ToXContent
+import org.opensearch.core.xcontent.XContentBuilder
+import org.opensearch.script.ScriptException
+import java.io.IOException
+import java.time.Instant
+
+data class ChainedAlertTriggerRunResult(
+    override var triggerName: String,
+    var triggered: Boolean,
+    override var error: Exception?,
+    var actionResults: MutableMap<String, ActionRunResult> = mutableMapOf(),
+    val associatedAlertIds: Set<String>,
+) : TriggerRunResult(triggerName, error) {
+
+    @Throws(IOException::class)
+    @Suppress("UNCHECKED_CAST")
+    constructor(sin: StreamInput) : this(
+        triggerName = sin.readString(),
+        error = sin.readException(),
+        triggered = sin.readBoolean(),
+        actionResults = sin.readMap() as MutableMap<String, ActionRunResult>,
+        associatedAlertIds = sin.readStringList().toSet()
+    )
+
+    override fun alertError(): AlertError?
{ + if (error != null) { + return AlertError(Instant.now(), "Failed evaluating trigger:\n${error!!.userErrorMessage()}") + } + for (actionResult in actionResults.values) { + if (actionResult.error != null) { + return AlertError(Instant.now(), "Failed running action:\n${actionResult.error.userErrorMessage()}") + } + } + return null + } + + override fun internalXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + if (error is ScriptException) error = Exception((error as ScriptException).toJsonString(), error) + return builder + .field("triggered", triggered) + .field("action_results", actionResults as Map) + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + super.writeTo(out) + out.writeBoolean(triggered) + out.writeMap(actionResults as Map) + out.writeStringCollection(associatedAlertIds) + } + + companion object { + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): TriggerRunResult { + return ChainedAlertTriggerRunResult(sin) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/model/DocumentExecutionContext.kt b/alerting/bin/main/org/opensearch/alerting/model/DocumentExecutionContext.kt new file mode 100644 index 000000000..0caad1f4a --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/model/DocumentExecutionContext.kt @@ -0,0 +1,14 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.commons.alerting.model.DocLevelQuery + +data class DocumentExecutionContext( + val queries: List, + val lastRunContext: Map, + val updatedLastRunContext: Map +) diff --git a/alerting/bin/main/org/opensearch/alerting/model/DocumentLevelTriggerRunResult.kt b/alerting/bin/main/org/opensearch/alerting/model/DocumentLevelTriggerRunResult.kt new file mode 100644 index 000000000..9d98aab42 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/model/DocumentLevelTriggerRunResult.kt @@ -0,0 +1,52 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.script.ScriptException +import java.io.IOException + +data class DocumentLevelTriggerRunResult( + override var triggerName: String, + var triggeredDocs: List, + override var error: Exception?, + var actionResultsMap: MutableMap> = mutableMapOf() +) : TriggerRunResult(triggerName, error) { + + @Throws(IOException::class) + @Suppress("UNCHECKED_CAST") + constructor(sin: StreamInput) : this( + triggerName = sin.readString(), + error = sin.readException(), + triggeredDocs = sin.readStringList(), + actionResultsMap = sin.readMap() as MutableMap> + ) + + override fun internalXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + if (error is ScriptException) error = Exception((error as ScriptException).toJsonString(), error) + return builder + .field("triggeredDocs", triggeredDocs as List) + .field("action_results", actionResultsMap as Map) + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + super.writeTo(out) + out.writeStringCollection(triggeredDocs) + out.writeMap(actionResultsMap as Map) + } + + companion object { + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): 
TriggerRunResult { + return DocumentLevelTriggerRunResult(sin) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/model/MonitorMetadata.kt b/alerting/bin/main/org/opensearch/alerting/model/MonitorMetadata.kt new file mode 100644 index 000000000..d1c5c240e --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/model/MonitorMetadata.kt @@ -0,0 +1,198 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.alerting.model.destination.Destination.Companion.NO_ID +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.util.instant +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import org.opensearch.index.seqno.SequenceNumbers +import java.io.IOException +import java.time.Instant + +data class MonitorMetadata( + val id: String, + val seqNo: Long = SequenceNumbers.UNASSIGNED_SEQ_NO, + val primaryTerm: Long = SequenceNumbers.UNASSIGNED_PRIMARY_TERM, + val monitorId: String, + val lastActionExecutionTimes: List, + val lastRunContext: Map, + // Maps (sourceIndex + monitorId) --> concreteQueryIndex + val sourceToQueryIndexMapping: MutableMap = mutableMapOf() +) : Writeable, ToXContent { + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + id = sin.readString(), + seqNo = sin.readLong(), + primaryTerm = sin.readLong(), + monitorId = sin.readString(), + lastActionExecutionTimes = sin.readList(ActionExecutionTime::readFrom), + lastRunContext = Monitor.suppressWarning(sin.readMap()), + sourceToQueryIndexMapping = sin.readMap() as MutableMap + ) + + override fun writeTo(out: StreamOutput) { + out.writeString(id) + out.writeLong(seqNo) + out.writeLong(primaryTerm) + out.writeString(monitorId) + out.writeCollection(lastActionExecutionTimes) + out.writeMap(lastRunContext) + out.writeMap(sourceToQueryIndexMapping as MutableMap) + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + if (params.paramAsBoolean("with_type", false)) builder.startObject(METADATA) + builder.field(MONITOR_ID_FIELD, monitorId) + .field(LAST_ACTION_EXECUTION_FIELD, lastActionExecutionTimes.toTypedArray()) + if (lastRunContext.isNotEmpty()) builder.field(LAST_RUN_CONTEXT_FIELD, lastRunContext) + if (sourceToQueryIndexMapping.isNotEmpty()) { + builder.field(SOURCE_TO_QUERY_INDEX_MAP_FIELD, sourceToQueryIndexMapping as MutableMap) + } + if (params.paramAsBoolean("with_type", false)) builder.endObject() + return builder.endObject() + } + + companion object { + const val METADATA = "metadata" + const val MONITOR_ID_FIELD = "monitor_id" + const val LAST_ACTION_EXECUTION_FIELD = "last_action_execution_times" + const val LAST_RUN_CONTEXT_FIELD = "last_run_context" + const val SOURCE_TO_QUERY_INDEX_MAP_FIELD = "source_to_query_index_mapping" + + @JvmStatic + @JvmOverloads + @Throws(IOException::class) + fun parse( + xcp: XContentParser, + id: String = NO_ID, + seqNo: Long = SequenceNumbers.UNASSIGNED_SEQ_NO, + primaryTerm: Long = SequenceNumbers.UNASSIGNED_PRIMARY_TERM + ): MonitorMetadata { + lateinit var monitorId: String + val lastActionExecutionTimes = 
mutableListOf() + var lastRunContext: Map = mapOf() + var sourceToQueryIndexMapping: MutableMap = mutableMapOf() + + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + + when (fieldName) { + MONITOR_ID_FIELD -> monitorId = xcp.text() + LAST_ACTION_EXECUTION_FIELD -> { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { + lastActionExecutionTimes.add(ActionExecutionTime.parse(xcp)) + } + } + LAST_RUN_CONTEXT_FIELD -> lastRunContext = xcp.map() + SOURCE_TO_QUERY_INDEX_MAP_FIELD -> sourceToQueryIndexMapping = xcp.map() as MutableMap + } + } + + return MonitorMetadata( + if (id != NO_ID) id else "$monitorId-metadata", + seqNo = seqNo, + primaryTerm = primaryTerm, + monitorId = monitorId, + lastActionExecutionTimes = lastActionExecutionTimes, + lastRunContext = lastRunContext, + sourceToQueryIndexMapping = sourceToQueryIndexMapping + ) + } + + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): MonitorMetadata { + return MonitorMetadata(sin) + } + + /** workflowMetadataId is used as key for monitor metadata in the case when the workflow execution happens + so the monitor lastRunContext (in the case of doc level monitor) is not interfering with the monitor execution + WorkflowMetadataId will be either workflowId-metadata (when executing the workflow as it is scheduled) + or timestampWithUUID-metadata (when a workflow is executed in a dry-run mode) + In the case of temp workflow, doc level monitors must have lastRunContext created from scratch + That's why we are using workflowMetadataId - in order to ensure that the doc level monitor metadata is created from scratch + **/ + fun getId(monitor: Monitor, workflowMetadataId: String? = null): String { + return if (workflowMetadataId.isNullOrEmpty()) "${monitor.id}-metadata" + // WorkflowMetadataId already contains -metadata suffix + else "$workflowMetadataId-${monitor.id}-metadata" + } + } +} + +/** + * A value object containing action execution time. + */ +data class ActionExecutionTime( + val actionId: String, + val executionTime: Instant +) : Writeable, ToXContent { + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + sin.readString(), // actionId + sin.readInstant() // executionTime + ) + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject() + .field(ACTION_ID_FIELD, actionId) + .field(EXECUTION_TIME_FIELD, executionTime) + .endObject() + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeString(actionId) + out.writeInstant(executionTime) + } + + companion object { + const val ACTION_ID_FIELD = "action_id" + const val EXECUTION_TIME_FIELD = "execution_time" + + @JvmStatic + @Throws(IOException::class) + fun parse(xcp: XContentParser): ActionExecutionTime { + lateinit var actionId: String + lateinit var executionTime: Instant + + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + + when (fieldName) { + ACTION_ID_FIELD -> actionId = xcp.text() + EXECUTION_TIME_FIELD -> executionTime = xcp.instant()!! 
+                }
+            }
+
+            return ActionExecutionTime(
+                actionId,
+                executionTime
+            )
+        }
+
+        @JvmStatic
+        @Throws(IOException::class)
+        fun readFrom(sin: StreamInput): ActionExecutionTime {
+            return ActionExecutionTime(sin)
+        }
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/model/MonitorRunResult.kt b/alerting/bin/main/org/opensearch/alerting/model/MonitorRunResult.kt
new file mode 100644
index 000000000..07d839291
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/model/MonitorRunResult.kt
@@ -0,0 +1,216 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.model
+
+import org.apache.logging.log4j.LogManager
+import org.opensearch.OpenSearchException
+import org.opensearch.commons.alerting.alerts.AlertError
+import org.opensearch.commons.alerting.model.Trigger
+import org.opensearch.commons.alerting.util.optionalTimeField
+import org.opensearch.core.common.io.stream.StreamInput
+import org.opensearch.core.common.io.stream.StreamOutput
+import org.opensearch.core.common.io.stream.Writeable
+import org.opensearch.core.xcontent.ToXContent
+import org.opensearch.core.xcontent.XContentBuilder
+import org.opensearch.script.ScriptException
+import java.io.IOException
+import java.time.Instant
+
+data class MonitorRunResult<TriggerResult : TriggerRunResult>(
+    val monitorName: String,
+    val periodStart: Instant,
+    val periodEnd: Instant,
+    val error: Exception? = null,
+    val inputResults: InputRunResults = InputRunResults(),
+    val triggerResults: Map<String, TriggerResult> = mapOf()
+) : Writeable, ToXContent {
+
+    @Throws(IOException::class)
+    @Suppress("UNCHECKED_CAST")
+    constructor(sin: StreamInput) : this(
+        sin.readString(), // monitorName
+        sin.readInstant(), // periodStart
+        sin.readInstant(), // periodEnd
+        sin.readException(), // error
+        InputRunResults.readFrom(sin), // inputResults
+        suppressWarning(sin.readMap()) as Map<String, TriggerResult> // triggerResults
+    )
+
+    override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder {
+        return builder.startObject()
+            .field("monitor_name", monitorName)
+            .optionalTimeField("period_start", periodStart)
+            .optionalTimeField("period_end", periodEnd)
+            .field("error", error?.message)
+            .field("input_results", inputResults)
+            .field("trigger_results", triggerResults)
+            .endObject()
+    }
+
+    /** Returns error information to store in the Alert. Currently it's just the stack trace but it can be more */
+    fun alertError(): AlertError? {
+        if (error != null) {
+            return AlertError(Instant.now(), "Failed running monitor:\n${error.userErrorMessage()}")
+        }
+
+        if (inputResults.error != null) {
+            return AlertError(Instant.now(), "Failed fetching inputs:\n${inputResults.error.userErrorMessage()}")
+        }
+        return null
+    }
+
+    fun scriptContextError(trigger: Trigger): Exception? {
+        return error ?: inputResults.error ?: triggerResults[trigger.id]?.error
+    }
+
+    companion object {
+        @JvmStatic
+        @Throws(IOException::class)
+        fun readFrom(sin: StreamInput): MonitorRunResult<TriggerRunResult> {
+            return MonitorRunResult(sin)
+        }
+
+        @Suppress("UNCHECKED_CAST")
+        fun suppressWarning(map: MutableMap<String?, Any?>?): Map<String, TriggerRunResult> {
+            return map as Map<String, TriggerRunResult>
+        }
+    }
+
+    @Throws(IOException::class)
+    override fun writeTo(out: StreamOutput) {
+        out.writeString(monitorName)
+        out.writeInstant(periodStart)
+        out.writeInstant(periodEnd)
+        out.writeException(error)
+        inputResults.writeTo(out)
+        out.writeMap(triggerResults)
+    }
+}
+
+data class InputRunResults(
+    val results: List<Map<String, Any>> = listOf(),
+    val error: Exception? = null,
+    val aggTriggersAfterKey: MutableMap<String, TriggerAfterKey>?
= null +) : Writeable, ToXContent { + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject() + .field("results", results) + .field("error", error?.message) + .endObject() + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeVInt(results.size) + for (map in results) { + out.writeMap(map) + } + out.writeException(error) + } + + companion object { + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): InputRunResults { + val count = sin.readVInt() // count + val list = mutableListOf>() + for (i in 0 until count) { + list.add(suppressWarning(sin.readMap())) // result(map) + } + val error = sin.readException() // error + return InputRunResults(list, error) + } + + @Suppress("UNCHECKED_CAST") + fun suppressWarning(map: MutableMap?): Map { + return map as Map + } + } + + fun afterKeysPresent(): Boolean { + aggTriggersAfterKey?.forEach { + if (it.value.afterKey != null && !it.value.lastPage) { + return true + } + } + return false + } +} + +data class TriggerAfterKey(val afterKey: Map?, val lastPage: Boolean) + +data class ActionRunResult( + val actionId: String, + val actionName: String, + val output: Map, + val throttled: Boolean = false, + val executionTime: Instant? = null, + val error: Exception? = null +) : Writeable, ToXContent { + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + sin.readString(), // actionId + sin.readString(), // actionName + suppressWarning(sin.readMap()), // output + sin.readBoolean(), // throttled + sin.readOptionalInstant(), // executionTime + sin.readException() // error + ) + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject() + .field("id", actionId) + .field("name", actionName) + .field("output", output) + .field("throttled", throttled) + .optionalTimeField("executionTime", executionTime) + .field("error", error?.message) + .endObject() + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeString(actionId) + out.writeString(actionName) + out.writeMap(output) + out.writeBoolean(throttled) + out.writeOptionalInstant(executionTime) + out.writeException(error) + } + + companion object { + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): ActionRunResult { + return ActionRunResult(sin) + } + + @Suppress("UNCHECKED_CAST") + fun suppressWarning(map: MutableMap?): MutableMap { + return map as MutableMap + } + } +} + +private val logger = LogManager.getLogger(MonitorRunResult::class.java) + +/** Constructs an error message from an exception suitable for human consumption. */ +fun Throwable.userErrorMessage(): String { + return when { + this is ScriptException -> this.scriptStack.joinToString(separator = "\n", limit = 100) + this is OpenSearchException -> this.detailedMessage + this.message != null -> { + logger.info("Internal error: ${this.message}. See the opensearch.log for details", this) + this.message!! + } + else -> { + logger.info("Unknown Internal error. See the OpenSearch log for details.", this) + "Unknown Internal error. See the OpenSearch log for details." 
+ } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/model/QueryLevelTriggerRunResult.kt b/alerting/bin/main/org/opensearch/alerting/model/QueryLevelTriggerRunResult.kt new file mode 100644 index 000000000..d123dbae4 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/model/QueryLevelTriggerRunResult.kt @@ -0,0 +1,66 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.commons.alerting.alerts.AlertError +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.script.ScriptException +import java.io.IOException +import java.time.Instant + +data class QueryLevelTriggerRunResult( + override var triggerName: String, + var triggered: Boolean, + override var error: Exception?, + var actionResults: MutableMap = mutableMapOf() +) : TriggerRunResult(triggerName, error) { + + @Throws(IOException::class) + @Suppress("UNCHECKED_CAST") + constructor(sin: StreamInput) : this( + triggerName = sin.readString(), + error = sin.readException(), + triggered = sin.readBoolean(), + actionResults = sin.readMap() as MutableMap + ) + + override fun alertError(): AlertError? { + if (error != null) { + return AlertError(Instant.now(), "Failed evaluating trigger:\n${error!!.userErrorMessage()}") + } + for (actionResult in actionResults.values) { + if (actionResult.error != null) { + return AlertError(Instant.now(), "Failed running action:\n${actionResult.error.userErrorMessage()}") + } + } + return null + } + + override fun internalXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + if (error is ScriptException) error = Exception((error as ScriptException).toJsonString(), error) + return builder + .field("triggered", triggered) + .field("action_results", actionResults as Map) + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + super.writeTo(out) + out.writeBoolean(triggered) + out.writeMap(actionResults as Map) + } + + companion object { + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): TriggerRunResult { + return QueryLevelTriggerRunResult(sin) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/model/TriggerRunResult.kt b/alerting/bin/main/org/opensearch/alerting/model/TriggerRunResult.kt new file mode 100644 index 000000000..c3aec89f2 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/model/TriggerRunResult.kt @@ -0,0 +1,55 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.commons.alerting.alerts.AlertError +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import java.io.IOException +import java.time.Instant + +abstract class TriggerRunResult( + open var triggerName: String, + open var error: Exception? 
= null +) : Writeable, ToXContent { + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + .field("name", triggerName) + + internalXContent(builder, params) + val msg = error?.message + + builder.field("error", msg) + .endObject() + return builder + } + + abstract fun internalXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder + + /** Returns error information to store in the Alert. Currently it's just the stack trace but it can be more */ + open fun alertError(): AlertError? { + if (error != null) { + return AlertError(Instant.now(), "Failed evaluating trigger:\n${error!!.userErrorMessage()}") + } + return null + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeString(triggerName) + out.writeException(error) + } + + companion object { + @Suppress("UNCHECKED_CAST") + fun suppressWarning(map: MutableMap?): MutableMap { + return map as MutableMap + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/model/WorkflowMetadata.kt b/alerting/bin/main/org/opensearch/alerting/model/WorkflowMetadata.kt new file mode 100644 index 000000000..9ab7b43f8 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/model/WorkflowMetadata.kt @@ -0,0 +1,105 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.commons.alerting.util.instant +import org.opensearch.commons.alerting.util.optionalTimeField +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import java.io.IOException +import java.time.Instant + +data class WorkflowMetadata( + val id: String, + val workflowId: String, + val monitorIds: List, + val latestRunTime: Instant, + val latestExecutionId: String +) : Writeable, ToXContent { + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + id = sin.readString(), + workflowId = sin.readString(), + monitorIds = sin.readStringList(), + latestRunTime = sin.readInstant(), + latestExecutionId = sin.readString() + ) + + override fun writeTo(out: StreamOutput) { + out.writeString(id) + out.writeString(workflowId) + out.writeStringCollection(monitorIds) + out.writeInstant(latestRunTime) + out.writeString(latestExecutionId) + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + if (params.paramAsBoolean("with_type", false)) builder.startObject(METADATA) + builder.field(WORKFLOW_ID_FIELD, workflowId) + .field(MONITOR_IDS_FIELD, monitorIds) + .optionalTimeField(LATEST_RUN_TIME, latestRunTime) + .field(LATEST_EXECUTION_ID, latestExecutionId) + if (params.paramAsBoolean("with_type", false)) builder.endObject() + return builder.endObject() + } + + companion object { + const val METADATA = "workflow_metadata" + const val WORKFLOW_ID_FIELD = "workflow_id" + const val MONITOR_IDS_FIELD = "monitor_ids" + const val LATEST_RUN_TIME = "latest_run_time" + const val LATEST_EXECUTION_ID = "latest_execution_id" + + @JvmStatic @JvmOverloads + @Throws(IOException::class) + fun parse(xcp: XContentParser): WorkflowMetadata { + lateinit var workflowId: String + var 
monitorIds = mutableListOf() + lateinit var latestRunTime: Instant + lateinit var latestExecutionId: String + + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + + when (fieldName) { + WORKFLOW_ID_FIELD -> workflowId = xcp.text() + MONITOR_IDS_FIELD -> { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { + monitorIds.add(xcp.text()) + } + } + LATEST_RUN_TIME -> latestRunTime = xcp.instant()!! + LATEST_EXECUTION_ID -> latestExecutionId = xcp.text() + } + } + return WorkflowMetadata( + id = "$workflowId-metadata", + workflowId = workflowId, + monitorIds = monitorIds, + latestRunTime = latestRunTime, + latestExecutionId = latestExecutionId + ) + } + + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): WorkflowMetadata { + return WorkflowMetadata(sin) + } + + fun getId(workflowId: String? = null) = "$workflowId-metadata" + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/model/WorkflowRunResult.kt b/alerting/bin/main/org/opensearch/alerting/model/WorkflowRunResult.kt new file mode 100644 index 000000000..cabdc6330 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/model/WorkflowRunResult.kt @@ -0,0 +1,82 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import java.io.IOException +import java.lang.Exception +import java.time.Instant + +data class WorkflowRunResult( + val workflowId: String, + val workflowName: String, + val monitorRunResults: List> = mutableListOf(), + val executionStartTime: Instant, + var executionEndTime: Instant? = null, + val executionId: String, + val error: Exception? 
= null, + val triggerResults: Map = mapOf(), +) : Writeable, ToXContent { + + @Throws(IOException::class) + @Suppress("UNCHECKED_CAST") + constructor(sin: StreamInput) : this( + workflowId = sin.readString(), + workflowName = sin.readString(), + monitorRunResults = sin.readList> { s: StreamInput -> MonitorRunResult.readFrom(s) }, + executionStartTime = sin.readInstant(), + executionEndTime = sin.readOptionalInstant(), + executionId = sin.readString(), + error = sin.readException(), + triggerResults = suppressWarning(sin.readMap()) as Map + ) + + override fun writeTo(out: StreamOutput) { + out.writeString(workflowId) + out.writeString(workflowName) + out.writeList(monitorRunResults) + out.writeInstant(executionStartTime) + out.writeOptionalInstant(executionEndTime) + out.writeString(executionId) + out.writeException(error) + out.writeMap(triggerResults) + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + builder.field("execution_id", executionId) + builder.field("workflow_name", workflowName) + builder.field("workflow_id", workflowId) + builder.field("trigger_results", triggerResults) + builder.startArray("monitor_run_results") + for (monitorResult in monitorRunResults) { + monitorResult.toXContent(builder, ToXContent.EMPTY_PARAMS) + } + builder.endArray() + .field("execution_start_time", executionStartTime) + .field("execution_end_time", executionEndTime) + .field("error", error?.message) + .endObject() + return builder + } + + companion object { + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): WorkflowRunResult { + return WorkflowRunResult(sin) + } + + @Suppress("UNCHECKED_CAST") + fun suppressWarning(map: MutableMap?): Map { + return map as Map + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/Chime.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/Chime.kt new file mode 100644 index 000000000..06d066ded --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/model/destination/Chime.kt @@ -0,0 +1,74 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model.destination + +import org.opensearch.core.common.Strings +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken +import java.io.IOException + +/** + * A value object that represents a Chime message. 
Chime message will be + * submitted to the Chime destination + */ +data class Chime(val url: String) : ToXContent { + + init { + require(!Strings.isNullOrEmpty(url)) { "URL is null or empty" } + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject(TYPE) + .field(URL, url) + .endObject() + } + + @Throws(IOException::class) + fun writeTo(out: StreamOutput) { + out.writeString(url) + } + + companion object { + const val URL = "url" + const val TYPE = "chime" + + @JvmStatic + @Throws(IOException::class) + fun parse(xcp: XContentParser): Chime { + lateinit var url: String + + ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + when (fieldName) { + URL -> url = xcp.text() + else -> { + throw IllegalStateException("Unexpected field: $fieldName, while parsing Chime destination") + } + } + } + return Chime(url) + } + + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): Chime? { + return if (sin.readBoolean()) { + Chime(sin.readString()) + } else null + } + } + + // Complete JSON structure is now constructed in the notification plugin + fun constructMessageContent(subject: String?, message: String): String { + return if (Strings.isNullOrEmpty(subject)) message else "$subject \n\n $message" + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/CustomWebhook.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/CustomWebhook.kt new file mode 100644 index 000000000..5758576d8 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/model/destination/CustomWebhook.kt @@ -0,0 +1,143 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model.destination + +import org.opensearch.core.common.Strings +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken +import java.io.IOException + +/** + * A value object that represents a Custom webhook message. Webhook message will be + * submitted to the Custom webhook destination + */ +data class CustomWebhook( + val url: String?, + val scheme: String?, + val host: String?, + val port: Int, + val path: String?, + val method: String?, + val queryParams: Map, + val headerParams: Map, + val username: String?, + val password: String? +) : ToXContent { + + init { + require(!(Strings.isNullOrEmpty(url) && Strings.isNullOrEmpty(host))) { + "Url or Host name must be provided." 
+ } + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject(TYPE) + .field(URL, url) + .field(SCHEME_FIELD, scheme) + .field(HOST_FIELD, host) + .field(PORT_FIELD, port) + .field(PATH_FIELD, path) + .field(METHOD_FIELD, method) + .field(QUERY_PARAMS_FIELD, queryParams) + .field(HEADER_PARAMS_FIELD, headerParams) + .field(USERNAME_FIELD, username) + .field(PASSWORD_FIELD, password) + .endObject() + } + + @Throws(IOException::class) + fun writeTo(out: StreamOutput) { + out.writeString(url) + out.writeOptionalString(scheme) + out.writeString(host) + out.writeOptionalInt(port) + out.writeOptionalString(path) + out.writeOptionalString(method) + out.writeMap(queryParams) + out.writeMap(headerParams) + out.writeOptionalString(username) + out.writeOptionalString(password) + } + + companion object { + const val URL = "url" + const val TYPE = "custom_webhook" + const val SCHEME_FIELD = "scheme" + const val HOST_FIELD = "host" + const val PORT_FIELD = "port" + const val PATH_FIELD = "path" + const val METHOD_FIELD = "method" + const val QUERY_PARAMS_FIELD = "query_params" + const val HEADER_PARAMS_FIELD = "header_params" + const val USERNAME_FIELD = "username" + const val PASSWORD_FIELD = "password" + + @JvmStatic + @Throws(IOException::class) + fun parse(xcp: XContentParser): CustomWebhook { + var url: String? = null + var scheme: String? = null + var host: String? = null + var port: Int = -1 + var path: String? = null + var method: String? = null + var queryParams: Map = mutableMapOf() + var headerParams: Map = mutableMapOf() + var username: String? = null + var password: String? = null + + ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + when (fieldName) { + URL -> url = xcp.textOrNull() + SCHEME_FIELD -> scheme = xcp.textOrNull() + HOST_FIELD -> host = xcp.textOrNull() + PORT_FIELD -> port = xcp.intValue() + PATH_FIELD -> path = xcp.textOrNull() + METHOD_FIELD -> method = xcp.textOrNull() + QUERY_PARAMS_FIELD -> queryParams = xcp.mapStrings() + HEADER_PARAMS_FIELD -> headerParams = xcp.mapStrings() + USERNAME_FIELD -> username = xcp.textOrNull() + PASSWORD_FIELD -> password = xcp.textOrNull() + else -> { + throw IllegalStateException("Unexpected field: $fieldName, while parsing custom webhook destination") + } + } + } + return CustomWebhook(url, scheme, host, port, path, method, queryParams, headerParams, username, password) + } + + @Suppress("UNCHECKED_CAST") + fun suppressWarning(map: MutableMap?): Map { + return map as Map + } + + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): CustomWebhook? 
{ + return if (sin.readBoolean()) { + CustomWebhook( + sin.readString(), // url + sin.readOptionalString(), // scheme + sin.readString(), // host + sin.readOptionalInt(), // port + sin.readOptionalString(), // path + sin.readOptionalString(), // method + suppressWarning(sin.readMap()), // queryParams) + suppressWarning(sin.readMap()), // headerParams) + sin.readOptionalString(), // username + sin.readOptionalString() // password + ) + } else null + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/Destination.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/Destination.kt new file mode 100644 index 000000000..c5c5fc4bd --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/model/destination/Destination.kt @@ -0,0 +1,308 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model.destination + +import org.apache.logging.log4j.LogManager +import org.opensearch.alerting.model.destination.email.Email +import org.opensearch.alerting.opensearchapi.convertToMap +import org.opensearch.alerting.util.DestinationType +import org.opensearch.alerting.util.destinationmigration.DestinationConversionUtils.Companion.convertAlertingToNotificationMethodType +import org.opensearch.commons.alerting.util.IndexUtils.Companion.NO_SCHEMA_VERSION +import org.opensearch.commons.alerting.util.instant +import org.opensearch.commons.alerting.util.optionalTimeField +import org.opensearch.commons.alerting.util.optionalUserField +import org.opensearch.commons.authuser.User +import org.opensearch.commons.destination.message.LegacyBaseMessage +import org.opensearch.commons.destination.message.LegacyChimeMessage +import org.opensearch.commons.destination.message.LegacyCustomWebhookMessage +import org.opensearch.commons.destination.message.LegacyEmailMessage +import org.opensearch.commons.destination.message.LegacySlackMessage +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken +import java.io.IOException +import java.time.Instant +import java.util.Locale + +/** + * A value object that represents a Destination message. + */ +data class Destination( + val id: String = NO_ID, + val version: Long = NO_VERSION, + val schemaVersion: Int = NO_SCHEMA_VERSION, + val seqNo: Int = NO_SEQ_NO, + val primaryTerm: Int = NO_PRIMARY_TERM, + val type: DestinationType, + val name: String, + val user: User?, + val lastUpdateTime: Instant, + val chime: Chime?, + val slack: Slack?, + val customWebhook: CustomWebhook?, + val email: Email? 
+) : ToXContent { + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return createXContentBuilder(builder, params, true) + } + + fun toXContentWithUser(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return createXContentBuilder(builder, params, false) + } + private fun createXContentBuilder(builder: XContentBuilder, params: ToXContent.Params, secure: Boolean): XContentBuilder { + builder.startObject() + if (params.paramAsBoolean("with_type", false)) builder.startObject(DESTINATION) + builder.field(ID_FIELD, id) + .field(TYPE_FIELD, type.value) + .field(NAME_FIELD, name) + + if (!secure) { + builder.optionalUserField(USER_FIELD, user) + } + + builder.field(SCHEMA_VERSION, schemaVersion) + .field(SEQ_NO_FIELD, seqNo) + .field(PRIMARY_TERM_FIELD, primaryTerm) + .optionalTimeField(LAST_UPDATE_TIME_FIELD, lastUpdateTime) + .field(type.value, constructResponseForDestinationType(type)) + if (params.paramAsBoolean("with_type", false)) builder.endObject() + return builder.endObject() + } + fun toXContent(builder: XContentBuilder): XContentBuilder { + return toXContent(builder, ToXContent.EMPTY_PARAMS) + } + + @Throws(IOException::class) + fun writeTo(out: StreamOutput) { + out.writeString(id) + out.writeLong(version) + out.writeInt(schemaVersion) + out.writeInt(seqNo) + out.writeInt(primaryTerm) + out.writeEnum(type) + out.writeString(name) + out.writeBoolean(user != null) + user?.writeTo(out) + out.writeInstant(lastUpdateTime) + out.writeBoolean(chime != null) + chime?.writeTo(out) + out.writeBoolean(slack != null) + slack?.writeTo(out) + out.writeBoolean(customWebhook != null) + customWebhook?.writeTo(out) + out.writeBoolean(email != null) + email?.writeTo(out) + } + + companion object { + const val DESTINATION = "destination" + const val ID_FIELD = "id" + const val TYPE_FIELD = "type" + const val NAME_FIELD = "name" + const val USER_FIELD = "user" + const val NO_ID = "" + const val NO_VERSION = 1L + const val NO_SEQ_NO = 0 + const val NO_PRIMARY_TERM = 0 + const val SCHEMA_VERSION = "schema_version" + const val SEQ_NO_FIELD = "seq_no" + const val PRIMARY_TERM_FIELD = "primary_term" + const val LAST_UPDATE_TIME_FIELD = "last_update_time" + const val CHIME = "chime" + const val SLACK = "slack" + const val CUSTOMWEBHOOK = "custom_webhook" + const val EMAIL = "email" + + // This constant is used for test actions created part of integ tests + const val TEST_ACTION = "test" + + private val logger = LogManager.getLogger(Destination::class.java) + + @JvmStatic + @JvmOverloads + @Throws(IOException::class) + fun parse( + xcp: XContentParser, + id: String = NO_ID, + version: Long = NO_VERSION, + seqNo: Int = NO_SEQ_NO, + primaryTerm: Int = NO_PRIMARY_TERM + ): Destination { + lateinit var name: String + var user: User? = null + lateinit var type: String + var slack: Slack? = null + var chime: Chime? = null + var customWebhook: CustomWebhook? = null + var email: Email? = null + var lastUpdateTime: Instant? 
= null + var schemaVersion = NO_SCHEMA_VERSION + + ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + + when (fieldName) { + NAME_FIELD -> name = xcp.text() + USER_FIELD -> user = if (xcp.currentToken() == XContentParser.Token.VALUE_NULL) null else User.parse(xcp) + TYPE_FIELD -> { + type = xcp.text() + val allowedTypes = DestinationType.values().map { it.value } + if (!allowedTypes.contains(type)) { + throw IllegalStateException("Type should be one of the $allowedTypes") + } + } + LAST_UPDATE_TIME_FIELD -> lastUpdateTime = xcp.instant() + CHIME -> { + chime = Chime.parse(xcp) + } + SLACK -> { + slack = Slack.parse(xcp) + } + CUSTOMWEBHOOK -> { + customWebhook = CustomWebhook.parse(xcp) + } + EMAIL -> { + email = Email.parse(xcp) + } + TEST_ACTION -> { + // This condition is for integ tests to avoid parsing + } + SCHEMA_VERSION -> { + schemaVersion = xcp.intValue() + } + else -> { + xcp.skipChildren() + } + } + } + return Destination( + id, + version, + schemaVersion, + seqNo, + primaryTerm, + DestinationType.valueOf(type.uppercase(Locale.ROOT)), + requireNotNull(name) { "Destination name is null" }, + user, + lastUpdateTime ?: Instant.now(), + chime, + slack, + customWebhook, + email + ) + } + + @JvmStatic + @Throws(IOException::class) + fun parseWithType(xcp: XContentParser, id: String = NO_ID, version: Long = NO_VERSION): Destination { + ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + ensureExpectedToken(XContentParser.Token.FIELD_NAME, xcp.nextToken(), xcp) + ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + val destination = parse(xcp, id, version) + ensureExpectedToken(XContentParser.Token.END_OBJECT, xcp.nextToken(), xcp) + return destination + } + + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): Destination { + return Destination( + id = sin.readString(), + version = sin.readLong(), + schemaVersion = sin.readInt(), + seqNo = sin.readInt(), + primaryTerm = sin.readInt(), + type = sin.readEnum(DestinationType::class.java), + name = sin.readString(), + user = if (sin.readBoolean()) { + User(sin) + } else null, + lastUpdateTime = sin.readInstant(), + chime = Chime.readFrom(sin), + slack = Slack.readFrom(sin), + customWebhook = CustomWebhook.readFrom(sin), + email = Email.readFrom(sin) + ) + } + } + + fun buildLegacyBaseMessage( + compiledSubject: String?, + compiledMessage: String, + destinationCtx: DestinationContext + ): LegacyBaseMessage { + val destinationMessage: LegacyBaseMessage + when (type) { + DestinationType.CHIME -> { + val messageContent = chime?.constructMessageContent(compiledSubject, compiledMessage) + destinationMessage = LegacyChimeMessage.Builder(name) + .withUrl(chime?.url) + .withMessage(messageContent) + .build() + } + DestinationType.SLACK -> { + val messageContent = slack?.constructMessageContent(compiledSubject, compiledMessage) + destinationMessage = LegacySlackMessage.Builder(name) + .withUrl(slack?.url) + .withMessage(messageContent) + .build() + } + DestinationType.CUSTOM_WEBHOOK -> { + destinationMessage = LegacyCustomWebhookMessage.Builder(name) + .withUrl(getLegacyCustomWebhookMessageURL(customWebhook, compiledMessage)) + .withHeaderParams(customWebhook?.headerParams) + .withMessage(compiledMessage).build() + } + DestinationType.EMAIL -> { + val emailAccount = destinationCtx.emailAccount + destinationMessage = 
LegacyEmailMessage.Builder(name) + .withAccountName(emailAccount?.name) + .withHost(emailAccount?.host) + .withPort(emailAccount?.port) + .withMethod(emailAccount?.method?.let { convertAlertingToNotificationMethodType(it).toString() }) + .withFrom(emailAccount?.email) + .withRecipients(destinationCtx.recipients) + .withSubject(compiledSubject) + .withMessage(compiledMessage).build() + } + else -> throw IllegalArgumentException("Unsupported Destination type [$type] for building legacy message") + } + return destinationMessage + } + + private fun constructResponseForDestinationType(type: DestinationType): Any { + var content: Any? = null + when (type) { + DestinationType.CHIME -> content = chime?.convertToMap()?.get(type.value) + DestinationType.SLACK -> content = slack?.convertToMap()?.get(type.value) + DestinationType.CUSTOM_WEBHOOK -> content = customWebhook?.convertToMap()?.get(type.value) + DestinationType.EMAIL -> content = email?.convertToMap()?.get(type.value) + DestinationType.TEST_ACTION -> content = "dummy" + } + if (content == null) { + throw IllegalArgumentException("Content is NULL for destination type ${type.value}") + } + return content + } + + private fun getLegacyCustomWebhookMessageURL(customWebhook: CustomWebhook?, message: String): String { + return LegacyCustomWebhookMessage.Builder(name) + .withUrl(customWebhook?.url) + .withScheme(customWebhook?.scheme) + .withHost(customWebhook?.host) + .withPort(customWebhook?.port) + .withPath(customWebhook?.path) + .withQueryParams(customWebhook?.queryParams) + .withMessage(message) + .build().uri.toString() + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/DestinationContext.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/DestinationContext.kt new file mode 100644 index 000000000..5b3febc87 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/model/destination/DestinationContext.kt @@ -0,0 +1,17 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model.destination + +import org.opensearch.alerting.model.destination.email.EmailAccount + +/** + * DestinationContext is a value object that contains additional context information needed at runtime to publish to a destination. + * For now it only contains the information retrieved from documents by ID for Email (such as email account and email group recipients). + */ +data class DestinationContext( + val emailAccount: EmailAccount? 
= null,
+    val recipients: List<String> = emptyList()
+)
diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/DestinationContextFactory.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/DestinationContextFactory.kt
new file mode 100644
index 000000000..263962ac7
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/model/destination/DestinationContextFactory.kt
@@ -0,0 +1,80 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.model.destination
+
+import org.opensearch.alerting.model.AlertingConfigAccessor
+import org.opensearch.alerting.model.destination.email.Email
+import org.opensearch.alerting.model.destination.email.EmailAccount
+import org.opensearch.alerting.model.destination.email.Recipient
+import org.opensearch.alerting.settings.DestinationSettings.Companion.SecureDestinationSettings
+import org.opensearch.alerting.util.DestinationType
+import org.opensearch.client.Client
+import org.opensearch.core.common.settings.SecureString
+import org.opensearch.core.xcontent.NamedXContentRegistry
+
+/**
+ * This class is responsible for generating [DestinationContext].
+ */
+class DestinationContextFactory(
+    val client: Client,
+    val xContentRegistry: NamedXContentRegistry,
+    private var destinationSettings: Map<String, SecureDestinationSettings>
+) {
+
+    fun updateDestinationSettings(destinationSettings: Map<String, SecureDestinationSettings>) {
+        this.destinationSettings = destinationSettings
+    }
+
+    suspend fun getDestinationContext(destination: Destination): DestinationContext {
+        var destinationContext = DestinationContext()
+        // Populate DestinationContext based on Destination type
+        if (destination.type == DestinationType.EMAIL) {
+            val email = destination.email
+            requireNotNull(email) { "Email in Destination: $destination was null" }
+
+            var emailAccount = AlertingConfigAccessor.getEmailAccountInfo(client, xContentRegistry, email.emailAccountID)
+
+            emailAccount = addEmailCredentials(emailAccount)
+
+            // Get the email recipients as a unique list of email strings since
+            // recipients can be a combination of EmailGroups and single emails
+            val uniqueListOfRecipients = getUniqueListOfEmailRecipients(email)
+
+            destinationContext = destinationContext.copy(emailAccount = emailAccount, recipients = uniqueListOfRecipients)
+        }
+
+        return destinationContext
+    }
+
+    private fun addEmailCredentials(emailAccount: EmailAccount): EmailAccount {
+        // Retrieve and populate the EmailAccount object with credentials if authentication is enabled
+        if (emailAccount.method != EmailAccount.MethodType.NONE) {
+            val emailUsername: SecureString? = destinationSettings[emailAccount.name]?.emailUsername
+            val emailPassword: SecureString? = destinationSettings[emailAccount.name]?.emailPassword
+
+            return emailAccount.copy(username = emailUsername, password = emailPassword)
+        }
+
+        return emailAccount
+    }
+
+    private suspend fun getUniqueListOfEmailRecipients(email: Email): List<String> {
+        val uniqueRecipients: MutableSet<String> = mutableSetOf()
+        email.recipients.forEach { recipient ->
+            when (recipient.type) {
+                // Recipient attributes are checked for being non-null based on type during initialization
+                // so non-null assertion calls are made here
+                Recipient.RecipientType.EMAIL -> uniqueRecipients.add(recipient.email!!)
+                Recipient.RecipientType.EMAIL_GROUP -> {
+                    val emailGroup = AlertingConfigAccessor.getEmailGroupInfo(client, xContentRegistry, recipient.emailGroupID!!)
+                    emailGroup.getEmailsAsListOfString().forEach { uniqueRecipients.add(it) }
+                }
+            }
+        }
+
+        return uniqueRecipients.toList()
+    }
+}
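getUniqueListOfEmailRecipients flattens single addresses and whole email groups into one de-duplicated list. A runnable illustration of that behavior with simplified stand-in types (not the plugin's Recipient/EmailGroup models):

```kotlin
// Stand-in recipient: either a single address or a group of addresses
data class SimpleRecipient(val email: String? = null, val groupEmails: List<String> = emptyList())

fun uniqueEmailRecipients(recipients: List<SimpleRecipient>): List<String> {
    val unique = mutableSetOf<String>() // a set de-duplicates across singles and groups
    for (r in recipients) {
        r.email?.let { unique.add(it) }
        r.groupEmails.forEach { unique.add(it) }
    }
    return unique.toList()
}

fun main() {
    val out = uniqueEmailRecipients(
        listOf(
            SimpleRecipient(email = "a@example.com"),
            SimpleRecipient(groupEmails = listOf("a@example.com", "b@example.com"))
        )
    )
    println(out) // [a@example.com, b@example.com]
}
```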
+ emailGroup.getEmailsAsListOfString().map { uniqueRecipients.add(it) } + } + } + } + + return uniqueRecipients.toList() + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/SNS.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/SNS.kt new file mode 100644 index 000000000..f9c6ec59f --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/model/destination/SNS.kt @@ -0,0 +1,63 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model.destination + +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import java.io.IOException +import java.lang.IllegalStateException +import java.util.regex.Pattern + +data class SNS(val topicARN: String, val roleARN: String) : ToXContent { + + init { + require(SNS_ARN_REGEX.matcher(topicARN).find()) { "Invalid AWS SNS topic ARN: $topicARN" } + require(IAM_ARN_REGEX.matcher(roleARN).find()) { "Invalid AWS role ARN: $roleARN " } + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject(SNS_TYPE) + .field(TOPIC_ARN_FIELD, topicARN) + .field(ROLE_ARN_FIELD, roleARN) + .endObject() + } + + companion object { + + private val SNS_ARN_REGEX = Pattern.compile("^arn:aws(-[^:]+)?:sns:([a-zA-Z0-9-]+):([0-9]{12}):([a-zA-Z0-9-_]+)$") + private val IAM_ARN_REGEX = Pattern.compile("^arn:aws(-[^:]+)?:iam::([0-9]{12}):([a-zA-Z0-9-/_]+)$") + + const val TOPIC_ARN_FIELD = "topic_arn" + const val ROLE_ARN_FIELD = "role_arn" + const val SNS_TYPE = "sns" + + @JvmStatic + @Throws(IOException::class) + fun parse(xcp: XContentParser): SNS { + lateinit var topicARN: String + lateinit var roleARN: String + + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + when (fieldName) { + TOPIC_ARN_FIELD -> topicARN = xcp.textOrNull() + ROLE_ARN_FIELD -> roleARN = xcp.textOrNull() + else -> { + throw IllegalStateException("Unexpected field: $fieldName, while parsing SNS destination") + } + } + } + return SNS( + requireNotNull(topicARN) { "SNS Action topic_arn is null" }, + requireNotNull(roleARN) { "SNS Action role_arn is null" } + ) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/Slack.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/Slack.kt new file mode 100644 index 000000000..14f623616 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/model/destination/Slack.kt @@ -0,0 +1,74 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model.destination + +import org.opensearch.core.common.Strings +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken +import java.io.IOException + +/** + * A value object that represents a Slack message. 
Slack message will be + * submitted to the Slack destination + */ +data class Slack(val url: String) : ToXContent { + + init { + require(!Strings.isNullOrEmpty(url)) { "URL is null or empty" } + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject(TYPE) + .field(URL, url) + .endObject() + } + + @Throws(IOException::class) + fun writeTo(out: StreamOutput) { + out.writeString(url) + } + + companion object { + const val URL = "url" + const val TYPE = "slack" + + @JvmStatic + @Throws(IOException::class) + fun parse(xcp: XContentParser): Slack { + lateinit var url: String + + ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + when (fieldName) { + URL -> url = xcp.text() + else -> { + throw IllegalStateException("Unexpected field: $fieldName, while parsing Slack destination") + } + } + } + return Slack(url) + } + + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): Slack? { + return if (sin.readBoolean()) { + Slack(sin.readString()) + } else null + } + } + + // Complete JSON structure is now constructed in the notification plugin + fun constructMessageContent(subject: String?, message: String): String { + return if (Strings.isNullOrEmpty(subject)) message else "$subject \n\n $message" + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/email/Email.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/email/Email.kt new file mode 100644 index 000000000..75635ab38 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/model/destination/email/Email.kt @@ -0,0 +1,188 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model.destination.email + +import org.opensearch.alerting.util.isValidEmail +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParser.Token +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken +import java.io.IOException +import java.lang.IllegalStateException +import java.util.Locale + +/** + * A value object that represents an Email message. Email messages will be + * submitted to the Email destination. 
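+ * + * Illustrative serialized shape, per toXContent below (field names come from the companion constants; the + * id and address are placeholder values): + * { "email": { "email_account_id": "<email account doc id>", "recipients": [ { "type": "email", "email": "user@example.com" } ] } }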
+ */ +data class Email( + val emailAccountID: String, + val recipients: List +) : Writeable, ToXContent { + + init { + require(recipients.isNotEmpty()) { "At least one recipient must be provided" } + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject(TYPE) + .field(EMAIL_ACCOUNT_ID_FIELD, emailAccountID) + .field(RECIPIENTS_FIELD, recipients.toTypedArray()) + .endObject() + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeString(emailAccountID) + out.writeCollection(recipients) + } + + companion object { + const val TYPE = "email" + const val EMAIL_ACCOUNT_ID_FIELD = "email_account_id" + const val RECIPIENTS_FIELD = "recipients" + + @JvmStatic + @Throws(IOException::class) + fun parse(xcp: XContentParser): Email { + lateinit var emailAccountID: String + val recipients: MutableList = mutableListOf() + + ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp) + while (xcp.nextToken() != Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + + when (fieldName) { + EMAIL_ACCOUNT_ID_FIELD -> emailAccountID = xcp.text() + RECIPIENTS_FIELD -> { + ensureExpectedToken(Token.START_ARRAY, xcp.currentToken(), xcp) + while (xcp.nextToken() != Token.END_ARRAY) { + recipients.add(Recipient.parse(xcp)) + } + } + else -> { + throw IllegalStateException("Unexpected field: $fieldName, while parsing email destination") + } + } + } + + return Email( + requireNotNull(emailAccountID) { "Email account ID is null" }, + recipients + ) + } + + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): Email? { + return if (sin.readBoolean()) { + Email( + sin.readString(), // emailAccountID + sin.readList(::Recipient) // recipients + ) + } else null + } + } +} + +/** + * A value object containing a recipient for an Email. + */ +data class Recipient( + val type: RecipientType, + val emailGroupID: String?, + val email: String? +) : Writeable, ToXContent { + + init { + when (type) { + RecipientType.EMAIL_GROUP -> requireNotNull(emailGroupID) { "Email group ID is null" } + RecipientType.EMAIL -> { + requireNotNull(email) { "Email is null" } + require(isValidEmail(email)) { "Invalid email" } + } + } + } + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + sin.readEnum(Recipient.RecipientType::class.java), // type + sin.readOptionalString(), // emailGroupId + sin.readOptionalString() // email + ) + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject().field(TYPE_FIELD, type.value) + + when (type) { + RecipientType.EMAIL_GROUP -> builder.field(EMAIL_GROUP_ID_FIELD, emailGroupID) + RecipientType.EMAIL -> builder.field(EMAIL_FIELD, email) + } + + return builder.endObject() + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeEnum(type) + out.writeOptionalString(emailGroupID) + out.writeOptionalString(email) + } + + enum class RecipientType(val value: String) { + EMAIL("email"), + EMAIL_GROUP("email_group") + } + + companion object { + const val TYPE_FIELD = "type" + const val EMAIL_GROUP_ID_FIELD = "email_group_id" + const val EMAIL_FIELD = "email" + + @JvmStatic + @Throws(IOException::class) + fun parse(xcp: XContentParser): Recipient { + lateinit var type: String + var emailGroupID: String? = null + var email: String? 
= null + + ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp) + while (xcp.nextToken() != Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + + when (fieldName) { + TYPE_FIELD -> { + type = xcp.text() + val allowedTypes = RecipientType.values().map { it.value } + if (!allowedTypes.contains(type)) { + throw IllegalStateException("Type should be one of $allowedTypes") + } + } + EMAIL_GROUP_ID_FIELD -> emailGroupID = xcp.text() + EMAIL_FIELD -> email = xcp.text() + } + } + + return Recipient( + RecipientType.valueOf(type.uppercase(Locale.ROOT)), + emailGroupID, + email + ) + } + + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): Recipient { + return Recipient(sin) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/email/EmailAccount.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/email/EmailAccount.kt new file mode 100644 index 000000000..ea18e76a0 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/model/destination/email/EmailAccount.kt @@ -0,0 +1,175 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model.destination.email + +import org.opensearch.alerting.util.isValidEmail +import org.opensearch.commons.alerting.util.IndexUtils.Companion.NO_SCHEMA_VERSION +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.common.settings.SecureString +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParser.Token +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken +import java.io.IOException + +/** + * A value object that represents an Email Account. Email Accounts contain the configuration + * information for sender emails when sending email messages through the Email destination. + */ +data class EmailAccount( + val id: String = NO_ID, + val version: Long = NO_VERSION, + val schemaVersion: Int = NO_SCHEMA_VERSION, + val name: String, + val email: String, + val host: String, + val port: Int, + val method: MethodType, + val username: SecureString? = null, + val password: SecureString? = null +) : Writeable, ToXContent { + + init { + // Excluding dashes (-) from valid names for EmailAccount since the name is used + // to namespace the associated OpenSearch keystore settings and dashes do not work for those settings. + val validNamePattern = Regex("[A-Z0-9_]+", RegexOption.IGNORE_CASE) + require(validNamePattern.matches(name)) { + "Invalid email account name. Valid characters are upper and lowercase a-z, 0-9, and _ (underscore)." 
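+ // For example, a name like "ops_team" is accepted while "ops-team" is rejected, since a dash + // cannot appear in the keystore setting names derived from this account name.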
+ } + + require(isValidEmail(email)) { "Invalid email" } + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + if (params.paramAsBoolean("with_type", false)) builder.startObject(EMAIL_ACCOUNT_TYPE) + builder.field(SCHEMA_VERSION, schemaVersion) + .field(NAME_FIELD, name) + .field(EMAIL_FIELD, email) + .field(HOST_FIELD, host) + .field(PORT_FIELD, port) + .field(METHOD_FIELD, method.value) + if (params.paramAsBoolean("with_type", false)) builder.endObject() + return builder.endObject() + } + + fun toXContent(builder: XContentBuilder): XContentBuilder { + return toXContent(builder, ToXContent.EMPTY_PARAMS) + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeString(id) + out.writeLong(version) + out.writeInt(schemaVersion) + out.writeString(name) + out.writeString(email) + out.writeString(host) + out.writeInt(port) + out.writeEnum(method) + out.writeOptionalSecureString(username) + out.writeOptionalSecureString(password) + } + + enum class MethodType(val value: String) { + NONE("none"), + SSL("ssl"), + TLS("starttls"); + + companion object { + private val values = values() + + // Created this method since MethodType value does not necessarily match enum name + fun getByValue(value: String) = values.firstOrNull { it.value == value } + } + } + + companion object { + const val EMAIL_ACCOUNT_TYPE = "email_account" + const val NO_ID = "" + const val NO_VERSION = 1L + const val SCHEMA_VERSION = "schema_version" + const val NAME_FIELD = "name" + const val EMAIL_FIELD = "email" + const val HOST_FIELD = "host" + const val PORT_FIELD = "port" + const val METHOD_FIELD = "method" + + @JvmStatic + @Throws(IOException::class) + fun parse(xcp: XContentParser, id: String = NO_ID, version: Long = NO_VERSION): EmailAccount { + var schemaVersion = NO_SCHEMA_VERSION + lateinit var name: String + lateinit var email: String + lateinit var host: String + var port: Int = -1 + lateinit var method: String + + ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp) + while (xcp.nextToken() != Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + + when (fieldName) { + SCHEMA_VERSION -> schemaVersion = xcp.intValue() + NAME_FIELD -> name = xcp.text() + EMAIL_FIELD -> email = xcp.text() + HOST_FIELD -> host = xcp.text() + PORT_FIELD -> port = xcp.intValue() + METHOD_FIELD -> { + method = xcp.text() + val allowedMethods = MethodType.values().map { it.value } + if (!allowedMethods.contains(method)) { + throw IllegalStateException("Method should be one of $allowedMethods") + } + } + } + } + + return EmailAccount( + id, + version, + schemaVersion, + name, + email, + host, + port, + requireNotNull(MethodType.getByValue(method)) { "Method type was null" } + ) + } + + @JvmStatic + @Throws(IOException::class) + fun parseWithType(xcp: XContentParser, id: String = NO_ID, version: Long = NO_VERSION): EmailAccount { + ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp) + ensureExpectedToken(Token.FIELD_NAME, xcp.nextToken(), xcp) + ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp) + val emailAccount = parse(xcp, id, version) + ensureExpectedToken(Token.END_OBJECT, xcp.nextToken(), xcp) + return emailAccount + } + + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): EmailAccount { + return EmailAccount( + sin.readString(), // id + sin.readLong(), // version + sin.readInt(), // schemaVersion + sin.readString(), // name + sin.readString(), // 
email + sin.readString(), // host + sin.readInt(), // port + sin.readEnum(MethodType::class.java), // method + sin.readOptionalSecureString(), // username + sin.readOptionalSecureString() // password + ) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/email/EmailGroup.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/email/EmailGroup.kt new file mode 100644 index 000000000..a960da5f5 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/model/destination/email/EmailGroup.kt @@ -0,0 +1,190 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model.destination.email + +import org.opensearch.alerting.util.isValidEmail +import org.opensearch.commons.alerting.util.IndexUtils.Companion.NO_SCHEMA_VERSION +import org.opensearch.core.common.Strings +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParser.Token +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken +import java.io.IOException + +/** + * A value object that represents a group of recipient emails to send emails to. + */ +data class EmailGroup( + val id: String = NO_ID, + val version: Long = NO_VERSION, + val schemaVersion: Int = NO_SCHEMA_VERSION, + val name: String, + val emails: List +) : Writeable, ToXContent { + + init { + val validNamePattern = Regex("[A-Z0-9_-]+", RegexOption.IGNORE_CASE) + require(validNamePattern.matches(name)) { + "Invalid email group name. Valid characters are upper and lowercase a-z, 0-9, _ (underscore) and - (hyphen)." 
+ } + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + if (params.paramAsBoolean("with_type", false)) builder.startObject(EMAIL_GROUP_TYPE) + builder.field(SCHEMA_VERSION, schemaVersion) + .field(NAME_FIELD, name) + .field(EMAILS_FIELD, emails.toTypedArray()) + if (params.paramAsBoolean("with_type", false)) builder.endObject() + return builder.endObject() + } + + fun toXContent(builder: XContentBuilder): XContentBuilder { + return toXContent(builder, ToXContent.EMPTY_PARAMS) + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeString(id) + out.writeLong(version) + out.writeInt(schemaVersion) + out.writeString(name) + out.writeCollection(emails) + } + + fun getEmailsAsListOfString(): List { + val emailsAsListOfString: MutableList = mutableListOf() + emails.map { emailsAsListOfString.add(it.email) } + return emailsAsListOfString + } + + companion object { + const val EMAIL_GROUP_TYPE = "email_group" + const val NO_ID = "" + const val NO_VERSION = 1L + const val SCHEMA_VERSION = "schema_version" + const val NAME_FIELD = "name" + const val EMAILS_FIELD = "emails" + + @JvmStatic + @Throws(IOException::class) + fun parse(xcp: XContentParser, id: String = NO_ID, version: Long = NO_VERSION): EmailGroup { + var schemaVersion = NO_SCHEMA_VERSION + lateinit var name: String + val emails: MutableList = mutableListOf() + + ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp) + while (xcp.nextToken() != Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + + when (fieldName) { + SCHEMA_VERSION -> schemaVersion = xcp.intValue() + NAME_FIELD -> name = xcp.text() + EMAILS_FIELD -> { + ensureExpectedToken(Token.START_ARRAY, xcp.currentToken(), xcp) + while (xcp.nextToken() != Token.END_ARRAY) { + emails.add(EmailEntry.parse(xcp)) + } + } + else -> { + throw IllegalStateException("Unexpected field: $fieldName, while parsing email group") + } + } + } + + return EmailGroup( + id, + version, + schemaVersion, + requireNotNull(name) { "Email group name is null" }, + emails + ) + } + + @JvmStatic + @Throws(IOException::class) + fun parseWithType(xcp: XContentParser, id: String = NO_ID, version: Long = NO_VERSION): EmailGroup { + ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp) + ensureExpectedToken(Token.FIELD_NAME, xcp.nextToken(), xcp) + ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp) + val emailGroup = parse(xcp, id, version) + ensureExpectedToken(Token.END_OBJECT, xcp.nextToken(), xcp) + return emailGroup + } + + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): EmailGroup { + return EmailGroup( + sin.readString(), // id + sin.readLong(), // version + sin.readInt(), // schemaVersion + sin.readString(), // name + sin.readList(::EmailEntry) // emails + ) + } + } +} + +data class EmailEntry(val email: String) : Writeable, ToXContent { + + init { + require(!Strings.isEmpty(email)) { "Email entry must have a non-empty email" } + require(isValidEmail(email)) { "Invalid email" } + } + + @Throws(IOException::class) + constructor(sin: StreamInput) : this( + sin.readString() // email + ) + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + return builder.startObject() + .field(EMAIL_FIELD, email) + .endObject() + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + out.writeString(email) + } + + companion object { + const val EMAIL_FIELD = 
"email" + + @JvmStatic + @Throws(IOException::class) + fun parse(xcp: XContentParser): EmailEntry { + lateinit var email: String + + ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp) + while (xcp.nextToken() != Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + + when (fieldName) { + EMAIL_FIELD -> email = xcp.text() + else -> { + throw IllegalStateException("Unexpected field: $fieldName, while parsing email entry") + } + } + } + + return EmailEntry(email) + } + + @JvmStatic + @Throws(IOException::class) + fun readFrom(sin: StreamInput): EmailEntry { + return EmailEntry(sin) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/org.opensearch.alerting.txt b/alerting/bin/main/org/opensearch/alerting/org.opensearch.alerting.txt new file mode 100644 index 000000000..bd1f94482 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/org.opensearch.alerting.txt @@ -0,0 +1,52 @@ + # Copyright OpenSearch Contributors +# SPDX-License-Identifier: Apache-2.0 + +# Painless definition of classes used by alerting plugin + +class org.opensearch.alerting.script.TriggerScript { + Map getParams() + boolean execute(QueryLevelTriggerExecutionContext) + String[] PARAMETERS +} + +class org.opensearch.alerting.script.TriggerScript$Factory { + TriggerScript newInstance(Map) +} + +class org.opensearch.alerting.script.TriggerExecutionContext { + Monitor getMonitor() + List getResults() + java.time.Instant getPeriodStart() + java.time.Instant getPeriodEnd() + Exception getError() +} + +class org.opensearch.alerting.script.QueryLevelTriggerExecutionContext { + Monitor getMonitor() + QueryLevelTrigger getTrigger() + List getResults() + java.time.Instant getPeriodStart() + java.time.Instant getPeriodEnd() + Alert getAlert() + Exception getError() +} + +class org.opensearch.commons.alerting.model.Monitor { + String getId() + long getVersion() + String getName() + boolean getEnabled() +} + +class org.opensearch.commons.alerting.model.QueryLevelTrigger { + String getId() + String getName() + String getSeverity() + List getActions() +} + +class org.opensearch.commons.alerting.model.Alert { + String getId() + long getVersion() + boolean isAcknowledged() +} \ No newline at end of file diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/AsyncActionHandler.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/AsyncActionHandler.kt new file mode 100644 index 000000000..ae837d8d9 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/resthandler/AsyncActionHandler.kt @@ -0,0 +1,17 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.opensearch.client.node.NodeClient +import org.opensearch.rest.BytesRestResponse +import org.opensearch.rest.RestChannel + +abstract class AsyncActionHandler(protected val client: NodeClient, protected val channel: RestChannel) { + + protected fun onFailure(e: Exception) { + channel.sendResponse(BytesRestResponse(channel, e)) + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestAcknowledgeAlertAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestAcknowledgeAlertAction.kt new file mode 100644 index 000000000..f953876a4 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/resthandler/RestAcknowledgeAlertAction.kt @@ -0,0 +1,92 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import 
org.apache.logging.log4j.LogManager +import org.apache.logging.log4j.Logger +import org.opensearch.action.support.WriteRequest.RefreshPolicy +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.util.REFRESH +import org.opensearch.client.node.NodeClient +import org.opensearch.commons.alerting.action.AcknowledgeAlertRequest +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.BaseRestHandler.RestChannelConsumer +import org.opensearch.rest.RestHandler.ReplacedRoute +import org.opensearch.rest.RestHandler.Route +import org.opensearch.rest.RestRequest +import org.opensearch.rest.RestRequest.Method.POST +import org.opensearch.rest.action.RestToXContentListener +import java.io.IOException + +private val log: Logger = LogManager.getLogger(RestAcknowledgeAlertAction::class.java) + +/** + * This class consists of the REST handler to acknowledge alerts. + * The user provides the monitorID to which these alerts pertain and in the content of the request provides + * the ids to the alerts he would like to acknowledge. + */ +class RestAcknowledgeAlertAction : BaseRestHandler() { + + override fun getName(): String { + return "acknowledge_alert_action" + } + + override fun routes(): List { + return listOf() + } + + override fun replacedRoutes(): MutableList { + // Acknowledge alerts + return mutableListOf( + ReplacedRoute( + POST, + "${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}/_acknowledge/alerts", + POST, + "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/{monitorID}/_acknowledge/alerts" + ) + ) + } + + @Throws(IOException::class) + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + log.debug("${request.method()} ${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}/_acknowledge/alerts") + + val monitorId = request.param("monitorID") + require(!monitorId.isNullOrEmpty()) { "Missing monitor id." } + val alertIds = getAlertIds(request.contentParser()) + require(alertIds.isNotEmpty()) { "You must provide at least one alert id." 
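+ // The request body parsed by getAlertIds() below is expected to be a JSON object with an "alerts" array, + // e.g. {"alerts": ["<alert id 1>", "<alert id 2>"]} (placeholder ids shown).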
} + val refreshPolicy = RefreshPolicy.parse(request.param(REFRESH, RefreshPolicy.IMMEDIATE.value)) + + val acknowledgeAlertRequest = AcknowledgeAlertRequest(monitorId, alertIds, refreshPolicy) + return RestChannelConsumer { channel -> + client.execute(AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE, acknowledgeAlertRequest, RestToXContentListener(channel)) + } + } + + /** + * Parse the request content and return a list of the alert ids to acknowledge + */ + private fun getAlertIds(xcp: XContentParser): List<String> { + val ids = mutableListOf<String>() + ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + when (fieldName) { + "alerts" -> { + ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { + ids.add(xcp.text()) + } + } + } + } + return ids + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestAcknowledgeChainedAlertsAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestAcknowledgeChainedAlertsAction.kt new file mode 100644 index 000000000..968856a48 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/resthandler/RestAcknowledgeChainedAlertsAction.kt @@ -0,0 +1,82 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.logging.log4j.LogManager +import org.apache.logging.log4j.Logger +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.client.node.NodeClient +import org.opensearch.commons.alerting.action.AcknowledgeChainedAlertRequest +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.BaseRestHandler.RestChannelConsumer +import org.opensearch.rest.RestHandler.Route +import org.opensearch.rest.RestRequest +import org.opensearch.rest.RestRequest.Method.POST +import org.opensearch.rest.action.RestToXContentListener +import java.io.IOException + +private val log: Logger = LogManager.getLogger(RestAcknowledgeChainedAlertAction::class.java) + +/** + * This class consists of the REST handler to acknowledge chained alerts. + * The user provides the workflowID to which these alerts pertain and in the content of the request provides + * the ids of the chained alerts the user would like to acknowledge. + */ +class RestAcknowledgeChainedAlertAction : BaseRestHandler() { + + override fun getName(): String { + return "acknowledge_chained_alert_action" + } + + override fun routes(): List<Route> { + // Acknowledge alerts + return mutableListOf( + Route( + POST, + "${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}/_acknowledge/alerts" + ) + ) + } + + @Throws(IOException::class) + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + log.debug("${request.method()} ${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}/_acknowledge/alerts") + + val workflowId = request.param("workflowID") + require(!workflowId.isNullOrEmpty()) { "Missing workflow id." } + val alertIds = getAlertIds(request.contentParser()) + require(alertIds.isNotEmpty()) { "You must provide at least one alert id."
} + + val acknowledgeAlertRequest = AcknowledgeChainedAlertRequest(workflowId, alertIds) + return RestChannelConsumer { channel -> + client.execute(AlertingActions.ACKNOWLEDGE_CHAINED_ALERTS_ACTION_TYPE, acknowledgeAlertRequest, RestToXContentListener(channel)) + } + } + + /** + * Parse the request content and return a list of the alert ids to acknowledge + */ + private fun getAlertIds(xcp: XContentParser): List { + val ids = mutableListOf() + ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + val fieldName = xcp.currentName() + xcp.nextToken() + when (fieldName) { + "alerts" -> { + ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) + while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { + ids.add(xcp.text()) + } + } + } + } + return ids + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestDeleteMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestDeleteMonitorAction.kt new file mode 100644 index 000000000..a2276349e --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/resthandler/RestDeleteMonitorAction.kt @@ -0,0 +1,66 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ +package org.opensearch.alerting.resthandler + +import org.apache.logging.log4j.LogManager +import org.apache.logging.log4j.Logger +import org.opensearch.action.support.WriteRequest.RefreshPolicy +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.util.REFRESH +import org.opensearch.client.node.NodeClient +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.DeleteMonitorRequest +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.BaseRestHandler.RestChannelConsumer +import org.opensearch.rest.RestHandler.ReplacedRoute +import org.opensearch.rest.RestHandler.Route +import org.opensearch.rest.RestRequest +import org.opensearch.rest.RestRequest.Method.DELETE +import org.opensearch.rest.action.RestToXContentListener +import java.io.IOException + +private val log: Logger = LogManager.getLogger(RestDeleteMonitorAction::class.java) + +/** + * This class consists of the REST handler to delete monitors. + * When a monitor is deleted, all alerts are moved to the [Alert.State.DELETED] state and moved to the alert history index. + * If this process fails the monitor is not deleted. 
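+ * Example call (assuming the standard monitor base URI): DELETE /_plugins/_alerting/monitors/{monitorID}?refresh=wait_for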
+ */ +class RestDeleteMonitorAction : BaseRestHandler() { + + override fun getName(): String { + return "delete_monitor_action" + } + + override fun routes(): List { + return listOf() + } + + override fun replacedRoutes(): MutableList { + return mutableListOf( + ReplacedRoute( + DELETE, + "${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}", + DELETE, + "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/{monitorID}" + ) + ) + } + + @Throws(IOException::class) + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + log.debug("${request.method()} ${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}") + + val monitorId = request.param("monitorID") + log.debug("${request.method()} ${AlertingPlugin.MONITOR_BASE_URI}/$monitorId") + + val refreshPolicy = RefreshPolicy.parse(request.param(REFRESH, RefreshPolicy.IMMEDIATE.value)) + val deleteMonitorRequest = DeleteMonitorRequest(monitorId, refreshPolicy) + + return RestChannelConsumer { channel -> + client.execute(AlertingActions.DELETE_MONITOR_ACTION_TYPE, deleteMonitorRequest, RestToXContentListener(channel)) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestDeleteWorkflowAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestDeleteWorkflowAction.kt new file mode 100644 index 000000000..a61a9b51c --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/resthandler/RestDeleteWorkflowAction.kt @@ -0,0 +1,60 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.logging.log4j.LogManager +import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.util.REFRESH +import org.opensearch.client.node.NodeClient +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.DeleteWorkflowRequest +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.RestHandler +import org.opensearch.rest.RestRequest +import org.opensearch.rest.action.RestToXContentListener +import java.io.IOException + +/** + * This class consists of the REST handler to delete workflows. 
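+ * The optional deleteDelegateMonitors flag is forwarded in the request so the workflow's delegate monitors + * can be removed as well, e.g. (base URI assumed): DELETE /_plugins/_alerting/workflows/{workflowID}?deleteDelegateMonitors=true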
+ */ +class RestDeleteWorkflowAction : BaseRestHandler() { + + private val log = LogManager.getLogger(javaClass) + + override fun getName(): String { + return "delete_workflow_action" + } + + override fun routes(): List { + return listOf( + RestHandler.Route( + RestRequest.Method.DELETE, + "${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}" + ) + ) + } + + @Throws(IOException::class) + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + log.debug("${request.method()} ${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}") + + val workflowId = request.param("workflowID") + val deleteDelegateMonitors = request.paramAsBoolean("deleteDelegateMonitors", false) + log.debug("${request.method()} ${request.uri()}") + + val refreshPolicy = + WriteRequest.RefreshPolicy.parse(request.param(REFRESH, WriteRequest.RefreshPolicy.IMMEDIATE.value)) + val deleteWorkflowRequest = DeleteWorkflowRequest(workflowId, deleteDelegateMonitors) + + return RestChannelConsumer { channel -> + client.execute( + AlertingActions.DELETE_WORKFLOW_ACTION_TYPE, deleteWorkflowRequest, + RestToXContentListener(channel) + ) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestExecuteMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestExecuteMonitorAction.kt new file mode 100644 index 000000000..740dcb2d6 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/resthandler/RestExecuteMonitorAction.kt @@ -0,0 +1,77 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.logging.log4j.LogManager +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.action.ExecuteMonitorAction +import org.opensearch.alerting.action.ExecuteMonitorRequest +import org.opensearch.client.node.NodeClient +import org.opensearch.common.unit.TimeValue +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.core.xcontent.XContentParser.Token.START_OBJECT +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.BaseRestHandler.RestChannelConsumer +import org.opensearch.rest.RestHandler.ReplacedRoute +import org.opensearch.rest.RestHandler.Route +import org.opensearch.rest.RestRequest +import org.opensearch.rest.RestRequest.Method.POST +import org.opensearch.rest.action.RestToXContentListener +import java.time.Instant + +private val log = LogManager.getLogger(RestExecuteMonitorAction::class.java) + +class RestExecuteMonitorAction : BaseRestHandler() { + + override fun getName(): String = "execute_monitor_action" + + override fun routes(): List { + return listOf() + } + + override fun replacedRoutes(): MutableList { + return mutableListOf( + ReplacedRoute( + POST, + "${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}/_execute", + POST, + "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/{monitorID}/_execute" + ), + ReplacedRoute( + POST, + "${AlertingPlugin.MONITOR_BASE_URI}/_execute", + POST, + "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/_execute" + ) + ) + } + + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + log.debug("${request.method()} ${AlertingPlugin.MONITOR_BASE_URI}/_execute") + + return RestChannelConsumer { channel -> + val dryrun = request.paramAsBoolean("dryrun", false) + val requestEnd = request.paramAsTime("period_end", TimeValue(Instant.now().toEpochMilli())) + 
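+ // Both branches below build the same ExecuteMonitorRequest; they differ only in whether the monitor is + // resolved from an existing ID or parsed inline from the request body. When period_end is absent it + // falls back to the current time, per the default above, and dryrun defaults to false.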
+ if (request.hasParam("monitorID")) { + val monitorId = request.param("monitorID") + val execMonitorRequest = ExecuteMonitorRequest(dryrun, requestEnd, monitorId, null) + client.execute(ExecuteMonitorAction.INSTANCE, execMonitorRequest, RestToXContentListener(channel)) + } else { + val xcp = request.contentParser() + ensureExpectedToken(START_OBJECT, xcp.nextToken(), xcp) + val monitor = Monitor.parse(xcp, Monitor.NO_ID, Monitor.NO_VERSION) + val execMonitorRequest = ExecuteMonitorRequest(dryrun, requestEnd, null, monitor) + client.execute(ExecuteMonitorAction.INSTANCE, execMonitorRequest, RestToXContentListener(channel)) + } + } + } + + override fun responseParams(): Set { + return setOf("dryrun", "period_end", "monitorID") + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestExecuteWorkflowAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestExecuteWorkflowAction.kt new file mode 100644 index 000000000..de8da1bac --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/resthandler/RestExecuteWorkflowAction.kt @@ -0,0 +1,59 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.logging.log4j.LogManager +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.action.ExecuteWorkflowAction +import org.opensearch.alerting.action.ExecuteWorkflowRequest +import org.opensearch.client.node.NodeClient +import org.opensearch.common.unit.TimeValue +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.RestHandler +import org.opensearch.rest.RestRequest +import org.opensearch.rest.action.RestToXContentListener +import java.time.Instant + +private val log = LogManager.getLogger(RestExecuteWorkflowAction::class.java) + +class RestExecuteWorkflowAction : BaseRestHandler() { + + override fun getName(): String = "execute_workflow_action" + + override fun routes(): List { + return listOf( + RestHandler.Route(RestRequest.Method.POST, "${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}/_execute") + ) + } + + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + log.debug("${request.method()} ${AlertingPlugin.WORKFLOW_BASE_URI}/_execute") + + return RestChannelConsumer { channel -> + val dryrun = request.paramAsBoolean("dryrun", false) + val requestEnd = request.paramAsTime("period_end", TimeValue(Instant.now().toEpochMilli())) + + if (request.hasParam("workflowID")) { + val workflowId = request.param("workflowID") + val execWorkflowRequest = ExecuteWorkflowRequest(dryrun, requestEnd, workflowId, null) + client.execute(ExecuteWorkflowAction.INSTANCE, execWorkflowRequest, RestToXContentListener(channel)) + } else { + val xcp = request.contentParser() + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + val workflow = Workflow.parse(xcp, Workflow.NO_ID, Workflow.NO_VERSION) + val execWorkflowRequest = ExecuteWorkflowRequest(dryrun, requestEnd, null, workflow) + client.execute(ExecuteWorkflowAction.INSTANCE, execWorkflowRequest, RestToXContentListener(channel)) + } + } + } + + override fun responseParams(): Set { + return setOf("dryrun", "period_end", "workflowID") + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetAlertsAction.kt 
b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetAlertsAction.kt new file mode 100644 index 000000000..aabcf8d6c --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetAlertsAction.kt @@ -0,0 +1,82 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.logging.log4j.LogManager +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.client.node.NodeClient +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.GetAlertsRequest +import org.opensearch.commons.alerting.model.Table +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.BaseRestHandler.RestChannelConsumer +import org.opensearch.rest.RestHandler.ReplacedRoute +import org.opensearch.rest.RestHandler.Route +import org.opensearch.rest.RestRequest +import org.opensearch.rest.RestRequest.Method.GET +import org.opensearch.rest.action.RestToXContentListener + +/** + * This class consists of the REST handler to retrieve alerts . + */ +class RestGetAlertsAction : BaseRestHandler() { + + private val log = LogManager.getLogger(RestGetAlertsAction::class.java) + + override fun getName(): String { + return "get_alerts_action" + } + + override fun routes(): List { + return listOf() + } + + override fun replacedRoutes(): MutableList { + return mutableListOf( + ReplacedRoute( + GET, + "${AlertingPlugin.MONITOR_BASE_URI}/alerts", + GET, + "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/alerts" + ) + ) + } + + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + log.debug("${request.method()} ${AlertingPlugin.MONITOR_BASE_URI}/alerts") + + val sortString = request.param("sortString", "monitor_name.keyword") + val sortOrder = request.param("sortOrder", "asc") + val missing: String? = request.param("missing") + val size = request.paramAsInt("size", 20) + val startIndex = request.paramAsInt("startIndex", 0) + val searchString = request.param("searchString", "") + val severityLevel = request.param("severityLevel", "ALL") + val alertState = request.param("alertState", "ALL") + val monitorId: String? = request.param("monitorId") + val workflowId: String? 
= request.param("workflowIds") + val workflowIds = mutableListOf() + if (workflowId.isNullOrEmpty() == false) { + workflowIds.add(workflowId) + } else { + workflowIds.add("") + } + val table = Table( + sortOrder, + sortString, + missing, + size, + startIndex, + searchString + ) + + val getAlertsRequest = GetAlertsRequest(table, severityLevel, alertState, monitorId, null, workflowIds = workflowIds) + return RestChannelConsumer { + channel -> + client.execute(AlertingActions.GET_ALERTS_ACTION_TYPE, getAlertsRequest, RestToXContentListener(channel)) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetDestinationsAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetDestinationsAction.kt new file mode 100644 index 000000000..7e5e1530f --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetDestinationsAction.kt @@ -0,0 +1,96 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.logging.log4j.LogManager +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.action.GetDestinationsAction +import org.opensearch.alerting.action.GetDestinationsRequest +import org.opensearch.alerting.util.context +import org.opensearch.client.node.NodeClient +import org.opensearch.commons.alerting.model.Table +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.BaseRestHandler.RestChannelConsumer +import org.opensearch.rest.RestHandler.ReplacedRoute +import org.opensearch.rest.RestHandler.Route +import org.opensearch.rest.RestRequest +import org.opensearch.rest.action.RestActions +import org.opensearch.rest.action.RestToXContentListener +import org.opensearch.search.fetch.subphase.FetchSourceContext + +/** + * This class consists of the REST handler to retrieve destinations . + */ +class RestGetDestinationsAction : BaseRestHandler() { + + private val log = LogManager.getLogger(RestGetDestinationsAction::class.java) + + override fun getName(): String { + return "get_destinations_action" + } + + override fun routes(): List { + return listOf() + } + + override fun replacedRoutes(): MutableList { + return mutableListOf( + // Get a specific destination + ReplacedRoute( + RestRequest.Method.GET, + "${AlertingPlugin.DESTINATION_BASE_URI}/{destinationID}", + RestRequest.Method.GET, + "${AlertingPlugin.LEGACY_OPENDISTRO_DESTINATION_BASE_URI}/{destinationID}" + ), + ReplacedRoute( + RestRequest.Method.GET, + AlertingPlugin.DESTINATION_BASE_URI, + RestRequest.Method.GET, + AlertingPlugin.LEGACY_OPENDISTRO_DESTINATION_BASE_URI + ) + ) + } + + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + log.debug("${request.method()} ${request.path()}") + + val destinationId: String? = request.param("destinationID") + + var srcContext = context(request) + if (request.method() == RestRequest.Method.HEAD) { + srcContext = FetchSourceContext.DO_NOT_FETCH_SOURCE + } + + val sortString = request.param("sortString", "destination.name.keyword") + val sortOrder = request.param("sortOrder", "asc") + val missing: String? 
= request.param("missing") + val size = request.paramAsInt("size", 20) + val startIndex = request.paramAsInt("startIndex", 0) + val searchString = request.param("searchString", "") + val destinationType = request.param("destinationType", "ALL") + + val table = Table( + sortOrder, + sortString, + missing, + size, + startIndex, + searchString + ) + + val getDestinationsRequest = GetDestinationsRequest( + destinationId, + RestActions.parseVersion(request), + srcContext, + table, + destinationType + ) + return RestChannelConsumer { + channel -> + client.execute(GetDestinationsAction.INSTANCE, getDestinationsRequest, RestToXContentListener(channel)) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetEmailAccountAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetEmailAccountAction.kt new file mode 100644 index 000000000..ba5dbdb8a --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetEmailAccountAction.kt @@ -0,0 +1,68 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.action.GetEmailAccountAction +import org.opensearch.alerting.action.GetEmailAccountRequest +import org.opensearch.alerting.util.context +import org.opensearch.client.node.NodeClient +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.RestHandler.ReplacedRoute +import org.opensearch.rest.RestHandler.Route +import org.opensearch.rest.RestRequest +import org.opensearch.rest.action.RestActions +import org.opensearch.rest.action.RestToXContentListener +import org.opensearch.search.fetch.subphase.FetchSourceContext +import java.lang.IllegalArgumentException + +/** + * Rest handler to retrieve an EmailAccount. 
+ */ +class RestGetEmailAccountAction : BaseRestHandler() { + + override fun getName(): String { + return "get_email_account_action" + } + + override fun routes(): List { + return listOf() + } + + override fun replacedRoutes(): MutableList { + return mutableListOf( + ReplacedRoute( + RestRequest.Method.GET, + "${AlertingPlugin.EMAIL_ACCOUNT_BASE_URI}/{emailAccountID}", + RestRequest.Method.GET, + "${AlertingPlugin.LEGACY_OPENDISTRO_EMAIL_ACCOUNT_BASE_URI}/{emailAccountID}" + ), + ReplacedRoute( + RestRequest.Method.HEAD, + "${AlertingPlugin.EMAIL_ACCOUNT_BASE_URI}/{emailAccountID}", + RestRequest.Method.HEAD, + "${AlertingPlugin.LEGACY_OPENDISTRO_EMAIL_ACCOUNT_BASE_URI}/{emailAccountID}" + ) + ) + } + + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + val emailAccountID = request.param("emailAccountID") + if (emailAccountID == null || emailAccountID.isEmpty()) { + throw IllegalArgumentException("Missing email account ID") + } + + var srcContext = context(request) + if (request.method() == RestRequest.Method.HEAD) { + srcContext = FetchSourceContext.DO_NOT_FETCH_SOURCE + } + + val getEmailAccountRequest = GetEmailAccountRequest(emailAccountID, RestActions.parseVersion(request), request.method(), srcContext) + return RestChannelConsumer { channel -> + client.execute(GetEmailAccountAction.INSTANCE, getEmailAccountRequest, RestToXContentListener(channel)) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetEmailGroupAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetEmailGroupAction.kt new file mode 100644 index 000000000..7fe37c17c --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetEmailGroupAction.kt @@ -0,0 +1,68 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.action.GetEmailGroupAction +import org.opensearch.alerting.action.GetEmailGroupRequest +import org.opensearch.alerting.util.context +import org.opensearch.client.node.NodeClient +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.RestHandler.ReplacedRoute +import org.opensearch.rest.RestHandler.Route +import org.opensearch.rest.RestRequest +import org.opensearch.rest.action.RestActions +import org.opensearch.rest.action.RestToXContentListener +import org.opensearch.search.fetch.subphase.FetchSourceContext +import java.lang.IllegalArgumentException + +/** + * Rest handlers to retrieve an EmailGroup + */ +class RestGetEmailGroupAction : BaseRestHandler() { + + override fun getName(): String { + return "get_email_group_action" + } + + override fun routes(): List { + return listOf() + } + + override fun replacedRoutes(): MutableList { + return mutableListOf( + ReplacedRoute( + RestRequest.Method.GET, + "${AlertingPlugin.EMAIL_GROUP_BASE_URI}/{emailGroupID}", + RestRequest.Method.GET, + "${AlertingPlugin.LEGACY_OPENDISTRO_EMAIL_GROUP_BASE_URI}/{emailGroupID}" + ), + ReplacedRoute( + RestRequest.Method.HEAD, + "${AlertingPlugin.EMAIL_GROUP_BASE_URI}/{emailGroupID}", + RestRequest.Method.HEAD, + "${AlertingPlugin.LEGACY_OPENDISTRO_EMAIL_GROUP_BASE_URI}/{emailGroupID}" + ) + ) + } + + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + val emailGroupID = request.param("emailGroupID") + if (emailGroupID == null || emailGroupID.isEmpty()) { + throw IllegalArgumentException("Missing email 
group ID") + } + + var srcContext = context(request) + if (request.method() == RestRequest.Method.HEAD) { + srcContext = FetchSourceContext.DO_NOT_FETCH_SOURCE + } + + val getEmailGroupRequest = GetEmailGroupRequest(emailGroupID, RestActions.parseVersion(request), request.method(), srcContext) + return RestChannelConsumer { channel -> + client.execute(GetEmailGroupAction.INSTANCE, getEmailGroupRequest, RestToXContentListener(channel)) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetFindingsAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetFindingsAction.kt new file mode 100644 index 000000000..75607a701 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetFindingsAction.kt @@ -0,0 +1,67 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.logging.log4j.LogManager +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.client.node.NodeClient +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.GetFindingsRequest +import org.opensearch.commons.alerting.model.Table +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.BaseRestHandler.RestChannelConsumer +import org.opensearch.rest.RestHandler.Route +import org.opensearch.rest.RestRequest +import org.opensearch.rest.RestRequest.Method.GET +import org.opensearch.rest.action.RestToXContentListener + +/** + * This class consists of the REST handler to search findings . + */ +class RestGetFindingsAction : BaseRestHandler() { + + private val log = LogManager.getLogger(RestGetFindingsAction::class.java) + + override fun getName(): String { + return "get_findings_action" + } + + override fun routes(): List { + return listOf( + Route(GET, "${AlertingPlugin.FINDING_BASE_URI}/_search") + ) + } + + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + log.info("${request.method()} ${request.path()}") + + val findingID: String? = request.param("findingId") + val sortString = request.param("sortString", "id") + val sortOrder = request.param("sortOrder", "asc") + val missing: String? 
= request.param("missing") + val size = request.paramAsInt("size", 20) + val startIndex = request.paramAsInt("startIndex", 0) + val searchString = request.param("searchString", "") + + val table = Table( + sortOrder, + sortString, + missing, + size, + startIndex, + searchString + ) + + val getFindingsSearchRequest = GetFindingsRequest( + findingID, + table + ) + return RestChannelConsumer { + channel -> + client.execute(AlertingActions.GET_FINDINGS_ACTION_TYPE, getFindingsSearchRequest, RestToXContentListener(channel)) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetMonitorAction.kt new file mode 100644 index 000000000..54270b717 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetMonitorAction.kt @@ -0,0 +1,75 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ +package org.opensearch.alerting.resthandler + +import org.apache.logging.log4j.LogManager +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.util.context +import org.opensearch.client.node.NodeClient +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.GetMonitorRequest +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.BaseRestHandler.RestChannelConsumer +import org.opensearch.rest.RestHandler.ReplacedRoute +import org.opensearch.rest.RestHandler.Route +import org.opensearch.rest.RestRequest +import org.opensearch.rest.RestRequest.Method.GET +import org.opensearch.rest.RestRequest.Method.HEAD +import org.opensearch.rest.action.RestActions +import org.opensearch.rest.action.RestToXContentListener +import org.opensearch.search.fetch.subphase.FetchSourceContext + +private val log = LogManager.getLogger(RestGetMonitorAction::class.java) + +/** + * This class consists of the REST handler to retrieve a monitor . 
+ */ +class RestGetMonitorAction : BaseRestHandler() { + + override fun getName(): String { + return "get_monitor_action" + } + + override fun routes(): List { + return listOf() + } + + override fun replacedRoutes(): MutableList { + return mutableListOf( + // Get a specific monitor + ReplacedRoute( + GET, + "${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}", + GET, + "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/{monitorID}" + ), + ReplacedRoute( + HEAD, + "${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}", + HEAD, + "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/{monitorID}" + ) + ) + } + + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + log.debug("${request.method()} ${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}") + + val monitorId = request.param("monitorID") + if (monitorId == null || monitorId.isEmpty()) { + throw IllegalArgumentException("missing id") + } + + var srcContext = context(request) + if (request.method() == HEAD) { + srcContext = FetchSourceContext.DO_NOT_FETCH_SOURCE + } + val getMonitorRequest = GetMonitorRequest(monitorId, RestActions.parseVersion(request), request.method(), srcContext) + return RestChannelConsumer { + channel -> + client.execute(AlertingActions.GET_MONITOR_ACTION_TYPE, getMonitorRequest, RestToXContentListener(channel)) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetWorkflowAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetWorkflowAction.kt new file mode 100644 index 000000000..1a2ca4426 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetWorkflowAction.kt @@ -0,0 +1,59 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.logging.log4j.LogManager +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.util.context +import org.opensearch.client.node.NodeClient +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.GetWorkflowRequest +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.RestHandler +import org.opensearch.rest.RestRequest +import org.opensearch.rest.action.RestToXContentListener +import org.opensearch.search.fetch.subphase.FetchSourceContext + +/** + * This class consists of the REST handler to retrieve a workflow . 
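+ *
+ * A minimal usage sketch ("someWorkflowId" is a placeholder): a GET on
+ * ${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID} is translated into
+ * ```
+ * val getWorkflowRequest = GetWorkflowRequest("someWorkflowId", RestRequest.Method.GET)
+ * client.execute(AlertingActions.GET_WORKFLOW_ACTION_TYPE, getWorkflowRequest, RestToXContentListener(channel))
+ * ```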
+ */ +class RestGetWorkflowAction : BaseRestHandler() { + + private val log = LogManager.getLogger(javaClass) + + override fun getName(): String { + return "get_workflow_action" + } + + override fun routes(): List { + return listOf( + RestHandler.Route( + RestRequest.Method.GET, + "${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}" + ) + ) + } + + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + log.debug("${request.method()} ${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}") + + val workflowId = request.param("workflowID") + if (workflowId == null || workflowId.isEmpty()) { + throw IllegalArgumentException("missing id") + } + + var srcContext = context(request) + if (request.method() == RestRequest.Method.HEAD) { + srcContext = FetchSourceContext.DO_NOT_FETCH_SOURCE + } + val getWorkflowRequest = + GetWorkflowRequest(workflowId, request.method()) + return RestChannelConsumer { + channel -> + client.execute(AlertingActions.GET_WORKFLOW_ACTION_TYPE, getWorkflowRequest, RestToXContentListener(channel)) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetWorkflowAlertsAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetWorkflowAlertsAction.kt new file mode 100644 index 000000000..474c32d4a --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetWorkflowAlertsAction.kt @@ -0,0 +1,92 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.logging.log4j.LogManager +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.client.node.NodeClient +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.GetWorkflowAlertsRequest +import org.opensearch.commons.alerting.model.Table +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.BaseRestHandler.RestChannelConsumer +import org.opensearch.rest.RestHandler.ReplacedRoute +import org.opensearch.rest.RestHandler.Route +import org.opensearch.rest.RestRequest +import org.opensearch.rest.RestRequest.Method.GET +import org.opensearch.rest.action.RestToXContentListener + +/** + * This class consists of the REST handler to retrieve chained alerts by workflow id. + */ +class RestGetWorkflowAlertsAction : BaseRestHandler() { + + private val log = LogManager.getLogger(RestGetWorkflowAlertsAction::class.java) + + override fun getName(): String { + return "get_workflow_alerts_action" + } + + override fun routes(): List { + return mutableListOf( + Route( + GET, + "${AlertingPlugin.WORKFLOW_BASE_URI}/alerts" + ) + ) + } + + override fun replacedRoutes(): MutableList { + return mutableListOf() + } + + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + log.debug("${request.method()} ${AlertingPlugin.WORKFLOW_BASE_URI}/alerts") + + val sortString = request.param("sortString", "monitor_name.keyword") + val sortOrder = request.param("sortOrder", "asc") + val missing: String? = request.param("missing") + val size = request.paramAsInt("size", 20) + val startIndex = request.paramAsInt("startIndex", 0) + val searchString = request.param("searchString", "") + val severityLevel = request.param("severityLevel", "ALL") + val alertState = request.param("alertState", "ALL") + val workflowId: String? = request.param("workflowIds") + val alertId: String? 
= request.param("alertIds") + val getAssociatedAlerts: Boolean = request.param("getAssociatedAlerts", "false").toBoolean() + val workflowIds = mutableListOf() + if (workflowId.isNullOrEmpty() == false) { + workflowIds.add(workflowId) + } + val alertIds = mutableListOf() + if (alertId.isNullOrEmpty() == false) { + alertIds.add(alertId) + } + val table = Table( + sortOrder, + sortString, + missing, + size, + startIndex, + searchString + ) + + val getWorkflowAlertsRequest = GetWorkflowAlertsRequest( + table, + severityLevel, + alertState, + alertIndex = null, + associatedAlertsIndex = null, + workflowIds = workflowIds, + monitorIds = emptyList(), + getAssociatedAlerts = getAssociatedAlerts, + alertIds = alertIds + ) + return RestChannelConsumer { channel -> + client.execute(AlertingActions.GET_WORKFLOW_ALERTS_ACTION_TYPE, getWorkflowAlertsRequest, RestToXContentListener(channel)) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt new file mode 100644 index 000000000..8b1a1f78c --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt @@ -0,0 +1,174 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ +package org.opensearch.alerting.resthandler + +import org.apache.logging.log4j.LogManager +import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.alerts.AlertIndices +import org.opensearch.alerting.util.IF_PRIMARY_TERM +import org.opensearch.alerting.util.IF_SEQ_NO +import org.opensearch.alerting.util.REFRESH +import org.opensearch.client.node.NodeClient +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.IndexMonitorRequest +import org.opensearch.commons.alerting.action.IndexMonitorResponse +import org.opensearch.commons.alerting.model.BucketLevelTrigger +import org.opensearch.commons.alerting.model.DocumentLevelTrigger +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.QueryLevelTrigger +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentParser.Token +import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken +import org.opensearch.index.seqno.SequenceNumbers +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.BaseRestHandler.RestChannelConsumer +import org.opensearch.rest.BytesRestResponse +import org.opensearch.rest.RestChannel +import org.opensearch.rest.RestHandler.ReplacedRoute +import org.opensearch.rest.RestHandler.Route +import org.opensearch.rest.RestRequest +import org.opensearch.rest.RestRequest.Method.POST +import org.opensearch.rest.RestRequest.Method.PUT +import org.opensearch.rest.RestResponse +import org.opensearch.rest.action.RestResponseListener +import java.io.IOException +import java.time.Instant + +private val log = LogManager.getLogger(RestIndexMonitorAction::class.java) + +/** + * Rest handlers to create and update monitors. 
+ */
+class RestIndexMonitorAction : BaseRestHandler() {
+
+    override fun getName(): String {
+        return "index_monitor_action"
+    }
+
+    override fun routes(): List<Route> {
+        return listOf()
+    }
+
+    override fun replacedRoutes(): MutableList<ReplacedRoute> {
+        return mutableListOf(
+            ReplacedRoute(
+                POST,
+                AlertingPlugin.MONITOR_BASE_URI,
+                POST,
+                AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI
+            ),
+            ReplacedRoute(
+                PUT,
+                "${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}",
+                PUT,
+                "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/{monitorID}"
+            )
+        )
+    }
+
+    @Throws(IOException::class)
+    override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer {
+        log.debug("${request.method()} ${AlertingPlugin.MONITOR_BASE_URI}")
+
+        val id = request.param("monitorID", Monitor.NO_ID)
+        if (request.method() == PUT && Monitor.NO_ID == id) {
+            throw IllegalArgumentException("Missing monitor ID")
+        }
+
+        // Validate request by parsing JSON to Monitor
+        val xcp = request.contentParser()
+        ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp)
+        val monitor = Monitor.parse(xcp, id).copy(lastUpdateTime = Instant.now())
+        val rbacRoles = request.contentParser().map()["rbac_roles"] as List<String>?
+
+        validateDataSources(monitor)
+        validateOwner(monitor.owner)
+        val monitorType = monitor.monitorType
+        val triggers = monitor.triggers
+        when (monitorType) {
+            Monitor.MonitorType.QUERY_LEVEL_MONITOR -> {
+                triggers.forEach {
+                    if (it !is QueryLevelTrigger) {
+                        throw IllegalArgumentException("Illegal trigger type, ${it.javaClass.name}, for query level monitor")
+                    }
+                }
+            }
+            Monitor.MonitorType.BUCKET_LEVEL_MONITOR -> {
+                triggers.forEach {
+                    if (it !is BucketLevelTrigger) {
+                        throw IllegalArgumentException("Illegal trigger type, ${it.javaClass.name}, for bucket level monitor")
+                    }
+                }
+            }
+            Monitor.MonitorType.CLUSTER_METRICS_MONITOR -> {
+                triggers.forEach {
+                    if (it !is QueryLevelTrigger) {
+                        throw IllegalArgumentException("Illegal trigger type, ${it.javaClass.name}, for cluster metrics monitor")
+                    }
+                }
+            }
+            Monitor.MonitorType.DOC_LEVEL_MONITOR -> {
+                triggers.forEach {
+                    if (it !is DocumentLevelTrigger) {
+                        throw IllegalArgumentException("Illegal trigger type, ${it.javaClass.name}, for document level monitor")
+                    }
+                }
+            }
+        }
+        val seqNo = request.paramAsLong(IF_SEQ_NO, SequenceNumbers.UNASSIGNED_SEQ_NO)
+        val primaryTerm = request.paramAsLong(IF_PRIMARY_TERM, SequenceNumbers.UNASSIGNED_PRIMARY_TERM)
+        val refreshPolicy = if (request.hasParam(REFRESH)) {
+            WriteRequest.RefreshPolicy.parse(request.param(REFRESH))
+        } else {
+            WriteRequest.RefreshPolicy.IMMEDIATE
+        }
+        val indexMonitorRequest = IndexMonitorRequest(id, seqNo, primaryTerm, refreshPolicy, request.method(), monitor, rbacRoles)
+
+        return RestChannelConsumer { channel ->
+            client.execute(AlertingActions.INDEX_MONITOR_ACTION_TYPE, indexMonitorRequest, indexMonitorResponse(channel, request.method()))
+        }
+    }
+
+    private fun validateDataSources(monitor: Monitor) { // Data Sources will currently be supported only at transport layer.
+        if (monitor.dataSources != null) {
+            if (
+                monitor.dataSources.queryIndex != ScheduledJob.DOC_LEVEL_QUERIES_INDEX ||
+                monitor.dataSources.findingsIndex != AlertIndices.FINDING_HISTORY_WRITE_INDEX ||
+                monitor.dataSources.alertsIndex != AlertIndices.ALERT_INDEX
+            ) {
+                throw IllegalArgumentException("Custom Data Sources are not allowed.")
+            }
+        }
+    }
+
+    private fun validateOwner(owner: String?) {
+        if (owner != "alerting") {
+            throw IllegalArgumentException("Invalid owner field")
+        }
+    }
+
+    private fun indexMonitorResponse(channel: RestChannel, restMethod: RestRequest.Method):
+        RestResponseListener<IndexMonitorResponse> {
+        return object : RestResponseListener<IndexMonitorResponse>(channel) {
+            @Throws(Exception::class)
+            override fun buildResponse(response: IndexMonitorResponse): RestResponse {
+                var returnStatus = RestStatus.CREATED
+                if (restMethod == RestRequest.Method.PUT) {
+                    returnStatus = RestStatus.OK
+                }
+
+                val restResponse = BytesRestResponse(returnStatus, response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS))
+                if (returnStatus == RestStatus.CREATED) {
+                    val location = "${AlertingPlugin.MONITOR_BASE_URI}/${response.id}"
+                    restResponse.addHeader("Location", location)
+                }
+                return restResponse
+            }
+        }
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestIndexWorkflowAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestIndexWorkflowAction.kt
new file mode 100644
index 000000000..d631ed710
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/resthandler/RestIndexWorkflowAction.kt
@@ -0,0 +1,99 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+package org.opensearch.alerting.resthandler
+
+import org.opensearch.action.support.WriteRequest
+import org.opensearch.alerting.AlertingPlugin
+import org.opensearch.alerting.util.AlertingException
+import org.opensearch.alerting.util.IF_PRIMARY_TERM
+import org.opensearch.alerting.util.IF_SEQ_NO
+import org.opensearch.alerting.util.REFRESH
+import org.opensearch.client.node.NodeClient
+import org.opensearch.commons.alerting.action.AlertingActions
+import org.opensearch.commons.alerting.action.IndexWorkflowRequest
+import org.opensearch.commons.alerting.action.IndexWorkflowResponse
+import org.opensearch.commons.alerting.model.Workflow
+import org.opensearch.core.rest.RestStatus
+import org.opensearch.core.xcontent.ToXContent
+import org.opensearch.core.xcontent.XContentParser
+import org.opensearch.core.xcontent.XContentParserUtils
+import org.opensearch.index.seqno.SequenceNumbers
+import org.opensearch.rest.BaseRestHandler
+import org.opensearch.rest.BaseRestHandler.RestChannelConsumer
+import org.opensearch.rest.BytesRestResponse
+import org.opensearch.rest.RestChannel
+import org.opensearch.rest.RestHandler
+import org.opensearch.rest.RestRequest
+import org.opensearch.rest.RestResponse
+import org.opensearch.rest.action.RestResponseListener
+import java.io.IOException
+import java.time.Instant
+
+/**
+ * Rest handlers to create and update workflows.
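+ *
+ * A minimal usage sketch (workflow, seqNo, primaryTerm and listener are placeholders): a PUT to an
+ * existing workflow maps onto
+ * ```
+ * val indexWorkflowRequest = IndexWorkflowRequest(
+ *     "existingWorkflowId", seqNo, primaryTerm, WriteRequest.RefreshPolicy.IMMEDIATE,
+ *     RestRequest.Method.PUT, workflow, null
+ * )
+ * client.execute(AlertingActions.INDEX_WORKFLOW_ACTION_TYPE, indexWorkflowRequest, listener)
+ * ```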
+ */ +class RestIndexWorkflowAction : BaseRestHandler() { + + override fun getName(): String { + return "index_workflow_action" + } + + override fun routes(): List { + return listOf( + RestHandler.Route(RestRequest.Method.POST, AlertingPlugin.WORKFLOW_BASE_URI), + RestHandler.Route( + RestRequest.Method.PUT, + "${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}" + ) + ) + } + + @Throws(IOException::class) + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + val id = request.param("workflowID", Workflow.NO_ID) + if (request.method() == RestRequest.Method.PUT && Workflow.NO_ID == id) { + throw AlertingException.wrap(IllegalArgumentException("Missing workflow ID")) + } + + // Validate request by parsing JSON to Monitor + val xcp = request.contentParser() + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + val workflow = Workflow.parse(xcp, id).copy(lastUpdateTime = Instant.now()) + val rbacRoles = request.contentParser().map()["rbac_roles"] as List? + + val seqNo = request.paramAsLong(IF_SEQ_NO, SequenceNumbers.UNASSIGNED_SEQ_NO) + val primaryTerm = request.paramAsLong(IF_PRIMARY_TERM, SequenceNumbers.UNASSIGNED_PRIMARY_TERM) + val refreshPolicy = if (request.hasParam(REFRESH)) { + WriteRequest.RefreshPolicy.parse(request.param(REFRESH)) + } else { + WriteRequest.RefreshPolicy.IMMEDIATE + } + val workflowRequest = + IndexWorkflowRequest(id, seqNo, primaryTerm, refreshPolicy, request.method(), workflow, rbacRoles) + + return RestChannelConsumer { channel -> + client.execute(AlertingActions.INDEX_WORKFLOW_ACTION_TYPE, workflowRequest, indexMonitorResponse(channel, request.method())) + } + } + + private fun indexMonitorResponse(channel: RestChannel, restMethod: RestRequest.Method): RestResponseListener { + return object : RestResponseListener(channel) { + @Throws(Exception::class) + override fun buildResponse(response: IndexWorkflowResponse): RestResponse { + var returnStatus = RestStatus.CREATED + if (restMethod == RestRequest.Method.PUT) + returnStatus = RestStatus.OK + + val restResponse = + BytesRestResponse(returnStatus, response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS)) + if (returnStatus == RestStatus.CREATED) { + val location = "${AlertingPlugin.WORKFLOW_BASE_URI}/${response.id}" + restResponse.addHeader("Location", location) + } + return restResponse + } + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchEmailAccountAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchEmailAccountAction.kt new file mode 100644 index 000000000..1d2224e3b --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchEmailAccountAction.kt @@ -0,0 +1,108 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.action.SearchEmailAccountAction +import org.opensearch.alerting.model.destination.email.EmailAccount +import org.opensearch.alerting.util.context +import org.opensearch.client.node.NodeClient +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentFactory.jsonBuilder +import org.opensearch.common.xcontent.XContentType +import 
org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import org.opensearch.core.common.bytes.BytesReference +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS +import org.opensearch.index.query.QueryBuilders +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.BytesRestResponse +import org.opensearch.rest.RestChannel +import org.opensearch.rest.RestHandler.ReplacedRoute +import org.opensearch.rest.RestHandler.Route +import org.opensearch.rest.RestRequest +import org.opensearch.rest.RestResponse +import org.opensearch.rest.action.RestResponseListener +import org.opensearch.search.builder.SearchSourceBuilder +import java.io.IOException + +/** + * Rest handlers to search for EmailAccount + */ +class RestSearchEmailAccountAction : BaseRestHandler() { + + override fun getName(): String { + return "search_email_account_action" + } + + override fun routes(): List { + return listOf() + } + + override fun replacedRoutes(): MutableList { + return mutableListOf( + ReplacedRoute( + RestRequest.Method.POST, + "${AlertingPlugin.EMAIL_ACCOUNT_BASE_URI}/_search", + RestRequest.Method.POST, + "${AlertingPlugin.LEGACY_OPENDISTRO_EMAIL_ACCOUNT_BASE_URI}/_search" + ), + ReplacedRoute( + RestRequest.Method.GET, + "${AlertingPlugin.EMAIL_ACCOUNT_BASE_URI}/_search", + RestRequest.Method.GET, + "${AlertingPlugin.LEGACY_OPENDISTRO_EMAIL_ACCOUNT_BASE_URI}/_search" + ) + ) + } + + @Throws(IOException::class) + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + val searchSourceBuilder = SearchSourceBuilder() + searchSourceBuilder.parseXContent(request.contentOrSourceParamParser()) + searchSourceBuilder.fetchSource(context(request)) + + // An exists query is added on top of the user's query to ensure that only documents of email_account type + // are searched + searchSourceBuilder.query( + QueryBuilders.boolQuery().must(searchSourceBuilder.query()) + .filter(QueryBuilders.existsQuery(EmailAccount.EMAIL_ACCOUNT_TYPE)) + ) + .seqNoAndPrimaryTerm(true) + val searchRequest = SearchRequest() + .source(searchSourceBuilder) + .indices(SCHEDULED_JOBS_INDEX) + return RestChannelConsumer { channel -> + client.execute(SearchEmailAccountAction.INSTANCE, searchRequest, searchEmailAccountResponse(channel)) + } + } + + private fun searchEmailAccountResponse(channel: RestChannel): RestResponseListener { + return object : RestResponseListener(channel) { + @Throws(Exception::class) + override fun buildResponse(response: SearchResponse): RestResponse { + if (response.isTimedOut) { + return BytesRestResponse(RestStatus.REQUEST_TIMEOUT, response.toString()) + } + + for (hit in response.hits) { + XContentType.JSON.xContent().createParser( + channel.request().xContentRegistry, + LoggingDeprecationHandler.INSTANCE, + hit.sourceAsString + ).use { hitsParser -> + val emailAccount = EmailAccount.parseWithType(hitsParser, hit.id, hit.version) + val xcb = emailAccount.toXContent(jsonBuilder(), EMPTY_PARAMS) + hit.sourceRef(BytesReference.bytes(xcb)) + } + } + return BytesRestResponse(RestStatus.OK, response.toXContent(channel.newBuilder(), EMPTY_PARAMS)) + } + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchEmailGroupAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchEmailGroupAction.kt new file mode 100644 index 000000000..e29dd8872 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchEmailGroupAction.kt @@ 
-0,0 +1,109 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.action.SearchEmailGroupAction +import org.opensearch.alerting.model.destination.email.EmailGroup +import org.opensearch.alerting.util.context +import org.opensearch.client.node.NodeClient +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentFactory +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import org.opensearch.core.common.bytes.BytesReference +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.index.query.QueryBuilders +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.BaseRestHandler.RestChannelConsumer +import org.opensearch.rest.BytesRestResponse +import org.opensearch.rest.RestChannel +import org.opensearch.rest.RestHandler.ReplacedRoute +import org.opensearch.rest.RestHandler.Route +import org.opensearch.rest.RestRequest +import org.opensearch.rest.RestResponse +import org.opensearch.rest.action.RestResponseListener +import org.opensearch.search.builder.SearchSourceBuilder +import java.io.IOException + +/** + * Rest handlers to search for EmailGroup + */ +class RestSearchEmailGroupAction : BaseRestHandler() { + + override fun getName(): String { + return "search_email_group_action" + } + + override fun routes(): List { + return listOf() + } + + override fun replacedRoutes(): MutableList { + return mutableListOf( + ReplacedRoute( + RestRequest.Method.POST, + "${AlertingPlugin.EMAIL_GROUP_BASE_URI}/_search", + RestRequest.Method.POST, + "${AlertingPlugin.LEGACY_OPENDISTRO_EMAIL_GROUP_BASE_URI}/_search" + ), + ReplacedRoute( + RestRequest.Method.GET, + "${AlertingPlugin.EMAIL_GROUP_BASE_URI}/_search", + RestRequest.Method.GET, + "${AlertingPlugin.LEGACY_OPENDISTRO_EMAIL_GROUP_BASE_URI}/_search" + ) + ) + } + + @Throws(IOException::class) + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + val searchSourceBuilder = SearchSourceBuilder() + searchSourceBuilder.parseXContent(request.contentOrSourceParamParser()) + searchSourceBuilder.fetchSource(context(request)) + + // An exists query is added on top of the user's query to ensure that only documents of email_group type + // are searched + searchSourceBuilder.query( + QueryBuilders.boolQuery().must(searchSourceBuilder.query()) + .filter(QueryBuilders.existsQuery(EmailGroup.EMAIL_GROUP_TYPE)) + ) + .seqNoAndPrimaryTerm(true) + val searchRequest = SearchRequest() + .source(searchSourceBuilder) + .indices(SCHEDULED_JOBS_INDEX) + return RestChannelConsumer { channel -> + client.execute(SearchEmailGroupAction.INSTANCE, searchRequest, searchEmailGroupResponse(channel)) + } + } + + private fun searchEmailGroupResponse(channel: RestChannel): RestResponseListener { + return object : RestResponseListener(channel) { + @Throws(Exception::class) + override fun buildResponse(response: SearchResponse): RestResponse { + if (response.isTimedOut) { + return BytesRestResponse(RestStatus.REQUEST_TIMEOUT, response.toString()) + } + + for (hit in response.hits) { + XContentType.JSON.xContent().createParser( + channel.request().xContentRegistry, + 
LoggingDeprecationHandler.INSTANCE, + hit.sourceAsString + ).use { hitsParser -> + val emailGroup = EmailGroup.parseWithType(hitsParser, hit.id, hit.version) + val xcb = emailGroup.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS) + hit.sourceRef(BytesReference.bytes(xcb)) + } + } + return BytesRestResponse(RestStatus.OK, response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS)) + } + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt new file mode 100644 index 000000000..1bf51678e --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt @@ -0,0 +1,136 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.logging.log4j.LogManager +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.alerts.AlertIndices.Companion.ALL_ALERT_INDEX_PATTERN +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.util.context +import org.opensearch.client.node.NodeClient +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentFactory.jsonBuilder +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.SearchMonitorRequest +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import org.opensearch.core.common.bytes.BytesReference +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.BaseRestHandler.RestChannelConsumer +import org.opensearch.rest.BytesRestResponse +import org.opensearch.rest.RestChannel +import org.opensearch.rest.RestHandler.ReplacedRoute +import org.opensearch.rest.RestHandler.Route +import org.opensearch.rest.RestRequest +import org.opensearch.rest.RestRequest.Method.GET +import org.opensearch.rest.RestRequest.Method.POST +import org.opensearch.rest.RestResponse +import org.opensearch.rest.action.RestResponseListener +import org.opensearch.search.builder.SearchSourceBuilder +import java.io.IOException + +private val log = LogManager.getLogger(RestSearchMonitorAction::class.java) + +/** + * Rest handlers to search for monitors. 
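+ *
+ * A minimal usage sketch (the query is a placeholder): the raw search body is parsed into a
+ * SearchSourceBuilder and wrapped, e.g.
+ * ```
+ * val source = SearchSourceBuilder().query(QueryBuilders.matchAllQuery())
+ * val searchMonitorRequest = SearchMonitorRequest(SearchRequest().source(source).indices(SCHEDULED_JOBS_INDEX))
+ * client.execute(AlertingActions.SEARCH_MONITORS_ACTION_TYPE, searchMonitorRequest, searchMonitorResponse(channel))
+ * ```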
+ * TODO: Deprecate API for a set of new APIs that will support this API's use cases
+ */
+class RestSearchMonitorAction(
+    val settings: Settings,
+    clusterService: ClusterService
+) : BaseRestHandler() {
+
+    @Volatile private var filterBy = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings)
+
+    init {
+        clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.FILTER_BY_BACKEND_ROLES) { filterBy = it }
+    }
+
+    override fun getName(): String {
+        return "search_monitor_action"
+    }
+
+    override fun routes(): List<Route> {
+        return listOf()
+    }
+
+    override fun replacedRoutes(): MutableList<ReplacedRoute> {
+        return mutableListOf(
+            // Search for monitors
+            ReplacedRoute(
+                POST,
+                "${AlertingPlugin.MONITOR_BASE_URI}/_search",
+                POST,
+                "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/_search"
+            ),
+            ReplacedRoute(
+                GET,
+                "${AlertingPlugin.MONITOR_BASE_URI}/_search",
+                GET,
+                "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/_search"
+            )
+        )
+    }
+
+    @Throws(IOException::class)
+    override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer {
+        log.debug("${request.method()} ${AlertingPlugin.MONITOR_BASE_URI}/_search")
+
+        val index = request.param("index", SCHEDULED_JOBS_INDEX)
+        if (index != SCHEDULED_JOBS_INDEX && index != ALL_ALERT_INDEX_PATTERN) {
+            throw IllegalArgumentException("Invalid index name.")
+        }
+
+        val searchSourceBuilder = SearchSourceBuilder()
+        searchSourceBuilder.parseXContent(request.contentOrSourceParamParser())
+        searchSourceBuilder.fetchSource(context(request))
+
+        val searchRequest = SearchRequest()
+            .source(searchSourceBuilder)
+            .indices(index)
+
+        val searchMonitorRequest = SearchMonitorRequest(searchRequest)
+        return RestChannelConsumer { channel ->
+            client.execute(AlertingActions.SEARCH_MONITORS_ACTION_TYPE, searchMonitorRequest, searchMonitorResponse(channel))
+        }
+    }
+
+    private fun searchMonitorResponse(channel: RestChannel): RestResponseListener<SearchResponse> {
+        return object : RestResponseListener<SearchResponse>(channel) {
+            @Throws(Exception::class)
+            override fun buildResponse(response: SearchResponse): RestResponse {
+                if (response.isTimedOut) {
+                    return BytesRestResponse(RestStatus.REQUEST_TIMEOUT, response.toString())
+                }
+
+                // Swallow exception and return response as is
+                try {
+                    for (hit in response.hits) {
+                        XContentType.JSON.xContent().createParser(
+                            channel.request().xContentRegistry,
+                            LoggingDeprecationHandler.INSTANCE,
+                            hit.sourceAsString
+                        ).use { hitsParser ->
+                            val monitor = ScheduledJob.parse(hitsParser, hit.id, hit.version)
+                            val xcb = monitor.toXContent(jsonBuilder(), EMPTY_PARAMS)
+                            hit.sourceRef(BytesReference.bytes(xcb))
+                        }
+                    }
+                } catch (e: Exception) {
+                    log.info("The monitor parsing failed. Will return response as is.")
+                }
+                return BytesRestResponse(RestStatus.OK, response.toXContent(channel.newBuilder(), EMPTY_PARAMS))
+            }
+        }
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/script/BucketLevelTriggerExecutionContext.kt b/alerting/bin/main/org/opensearch/alerting/script/BucketLevelTriggerExecutionContext.kt
new file mode 100644
index 000000000..72518ed48
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/script/BucketLevelTriggerExecutionContext.kt
@@ -0,0 +1,51 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.script
+
+import org.opensearch.alerting.model.BucketLevelTriggerRunResult
+import org.opensearch.alerting.model.MonitorRunResult
+import org.opensearch.commons.alerting.model.Alert
+import org.opensearch.commons.alerting.model.BucketLevelTrigger
+import org.opensearch.commons.alerting.model.Monitor
+import java.time.Instant
+
+data class BucketLevelTriggerExecutionContext(
+    override val monitor: Monitor,
+    val trigger: BucketLevelTrigger,
+    override val results: List<Map<String, Any>>,
+    override val periodStart: Instant,
+    override val periodEnd: Instant,
+    val dedupedAlerts: List<Alert> = listOf(),
+    val newAlerts: List<Alert> = listOf(),
+    val completedAlerts: List<Alert> = listOf(),
+    override val error: Exception? = null
+) : TriggerExecutionContext(monitor, results, periodStart, periodEnd, error) {
+
+    constructor(
+        monitor: Monitor,
+        trigger: BucketLevelTrigger,
+        monitorRunResult: MonitorRunResult<BucketLevelTriggerRunResult>,
+        dedupedAlerts: List<Alert> = listOf(),
+        newAlerts: List<Alert> = listOf(),
+        completedAlerts: List<Alert> = listOf()
+    ) : this(
+        monitor, trigger, monitorRunResult.inputResults.results, monitorRunResult.periodStart, monitorRunResult.periodEnd,
+        dedupedAlerts, newAlerts, completedAlerts, monitorRunResult.scriptContextError(trigger)
+    )
+
+    /**
+     * Mustache templates need special permissions to reflectively introspect field names. To avoid doing this we
+     * translate the context to a Map of Strings to primitive types, which can be accessed without reflection.
+     */
+    override fun asTemplateArg(): Map<String, Any?> {
+        val tempArg = super.asTemplateArg().toMutableMap()
+        tempArg["trigger"] = trigger.asTemplateArg()
+        tempArg["dedupedAlerts"] = dedupedAlerts.map { it.asTemplateArg() }
+        tempArg["newAlerts"] = newAlerts.map { it.asTemplateArg() }
+        tempArg["completedAlerts"] = completedAlerts.map { it.asTemplateArg() }
+        return tempArg
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/script/ChainedAlertTriggerExecutionContext.kt b/alerting/bin/main/org/opensearch/alerting/script/ChainedAlertTriggerExecutionContext.kt
new file mode 100644
index 000000000..d4bf4cb59
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/script/ChainedAlertTriggerExecutionContext.kt
@@ -0,0 +1,40 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.script
+
+import org.opensearch.alerting.model.WorkflowRunResult
+import org.opensearch.commons.alerting.model.Alert
+import org.opensearch.commons.alerting.model.ChainedAlertTrigger
+import org.opensearch.commons.alerting.model.Workflow
+import java.time.Instant
+
+data class ChainedAlertTriggerExecutionContext(
+    val workflow: Workflow,
+    val workflowRunResult: WorkflowRunResult,
+    val periodStart: Instant,
+    val periodEnd: Instant?,
+    val error: Exception? = null,
+    val trigger: ChainedAlertTrigger,
+    val alertGeneratingMonitors: Set<String>,
+    val monitorIdToAlertIdsMap: Map<String, Set<String>>,
+    val alert: Alert?
= null
+) {
+
+    /**
+     * Mustache templates need special permissions to reflectively introspect field names. To avoid doing this we
+     * translate the context to a Map of Strings to primitive types, which can be accessed without reflection.
+     */
+    open fun asTemplateArg(): Map<String, Any?> {
+        return mapOf(
+            "monitor" to workflow.asTemplateArg(),
+            "results" to workflowRunResult,
+            "periodStart" to periodStart,
+            "error" to error,
+            "alertGeneratingMonitors" to alertGeneratingMonitors,
+            "monitorIdToAlertIdsMap" to monitorIdToAlertIdsMap
+        )
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/script/DocumentLevelTriggerExecutionContext.kt b/alerting/bin/main/org/opensearch/alerting/script/DocumentLevelTriggerExecutionContext.kt
new file mode 100644
index 000000000..66de731f6
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/script/DocumentLevelTriggerExecutionContext.kt
@@ -0,0 +1,44 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.script
+
+import org.opensearch.commons.alerting.model.Alert
+import org.opensearch.commons.alerting.model.DocumentLevelTrigger
+import org.opensearch.commons.alerting.model.Monitor
+import java.time.Instant
+
+data class DocumentLevelTriggerExecutionContext(
+    override val monitor: Monitor,
+    val trigger: DocumentLevelTrigger,
+    override val results: List<Map<String, Any>>,
+    override val periodStart: Instant,
+    override val periodEnd: Instant,
+    val alerts: List<Alert> = listOf(),
+    val triggeredDocs: List<String>,
+    val relatedFindings: List<String>,
+    override val error: Exception? = null
+) : TriggerExecutionContext(monitor, results, periodStart, periodEnd, error) {
+
+    constructor(
+        monitor: Monitor,
+        trigger: DocumentLevelTrigger,
+        alerts: List<Alert> = listOf()
+    ) : this(
+        monitor, trigger, emptyList(), Instant.now(), Instant.now(),
+        alerts, emptyList(), emptyList(), null
+    )
+
+    /**
+     * Mustache templates need special permissions to reflectively introspect field names. To avoid doing this we
+     * translate the context to a Map of Strings to primitive types, which can be accessed without reflection.
+     */
+    override fun asTemplateArg(): Map<String, Any?> {
+        val tempArg = super.asTemplateArg().toMutableMap()
+        tempArg["trigger"] = trigger.asTemplateArg()
+        tempArg["alerts"] = alerts.map { it.asTemplateArg() }
+        return tempArg
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/script/QueryLevelTriggerExecutionContext.kt b/alerting/bin/main/org/opensearch/alerting/script/QueryLevelTriggerExecutionContext.kt
new file mode 100644
index 000000000..729aa18d0
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/script/QueryLevelTriggerExecutionContext.kt
@@ -0,0 +1,50 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.script
+
+import org.opensearch.alerting.model.MonitorRunResult
+import org.opensearch.alerting.model.QueryLevelTriggerRunResult
+import org.opensearch.commons.alerting.model.Alert
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.commons.alerting.model.QueryLevelTrigger
+import java.time.Instant
+
+data class QueryLevelTriggerExecutionContext(
+    override val monitor: Monitor,
+    val trigger: QueryLevelTrigger,
+    override val results: List<Map<String, Any>>,
+    override val periodStart: Instant,
+    override val periodEnd: Instant,
+    val alert: Alert? = null,
+    override val error: Exception?
= null +) : TriggerExecutionContext(monitor, results, periodStart, periodEnd, error) { + + constructor( + monitor: Monitor, + trigger: QueryLevelTrigger, + monitorRunResult: MonitorRunResult, + alert: Alert? = null + ) : this( + monitor, + trigger, + monitorRunResult.inputResults.results, + monitorRunResult.periodStart, + monitorRunResult.periodEnd, + alert, + monitorRunResult.scriptContextError(trigger) + ) + + /** + * Mustache templates need special permissions to reflectively introspect field names. To avoid doing this we + * translate the context to a Map of Strings to primitive types, which can be accessed without reflection. + */ + override fun asTemplateArg(): Map { + val tempArg = super.asTemplateArg().toMutableMap() + tempArg["trigger"] = trigger.asTemplateArg() + tempArg["alert"] = alert?.asTemplateArg() + return tempArg + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/script/TriggerExecutionContext.kt b/alerting/bin/main/org/opensearch/alerting/script/TriggerExecutionContext.kt new file mode 100644 index 000000000..1f5bd8be5 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/script/TriggerExecutionContext.kt @@ -0,0 +1,43 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.script + +import org.opensearch.alerting.model.MonitorRunResult +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.Trigger +import java.time.Instant + +abstract class TriggerExecutionContext( + open val monitor: Monitor, + open val results: List>, + open val periodStart: Instant, + open val periodEnd: Instant, + open val error: Exception? = null +) { + + constructor(monitor: Monitor, trigger: Trigger, monitorRunResult: MonitorRunResult<*>) : + this( + monitor, + monitorRunResult.inputResults.results, + monitorRunResult.periodStart, + monitorRunResult.periodEnd, + monitorRunResult.scriptContextError(trigger) + ) + + /** + * Mustache templates need special permissions to reflectively introspect field names. To avoid doing this we + * translate the context to a Map of Strings to primitive types, which can be accessed without reflection. + */ + open fun asTemplateArg(): Map { + return mapOf( + "monitor" to monitor.asTemplateArg(), + "results" to results, + "periodStart" to periodStart, + "periodEnd" to periodEnd, + "error" to error + ) + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/script/TriggerScript.kt b/alerting/bin/main/org/opensearch/alerting/script/TriggerScript.kt new file mode 100644 index 000000000..f6ed1ec6a --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/script/TriggerScript.kt @@ -0,0 +1,46 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.script + +import org.opensearch.script.Script +import org.opensearch.script.ScriptContext + +abstract class TriggerScript(_scriptParams: Map) { + + /** + * [scriptParams] are the [user-defined parameters][Script.getParams] specified in the script definition. + * The [scriptParams] are defined when the script is compiled and DON'T change every time the script executes. This field + * is named **script**Params to avoid confusion with the [PARAMETERS] field. However to remain consistent with every other + * painless script context we surface it to the painless script as just `params` using a custom getter name. 
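+ *
+ * A minimal sketch of how such a script could be defined (the condition string and threshold are
+ * placeholders, assuming the usual query-level response shape):
+ * ```
+ * val script = Script(
+ *     ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG,
+ *     "ctx.results[0].hits.total.value > params.threshold", mapOf("threshold" to 10)
+ * )
+ * ```
+ * Here params.threshold is served from [scriptParams], while ctx is bound per execution via [PARAMETERS].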
+ */ + val scriptParams: Map = _scriptParams + @JvmName("getParams") + get + + companion object { + /** + * [PARAMETERS] contains the names of the formal arguments to the [execute] method which define the + * script's execution context. These argument names (`_results` etc.) are available as named parameters + * in the painless script. These arguments passed to the [execute] method change every time the trigger is executed. + * In a sane world this would have been named `ARGUMENTS` to avoid confusing the hell out of everyone who has to + * work with this code. + */ + @JvmField val PARAMETERS = arrayOf("ctx") + + val CONTEXT = ScriptContext("trigger", Factory::class.java) + } + + /** + * Run a trigger script with the given context. + * + * @param ctx - the trigger execution context + */ + abstract fun execute(ctx: QueryLevelTriggerExecutionContext): Boolean + + interface Factory { + fun newInstance(scriptParams: Map): TriggerScript + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/service/DeleteMonitorService.kt b/alerting/bin/main/org/opensearch/alerting/service/DeleteMonitorService.kt new file mode 100644 index 000000000..b26ae2473 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/service/DeleteMonitorService.kt @@ -0,0 +1,186 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.service + +import kotlinx.coroutines.CoroutineName +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.SupervisorJob +import org.apache.logging.log4j.LogManager +import org.apache.lucene.search.join.ScoreMode +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest +import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequest +import org.opensearch.action.admin.indices.exists.indices.IndicesExistsResponse +import org.opensearch.action.delete.DeleteRequest +import org.opensearch.action.delete.DeleteResponse +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.IndicesOptions +import org.opensearch.action.support.WriteRequest.RefreshPolicy +import org.opensearch.action.support.master.AcknowledgedResponse +import org.opensearch.alerting.MonitorMetadataService +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.ScheduledJobUtils.Companion.WORKFLOW_DELEGATE_PATH +import org.opensearch.alerting.util.ScheduledJobUtils.Companion.WORKFLOW_MONITOR_PATH +import org.opensearch.client.Client +import org.opensearch.commons.alerting.action.DeleteMonitorResponse +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.core.action.ActionListener +import org.opensearch.index.query.QueryBuilders +import org.opensearch.index.reindex.BulkByScrollResponse +import org.opensearch.index.reindex.DeleteByQueryAction +import org.opensearch.index.reindex.DeleteByQueryRequestBuilder +import org.opensearch.search.builder.SearchSourceBuilder +import kotlin.coroutines.resume +import kotlin.coroutines.resumeWithException +import kotlin.coroutines.suspendCoroutine + +/** + * Component used when deleting the monitors + */ +object DeleteMonitorService : + CoroutineScope by CoroutineScope(SupervisorJob() + Dispatchers.Default + CoroutineName("WorkflowMetadataService")) { + private val log = 
LogManager.getLogger(this.javaClass) + + private lateinit var client: Client + + fun initialize( + client: Client, + ) { + DeleteMonitorService.client = client + } + + /** + * Deletes the monitor, docLevelQueries and monitor metadata + * @param monitor monitor to be deleted + * @param refreshPolicy + */ + suspend fun deleteMonitor(monitor: Monitor, refreshPolicy: RefreshPolicy): DeleteMonitorResponse { + val deleteResponse = deleteMonitor(monitor.id, refreshPolicy) + deleteDocLevelMonitorQueriesAndIndices(monitor) + deleteMetadata(monitor) + return DeleteMonitorResponse(deleteResponse.id, deleteResponse.version) + } + + private suspend fun deleteMonitor(monitorId: String, refreshPolicy: RefreshPolicy): DeleteResponse { + val deleteMonitorRequest = DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, monitorId) + .setRefreshPolicy(refreshPolicy) + return client.suspendUntil { delete(deleteMonitorRequest, it) } + } + + private suspend fun deleteMetadata(monitor: Monitor) { + val deleteRequest = DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, "${monitor.id}-metadata") + .setRefreshPolicy(RefreshPolicy.IMMEDIATE) + try { + val deleteResponse: DeleteResponse = client.suspendUntil { delete(deleteRequest, it) } + log.debug("Monitor metadata: ${deleteResponse.id} deletion result: ${deleteResponse.result}") + } catch (e: Exception) { + // we only log the error and don't fail the request because if monitor document has been deleted, + // we cannot retry based on this failure + log.error("Failed to delete monitor metadata ${deleteRequest.id()}.", e) + } + } + + private suspend fun deleteDocLevelMonitorQueriesAndIndices(monitor: Monitor) { + try { + val metadata = MonitorMetadataService.getMetadata(monitor) + metadata?.sourceToQueryIndexMapping?.forEach { (_, queryIndex) -> + + val indicesExistsResponse: IndicesExistsResponse = + client.suspendUntil { + client.admin().indices().exists(IndicesExistsRequest(queryIndex), it) + } + if (indicesExistsResponse.isExists == false) { + return + } + // Check if there's any queries from other monitors in this queryIndex, + // to avoid unnecessary doc deletion, if we could just delete index completely + val searchResponse: SearchResponse = client.suspendUntil { + search( + SearchRequest(queryIndex).source( + SearchSourceBuilder() + .size(0) + .query( + QueryBuilders.boolQuery().mustNot( + QueryBuilders.matchQuery("monitor_id", monitor.id) + ) + ) + ).indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_HIDDEN), + it + ) + } + if (searchResponse.hits.totalHits.value == 0L) { + val ack: AcknowledgedResponse = client.suspendUntil { + client.admin().indices().delete( + DeleteIndexRequest(queryIndex).indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_HIDDEN), + it + ) + } + if (ack.isAcknowledged == false) { + log.error("Deletion of concrete queryIndex:$queryIndex is not ack'd!") + } + } else { + // Delete all queries added by this monitor + val response: BulkByScrollResponse = suspendCoroutine { cont -> + DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE) + .source(queryIndex) + .filter(QueryBuilders.matchQuery("monitor_id", monitor.id)) + .refresh(true) + .execute( + object : ActionListener { + override fun onResponse(response: BulkByScrollResponse) = cont.resume(response) + override fun onFailure(t: Exception) = cont.resumeWithException(t) + } + ) + } + } + } + } catch (e: Exception) { + // we only log the error and don't fail the request because if monitor document has been deleted successfully, + // we cannot retry based on this failure + log.error("Failed 
to delete doc level queries from query index.", e) + } + } + + /** + * Checks if the monitor is part of the workflow + * + * @param monitorId id of monitor that is checked if it is a workflow delegate + */ + suspend fun monitorIsWorkflowDelegate(monitorId: String): Boolean { + val queryBuilder = QueryBuilders.nestedQuery( + WORKFLOW_DELEGATE_PATH, + QueryBuilders.boolQuery().must( + QueryBuilders.matchQuery( + WORKFLOW_MONITOR_PATH, + monitorId + ) + ), + ScoreMode.None + ) + try { + val searchRequest = SearchRequest() + .indices(ScheduledJob.SCHEDULED_JOBS_INDEX) + .source(SearchSourceBuilder().query(queryBuilder)) + + client.threadPool().threadContext.stashContext().use { + val searchResponse: SearchResponse = client.suspendUntil { search(searchRequest, it) } + if (searchResponse.hits.totalHits?.value == 0L) { + return false + } + + val workflowIds = searchResponse.hits.hits.map { it.id }.joinToString() + log.info("Monitor $monitorId can't be deleted since it belongs to $workflowIds") + return true + } + } catch (ex: Exception) { + log.error("Error getting the monitor workflows", ex) + throw AlertingException.wrap(ex) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/settings/AlertingSettings.kt b/alerting/bin/main/org/opensearch/alerting/settings/AlertingSettings.kt new file mode 100644 index 000000000..7dd90b106 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/settings/AlertingSettings.kt @@ -0,0 +1,180 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.settings + +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.common.settings.Setting +import org.opensearch.common.unit.TimeValue +import java.util.concurrent.TimeUnit + +/** + * settings specific to [AlertingPlugin]. These settings include things like history index max age, request timeout, etc... 
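+ *
+ * A minimal sketch (the setting key is taken from below, the value is an example): since these
+ * settings are dynamic node settings, a value such as the input timeout can be overridden at runtime:
+ * ```
+ * val updated = Settings.builder().put("plugins.alerting.input_timeout", TimeValue.timeValueSeconds(60)).build()
+ * ```
+ * or equivalently through the cluster settings update API using the same key.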
+ */ +class AlertingSettings { + + companion object { + const val DEFAULT_MAX_ACTIONABLE_ALERT_COUNT = 50L + + val ALERTING_MAX_MONITORS = Setting.intSetting( + "plugins.alerting.monitor.max_monitors", + LegacyOpenDistroAlertingSettings.ALERTING_MAX_MONITORS, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val INPUT_TIMEOUT = Setting.positiveTimeSetting( + "plugins.alerting.input_timeout", + LegacyOpenDistroAlertingSettings.INPUT_TIMEOUT, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val INDEX_TIMEOUT = Setting.positiveTimeSetting( + "plugins.alerting.index_timeout", + LegacyOpenDistroAlertingSettings.INDEX_TIMEOUT, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val BULK_TIMEOUT = Setting.positiveTimeSetting( + "plugins.alerting.bulk_timeout", + LegacyOpenDistroAlertingSettings.BULK_TIMEOUT, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val ALERT_BACKOFF_MILLIS = Setting.positiveTimeSetting( + "plugins.alerting.alert_backoff_millis", + LegacyOpenDistroAlertingSettings.ALERT_BACKOFF_MILLIS, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val ALERT_BACKOFF_COUNT = Setting.intSetting( + "plugins.alerting.alert_backoff_count", + LegacyOpenDistroAlertingSettings.ALERT_BACKOFF_COUNT, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val MOVE_ALERTS_BACKOFF_MILLIS = Setting.positiveTimeSetting( + "plugins.alerting.move_alerts_backoff_millis", + LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_MILLIS, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val MOVE_ALERTS_BACKOFF_COUNT = Setting.intSetting( + "plugins.alerting.move_alerts_backoff_count", + LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_COUNT, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val ALERT_HISTORY_ENABLED = Setting.boolSetting( + "plugins.alerting.alert_history_enabled", + LegacyOpenDistroAlertingSettings.ALERT_HISTORY_ENABLED, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + // TODO: Do we want to let users to disable this? 
If so, we need to fix the rollover logic + // such that the main index is findings and rolls over to the finding history index + val FINDING_HISTORY_ENABLED = Setting.boolSetting( + "plugins.alerting.alert_finding_enabled", + true, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val ALERT_HISTORY_ROLLOVER_PERIOD = Setting.positiveTimeSetting( + "plugins.alerting.alert_history_rollover_period", + LegacyOpenDistroAlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val FINDING_HISTORY_ROLLOVER_PERIOD = Setting.positiveTimeSetting( + "plugins.alerting.alert_finding_rollover_period", + TimeValue.timeValueHours(12), + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val ALERT_HISTORY_INDEX_MAX_AGE = Setting.positiveTimeSetting( + "plugins.alerting.alert_history_max_age", + LegacyOpenDistroAlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val FINDING_HISTORY_INDEX_MAX_AGE = Setting.positiveTimeSetting( + "plugins.alerting.finding_history_max_age", + TimeValue(30, TimeUnit.DAYS), + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val ALERT_HISTORY_MAX_DOCS = Setting.longSetting( + "plugins.alerting.alert_history_max_docs", + LegacyOpenDistroAlertingSettings.ALERT_HISTORY_MAX_DOCS, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val FINDING_HISTORY_MAX_DOCS = Setting.longSetting( + "plugins.alerting.alert_finding_max_docs", + 1000L, + 0L, + Setting.Property.NodeScope, + Setting.Property.Dynamic, + Setting.Property.Deprecated + ) + + val ALERT_HISTORY_RETENTION_PERIOD = Setting.positiveTimeSetting( + "plugins.alerting.alert_history_retention_period", + LegacyOpenDistroAlertingSettings.ALERT_HISTORY_RETENTION_PERIOD, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val FINDING_HISTORY_RETENTION_PERIOD = Setting.positiveTimeSetting( + "plugins.alerting.finding_history_retention_period", + TimeValue(60, TimeUnit.DAYS), + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val REQUEST_TIMEOUT = Setting.positiveTimeSetting( + "plugins.alerting.request_timeout", + LegacyOpenDistroAlertingSettings.REQUEST_TIMEOUT, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val MAX_ACTION_THROTTLE_VALUE = Setting.positiveTimeSetting( + "plugins.alerting.action_throttle_max_value", + LegacyOpenDistroAlertingSettings.MAX_ACTION_THROTTLE_VALUE, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val FILTER_BY_BACKEND_ROLES = Setting.boolSetting( + "plugins.alerting.filter_by_backend_roles", + LegacyOpenDistroAlertingSettings.FILTER_BY_BACKEND_ROLES, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val MAX_ACTIONABLE_ALERT_COUNT = Setting.longSetting( + "plugins.alerting.max_actionable_alert_count", + DEFAULT_MAX_ACTIONABLE_ALERT_COUNT, + -1L, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/settings/DestinationSettings.kt b/alerting/bin/main/org/opensearch/alerting/settings/DestinationSettings.kt new file mode 100644 index 000000000..14086ce68 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/settings/DestinationSettings.kt @@ -0,0 +1,109 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.settings + +import org.opensearch.common.settings.SecureSetting +import org.opensearch.common.settings.Setting +import 
org.opensearch.common.settings.Setting.AffixSetting +import org.opensearch.common.settings.Settings +import org.opensearch.core.common.settings.SecureString +import java.util.function.Function + +/** + * Settings specific to Destinations. This class is separated from the general AlertingSettings since some Destination + * types require SecureSettings and need additional logic for retrieving and loading them. + */ +class DestinationSettings { + companion object { + + const val DESTINATION_SETTING_PREFIX = "plugins.alerting.destination." + const val EMAIL_DESTINATION_SETTING_PREFIX = DESTINATION_SETTING_PREFIX + "email." + val ALLOW_LIST_NONE = emptyList() + + val ALLOW_LIST: Setting> = Setting.listSetting( + DESTINATION_SETTING_PREFIX + "allow_list", + LegacyOpenDistroDestinationSettings.ALLOW_LIST, + Function.identity(), + Setting.Property.NodeScope, + Setting.Property.Dynamic + ) + + val EMAIL_USERNAME: Setting.AffixSetting = Setting.affixKeySetting( + EMAIL_DESTINATION_SETTING_PREFIX, + "username", + // Needed to coerce lambda to Function type for some reason to avoid argument mismatch compile error + Function { key: String -> + SecureSetting.secureString( + key, + fallback(key, LegacyOpenDistroDestinationSettings.EMAIL_USERNAME, "plugins", "opendistro") + ) + } + ) + + val EMAIL_PASSWORD: Setting.AffixSetting = Setting.affixKeySetting( + EMAIL_DESTINATION_SETTING_PREFIX, + "password", + // Needed to coerce lambda to Function type for some reason to avoid argument mismatch compile error + Function { key: String -> + SecureSetting.secureString( + key, + fallback(key, LegacyOpenDistroDestinationSettings.EMAIL_PASSWORD, "plugins", "opendistro") + ) + } + ) + + val HOST_DENY_LIST: Setting> = Setting.listSetting( + "plugins.destination.host.deny_list", + LegacyOpenDistroDestinationSettings.HOST_DENY_LIST, + Function.identity(), + Setting.Property.NodeScope, + Setting.Property.Final + ) + + fun loadDestinationSettings(settings: Settings): Map { + // Only loading Email Destination settings for now since those are the only secure settings needed. + // If this logic needs to be expanded to support other Destinations, different groups can be retrieved similar + // to emailAccountNames based on the setting namespace and SecureDestinationSettings should be expanded to support + // these new settings. + val emailAccountNames: Set = settings.getGroups(EMAIL_DESTINATION_SETTING_PREFIX).keys + val emailAccounts: MutableMap = mutableMapOf() + for (emailAccountName in emailAccountNames) { + // Only adding the settings if they exist + getSecureDestinationSettings(settings, emailAccountName)?.let { + emailAccounts[emailAccountName] = it + } + } + + return emailAccounts + } + + private fun getSecureDestinationSettings(settings: Settings, emailAccountName: String): SecureDestinationSettings? { + // Using 'use' to emulate Java's try-with-resources on multiple closeable resources. + // Values are cloned so that we maintain a SecureString, the original SecureStrings will be closed after + // they have left the scope of this function. + return getEmailSettingValue(settings, emailAccountName, EMAIL_USERNAME)?.use { emailUsername -> + getEmailSettingValue(settings, emailAccountName, EMAIL_PASSWORD)?.use { emailPassword -> + SecureDestinationSettings(emailUsername = emailUsername.clone(), emailPassword = emailPassword.clone()) + } + } + } + + private fun getEmailSettingValue(settings: Settings, emailAccountName: String, emailSetting: Setting.AffixSetting): T? 
+        private fun <T> getEmailSettingValue(settings: Settings, emailAccountName: String, emailSetting: Setting.AffixSetting<T>): T? {
+            val concreteSetting = emailSetting.getConcreteSettingForNamespace(emailAccountName)
+            return concreteSetting.get(settings)
+        }
+
+        private fun fallback(key: String, affixSetting: AffixSetting<SecureString>, regex: String, replacement: String): Setting<SecureString>? {
+            return if ("_na_" == key) {
+                affixSetting.getConcreteSettingForNamespace(key)
+            } else {
+                affixSetting.getConcreteSetting(key.replace(regex.toRegex(), replacement))
+            }
+        }
+
+        data class SecureDestinationSettings(val emailUsername: SecureString, val emailPassword: SecureString)
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/settings/LegacyOpenDistroAlertingSettings.kt b/alerting/bin/main/org/opensearch/alerting/settings/LegacyOpenDistroAlertingSettings.kt
new file mode 100644
index 000000000..e3603aab2
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/settings/LegacyOpenDistroAlertingSettings.kt
@@ -0,0 +1,157 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.settings
+
+import org.opensearch.common.settings.Setting
+import org.opensearch.common.unit.TimeValue
+import java.util.concurrent.TimeUnit
+
+/**
+ * Legacy Open Distro settings specific to [AlertingPlugin]. These settings include things like history index max age, request timeout, etc.
+ */
+
+class LegacyOpenDistroAlertingSettings {
+
+    companion object {
+
+        val ALERTING_MAX_MONITORS = Setting.intSetting(
+            "opendistro.alerting.monitor.max_monitors",
+            1000,
+            Setting.Property.NodeScope,
+            Setting.Property.Dynamic,
+            Setting.Property.Deprecated
+        )
+
+        val INPUT_TIMEOUT = Setting.positiveTimeSetting(
+            "opendistro.alerting.input_timeout",
+            TimeValue.timeValueSeconds(30),
+            Setting.Property.NodeScope,
+            Setting.Property.Dynamic,
+            Setting.Property.Deprecated
+        )
+
+        val INDEX_TIMEOUT = Setting.positiveTimeSetting(
+            "opendistro.alerting.index_timeout",
+            TimeValue.timeValueSeconds(60),
+            Setting.Property.NodeScope,
+            Setting.Property.Dynamic,
+            Setting.Property.Deprecated
+        )
+
+        val BULK_TIMEOUT = Setting.positiveTimeSetting(
+            "opendistro.alerting.bulk_timeout",
+            TimeValue.timeValueSeconds(120),
+            Setting.Property.NodeScope,
+            Setting.Property.Dynamic,
+            Setting.Property.Deprecated
+        )
+
+        val ALERT_BACKOFF_MILLIS = Setting.positiveTimeSetting(
+            "opendistro.alerting.alert_backoff_millis",
+            TimeValue.timeValueMillis(50),
+            Setting.Property.NodeScope,
+            Setting.Property.Dynamic,
+            Setting.Property.Deprecated
+        )
+
+        val ALERT_BACKOFF_COUNT = Setting.intSetting(
+            "opendistro.alerting.alert_backoff_count",
+            2,
+            Setting.Property.NodeScope,
+            Setting.Property.Dynamic,
+            Setting.Property.Deprecated
+        )
+
+        val MOVE_ALERTS_BACKOFF_MILLIS = Setting.positiveTimeSetting(
+            "opendistro.alerting.move_alerts_backoff_millis",
+            TimeValue.timeValueMillis(250),
+            Setting.Property.NodeScope,
+            Setting.Property.Dynamic,
+            Setting.Property.Deprecated
+        )
+
+        val MOVE_ALERTS_BACKOFF_COUNT = Setting.intSetting(
+            "opendistro.alerting.move_alerts_backoff_count",
+            3,
+            Setting.Property.NodeScope,
+            Setting.Property.Dynamic,
+            Setting.Property.Deprecated
+        )
+
+        val ALERT_HISTORY_ENABLED = Setting.boolSetting(
+            "opendistro.alerting.alert_history_enabled",
+            true,
+            Setting.Property.NodeScope,
+            Setting.Property.Dynamic,
+            Setting.Property.Deprecated
+        )
+
+        val ALERT_HISTORY_ROLLOVER_PERIOD = Setting.positiveTimeSetting(
+            "opendistro.alerting.alert_history_rollover_period",
+            TimeValue.timeValueHours(12),
+            Setting.Property.NodeScope,
+            Setting.Property.Dynamic,
+            Setting.Property.Deprecated
+        )
+
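+        // Each legacy setting here is wired as the fallback of the corresponding plugins.*
+        // setting in AlertingSettings, as in this declaration from earlier in the patch:
+        //
+        //     val ALERT_HISTORY_ROLLOVER_PERIOD = Setting.positiveTimeSetting(
+        //         "plugins.alerting.alert_history_rollover_period",
+        //         LegacyOpenDistroAlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD,
+        //         Setting.Property.NodeScope,
+        //         Setting.Property.Dynamic
+        //     )
+        //
+        // so a cluster still configured with the opendistro.* key keeps its value until
+        // the new key is set.
+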
+ val ALERT_HISTORY_INDEX_MAX_AGE = Setting.positiveTimeSetting( + "opendistro.alerting.alert_history_max_age", + TimeValue(30, TimeUnit.DAYS), + Setting.Property.NodeScope, + Setting.Property.Dynamic, + Setting.Property.Deprecated + ) + + val ALERT_HISTORY_MAX_DOCS = Setting.longSetting( + "opendistro.alerting.alert_history_max_docs", + 1000L, + 0L, + Setting.Property.NodeScope, + Setting.Property.Dynamic, + Setting.Property.Deprecated + ) + + val ALERT_HISTORY_RETENTION_PERIOD = Setting.positiveTimeSetting( + "opendistro.alerting.alert_history_retention_period", + TimeValue(60, TimeUnit.DAYS), + Setting.Property.NodeScope, + Setting.Property.Dynamic, + Setting.Property.Deprecated + ) + + val ALERT_FINDING_RETENTION_PERIOD = Setting.positiveTimeSetting( + "opendistro.alerting.alert_finding_retention_period", + TimeValue(60, TimeUnit.DAYS), + Setting.Property.NodeScope, + Setting.Property.Dynamic, + Setting.Property.Deprecated + ) + + val REQUEST_TIMEOUT = Setting.positiveTimeSetting( + "opendistro.alerting.request_timeout", + TimeValue.timeValueSeconds(10), + Setting.Property.NodeScope, + Setting.Property.Dynamic, + Setting.Property.Deprecated + ) + + val MAX_ACTION_THROTTLE_VALUE = Setting.positiveTimeSetting( + "opendistro.alerting.action_throttle_max_value", + TimeValue.timeValueHours(24), + Setting.Property.NodeScope, + Setting.Property.Dynamic, + Setting.Property.Deprecated + ) + + val FILTER_BY_BACKEND_ROLES = Setting.boolSetting( + "opendistro.alerting.filter_by_backend_roles", + false, + Setting.Property.NodeScope, + Setting.Property.Dynamic, + Setting.Property.Deprecated + ) + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/settings/LegacyOpenDistroDestinationSettings.kt b/alerting/bin/main/org/opensearch/alerting/settings/LegacyOpenDistroDestinationSettings.kt new file mode 100644 index 000000000..73bae6463 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/settings/LegacyOpenDistroDestinationSettings.kt @@ -0,0 +1,99 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.settings + +import org.opensearch.alerting.util.DestinationType +import org.opensearch.common.settings.SecureSetting +import org.opensearch.common.settings.Setting +import org.opensearch.common.settings.Settings +import org.opensearch.core.common.settings.SecureString +import java.util.function.Function + +/** + * Settings specific to Destinations. This class is separated from the general AlertingSettings since some Destination + * types require SecureSettings and need additional logic for retrieving and loading them. + */ +class LegacyOpenDistroDestinationSettings { + + companion object { + + const val DESTINATION_SETTING_PREFIX = "opendistro.alerting.destination." + const val EMAIL_DESTINATION_SETTING_PREFIX = DESTINATION_SETTING_PREFIX + "email." 
+        val ALLOW_LIST_ALL = DestinationType.values().toList().map { it.value }
+        val HOST_DENY_LIST_NONE = emptyList<String>()
+
+        val ALLOW_LIST: Setting<List<String>> = Setting.listSetting(
+            DESTINATION_SETTING_PREFIX + "allow_list",
+            ALLOW_LIST_ALL,
+            Function.identity(),
+            Setting.Property.NodeScope,
+            Setting.Property.Dynamic,
+            Setting.Property.Deprecated
+        )
+
+        val EMAIL_USERNAME: Setting.AffixSetting<SecureString> = Setting.affixKeySetting(
+            EMAIL_DESTINATION_SETTING_PREFIX,
+            "username",
+            // The lambda needs to be explicitly coerced to a Function type to avoid an argument mismatch compile error
+            Function { key: String -> SecureSetting.secureString(key, null) }
+        )
+
+        val EMAIL_PASSWORD: Setting.AffixSetting<SecureString> = Setting.affixKeySetting(
+            EMAIL_DESTINATION_SETTING_PREFIX,
+            "password",
+            // The lambda needs to be explicitly coerced to a Function type to avoid an argument mismatch compile error
+            Function { key: String -> SecureSetting.secureString(key, null) }
+        )
+
+        val HOST_DENY_LIST: Setting<List<String>> = Setting.listSetting(
+            "opendistro.destination.host.deny_list",
+            HOST_DENY_LIST_NONE,
+            Function.identity(),
+            Setting.Property.NodeScope,
+            Setting.Property.Final,
+            Setting.Property.Deprecated
+        )
+
+        fun loadLegacyDestinationSettings(settings: Settings): Map<String, SecureDestinationSettings> {
+            // Only loading Email Destination settings for now since those are the only secure settings needed.
+            // If this logic needs to be expanded to support other Destinations, different groups can be retrieved similar
+            // to emailAccountNames based on the setting namespace and SecureDestinationSettings should be expanded to support
+            // these new settings.
+            val emailAccountNames: Set<String> = settings.getGroups(EMAIL_DESTINATION_SETTING_PREFIX).keys
+            val emailAccounts: MutableMap<String, SecureDestinationSettings> = mutableMapOf()
+            for (emailAccountName in emailAccountNames) {
+                // Only adding the settings if they exist
+                getLegacySecureDestinationSettings(settings, emailAccountName)?.let {
+                    emailAccounts[emailAccountName] = it
+                }
+            }
+
+            return emailAccounts
+        }
+
+        private fun getLegacySecureDestinationSettings(settings: Settings, emailAccountName: String): SecureDestinationSettings? {
+            // Using 'use' to emulate Java's try-with-resources on multiple closeable resources.
+            // Values are cloned so that we maintain a SecureString, the original SecureStrings will be closed after
+            // they have left the scope of this function.
+            return getLegacyEmailSettingValue(settings, emailAccountName, EMAIL_USERNAME)?.use { emailUsername ->
+                getLegacyEmailSettingValue(settings, emailAccountName, EMAIL_PASSWORD)?.use { emailPassword ->
+                    SecureDestinationSettings(emailUsername = emailUsername.clone(), emailPassword = emailPassword.clone())
+                }
+            }
+        }
+
+        private fun <T> getLegacyEmailSettingValue(
+            settings: Settings,
+            emailAccountName: String,
+            emailSetting: Setting.AffixSetting<T>
+        ): T?
{ + val concreteSetting = emailSetting.getConcreteSettingForNamespace(emailAccountName) + return concreteSetting.get(settings) + } + + data class SecureDestinationSettings(val emailUsername: SecureString, val emailPassword: SecureString) + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/settings/SupportedClusterMetricsSettings.kt b/alerting/bin/main/org/opensearch/alerting/settings/SupportedClusterMetricsSettings.kt new file mode 100644 index 000000000..a3e2a98c1 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/settings/SupportedClusterMetricsSettings.kt @@ -0,0 +1,154 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.settings + +import org.opensearch.action.ActionRequest +import org.opensearch.action.admin.cluster.health.ClusterHealthRequest +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest +import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest +import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest +import org.opensearch.action.admin.cluster.state.ClusterStateRequest +import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest +import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksRequest +import org.opensearch.action.admin.indices.recovery.RecoveryRequest +import org.opensearch.alerting.util.clusterMetricsMonitorHelpers.CatIndicesRequestWrapper +import org.opensearch.alerting.util.clusterMetricsMonitorHelpers.CatShardsRequestWrapper +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.json.JsonXContent +import org.opensearch.commons.alerting.model.ClusterMetricsInput +import org.opensearch.commons.alerting.model.ClusterMetricsInput.ClusterMetricType +import org.opensearch.commons.alerting.util.IndexUtils.Companion.supportedClusterMetricsSettings + +/** + * A class that supports storing a unique set of API paths that can be accessed by general users. + */ +class SupportedClusterMetricsSettings : org.opensearch.commons.alerting.settings.SupportedClusterMetricsSettings { + companion object { + const val RESOURCE_FILE = "supported_json_payloads.json" + + /** + * The key in this map represents the path to call an API. + * + * NOTE: Paths should conform to the following pattern: + * "/_cluster/stats" + * + * The value in these maps represents a path root mapped to a list of paths to field values. + * If the value mapped to an API is an empty map, no fields will be redacted from the API response. 
+         *
+         * NOTE: Keys in this map should consist of root components of the response body; e.g.,:
+         * "indices"
+         *
+         * Values in these maps should consist of the remaining fields in the path
+         * to the supported value separated by periods; e.g.,:
+         * "shards.total",
+         * "shards.index.shards.min"
+         *
+         * In this example for ClusterStats, the response will only include
+         * the values at the end of these two paths:
+         * "/_cluster/stats": {
+         *      "indices": [
+         *          "shards.total",
+         *          "shards.index.shards.min"
+         *      ]
+         * }
+         */
+        private var supportedApiList = HashMap<String, Map<String, ArrayList<String>>>()
+
+        init {
+            val supportedJsonPayloads = SupportedClusterMetricsSettings::class.java.getResource(RESOURCE_FILE)
+
+            @Suppress("UNCHECKED_CAST")
+            if (supportedJsonPayloads != null) {
+                supportedApiList = XContentHelper.convertToMap(JsonXContent.jsonXContent, supportedJsonPayloads.readText(), false)
+                    as HashMap<String, Map<String, ArrayList<String>>>
+            }
+        }
+
+        /**
+         * Returns the map of all supported json payload associated with the provided path from supportedApiList.
+         * @param path The path for the requested API.
+         * @return The map of the supported json payload for the requested API.
+         * @throws IllegalArgumentException When supportedApiList does not contain a value for the provided key.
+         */
+        fun getSupportedJsonPayload(path: String): Map<String, ArrayList<String>> {
+            return supportedApiList[path] ?: throw IllegalArgumentException("API path not in supportedApiList.")
+        }
+
+        /**
+         * Will return an [ActionRequest] for the API associated with that path.
+         * Will otherwise throw an exception.
+         * @param clusterMetricsInput The [ClusterMetricsInput] to resolve.
+         * @throws IllegalArgumentException when the requested API is not supported.
+         * @return The [ActionRequest] for the API associated with the provided [ClusterMetricsInput].
+         */
+        fun resolveToActionRequest(clusterMetricsInput: ClusterMetricsInput): ActionRequest {
+            val pathParams = clusterMetricsInput.parsePathParams()
+            return when (clusterMetricsInput.clusterMetricType) {
+                ClusterMetricType.CAT_INDICES -> CatIndicesRequestWrapper(pathParams)
+                ClusterMetricType.CAT_PENDING_TASKS -> PendingClusterTasksRequest()
+                ClusterMetricType.CAT_RECOVERY -> {
+                    if (pathParams.isEmpty()) return RecoveryRequest()
+                    val pathParamsArray = pathParams.split(",").toTypedArray()
+                    return RecoveryRequest(*pathParamsArray)
+                }
+                ClusterMetricType.CAT_SHARDS -> CatShardsRequestWrapper(pathParams)
+                ClusterMetricType.CAT_SNAPSHOTS -> {
+                    return GetSnapshotsRequest(pathParams, arrayOf(GetSnapshotsRequest.ALL_SNAPSHOTS))
+                }
+                ClusterMetricType.CAT_TASKS -> ListTasksRequest()
+                ClusterMetricType.CLUSTER_HEALTH -> {
+                    if (pathParams.isEmpty()) return ClusterHealthRequest()
+                    val pathParamsArray = pathParams.split(",").toTypedArray()
+                    return ClusterHealthRequest(*pathParamsArray)
+                }
+                ClusterMetricType.CLUSTER_SETTINGS -> ClusterStateRequest().routingTable(false).nodes(false)
+                ClusterMetricType.CLUSTER_STATS -> {
+                    if (pathParams.isEmpty()) return ClusterStatsRequest()
+                    val pathParamsArray = pathParams.split(",").toTypedArray()
+                    return ClusterStatsRequest(*pathParamsArray)
+                }
+                ClusterMetricType.NODES_STATS -> NodesStatsRequest().addMetrics(
+                    "os",
+                    "process",
+                    "jvm",
+                    "thread_pool",
+                    "fs",
+                    "transport",
+                    "http",
+                    "breaker",
+                    "script",
+                    "discovery",
+                    "ingest",
+                    "adaptive_selection",
+                    "script_cache",
+                    "indexing_pressure",
+                    "shard_indexing_pressure"
+                )
+                else -> throw IllegalArgumentException("Unsupported API.")
+            }
+        }
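+
+        // Sketch of how an input resolves to a transport request; the three-argument
+        // ClusterMetricsInput(path, pathParams, url) shape is assumed here, not taken
+        // from this patch:
+        //
+        //     val input = ClusterMetricsInput("/_cluster/health", "index1,index2", "")
+        //     val request = resolveToActionRequest(input)
+        //     // -> ClusterHealthRequest("index1", "index2"), per the branch above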
+
+        /**
+         * Confirms whether the provided path is in [supportedApiList].
+         * Throws an exception if the provided path is not on the list; otherwise performs no action.
+         * @param clusterMetricsInput The [ClusterMetricsInput] to validate.
+         * @throws IllegalArgumentException when supportedApiList does not contain the provided path.
+         */
+        fun validateApiTyped(clusterMetricsInput: ClusterMetricsInput) {
+            if (!supportedApiList.keys.contains(clusterMetricsInput.clusterMetricType.defaultPath)) {
+                throw IllegalArgumentException("API path not in supportedApiList.")
+            }
+        }
+    }
+
+    constructor() {
+        supportedClusterMetricsSettings = this
+    }
+
+    override fun validateApiType(clusterMetricsInput: ClusterMetricsInput) {
+        validateApiTyped(clusterMetricsInput)
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/settings/supported_json_payloads.json b/alerting/bin/main/org/opensearch/alerting/settings/supported_json_payloads.json
new file mode 100644
index 000000000..a153a67b2
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/settings/supported_json_payloads.json
@@ -0,0 +1,12 @@
+{
+  "/_cat/indices": {},
+  "/_cat/pending_tasks": {},
+  "/_cat/recovery": {},
+  "/_cat/shards": {},
+  "/_cat/snapshots": {},
+  "/_cat/tasks": {},
+  "/_cluster/health": {},
+  "/_cluster/settings": {},
+  "/_cluster/stats": {},
+  "/_nodes/stats": {}
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/transport/SecureTransportAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/SecureTransportAction.kt
new file mode 100644
index 000000000..402780212
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/transport/SecureTransportAction.kt
@@ -0,0 +1,139 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.transport
+
+import org.apache.logging.log4j.LogManager
+import org.opensearch.OpenSearchStatusException
+import org.opensearch.alerting.settings.AlertingSettings
+import org.opensearch.alerting.util.AlertingException
+import org.opensearch.client.Client
+import org.opensearch.cluster.service.ClusterService
+import org.opensearch.commons.ConfigConstants
+import org.opensearch.commons.authuser.User
+import org.opensearch.core.action.ActionListener
+import org.opensearch.core.rest.RestStatus
+
+private val log = LogManager.getLogger(SecureTransportAction::class.java)
+
+/**
+ * TransportAction classes extend this interface to add filter-by-backend-roles functionality.
+ *
+ * 1. If filterBy is enabled
+ *      a) Don't allow creating a monitor/destination (throw an error) if the logged-on user has no backend roles configured.
+ *
+ * 2. If filterBy is enabled & monitors are created when filterBy is disabled:
+ *      a) If backend_roles are saved with the config, results will get filtered and data is shown
+ *      b) If backend_roles are not saved with the monitor config, results will get filtered and no monitors
+ *         will be displayed.
+ *      c) Users can edit and save the monitors to associate their backend_roles.
+ *
+ * 3. If filterBy is enabled & monitors are created by an older version:
+ *      a) No User details are present on the monitor.
+ *      b) No monitors will be displayed.
+ *      c) Users can edit and save the monitors to associate their backend_roles.
+ */
+interface SecureTransportAction {
+
+    var filterByEnabled: Boolean
+
+    fun listenFilterBySettingChange(clusterService: ClusterService) {
+        clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.FILTER_BY_BACKEND_ROLES) { filterByEnabled = it }
+    }
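+
+    // The pattern implementers use (mirrored by the transport actions later in this patch):
+    // read the initial value from the node's settings, then register the consumer so the
+    // @Volatile flag tracks dynamic updates to the cluster setting:
+    //
+    //     @Volatile override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings)
+    //
+    //     init {
+    //         listenFilterBySettingChange(clusterService)
+    //     }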
+
+    fun readUserFromThreadContext(client: Client): User? {
+        val userStr = client.threadPool().threadContext.getTransient<String>(ConfigConstants.OPENSEARCH_SECURITY_USER_INFO_THREAD_CONTEXT)
+        log.debug("User and roles string from thread context: $userStr")
+        return User.parse(userStr)
+    }
+
+    fun doFilterForUser(user: User?): Boolean {
+        log.debug("Is filterByEnabled: $filterByEnabled ; Is admin user: ${isAdmin(user)}")
+        return if (isAdmin(user)) {
+            false
+        } else {
+            filterByEnabled
+        }
+    }
+
+    /**
+     * 'all_access' role users are treated as admins.
+     */
+    fun isAdmin(user: User?): Boolean {
+        return when {
+            user == null -> {
+                false
+            }
+            user.roles?.isNullOrEmpty() == true -> {
+                false
+            }
+            else -> {
+                user.roles?.contains("all_access") == true
+            }
+        }
+    }
+
+    fun <T> validateUserBackendRoles(user: User?, actionListener: ActionListener<T>): Boolean {
+        if (filterByEnabled) {
+            if (user == null) {
+                actionListener.onFailure(
+                    AlertingException.wrap(
+                        OpenSearchStatusException(
+                            "Filter by user backend roles is enabled with security disabled.",
+                            RestStatus.FORBIDDEN
+                        )
+                    )
+                )
+                return false
+            } else if (isAdmin(user)) {
+                return true
+            } else if (user.backendRoles.isNullOrEmpty()) {
+                actionListener.onFailure(
+                    AlertingException.wrap(
+                        OpenSearchStatusException("User doesn't have backend roles configured. Contact administrator", RestStatus.FORBIDDEN)
+                    )
+                )
+                return false
+            }
+        }
+        return true
+    }
+
+    /**
+     * If FilterBy is enabled, this function verifies that the requesting user has FilterBy permissions to access
+     * the resource. If FilterBy is disabled, we will assume the user has permissions and return true.
+     *
+     * This check will later be moved to the security plugin.
+     */
+    fun <T> checkUserPermissionsWithResource(
+        requesterUser: User?,
+        resourceUser: User?,
+        actionListener: ActionListener<T>,
+        resourceType: String,
+        resourceId: String
+    ): Boolean {
+        if (!doFilterForUser(requesterUser)) return true
+
+        val resourceBackendRoles = resourceUser?.backendRoles
+        val requesterBackendRoles = requesterUser?.backendRoles
+
+        if (
+            resourceBackendRoles == null ||
+            requesterBackendRoles == null ||
+            resourceBackendRoles.intersect(requesterBackendRoles).isEmpty()
+        ) {
+            actionListener.onFailure(
+                AlertingException.wrap(
+                    OpenSearchStatusException(
+                        "Do not have permissions to resource, $resourceType, with id, $resourceId",
+                        RestStatus.FORBIDDEN
+                    )
+                )
+            )
+            return false
+        }
+        return true
+    }
+}
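The interface's checks compose into one sequence that the transport actions later in this
patch follow; a condensed sketch, where `resource` and `resourceId` are placeholders for
whatever the concrete action fetched:

    val user = readUserFromThreadContext(client)
    if (!validateUserBackendRoles(user, actionListener)) return
    // after fetching the resource:
    val canAccess = user == null || !doFilterForUser(user) ||
        checkUserPermissionsWithResource(user, resource.user, actionListener, "monitor", resourceId)
    if (!canAccess) return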
diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt
new file mode 100644
index 000000000..a94a682d3
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt
@@ -0,0 +1,268 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.transport
+
+import kotlinx.coroutines.CoroutineScope
+import kotlinx.coroutines.Dispatchers
+import kotlinx.coroutines.launch
+import org.apache.logging.log4j.LogManager
+import org.opensearch.ResourceNotFoundException
+import org.opensearch.action.ActionRequest
+import org.opensearch.action.bulk.BulkRequest
+import org.opensearch.action.bulk.BulkResponse
+import org.opensearch.action.delete.DeleteRequest
+import org.opensearch.action.index.IndexRequest
+import org.opensearch.action.search.SearchRequest
+import org.opensearch.action.search.SearchResponse
+import org.opensearch.action.support.ActionFilters
+import org.opensearch.action.support.HandledTransportAction
+import org.opensearch.action.update.UpdateRequest
+import org.opensearch.alerting.opensearchapi.suspendUntil
+import org.opensearch.alerting.settings.AlertingSettings
+import org.opensearch.alerting.util.AlertingException
+import org.opensearch.client.Client
+import org.opensearch.cluster.service.ClusterService
+import org.opensearch.common.inject.Inject
+import org.opensearch.common.settings.Settings
+import org.opensearch.common.xcontent.LoggingDeprecationHandler
+import org.opensearch.common.xcontent.XContentFactory
+import org.opensearch.common.xcontent.XContentHelper
+import org.opensearch.common.xcontent.XContentType
+import org.opensearch.commons.alerting.action.AcknowledgeAlertRequest
+import org.opensearch.commons.alerting.action.AcknowledgeAlertResponse
+import org.opensearch.commons.alerting.action.AlertingActions
+import org.opensearch.commons.alerting.action.GetMonitorRequest
+import org.opensearch.commons.alerting.action.GetMonitorResponse
+import org.opensearch.commons.alerting.model.Alert
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.commons.alerting.util.optionalTimeField
+import org.opensearch.commons.utils.recreateObject
+import org.opensearch.core.action.ActionListener
+import org.opensearch.core.xcontent.NamedXContentRegistry
+import org.opensearch.core.xcontent.XContentParser
+import org.opensearch.core.xcontent.XContentParserUtils
+import org.opensearch.index.query.QueryBuilders
+import org.opensearch.rest.RestRequest
+import org.opensearch.search.builder.SearchSourceBuilder
+import org.opensearch.search.fetch.subphase.FetchSourceContext
+import org.opensearch.tasks.Task
+import org.opensearch.transport.TransportService
+import java.time.Instant
+import java.util.Locale
+
+private val log = LogManager.getLogger(TransportAcknowledgeAlertAction::class.java)
+private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO)
+
+class TransportAcknowledgeAlertAction @Inject constructor(
+    transportService: TransportService,
+    val client: Client,
+    clusterService: ClusterService,
+    actionFilters: ActionFilters,
+    val settings: Settings,
+    val xContentRegistry: NamedXContentRegistry,
+    val transportGetMonitorAction: TransportGetMonitorAction
+) : HandledTransportAction<ActionRequest, AcknowledgeAlertResponse>(
+    AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_NAME,
+    transportService,
+    actionFilters,
+    ::AcknowledgeAlertRequest
+) {
+
+    @Volatile
+    private var isAlertHistoryEnabled = AlertingSettings.ALERT_HISTORY_ENABLED.get(settings)
+
+    init {
+        clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.ALERT_HISTORY_ENABLED) { isAlertHistoryEnabled = it }
+    }
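+
+    // recreateObject(from) { input -> ... } round-trips `from` through a serialize/deserialize
+    // pass and rebuilds it with the supplied reader, which is how a request that arrives as
+    // another plugin's concrete class becomes this plugin's AcknowledgeAlertRequest in
+    // doExecute below. A simplified sketch of the mechanism (the commons implementation may
+    // differ in details such as registry handling):
+    //
+    //     fun <T> recreateObject(writeable: Writeable, reader: (StreamInput) -> T): T =
+    //         BytesStreamOutput().use { out ->
+    //             writeable.writeTo(out)
+    //             reader(out.bytes().streamInput())
+    //         }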
+
+    override fun doExecute(
+        task: Task,
+        acknowledgeAlertRequest: ActionRequest,
+        actionListener: ActionListener<AcknowledgeAlertResponse>
+    ) {
+        val request = acknowledgeAlertRequest as? AcknowledgeAlertRequest
+            ?: recreateObject(acknowledgeAlertRequest) { AcknowledgeAlertRequest(it) }
+        client.threadPool().threadContext.stashContext().use {
+            scope.launch {
+                val getMonitorResponse: GetMonitorResponse =
+                    transportGetMonitorAction.client.suspendUntil {
+                        val getMonitorRequest = GetMonitorRequest(
+                            monitorId = request.monitorId,
+                            -3L,
+                            RestRequest.Method.GET,
+                            FetchSourceContext.FETCH_SOURCE
+                        )
+                        execute(AlertingActions.GET_MONITOR_ACTION_TYPE, getMonitorRequest, it)
+                    }
+                if (getMonitorResponse.monitor == null) {
+                    actionListener.onFailure(
+                        AlertingException.wrap(
+                            ResourceNotFoundException(
+                                String.format(
+                                    Locale.ROOT,
+                                    "No monitor found with id [%s]",
+                                    request.monitorId
+                                )
+                            )
+                        )
+                    )
+                } else {
+                    AcknowledgeHandler(client, actionListener, request).start(getMonitorResponse.monitor!!)
+                }
+            }
+        }
+    }
+
+    inner class AcknowledgeHandler(
+        private val client: Client,
+        private val actionListener: ActionListener<AcknowledgeAlertResponse>,
+        private val request: AcknowledgeAlertRequest
+    ) {
+        val alerts = mutableMapOf<String, Alert>()
+
+        suspend fun start(monitor: Monitor) = findActiveAlerts(monitor)
+
+        private suspend fun findActiveAlerts(monitor: Monitor) {
+            val queryBuilder = QueryBuilders.boolQuery()
+                .filter(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, request.monitorId))
+                .filter(QueryBuilders.termsQuery("_id", request.alertIds))
+            val searchRequest = SearchRequest()
+                .indices(monitor.dataSources.alertsIndex)
+                .routing(request.monitorId)
+                .source(
+                    SearchSourceBuilder()
+                        .query(queryBuilder)
+                        .version(true)
+                        .seqNoAndPrimaryTerm(true)
+                        .size(request.alertIds.size)
+                )
+            try {
+                val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) }
+                onSearchResponse(searchResponse, monitor)
+            } catch (t: Exception) {
+                actionListener.onFailure(AlertingException.wrap(t))
+            }
+        }
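+
+        // The search above asks for seqNo/primaryTerm so the updates built in onSearchResponse
+        // are conditional writes: if anything else modifies the alert between the search and
+        // the update, the bulk item fails instead of overwriting the newer state. Sketch of
+        // the guard used below:
+        //
+        //     UpdateRequest(alertsIndex, alert.id)
+        //         .setIfSeqNo(hit.seqNo)
+        //         .setIfPrimaryTerm(hit.primaryTerm)
+        //         .doc(/* state -> ACKNOWLEDGED */)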
+
+        private suspend fun onSearchResponse(response: SearchResponse, monitor: Monitor) {
+            val alertsHistoryIndex = monitor.dataSources.alertsHistoryIndex
+            val updateRequests = mutableListOf<UpdateRequest>()
+            val copyRequests = mutableListOf<IndexRequest>()
+            response.hits.forEach { hit ->
+                val xcp = XContentHelper.createParser(
+                    xContentRegistry,
+                    LoggingDeprecationHandler.INSTANCE,
+                    hit.sourceRef,
+                    XContentType.JSON
+                )
+                XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp)
+                val alert = Alert.parse(xcp, hit.id, hit.version)
+                alerts[alert.id] = alert
+
+                if (alert.state == Alert.State.ACTIVE) {
+                    if (
+                        alert.findingIds.isEmpty() ||
+                        !isAlertHistoryEnabled
+                    ) {
+                        val updateRequest = UpdateRequest(monitor.dataSources.alertsIndex, alert.id)
+                            .routing(request.monitorId)
+                            .setIfSeqNo(hit.seqNo)
+                            .setIfPrimaryTerm(hit.primaryTerm)
+                            .doc(
+                                XContentFactory.jsonBuilder().startObject()
+                                    .field(Alert.STATE_FIELD, Alert.State.ACKNOWLEDGED.toString())
+                                    .optionalTimeField(Alert.ACKNOWLEDGED_TIME_FIELD, Instant.now())
+                                    .endObject()
+                            )
+                        updateRequests.add(updateRequest)
+                    } else {
+                        val copyRequest = IndexRequest(alertsHistoryIndex)
+                            .routing(request.monitorId)
+                            .id(alert.id)
+                            .source(
+                                alert.copy(state = Alert.State.ACKNOWLEDGED, acknowledgedTime = Instant.now())
+                                    .toXContentWithUser(XContentFactory.jsonBuilder())
+                            )
+                        copyRequests.add(copyRequest)
+                    }
+                }
+            }
+
+            try {
+                val updateResponse: BulkResponse? = if (updateRequests.isNotEmpty()) {
+                    client.suspendUntil {
+                        client.bulk(BulkRequest().add(updateRequests).setRefreshPolicy(request.refreshPolicy), it)
+                    }
+                } else null
+                val copyResponse: BulkResponse? = if (copyRequests.isNotEmpty()) {
+                    client.suspendUntil {
+                        client.bulk(BulkRequest().add(copyRequests).setRefreshPolicy(request.refreshPolicy), it)
+                    }
+                } else null
+                onBulkResponse(updateResponse, copyResponse, monitor)
+            } catch (t: Exception) {
+                log.error("ack error: ${t.message}")
+                actionListener.onFailure(AlertingException.wrap(t))
+            }
+        }
+
+        private suspend fun onBulkResponse(updateResponse: BulkResponse?, copyResponse: BulkResponse?, monitor: Monitor) {
+            val deleteRequests = mutableListOf<DeleteRequest>()
+            val missing = request.alertIds.toMutableSet()
+            val acknowledged = mutableListOf<Alert>()
+            val failed = mutableListOf<Alert>()
+
+            alerts.values.forEach {
+                if (it.state != Alert.State.ACTIVE) {
+                    missing.remove(it.id)
+                    failed.add(it)
+                }
+            }
+
+            updateResponse?.items?.forEach { item ->
+                missing.remove(item.id)
+                if (item.isFailed) {
+                    failed.add(alerts[item.id]!!)
+                } else {
+                    acknowledged.add(alerts[item.id]!!)
+                }
+            }
+
+            copyResponse?.items?.forEach { item ->
+                log.info("got a copyResponse: $item")
+                missing.remove(item.id)
+                if (item.isFailed) {
+                    log.info("got a failureResponse: ${item.failureMessage}")
+                    failed.add(alerts[item.id]!!)
+                } else {
+                    val deleteRequest = DeleteRequest(monitor.dataSources.alertsIndex, item.id)
+                        .routing(request.monitorId)
+                    deleteRequests.add(deleteRequest)
+                }
+            }
+
+            if (deleteRequests.isNotEmpty()) {
+                try {
+                    val deleteResponse: BulkResponse = client.suspendUntil {
+                        client.bulk(BulkRequest().add(deleteRequests).setRefreshPolicy(request.refreshPolicy), it)
+                    }
+                    deleteResponse.items.forEach { item ->
+                        missing.remove(item.id)
+                        if (item.isFailed) {
+                            failed.add(alerts[item.id]!!)
+                        } else {
+                            acknowledged.add(alerts[item.id]!!)
+                        }
+                    }
+                } catch (t: Exception) {
+                    actionListener.onFailure(AlertingException.wrap(t))
+                    return
+                }
+            }
+            actionListener.onResponse(AcknowledgeAlertResponse(acknowledged.toList(), failed.toList(), missing.toList()))
+        }
+    }
+}
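AcknowledgeAlertResponse partitions the requested ids three ways: acknowledged, failed
(alerts that were no longer active or whose bulk item failed), and missing (ids that matched
no hit). A caller-side sketch, assuming the commons response exposes these lists as
properties:

    val response: AcknowledgeAlertResponse = /* result of the action */
    if (response.missing.isNotEmpty()) log.warn("Unknown alert ids: ${response.missing}")
    if (response.failed.isNotEmpty()) log.warn("Could not acknowledge: ${response.failed.map { it.id }}")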
diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportAcknowledgeChainedAlertAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportAcknowledgeChainedAlertAction.kt
new file mode 100644
index 000000000..26da7f644
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/transport/TransportAcknowledgeChainedAlertAction.kt
@@ -0,0 +1,296 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.transport
+
+import kotlinx.coroutines.CoroutineScope
+import kotlinx.coroutines.Dispatchers
+import kotlinx.coroutines.launch
+import org.apache.logging.log4j.LogManager
+import org.opensearch.OpenSearchStatusException
+import org.opensearch.ResourceNotFoundException
+import org.opensearch.action.ActionRequest
+import org.opensearch.action.bulk.BulkRequest
+import org.opensearch.action.bulk.BulkResponse
+import org.opensearch.action.delete.DeleteRequest
+import org.opensearch.action.get.GetRequest
+import org.opensearch.action.get.GetResponse
+import org.opensearch.action.index.IndexRequest
+import org.opensearch.action.search.SearchRequest
+import org.opensearch.action.search.SearchResponse
+import org.opensearch.action.support.ActionFilters
+import org.opensearch.action.support.HandledTransportAction
+import org.opensearch.action.support.WriteRequest
+import org.opensearch.action.update.UpdateRequest
+import org.opensearch.alerting.opensearchapi.suspendUntil
+import org.opensearch.alerting.settings.AlertingSettings
+import org.opensearch.alerting.util.AlertingException
+import org.opensearch.alerting.util.ScheduledJobUtils
+import org.opensearch.client.Client
+import org.opensearch.cluster.service.ClusterService
+import org.opensearch.common.inject.Inject
+import org.opensearch.common.settings.Settings
+import org.opensearch.common.xcontent.LoggingDeprecationHandler
+import org.opensearch.common.xcontent.XContentFactory
+import org.opensearch.common.xcontent.XContentHelper
+import org.opensearch.common.xcontent.XContentType
+import org.opensearch.commons.alerting.action.AcknowledgeAlertResponse
+import org.opensearch.commons.alerting.action.AcknowledgeChainedAlertRequest
+import org.opensearch.commons.alerting.action.AlertingActions
+import org.opensearch.commons.alerting.model.Alert
+import org.opensearch.commons.alerting.model.CompositeInput
+import org.opensearch.commons.alerting.model.DataSources
+import org.opensearch.commons.alerting.model.ScheduledJob
+import org.opensearch.commons.alerting.model.Workflow
+import org.opensearch.commons.alerting.util.optionalTimeField
+import org.opensearch.commons.utils.recreateObject
+import org.opensearch.core.action.ActionListener
+import org.opensearch.core.rest.RestStatus
+import org.opensearch.core.xcontent.NamedXContentRegistry
+import org.opensearch.core.xcontent.XContentParser
+import org.opensearch.core.xcontent.XContentParserUtils
+import org.opensearch.index.query.QueryBuilders
+import org.opensearch.search.builder.SearchSourceBuilder
+import org.opensearch.tasks.Task
+import org.opensearch.transport.TransportService
+import java.time.Instant
+import java.util.Locale
+
+private val log = LogManager.getLogger(TransportAcknowledgeChainedAlertAction::class.java)
+private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO)
+
+class TransportAcknowledgeChainedAlertAction @Inject constructor(
+    transportService: TransportService,
+    val client: Client,
+    clusterService: ClusterService,
+    actionFilters: ActionFilters,
+    val settings: Settings,
+    val xContentRegistry: NamedXContentRegistry,
+) : HandledTransportAction<ActionRequest, AcknowledgeAlertResponse>(
+    AlertingActions.ACKNOWLEDGE_CHAINED_ALERTS_ACTION_NAME,
+    transportService,
+    actionFilters,
+    ::AcknowledgeChainedAlertRequest
+) {
+    @Volatile
+    private var isAlertHistoryEnabled = AlertingSettings.ALERT_HISTORY_ENABLED.get(settings)
+
+    init {
+        clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.ALERT_HISTORY_ENABLED) { isAlertHistoryEnabled = it }
+    }
+
+    override fun doExecute(
+        task: Task,
+        acknowledgeChainedAlertRequest: ActionRequest,
+        actionListener: ActionListener<AcknowledgeAlertResponse>,
+    ) {
+        val request = acknowledgeChainedAlertRequest as? AcknowledgeChainedAlertRequest
+            ?: recreateObject(acknowledgeChainedAlertRequest) { AcknowledgeChainedAlertRequest(it) }
+        client.threadPool().threadContext.stashContext().use {
+            scope.launch {
+                try {
+                    val getResponse = getWorkflow(request.workflowId)
+                    if (getResponse.isExists == false) {
+                        actionListener.onFailure(
+                            AlertingException.wrap(
+                                ResourceNotFoundException(
+                                    String.format(
+                                        Locale.ROOT,
+                                        "No workflow found with id [%s]",
+                                        request.workflowId
+                                    )
+                                )
+                            )
+                        )
+                    } else {
+                        val workflow = ScheduledJobUtils.parseWorkflowFromScheduledJobDocSource(xContentRegistry, getResponse)
+                        AcknowledgeHandler(client, actionListener, request).start(workflow = workflow)
+                    }
+                } catch (e: Exception) {
+                    log.error("Failed to acknowledge chained alerts from request $request", e)
+                    actionListener.onFailure(AlertingException.wrap(e))
+                }
+            }
+        }
+    }
+
+    private suspend fun getWorkflow(workflowId: String): GetResponse {
+        return client.suspendUntil { client.get(GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, workflowId), it) }
+    }
+
+    inner class AcknowledgeHandler(
+        private val client: Client,
+        private val actionListener: ActionListener<AcknowledgeAlertResponse>,
+        private val request: AcknowledgeChainedAlertRequest,
+    ) {
+        val alerts = mutableMapOf<String, Alert>()
+
+        suspend fun start(workflow: Workflow) = findActiveAlerts(workflow)
+
+        private suspend fun findActiveAlerts(workflow: Workflow) {
+            try {
+                val queryBuilder = QueryBuilders.boolQuery()
+                    .must(
+                        QueryBuilders.wildcardQuery("workflow_id", request.workflowId)
+                    )
+                    .must(QueryBuilders.termsQuery("_id", request.alertIds))
+                if (workflow.inputs.isEmpty() || (workflow.inputs[0] is CompositeInput) == false) {
+                    actionListener.onFailure(
+                        OpenSearchStatusException("Workflow ${workflow.id} is invalid", RestStatus.INTERNAL_SERVER_ERROR)
+                    )
+                    return
+                }
+                val compositeInput = workflow.inputs[0] as CompositeInput
+                val monitorId = compositeInput.sequence.delegates[0].monitorId
+                val dataSources: DataSources = getDataSources(monitorId)
+                val searchRequest = SearchRequest()
+                    .indices(dataSources.alertsIndex)
+                    .routing(request.workflowId)
+                    .source(
+                        SearchSourceBuilder()
+                            .query(queryBuilder)
+                            .version(true)
+                            .seqNoAndPrimaryTerm(true)
+                            .size(request.alertIds.size)
+                    )
+
+                val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) }
+                onSearchResponse(searchResponse, workflow, dataSources)
+            } catch (t: Exception) {
+                log.error("Failed to acknowledge chained alert ${request.alertIds} for workflow ${request.workflowId}", t)
+                actionListener.onFailure(AlertingException.wrap(t))
+            }
+        }
+
+        private suspend fun getDataSources(monitorId: String): DataSources {
+            val getResponse: GetResponse = client.suspendUntil { client.get(GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, monitorId), it) }
+            return ScheduledJobUtils.parseMonitorFromScheduledJobDocSource(xContentRegistry, getResponse).dataSources
+        }
+
+        private suspend fun onSearchResponse(response: SearchResponse, workflow: Workflow, dataSources: DataSources) {
+            val alertsHistoryIndex = dataSources.alertsHistoryIndex
+            val updateRequests = mutableListOf<UpdateRequest>()
+            val copyRequests = mutableListOf<IndexRequest>()
+            response.hits.forEach { hit ->
+                val xcp = XContentHelper.createParser(
+                    xContentRegistry,
+                    LoggingDeprecationHandler.INSTANCE,
+                    hit.sourceRef,
+                    XContentType.JSON
+                )
+                XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp)
+                val alert = Alert.parse(xcp, hit.id, hit.version)
+                alerts[alert.id] = alert
+
+                if (alert.state == Alert.State.ACTIVE) {
+                    if (
+                        alert.findingIds.isEmpty() ||
+                        !isAlertHistoryEnabled
+                    ) {
+                        val updateRequest = UpdateRequest(dataSources.alertsIndex, alert.id)
+                            .routing(request.workflowId)
+                            .setIfSeqNo(hit.seqNo)
+                            .setIfPrimaryTerm(hit.primaryTerm)
+                            .doc(
+                                XContentFactory.jsonBuilder().startObject()
+                                    .field(Alert.STATE_FIELD, Alert.State.ACKNOWLEDGED.toString())
+                                    .optionalTimeField(Alert.ACKNOWLEDGED_TIME_FIELD, Instant.now())
+                                    .endObject()
+                            )
+                        updateRequests.add(updateRequest)
+                    } else {
+                        val copyRequest = IndexRequest(alertsHistoryIndex)
+                            .routing(request.workflowId)
+                            .id(alert.id)
+                            .source(
+                                alert.copy(state = Alert.State.ACKNOWLEDGED, acknowledgedTime = Instant.now())
+                                    .toXContentWithUser(XContentFactory.jsonBuilder())
+                            )
+                        copyRequests.add(copyRequest)
+                    }
+                }
+            }
+
+            try {
+                val updateResponse: BulkResponse? = if (updateRequests.isNotEmpty()) {
+                    client.suspendUntil {
+                        client.bulk(BulkRequest().add(updateRequests).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), it)
+                    }
+                } else null
+                val copyResponse: BulkResponse? = if (copyRequests.isNotEmpty()) {
+                    client.suspendUntil {
+                        client.bulk(BulkRequest().add(copyRequests).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), it)
+                    }
+                } else null
+                onBulkResponse(updateResponse, copyResponse, dataSources)
+            } catch (t: Exception) {
+                log.error("Failed to acknowledge chained alert ${request.alertIds} for workflow ${request.workflowId}", t)
+                actionListener.onFailure(AlertingException.wrap(t))
+            }
+        }
+
+        private suspend fun onBulkResponse(updateResponse: BulkResponse?, copyResponse: BulkResponse?, dataSources: DataSources) {
+            val deleteRequests = mutableListOf<DeleteRequest>()
+            val acknowledged = mutableListOf<Alert>()
+            val missing = request.alertIds.toMutableSet()
+            val failed = mutableListOf<Alert>()
+
+            alerts.values.forEach {
+                if (it.state != Alert.State.ACTIVE) {
+                    missing.remove(it.id)
+                    failed.add(it)
+                }
+            }
+
+            updateResponse?.items?.forEach { item ->
+                missing.remove(item.id)
+                if (item.isFailed) {
+                    failed.add(alerts[item.id]!!)
+                } else {
+                    acknowledged.add(alerts[item.id]!!)
+                }
+            }
+
+            copyResponse?.items?.forEach { item ->
+                log.info("got a copyResponse: $item")
+                missing.remove(item.id)
+                if (item.isFailed) {
+                    log.info("got a failureResponse: ${item.failureMessage}")
+                    failed.add(alerts[item.id]!!)
+                } else {
+                    val deleteRequest = DeleteRequest(dataSources.alertsIndex, item.id)
+                        .routing(request.workflowId)
+                    deleteRequests.add(deleteRequest)
+                }
+            }
+
+            if (deleteRequests.isNotEmpty()) {
+                try {
+                    val deleteResponse: BulkResponse = client.suspendUntil {
+                        client.bulk(BulkRequest().add(deleteRequests).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), it)
+                    }
+                    deleteResponse.items.forEach { item ->
+                        missing.remove(item.id)
+                        if (item.isFailed) {
+                            failed.add(alerts[item.id]!!)
+                        } else {
+                            acknowledged.add(alerts[item.id]!!)
+                        }
+                    }
+                } catch (t: Exception) {
+                    actionListener.onFailure(AlertingException.wrap(t))
+                    return
+                }
+            }
+            actionListener.onResponse(
+                AcknowledgeAlertResponse(
+                    acknowledged.toList(),
+                    failed.toList(),
+                    missing.toList()
+                )
+            )
+        }
+    }
+}
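Both acknowledge actions move an acknowledged alert that has findings into the history index
in two phases: index a copy into alertsHistoryIndex first, and delete the original from
alertsIndex only for items whose copy succeeded, so a failure between the phases can
duplicate an alert but never lose one. Condensed sketch of the request pair built above,
where `routingId` stands for the monitor or workflow id used as the routing value:

    // phase 1: copy into history (same id, same routing key)
    IndexRequest(alertsHistoryIndex).routing(routingId).id(alert.id).source(ackedSource)
    // phase 2: delete the original only after the copy's bulk item succeeds
    DeleteRequest(alertsIndex, alert.id).routing(routingId)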
diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt
new file mode 100644
index 000000000..321c2e162
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt
@@ -0,0 +1,141 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.transport
+
+import kotlinx.coroutines.CoroutineScope
+import kotlinx.coroutines.Dispatchers
+import kotlinx.coroutines.launch
+import org.apache.logging.log4j.LogManager
+import org.opensearch.OpenSearchStatusException
+import org.opensearch.action.ActionRequest
+import org.opensearch.action.get.GetRequest
+import org.opensearch.action.get.GetResponse
+import org.opensearch.action.support.ActionFilters
+import org.opensearch.action.support.HandledTransportAction
+import org.opensearch.action.support.WriteRequest.RefreshPolicy
+import org.opensearch.alerting.opensearchapi.suspendUntil
+import org.opensearch.alerting.service.DeleteMonitorService
+import org.opensearch.alerting.settings.AlertingSettings
+import org.opensearch.alerting.util.AlertingException
+import org.opensearch.client.Client
+import org.opensearch.cluster.service.ClusterService
+import org.opensearch.common.inject.Inject
+import org.opensearch.common.settings.Settings
+import org.opensearch.common.xcontent.LoggingDeprecationHandler
+import org.opensearch.common.xcontent.XContentHelper
+import org.opensearch.common.xcontent.XContentType
+import org.opensearch.commons.alerting.action.AlertingActions
+import org.opensearch.commons.alerting.action.DeleteMonitorRequest
+import org.opensearch.commons.alerting.action.DeleteMonitorResponse
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.commons.alerting.model.ScheduledJob
+import org.opensearch.commons.authuser.User
+import org.opensearch.commons.utils.recreateObject
+import org.opensearch.core.action.ActionListener
+import org.opensearch.core.rest.RestStatus
+import org.opensearch.core.xcontent.NamedXContentRegistry
+import org.opensearch.tasks.Task
+import org.opensearch.transport.TransportService
+
+private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO)
+private val log = LogManager.getLogger(TransportDeleteMonitorAction::class.java)
+
+class TransportDeleteMonitorAction @Inject constructor(
+    transportService: TransportService,
+    val client: Client,
+    actionFilters: ActionFilters,
+    val clusterService: ClusterService,
+    settings: Settings,
+    val xContentRegistry: NamedXContentRegistry
+) : HandledTransportAction<ActionRequest, DeleteMonitorResponse>(
+    AlertingActions.DELETE_MONITOR_ACTION_NAME,
+    transportService,
+    actionFilters,
+    ::DeleteMonitorRequest
+),
+    SecureTransportAction {
+
+    @Volatile override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings)
+
+    init {
+        listenFilterBySettingChange(clusterService)
+    }
+
+    override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener<DeleteMonitorResponse>) {
+        val transformedRequest = request as? DeleteMonitorRequest
+            ?: recreateObject(request) { DeleteMonitorRequest(it) }
+        val user = readUserFromThreadContext(client)
+
+        if (!validateUserBackendRoles(user, actionListener)) {
+            return
+        }
+        scope.launch {
+            DeleteMonitorHandler(
+                client,
+                actionListener,
+                user,
+                transformedRequest.monitorId
+            ).resolveUserAndStart(transformedRequest.refreshPolicy)
+        }
+    }
+
+    inner class DeleteMonitorHandler(
+        private val client: Client,
+        private val actionListener: ActionListener<DeleteMonitorResponse>,
+        private val user: User?,
+        private val monitorId: String
+    ) {
+        suspend fun resolveUserAndStart(refreshPolicy: RefreshPolicy) {
+            try {
+                val monitor = getMonitor()
+
+                val canDelete = user == null || !doFilterForUser(user) ||
+                    checkUserPermissionsWithResource(user, monitor.user, actionListener, "monitor", monitorId)
+
+                if (DeleteMonitorService.monitorIsWorkflowDelegate(monitor.id)) {
+                    actionListener.onFailure(
+                        AlertingException(
+                            "Monitor can't be deleted because it is a part of workflow(s)",
+                            RestStatus.FORBIDDEN,
+                            IllegalStateException()
+                        )
+                    )
+                } else if (canDelete) {
+                    actionListener.onResponse(
+                        DeleteMonitorService.deleteMonitor(monitor, refreshPolicy)
+                    )
+                } else {
+                    actionListener.onFailure(
+                        AlertingException("Not allowed to delete this monitor!", RestStatus.FORBIDDEN, IllegalStateException())
+                    )
+                }
+            } catch (t: Exception) {
+                log.error("Failed to delete monitor $monitorId", t)
+                actionListener.onFailure(AlertingException.wrap(t))
+            }
+        }
+
+        private suspend fun getMonitor(): Monitor {
+            val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, monitorId)
+
+            val getResponse: GetResponse = client.suspendUntil { get(getRequest, it) }
+            if (getResponse.isExists == false) {
+                actionListener.onFailure(
+                    AlertingException.wrap(
+                        OpenSearchStatusException("Monitor with $monitorId is not found", RestStatus.NOT_FOUND)
+                    )
+                )
+            }
+            val xcp = XContentHelper.createParser(
+                xContentRegistry,
+                LoggingDeprecationHandler.INSTANCE,
+                getResponse.sourceAsBytesRef,
+                XContentType.JSON
+            )
+            return ScheduledJob.parse(xcp, getResponse.id, getResponse.version) as Monitor
+        }
+    }
+}
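The guard above is the only workflow-awareness this action needs; anything that deletes
monitors directly can reuse the same check. A sketch using the two service calls that
appear in this patch:

    if (DeleteMonitorService.monitorIsWorkflowDelegate(monitor.id)) {
        // refuse with FORBIDDEN instead of deleting, as the handler above does
    } else {
        DeleteMonitorService.deleteMonitor(monitor, refreshPolicy)
    }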
diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportDeleteWorkflowAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportDeleteWorkflowAction.kt
new file mode 100644
index 000000000..9b076a600
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/transport/TransportDeleteWorkflowAction.kt
@@ -0,0 +1,327 @@
+
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.transport
+
+import kotlinx.coroutines.CoroutineScope
+import kotlinx.coroutines.Dispatchers
+import kotlinx.coroutines.launch
+import org.apache.logging.log4j.LogManager
+import org.apache.lucene.search.join.ScoreMode
+import org.opensearch.OpenSearchException
+import org.opensearch.OpenSearchStatusException
+import org.opensearch.action.ActionRequest
+import org.opensearch.action.delete.DeleteRequest
+import org.opensearch.action.delete.DeleteResponse
+import org.opensearch.action.get.GetRequest
+import org.opensearch.action.get.GetResponse
+import org.opensearch.action.search.SearchRequest
+import org.opensearch.action.search.SearchResponse
+import org.opensearch.action.support.ActionFilters
+import org.opensearch.action.support.HandledTransportAction
+import org.opensearch.action.support.WriteRequest.RefreshPolicy
+import org.opensearch.alerting.model.MonitorMetadata
+import org.opensearch.alerting.model.WorkflowMetadata
+import org.opensearch.alerting.opensearchapi.addFilter
+import org.opensearch.alerting.opensearchapi.suspendUntil
+import org.opensearch.alerting.service.DeleteMonitorService
+import org.opensearch.alerting.settings.AlertingSettings
+import org.opensearch.alerting.util.AlertingException
+import org.opensearch.alerting.util.ScheduledJobUtils.Companion.WORKFLOW_DELEGATE_PATH
+import org.opensearch.alerting.util.ScheduledJobUtils.Companion.WORKFLOW_MONITOR_PATH
+import org.opensearch.client.Client
+import org.opensearch.cluster.service.ClusterService
+import org.opensearch.common.inject.Inject
+import org.opensearch.common.settings.Settings
+import org.opensearch.common.xcontent.LoggingDeprecationHandler
+import org.opensearch.common.xcontent.XContentHelper
+import org.opensearch.common.xcontent.XContentType
+import org.opensearch.commons.alerting.action.AlertingActions
+import org.opensearch.commons.alerting.action.DeleteWorkflowRequest
+import org.opensearch.commons.alerting.action.DeleteWorkflowResponse
+import org.opensearch.commons.alerting.model.CompositeInput
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.commons.alerting.model.ScheduledJob
+import org.opensearch.commons.alerting.model.Workflow
+import org.opensearch.commons.authuser.User
+import org.opensearch.commons.utils.recreateObject
+import org.opensearch.core.action.ActionListener
+import org.opensearch.core.rest.RestStatus
+import org.opensearch.core.xcontent.NamedXContentRegistry
+import org.opensearch.core.xcontent.XContentParser
+import org.opensearch.index.IndexNotFoundException
+import org.opensearch.index.query.QueryBuilders
+import org.opensearch.index.reindex.BulkByScrollResponse
+import org.opensearch.index.reindex.DeleteByQueryAction
+import org.opensearch.index.reindex.DeleteByQueryRequestBuilder
+import org.opensearch.search.builder.SearchSourceBuilder
+import org.opensearch.tasks.Task
+import org.opensearch.transport.TransportService
+
+private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO)
+/**
+ * Transport class that deletes the workflow.
+ * If the deleteDelegateMonitors flag is set to true, deletes the workflow delegates that are not part of another workflow
+ */
+class TransportDeleteWorkflowAction @Inject constructor(
+    transportService: TransportService,
+    val client: Client,
+    actionFilters: ActionFilters,
+    val clusterService: ClusterService,
+    val settings: Settings,
+    val xContentRegistry: NamedXContentRegistry,
+) : HandledTransportAction<ActionRequest, DeleteWorkflowResponse>(
+    AlertingActions.DELETE_WORKFLOW_ACTION_NAME, transportService, actionFilters, ::DeleteWorkflowRequest
+),
+    SecureTransportAction {
+    private val log = LogManager.getLogger(javaClass)
+
+    @Volatile override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings)
+
+    init {
+        listenFilterBySettingChange(clusterService)
+    }
+
+    override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener<DeleteWorkflowResponse>) {
+        val transformedRequest = request as? DeleteWorkflowRequest
+            ?: recreateObject(request) { DeleteWorkflowRequest(it) }
+
+        val user = readUserFromThreadContext(client)
+        val deleteRequest = DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, transformedRequest.workflowId)
+            .setRefreshPolicy(RefreshPolicy.IMMEDIATE)
+
+        if (!validateUserBackendRoles(user, actionListener)) {
+            return
+        }
+
+        scope.launch {
+            DeleteWorkflowHandler(
+                client,
+                actionListener,
+                deleteRequest,
+                transformedRequest.deleteDelegateMonitors,
+                user,
+                transformedRequest.workflowId
+            ).resolveUserAndStart()
+        }
+    }
+
+    inner class DeleteWorkflowHandler(
+        private val client: Client,
+        private val actionListener: ActionListener<DeleteWorkflowResponse>,
+        private val deleteRequest: DeleteRequest,
+        private val deleteDelegateMonitors: Boolean?,
+        private val user: User?,
+        private val workflowId: String,
+    ) {
+        suspend fun resolveUserAndStart() {
+            try {
+                val workflow = getWorkflow()
+
+                val canDelete = user == null ||
+                    !doFilterForUser(user) ||
+                    checkUserPermissionsWithResource(
+                        user,
+                        workflow.user,
+                        actionListener,
+                        "workflow",
+                        workflowId
+                    )
+
+                if (canDelete) {
+                    val delegateMonitorIds = (workflow.inputs[0] as CompositeInput).getMonitorIds()
+                    var deletableMonitors = listOf<Monitor>()
+                    // The user can delete delegate monitors only if all of them are deletable:
+                    // if any monitor in this workflow is referenced by another workflow, we do not delete any of the monitors.
+                    // We will not partially delete monitors; we delete them all or fail the request.
+                    if (deleteDelegateMonitors == true) {
+                        deletableMonitors = getDeletableDelegates(workflowId, delegateMonitorIds, user)
+                        val monitorsDiff = delegateMonitorIds.toMutableList()
+                        monitorsDiff.removeAll(deletableMonitors.map { it.id })
+
+                        if (monitorsDiff.isNotEmpty()) {
+                            actionListener.onFailure(
+                                AlertingException(
+                                    "Not allowed to delete ${monitorsDiff.joinToString()} monitors",
+                                    RestStatus.FORBIDDEN,
+                                    IllegalStateException()
+                                )
+                            )
+                            return
+                        }
+                    }
+
+                    val deleteResponse = deleteWorkflow(deleteRequest)
+                    var deleteWorkflowResponse = DeleteWorkflowResponse(deleteResponse.id, deleteResponse.version)
+
+                    val workflowMetadataId = WorkflowMetadata.getId(workflow.id)
+
+                    val metadataIdsToDelete = mutableListOf(workflowMetadataId)
+
+                    if (deleteDelegateMonitors == true) {
+                        val failedMonitorIds = tryDeletingMonitors(deletableMonitors, RefreshPolicy.IMMEDIATE)
+                        // Update delete workflow response
+                        deleteWorkflowResponse.nonDeletedMonitors = failedMonitorIds
+                        // Delete monitors workflow metadata
+                        // Monitor metadata will be in workflowId-monitorId-metadata format
+                        metadataIdsToDelete.addAll(deletableMonitors.map { MonitorMetadata.getId(it, workflowMetadataId) })
+                    }
+                    try {
+                        // Delete the monitors workflow metadata
+                        val deleteMonitorWorkflowMetadataResponse: BulkByScrollResponse = client.suspendUntil {
+                            DeleteByQueryRequestBuilder(this, DeleteByQueryAction.INSTANCE)
+                                .source(ScheduledJob.SCHEDULED_JOBS_INDEX)
+                                .filter(QueryBuilders.idsQuery().addIds(*metadataIdsToDelete.toTypedArray()))
+                                .execute(it)
+                        }
+                    } catch (t: Exception) {
+                        log.error("Failed to delete delegate monitor metadata. But proceeding with workflow deletion $workflowId", t)
+                    }
+                    actionListener.onResponse(deleteWorkflowResponse)
+                } else {
+                    actionListener.onFailure(
+                        AlertingException(
+                            "Not allowed to delete this workflow!",
+                            RestStatus.FORBIDDEN,
+                            IllegalStateException()
+                        )
+                    )
+                }
+            } catch (t: Exception) {
+                if (t is IndexNotFoundException) {
+                    actionListener.onFailure(
+                        OpenSearchStatusException(
+                            "Workflow not found.",
+                            RestStatus.NOT_FOUND
+                        )
+                    )
+                } else {
+                    log.error("Failed to delete workflow $workflowId", t)
+                    actionListener.onFailure(AlertingException.wrap(t))
+                }
+            }
+        }
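+
+        // The all-or-nothing rule above reduces to a set difference: delegates that also appear
+        // in another workflow are not deletable, and a non-empty remainder fails the request
+        // before anything is removed. Sketch:
+        //
+        //     val deletable = delegateMonitorIds.toSet() - monitorsReferencedElsewhere
+        //     if (deletable.size < delegateMonitorIds.size) { /* fail with FORBIDDEN */ }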
+
+        /**
+         * Tries to delete the given list of monitors. The return value contains all the monitor ids for which deletion failed.
+         * @param monitors list of monitors to be deleted
+         * @param refreshPolicy
+         * @return list of the monitor ids that were not deleted
+         */
+        private suspend fun tryDeletingMonitors(monitors: List<Monitor>, refreshPolicy: RefreshPolicy): List<String> {
+            val nonDeletedMonitorIds = mutableListOf<String>()
+            for (monitor in monitors) {
+                try {
+                    DeleteMonitorService.deleteMonitor(monitor, refreshPolicy)
+                } catch (ex: Exception) {
+                    log.error("failed to delete delegate monitor ${monitor.id} for $workflowId")
+                    nonDeletedMonitorIds.add(monitor.id)
+                }
+            }
+            return nonDeletedMonitorIds
+        }
+
+        /**
+         * Returns the list of monitors belonging only to the given workflow.
+         * If filterBy is enabled, it filters and returns only those monitors which the user has permission to delete.
+         * @param workflowIdToBeDeleted Id of the workflow that should be deleted
+         * @param monitorIds List of delegate monitor ids (underlying monitor ids)
+         */
+        private suspend fun getDeletableDelegates(workflowIdToBeDeleted: String, monitorIds: List<String>, user: User?): List<Monitor> {
+            // Retrieve monitors belonging to other workflows
+            val queryBuilder = QueryBuilders.boolQuery().mustNot(QueryBuilders.termQuery("_id", workflowIdToBeDeleted)).filter(
+                QueryBuilders.nestedQuery(
+                    WORKFLOW_DELEGATE_PATH,
+                    QueryBuilders.boolQuery().must(
+                        QueryBuilders.termsQuery(
+                            WORKFLOW_MONITOR_PATH,
+                            monitorIds
+                        )
+                    ),
+                    ScoreMode.None
+                )
+            )
+
+            val searchRequest = SearchRequest()
+                .indices(ScheduledJob.SCHEDULED_JOBS_INDEX)
+                .source(SearchSourceBuilder().query(queryBuilder))
+
+            val searchResponse: SearchResponse = client.suspendUntil { search(searchRequest, it) }
+
+            val workflows = searchResponse.hits.hits.map { hit ->
+                val xcp = XContentHelper.createParser(
+                    xContentRegistry, LoggingDeprecationHandler.INSTANCE,
+                    hit.sourceRef, XContentType.JSON
+                ).also { it.nextToken() }
+                lateinit var workflow: Workflow
+                while (xcp.nextToken() != XContentParser.Token.END_OBJECT) {
+                    xcp.nextToken()
+                    when (xcp.currentName()) {
+                        "workflow" -> workflow = Workflow.parse(xcp)
+                    }
+                }
+                workflow.copy(id = hit.id, version = hit.version)
+            }
+            val workflowMonitors = workflows.flatMap { (it.inputs[0] as CompositeInput).getMonitorIds() }.distinct()
+            // Monitors that can be deleted -> all workflow delegates - monitors belonging to different workflows
+            val deletableMonitorIds = monitorIds.minus(workflowMonitors.toSet())
+
+            // Filtering further to get the list of monitors that the user has permission to delete, if filterBy is enabled and user is not null
+            val query = QueryBuilders.boolQuery().filter(QueryBuilders.termsQuery("_id", deletableMonitorIds))
+            val searchSource = SearchSourceBuilder().query(query)
+            val monitorSearchRequest = SearchRequest(ScheduledJob.SCHEDULED_JOBS_INDEX).source(searchSource)
+
+            if (user != null && filterByEnabled) {
+                addFilter(user, monitorSearchRequest.source(), "monitor.user.backend_roles.keyword")
+            }
+
+            val searchMonitorResponse: SearchResponse = client.suspendUntil { search(monitorSearchRequest, it) }
+            if (searchMonitorResponse.isTimedOut) {
+                throw OpenSearchException("Cannot determine that the ${ScheduledJob.SCHEDULED_JOBS_INDEX} index is healthy")
+            }
+            val deletableMonitors = mutableListOf<Monitor>()
+            for (hit in searchMonitorResponse.hits) {
+                XContentType.JSON.xContent().createParser(
+                    xContentRegistry,
+                    LoggingDeprecationHandler.INSTANCE, hit.sourceAsString
+                ).use { hitsParser ->
+                    val monitor = ScheduledJob.parse(hitsParser, hit.id, hit.version) as Monitor
+                    deletableMonitors.add(monitor)
+                }
+            }
+
+            return deletableMonitors
+        }
monitorSearchRequest.source(), "monitor.user.backend_roles.keyword") + } + + val searchMonitorResponse: SearchResponse = client.suspendUntil { search(monitorSearchRequest, it) } + if (searchMonitorResponse.isTimedOut) { + throw OpenSearchException("Cannot determine that the ${ScheduledJob.SCHEDULED_JOBS_INDEX} index is healthy") + } + val deletableMonitors = mutableListOf<Monitor>() + for (hit in searchMonitorResponse.hits) { + XContentType.JSON.xContent().createParser( + xContentRegistry, + LoggingDeprecationHandler.INSTANCE, hit.sourceAsString + ).use { hitsParser -> + val monitor = ScheduledJob.parse(hitsParser, hit.id, hit.version) as Monitor + deletableMonitors.add(monitor) + } + } + + return deletableMonitors + } + + private suspend fun getWorkflow(): Workflow { + val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, workflowId) + + val getResponse: GetResponse = client.suspendUntil { get(getRequest, it) } + if (!getResponse.isExists) { + // There is nothing to parse if the workflow doesn't exist; throw and let the caller's catch block notify the listener. + throw AlertingException.wrap( + OpenSearchStatusException("Workflow not found.", RestStatus.NOT_FOUND) + ) + } + val xcp = XContentHelper.createParser( + xContentRegistry, LoggingDeprecationHandler.INSTANCE, + getResponse.sourceAsBytesRef, XContentType.JSON + ) + return ScheduledJob.parse(xcp, getResponse.id, getResponse.version) as Workflow + } + + private suspend fun deleteWorkflow(deleteRequest: DeleteRequest): DeleteResponse { + log.debug("Deleting the workflow with id ${deleteRequest.id()}") + return client.suspendUntil { delete(deleteRequest, it) } + } + + private suspend fun deleteWorkflowMetadata(workflow: Workflow) { + val deleteRequest = DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, WorkflowMetadata.getId(workflow.id)) + val deleteResponse: DeleteResponse = client.suspendUntil { delete(deleteRequest, it) } + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportExecuteMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportExecuteMonitorAction.kt new file mode 100644 index 000000000..9a814bb90 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/transport/TransportExecuteMonitorAction.kt @@ -0,0 +1,161 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.launch +import kotlinx.coroutines.withContext +import org.apache.logging.log4j.LogManager +import org.opensearch.OpenSearchStatusException +import org.opensearch.action.get.GetRequest +import org.opensearch.action.get.GetResponse +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.HandledTransportAction +import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.MonitorMetadataService +import org.opensearch.alerting.MonitorRunnerService +import org.opensearch.alerting.action.ExecuteMonitorAction +import org.opensearch.alerting.action.ExecuteMonitorRequest +import org.opensearch.alerting.action.ExecuteMonitorResponse +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.DocLevelMonitorQueries +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import
org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.ConfigConstants +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.authuser.User +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService +import java.time.Instant + +private val log = LogManager.getLogger(TransportExecuteMonitorAction::class.java) +private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) + +class TransportExecuteMonitorAction @Inject constructor( + transportService: TransportService, + private val client: Client, + private val clusterService: ClusterService, + private val runner: MonitorRunnerService, + actionFilters: ActionFilters, + val xContentRegistry: NamedXContentRegistry, + private val docLevelMonitorQueries: DocLevelMonitorQueries, + private val settings: Settings +) : HandledTransportAction<ExecuteMonitorRequest, ExecuteMonitorResponse>( + ExecuteMonitorAction.NAME, + transportService, + actionFilters, + ::ExecuteMonitorRequest +) { + @Volatile private var indexTimeout = AlertingSettings.INDEX_TIMEOUT.get(settings) + + override fun doExecute(task: Task, execMonitorRequest: ExecuteMonitorRequest, actionListener: ActionListener<ExecuteMonitorResponse>) { + val userStr = client.threadPool().threadContext.getTransient<String>(ConfigConstants.OPENSEARCH_SECURITY_USER_INFO_THREAD_CONTEXT) + log.debug("User and roles string from thread context: $userStr") + val user: User? = User.parse(userStr) + + client.threadPool().threadContext.stashContext().use { + val executeMonitor = fun(monitor: Monitor) { + // Launch the coroutine with the client's threadContext. This is needed to preserve authentication information + // stored on the threadContext set by the security plugin when using the Alerting plugin with the Security plugin.
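+ // Note: runner.launch below runs on the MonitorRunnerService's coroutine scope; if the stashed thread context + // needs to propagate into the coroutine, a ThreadContextElement (like the commented-out variant below) can carry it.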
+ // runner.launch(ElasticThreadContextElement(client.threadPool().threadContext)) { + runner.launch { + val (periodStart, periodEnd) = + monitor.schedule.getPeriodEndingAt(Instant.ofEpochMilli(execMonitorRequest.requestEnd.millis)) + try { + log.info( + "Executing monitor from API - id: ${monitor.id}, type: ${monitor.monitorType.name}, " + + "periodStart: $periodStart, periodEnd: $periodEnd, dryrun: ${execMonitorRequest.dryrun}" + ) + val monitorRunResult = runner.runJob(monitor, periodStart, periodEnd, execMonitorRequest.dryrun) + withContext(Dispatchers.IO) { + actionListener.onResponse(ExecuteMonitorResponse(monitorRunResult)) + } + } catch (e: Exception) { + log.error("Unexpected error running monitor", e) + withContext(Dispatchers.IO) { + actionListener.onFailure(AlertingException.wrap(e)) + } + } + } + } + + if (execMonitorRequest.monitorId != null) { + val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX).id(execMonitorRequest.monitorId) + client.get( + getRequest, + object : ActionListener { + override fun onResponse(response: GetResponse) { + if (!response.isExists) { + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException("Can't find monitor with id: ${response.id}", RestStatus.NOT_FOUND) + ) + ) + return + } + if (!response.isSourceEmpty) { + XContentHelper.createParser( + xContentRegistry, + LoggingDeprecationHandler.INSTANCE, + response.sourceAsBytesRef, + XContentType.JSON + ).use { xcp -> + val monitor = ScheduledJob.parse(xcp, response.id, response.version) as Monitor + executeMonitor(monitor) + } + } + } + + override fun onFailure(t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + ) + } else { + val monitor = when (user?.name.isNullOrEmpty()) { + true -> execMonitorRequest.monitor as Monitor + false -> (execMonitorRequest.monitor as Monitor).copy(user = user) + } + + if (monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { + try { + scope.launch { + if (!docLevelMonitorQueries.docLevelQueryIndexExists(monitor.dataSources)) { + docLevelMonitorQueries.initDocLevelQueryIndex(monitor.dataSources) + log.info("Central Percolation index ${ScheduledJob.DOC_LEVEL_QUERIES_INDEX} created") + } + val (metadata, _) = MonitorMetadataService.getOrCreateMetadata(monitor, skipIndex = true) + docLevelMonitorQueries.indexDocLevelQueries( + monitor, + monitor.id, + metadata, + WriteRequest.RefreshPolicy.IMMEDIATE, + indexTimeout + ) + log.info("Queries inserted into Percolate index ${ScheduledJob.DOC_LEVEL_QUERIES_INDEX}") + executeMonitor(monitor) + } + } catch (t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + } else { + executeMonitor(monitor) + } + } + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportExecuteWorkflowAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportExecuteWorkflowAction.kt new file mode 100644 index 000000000..b29171f65 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/transport/TransportExecuteWorkflowAction.kt @@ -0,0 +1,131 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.launch +import kotlinx.coroutines.withContext +import org.apache.logging.log4j.LogManager +import org.opensearch.OpenSearchStatusException +import org.opensearch.action.get.GetRequest +import org.opensearch.action.get.GetResponse +import org.opensearch.action.support.ActionFilters 
+import org.opensearch.action.support.HandledTransportAction +import org.opensearch.alerting.MonitorRunnerService +import org.opensearch.alerting.action.ExecuteWorkflowAction +import org.opensearch.alerting.action.ExecuteWorkflowRequest +import org.opensearch.alerting.action.ExecuteWorkflowResponse +import org.opensearch.alerting.util.AlertingException +import org.opensearch.client.Client +import org.opensearch.common.inject.Inject +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.ConfigConstants +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.commons.authuser.User +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService +import java.time.Instant + +private val log = LogManager.getLogger(TransportExecuteWorkflowAction::class.java) + +class TransportExecuteWorkflowAction @Inject constructor( + transportService: TransportService, + private val client: Client, + private val runner: MonitorRunnerService, + actionFilters: ActionFilters, + val xContentRegistry: NamedXContentRegistry +) : HandledTransportAction( + ExecuteWorkflowAction.NAME, transportService, actionFilters, ::ExecuteWorkflowRequest +) { + override fun doExecute( + task: Task, + execWorkflowRequest: ExecuteWorkflowRequest, + actionListener: ActionListener, + ) { + val userStr = client.threadPool().threadContext.getTransient(ConfigConstants.OPENSEARCH_SECURITY_USER_INFO_THREAD_CONTEXT) + log.debug("User and roles string from thread context: $userStr") + val user: User? 
= User.parse(userStr) + + client.threadPool().threadContext.stashContext().use { + val executeWorkflow = fun(workflow: Workflow) { + runner.launch { + val (periodStart, periodEnd) = + workflow.schedule.getPeriodEndingAt(Instant.ofEpochMilli(execWorkflowRequest.requestEnd.millis)) + try { + log.info( + "Executing workflow from API - id: ${workflow.id}, periodStart: $periodStart, periodEnd: $periodEnd, " + + "dryrun: ${execWorkflowRequest.dryrun}" + ) + val workflowRunResult = + MonitorRunnerService.runJob(workflow, periodStart, periodEnd, execWorkflowRequest.dryrun) + withContext(Dispatchers.IO, { + actionListener.onResponse( + ExecuteWorkflowResponse( + workflowRunResult + ) + ) + }) + } catch (e: Exception) { + log.error("Unexpected error running workflow", e) + withContext(Dispatchers.IO) { + actionListener.onFailure(AlertingException.wrap(e)) + } + } + } + } + + if (execWorkflowRequest.workflowId != null) { + val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX).id(execWorkflowRequest.workflowId) + client.get( + getRequest, + object : ActionListener { + override fun onResponse(response: GetResponse) { + if (!response.isExists) { + log.error("Can't find workflow with id: ${response.id}") + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "Can't find workflow with id: ${response.id}", + RestStatus.NOT_FOUND + ) + ) + ) + return + } + if (!response.isSourceEmpty) { + XContentHelper.createParser( + xContentRegistry, LoggingDeprecationHandler.INSTANCE, + response.sourceAsBytesRef, XContentType.JSON + ).use { xcp -> + val workflow = ScheduledJob.parse(xcp, response.id, response.version) as Workflow + executeWorkflow(workflow) + } + } + } + + override fun onFailure(t: Exception) { + log.error("Error getting workflow ${execWorkflowRequest.workflowId}", t) + actionListener.onFailure(AlertingException.wrap(t)) + } + } + ) + } else { + val workflow = when (user?.name.isNullOrEmpty()) { + true -> execWorkflowRequest.workflow as Workflow + false -> (execWorkflowRequest.workflow as Workflow).copy(user = user) + } + + executeWorkflow(workflow) + } + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetAlertsAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetAlertsAction.kt new file mode 100644 index 000000000..604c130fc --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetAlertsAction.kt @@ -0,0 +1,273 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.launch +import org.apache.logging.log4j.LogManager +import org.opensearch.action.ActionRequest +import org.opensearch.action.get.GetRequest +import org.opensearch.action.get.GetResponse +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.HandledTransportAction +import org.opensearch.alerting.alerts.AlertIndices +import org.opensearch.alerting.opensearchapi.addFilter +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.util.AlertingException +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import 
org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.GetAlertsRequest +import org.opensearch.commons.alerting.action.GetAlertsResponse +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.authuser.User +import org.opensearch.commons.utils.recreateObject +import org.opensearch.core.action.ActionListener +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import org.opensearch.index.query.BoolQueryBuilder +import org.opensearch.index.query.Operator +import org.opensearch.index.query.QueryBuilders +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.search.sort.SortBuilders +import org.opensearch.search.sort.SortOrder +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService +import java.io.IOException + +private val log = LogManager.getLogger(TransportGetAlertsAction::class.java) +private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) + +class TransportGetAlertsAction @Inject constructor( + transportService: TransportService, + val client: Client, + clusterService: ClusterService, + actionFilters: ActionFilters, + val settings: Settings, + val xContentRegistry: NamedXContentRegistry, +) : HandledTransportAction( + AlertingActions.GET_ALERTS_ACTION_NAME, + transportService, + actionFilters, + ::GetAlertsRequest +), + SecureTransportAction { + + @Volatile + override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) + + init { + listenFilterBySettingChange(clusterService) + } + + override fun doExecute( + task: Task, + request: ActionRequest, + actionListener: ActionListener, + ) { + val getAlertsRequest = request as? GetAlertsRequest + ?: recreateObject(request) { GetAlertsRequest(it) } + val user = readUserFromThreadContext(client) + + val tableProp = getAlertsRequest.table + val sortBuilder = SortBuilders + .fieldSort(tableProp.sortString) + .order(SortOrder.fromString(tableProp.sortOrder)) + if (!tableProp.missing.isNullOrBlank()) { + sortBuilder.missing(tableProp.missing) + } + + val queryBuilder = QueryBuilders.boolQuery() + + if (getAlertsRequest.severityLevel != "ALL") { + queryBuilder.filter(QueryBuilders.termQuery("severity", getAlertsRequest.severityLevel)) + } + + if (getAlertsRequest.alertState == "ALL") { + // alerting dashboards expects chained alerts and individually executed monitors' alerts to be returned from this api + // when invoked with state=ALL. They require that audit alerts are NOT returned in this page + // and only be shown in "associated alerts" field under get workflow_alerts API. 
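+ // In other words, state=ALL means "all non-audit states": the mustNot clause below filters AUDIT alerts out.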
+ // But if the API is called with query_params: state=AUDIT,monitor_id=<123>,workflow_id=, this api + // will return audit alerts generated by delegate monitor <123> in workflow + queryBuilder.filter(QueryBuilders.boolQuery().mustNot(QueryBuilders.termsQuery(Alert.STATE_FIELD, Alert.State.AUDIT.name))) + } else { + queryBuilder.filter(QueryBuilders.termQuery("state", getAlertsRequest.alertState)) + } + + if (getAlertsRequest.alertIds.isNullOrEmpty() == false) { + queryBuilder.filter(QueryBuilders.termsQuery("_id", getAlertsRequest.alertIds)) + } + + if (getAlertsRequest.monitorId != null) { + queryBuilder.filter(QueryBuilders.termQuery("monitor_id", getAlertsRequest.monitorId)) + addWorkflowIdNullOrEmptyCheck(getAlertsRequest, queryBuilder) + } else if (getAlertsRequest.monitorIds.isNullOrEmpty() == false) { + queryBuilder.filter(QueryBuilders.termsQuery("monitor_id", getAlertsRequest.monitorIds)) + addWorkflowIdNullOrEmptyCheck(getAlertsRequest, queryBuilder) + } + if ( + getAlertsRequest.workflowIds.isNullOrEmpty() == false && + !(getAlertsRequest.workflowIds!!.size == 1 && getAlertsRequest.workflowIds!![0] == "") + ) { + queryBuilder.must(QueryBuilders.termsQuery("workflow_id", getAlertsRequest.workflowIds)) + } + if (!tableProp.searchString.isNullOrBlank()) { + queryBuilder + .must( + QueryBuilders + .queryStringQuery(tableProp.searchString) + .defaultOperator(Operator.AND) + .field("monitor_name") + .field("trigger_name") + ) + } + val searchSourceBuilder = SearchSourceBuilder() + .version(true) + .seqNoAndPrimaryTerm(true) + .query(queryBuilder) + .sort(sortBuilder) + .size(tableProp.size) + .from(tableProp.startIndex) + + client.threadPool().threadContext.stashContext().use { + scope.launch { + try { + val alertIndex = resolveAlertsIndexName(getAlertsRequest) + getAlerts(alertIndex, searchSourceBuilder, actionListener, user) + } catch (t: Exception) { + log.error("Failed to get alerts", t) + if (t is AlertingException) { + actionListener.onFailure(t) + } else { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + } + } + } + + // we add this check when we want to fetch alerts for monitors not generated as part of a workflow i.e. non-delegate monitor alerts + private fun addWorkflowIdNullOrEmptyCheck( + getAlertsRequest: GetAlertsRequest, + queryBuilder: BoolQueryBuilder, + ) { + if ( + getAlertsRequest.workflowIds != null && getAlertsRequest.workflowIds!!.size == 1 && getAlertsRequest.workflowIds!![0] == "" + ) { + val noWorkflowIdQuery = QueryBuilders.boolQuery() + .should(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery(Alert.WORKFLOW_ID_FIELD))) + .should(QueryBuilders.termsQuery(Alert.WORKFLOW_ID_FIELD, "")) + queryBuilder.must(noWorkflowIdQuery) + } + } + + /** Precedence order for resolving alert index to be queried: + 1. alertIndex param. + 2. alert index mentioned in monitor data sources. + 3. Default alert indices pattern + */ + suspend fun resolveAlertsIndexName(getAlertsRequest: GetAlertsRequest): String { + var alertIndex = AlertIndices.ALL_ALERT_INDEX_PATTERN + if (getAlertsRequest.alertIndex.isNullOrEmpty() == false) { + alertIndex = getAlertsRequest.alertIndex!! 
+ } else if (getAlertsRequest.monitorId.isNullOrEmpty() == false) { + val retrievedMonitor = getMonitor(getAlertsRequest) + if (retrievedMonitor != null) { + alertIndex = retrievedMonitor.dataSources.alertsIndex + } + } + return if (alertIndex == AlertIndices.ALERT_INDEX) + AlertIndices.ALL_ALERT_INDEX_PATTERN + else + alertIndex + } + + private suspend fun getMonitor(getAlertsRequest: GetAlertsRequest): Monitor? { + val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, getAlertsRequest.monitorId!!) + try { + val getResponse: GetResponse = client.suspendUntil { client.get(getRequest, it) } + if (!getResponse.isExists) { + return null + } + val xcp = XContentHelper.createParser( + xContentRegistry, LoggingDeprecationHandler.INSTANCE, + getResponse.sourceAsBytesRef, XContentType.JSON + ) + return ScheduledJob.parse(xcp, getResponse.id, getResponse.version) as Monitor + } catch (t: Exception) { + log.error("Failure in fetching monitor ${getAlertsRequest.monitorId} to resolve alert index in get alerts action", t) + return null + } + } + + fun getAlerts( + alertIndex: String, + searchSourceBuilder: SearchSourceBuilder, + actionListener: ActionListener<GetAlertsResponse>, + user: User?, + ) { + if (user == null) { + // user is null when: 1/ security is disabled. 2/ when user is super-admin. + search(alertIndex, searchSourceBuilder, actionListener) + } else if (!doFilterForUser(user)) { + // security is enabled and filterby is disabled. + search(alertIndex, searchSourceBuilder, actionListener) + } else { + // security is enabled and filterby is enabled. + try { + log.info("Filtering result by: ${user.backendRoles}") + addFilter(user, searchSourceBuilder, "monitor_user.backend_roles.keyword") + search(alertIndex, searchSourceBuilder, actionListener) + } catch (ex: IOException) { + actionListener.onFailure(AlertingException.wrap(ex)) + } + } + } + + fun search(alertIndex: String, searchSourceBuilder: SearchSourceBuilder, actionListener: ActionListener<GetAlertsResponse>) { + val searchRequest = SearchRequest() + .indices(alertIndex) + .source(searchSourceBuilder) + + client.search( + searchRequest, + object : ActionListener<SearchResponse> { + override fun onResponse(response: SearchResponse) { + val totalAlertCount = response.hits.totalHits?.value?.toInt() + val alerts = response.hits.map { hit -> + val xcp = XContentHelper.createParser( + xContentRegistry, + LoggingDeprecationHandler.INSTANCE, + hit.sourceRef, + XContentType.JSON + ) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + Alert.parse(xcp, hit.id, hit.version) + } + actionListener.onResponse(GetAlertsResponse(alerts, totalAlertCount)) + } + + override fun onFailure(t: Exception) { + actionListener.onFailure(t) + } + } + ) + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetDestinationsAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetDestinationsAction.kt new file mode 100644 index 000000000..4036769ad --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetDestinationsAction.kt @@ -0,0 +1,171 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import org.apache.logging.log4j.LogManager +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.ActionFilters +import
org.opensearch.action.support.HandledTransportAction +import org.opensearch.alerting.action.GetDestinationsAction +import org.opensearch.alerting.action.GetDestinationsRequest +import org.opensearch.alerting.action.GetDestinationsResponse +import org.opensearch.alerting.model.destination.Destination +import org.opensearch.alerting.opensearchapi.addFilter +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.util.AlertingException +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.authuser.User +import org.opensearch.core.action.ActionListener +import org.opensearch.core.common.Strings +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import org.opensearch.index.query.Operator +import org.opensearch.index.query.QueryBuilders +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.search.fetch.subphase.FetchSourceContext +import org.opensearch.search.sort.SortBuilders +import org.opensearch.search.sort.SortOrder +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService +import java.io.IOException + +private val log = LogManager.getLogger(TransportGetDestinationsAction::class.java) + +class TransportGetDestinationsAction @Inject constructor( + transportService: TransportService, + val client: Client, + clusterService: ClusterService, + actionFilters: ActionFilters, + val settings: Settings, + val xContentRegistry: NamedXContentRegistry +) : HandledTransportAction ( + GetDestinationsAction.NAME, + transportService, + actionFilters, + ::GetDestinationsRequest +), + SecureTransportAction { + + @Volatile override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) + + init { + listenFilterBySettingChange(clusterService) + } + + override fun doExecute( + task: Task, + getDestinationsRequest: GetDestinationsRequest, + actionListener: ActionListener + ) { + val user = readUserFromThreadContext(client) + val tableProp = getDestinationsRequest.table + + val sortBuilder = SortBuilders + .fieldSort(tableProp.sortString) + .order(SortOrder.fromString(tableProp.sortOrder)) + if (!tableProp.missing.isNullOrBlank()) { + sortBuilder.missing(tableProp.missing) + } + + val searchSourceBuilder = SearchSourceBuilder() + .sort(sortBuilder) + .size(tableProp.size) + .from(tableProp.startIndex) + .fetchSource(FetchSourceContext(true, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY)) + .seqNoAndPrimaryTerm(true) + .version(true) + val queryBuilder = QueryBuilders.boolQuery() + .must(QueryBuilders.existsQuery("destination")) + + if (!getDestinationsRequest.destinationId.isNullOrBlank()) { + queryBuilder.filter(QueryBuilders.termQuery("_id", getDestinationsRequest.destinationId)) + } + + if (getDestinationsRequest.destinationType != "ALL") { + queryBuilder.filter(QueryBuilders.termQuery("destination.type", getDestinationsRequest.destinationType)) + } + + if (!tableProp.searchString.isNullOrBlank()) { + queryBuilder + .must( + QueryBuilders + .queryStringQuery(tableProp.searchString) + .defaultOperator(Operator.AND) + 
.field("destination.type") + .field("destination.name") + ) + } + searchSourceBuilder.query(queryBuilder) + + client.threadPool().threadContext.stashContext().use { + resolve(searchSourceBuilder, actionListener, user) + } + } + + fun resolve( + searchSourceBuilder: SearchSourceBuilder, + actionListener: ActionListener, + user: User? + ) { + if (user == null) { + // user is null when: 1/ security is disabled. 2/when user is super-admin. + search(searchSourceBuilder, actionListener) + } else if (!doFilterForUser(user)) { + // security is enabled and filterby is disabled. + search(searchSourceBuilder, actionListener) + } else { + // security is enabled and filterby is enabled. + try { + log.info("Filtering result by: ${user.backendRoles}") + addFilter(user, searchSourceBuilder, "destination.user.backend_roles.keyword") + search(searchSourceBuilder, actionListener) + } catch (ex: IOException) { + actionListener.onFailure(AlertingException.wrap(ex)) + } + } + } + + fun search(searchSourceBuilder: SearchSourceBuilder, actionListener: ActionListener) { + val searchRequest = SearchRequest() + .source(searchSourceBuilder) + .indices(ScheduledJob.SCHEDULED_JOBS_INDEX) + client.search( + searchRequest, + object : ActionListener { + override fun onResponse(response: SearchResponse) { + val totalDestinationCount = response.hits.totalHits?.value?.toInt() + val destinations = mutableListOf() + for (hit in response.hits) { + val id = hit.id + val version = hit.version + val seqNo = hit.seqNo.toInt() + val primaryTerm = hit.primaryTerm.toInt() + val xcp = XContentType.JSON.xContent() + .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, hit.sourceAsString) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, xcp.nextToken(), xcp) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + destinations.add(Destination.parse(xcp, id, version, seqNo, primaryTerm)) + } + actionListener.onResponse(GetDestinationsResponse(RestStatus.OK, totalDestinationCount, destinations)) + } + + override fun onFailure(t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + ) + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetEmailAccountAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetEmailAccountAction.kt new file mode 100644 index 000000000..cae1f2298 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetEmailAccountAction.kt @@ -0,0 +1,122 @@ +/* +* Copyright OpenSearch Contributors +* SPDX-License-Identifier: Apache-2.0 +*/ + +package org.opensearch.alerting.transport + +import org.apache.logging.log4j.LogManager +import org.opensearch.OpenSearchStatusException +import org.opensearch.action.get.GetRequest +import org.opensearch.action.get.GetResponse +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.HandledTransportAction +import org.opensearch.alerting.action.GetEmailAccountAction +import org.opensearch.alerting.action.GetEmailAccountRequest +import org.opensearch.alerting.action.GetEmailAccountResponse +import org.opensearch.alerting.model.destination.email.EmailAccount +import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.DestinationType +import org.opensearch.client.Client 
+import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService + +private val log = LogManager.getLogger(TransportGetEmailAccountAction::class.java) + +class TransportGetEmailAccountAction @Inject constructor( + transportService: TransportService, + val client: Client, + actionFilters: ActionFilters, + val clusterService: ClusterService, + settings: Settings, + val xContentRegistry: NamedXContentRegistry +) : HandledTransportAction( + GetEmailAccountAction.NAME, + transportService, + actionFilters, + ::GetEmailAccountRequest +) { + + @Volatile private var allowList = ALLOW_LIST.get(settings) + + init { + clusterService.clusterSettings.addSettingsUpdateConsumer(ALLOW_LIST) { allowList = it } + } + + override fun doExecute( + task: Task, + getEmailAccountRequest: GetEmailAccountRequest, + actionListener: ActionListener + ) { + if (!allowList.contains(DestinationType.EMAIL.value)) { + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "This API is blocked since Destination type [${DestinationType.EMAIL}] is not allowed", + RestStatus.FORBIDDEN + ) + ) + ) + return + } + + val getRequest = GetRequest(SCHEDULED_JOBS_INDEX, getEmailAccountRequest.emailAccountID) + .version(getEmailAccountRequest.version) + .fetchSourceContext(getEmailAccountRequest.srcContext) + client.threadPool().threadContext.stashContext().use { + client.get( + getRequest, + object : ActionListener { + override fun onResponse(response: GetResponse) { + if (!response.isExists) { + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException("Email Account not found.", RestStatus.NOT_FOUND) + ) + ) + return + } + + var emailAccount: EmailAccount? 
= null + if (!response.isSourceEmpty) { + XContentHelper.createParser( + xContentRegistry, + LoggingDeprecationHandler.INSTANCE, + response.sourceAsBytesRef, + XContentType.JSON + ).use { xcp -> + emailAccount = EmailAccount.parseWithType(xcp, response.id, response.version) + } + } + + actionListener.onResponse( + GetEmailAccountResponse( + response.id, + response.version, + response.seqNo, + response.primaryTerm, + RestStatus.OK, + emailAccount + ) + ) + } + + override fun onFailure(e: Exception) { + actionListener.onFailure(e) + } + } + ) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetEmailGroupAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetEmailGroupAction.kt new file mode 100644 index 000000000..4bc51440c --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetEmailGroupAction.kt @@ -0,0 +1,122 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import org.apache.logging.log4j.LogManager +import org.opensearch.OpenSearchStatusException +import org.opensearch.action.get.GetRequest +import org.opensearch.action.get.GetResponse +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.HandledTransportAction +import org.opensearch.alerting.action.GetEmailGroupAction +import org.opensearch.alerting.action.GetEmailGroupRequest +import org.opensearch.alerting.action.GetEmailGroupResponse +import org.opensearch.alerting.model.destination.email.EmailGroup +import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.DestinationType +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService + +private val log = LogManager.getLogger(TransportGetEmailGroupAction::class.java) + +class TransportGetEmailGroupAction @Inject constructor( + transportService: TransportService, + val client: Client, + actionFilters: ActionFilters, + val clusterService: ClusterService, + settings: Settings, + val xContentRegistry: NamedXContentRegistry +) : HandledTransportAction( + GetEmailGroupAction.NAME, + transportService, + actionFilters, + ::GetEmailGroupRequest +) { + + @Volatile private var allowList = ALLOW_LIST.get(settings) + + init { + clusterService.clusterSettings.addSettingsUpdateConsumer(ALLOW_LIST) { allowList = it } + } + + override fun doExecute( + task: Task, + getEmailGroupRequest: GetEmailGroupRequest, + actionListener: ActionListener + ) { + if (!allowList.contains(DestinationType.EMAIL.value)) { + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "This API is blocked since Destination type [${DestinationType.EMAIL}] is not allowed", + RestStatus.FORBIDDEN + ) + ) + ) + return + } + + val getRequest = GetRequest(SCHEDULED_JOBS_INDEX, 
getEmailGroupRequest.emailGroupID) + .version(getEmailGroupRequest.version) + .fetchSourceContext(getEmailGroupRequest.srcContext) + client.threadPool().threadContext.stashContext().use { + client.get( + getRequest, + object : ActionListener { + override fun onResponse(response: GetResponse) { + if (!response.isExists) { + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException("Email Group not found.", RestStatus.NOT_FOUND) + ) + ) + return + } + + var emailGroup: EmailGroup? = null + if (!response.isSourceEmpty) { + XContentHelper.createParser( + xContentRegistry, + LoggingDeprecationHandler.INSTANCE, + response.sourceAsBytesRef, + XContentType.JSON + ).use { xcp -> + emailGroup = EmailGroup.parseWithType(xcp, response.id, response.version) + } + } + + actionListener.onResponse( + GetEmailGroupResponse( + response.id, + response.version, + response.seqNo, + response.primaryTerm, + RestStatus.OK, + emailGroup + ) + ) + } + + override fun onFailure(e: Exception) { + actionListener.onFailure(e) + } + } + ) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetFindingsAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetFindingsAction.kt new file mode 100644 index 000000000..84f3ab24f --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetFindingsAction.kt @@ -0,0 +1,230 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.launch +import kotlinx.coroutines.withContext +import org.apache.logging.log4j.LogManager +import org.apache.lucene.search.join.ScoreMode +import org.opensearch.action.ActionRequest +import org.opensearch.action.get.MultiGetRequest +import org.opensearch.action.get.MultiGetResponse +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.HandledTransportAction +import org.opensearch.alerting.alerts.AlertIndices.Companion.ALL_FINDING_INDEX_PATTERN +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.util.AlertingException +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.GetFindingsRequest +import org.opensearch.commons.alerting.action.GetFindingsResponse +import org.opensearch.commons.alerting.action.GetMonitorRequest +import org.opensearch.commons.alerting.action.GetMonitorResponse +import org.opensearch.commons.alerting.model.Finding +import org.opensearch.commons.alerting.model.FindingDocument +import org.opensearch.commons.alerting.model.FindingWithDocs +import org.opensearch.commons.utils.recreateObject +import org.opensearch.core.action.ActionListener +import org.opensearch.core.common.Strings +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import 
org.opensearch.index.query.Operator +import org.opensearch.index.query.QueryBuilders +import org.opensearch.rest.RestRequest +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.search.fetch.subphase.FetchSourceContext +import org.opensearch.search.sort.SortBuilders +import org.opensearch.search.sort.SortOrder +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService + +private val log = LogManager.getLogger(TransportGetFindingsSearchAction::class.java) +private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) + +class TransportGetFindingsSearchAction @Inject constructor( + transportService: TransportService, + val client: Client, + clusterService: ClusterService, + actionFilters: ActionFilters, + val settings: Settings, + val xContentRegistry: NamedXContentRegistry +) : HandledTransportAction ( + AlertingActions.GET_FINDINGS_ACTION_NAME, + transportService, + actionFilters, + ::GetFindingsRequest +), + SecureTransportAction { + + @Volatile override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) + + init { + listenFilterBySettingChange(clusterService) + } + + override fun doExecute( + task: Task, + request: ActionRequest, + actionListener: ActionListener + ) { + val getFindingsRequest = request as? GetFindingsRequest + ?: recreateObject(request) { GetFindingsRequest(it) } + val tableProp = getFindingsRequest.table + + val sortBuilder = SortBuilders + .fieldSort(tableProp.sortString) + .order(SortOrder.fromString(tableProp.sortOrder)) + if (!tableProp.missing.isNullOrBlank()) { + sortBuilder.missing(tableProp.missing) + } + + val searchSourceBuilder = SearchSourceBuilder() + .sort(sortBuilder) + .size(tableProp.size) + .from(tableProp.startIndex) + .fetchSource(FetchSourceContext(true, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY)) + .seqNoAndPrimaryTerm(true) + .version(true) + + val queryBuilder = QueryBuilders.boolQuery() + + if (!getFindingsRequest.findingId.isNullOrBlank()) { + queryBuilder.filter(QueryBuilders.termQuery("_id", getFindingsRequest.findingId)) + } + + if (getFindingsRequest.monitorId != null) { + queryBuilder.filter(QueryBuilders.termQuery("monitor_id", getFindingsRequest.monitorId)) + } else if (getFindingsRequest.monitorIds.isNullOrEmpty() == false) { + queryBuilder.filter(QueryBuilders.termsQuery("monitor_id", getFindingsRequest.monitorIds)) + } + + if (!tableProp.searchString.isNullOrBlank()) { + queryBuilder + .should( + QueryBuilders + .queryStringQuery(tableProp.searchString) + ) + .should( + QueryBuilders.nestedQuery( + "queries", + QueryBuilders.boolQuery() + .must( + QueryBuilders + .queryStringQuery(tableProp.searchString) + .defaultOperator(Operator.AND) + .field("queries.tags") + .field("queries.name") + ), + ScoreMode.Avg + ) + ) + } + + searchSourceBuilder.query(queryBuilder) + + client.threadPool().threadContext.stashContext().use { + scope.launch { + try { + val indexName = resolveFindingsIndexName(getFindingsRequest) + val getFindingsResponse = search(searchSourceBuilder, indexName) + actionListener.onResponse(getFindingsResponse) + } catch (t: AlertingException) { + actionListener.onFailure(t) + } catch (t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + } + } + + suspend fun resolveFindingsIndexName(findingsRequest: GetFindingsRequest): String { + var indexName = ALL_FINDING_INDEX_PATTERN + + if (findingsRequest.findingIndex.isNullOrEmpty() == false) { + // findingIndex has highest priority, so use that if available + indexName = 
findingsRequest.findingIndex!! + } else if (findingsRequest.monitorId.isNullOrEmpty() == false) { + // second best is monitorId. + // We will use it to fetch monitor and then read indexName from dataSources field of monitor + withContext(Dispatchers.IO) { + val getMonitorRequest = GetMonitorRequest( + findingsRequest.monitorId!!, + -3L, + RestRequest.Method.GET, + FetchSourceContext.FETCH_SOURCE + ) + val getMonitorResponse: GetMonitorResponse = + this@TransportGetFindingsSearchAction.client.suspendUntil { + execute(AlertingActions.GET_MONITOR_ACTION_TYPE, getMonitorRequest, it) + } + indexName = getMonitorResponse.monitor?.dataSources?.findingsIndex ?: ALL_FINDING_INDEX_PATTERN + } + } + return indexName + } + + suspend fun search(searchSourceBuilder: SearchSourceBuilder, indexName: String): GetFindingsResponse { + val searchRequest = SearchRequest() + .source(searchSourceBuilder) + .indices(indexName) + val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } + val totalFindingCount = searchResponse.hits.totalHits?.value?.toInt() + val mgetRequest = MultiGetRequest() + val findingsWithDocs = mutableListOf<FindingWithDocs>() + val findings = mutableListOf<Finding>() + for (hit in searchResponse.hits) { + val xcp = XContentType.JSON.xContent() + .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, hit.sourceAsString) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + val finding = Finding.parse(xcp) + findings.add(finding) + val documentIds = finding.relatedDocIds + // Add getRequests to mget request + documentIds.forEach { docId -> + mgetRequest.add(MultiGetRequest.Item(finding.index, docId)) + } + } + val documents = if (mgetRequest.items.isEmpty()) mutableMapOf<String, FindingDocument>() else searchDocument(mgetRequest) + findings.forEach { + val documentIds = it.relatedDocIds + val relatedDocs = mutableListOf<FindingDocument>() + for (docId in documentIds) { + val key = "${it.index}|$docId" + documents[key]?.let { document -> relatedDocs.add(document) } + } + findingsWithDocs.add(FindingWithDocs(it, relatedDocs)) + } + + return GetFindingsResponse(searchResponse.status(), totalFindingCount, findingsWithDocs) + } + + // TODO: Verify what happens if indices are closed/deleted + suspend fun searchDocument( + mgetRequest: MultiGetRequest + ): Map<String, FindingDocument> { + val response: MultiGetResponse = client.suspendUntil { client.multiGet(mgetRequest, it) } + val documents: MutableMap<String, FindingDocument> = mutableMapOf() + response.responses.forEach { + val key = "${it.index}|${it.id}" + val docData = if (it.isFailed) "" else it.response.sourceAsString + val findingDocument = FindingDocument(it.index, it.id, !it.isFailed, docData) + documents[key] = findingDocument + } + + return documents + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetMonitorAction.kt new file mode 100644 index 000000000..3a6f090ec --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetMonitorAction.kt @@ -0,0 +1,194 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.launch +import org.apache.logging.log4j.LogManager +import org.apache.lucene.search.join.ScoreMode +import org.opensearch.OpenSearchStatusException +import org.opensearch.action.ActionRequest +import
org.opensearch.action.get.GetRequest +import org.opensearch.action.get.GetResponse +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.HandledTransportAction +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.ScheduledJobUtils.Companion.WORKFLOW_DELEGATE_PATH +import org.opensearch.alerting.util.ScheduledJobUtils.Companion.WORKFLOW_MONITOR_PATH +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.GetMonitorRequest +import org.opensearch.commons.alerting.action.GetMonitorResponse +import org.opensearch.commons.alerting.action.GetMonitorResponse.AssociatedWorkflow +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.commons.utils.recreateObject +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.index.query.QueryBuilders +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService + +private val log = LogManager.getLogger(TransportGetMonitorAction::class.java) +private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) + +class TransportGetMonitorAction @Inject constructor( + transportService: TransportService, + val client: Client, + actionFilters: ActionFilters, + val xContentRegistry: NamedXContentRegistry, + val clusterService: ClusterService, + settings: Settings, +) : HandledTransportAction<ActionRequest, GetMonitorResponse>( + AlertingActions.GET_MONITOR_ACTION_NAME, + transportService, + actionFilters, + ::GetMonitorRequest +), + SecureTransportAction { + + @Volatile + override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) + + init { + listenFilterBySettingChange(clusterService) + } + + override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener<GetMonitorResponse>) { + val transformedRequest = request as? GetMonitorRequest + ?: recreateObject(request) { + GetMonitorRequest(it) + } + + val user = readUserFromThreadContext(client) + + val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, transformedRequest.monitorId) + .version(transformedRequest.version) + .fetchSourceContext(transformedRequest.srcContext) + + if (!validateUserBackendRoles(user, actionListener)) { + return + } + + /* + * Remove the security context before calling OpenSearch APIs. By this time, the permissions required + * to call this API have been validated. + * Once system-indices [https://github.com/opendistro-for-elasticsearch/security/issues/666] is done, we + * might further improve this logic. Also change try to Kotlin's use for auto-closables.
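+ * Note: stashContext() returns a stored context that restores the original thread context when closed, + * which is why the stash below is wrapped in Kotlin's use {}.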
+ */ + client.threadPool().threadContext.stashContext().use { + client.get( + getRequest, + object : ActionListener { + override fun onResponse(response: GetResponse) { + if (!response.isExists) { + actionListener.onFailure( + AlertingException.wrap(OpenSearchStatusException("Monitor not found.", RestStatus.NOT_FOUND)) + ) + return + } + + var monitor: Monitor? = null + if (!response.isSourceEmpty) { + XContentHelper.createParser( + xContentRegistry, + LoggingDeprecationHandler.INSTANCE, + response.sourceAsBytesRef, + XContentType.JSON + ).use { xcp -> + monitor = ScheduledJob.parse(xcp, response.id, response.version) as Monitor + + // security is enabled and filterby is enabled + if (!checkUserPermissionsWithResource( + user, + monitor?.user, + actionListener, + "monitor", + transformedRequest.monitorId + ) + ) { + return + } + } + } + try { + scope.launch { + val associatedCompositeMonitors = getAssociatedWorkflows(response.id) + actionListener.onResponse( + GetMonitorResponse( + response.id, + response.version, + response.seqNo, + response.primaryTerm, + monitor, + associatedCompositeMonitors + ) + ) + } + } catch (e: Exception) { + log.error("Failed to get associate workflows in get monitor action", e) + } + } + + override fun onFailure(t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + ) + } + } + + private suspend fun getAssociatedWorkflows(id: String): List { + try { + val associatedWorkflows = mutableListOf() + val queryBuilder = QueryBuilders.nestedQuery( + WORKFLOW_DELEGATE_PATH, + QueryBuilders.boolQuery().must( + QueryBuilders.matchQuery( + WORKFLOW_MONITOR_PATH, + id + ) + ), + ScoreMode.None + ) + val searchRequest = SearchRequest() + .indices(ScheduledJob.SCHEDULED_JOBS_INDEX) + .source(SearchSourceBuilder().query(queryBuilder).fetchField("_id")) + val response: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } + + for (hit in response.hits) { + XContentType.JSON.xContent().createParser( + xContentRegistry, + LoggingDeprecationHandler.INSTANCE, + hit.sourceAsString + ).use { hitsParser -> + val workflow = ScheduledJob.parse(hitsParser, hit.id, hit.version) + if (workflow is Workflow) { + associatedWorkflows.add(AssociatedWorkflow(hit.id, workflow.name)) + } + } + } + return associatedWorkflows + } catch (e: java.lang.Exception) { + log.error("failed to fetch associated workflows for monitor $id", e) + return emptyList() + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetWorkflowAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetWorkflowAction.kt new file mode 100644 index 000000000..c7bd42904 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetWorkflowAction.kt @@ -0,0 +1,148 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import org.apache.logging.log4j.LogManager +import org.opensearch.OpenSearchStatusException +import org.opensearch.action.get.GetRequest +import org.opensearch.action.get.GetResponse +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.HandledTransportAction +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.util.AlertingException +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings +import 
org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.GetWorkflowRequest +import org.opensearch.commons.alerting.action.GetWorkflowResponse +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.index.IndexNotFoundException +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService + +class TransportGetWorkflowAction @Inject constructor( + transportService: TransportService, + val client: Client, + actionFilters: ActionFilters, + val xContentRegistry: NamedXContentRegistry, + val clusterService: ClusterService, + settings: Settings +) : HandledTransportAction( + AlertingActions.GET_WORKFLOW_ACTION_NAME, transportService, actionFilters, ::GetWorkflowRequest +), + SecureTransportAction { + + private val log = LogManager.getLogger(javaClass) + + @Volatile override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) + + init { + listenFilterBySettingChange(clusterService) + } + + override fun doExecute(task: Task, getWorkflowRequest: GetWorkflowRequest, actionListener: ActionListener) { + val user = readUserFromThreadContext(client) + + val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, getWorkflowRequest.workflowId) + + if (!validateUserBackendRoles(user, actionListener)) { + return + } + + client.threadPool().threadContext.stashContext().use { + client.get( + getRequest, + object : ActionListener { + override fun onResponse(response: GetResponse) { + if (!response.isExists) { + log.error("Workflow with ${getWorkflowRequest.workflowId} not found") + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "Workflow not found.", + RestStatus.NOT_FOUND + ) + ) + ) + return + } + + var workflow: Workflow? 
= null + if (!response.isSourceEmpty) { + XContentHelper.createParser( + xContentRegistry, LoggingDeprecationHandler.INSTANCE, + response.sourceAsBytesRef, XContentType.JSON + ).use { xcp -> + val compositeMonitor = ScheduledJob.parse(xcp, response.id, response.version) + if (compositeMonitor is Workflow) { + workflow = compositeMonitor + } else { + log.error("Wrong monitor type returned") + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "Workflow not found.", + RestStatus.NOT_FOUND + ) + ) + ) + return + } + + // security is enabled and filterby is enabled + if (!checkUserPermissionsWithResource( + user, + workflow?.user, + actionListener, + "workflow", + getWorkflowRequest.workflowId + ) + ) { + return + } + } + } + + actionListener.onResponse( + GetWorkflowResponse( + response.id, + response.version, + response.seqNo, + response.primaryTerm, + RestStatus.OK, + workflow + ) + ) + } + + override fun onFailure(t: Exception) { + log.error("Getting the workflow failed", t) + + if (t is IndexNotFoundException) { + actionListener.onFailure( + OpenSearchStatusException( + "Workflow not found", + RestStatus.NOT_FOUND + ) + ) + } else { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + } + ) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetWorkflowAlertsAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetWorkflowAlertsAction.kt new file mode 100644 index 000000000..7a9561ccb --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetWorkflowAlertsAction.kt @@ -0,0 +1,274 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.launch +import org.apache.logging.log4j.LogManager +import org.opensearch.action.ActionRequest +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.HandledTransportAction +import org.opensearch.alerting.alerts.AlertIndices +import org.opensearch.alerting.opensearchapi.addFilter +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.util.AlertingException +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.GetAlertsRequest +import org.opensearch.commons.alerting.action.GetWorkflowAlertsRequest +import org.opensearch.commons.alerting.action.GetWorkflowAlertsResponse +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.authuser.User +import org.opensearch.commons.utils.recreateObject +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import org.opensearch.index.query.Operator +import 
org.opensearch.index.query.QueryBuilders
+import org.opensearch.search.builder.SearchSourceBuilder
+import org.opensearch.search.sort.SortBuilders
+import org.opensearch.search.sort.SortOrder
+import org.opensearch.tasks.Task
+import org.opensearch.transport.TransportService
+import java.io.IOException
+
+private val log = LogManager.getLogger(TransportGetWorkflowAlertsAction::class.java)
+private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO)
+
+class TransportGetWorkflowAlertsAction @Inject constructor(
+    transportService: TransportService,
+    val client: Client,
+    clusterService: ClusterService,
+    actionFilters: ActionFilters,
+    val settings: Settings,
+    val xContentRegistry: NamedXContentRegistry,
+) : HandledTransportAction<ActionRequest, GetWorkflowAlertsResponse>(
+    AlertingActions.GET_WORKFLOW_ALERTS_ACTION_NAME,
+    transportService,
+    actionFilters,
+    ::GetAlertsRequest
+),
+    SecureTransportAction {
+
+    @Volatile
+    override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings)
+
+    @Volatile
+    private var isAlertHistoryEnabled = AlertingSettings.ALERT_HISTORY_ENABLED.get(settings)
+
+    init {
+        clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.ALERT_HISTORY_ENABLED) { isAlertHistoryEnabled = it }
+        listenFilterBySettingChange(clusterService)
+    }
+
+    override fun doExecute(
+        task: Task,
+        request: ActionRequest,
+        actionListener: ActionListener<GetWorkflowAlertsResponse>,
+    ) {
+        val getWorkflowAlertsRequest = request as? GetWorkflowAlertsRequest
+            ?: recreateObject(request) { GetWorkflowAlertsRequest(it) }
+        val user = readUserFromThreadContext(client)
+
+        val tableProp = getWorkflowAlertsRequest.table
+        val sortBuilder = SortBuilders.fieldSort(tableProp.sortString)
+            .order(SortOrder.fromString(tableProp.sortOrder))
+        if (!tableProp.missing.isNullOrBlank()) {
+            sortBuilder.missing(tableProp.missing)
+        }
+
+        val queryBuilder = QueryBuilders.boolQuery()
+
+        if (getWorkflowAlertsRequest.severityLevel != "ALL") {
+            queryBuilder.filter(QueryBuilders.termQuery("severity", getWorkflowAlertsRequest.severityLevel))
+        }
+
+        if (getWorkflowAlertsRequest.alertState == "ALL") {
+            // exclude AUDIT-state alerts from the "ALL" view; they are fetched separately as associated alerts
+            queryBuilder.filter(QueryBuilders.boolQuery().mustNot(QueryBuilders.termsQuery(Alert.STATE_FIELD, Alert.State.AUDIT.name)))
+        } else {
+            queryBuilder.filter(QueryBuilders.termQuery(Alert.STATE_FIELD, getWorkflowAlertsRequest.alertState))
+        }
+
+        if (getWorkflowAlertsRequest.alertIds.isNullOrEmpty() == false) {
+            queryBuilder.filter(QueryBuilders.termsQuery("_id", getWorkflowAlertsRequest.alertIds))
+        }
+
+        if (getWorkflowAlertsRequest.monitorIds.isNullOrEmpty() == false) {
+            queryBuilder.filter(QueryBuilders.termsQuery("monitor_id", getWorkflowAlertsRequest.monitorIds))
+        }
+        if (getWorkflowAlertsRequest.workflowIds.isNullOrEmpty() == false) {
+            queryBuilder.must(QueryBuilders.termsQuery("workflow_id", getWorkflowAlertsRequest.workflowIds))
+            queryBuilder.must(QueryBuilders.termQuery("monitor_id", ""))
+        }
+        if (!tableProp.searchString.isNullOrBlank()) {
+            queryBuilder
+                .must(
+                    QueryBuilders.queryStringQuery(tableProp.searchString)
+                        .defaultOperator(Operator.AND)
+                        .field("monitor_name")
+                        .field("trigger_name")
+                )
+        }
+        // if alert ids are specified we cannot set the "from" field, since we also use those ids
+        // to paginate the associated alerts
+        val from = if (getWorkflowAlertsRequest.alertIds.isNullOrEmpty())
+            tableProp.startIndex
+        else 0
+
+        val searchSourceBuilder = SearchSourceBuilder()
+            .version(true)
+            .seqNoAndPrimaryTerm(true)
+            .query(queryBuilder)
+            .sort(sortBuilder)
+            .size(tableProp.size)
+            .from(from)
+
+        client.threadPool().threadContext.stashContext().use {
+            scope.launch {
+                try {
+                    val alertIndex = resolveAlertsIndexName(getWorkflowAlertsRequest)
+                    getAlerts(getWorkflowAlertsRequest, alertIndex, searchSourceBuilder, actionListener, user)
+                } catch (t: Exception) {
+                    log.error("Failed to get alerts", t)
+                    if (t is AlertingException) {
+                        actionListener.onFailure(t)
+                    } else {
+                        actionListener.onFailure(AlertingException.wrap(t))
+                    }
+                }
+            }
+        }
+    }
+
+    fun resolveAlertsIndexName(getAlertsRequest: GetWorkflowAlertsRequest): String {
+        var alertIndex = AlertIndices.ALL_ALERT_INDEX_PATTERN
+        if (getAlertsRequest.alertIndex.isNullOrEmpty() == false) {
+            alertIndex = getAlertsRequest.alertIndex!!
+        }
+        return if (alertIndex == AlertIndices.ALERT_INDEX)
+            AlertIndices.ALL_ALERT_INDEX_PATTERN
+        else
+            alertIndex
+    }
+
+    fun resolveAssociatedAlertsIndexName(getAlertsRequest: GetWorkflowAlertsRequest): String {
+        return if (getAlertsRequest.associatedAlertsIndex.isNullOrEmpty()) AlertIndices.ALL_ALERT_INDEX_PATTERN
+        else getAlertsRequest.associatedAlertsIndex!!
+    }
+
+    suspend fun getAlerts(
+        getWorkflowAlertsRequest: GetWorkflowAlertsRequest,
+        alertIndex: String,
+        searchSourceBuilder: SearchSourceBuilder,
+        actionListener: ActionListener<GetWorkflowAlertsResponse>,
+        user: User?,
+    ) {
+        // user is null when: 1/ security is disabled. 2/ when user is super-admin.
+        if (user == null) {
+            search(getWorkflowAlertsRequest, alertIndex, searchSourceBuilder, actionListener)
+        } else if (!doFilterForUser(user)) {
+            // security is enabled and filterby is disabled.
+            search(getWorkflowAlertsRequest, alertIndex, searchSourceBuilder, actionListener)
+        } else {
+            // security is enabled and filterby is enabled.
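+            // note: addFilter (below) restricts results to alerts whose monitor creator shares a
+            // backend role with the requesting user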
+ try { + log.info("Filtering result by: ${user.backendRoles}") + addFilter(user, searchSourceBuilder, "monitor_user.backend_roles.keyword") + search(getWorkflowAlertsRequest, alertIndex, searchSourceBuilder, actionListener) + } catch (ex: IOException) { + actionListener.onFailure(AlertingException.wrap(ex)) + } + } + } + + suspend fun search( + getWorkflowAlertsRequest: GetWorkflowAlertsRequest, + alertIndex: String, + searchSourceBuilder: SearchSourceBuilder, + actionListener: ActionListener, + ) { + try { + val searchRequest = SearchRequest() + .indices(alertIndex) + .source(searchSourceBuilder) + val alerts = mutableListOf() + val associatedAlerts = mutableListOf() + + val response: SearchResponse = client.suspendUntil { search(searchRequest, it) } + val totalAlertCount = response.hits.totalHits?.value?.toInt() + alerts.addAll( + parseAlertsFromSearchResponse(response) + ) + if (alerts.isNotEmpty() && getWorkflowAlertsRequest.getAssociatedAlerts == true) + getAssociatedAlerts( + associatedAlerts, + alerts, + resolveAssociatedAlertsIndexName(getWorkflowAlertsRequest), + getWorkflowAlertsRequest + ) + actionListener.onResponse(GetWorkflowAlertsResponse(alerts, associatedAlerts, totalAlertCount)) + } catch (e: Exception) { + actionListener.onFailure(AlertingException("Failed to get alerts", RestStatus.INTERNAL_SERVER_ERROR, e)) + } + } + + private suspend fun getAssociatedAlerts( + associatedAlerts: MutableList, + alerts: MutableList, + alertIndex: String, + getWorkflowAlertsRequest: GetWorkflowAlertsRequest, + ) { + try { + val associatedAlertIds = mutableSetOf() + alerts.forEach { associatedAlertIds.addAll(it.associatedAlertIds) } + if (associatedAlertIds.isEmpty()) return + val queryBuilder = QueryBuilders.boolQuery() + val searchRequest = SearchRequest(alertIndex) + // if chained alert id param is non-null, paginate the associated alerts. 
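+        // (sort, size, and from for this associated-alerts page come from the same Table
+        // properties as the top-level alert query)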
+ if (getWorkflowAlertsRequest.alertIds.isNullOrEmpty() == false) { + val tableProp = getWorkflowAlertsRequest.table + val sortBuilder = SortBuilders.fieldSort(tableProp.sortString) + .order(SortOrder.fromString(tableProp.sortOrder)) + if (!tableProp.missing.isNullOrBlank()) { + sortBuilder.missing(tableProp.missing) + } + searchRequest.source().sort(sortBuilder).size(tableProp.size).from(tableProp.startIndex) + } + queryBuilder.must(QueryBuilders.termsQuery("_id", associatedAlertIds)) + queryBuilder.must(QueryBuilders.termQuery(Alert.STATE_FIELD, Alert.State.AUDIT)) + searchRequest.source().query(queryBuilder) + val response: SearchResponse = client.suspendUntil { search(searchRequest, it) } + associatedAlerts.addAll(parseAlertsFromSearchResponse(response)) + } catch (e: Exception) { + log.error("Failed to get associated alerts in get workflow alerts action", e) + } + } + + private fun parseAlertsFromSearchResponse(response: SearchResponse) = response.hits.map { hit -> + val xcp = XContentHelper.createParser( + xContentRegistry, + LoggingDeprecationHandler.INSTANCE, + hit.sourceRef, + XContentType.JSON + ) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + val alert = Alert.parse(xcp, hit.id, hit.version) + alert + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt new file mode 100644 index 000000000..2100c0593 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt @@ -0,0 +1,732 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.launch +import org.apache.logging.log4j.LogManager +import org.opensearch.ExceptionsHelper +import org.opensearch.OpenSearchException +import org.opensearch.OpenSearchSecurityException +import org.opensearch.OpenSearchStatusException +import org.opensearch.ResourceAlreadyExistsException +import org.opensearch.action.ActionRequest +import org.opensearch.action.admin.cluster.health.ClusterHealthAction +import org.opensearch.action.admin.cluster.health.ClusterHealthRequest +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse +import org.opensearch.action.admin.indices.create.CreateIndexResponse +import org.opensearch.action.get.GetRequest +import org.opensearch.action.get.GetResponse +import org.opensearch.action.index.IndexRequest +import org.opensearch.action.index.IndexResponse +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.HandledTransportAction +import org.opensearch.action.support.WriteRequest.RefreshPolicy +import org.opensearch.action.support.master.AcknowledgedResponse +import org.opensearch.alerting.MonitorMetadataService +import org.opensearch.alerting.core.ScheduledJobIndices +import org.opensearch.alerting.model.MonitorMetadata +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.service.DeleteMonitorService +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERTING_MAX_MONITORS +import org.opensearch.alerting.settings.AlertingSettings.Companion.INDEX_TIMEOUT 
+import org.opensearch.alerting.settings.AlertingSettings.Companion.MAX_ACTION_THROTTLE_VALUE +import org.opensearch.alerting.settings.AlertingSettings.Companion.REQUEST_TIMEOUT +import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.DocLevelMonitorQueries +import org.opensearch.alerting.util.IndexUtils +import org.opensearch.alerting.util.addUserBackendRolesFilter +import org.opensearch.alerting.util.getRoleFilterEnabled +import org.opensearch.alerting.util.isADMonitor +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings +import org.opensearch.common.unit.TimeValue +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentFactory.jsonBuilder +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.IndexMonitorRequest +import org.opensearch.commons.alerting.action.IndexMonitorResponse +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelMonitorInput.Companion.DOC_LEVEL_INPUT_FIELD +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.authuser.User +import org.opensearch.commons.utils.recreateObject +import org.opensearch.core.action.ActionListener +import org.opensearch.core.common.io.stream.NamedWriteableRegistry +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.index.query.QueryBuilders +import org.opensearch.index.reindex.BulkByScrollResponse +import org.opensearch.index.reindex.DeleteByQueryAction +import org.opensearch.index.reindex.DeleteByQueryRequestBuilder +import org.opensearch.rest.RestRequest +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService +import java.io.IOException +import java.time.Duration + +private val log = LogManager.getLogger(TransportIndexMonitorAction::class.java) +private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) + +class TransportIndexMonitorAction @Inject constructor( + transportService: TransportService, + val client: Client, + actionFilters: ActionFilters, + val scheduledJobIndices: ScheduledJobIndices, + val docLevelMonitorQueries: DocLevelMonitorQueries, + val clusterService: ClusterService, + val settings: Settings, + val xContentRegistry: NamedXContentRegistry, + val namedWriteableRegistry: NamedWriteableRegistry +) : HandledTransportAction( + AlertingActions.INDEX_MONITOR_ACTION_NAME, + transportService, + actionFilters, + ::IndexMonitorRequest +), + SecureTransportAction { + + @Volatile private var maxMonitors = ALERTING_MAX_MONITORS.get(settings) + + @Volatile private var requestTimeout = REQUEST_TIMEOUT.get(settings) + + @Volatile private var indexTimeout = INDEX_TIMEOUT.get(settings) + + @Volatile private var maxActionThrottle = MAX_ACTION_THROTTLE_VALUE.get(settings) + 
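+    // the settings above (and allowList below) are dynamic; update consumers are registered in init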
+    @Volatile private var allowList = ALLOW_LIST.get(settings)
+
+    @Volatile override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings)
+
+    init {
+        clusterService.clusterSettings.addSettingsUpdateConsumer(ALERTING_MAX_MONITORS) { maxMonitors = it }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(REQUEST_TIMEOUT) { requestTimeout = it }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(INDEX_TIMEOUT) { indexTimeout = it }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(MAX_ACTION_THROTTLE_VALUE) { maxActionThrottle = it }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(ALLOW_LIST) { allowList = it }
+        listenFilterBySettingChange(clusterService)
+    }
+
+    override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener<IndexMonitorResponse>) {
+        val transformedRequest = request as? IndexMonitorRequest
+            ?: recreateObject(request, namedWriteableRegistry) {
+                IndexMonitorRequest(it)
+            }
+
+        val user = readUserFromThreadContext(client)
+
+        if (!validateUserBackendRoles(user, actionListener)) {
+            return
+        }
+
+        if (
+            user != null &&
+            !isAdmin(user) &&
+            transformedRequest.rbacRoles != null
+        ) {
+            if (transformedRequest.rbacRoles?.stream()?.anyMatch { !user.backendRoles.contains(it) } == true) {
+                log.debug(
+                    "User specified backend roles, ${transformedRequest.rbacRoles}, " +
+                        "that they don't have access to. User backend roles: ${user.backendRoles}"
+                )
+                actionListener.onFailure(
+                    AlertingException.wrap(
+                        OpenSearchStatusException(
+                            "User specified backend roles that they don't have access to. Contact administrator",
+                            RestStatus.FORBIDDEN
+                        )
+                    )
+                )
+                return
+            } else if (transformedRequest.rbacRoles?.isEmpty() == true) {
+                log.debug(
+                    "Non-admin users are not allowed to specify an empty set of backend roles. " +
+                        "Please don't pass in the parameter or pass in at least one backend role."
+                )
+                actionListener.onFailure(
+                    AlertingException.wrap(
+                        OpenSearchStatusException(
+                            "Non-admin users are not allowed to specify an empty set of backend roles.",
+                            RestStatus.FORBIDDEN
+                        )
+                    )
+                )
+                return
+            }
+        }
+
+        if (!isADMonitor(transformedRequest.monitor)) {
+            checkIndicesAndExecute(client, actionListener, transformedRequest, user)
+        } else {
+            // check if user has access to any anomaly detector for AD monitor
+            checkAnomalyDetectorAndExecute(client, actionListener, transformedRequest, user)
+        }
+    }
+
+    /**
+     * Check if user has permissions to read the configured indices on the monitor and
+     * then create the monitor.
+     */
+    fun checkIndicesAndExecute(
+        client: Client,
+        actionListener: ActionListener<IndexMonitorResponse>,
+        request: IndexMonitorRequest,
+        user: User?,
+    ) {
+        val indices = mutableListOf<String>()
+        // todo: for doc level alerting: check if index is present before monitor is created.
+        val searchInputs = request.monitor.inputs.filter { it.name() == SearchInput.SEARCH_FIELD || it.name() == DOC_LEVEL_INPUT_FIELD }
+        searchInputs.forEach {
+            val inputIndices = if (it.name() == SearchInput.SEARCH_FIELD) (it as SearchInput).indices
+            else (it as DocLevelMonitorInput).indices
+            indices.addAll(inputIndices)
+        }
+        val searchRequest = SearchRequest().indices(*indices.toTypedArray())
+            .source(SearchSourceBuilder.searchSource().size(1).query(QueryBuilders.matchAllQuery()))
+        client.search(
+            searchRequest,
+            object : ActionListener<SearchResponse> {
+                override fun onResponse(searchResponse: SearchResponse) {
+                    // User has read access to the configured indices in the monitor; now create the monitor without the user context.
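+                    // stashContext() drops the caller's identity so the write to the scheduled jobs
+                    // index runs under the plugin's own (system) context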
+                    client.threadPool().threadContext.stashContext().use {
+                        IndexMonitorHandler(client, actionListener, request, user).resolveUserAndStart()
+                    }
+                }
+
+                // Due to below issue with security plugin, we get security_exception when invalid index name is mentioned.
+                // https://github.com/opendistro-for-elasticsearch/security/issues/718
+                override fun onFailure(t: Exception) {
+                    actionListener.onFailure(
+                        AlertingException.wrap(
+                            when (t is OpenSearchSecurityException) {
+                                true -> OpenSearchStatusException(
+                                    "User doesn't have read permissions for one or more configured indices " +
+                                        "$indices",
+                                    RestStatus.FORBIDDEN
+                                )
+                                false -> t
+                            }
+                        )
+                    )
+                }
+            }
+        )
+    }
+
+    /**
+     * It's not reasonable to create an AD monitor if the user has no access to any detector, since
+     * the monitor would never get any anomaly results. So we check that the user has access to at
+     * least one anomaly detector before they can create an AD monitor.
+     * As the anomaly detector index is a system index, a common user has no permission to query it,
+     * so we need to send a REST API call to the AD REST API.
+     */
+    fun checkAnomalyDetectorAndExecute(
+        client: Client,
+        actionListener: ActionListener<IndexMonitorResponse>,
+        request: IndexMonitorRequest,
+        user: User?,
+    ) {
+        client.threadPool().threadContext.stashContext().use {
+            IndexMonitorHandler(client, actionListener, request, user).resolveUserAndStartForAD()
+        }
+    }
+
+    inner class IndexMonitorHandler(
+        private val client: Client,
+        private val actionListener: ActionListener<IndexMonitorResponse>,
+        private val request: IndexMonitorRequest,
+        private val user: User?,
+    ) {
+
+        fun resolveUserAndStart() {
+            if (user == null) {
+                // Security is disabled, add empty user to Monitor. user is null for older versions.
+                request.monitor = request.monitor
+                    .copy(user = User("", listOf(), listOf(), listOf()))
+                start()
+            } else {
+                request.monitor = request.monitor
+                    .copy(user = User(user.name, user.backendRoles, user.roles, user.customAttNames))
+                start()
+            }
+        }
+
+        fun resolveUserAndStartForAD() {
+            if (user == null) {
+                // Security is disabled, add empty user to Monitor. user is null for older versions.
+                request.monitor = request.monitor
+                    .copy(user = User("", listOf(), listOf(), listOf()))
+                start()
+            } else {
+                try {
+                    request.monitor = request.monitor
+                        .copy(user = User(user.name, user.backendRoles, user.roles, user.customAttNames))
+                    val searchSourceBuilder = SearchSourceBuilder().size(0)
+                    if (getRoleFilterEnabled(clusterService, settings, "plugins.anomaly_detection.filter_by_backend_roles")) {
+                        addUserBackendRolesFilter(user, searchSourceBuilder)
+                    }
+                    val searchRequest = SearchRequest().indices(".opendistro-anomaly-detectors").source(searchSourceBuilder)
+                    client.search(
+                        searchRequest,
+                        object : ActionListener<SearchResponse> {
+                            override fun onResponse(response: SearchResponse?)
{ + val totalHits = response?.hits?.totalHits?.value + if (totalHits != null && totalHits > 0L) { + start() + } else { + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException("User has no available detectors", RestStatus.NOT_FOUND) + ) + ) + } + } + + override fun onFailure(t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + ) + } catch (ex: IOException) { + actionListener.onFailure(AlertingException.wrap(ex)) + } + } + } + + fun start() { + if (!scheduledJobIndices.scheduledJobIndexExists()) { + scheduledJobIndices.initScheduledJobIndex(object : ActionListener { + override fun onResponse(response: CreateIndexResponse) { + onCreateMappingsResponse(response.isAcknowledged) + } + override fun onFailure(t: Exception) { + // https://github.com/opensearch-project/alerting/issues/646 + if (ExceptionsHelper.unwrapCause(t) is ResourceAlreadyExistsException) { + scope.launch { + // Wait for the yellow status + val request = ClusterHealthRequest() + .indices(SCHEDULED_JOBS_INDEX) + .waitForYellowStatus() + val response: ClusterHealthResponse = client.suspendUntil { + execute(ClusterHealthAction.INSTANCE, request, it) + } + if (response.isTimedOut) { + actionListener.onFailure( + OpenSearchException("Cannot determine that the $SCHEDULED_JOBS_INDEX index is healthy") + ) + } + // Retry mapping of monitor + onCreateMappingsResponse(true) + } + } else { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + }) + } else if (!IndexUtils.scheduledJobIndexUpdated) { + IndexUtils.updateIndexMapping( + SCHEDULED_JOBS_INDEX, + ScheduledJobIndices.scheduledJobMappings(), + clusterService.state(), + client.admin().indices(), + object : ActionListener { + override fun onResponse(response: AcknowledgedResponse) { + onUpdateMappingsResponse(response) + } + override fun onFailure(t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + ) + } else { + prepareMonitorIndexing() + } + } + + /** + * This function prepares for indexing a new monitor. + * If this is an update request we can simply update the monitor. Otherwise we first check to see how many monitors already exist, + * and compare this to the [maxMonitorCount]. Requests that breach this threshold will be rejected. 
+ */ + private fun prepareMonitorIndexing() { + // Below check needs to be async operations and needs to be refactored issue#269 + // checkForDisallowedDestinations(allowList) + + try { + validateActionThrottle(request.monitor, maxActionThrottle, TimeValue.timeValueMinutes(1)) + } catch (e: RuntimeException) { + actionListener.onFailure(AlertingException.wrap(e)) + return + } + + if (request.method == RestRequest.Method.PUT) { + scope.launch { + updateMonitor() + } + } else { + val query = QueryBuilders.boolQuery().filter(QueryBuilders.termQuery("${Monitor.MONITOR_TYPE}.type", Monitor.MONITOR_TYPE)) + val searchSource = SearchSourceBuilder().query(query).timeout(requestTimeout) + val searchRequest = SearchRequest(SCHEDULED_JOBS_INDEX).source(searchSource) + + client.search( + searchRequest, + object : ActionListener { + override fun onResponse(searchResponse: SearchResponse) { + onSearchResponse(searchResponse) + } + + override fun onFailure(t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + ) + } + } + + private fun validateActionThrottle(monitor: Monitor, maxValue: TimeValue, minValue: TimeValue) { + monitor.triggers.forEach { trigger -> + trigger.actions.forEach { action -> + if (action.throttle != null) { + require( + TimeValue(Duration.of(action.throttle!!.value.toLong(), action.throttle!!.unit).toMillis()) + .compareTo(maxValue) <= 0, + { "Can only set throttle period less than or equal to $maxValue" } + ) + require( + TimeValue(Duration.of(action.throttle!!.value.toLong(), action.throttle!!.unit).toMillis()) + .compareTo(minValue) >= 0, + { "Can only set throttle period greater than or equal to $minValue" } + ) + } + } + } + } + + /** + * After searching for all existing monitors we validate the system can support another monitor to be created. + */ + private fun onSearchResponse(response: SearchResponse) { + val totalHits = response.hits.totalHits?.value + if (totalHits != null && totalHits >= maxMonitors) { + log.info("This request would create more than the allowed monitors [$maxMonitors].") + actionListener.onFailure( + AlertingException.wrap( + IllegalArgumentException( + "This request would create more than the allowed monitors [$maxMonitors]." + ) + ) + ) + } else { + scope.launch { + indexMonitor() + } + } + } + + private fun onCreateMappingsResponse(isAcknowledged: Boolean) { + if (isAcknowledged) { + log.info("Created $SCHEDULED_JOBS_INDEX with mappings.") + prepareMonitorIndexing() + IndexUtils.scheduledJobIndexUpdated() + } else { + log.info("Create $SCHEDULED_JOBS_INDEX mappings call not acknowledged.") + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "Create $SCHEDULED_JOBS_INDEX mappings call not acknowledged", + RestStatus.INTERNAL_SERVER_ERROR + ) + ) + ) + } + } + + private fun onUpdateMappingsResponse(response: AcknowledgedResponse) { + if (response.isAcknowledged) { + log.info("Updated ${ScheduledJob.SCHEDULED_JOBS_INDEX} with mappings.") + IndexUtils.scheduledJobIndexUpdated() + prepareMonitorIndexing() + } else { + log.info("Update ${ScheduledJob.SCHEDULED_JOBS_INDEX} mappings call not acknowledged.") + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "Updated ${ScheduledJob.SCHEDULED_JOBS_INDEX} mappings call not acknowledged.", + RestStatus.INTERNAL_SERVER_ERROR + ) + ) + ) + } + } + + private suspend fun indexMonitor() { + if (user != null) { + // Use the backend roles which is an intersection of the requested backend roles and the user's backend roles. 
+ // Admins can pass in any backend role. Also if no backend role is passed in, all the user's backend roles are used. + val rbacRoles = if (request.rbacRoles == null) user.backendRoles.toSet() + else if (!isAdmin(user)) request.rbacRoles?.intersect(user.backendRoles)?.toSet() + else request.rbacRoles + + request.monitor = request.monitor.copy( + user = User(user.name, rbacRoles.orEmpty().toList(), user.roles, user.customAttNames) + ) + log.debug("Created monitor's backend roles: $rbacRoles") + } + + val indexRequest = IndexRequest(SCHEDULED_JOBS_INDEX) + .setRefreshPolicy(request.refreshPolicy) + .source(request.monitor.toXContentWithUser(jsonBuilder(), ToXContent.MapParams(mapOf("with_type" to "true")))) + .setIfSeqNo(request.seqNo) + .setIfPrimaryTerm(request.primaryTerm) + .timeout(indexTimeout) + + log.info( + "Creating new monitor: ${request.monitor.toXContentWithUser( + jsonBuilder(), + ToXContent.MapParams(mapOf("with_type" to "true")) + )}" + ) + + try { + val indexResponse: IndexResponse = client.suspendUntil { client.index(indexRequest, it) } + val failureReasons = checkShardsFailure(indexResponse) + if (failureReasons != null) { + log.info(failureReasons.toString()) + actionListener.onFailure( + AlertingException.wrap(OpenSearchStatusException(failureReasons.toString(), indexResponse.status())) + ) + return + } + var metadata: MonitorMetadata? + try { // delete monitor if metadata creation fails, log the right error and re-throw the error to fail listener + request.monitor = request.monitor.copy(id = indexResponse.id) + var (monitorMetadata: MonitorMetadata, created: Boolean) = MonitorMetadataService.getOrCreateMetadata(request.monitor) + if (created == false) { + log.warn("Metadata doc id:${monitorMetadata.id} exists, but it shouldn't!") + } + metadata = monitorMetadata + } catch (t: Exception) { + log.error("failed to create metadata for monitor ${indexResponse.id}. deleting monitor") + cleanupMonitorAfterPartialFailure(request.monitor, indexResponse) + throw t + } + try { + if (request.monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { + indexDocLevelMonitorQueries(request.monitor, indexResponse.id, metadata, request.refreshPolicy) + } + // When inserting queries in queryIndex we could update sourceToQueryIndexMapping + MonitorMetadataService.upsertMetadata(metadata, updating = true) + } catch (t: Exception) { + log.error("failed to index doc level queries monitor ${indexResponse.id}. deleting monitor", t) + cleanupMonitorAfterPartialFailure(request.monitor, indexResponse) + throw t + } + + actionListener.onResponse( + IndexMonitorResponse( + indexResponse.id, + indexResponse.version, + indexResponse.seqNo, + indexResponse.primaryTerm, + request.monitor + ) + ) + } catch (t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + + private suspend fun cleanupMonitorAfterPartialFailure(monitor: Monitor, indexMonitorResponse: IndexResponse) { + // we simply log the success (debug log) or failure (error log) when we try clean up partially failed monitor creation request + try { + DeleteMonitorService.deleteMonitor( + monitor = monitor, + RefreshPolicy.IMMEDIATE + ) + log.debug( + "Cleaned up monitor related resources after monitor creation request partial failure. 
" + + "Monitor id : ${indexMonitorResponse.id}" + ) + } catch (e: Exception) { + log.error("Failed to clean up monitor after monitor creation request partial failure", e) + } + } + + @Suppress("UNCHECKED_CAST") + private suspend fun indexDocLevelMonitorQueries( + monitor: Monitor, + monitorId: String, + monitorMetadata: MonitorMetadata, + refreshPolicy: RefreshPolicy + ) { + val queryIndex = monitor.dataSources.queryIndex + if (!docLevelMonitorQueries.docLevelQueryIndexExists(monitor.dataSources)) { + docLevelMonitorQueries.initDocLevelQueryIndex(monitor.dataSources) + log.info("Central Percolation index $queryIndex created") + } + docLevelMonitorQueries.indexDocLevelQueries( + monitor, + monitorId, + monitorMetadata, + refreshPolicy, + indexTimeout + ) + log.debug("Queries inserted into Percolate index $queryIndex") + } + + private suspend fun updateMonitor() { + val getRequest = GetRequest(SCHEDULED_JOBS_INDEX, request.monitorId) + try { + val getResponse: GetResponse = client.suspendUntil { client.get(getRequest, it) } + if (!getResponse.isExists) { + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException("Monitor with ${request.monitorId} is not found", RestStatus.NOT_FOUND) + ) + ) + return + } + val xcp = XContentHelper.createParser( + xContentRegistry, + LoggingDeprecationHandler.INSTANCE, + getResponse.sourceAsBytesRef, + XContentType.JSON + ) + val monitor = ScheduledJob.parse(xcp, getResponse.id, getResponse.version) as Monitor + onGetResponse(monitor) + } catch (t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + + private suspend fun onGetResponse(currentMonitor: Monitor) { + if (!checkUserPermissionsWithResource(user, currentMonitor.user, actionListener, "monitor", request.monitorId)) { + return + } + + // If both are enabled, use the current existing monitor enabled time, otherwise the next execution will be + // incorrect. + if (request.monitor.enabled && currentMonitor.enabled) { + request.monitor = request.monitor.copy(enabledTime = currentMonitor.enabledTime) + } + + /** + * On update monitor check which backend roles to associate to the monitor. + * Below are 2 examples of how the logic works + * + * Example 1, say we have a Monitor with backend roles [a, b, c, d] associated with it. + * If I'm User A (non-admin user) and I have backend roles [a, b, c] associated with me and I make a request to update + * the Monitor's backend roles to [a, b]. This would mean that the roles to remove are [c] and the roles to add are [a, b]. + * The Monitor's backend roles would then be [a, b, d]. + * + * Example 2, say we have a Monitor with backend roles [a, b, c, d] associated with it. + * If I'm User A (admin user) and I have backend roles [a, b, c] associated with me and I make a request to update + * the Monitor's backend roles to [a, b]. This would mean that the roles to remove are [c, d] and the roles to add are [a, b]. + * The Monitor's backend roles would then be [a, b]. 
+ */ + if (user != null) { + if (request.rbacRoles != null) { + if (isAdmin(user)) { + request.monitor = request.monitor.copy( + user = User(user.name, request.rbacRoles, user.roles, user.customAttNames) + ) + } else { + // rolesToRemove: these are the backend roles to remove from the monitor + val rolesToRemove = user.backendRoles - request.rbacRoles.orEmpty() + // remove the monitor's roles with rolesToRemove and add any roles passed into the request.rbacRoles + val updatedRbac = currentMonitor.user?.backendRoles.orEmpty() - rolesToRemove + request.rbacRoles.orEmpty() + request.monitor = request.monitor.copy( + user = User(user.name, updatedRbac, user.roles, user.customAttNames) + ) + } + } else { + request.monitor = request.monitor + .copy(user = User(user.name, currentMonitor.user!!.backendRoles, user.roles, user.customAttNames)) + } + log.debug("Update monitor backend roles to: ${request.monitor.user?.backendRoles}") + } + + request.monitor = request.monitor.copy(schemaVersion = IndexUtils.scheduledJobIndexSchemaVersion) + val indexRequest = IndexRequest(SCHEDULED_JOBS_INDEX) + .setRefreshPolicy(request.refreshPolicy) + .source(request.monitor.toXContentWithUser(jsonBuilder(), ToXContent.MapParams(mapOf("with_type" to "true")))) + .id(request.monitorId) + .setIfSeqNo(request.seqNo) + .setIfPrimaryTerm(request.primaryTerm) + .timeout(indexTimeout) + + log.info( + "Updating monitor, ${currentMonitor.id}, from: ${currentMonitor.toXContentWithUser( + jsonBuilder(), + ToXContent.MapParams(mapOf("with_type" to "true")) + )} \n to: ${request.monitor.toXContentWithUser(jsonBuilder(), ToXContent.MapParams(mapOf("with_type" to "true")))}" + ) + + try { + val indexResponse: IndexResponse = client.suspendUntil { client.index(indexRequest, it) } + val failureReasons = checkShardsFailure(indexResponse) + if (failureReasons != null) { + actionListener.onFailure( + AlertingException.wrap(OpenSearchStatusException(failureReasons.toString(), indexResponse.status())) + ) + return + } + var updatedMetadata: MonitorMetadata + val (metadata, created) = MonitorMetadataService.getOrCreateMetadata(request.monitor) + // Recreate runContext if metadata exists + // Delete and insert all queries from/to queryIndex + if (created == false && currentMonitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { + updatedMetadata = MonitorMetadataService.recreateRunContext(metadata, currentMonitor) + client.suspendUntil { + DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE) + .source(currentMonitor.dataSources.queryIndex) + .filter(QueryBuilders.matchQuery("monitor_id", currentMonitor.id)) + .execute(it) + } + indexDocLevelMonitorQueries(request.monitor, currentMonitor.id, updatedMetadata, request.refreshPolicy) + MonitorMetadataService.upsertMetadata(updatedMetadata, updating = true) + } + actionListener.onResponse( + IndexMonitorResponse( + indexResponse.id, + indexResponse.version, + indexResponse.seqNo, + indexResponse.primaryTerm, + request.monitor + ) + ) + } catch (t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + + private fun checkShardsFailure(response: IndexResponse): String? 
{ + val failureReasons = StringBuilder() + if (response.shardInfo.failed > 0) { + response.shardInfo.failures.forEach { + entry -> + failureReasons.append(entry.reason()) + } + return failureReasons.toString() + } + return null + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportIndexWorkflowAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportIndexWorkflowAction.kt new file mode 100644 index 000000000..26d834fe6 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/transport/TransportIndexWorkflowAction.kt @@ -0,0 +1,796 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.launch +import org.apache.logging.log4j.LogManager +import org.opensearch.ExceptionsHelper +import org.opensearch.OpenSearchException +import org.opensearch.OpenSearchStatusException +import org.opensearch.ResourceAlreadyExistsException +import org.opensearch.action.ActionRequest +import org.opensearch.action.admin.cluster.health.ClusterHealthAction +import org.opensearch.action.admin.cluster.health.ClusterHealthRequest +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse +import org.opensearch.action.admin.indices.create.CreateIndexResponse +import org.opensearch.action.get.GetRequest +import org.opensearch.action.get.GetResponse +import org.opensearch.action.index.IndexRequest +import org.opensearch.action.index.IndexResponse +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.HandledTransportAction +import org.opensearch.action.support.master.AcknowledgedResponse +import org.opensearch.alerting.MonitorMetadataService +import org.opensearch.alerting.MonitorRunnerService.monitorCtx +import org.opensearch.alerting.WorkflowMetadataService +import org.opensearch.alerting.core.ScheduledJobIndices +import org.opensearch.alerting.opensearchapi.InjectorContextElement +import org.opensearch.alerting.opensearchapi.addFilter +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.opensearchapi.withClosableContext +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERTING_MAX_MONITORS +import org.opensearch.alerting.settings.AlertingSettings.Companion.INDEX_TIMEOUT +import org.opensearch.alerting.settings.AlertingSettings.Companion.MAX_ACTION_THROTTLE_VALUE +import org.opensearch.alerting.settings.AlertingSettings.Companion.REQUEST_TIMEOUT +import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.IndexUtils +import org.opensearch.alerting.util.isADMonitor +import org.opensearch.alerting.util.isQueryLevelMonitor +import org.opensearch.alerting.workflow.CompositeWorkflowRunner +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentFactory.jsonBuilder +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import 
org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.IndexWorkflowRequest +import org.opensearch.commons.alerting.action.IndexWorkflowResponse +import org.opensearch.commons.alerting.model.CompositeInput +import org.opensearch.commons.alerting.model.Delegate +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.commons.authuser.User +import org.opensearch.commons.utils.recreateObject +import org.opensearch.core.action.ActionListener +import org.opensearch.core.common.io.stream.NamedWriteableRegistry +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.index.IndexNotFoundException +import org.opensearch.index.query.QueryBuilders +import org.opensearch.rest.RestRequest +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService +import java.util.UUID +import java.util.stream.Collectors + +private val log = LogManager.getLogger(TransportIndexWorkflowAction::class.java) +private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) + +class TransportIndexWorkflowAction @Inject constructor( + transportService: TransportService, + val client: Client, + actionFilters: ActionFilters, + val scheduledJobIndices: ScheduledJobIndices, + val clusterService: ClusterService, + val settings: Settings, + val xContentRegistry: NamedXContentRegistry, + val namedWriteableRegistry: NamedWriteableRegistry, +) : HandledTransportAction( + AlertingActions.INDEX_WORKFLOW_ACTION_NAME, transportService, actionFilters, ::IndexWorkflowRequest +), + SecureTransportAction { + + @Volatile + private var maxMonitors = ALERTING_MAX_MONITORS.get(settings) + + @Volatile + private var requestTimeout = REQUEST_TIMEOUT.get(settings) + + @Volatile + private var indexTimeout = INDEX_TIMEOUT.get(settings) + + @Volatile + private var maxActionThrottle = MAX_ACTION_THROTTLE_VALUE.get(settings) + + @Volatile + private var allowList = ALLOW_LIST.get(settings) + + @Volatile + override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) + + init { + clusterService.clusterSettings.addSettingsUpdateConsumer(ALERTING_MAX_MONITORS) { maxMonitors = it } + clusterService.clusterSettings.addSettingsUpdateConsumer(REQUEST_TIMEOUT) { requestTimeout = it } + clusterService.clusterSettings.addSettingsUpdateConsumer(INDEX_TIMEOUT) { indexTimeout = it } + clusterService.clusterSettings.addSettingsUpdateConsumer(MAX_ACTION_THROTTLE_VALUE) { maxActionThrottle = it } + clusterService.clusterSettings.addSettingsUpdateConsumer(ALLOW_LIST) { allowList = it } + listenFilterBySettingChange(clusterService) + } + + override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener) { + val transformedRequest = request as? 
IndexWorkflowRequest
+            ?: recreateObject(request, namedWriteableRegistry) {
+                IndexWorkflowRequest(it)
+            }
+
+        val user = readUserFromThreadContext(client)
+
+        if (!validateUserBackendRoles(user, actionListener)) {
+            return
+        }
+
+        if (
+            user != null &&
+            !isAdmin(user) &&
+            transformedRequest.rbacRoles != null
+        ) {
+            if (transformedRequest.rbacRoles?.stream()?.anyMatch { !user.backendRoles.contains(it) } == true) {
+                log.error(
+                    "User specified backend roles, ${transformedRequest.rbacRoles}, " +
+                        "that they don't have access to. User backend roles: ${user.backendRoles}"
+                )
+                actionListener.onFailure(
+                    AlertingException.wrap(
+                        OpenSearchStatusException(
+                            "User specified backend roles that they don't have access to. Contact administrator",
+                            RestStatus.FORBIDDEN
+                        )
+                    )
+                )
+                return
+            } else if (transformedRequest.rbacRoles?.isEmpty() == true) {
+                log.error(
+                    "Non-admin users are not allowed to specify an empty set of backend roles. " +
+                        "Please don't pass in the parameter or pass in at least one backend role."
+                )
+                actionListener.onFailure(
+                    AlertingException.wrap(
+                        OpenSearchStatusException(
+                            "Non-admin users are not allowed to specify an empty set of backend roles.",
+                            RestStatus.FORBIDDEN
+                        )
+                    )
+                )
+                return
+            }
+        }
+
+        scope.launch {
+            try {
+                validateMonitorAccess(
+                    transformedRequest,
+                    user,
+                    client,
+                    object : ActionListener<AcknowledgedResponse> {
+                        override fun onResponse(response: AcknowledgedResponse) {
+                            // Stash the context and start the workflow creation
+                            client.threadPool().threadContext.stashContext().use {
+                                IndexWorkflowHandler(client, actionListener, transformedRequest, user).resolveUserAndStart()
+                            }
+                        }
+
+                        override fun onFailure(e: Exception) {
+                            log.error("Error indexing workflow", e)
+                            actionListener.onFailure(e)
+                        }
+                    }
+                )
+            } catch (e: Exception) {
+                log.error("Failed to create workflow", e)
+                if (e is IndexNotFoundException) {
+                    actionListener.onFailure(
+                        OpenSearchStatusException(
+                            "Monitors not found",
+                            RestStatus.NOT_FOUND
+                        )
+                    )
+                } else {
+                    actionListener.onFailure(e)
+                }
+            }
+        }
+    }
+
+    inner class IndexWorkflowHandler(
+        private val client: Client,
+        private val actionListener: ActionListener<IndexWorkflowResponse>,
+        private val request: IndexWorkflowRequest,
+        private val user: User?,
+    ) {
+        fun resolveUserAndStart() {
+            scope.launch {
+                if (user == null) {
+                    // Security is disabled, add empty user to Workflow. user is null for older versions.
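+                    // (same convention as the monitor code path: an empty User marks a job created
+                    // without a security context)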
+ request.workflow = request.workflow + .copy(user = User("", listOf(), listOf(), listOf())) + start() + } else { + request.workflow = request.workflow + .copy(user = User(user.name, user.backendRoles, user.roles, user.customAttNames)) + start() + } + } + } + + fun start() { + if (!scheduledJobIndices.scheduledJobIndexExists()) { + scheduledJobIndices.initScheduledJobIndex(object : ActionListener { + override fun onResponse(response: CreateIndexResponse) { + onCreateMappingsResponse(response.isAcknowledged) + } + + override fun onFailure(t: Exception) { + // https://github.com/opensearch-project/alerting/issues/646 + if (ExceptionsHelper.unwrapCause(t) is ResourceAlreadyExistsException) { + scope.launch { + // Wait for the yellow status + val request = ClusterHealthRequest() + .indices(SCHEDULED_JOBS_INDEX) + .waitForYellowStatus() + val response: ClusterHealthResponse = client.suspendUntil { + execute(ClusterHealthAction.INSTANCE, request, it) + } + if (response.isTimedOut) { + log.error("Workflow creation timeout", t) + actionListener.onFailure( + OpenSearchException("Cannot determine that the $SCHEDULED_JOBS_INDEX index is healthy") + ) + } + // Retry mapping of workflow + onCreateMappingsResponse(true) + } + } else { + log.error("Failed to create workflow", t) + actionListener.onFailure(AlertingException.wrap(t)) + } + } + }) + } else if (!IndexUtils.scheduledJobIndexUpdated) { + IndexUtils.updateIndexMapping( + SCHEDULED_JOBS_INDEX, + ScheduledJobIndices.scheduledJobMappings(), clusterService.state(), client.admin().indices(), + object : ActionListener { + override fun onResponse(response: AcknowledgedResponse) { + onUpdateMappingsResponse(response) + } + + override fun onFailure(t: Exception) { + log.error("Failed to create workflow", t) + actionListener.onFailure(AlertingException.wrap(t)) + } + } + ) + } else { + prepareWorkflowIndexing() + } + } + + /** + * This function prepares for indexing a new workflow. + * If this is an update request we can simply update the workflow. Otherwise we first check to see how many monitors already exist, + * and compare this to the [maxMonitorCount]. Requests that breach this threshold will be rejected. 
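+         * (In this workflow path no count check is actually performed; the request goes straight
+         * to create or update below.)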
+ */ + private fun prepareWorkflowIndexing() { + if (request.method == RestRequest.Method.PUT) { + scope.launch { + updateWorkflow() + } + } else { + scope.launch { + indexWorkflow() + } + } + } + + private fun onCreateMappingsResponse(isAcknowledged: Boolean) { + if (isAcknowledged) { + log.info("Created $SCHEDULED_JOBS_INDEX with mappings.") + prepareWorkflowIndexing() + IndexUtils.scheduledJobIndexUpdated() + } else { + log.error("Create $SCHEDULED_JOBS_INDEX mappings call not acknowledged.") + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "Create $SCHEDULED_JOBS_INDEX mappings call not acknowledged", + RestStatus.INTERNAL_SERVER_ERROR + ) + ) + ) + } + } + + private fun onUpdateMappingsResponse(response: AcknowledgedResponse) { + if (response.isAcknowledged) { + log.info("Updated $SCHEDULED_JOBS_INDEX with mappings.") + IndexUtils.scheduledJobIndexUpdated() + prepareWorkflowIndexing() + } else { + log.error("Update $SCHEDULED_JOBS_INDEX mappings call not acknowledged.") + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "Updated $SCHEDULED_JOBS_INDEX mappings call not acknowledged.", + RestStatus.INTERNAL_SERVER_ERROR + ) + ) + ) + } + } + + private suspend fun indexWorkflow() { + if (user != null) { + val rbacRoles = if (request.rbacRoles == null) user.backendRoles.toSet() + else if (!isAdmin(user)) request.rbacRoles?.intersect(user.backendRoles)?.toSet() + else request.rbacRoles + + request.workflow = request.workflow.copy( + user = User(user.name, rbacRoles.orEmpty().toList(), user.roles, user.customAttNames) + ) + log.debug("Created workflow's backend roles: $rbacRoles") + } + + val indexRequest = IndexRequest(SCHEDULED_JOBS_INDEX) + .setRefreshPolicy(request.refreshPolicy) + .source( + request.workflow.toXContentWithUser( + jsonBuilder(), + ToXContent.MapParams(mapOf("with_type" to "true")) + ) + ) + .setIfSeqNo(request.seqNo) + .setIfPrimaryTerm(request.primaryTerm) + .timeout(indexTimeout) + + try { + val indexResponse: IndexResponse = client.suspendUntil { client.index(indexRequest, it) } + val failureReasons = checkShardsFailure(indexResponse) + if (failureReasons != null) { + log.error("Failed to create workflow: $failureReasons") + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + failureReasons.toString(), + indexResponse.status() + ) + ) + ) + return + } + + val createdWorkflow = request.workflow.copy(id = indexResponse.id) + val executionId = CompositeWorkflowRunner.generateExecutionId(false, createdWorkflow) + + val (workflowMetadata, _) = WorkflowMetadataService.getOrCreateWorkflowMetadata( + workflow = createdWorkflow, + skipIndex = false, + executionId = executionId + ) + + val delegates = (createdWorkflow.inputs[0] as CompositeInput).sequence.delegates.sortedBy { it.order } + val monitors = monitorCtx.workflowService!!.getMonitorsById(delegates.map { it.monitorId }, delegates.size) + + for (monitor in monitors) { + var (monitorMetadata, created) = MonitorMetadataService.getOrCreateMetadata( + monitor = monitor, + createWithRunContext = true, + workflowMetadataId = workflowMetadata.id + ) + + if (created == false) { + log.warn("Metadata doc id:${monitorMetadata.id} exists, but it shouldn't!") + } + + if (monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { + val oldMonitorMetadata = MonitorMetadataService.getMetadata(monitor) + monitorMetadata = monitorMetadata.copy(sourceToQueryIndexMapping = oldMonitorMetadata!!.sourceToQueryIndexMapping) + } + // When 
inserting queries in queryIndex we could update sourceToQueryIndexMapping
+                    MonitorMetadataService.upsertMetadata(monitorMetadata, updating = true)
+                }
+                actionListener.onResponse(
+                    IndexWorkflowResponse(
+                        indexResponse.id, indexResponse.version, indexResponse.seqNo,
+                        indexResponse.primaryTerm, request.workflow.copy(id = indexResponse.id)
+                    )
+                )
+            } catch (t: Exception) {
+                log.error("Failed to index workflow", t)
+                actionListener.onFailure(AlertingException.wrap(t))
+            }
+        }
+
+        private suspend fun updateWorkflow() {
+            val getRequest = GetRequest(SCHEDULED_JOBS_INDEX, request.workflowId)
+            try {
+                val getResponse: GetResponse = client.suspendUntil { client.get(getRequest, it) }
+                if (!getResponse.isExists) {
+                    actionListener.onFailure(
+                        AlertingException.wrap(
+                            OpenSearchStatusException(
+                                "Workflow with ${request.workflowId} is not found",
+                                RestStatus.NOT_FOUND
+                            )
+                        )
+                    )
+                    return
+                }
+                val xcp = XContentHelper.createParser(
+                    xContentRegistry, LoggingDeprecationHandler.INSTANCE,
+                    getResponse.sourceAsBytesRef, XContentType.JSON
+                )
+                val workflow = ScheduledJob.parse(xcp, getResponse.id, getResponse.version) as Workflow
+                onGetResponse(workflow)
+            } catch (t: Exception) {
+                actionListener.onFailure(AlertingException.wrap(t))
+            }
+        }
+
+        private suspend fun onGetResponse(currentWorkflow: Workflow) {
+            if (!checkUserPermissionsWithResource(
+                    user,
+                    currentWorkflow.user,
+                    actionListener,
+                    "workflow",
+                    request.workflowId
+                )
+            ) {
+                return
+            }
+
+            // If both are enabled, use the current existing workflow enabled time, otherwise the next execution will be
+            // incorrect.
+            if (request.workflow.enabled && currentWorkflow.enabled)
+                request.workflow = request.workflow.copy(enabledTime = currentWorkflow.enabledTime)
+
+            /**
+             * On update workflow check which backend roles to associate to the workflow.
+             * Below are 2 examples of how the logic works
+             *
+             * Example 1, say we have a Workflow with backend roles [a, b, c, d] associated with it.
+             * If I'm User A (non-admin user) and I have backend roles [a, b, c] associated with me and I make a request to update
+             * the Workflow's backend roles to [a, b]. This would mean that the roles to remove are [c] and the roles to add are [a, b].
+             * The Workflow's backend roles would then be [a, b, d].
+             *
+             * Example 2, say we have a Workflow with backend roles [a, b, c, d] associated with it.
+             * If I'm User A (admin user) and I have backend roles [a, b, c] associated with me and I make a request to update
+             * the Workflow's backend roles to [a, b]. This would mean that the roles to remove are [c, d] and the roles to add are [a, b].
+             * The Workflow's backend roles would then be [a, b].
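+             * The same role arithmetic as in monitor updates applies:
+             * updatedRoles = currentRoles - (userRoles - requestedRoles) + requestedRoles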
+             */
+            if (user != null) {
+                if (request.rbacRoles != null) {
+                    if (isAdmin(user)) {
+                        request.workflow = request.workflow.copy(
+                            user = User(user.name, request.rbacRoles, user.roles, user.customAttNames)
+                        )
+                    } else {
+                        // rolesToRemove: these are the backend roles to remove from the workflow
+                        val rolesToRemove = user.backendRoles - request.rbacRoles.orEmpty()
+                        // remove the workflow's roles with rolesToRemove and add any roles passed into request.rbacRoles
+                        val updatedRbac =
+                            currentWorkflow.user?.backendRoles.orEmpty() - rolesToRemove + request.rbacRoles.orEmpty()
+                        request.workflow = request.workflow.copy(
+                            user = User(user.name, updatedRbac, user.roles, user.customAttNames)
+                        )
+                    }
+                } else {
+                    request.workflow = request.workflow
+                        .copy(
+                            user = User(
+                                user.name,
+                                currentWorkflow.user!!.backendRoles,
+                                user.roles,
+                                user.customAttNames
+                            )
+                        )
+                }
+                log.debug("Update workflow backend roles to: ${request.workflow.user?.backendRoles}")
+            }
+
+            request.workflow = request.workflow.copy(schemaVersion = IndexUtils.scheduledJobIndexSchemaVersion)
+            val indexRequest = IndexRequest(SCHEDULED_JOBS_INDEX)
+                .setRefreshPolicy(request.refreshPolicy)
+                .source(
+                    request.workflow.toXContentWithUser(
+                        jsonBuilder(),
+                        ToXContent.MapParams(mapOf("with_type" to "true"))
+                    )
+                )
+                .id(request.workflowId)
+                .setIfSeqNo(request.seqNo)
+                .setIfPrimaryTerm(request.primaryTerm)
+                .timeout(indexTimeout)
+
+            try {
+                val indexResponse: IndexResponse = client.suspendUntil { client.index(indexRequest, it) }
+                val failureReasons = checkShardsFailure(indexResponse)
+                if (failureReasons != null) {
+                    actionListener.onFailure(
+                        AlertingException.wrap(
+                            OpenSearchStatusException(
+                                failureReasons.toString(),
+                                indexResponse.status()
+                            )
+                        )
+                    )
+                    return
+                }
+
+                val updatedWorkflow = request.workflow.copy(id = indexResponse.id)
+                val executionId = CompositeWorkflowRunner.generateExecutionId(false, updatedWorkflow)
+
+                val (workflowMetadata, _) = WorkflowMetadataService.getOrCreateWorkflowMetadata(
+                    workflow = updatedWorkflow,
+                    skipIndex = false,
+                    executionId = executionId
+                )
+
+                val delegates = (updatedWorkflow.inputs[0] as CompositeInput).sequence.delegates.sortedBy { it.order }
+                val monitors = monitorCtx.workflowService!!.getMonitorsById(delegates.map { it.monitorId }, delegates.size)
+
+                for (monitor in monitors) {
+                    val (monitorMetadata, created) = MonitorMetadataService.getOrCreateMetadata(
+                        monitor = monitor,
+                        createWithRunContext = true,
+                        workflowMetadataId = workflowMetadata.id
+                    )
+
+                    if (created == false && monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) {
+                        var updatedMetadata = MonitorMetadataService.recreateRunContext(monitorMetadata, monitor)
+                        val oldMonitorMetadata = MonitorMetadataService.getMetadata(monitor)
+                        updatedMetadata = updatedMetadata.copy(sourceToQueryIndexMapping = oldMonitorMetadata!!.sourceToQueryIndexMapping)
+                        MonitorMetadataService.upsertMetadata(updatedMetadata, updating = true)
+                    }
+                }
+                actionListener.onResponse(
+                    IndexWorkflowResponse(
+                        indexResponse.id, indexResponse.version, indexResponse.seqNo,
+                        indexResponse.primaryTerm, request.workflow.copy(id = currentWorkflow.id)
+                    )
+                )
+            } catch (t: Exception) {
+                actionListener.onFailure(AlertingException.wrap(t))
+            }
+        }
+
+        private fun checkShardsFailure(response: IndexResponse): String? {
+            val failureReasons = StringBuilder()
+            if (response.shardInfo.failed > 0) {
+                response.shardInfo.failures.forEach { entry ->
+                    failureReasons.append(entry.reason())
+                }
+                return failureReasons.toString()
+            }
+            return null
+        }
+    }
+
+    private fun validateChainedMonitorFindingsMonitors(delegates: List<Delegate>, monitorDelegates: List<Monitor>) {
+        infix fun <T> List<T>.equalsIgnoreOrder(other: List<T>) =
+            this.size == other.size && this.toSet() == other.toSet()
+
+        val monitorsById = monitorDelegates.associateBy { it.id }
+        delegates.forEach {
+
+            val delegateMonitor = monitorsById[it.monitorId] ?: throw AlertingException.wrap(
+                IllegalArgumentException("Delegate monitor ${it.monitorId} doesn't exist")
+            )
+            if (it.chainedMonitorFindings != null) {
+                val chainedMonitorIds: MutableList<String> = mutableListOf()
+                if (it.chainedMonitorFindings!!.monitorId.isNullOrBlank()) {
+                    chainedMonitorIds.addAll(it.chainedMonitorFindings!!.monitorIds)
+                } else {
+                    chainedMonitorIds.add(it.chainedMonitorFindings!!.monitorId!!)
+                }
+                chainedMonitorIds.forEach { chainedMonitorId ->
+                    val chainedFindingMonitor =
+                        monitorsById[chainedMonitorId] ?: throw AlertingException.wrap(
+                            IllegalArgumentException("Chained finding monitor $chainedMonitorId doesn't exist")
+                        )
+
+                    if (chainedFindingMonitor.isQueryLevelMonitor()) {
+                        throw AlertingException.wrap(IllegalArgumentException("Query level monitor can't be part of chained findings"))
+                    }
+
+                    val delegateMonitorIndices = getMonitorIndices(delegateMonitor)
+
+                    val chainedMonitorIndices = getMonitorIndices(chainedFindingMonitor)
+
+                    if (!delegateMonitorIndices.containsAll(chainedMonitorIndices)) {
+                        throw AlertingException.wrap(
+                            IllegalArgumentException(
+                                "Delegate monitor indices ${delegateMonitorIndices.joinToString()} " +
+                                    "don't query all of the chained findings monitor's indices ${chainedMonitorIndices.joinToString()}"
+                            )
+                        )
+                    }
+                }
+            }
+        }
+    }
+
+    /**
+     * Returns the list of indices for the given monitor depending on its type
+     */
+    private fun getMonitorIndices(monitor: Monitor): List<String> {
+        return when (monitor.monitorType) {
+            Monitor.MonitorType.DOC_LEVEL_MONITOR -> (monitor.inputs[0] as DocLevelMonitorInput).indices
+            Monitor.MonitorType.BUCKET_LEVEL_MONITOR -> monitor.inputs.flatMap { s -> (s as SearchInput).indices }
+            Monitor.MonitorType.QUERY_LEVEL_MONITOR -> {
+                if (isADMonitor(monitor)) monitor.inputs.flatMap { s -> (s as SearchInput).indices }
+                else {
+                    val indices = mutableListOf<String>()
+                    for (input in monitor.inputs) {
+                        when (input) {
+                            is SearchInput -> indices.addAll(input.indices)
+                            else -> indices
+                        }
+                    }
+                    indices
+                }
+            }
+
+            else -> emptyList()
+        }
+    }
+
+    private fun validateDelegateMonitorsExist(
+        monitorIds: List<String>,
+        delegateMonitors: List<Monitor>,
+    ) {
+        val reqMonitorIds: MutableList<String> = monitorIds as MutableList<String>
+        delegateMonitors.forEach {
+            reqMonitorIds.remove(it.id)
+        }
+        if (reqMonitorIds.isNotEmpty()) {
+            throw AlertingException.wrap(IllegalArgumentException("${reqMonitorIds.joinToString()} are not valid monitor ids"))
+        }
+    }
+
+    /**
+     * Validates monitor and indices access
+     * 1. Validates the monitor access (if filterByEnabled is set to true, adds a backend role filter) as admin
+     * 2. Unstashes the context and checks if the user can access the monitor indices
+     */
+    private suspend fun validateMonitorAccess(
+        request: IndexWorkflowRequest,
+        user: User?,
+        client: Client,
+        actionListener: ActionListener<AcknowledgedResponse>,
+    ) {
+        val compositeInput = request.workflow.inputs[0] as CompositeInput
+        val monitorIds = compositeInput.sequence.delegates.stream().map { it.monitorId }.collect(Collectors.toList())
+        val query = QueryBuilders.boolQuery().filter(QueryBuilders.termsQuery("_id", monitorIds))
+        val searchSource = SearchSourceBuilder().query(query)
+        val searchRequest = SearchRequest(SCHEDULED_JOBS_INDEX).source(searchSource)
+
+        if (user != null && !isAdmin(user) && filterByEnabled) {
+            addFilter(user, searchRequest.source(), "monitor.user.backend_roles.keyword")
+        }
+
+        val searchMonitorResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) }
+
+        if (searchMonitorResponse.isTimedOut) {
+            throw OpenSearchException("Cannot determine that the $SCHEDULED_JOBS_INDEX index is healthy")
+        }
+        val monitors = mutableListOf<Monitor>()
+        for (hit in searchMonitorResponse.hits) {
+            XContentType.JSON.xContent().createParser(
+                xContentRegistry,
+                LoggingDeprecationHandler.INSTANCE, hit.sourceAsString
+            ).use { hitsParser ->
+                val monitor = ScheduledJob.parse(hitsParser, hit.id, hit.version) as Monitor
+                monitors.add(monitor)
+            }
+        }
+        if (monitors.isEmpty()) {
+            actionListener.onFailure(
+                AlertingException.wrap(
+                    OpenSearchStatusException(
+                        "User doesn't have read permissions for one or more configured monitors ${monitorIds.joinToString()}",
+                        RestStatus.FORBIDDEN
+                    )
+                )
+            )
+            return
+        }
+        // Validate delegates and their chained findings
+        try {
+            validateDelegateMonitorsExist(monitorIds, monitors)
+            validateChainedMonitorFindingsMonitors(compositeInput.sequence.delegates, monitors)
+        } catch (e: Exception) {
+            actionListener.onFailure(e)
+            return
+        }
+        val indices = getMonitorIndices(monitors)
+
+        val indicesSearchRequest = SearchRequest().indices(*indices.toTypedArray())
+            .source(SearchSourceBuilder.searchSource().size(1).query(QueryBuilders.matchAllQuery()))
+
+        if (user != null && filterByEnabled) {
+            // Unstash the context and check if a user with the specified roles has indices access
+            withClosableContext(
+                InjectorContextElement(
+                    user.name.plus(UUID.randomUUID().toString()),
+                    settings,
+                    client.threadPool().threadContext,
+                    user.roles,
+                    user
+                )
+            ) {
+                checkIndicesAccess(client, indicesSearchRequest, indices, actionListener)
+            }
+        } else {
+            checkIndicesAccess(client, indicesSearchRequest, indices, actionListener)
+        }
+    }
+
+    /**
+     * Checks if the client can access the given indices
+     */
+    private fun checkIndicesAccess(
+        client: Client,
+        indicesSearchRequest: SearchRequest?,
+        indices: MutableList<String>,
+        actionListener: ActionListener<AcknowledgedResponse>,
+    ) {
+        client.search(
+            indicesSearchRequest,
+            object : ActionListener<SearchResponse> {
+                override fun onResponse(response: SearchResponse?)
{ + actionListener.onResponse(AcknowledgedResponse(true)) + } + + override fun onFailure(e: Exception) { + log.error("Error accessing the monitor indices", e) + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "User doesn't have read permissions for one or more configured index ${indices.joinToString()}", + RestStatus.FORBIDDEN + ) + ) + ) + } + } + ) + } + + /** + * Extract indices from monitors + */ + private fun getMonitorIndices(monitors: List): MutableList { + val indices = mutableListOf() + + val searchInputs = + monitors.flatMap { monitor -> + monitor.inputs.filter { + it.name() == SearchInput.SEARCH_FIELD || it.name() == DocLevelMonitorInput.DOC_LEVEL_INPUT_FIELD + } + } + searchInputs.forEach { + val inputIndices = if (it.name() == SearchInput.SEARCH_FIELD) (it as SearchInput).indices + else (it as DocLevelMonitorInput).indices + indices.addAll(inputIndices) + } + return indices + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportSearchEmailAccountAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportSearchEmailAccountAction.kt new file mode 100644 index 000000000..29ebbb90f --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/transport/TransportSearchEmailAccountAction.kt @@ -0,0 +1,73 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import org.opensearch.OpenSearchStatusException +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.HandledTransportAction +import org.opensearch.alerting.action.SearchEmailAccountAction +import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.DestinationType +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService + +class TransportSearchEmailAccountAction @Inject constructor( + transportService: TransportService, + val client: Client, + actionFilters: ActionFilters, + val clusterService: ClusterService, + settings: Settings +) : HandledTransportAction( + SearchEmailAccountAction.NAME, + transportService, + actionFilters, + ::SearchRequest +) { + + @Volatile private var allowList = ALLOW_LIST.get(settings) + + init { + clusterService.clusterSettings.addSettingsUpdateConsumer(ALLOW_LIST) { allowList = it } + } + + override fun doExecute(task: Task, searchRequest: SearchRequest, actionListener: ActionListener) { + if (!allowList.contains(DestinationType.EMAIL.value)) { + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "This API is blocked since Destination type [${DestinationType.EMAIL}] is not allowed", + RestStatus.FORBIDDEN + ) + ) + ) + return + } + + client.threadPool().threadContext.stashContext().use { + client.search( + searchRequest, + object : ActionListener { + override fun onResponse(response: SearchResponse) { + actionListener.onResponse(response) + } + + override fun onFailure(e: Exception) { + actionListener.onFailure(e) + } + } + ) + } + } +} diff --git 
a/alerting/bin/main/org/opensearch/alerting/transport/TransportSearchEmailGroupAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportSearchEmailGroupAction.kt new file mode 100644 index 000000000..c6fc84640 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/transport/TransportSearchEmailGroupAction.kt @@ -0,0 +1,73 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import org.opensearch.OpenSearchStatusException +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.HandledTransportAction +import org.opensearch.alerting.action.SearchEmailGroupAction +import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.DestinationType +import org.opensearch.client.Client +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService + +class TransportSearchEmailGroupAction @Inject constructor( + transportService: TransportService, + val client: Client, + actionFilters: ActionFilters, + val clusterService: ClusterService, + settings: Settings +) : HandledTransportAction( + SearchEmailGroupAction.NAME, + transportService, + actionFilters, + ::SearchRequest +) { + + @Volatile private var allowList = ALLOW_LIST.get(settings) + + init { + clusterService.clusterSettings.addSettingsUpdateConsumer(ALLOW_LIST) { allowList = it } + } + + override fun doExecute(task: Task, searchRequest: SearchRequest, actionListener: ActionListener) { + if (!allowList.contains(DestinationType.EMAIL.value)) { + actionListener.onFailure( + AlertingException.wrap( + OpenSearchStatusException( + "This API is blocked since Destination type [${DestinationType.EMAIL}] is not allowed", + RestStatus.FORBIDDEN + ) + ) + ) + return + } + + client.threadPool().threadContext.stashContext().use { + client.search( + searchRequest, + object : ActionListener { + override fun onResponse(response: SearchResponse) { + actionListener.onResponse(response) + } + + override fun onFailure(e: Exception) { + actionListener.onFailure(e) + } + } + ) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt new file mode 100644 index 000000000..7359d60ea --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt @@ -0,0 +1,128 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import org.apache.logging.log4j.LogManager +import org.opensearch.action.ActionRequest +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.HandledTransportAction +import org.opensearch.alerting.opensearchapi.addFilter +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.util.AlertingException +import org.opensearch.client.Client 
+import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import org.opensearch.common.settings.Settings +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.SearchMonitorRequest +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.commons.authuser.User +import org.opensearch.commons.utils.recreateObject +import org.opensearch.core.action.ActionListener +import org.opensearch.core.common.io.stream.NamedWriteableRegistry +import org.opensearch.index.query.BoolQueryBuilder +import org.opensearch.index.query.ExistsQueryBuilder +import org.opensearch.index.query.MatchQueryBuilder +import org.opensearch.index.query.QueryBuilders +import org.opensearch.tasks.Task +import org.opensearch.transport.TransportService + +private val log = LogManager.getLogger(TransportSearchMonitorAction::class.java) + +class TransportSearchMonitorAction @Inject constructor( + transportService: TransportService, + val settings: Settings, + val client: Client, + clusterService: ClusterService, + actionFilters: ActionFilters, + val namedWriteableRegistry: NamedWriteableRegistry +) : HandledTransportAction( + AlertingActions.SEARCH_MONITORS_ACTION_NAME, transportService, actionFilters, ::SearchMonitorRequest +), + SecureTransportAction { + @Volatile + override var filterByEnabled: Boolean = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) + init { + listenFilterBySettingChange(clusterService) + } + + override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener) { + val transformedRequest = request as? SearchMonitorRequest + ?: recreateObject(request, namedWriteableRegistry) { + SearchMonitorRequest(it) + } + + val searchSourceBuilder = transformedRequest.searchRequest.source() + .seqNoAndPrimaryTerm(true) + .version(true) + val queryBuilder = if (searchSourceBuilder.query() == null) BoolQueryBuilder() + else QueryBuilders.boolQuery().must(searchSourceBuilder.query()) + + // The SearchMonitor API supports one 'index' parameter of either the SCHEDULED_JOBS_INDEX or ALL_ALERT_INDEX_PATTERN. + // When querying the ALL_ALERT_INDEX_PATTERN, we don't want to check whether the MONITOR_TYPE field exists + // because we're querying alert indexes. + if (transformedRequest.searchRequest.indices().contains(ScheduledJob.SCHEDULED_JOBS_INDEX)) { + val monitorWorkflowType = QueryBuilders.boolQuery().should(QueryBuilders.existsQuery(Monitor.MONITOR_TYPE)) + .should(QueryBuilders.existsQuery(Workflow.WORKFLOW_TYPE)) + queryBuilder.must(monitorWorkflowType) + } + + searchSourceBuilder.query(queryBuilder) + .seqNoAndPrimaryTerm(true) + .version(true) + addOwnerFieldIfNotExists(transformedRequest.searchRequest) + val user = readUserFromThreadContext(client) + client.threadPool().threadContext.stashContext().use { + resolve(transformedRequest, actionListener, user) + } + } + + fun resolve(searchMonitorRequest: SearchMonitorRequest, actionListener: ActionListener, user: User?) { + if (user == null) { + // user header is null when: 1/ security is disabled. 2/ when user is super-admin. + search(searchMonitorRequest.searchRequest, actionListener) + } else if (!doFilterForUser(user)) { + // security is enabled and filterby is disabled. + search(searchMonitorRequest.searchRequest, actionListener) + } else { + // security is enabled and filterby is enabled. 
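+            // The filter below restricts results to monitors whose
+            // monitor.user.backend_roles.keyword values intersect the requesting user's backend roles.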
+ log.info("Filtering result by: ${user.backendRoles}") + addFilter(user, searchMonitorRequest.searchRequest.source(), "monitor.user.backend_roles.keyword") + search(searchMonitorRequest.searchRequest, actionListener) + } + } + + fun search(searchRequest: SearchRequest, actionListener: ActionListener) { + client.search( + searchRequest, + object : ActionListener { + override fun onResponse(response: SearchResponse) { + actionListener.onResponse(response) + } + + override fun onFailure(t: Exception) { + actionListener.onFailure(AlertingException.wrap(t)) + } + } + ) + } + + private fun addOwnerFieldIfNotExists(searchRequest: SearchRequest) { + if (searchRequest.source().query() == null || searchRequest.source().query().toString().contains("monitor.owner") == false) { + var boolQueryBuilder: BoolQueryBuilder = if (searchRequest.source().query() == null) BoolQueryBuilder() + else QueryBuilders.boolQuery().must(searchRequest.source().query()) + val bqb = BoolQueryBuilder() + bqb.should().add(BoolQueryBuilder().mustNot(ExistsQueryBuilder("monitor.owner"))) + bqb.should().add(BoolQueryBuilder().must(MatchQueryBuilder("monitor.owner", "alerting"))) + boolQueryBuilder.filter(bqb) + searchRequest.source().query(boolQueryBuilder) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/ExpressionParser.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/ExpressionParser.kt new file mode 100644 index 000000000..c0e215000 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/ExpressionParser.kt @@ -0,0 +1,12 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.triggercondition.parsers + +import org.opensearch.alerting.triggercondition.resolvers.TriggerExpressionResolver + +interface ExpressionParser { + fun parse(): TriggerExpressionResolver +} diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionParser.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionParser.kt new file mode 100644 index 000000000..835e9b383 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionParser.kt @@ -0,0 +1,53 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.triggercondition.parsers + +import org.opensearch.alerting.triggercondition.resolvers.TriggerExpressionRPNResolver +import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionOperator + +/** + * The postfix (Reverse Polish Notation) parser. 
+ * Uses the Shunting-yard algorithm to parse a mathematical expression
+ * @param triggerExpression String containing the trigger expression for the monitor
+ */
+class TriggerExpressionParser(
+    triggerExpression: String
+) : TriggerExpressionRPNBaseParser(triggerExpression) {
+
+    override fun parse(): TriggerExpressionRPNResolver {
+        val expression = expressionToParse.replace(" ", "")
+
+        val splitters = ArrayList<String>()
+        TriggerExpressionOperator.values().forEach { splitters.add(it.value) }
+
+        val breaks = ArrayList<String>().apply { add(expression) }
+        for (s in splitters) {
+            val a = ArrayList<String>()
+            for (ind in 0 until breaks.size) {
+                breaks[ind].let {
+                    if (it.length > 1) {
+                        a.addAll(breakString(breaks[ind], s))
+                    } else a.add(it)
+                }
+            }
+            breaks.clear()
+            breaks.addAll(a)
+        }
+
+        return TriggerExpressionRPNResolver(convertInfixToPostfix(breaks))
+    }
+
+    private fun breakString(input: String, delimiter: String): ArrayList<String> {
+        val tokens = input.split(delimiter)
+        val array = ArrayList<String>()
+        for (t in tokens) {
+            array.add(t)
+            array.add(delimiter)
+        }
+        array.removeAt(array.size - 1)
+        return array
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionRPNBaseParser.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionRPNBaseParser.kt
new file mode 100644
index 000000000..53cd5f0ac
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionRPNBaseParser.kt
@@ -0,0 +1,116 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.triggercondition.parsers
+
+import org.opensearch.alerting.triggercondition.tokens.ExpressionToken
+import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionConstant
+import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionOperator
+import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionToken
+import java.util.Stack
+
+/**
+ * This is the abstract base class which holds the trigger expression parsing logic,
+ * using the infix to postfix (a.k.a. Reverse Polish Notation, RPN) conversion.
+ * It also uses the Shunting-Yard algorithm to parse the given trigger expression.
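+ *
+ * For illustration, an infix expression such as (query[name=sev1] || query[name=sev2]) && !query[tag=dev]
+ * is converted to the postfix token sequence query[name=sev1] query[name=sev2] || query[tag=dev] ! &&
+ * before being handed to the resolver.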
+ * + * @param expressionToParse Complete string containing the trigger expression + */ +abstract class TriggerExpressionRPNBaseParser( + protected val expressionToParse: String +) : ExpressionParser { + /** + * To perform the Infix-to-postfix conversion of the trigger expression + */ + protected fun convertInfixToPostfix(expTokens: List): ArrayList { + val expTokenStack = Stack() + val outputExpTokens = ArrayList() + + for (tokenString in expTokens) { + if (tokenString.isEmpty()) continue + when (val expToken = assignToken(tokenString)) { + is TriggerExpressionToken -> outputExpTokens.add(expToken) + is TriggerExpressionOperator -> { + when (expToken) { + TriggerExpressionOperator.PAR_LEFT -> expTokenStack.push(expToken) + TriggerExpressionOperator.PAR_RIGHT -> { + var topExpToken = expTokenStack.popExpTokenOrNull() + while (topExpToken != null && topExpToken != TriggerExpressionOperator.PAR_LEFT) { + outputExpTokens.add(topExpToken) + topExpToken = expTokenStack.popExpTokenOrNull() + } + if (topExpToken != TriggerExpressionOperator.PAR_LEFT) { + throw java.lang.IllegalArgumentException("No matching left parenthesis.") + } + } + else -> { + var op2 = expTokenStack.peekExpTokenOrNull() + while (op2 != null) { + val c = expToken.precedence.compareTo(op2.precedence) + if (c < 0 || !expToken.rightAssociative && c <= 0) { + outputExpTokens.add(expTokenStack.pop()) + } else { + break + } + op2 = expTokenStack.peekExpTokenOrNull() + } + expTokenStack.push(expToken) + } + } + } + } + } + + while (!expTokenStack.isEmpty()) { + expTokenStack.peekExpTokenOrNull()?.let { + if (it == TriggerExpressionOperator.PAR_LEFT) { + throw java.lang.IllegalArgumentException("No matching right parenthesis.") + } + } + val top = expTokenStack.pop() + outputExpTokens.add(top) + } + + return outputExpTokens + } + + /** + * Looks up and maps the expression token that matches the string version of that expression unit + */ + private fun assignToken(tokenString: String): ExpressionToken { + // Check "query" string in trigger expression such as in 'query[name="abc"]' + if (tokenString.startsWith(TriggerExpressionConstant.ConstantType.QUERY.ident)) { + return TriggerExpressionToken(tokenString) + } + + // Check operators in trigger expression such as in [&&, ||, !] + for (op in TriggerExpressionOperator.values()) { + if (op.value == tokenString) return op + } + + // Check any constants in trigger expression such as in ["name, "id", "tag", [", "]", "="] + for (con in TriggerExpressionConstant.ConstantType.values()) { + if (tokenString == con.ident) return TriggerExpressionConstant(con) + } + + throw IllegalArgumentException("Error while processing the trigger expression '$tokenString'") + } + + private inline fun Stack.popExpTokenOrNull(): T? { + return try { + pop() as T + } catch (e: java.lang.Exception) { + null + } + } + + private inline fun Stack.peekExpTokenOrNull(): T? 
{
+        return try {
+            peek() as T
+        } catch (e: java.lang.Exception) {
+            null
+        }
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpression.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpression.kt
new file mode 100644
index 000000000..2a3e6c1ff
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpression.kt
@@ -0,0 +1,32 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.triggercondition.resolvers
+
+sealed class TriggerExpression {
+
+    fun resolve(): Set<String> = when (this) {
+        is And -> resolveAnd(docSet1, docSet2)
+        is Or -> resolveOr(docSet1, docSet2)
+        is Not -> resolveNot(allDocs, docSet2)
+    }
+
+    private fun resolveAnd(documentSet1: Set<String>, documentSet2: Set<String>): Set<String> {
+        return documentSet1.intersect(documentSet2)
+    }
+
+    private fun resolveOr(documentSet1: Set<String>, documentSet2: Set<String>): Set<String> {
+        return documentSet1.union(documentSet2)
+    }
+
+    private fun resolveNot(allDocs: Set<String>, documentSet2: Set<String>): Set<String> {
+        return allDocs.subtract(documentSet2)
+    }
+
+    // Operators implemented as concrete subclasses holding their operand document sets
+    class And(val docSet1: Set<String>, val docSet2: Set<String>) : TriggerExpression()
+    class Or(val docSet1: Set<String>, val docSet2: Set<String>) : TriggerExpression()
+    class Not(val allDocs: Set<String>, val docSet2: Set<String>) : TriggerExpression()
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionRPNResolver.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionRPNResolver.kt
new file mode 100644
index 000000000..45937c8ab
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionRPNResolver.kt
@@ -0,0 +1,103 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.triggercondition.resolvers
+
+import org.opensearch.alerting.triggercondition.tokens.ExpressionToken
+import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionConstant
+import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionOperator
+import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionToken
+import org.opensearch.commons.alerting.model.DocLevelQuery
+import java.util.Optional
+import java.util.Stack
+
+/**
+ * Solves the trigger expression using the Reverse Polish Notation (RPN) based solver
+ * @param polishNotation an array of expression tokens organized in the RPN order
+ */
+class TriggerExpressionRPNResolver(
+    private val polishNotation: ArrayList<ExpressionToken>
+) : TriggerExpressionResolver {
+
+    private val eqString by lazy {
+        val stringBuilder = StringBuilder()
+        for (expToken in polishNotation) {
+            when (expToken) {
+                is TriggerExpressionToken -> stringBuilder.append(expToken.value)
+                is TriggerExpressionOperator -> stringBuilder.append(expToken.value)
+                is TriggerExpressionConstant -> stringBuilder.append(expToken.type.ident)
+                else -> throw Exception()
+            }
+            stringBuilder.append(" ")
+        }
+        stringBuilder.toString()
+    }
+
+    override fun toString(): String = eqString
+
+    /**
+     * Evaluates the trigger expression provided in the form of the RPN token array.
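+     * For example, for the postfix tokens [query[name=sev1], query[name=sev2], &&] the matching
+     * document ids of each query are pushed onto a stack and intersected when && is evaluated.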
+     * @param queryToDocIds Map holding the resultant document ids per query
+     * @return the final resolved set of document ids
+     */
+    override fun evaluate(queryToDocIds: Map<DocLevelQuery, Set<String>>): Set<String> {
+        val tokenStack = Stack<Set<String>>()
+
+        val allDocIds = mutableSetOf<String>()
+        for (value in queryToDocIds.values) {
+            allDocIds.addAll(value)
+        }
+
+        for (expToken in polishNotation) {
+            when (expToken) {
+                is TriggerExpressionToken -> tokenStack.push(resolveQueryExpression(expToken.value, queryToDocIds))
+                is TriggerExpressionOperator -> {
+                    val right = tokenStack.pop()
+                    val expr = when (expToken) {
+                        TriggerExpressionOperator.AND -> TriggerExpression.And(tokenStack.pop(), right)
+                        TriggerExpressionOperator.OR -> TriggerExpression.Or(tokenStack.pop(), right)
+                        TriggerExpressionOperator.NOT -> TriggerExpression.Not(allDocIds, right)
+                        else -> throw IllegalArgumentException("No matching operator.")
+                    }
+                    tokenStack.push(expr.resolve())
+                }
+            }
+        }
+        return tokenStack.pop()
+    }
+
+    private fun resolveQueryExpression(queryExpString: String, queryToDocIds: Map<DocLevelQuery, Set<String>>): Set<String> {
+        if (!queryExpString.startsWith(TriggerExpressionConstant.ConstantType.QUERY.ident)) return emptySet()
+        val token = queryExpString.substringAfter(TriggerExpressionConstant.ConstantType.BRACKET_LEFT.ident)
+            .substringBefore(TriggerExpressionConstant.ConstantType.BRACKET_RIGHT.ident)
+        if (token.isEmpty()) return emptySet()
+
+        val tokens = token.split(TriggerExpressionConstant.ConstantType.EQUALS.ident)
+        if (tokens.size != 2) return emptySet()
+
+        val identifier = tokens[0]
+        val value = tokens[1]
+        val documents = mutableSetOf<String>()
+        when (identifier) {
+            TriggerExpressionConstant.ConstantType.NAME.ident -> {
+                val key: Optional<DocLevelQuery> = queryToDocIds.keys.stream().filter { it.name == value }.findFirst()
+                if (key.isPresent) queryToDocIds[key.get()]?.let { doc -> documents.addAll(doc) }
+            }
+
+            TriggerExpressionConstant.ConstantType.ID.ident -> {
+                val key: Optional<DocLevelQuery> = queryToDocIds.keys.stream().filter { it.id == value }.findFirst()
+                if (key.isPresent) queryToDocIds[key.get()]?.let { doc -> documents.addAll(doc) }
+            }
+
+            // Iterate through all the queries with the same tag
+            TriggerExpressionConstant.ConstantType.TAG.ident -> {
+                queryToDocIds.keys.stream().forEach {
+                    if (it.tags.contains(value)) queryToDocIds[it]?.let { it1 -> documents.addAll(it1) }
+                }
+            }
+        }
+        return documents
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionResolver.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionResolver.kt
new file mode 100644
index 000000000..fea22c356
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionResolver.kt
@@ -0,0 +1,12 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.triggercondition.resolvers
+
+import org.opensearch.commons.alerting.model.DocLevelQuery
+
+interface TriggerExpressionResolver {
+    fun evaluate(queryToDocIds: Map<DocLevelQuery, Set<String>>): Set<String>
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/ExpressionToken.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/ExpressionToken.kt
new file mode 100644
index 000000000..2085bf2d3
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/ExpressionToken.kt
@@ -0,0 +1,8 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package
org.opensearch.alerting.triggercondition.tokens + +interface ExpressionToken diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionConstant.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionConstant.kt new file mode 100644 index 000000000..80e662a21 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionConstant.kt @@ -0,0 +1,26 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.triggercondition.tokens + +/** + * To define all the tokens which could be part of expression constant such as query[id=new_id], query[name=new_name], + * query[tag=new_tag] + */ +class TriggerExpressionConstant(val type: ConstantType) : ExpressionToken { + + enum class ConstantType(val ident: String) { + QUERY("query"), + + TAG("tag"), + NAME("name"), + ID("id"), + + BRACKET_LEFT("["), + BRACKET_RIGHT("]"), + + EQUALS("=") + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionOperator.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionOperator.kt new file mode 100644 index 000000000..de3c4a0df --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionOperator.kt @@ -0,0 +1,20 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.triggercondition.tokens + +/** + * To define all the operators used in the trigger expression + */ +enum class TriggerExpressionOperator(val value: String, val precedence: Int, val rightAssociative: Boolean) : ExpressionToken { + + AND("&&", 2, false), + OR("||", 2, false), + + NOT("!", 3, true), + + PAR_LEFT("(", 1, false), + PAR_RIGHT(")", 1, false) +} diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionToken.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionToken.kt new file mode 100644 index 000000000..808f7737d --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionToken.kt @@ -0,0 +1,11 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.triggercondition.tokens + +/** + * To define the tokens in Trigger expression such as query[tag=“sev1"] or query[name=“sev1"] or query[id=“sev1"] + */ +internal data class TriggerExpressionToken(val value: String) : ExpressionToken diff --git a/alerting/bin/main/org/opensearch/alerting/util/AggregationQueryRewriter.kt b/alerting/bin/main/org/opensearch/alerting/util/AggregationQueryRewriter.kt new file mode 100644 index 000000000..e1b6675b2 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/util/AggregationQueryRewriter.kt @@ -0,0 +1,114 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util + +import org.opensearch.action.search.SearchResponse +import org.opensearch.alerting.model.InputRunResults +import org.opensearch.alerting.model.TriggerAfterKey +import org.opensearch.commons.alerting.model.BucketLevelTrigger +import org.opensearch.commons.alerting.model.Trigger +import org.opensearch.search.aggregations.AggregationBuilder +import org.opensearch.search.aggregations.AggregatorFactories +import org.opensearch.search.aggregations.bucket.SingleBucketAggregation +import 
org.opensearch.search.aggregations.bucket.composite.CompositeAggregation +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder +import org.opensearch.search.aggregations.support.AggregationPath +import org.opensearch.search.builder.SearchSourceBuilder + +class AggregationQueryRewriter { + + companion object { + /** + * Add the bucket selector conditions for each trigger in input query. It also adds afterKeys from previous result + * for each trigger. + */ + fun rewriteQuery(query: SearchSourceBuilder, prevResult: InputRunResults?, triggers: List): SearchSourceBuilder { + triggers.forEach { trigger -> + if (trigger is BucketLevelTrigger) { + // add bucket selector pipeline aggregation for each trigger in query + query.aggregation(trigger.bucketSelector) + // if this request is processing the subsequent pages of input query result, then add after key + if (prevResult?.aggTriggersAfterKey?.get(trigger.id) != null) { + val parentBucketPath = AggregationPath.parse(trigger.bucketSelector.parentBucketPath) + var aggBuilders = (query.aggregations() as AggregatorFactories.Builder).aggregatorFactories + var factory: AggregationBuilder? = null + for (i in 0 until parentBucketPath.pathElements.size) { + factory = null + for (aggFactory in aggBuilders) { + if (aggFactory.name.equals(parentBucketPath.pathElements[i].name)) { + aggBuilders = aggFactory.subAggregations + factory = aggFactory + break + } + } + if (factory == null) { + throw IllegalArgumentException("ParentBucketPath: $parentBucketPath not found in input query results") + } + } + if (factory is CompositeAggregationBuilder) { + // if the afterKey from previous result is null, what does it signify? + // A) result set exhausted OR B) first page ? + val afterKey = prevResult.aggTriggersAfterKey[trigger.id]!!.afterKey + factory.aggregateAfter(afterKey) + } else { + throw IllegalStateException("AfterKeys are not expected to be present in non CompositeAggregationBuilder") + } + } + } + } + + return query + } + + /** + * For each trigger, returns the after keys if present in query result. + */ + fun getAfterKeysFromSearchResponse( + searchResponse: SearchResponse, + triggers: List, + prevBucketLevelTriggerAfterKeys: Map? + ): Map { + val bucketLevelTriggerAfterKeys = mutableMapOf() + triggers.forEach { trigger -> + if (trigger is BucketLevelTrigger) { + val parentBucketPath = AggregationPath.parse(trigger.bucketSelector.parentBucketPath) + var aggs = searchResponse.aggregations + // assuming all intermediate aggregations as SingleBucketAggregation + for (i in 0 until parentBucketPath.pathElements.size - 1) { + aggs = (aggs.asMap()[parentBucketPath.pathElements[i].name] as SingleBucketAggregation).aggregations + } + val lastAgg = aggs.asMap[parentBucketPath.pathElements.last().name] + // if leaf is CompositeAggregation, then fetch afterKey if present + if (lastAgg is CompositeAggregation) { + /* + * Bucket-Level Triggers can have different parent bucket paths that they are tracking for condition evaluation. + * These different bucket paths could have different page sizes, meaning one could be exhausted while another + * bucket path still has pages to iterate in the query responses. + * + * To ensure that these can be exhausted and tracked independently, the after key that led to the last page (which + * should be an empty result for the bucket path) will be saved when the last page is hit and will be continued + * to be passed on for that bucket path if there are still other bucket paths being paginated. 
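+                         *
+                         * A worked illustration of the bookkeeping below: if page 1 returns after key K1 and page 2
+                         * returns after key K2, the stored values are TriggerAfterKey(K1, false) then
+                         * TriggerAfterKey(K2, false); when page 3 returns a null after key, TriggerAfterKey(K2, true)
+                         * is stored and then carried forward unchanged while other bucket paths finish paginating.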
+ */ + val afterKey = lastAgg.afterKey() + val prevTriggerAfterKey = prevBucketLevelTriggerAfterKeys?.get(trigger.id) + bucketLevelTriggerAfterKeys[trigger.id] = when { + // If the previous TriggerAfterKey was null, this should be the first page + prevTriggerAfterKey == null -> TriggerAfterKey(afterKey, afterKey == null) + // If the previous TriggerAfterKey already hit the last page, pass along the after key it used to get there + prevTriggerAfterKey.lastPage -> prevTriggerAfterKey + // If the previous TriggerAfterKey had not reached the last page and the after key for the current result + // is null, then the last page has been reached so the after key that was used to get there is stored + afterKey == null -> TriggerAfterKey(prevTriggerAfterKey.afterKey, true) + // Otherwise, update the after key to the current one + else -> TriggerAfterKey(afterKey, false) + } + } + } + } + return bucketLevelTriggerAfterKeys + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/util/AlertingException.kt b/alerting/bin/main/org/opensearch/alerting/util/AlertingException.kt new file mode 100644 index 000000000..4127afaa2 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/util/AlertingException.kt @@ -0,0 +1,73 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util + +import org.apache.logging.log4j.LogManager +import org.opensearch.OpenSearchException +import org.opensearch.OpenSearchSecurityException +import org.opensearch.OpenSearchStatusException +import org.opensearch.core.common.Strings +import org.opensearch.core.rest.RestStatus +import org.opensearch.index.IndexNotFoundException +import org.opensearch.index.engine.VersionConflictEngineException +import org.opensearch.indices.InvalidIndexNameException + +private val log = LogManager.getLogger(AlertingException::class.java) + +/** + * Converts into a user friendly message. + */ +class AlertingException(message: String, val status: RestStatus, ex: Exception) : OpenSearchException(message, ex) { + + override fun status(): RestStatus { + return status + } + + companion object { + @JvmStatic + fun wrap(ex: Exception): OpenSearchException { + log.error("Alerting error: $ex") + + var friendlyMsg = "Unknown error" + var status = RestStatus.INTERNAL_SERVER_ERROR + when (ex) { + is IndexNotFoundException -> { + status = ex.status() + friendlyMsg = "Configured indices are not found: ${ex.index}" + } + is OpenSearchSecurityException -> { + status = ex.status() + friendlyMsg = "User doesn't have permissions to execute this action. Contact administrator." + } + is OpenSearchStatusException -> { + status = ex.status() + friendlyMsg = ex.message as String + } + is IllegalArgumentException -> { + status = RestStatus.BAD_REQUEST + friendlyMsg = ex.message as String + } + is VersionConflictEngineException -> { + status = ex.status() + friendlyMsg = ex.message as String + } + is InvalidIndexNameException -> { + status = RestStatus.BAD_REQUEST + friendlyMsg = ex.message as String + } + else -> { + if (!Strings.isNullOrEmpty(ex.message)) { + friendlyMsg = ex.message as String + } + } + } + // Wrapping the origin exception as runtime to avoid it being formatted. + // Currently, alerting-kibana is using `error.root_cause.reason` as text in the toast message. + // Below logic is to set friendly message to error.root_cause.reason. 
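+            // For example, an IndexNotFoundException surfaces with root_cause.reason set to the friendly
+            // "Configured indices are not found" text above rather than the raw formatted exception.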
+ return AlertingException(friendlyMsg, status, Exception("${ex.javaClass.name}: ${ex.message}")) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/util/AlertingUtils.kt b/alerting/bin/main/org/opensearch/alerting/util/AlertingUtils.kt new file mode 100644 index 000000000..33911b216 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/util/AlertingUtils.kt @@ -0,0 +1,141 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util + +import org.apache.logging.log4j.LogManager +import org.opensearch.alerting.model.BucketLevelTriggerRunResult +import org.opensearch.alerting.model.destination.Destination +import org.opensearch.alerting.settings.DestinationSettings +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.settings.Settings +import org.opensearch.commons.alerting.model.AggregationResultBucket +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.action.Action +import org.opensearch.commons.alerting.model.action.ActionExecutionPolicy +import org.opensearch.commons.alerting.model.action.ActionExecutionScope +import org.opensearch.commons.alerting.util.isBucketLevelMonitor + +private val logger = LogManager.getLogger("AlertingUtils") + +val MAX_SEARCH_SIZE = 10000 + +/** + * RFC 5322 compliant pattern matching: https://www.ietf.org/rfc/rfc5322.txt + * Regex was based off of this post: https://stackoverflow.com/a/201378 + */ +fun isValidEmail(email: String): Boolean { + val validEmailPattern = Regex( + "(?:[a-z0-9!#\$%&'*+\\/=?^_`{|}~-]+(?:\\.[a-z0-9!#\$%&'*+\\/=?^_`{|}~-]+)*" + + "|\"(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21\\x23-\\x5b\\x5d-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])*\")" + + "@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?" + + "|\\[(?:(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\\.){3}" + + "(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:" + + "(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21-\\x5a\\x53-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])+)\\])", + RegexOption.IGNORE_CASE + ) + + return validEmailPattern.matches(email) +} + +fun getRoleFilterEnabled(clusterService: ClusterService, settings: Settings, settingPath: String): Boolean { + var adBackendRoleFilterEnabled: Boolean + val metaData = clusterService.state().metadata() + + // get default value for setting + if (clusterService.clusterSettings.get(settingPath) != null) { + adBackendRoleFilterEnabled = clusterService.clusterSettings.get(settingPath).getDefault(settings) as Boolean + } else { + // default setting doesn't exist, so returning false as it means AD plugins isn't in cluster anyway + return false + } + + // Transient settings are prioritized so those are checked first. + return if (metaData.transientSettings().get(settingPath) != null) { + metaData.transientSettings().getAsBoolean(settingPath, adBackendRoleFilterEnabled) + } else if (metaData.persistentSettings().get(settingPath) != null) { + metaData.persistentSettings().getAsBoolean(settingPath, adBackendRoleFilterEnabled) + } else { + adBackendRoleFilterEnabled + } +} + +/** Allowed Destinations are ones that are specified in the [DestinationSettings.ALLOW_LIST] setting. 
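+ * For instance, a Slack destination is allowed only while "slack" is present in the allow list.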
+ */
+fun Destination.isAllowed(allowList: List<String>): Boolean = allowList.contains(this.type.value)
+
+fun Destination.isTestAction(): Boolean = this.type == DestinationType.TEST_ACTION
+
+fun Monitor.isDocLevelMonitor(): Boolean = this.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR
+
+fun Monitor.isQueryLevelMonitor(): Boolean = this.monitorType == Monitor.MonitorType.QUERY_LEVEL_MONITOR
+
+/**
+ * Since buckets can have multi-value keys, this converts the bucket key values to a string that can be used
+ * as the key for a HashMap to easily retrieve [AggregationResultBucket] based on the bucket key values.
+ */
+fun AggregationResultBucket.getBucketKeysHash(): String = this.bucketKeys.joinToString(separator = "#")
+
+fun Action.getActionExecutionPolicy(monitor: Monitor): ActionExecutionPolicy? {
+    // When the ActionExecutionPolicy is null for an Action, the default is resolved at runtime
+    // so it can be chosen based on the Monitor type at that time.
+    // The Action config is not aware of the Monitor type which is why the default was not stored during
+    // the parse.
+    return this.actionExecutionPolicy ?: if (monitor.isBucketLevelMonitor()) {
+        ActionExecutionPolicy.getDefaultConfigurationForBucketLevelMonitor()
+    } else if (monitor.isDocLevelMonitor()) {
+        ActionExecutionPolicy.getDefaultConfigurationForDocumentLevelMonitor()
+    } else {
+        null
+    }
+}
+
+fun BucketLevelTriggerRunResult.getCombinedTriggerRunResult(
+    prevTriggerRunResult: BucketLevelTriggerRunResult?
+): BucketLevelTriggerRunResult {
+    if (prevTriggerRunResult == null) return this
+
+    // The aggregation results and action results across the two trigger run results should not have overlapping keys
+    // since they represent different pages of aggregations, so a simple concatenation will combine them
+    val mergedAggregationResultBuckets = prevTriggerRunResult.aggregationResultBuckets + this.aggregationResultBuckets
+    val mergedActionResultsMap = (prevTriggerRunResult.actionResultsMap + this.actionResultsMap).toMutableMap()
+
+    // Update to the most recent error if it's not null, otherwise keep the old one
+    val error = this.error ?: prevTriggerRunResult.error
+
+    return this.copy(aggregationResultBuckets = mergedAggregationResultBuckets, actionResultsMap = mergedActionResultsMap, error = error)
+}
+
+fun defaultToPerExecutionAction(
+    maxActionableAlertCount: Long,
+    monitorId: String,
+    triggerId: String,
+    totalActionableAlertCount: Int,
+    monitorOrTriggerError: Exception?
+): Boolean {
+    // If the monitor or trigger result has an error, then also default to PER_EXECUTION to communicate the error
+    if (monitorOrTriggerError != null) {
+        logger.debug(
+            "Trigger [$triggerId] in monitor [$monitorId] encountered an error. Defaulting to " +
+                "[${ActionExecutionScope.Type.PER_EXECUTION}] for action execution to communicate error."
+        )
+        return true
+    }
+
+    // If the MAX_ACTIONABLE_ALERT_COUNT is set to -1, consider it unbounded and proceed regardless of actionable Alert count
+    if (maxActionableAlertCount < 0) return false
+
+    // If the total number of Alerts to execute Actions on exceeds the MAX_ACTIONABLE_ALERT_COUNT setting then default to
+    // PER_EXECUTION for less intrusive Actions
+    if (totalActionableAlertCount > maxActionableAlertCount) {
+        logger.debug(
+            "The total actionable alerts for trigger [$triggerId] in monitor [$monitorId] is [$totalActionableAlertCount] " +
+                "which exceeds the maximum of [$maxActionableAlertCount]. " +
+                "Defaulting to [${ActionExecutionScope.Type.PER_EXECUTION}] for action execution."
+ ) + return true + } + + return false +} diff --git a/alerting/bin/main/org/opensearch/alerting/util/AnomalyDetectionUtils.kt b/alerting/bin/main/org/opensearch/alerting/util/AnomalyDetectionUtils.kt new file mode 100644 index 000000000..e83f45a15 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/util/AnomalyDetectionUtils.kt @@ -0,0 +1,68 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util + +import org.apache.lucene.search.join.ScoreMode +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.authuser.User +import org.opensearch.core.common.Strings +import org.opensearch.index.query.BoolQueryBuilder +import org.opensearch.index.query.NestedQueryBuilder +import org.opensearch.index.query.QueryBuilders +import org.opensearch.search.builder.SearchSourceBuilder + +/** + * AD monitor is search input monitor on top of anomaly result index. This method will return + * true if monitor input only contains anomaly result index. + */ +fun isADMonitor(monitor: Monitor): Boolean { + // If monitor has other input than AD result index, it's not AD monitor + if (monitor.inputs.size != 1) { + return false + } + val input = monitor.inputs[0] + // AD monitor can only have 1 anomaly result index. + if (input is SearchInput && input.indices.size == 1 && input.indices[0] == ".opendistro-anomaly-results*") { + return true + } + return false +} + +fun addUserBackendRolesFilter(user: User?, searchSourceBuilder: SearchSourceBuilder): SearchSourceBuilder { + var boolQueryBuilder = BoolQueryBuilder() + val userFieldName = "user" + val userBackendRoleFieldName = "user.backend_roles.keyword" + if (user == null || Strings.isEmpty(user.name)) { + // For 1) old monitor and detector 2) security disabled or superadmin access, they have no/empty user field + val userRolesFilterQuery = QueryBuilders.existsQuery(userFieldName) + val nestedQueryBuilder = NestedQueryBuilder(userFieldName, userRolesFilterQuery, ScoreMode.None) + boolQueryBuilder.mustNot(nestedQueryBuilder) + } else if (user.backendRoles.isNullOrEmpty()) { + // For simple FGAC user, they may have no backend roles, these users should be able to see detectors + // of other users whose backend role is empty. + val userRolesFilterQuery = QueryBuilders.existsQuery(userBackendRoleFieldName) + val nestedQueryBuilder = NestedQueryBuilder(userFieldName, userRolesFilterQuery, ScoreMode.None) + + val userExistsQuery = QueryBuilders.existsQuery(userFieldName) + val userExistsNestedQueryBuilder = NestedQueryBuilder(userFieldName, userExistsQuery, ScoreMode.None) + + boolQueryBuilder.mustNot(nestedQueryBuilder) + boolQueryBuilder.must(userExistsNestedQueryBuilder) + } else { + // For normal case, user should have backend roles. 
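+        // The nested terms query below matches documents whose user.backend_roles.keyword field
+        // contains at least one of the requesting user's backend roles.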
+ val userRolesFilterQuery = QueryBuilders.termsQuery(userBackendRoleFieldName, user.backendRoles) + val nestedQueryBuilder = NestedQueryBuilder(userFieldName, userRolesFilterQuery, ScoreMode.None) + boolQueryBuilder.must(nestedQueryBuilder) + } + val query = searchSourceBuilder.query() + if (query == null) { + searchSourceBuilder.query(boolQueryBuilder) + } else { + (query as BoolQueryBuilder).filter(boolQueryBuilder) + } + return searchSourceBuilder +} diff --git a/alerting/bin/main/org/opensearch/alerting/util/DestinationType.kt b/alerting/bin/main/org/opensearch/alerting/util/DestinationType.kt new file mode 100644 index 000000000..d714288f8 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/util/DestinationType.kt @@ -0,0 +1,18 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util + +enum class DestinationType(val value: String) { + CHIME("chime"), + SLACK("slack"), + CUSTOM_WEBHOOK("custom_webhook"), + EMAIL("email"), + TEST_ACTION("test_action"); + + override fun toString(): String { + return value + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/util/DocLevelMonitorQueries.kt b/alerting/bin/main/org/opensearch/alerting/util/DocLevelMonitorQueries.kt new file mode 100644 index 000000000..0a98a139d --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/util/DocLevelMonitorQueries.kt @@ -0,0 +1,608 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util + +import org.apache.logging.log4j.LogManager +import org.opensearch.ExceptionsHelper +import org.opensearch.OpenSearchStatusException +import org.opensearch.ResourceAlreadyExistsException +import org.opensearch.action.admin.indices.alias.Alias +import org.opensearch.action.admin.indices.create.CreateIndexRequest +import org.opensearch.action.admin.indices.create.CreateIndexResponse +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest +import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest +import org.opensearch.action.admin.indices.rollover.RolloverRequest +import org.opensearch.action.admin.indices.rollover.RolloverResponse +import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest +import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest +import org.opensearch.action.bulk.BulkRequest +import org.opensearch.action.bulk.BulkResponse +import org.opensearch.action.index.IndexRequest +import org.opensearch.action.support.WriteRequest.RefreshPolicy +import org.opensearch.action.support.master.AcknowledgedResponse +import org.opensearch.alerting.MonitorRunnerService.monitorCtx +import org.opensearch.alerting.model.MonitorMetadata +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.client.Client +import org.opensearch.cluster.ClusterState +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.settings.Settings +import org.opensearch.common.unit.TimeValue +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.DataSources +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.core.rest.RestStatus +import 
org.opensearch.index.mapper.MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING
+
+private val log = LogManager.getLogger(DocLevelMonitorQueries::class.java)
+
+class DocLevelMonitorQueries(private val client: Client, private val clusterService: ClusterService) {
+    companion object {
+
+        const val PROPERTIES = "properties"
+        const val NESTED = "nested"
+        const val TYPE = "type"
+        const val INDEX_PATTERN_SUFFIX = "-000001"
+        const val QUERY_INDEX_BASE_FIELDS_COUNT = 8 // 3 fields we defined and 5 built-in additional metadata fields
+        @JvmStatic
+        fun docLevelQueriesMappings(): String {
+            return DocLevelMonitorQueries::class.java.classLoader.getResource("mappings/doc-level-queries.json").readText()
+        }
+        fun docLevelQueriesSettings(): Settings {
+            return Settings.builder().loadFromSource(
+                DocLevelMonitorQueries::class.java.classLoader.getResource("settings/doc-level-queries.json").readText(),
+                XContentType.JSON
+            ).build()
+        }
+    }
+
+    suspend fun initDocLevelQueryIndex(): Boolean {
+        if (!docLevelQueryIndexExists()) {
+            // Since queryIndex is now an alias, for backwards compatibility we have to delete any index
+            // with the same name as the alias, to avoid a name clash.
+            if (clusterService.state().metadata.hasIndex(ScheduledJob.DOC_LEVEL_QUERIES_INDEX)) {
+                val acknowledgedResponse: AcknowledgedResponse = client.suspendUntil {
+                    admin().indices().delete(DeleteIndexRequest(ScheduledJob.DOC_LEVEL_QUERIES_INDEX), it)
+                }
+                if (!acknowledgedResponse.isAcknowledged) {
+                    val errorMessage = "Deletion of old queryIndex [${ScheduledJob.DOC_LEVEL_QUERIES_INDEX}] index is not acknowledged!"
+                    log.error(errorMessage)
+                    throw AlertingException.wrap(OpenSearchStatusException(errorMessage, RestStatus.INTERNAL_SERVER_ERROR))
+                }
+            }
+            val alias = ScheduledJob.DOC_LEVEL_QUERIES_INDEX
+            val indexPattern = ScheduledJob.DOC_LEVEL_QUERIES_INDEX + INDEX_PATTERN_SUFFIX
+            val indexRequest = CreateIndexRequest(indexPattern)
+                .mapping(docLevelQueriesMappings())
+                .alias(Alias(alias))
+                .settings(docLevelQueriesSettings())
+            return try {
+                val createIndexResponse: CreateIndexResponse = client.suspendUntil { client.admin().indices().create(indexRequest, it) }
+                createIndexResponse.isAcknowledged
+            } catch (t: Exception) {
+                if (ExceptionsHelper.unwrapCause(t) is ResourceAlreadyExistsException) {
+                    true
+                } else {
+                    throw t
+                }
+            }
+        }
+        return true
+    }
+
+    suspend fun initDocLevelQueryIndex(dataSources: DataSources): Boolean {
+        if (dataSources.queryIndex == ScheduledJob.DOC_LEVEL_QUERIES_INDEX) {
+            return initDocLevelQueryIndex()
+        }
+        // Since queryIndex is now an alias, for backwards compatibility we have to delete any index
+        // with the same name as the alias, to avoid a name clash.
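+        // Example (illustrative, with a hypothetical name): for dataSources.queryIndex "custom-query-index",
+        // the alias is "custom-query-index" and its first backing index is "custom-query-index-000001"
+        // (INDEX_PATTERN_SUFFIX), so later rollovers can swap a new write index in behind the alias.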
+        if (clusterService.state().metadata.hasIndex(dataSources.queryIndex)) {
+            val acknowledgedResponse: AcknowledgedResponse = client.suspendUntil {
+                admin().indices().delete(DeleteIndexRequest(dataSources.queryIndex), it)
+            }
+            if (!acknowledgedResponse.isAcknowledged) {
+                log.warn("Deletion of old queryIndex [${dataSources.queryIndex}] index is not acknowledged!")
+            }
+        }
+        val alias = dataSources.queryIndex
+        val indexPattern = dataSources.queryIndex + INDEX_PATTERN_SUFFIX
+        if (!clusterService.state().metadata.hasAlias(alias)) {
+            val indexRequest = CreateIndexRequest(indexPattern)
+                .mapping(docLevelQueriesMappings())
+                .alias(Alias(alias))
+                .settings(
+                    Settings.builder().put("index.hidden", true)
+                        .build()
+                )
+            return try {
+                val createIndexResponse: CreateIndexResponse = client.suspendUntil { client.admin().indices().create(indexRequest, it) }
+                createIndexResponse.isAcknowledged
+            } catch (t: Exception) {
+                if (ExceptionsHelper.unwrapCause(t) is ResourceAlreadyExistsException) {
+                    true
+                } else {
+                    throw t
+                }
+            }
+        }
+        return true
+    }
+
+    fun docLevelQueryIndexExists(dataSources: DataSources): Boolean {
+        val clusterState = clusterService.state()
+        return clusterState.metadata.hasAlias(dataSources.queryIndex)
+    }
+
+    fun docLevelQueryIndexExists(): Boolean {
+        val clusterState = clusterService.state()
+        return clusterState.metadata.hasAlias(ScheduledJob.DOC_LEVEL_QUERIES_INDEX)
+    }
+
+    /**
+     * Does a DFS traversal of the index mappings tree.
+     * Calls processLeafFn on every leaf node.
+     * Populates flattenPaths with the full path of every leaf node.
+     * @param node current node which we're visiting
+     * @param currentPath current node path from the root node
+     * @param processLeafFn leaf processor function which is called on every leaf discovered
+     * @param flattenPaths map of full paths of all leaf nodes relative to the root
+     */
+    fun traverseMappingsAndUpdate(
+        node: MutableMap<String, Any>,
+        currentPath: String,
+        processLeafFn: (String, String, MutableMap<String, Any>) -> Triple<String, String, MutableMap<String, Any>>,
+        flattenPaths: MutableMap<String, MutableMap<String, Any>>
+    ) {
+        // If a node contains a "properties" property then it is an internal (non-leaf) node
+        log.debug("Node in traverse: $node")
+        // newNodes will hold the list of updated leaf properties
+        val newNodes = ArrayList<Triple<String, String, MutableMap<String, Any>>>(node.size)
+        node.entries.forEach {
+            // Compute the full path relative to the root
+            val fullPath = if (currentPath.isEmpty()) it.key
+            else "$currentPath.${it.key}"
+            val nodeProps = it.value as MutableMap<String, Any>
+            // If it has a type property and the type is not "nested" then this is a leaf
+            if (nodeProps.containsKey(TYPE) && nodeProps[TYPE] != NESTED) {
+                // At this point we know the full path of the node, so we add it to the output map
+                flattenPaths.put(fullPath, nodeProps)
+                // Calls processLeafFn and gets the old node name, new node name and new properties of the node.
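+                // (Illustrative example: given mappings { "user": { "properties": { "name": { "type": "text" } } } },
+                // the leaf "name" is visited with fullPath "user.name" and recorded in flattenPaths.)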
+                // This is all the information we need to update this node.
+                val (oldName, newName, props) = processLeafFn(it.key, fullPath, it.value as MutableMap<String, Any>)
+                newNodes.add(Triple(oldName, newName, props))
+            } else {
+                // Internal (non-leaf) node - visit children
+                traverseMappingsAndUpdate(nodeProps[PROPERTIES] as MutableMap<String, Any>, fullPath, processLeafFn, flattenPaths)
+            }
+        }
+        // Here we can update all processed leaves in the tree
+        newNodes.forEach {
+            // If we renamed the leaf, we have to remove the old entry first
+            if (it.first != it.second) {
+                node.remove(it.first)
+            }
+            // Put the new properties of the leaf
+            node.put(it.second, it.third)
+        }
+    }
+
+    suspend fun indexDocLevelQueries(
+        monitor: Monitor,
+        monitorId: String,
+        monitorMetadata: MonitorMetadata,
+        refreshPolicy: RefreshPolicy = RefreshPolicy.IMMEDIATE,
+        indexTimeout: TimeValue
+    ) {
+        val docLevelMonitorInput = monitor.inputs[0] as DocLevelMonitorInput
+        val queries: List<DocLevelQuery> = docLevelMonitorInput.queries
+
+        val indices = docLevelMonitorInput.indices
+        val clusterState = clusterService.state()
+
+        // Run through each backing index and apply the appropriate mappings to the query index
+        indices.forEach { indexName ->
+            var concreteIndices = IndexUtils.resolveAllIndices(
+                listOf(indexName),
+                monitorCtx.clusterService!!,
+                monitorCtx.indexNameExpressionResolver!!
+            )
+            if (IndexUtils.isAlias(indexName, monitorCtx.clusterService!!.state()) ||
+                IndexUtils.isDataStream(indexName, monitorCtx.clusterService!!.state())
+            ) {
+                val lastWriteIndex = concreteIndices.find { monitorMetadata.lastRunContext.containsKey(it) }
+                if (lastWriteIndex != null) {
+                    val lastWriteIndexCreationDate =
+                        IndexUtils.getCreationDateForIndex(lastWriteIndex, monitorCtx.clusterService!!.state())
+                    concreteIndices = IndexUtils.getNewestIndicesByCreationDate(
+                        concreteIndices,
+                        monitorCtx.clusterService!!.state(),
+                        lastWriteIndexCreationDate
+                    )
+                }
+            }
+            val updatedIndexName = indexName.replace("*", "_")
+            val updatedProperties = mutableMapOf<String, Any>()
+            val allFlattenPaths = mutableSetOf<Pair<String, String>>()
+            var sourceIndexFieldLimit = 0L
+            val conflictingFields = getAllConflictingFields(clusterState, concreteIndices)
+
+            concreteIndices.forEach { concreteIndexName ->
+                if (clusterState.routingTable.hasIndex(concreteIndexName)) {
+                    val indexMetadata = clusterState.metadata.index(concreteIndexName)
+                    if (indexMetadata.mapping()?.sourceAsMap?.get("properties") != null) {
+                        val properties = (
+                            (indexMetadata.mapping()?.sourceAsMap?.get("properties"))
+                                as MutableMap<String, Any>
+                            )
+                        // The node processor function is applied to every leaf of the index mappings tree
+                        val leafNodeProcessor =
+                            fun(fieldName: String, fullPath: String, props: MutableMap<String, Any>):
+                                Triple<String, String, MutableMap<String, Any>> {
+                                val newProps = props.toMutableMap()
+                                if (monitor.dataSources.queryIndexMappingsByType.isNotEmpty()) {
+                                    val mappingsByType = monitor.dataSources.queryIndexMappingsByType
+                                    if (props.containsKey("type") && mappingsByType.containsKey(props["type"]!!)) {
+                                        mappingsByType[props["type"]]?.entries?.forEach { iter: Map.Entry<String, String> ->
+                                            newProps[iter.key] = iter.value
+                                        }
+                                    }
+                                }
+
+                                return if (conflictingFields.contains(fullPath)) {
+                                    if (props.containsKey("path")) {
+                                        newProps["path"] = "${props["path"]}_${concreteIndexName}_$monitorId"
+                                    }
+                                    Triple(fieldName, "${fieldName}_${concreteIndexName}_$monitorId", newProps)
+                                } else {
+                                    if (props.containsKey("path")) {
+                                        newProps["path"] = "${props["path"]}_${updatedIndexName}_$monitorId"
+                                    }
+                                    Triple(fieldName, "${fieldName}_${updatedIndexName}_$monitorId", newProps)
+                                }
+                            }
+                        // Traverse and update the index mappings while extracting the flattened field paths
+                        val flattenPaths = mutableMapOf<String, MutableMap<String, Any>>()
+                        traverseMappingsAndUpdate(properties, "", leafNodeProcessor, flattenPaths)
+                        flattenPaths.keys.forEach { allFlattenPaths.add(Pair(it, concreteIndexName)) }
+                        // Updated mappings are now ready to be applied to the query index
+                        properties.forEach {
+                            if (
+                                it.value is Map<*, *> &&
+                                (it.value as Map<String, Any>).containsKey("type") &&
+                                (it.value as Map<String, Any>)["type"] == NESTED
+                            ) {
+                                // Skip nested-type fields; they are not flattened into the query index mappings.
+                            } else {
+                                if (updatedProperties.containsKey(it.key) && updatedProperties[it.key] != it.value) {
+                                    val mergedField = mergeConflictingFields(
+                                        updatedProperties[it.key] as Map<String, Any>,
+                                        it.value as Map<String, Any>
+                                    )
+                                    updatedProperties[it.key] = mergedField
+                                } else {
+                                    updatedProperties[it.key] = it.value
+                                }
+                            }
+                        }
+                        sourceIndexFieldLimit += checkMaxFieldLimit(concreteIndexName)
+                    }
+                }
+            }
+            // Update the mappings of the concrete queryIndex. This can roll the queryIndex over if the field mapping limit is reached.
+            val (updateMappingResponse, concreteQueryIndex) = updateQueryIndexMappings(
+                monitor,
+                monitorMetadata,
+                updatedIndexName,
+                sourceIndexFieldLimit,
+                updatedProperties
+            )
+
+            if (updateMappingResponse.isAcknowledged) {
+                doIndexAllQueries(
+                    concreteQueryIndex,
+                    updatedIndexName,
+                    monitorId,
+                    queries,
+                    allFlattenPaths,
+                    conflictingFields,
+                    refreshPolicy,
+                    indexTimeout
+                )
+            }
+        }
+    }
+
+    private suspend fun doIndexAllQueries(
+        concreteQueryIndex: String,
+        sourceIndex: String,
+        monitorId: String,
+        queries: List<DocLevelQuery>,
+        flattenPaths: MutableSet<Pair<String, String>>,
+        conflictingPaths: Set<String>,
+        refreshPolicy: RefreshPolicy,
+        indexTimeout: TimeValue
+    ) {
+        val indexRequests = mutableListOf<IndexRequest>()
+        val conflictingPathToConcreteIndices = mutableMapOf<String, MutableSet<String>>()
+        flattenPaths.forEach { fieldPath ->
+            if (conflictingPaths.contains(fieldPath.first)) {
+                if (conflictingPathToConcreteIndices.containsKey(fieldPath.first)) {
+                    val concreteIndexSet = conflictingPathToConcreteIndices[fieldPath.first]
+                    concreteIndexSet!!.add(fieldPath.second)
+                    conflictingPathToConcreteIndices[fieldPath.first] = concreteIndexSet
+                } else {
+                    val concreteIndexSet = mutableSetOf<String>()
+                    concreteIndexSet.add(fieldPath.second)
+                    conflictingPathToConcreteIndices[fieldPath.first] = concreteIndexSet
+                }
+            }
+        }
+
+        val newQueries = mutableListOf<DocLevelQuery>()
+        queries.forEach {
+            val filteredConcreteIndices = mutableSetOf<String>()
+            var query = it.query
+            conflictingPaths.forEach { conflictingPath ->
+                if (query.contains(conflictingPath)) {
+                    query = query.replace("$conflictingPath:", "${conflictingPath}_<index>_$monitorId:")
+                    filteredConcreteIndices.addAll(conflictingPathToConcreteIndices[conflictingPath]!!)
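+                    // Illustrative example (hypothetical values): with conflictingPath "message" and
+                    // monitorId "abc", the replace above turns "message:error" into
+                    // "message_<index>_abc:error"; "<index>" is substituted with each concrete index name below.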
+                }
+            }
+
+            if (filteredConcreteIndices.isNotEmpty()) {
+                filteredConcreteIndices.forEach { filteredConcreteIndex ->
+                    val newQuery = it.copy(
+                        id = "${it.id}_$filteredConcreteIndex",
+                        query = query.replace("<index>", filteredConcreteIndex)
+                    )
+                    newQueries.add(newQuery)
+                }
+            } else {
+                newQueries.add(it.copy(id = "${it.id}_$sourceIndex"))
+            }
+        }
+
+        newQueries.forEach {
+            var query = it.query
+            flattenPaths.forEach { fieldPath ->
+                if (!conflictingPaths.contains(fieldPath.first)) {
+                    query = query.replace("${fieldPath.first}:", "${fieldPath.first}_${sourceIndex}_$monitorId:")
+                }
+            }
+            val indexRequest = IndexRequest(concreteQueryIndex)
+                .id(it.id + "_$monitorId")
+                .source(
+                    mapOf(
+                        "query" to mapOf("query_string" to mapOf("query" to query, "fields" to it.fields)),
+                        "monitor_id" to monitorId,
+                        "index" to sourceIndex
+                    )
+                )
+            indexRequests.add(indexRequest)
+        }
+        log.debug("bulk inserting [${indexRequests.size}] percolate queries")
+        if (indexRequests.isNotEmpty()) {
+            val bulkResponse: BulkResponse = client.suspendUntil {
+                client.bulk(
+                    BulkRequest().setRefreshPolicy(refreshPolicy).timeout(indexTimeout).add(indexRequests),
+                    it
+                )
+            }
+            bulkResponse.forEach { bulkItemResponse ->
+                if (bulkItemResponse.isFailed) {
+                    log.debug(bulkItemResponse.failureMessage)
+                }
+            }
+        }
+    }
+
+    private suspend fun updateQueryIndexMappings(
+        monitor: Monitor,
+        monitorMetadata: MonitorMetadata,
+        sourceIndex: String,
+        sourceIndexFieldLimit: Long,
+        updatedProperties: MutableMap<String, Any>
+    ): Pair<AcknowledgedResponse, String> {
+        var targetQueryIndex = monitorMetadata.sourceToQueryIndexMapping[sourceIndex + monitor.id]
+        if (targetQueryIndex == null) {
+            // queryIndex is an alias that always has exactly one backing index, the write index.
+            // This is because the _rollover API maintains only a single index under an alias
+            // when the is_write_index setting is not supplied at index creation.
+            targetQueryIndex = getWriteIndexNameForAlias(monitor.dataSources.queryIndex)
+            if (targetQueryIndex == null) {
+                val message = "Failed to get write index for queryIndex alias:${monitor.dataSources.queryIndex}"
+                log.error(message)
+                throw AlertingException.wrap(
+                    OpenSearchStatusException(message, RestStatus.INTERNAL_SERVER_ERROR)
+                )
+            }
+            monitorMetadata.sourceToQueryIndexMapping[sourceIndex + monitor.id] = targetQueryIndex
+        }
+        val updateMappingRequest = PutMappingRequest(targetQueryIndex)
+        updateMappingRequest.source(mapOf("properties" to updatedProperties))
+        var updateMappingResponse = AcknowledgedResponse(false)
+        try {
+            // Adjust the max field limit setting for the query index, if needed.
+            adjustMaxFieldLimitForQueryIndex(sourceIndexFieldLimit, targetQueryIndex)
+            updateMappingResponse = client.suspendUntil {
+                client.admin().indices().putMapping(updateMappingRequest, it)
+            }
+            return Pair(updateMappingResponse, targetQueryIndex)
+        } catch (e: Exception) {
+            val unwrappedException = ExceptionsHelper.unwrapCause(e) as Exception
+            log.debug("exception during PUT mapping on queryIndex: $targetQueryIndex exception: ${unwrappedException.message}")
+            // If we reached the limit for the total number of fields in the mappings, do a rollover here
+            if (unwrappedException.message?.contains("Limit of total fields") == true) {
+                try {
+                    // Roll the queryIndex over
+                    targetQueryIndex = rolloverQueryIndex(monitor)
+                    // Adjust the max field limit setting for the new index.
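+                    // Illustrative flow: the PUT mapping above hit the total-fields limit, so the alias
+                    // now points at a fresh backing index after the rollover; we raise its field limit
+                    // and retry the mapping update there.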
+                    adjustMaxFieldLimitForQueryIndex(sourceIndexFieldLimit, targetQueryIndex)
+                    // PUT the mappings to the newly created index
+                    val updateMappingRequest = PutMappingRequest(targetQueryIndex)
+                    updateMappingRequest.source(mapOf("properties" to updatedProperties))
+                    updateMappingResponse = client.suspendUntil {
+                        client.admin().indices().putMapping(updateMappingRequest, it)
+                    }
+                } catch (e: Exception) {
+                    // If we reached the limit for the total number of fields in the mappings even after the rollover,
+                    // the source index has more than (FIELD_LIMIT - 3) fields (every query index defines 3 fields of its own)
+                    // TODO maybe split queries/mappings between multiple query indices?
+                    val unwrappedException = ExceptionsHelper.unwrapCause(e) as Exception
+                    log.debug("exception after rollover queryIndex index: $targetQueryIndex exception: ${unwrappedException.message}")
+                    if (unwrappedException.message?.contains("Limit of total fields") == true) {
+                        val errorMessage =
+                            "Monitor [${monitorMetadata.monitorId}] can't process index [$sourceIndex] due to field mapping limit"
+                        log.error(errorMessage)
+                        throw AlertingException(errorMessage, RestStatus.INTERNAL_SERVER_ERROR, e)
+                    } else {
+                        throw AlertingException.wrap(e)
+                    }
+                }
+            } else {
+                log.debug("unknown exception during PUT mapping on queryIndex: $targetQueryIndex")
+                val unwrappedException = ExceptionsHelper.unwrapCause(e) as Exception
+                throw AlertingException.wrap(unwrappedException)
+            }
+        }
+        // We rolled over, so try to apply the mappings again on the new targetQueryIndex
+        if (targetQueryIndex.isNotEmpty()) {
+            // Add the newly created index to the monitor's metadata object so that we can fetch it later,
+            // when either applying mappings or running queries
+            monitorMetadata.sourceToQueryIndexMapping[sourceIndex + monitor.id] = targetQueryIndex
+        } else {
+            val failureMessage = "Failed to resolve targetQueryIndex!"
+            log.error(failureMessage)
+            throw AlertingException(failureMessage, RestStatus.INTERNAL_SERVER_ERROR, IllegalStateException(failureMessage))
+        }
+        return Pair(updateMappingResponse, targetQueryIndex)
+    }
+
+    /**
+     * Merges conflicting leaf fields in the mapping tree.
+     */
+    private fun mergeConflictingFields(oldField: Map<String, Any>, newField: Map<String, Any>): Map<String, Any> {
+        val mergedField = mutableMapOf<String, Any>()
+        oldField.entries.forEach {
+            if (newField.containsKey(it.key)) {
+                if (it.value is Map<*, *> && newField[it.key] is Map<*, *>) {
+                    mergedField[it.key] =
+                        mergeConflictingFields(it.value as Map<String, Any>, newField[it.key] as Map<String, Any>)
+                } else {
+                    mergedField[it.key] = it.value
+                }
+            } else {
+                mergedField[it.key] = it.value
+            }
+        }
+
+        newField.entries.forEach {
+            if (!oldField.containsKey(it.key)) {
+                mergedField[it.key] = it.value
+            }
+        }
+        return mergedField
+    }
+
+    /**
+     * Gets all fields that have the same name but different mappings across the concrete indices of an index pattern.
+     */
+    fun getAllConflictingFields(clusterState: ClusterState, concreteIndices: List<String>): Set<String> {
+        val conflictingFields = mutableSetOf<String>()
+        val allFlattenPaths = mutableMapOf<String, MutableMap<String, Any>>()
+        concreteIndices.forEach { concreteIndexName ->
+            if (clusterState.routingTable.hasIndex(concreteIndexName)) {
+                val indexMetadata = clusterState.metadata.index(concreteIndexName)
+                if (indexMetadata.mapping()?.sourceAsMap?.get("properties") != null) {
+                    val properties = (
+                        (indexMetadata.mapping()?.sourceAsMap?.get("properties"))
+                            as MutableMap<String, Any>
+                        )
+                    // The node processor function is applied to every leaf of the index mappings tree;
+                    // here it is an identity function that leaves each leaf unchanged
+                    val leafNodeProcessor =
+                        fun(fieldName: String, _: String, props: MutableMap<String, Any>): Triple<String, String, MutableMap<String, Any>> {
+                            return Triple(fieldName, fieldName, props)
+                        }
+                    // Traverse the index mappings while extracting the flattened field paths
+                    val flattenPaths = mutableMapOf<String, MutableMap<String, Any>>()
+                    traverseMappingsAndUpdate(properties, "", leafNodeProcessor, flattenPaths)
+
+                    flattenPaths.forEach {
+                        if (allFlattenPaths.containsKey(it.key) && allFlattenPaths[it.key]!! != it.value) {
+                            conflictingFields.add(it.key)
+                        }
+                        allFlattenPaths.putIfAbsent(it.key, it.value)
+                    }
+                }
+            }
+        }
+        return conflictingFields
+    }
+
+    /**
+     * Checks the max field limit for a concrete index.
+     */
+    private suspend fun checkMaxFieldLimit(sourceIndex: String): Long {
+        val getSettingsResponse: GetSettingsResponse = client.suspendUntil {
+            admin().indices().getSettings(GetSettingsRequest().indices(sourceIndex), it)
+        }
+        return getSettingsResponse.getSetting(sourceIndex, INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.key)?.toLong() ?: 1000L
+    }
+
+    /**
+     * Adjusts the max field limit index setting for the query index if the source index has a higher limit.
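+     * (Illustrative example: a source index with 1200 fields and a query index limit of 1000
+     * raises the query index limit to 1200 + QUERY_INDEX_BASE_FIELDS_COUNT = 1208.)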
+     * This prevents the max-field-limit exception when the source index has more fields than the query index limit.
+     */
+    private suspend fun adjustMaxFieldLimitForQueryIndex(sourceIndexFieldLimit: Long, concreteQueryIndex: String) {
+        val getSettingsResponse: GetSettingsResponse = client.suspendUntil {
+            admin().indices().getSettings(GetSettingsRequest().indices(concreteQueryIndex), it)
+        }
+        val queryIndexLimit =
+            getSettingsResponse.getSetting(concreteQueryIndex, INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.key)?.toLong() ?: 1000L
+        // Our query index initially has 3 fields we defined and 5 more built-in metadata fields in its mappings, so account for those
+        if (sourceIndexFieldLimit > (queryIndexLimit - QUERY_INDEX_BASE_FIELDS_COUNT)) {
+            val updateSettingsResponse: AcknowledgedResponse = client.suspendUntil {
+                admin().indices().updateSettings(
+                    UpdateSettingsRequest(concreteQueryIndex).settings(
+                        Settings.builder().put(
+                            INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.key,
+                            sourceIndexFieldLimit + QUERY_INDEX_BASE_FIELDS_COUNT
+                        )
+                    ),
+                    it
+                )
+            }
+        }
+    }
+
+    private suspend fun rolloverQueryIndex(monitor: Monitor): String {
+        val queryIndex = monitor.dataSources.queryIndex
+        val queryIndexPattern = monitor.dataSources.queryIndex + INDEX_PATTERN_SUFFIX
+
+        val request = RolloverRequest(queryIndex, null)
+        request.createIndexRequest.index(queryIndexPattern)
+            .mapping(docLevelQueriesMappings())
+            .settings(docLevelQueriesSettings())
+        val response: RolloverResponse = client.suspendUntil {
+            client.admin().indices().rolloverIndex(request, it)
+        }
+        if (!response.isRolledOver) {
+            val message = "failed to rollover queryIndex:$queryIndex queryIndexPattern:$queryIndexPattern"
+            log.error(message)
+            throw AlertingException.wrap(
+                OpenSearchStatusException(message, RestStatus.INTERNAL_SERVER_ERROR)
+            )
+        }
+        return response.newIndex
+    }
+
+    private fun getWriteIndexNameForAlias(alias: String): String?
{ + return this.clusterService.state().metadata().indicesLookup?.get(alias)?.writeIndex?.index?.name + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/util/IndexUtils.kt b/alerting/bin/main/org/opensearch/alerting/util/IndexUtils.kt new file mode 100644 index 000000000..387f5cb22 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/util/IndexUtils.kt @@ -0,0 +1,200 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util + +import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest +import org.opensearch.action.support.IndicesOptions +import org.opensearch.action.support.master.AcknowledgedResponse +import org.opensearch.alerting.alerts.AlertIndices +import org.opensearch.alerting.core.ScheduledJobIndices +import org.opensearch.client.IndicesAdminClient +import org.opensearch.cluster.ClusterState +import org.opensearch.cluster.metadata.IndexAbstraction +import org.opensearch.cluster.metadata.IndexMetadata +import org.opensearch.cluster.metadata.IndexNameExpressionResolver +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.util.IndexUtils +import org.opensearch.core.action.ActionListener +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser + +class IndexUtils { + + companion object { + val VALID_INDEX_NAME_REGEX = Regex("""^(?![_\-\+])(?!.*\.\.)[^\s,\\\/\*\?"<>|#:\.]{1,255}$""") + + const val _META = "_meta" + const val SCHEMA_VERSION = "schema_version" + + var scheduledJobIndexSchemaVersion: Int + private set + var alertIndexSchemaVersion: Int + private set + var findingIndexSchemaVersion: Int + private set + + var scheduledJobIndexUpdated: Boolean = false + private set + var alertIndexUpdated: Boolean = false + private set + var findingIndexUpdated: Boolean = false + private set + var lastUpdatedAlertHistoryIndex: String? = null + var lastUpdatedFindingHistoryIndex: String? 
= null
+
+        init {
+            scheduledJobIndexSchemaVersion = getSchemaVersion(ScheduledJobIndices.scheduledJobMappings())
+            alertIndexSchemaVersion = getSchemaVersion(AlertIndices.alertMapping())
+            findingIndexSchemaVersion = getSchemaVersion(AlertIndices.findingMapping())
+        }
+
+        @JvmStatic
+        fun scheduledJobIndexUpdated() {
+            scheduledJobIndexUpdated = true
+        }
+
+        @JvmStatic
+        fun alertIndexUpdated() {
+            alertIndexUpdated = true
+        }
+
+        @JvmStatic
+        fun findingIndexUpdated() {
+            findingIndexUpdated = true
+        }
+
+        @JvmStatic
+        fun getSchemaVersion(mapping: String): Int {
+            val xcp = XContentType.JSON.xContent().createParser(
+                NamedXContentRegistry.EMPTY,
+                LoggingDeprecationHandler.INSTANCE,
+                mapping
+            )
+
+            while (!xcp.isClosed) {
+                val token = xcp.currentToken()
+                if (token != null && token != XContentParser.Token.END_OBJECT && token != XContentParser.Token.START_OBJECT) {
+                    if (xcp.currentName() != _META) {
+                        xcp.nextToken()
+                        xcp.skipChildren()
+                    } else {
+                        while (xcp.nextToken() != XContentParser.Token.END_OBJECT) {
+                            when (xcp.currentName()) {
+                                SCHEMA_VERSION -> {
+                                    val version = xcp.intValue()
+                                    require(version > -1)
+                                    return version
+                                }
+                                else -> xcp.nextToken()
+                            }
+                        }
+                    }
+                }
+                xcp.nextToken()
+            }
+            return IndexUtils.NO_SCHEMA_VERSION
+        }
+
+        @JvmStatic
+        fun getIndexNameWithAlias(clusterState: ClusterState, alias: String): String {
+            return clusterState.metadata.indices.entries.first { it.value.aliases.containsKey(alias) }.key
+        }
+
+        @JvmStatic
+        fun shouldUpdateIndex(index: IndexMetadata, mapping: String): Boolean {
+            var oldVersion = IndexUtils.NO_SCHEMA_VERSION
+            val newVersion = getSchemaVersion(mapping)
+
+            val indexMapping = index.mapping()?.sourceAsMap()
+            if (indexMapping != null && indexMapping.containsKey(_META) && indexMapping[_META] is HashMap<*, *>) {
+                val metaData = indexMapping[_META] as HashMap<*, *>
+                if (metaData.containsKey(SCHEMA_VERSION)) {
+                    oldVersion = metaData[SCHEMA_VERSION] as Int
+                }
+            }
+            return newVersion > oldVersion
+        }
+
+        @JvmStatic
+        fun updateIndexMapping(
+            index: String,
+            mapping: String,
+            clusterState: ClusterState,
+            client: IndicesAdminClient,
+            actionListener: ActionListener<AcknowledgedResponse>
+        ) {
+            if (clusterState.metadata.indices.containsKey(index)) {
+                if (shouldUpdateIndex(clusterState.metadata.indices[index]!!, mapping)) {
+                    val putMappingRequest: PutMappingRequest = PutMappingRequest(index).source(mapping, XContentType.JSON)
+                    client.putMapping(putMappingRequest, actionListener)
+                } else {
+                    actionListener.onResponse(AcknowledgedResponse(true))
+                }
+            }
+        }
+
+        @JvmStatic
+        fun resolveAllIndices(indices: List<String>, clusterService: ClusterService, resolver: IndexNameExpressionResolver): List<String> {
+            val result = mutableListOf<String>()
+
+            indices.forEach { index ->
+                val concreteIndices = resolver.concreteIndexNames(
+                    clusterService.state(),
+                    IndicesOptions.lenientExpand(),
+                    true,
+                    index
+                )
+                result.addAll(concreteIndices)
+            }
+
+            return result
+        }
+
+        @JvmStatic
+        fun isDataStream(name: String, clusterState: ClusterState): Boolean {
+            return clusterState.metadata().dataStreams().containsKey(name)
+        }
+
+        @JvmStatic
+        fun isAlias(name: String, clusterState: ClusterState): Boolean {
+            return clusterState.metadata().hasAlias(name)
+        }
+
+        @JvmStatic
+        fun getWriteIndex(index: String, clusterState: ClusterState): String?
{
+            if (isAlias(index, clusterState) || isDataStream(index, clusterState)) {
+                val metadata = clusterState.metadata.indicesLookup[index]?.writeIndex
+                if (metadata != null) {
+                    return metadata.index.name
+                }
+            }
+            return null
+        }
+
+        @JvmStatic
+        fun getNewestIndicesByCreationDate(concreteIndices: List<String>, clusterState: ClusterState, thresholdDate: Long): List<String> {
+            val filteredIndices = mutableListOf<String>()
+            val lookup = clusterState.metadata().indicesLookup
+            concreteIndices.forEach { indexName ->
+                val index = lookup[indexName]
+                val indexMetadata = clusterState.metadata.index(indexName)
+                if (index != null && index.type == IndexAbstraction.Type.CONCRETE_INDEX) {
+                    if (indexMetadata.creationDate >= thresholdDate) {
+                        filteredIndices.add(indexName)
+                    }
+                }
+            }
+            return filteredIndices
+        }
+
+        @JvmStatic
+        fun getCreationDateForIndex(index: String, clusterState: ClusterState): Long {
+            return clusterState.metadata.index(index).creationDate
+        }
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/util/RestHandlerUtils.kt b/alerting/bin/main/org/opensearch/alerting/util/RestHandlerUtils.kt
new file mode 100644
index 000000000..b5aeaa542
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/util/RestHandlerUtils.kt
@@ -0,0 +1,29 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.util
+
+import org.opensearch.alerting.AlertingPlugin
+import org.opensearch.core.common.Strings
+import org.opensearch.rest.RestRequest
+import org.opensearch.search.fetch.subphase.FetchSourceContext
+
+/**
+ * Checks whether the request came from OpenSearch Dashboards; if so, we want to return the UI metadata with the document.
+ * If the request came from any other client, we exclude the UI metadata from the search result.
+ *
+ * @param request
+ * @return FetchSourceContext
+ */
+fun context(request: RestRequest): FetchSourceContext?
{ + val userAgent = if (request.header("User-Agent") == null) "" else request.header("User-Agent") + return if (!userAgent.contains(AlertingPlugin.OPEN_SEARCH_DASHBOARDS_USER_AGENT)) { + FetchSourceContext(true, Strings.EMPTY_ARRAY, AlertingPlugin.UI_METADATA_EXCLUDE) + } else null +} + +const val IF_SEQ_NO = "if_seq_no" +const val IF_PRIMARY_TERM = "if_primary_term" +const val REFRESH = "refresh" diff --git a/alerting/bin/main/org/opensearch/alerting/util/ScheduledJobUtils.kt b/alerting/bin/main/org/opensearch/alerting/util/ScheduledJobUtils.kt new file mode 100644 index 000000000..70fe42a38 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/util/ScheduledJobUtils.kt @@ -0,0 +1,70 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util + +import org.apache.logging.log4j.LogManager +import org.opensearch.OpenSearchStatusException +import org.opensearch.action.get.GetResponse +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry + +private val log = LogManager.getLogger(ScheduledJobUtils::class.java) + +class ScheduledJobUtils { + companion object { + const val WORKFLOW_DELEGATE_PATH = "workflow.inputs.composite_input.sequence.delegates" + const val WORKFLOW_MONITOR_PATH = "workflow.inputs.composite_input.sequence.delegates.monitor_id" + fun parseWorkflowFromScheduledJobDocSource(xContentRegistry: NamedXContentRegistry, response: GetResponse): Workflow { + XContentHelper.createParser( + xContentRegistry, LoggingDeprecationHandler.INSTANCE, + response.sourceAsBytesRef, XContentType.JSON + ).use { xcp -> + try { + val workflow = ScheduledJob.parse(xcp, response.id, response.version) + if (workflow is Workflow) { + return workflow + } else { + log.error("Unable to parse workflow from ${response.source}") + throw OpenSearchStatusException( + "Unable to parse workflow from ${response.source}", + RestStatus.INTERNAL_SERVER_ERROR + ) + } + } catch (e: java.lang.Exception) { + throw AlertingException("Unable to parse workflow from ${response.source}", RestStatus.INTERNAL_SERVER_ERROR, e) + } + } + } + + fun parseMonitorFromScheduledJobDocSource(xContentRegistry: NamedXContentRegistry, response: GetResponse): Monitor { + XContentHelper.createParser( + xContentRegistry, LoggingDeprecationHandler.INSTANCE, + response.sourceAsBytesRef, XContentType.JSON + ).use { xcp -> + try { + val monitor = ScheduledJob.parse(xcp, response.id, response.version) + if (monitor is Monitor) { + return monitor + } else { + log.error("Unable to parse monitor from ${response.source}") + throw OpenSearchStatusException( + "Unable to parse monitor from ${response.source}", + RestStatus.INTERNAL_SERVER_ERROR + ) + } + } catch (e: java.lang.Exception) { + throw AlertingException("Unable to parse monitor from ${response.source}", RestStatus.INTERNAL_SERVER_ERROR, e) + } + } + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesHelpers.kt b/alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesHelpers.kt new file mode 100644 index 000000000..8e92b597f --- /dev/null +++ 
b/alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesHelpers.kt
@@ -0,0 +1,859 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.util.clusterMetricsMonitorHelpers
+
+import org.apache.logging.log4j.LogManager
+import org.opensearch.action.ActionRequest
+import org.opensearch.action.ActionRequestValidationException
+import org.opensearch.action.ValidateActions
+import org.opensearch.action.admin.cluster.health.ClusterHealthRequest
+import org.opensearch.action.admin.cluster.health.ClusterHealthResponse
+import org.opensearch.action.admin.cluster.state.ClusterStateRequest
+import org.opensearch.action.admin.cluster.state.ClusterStateResponse
+import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest
+import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse
+import org.opensearch.action.admin.indices.stats.CommonStats
+import org.opensearch.action.admin.indices.stats.IndicesStatsRequest
+import org.opensearch.action.admin.indices.stats.IndicesStatsResponse
+import org.opensearch.action.support.IndicesOptions
+import org.opensearch.alerting.util.IndexUtils.Companion.VALID_INDEX_NAME_REGEX
+import org.opensearch.cluster.metadata.IndexMetadata
+import org.opensearch.common.time.DateFormatter
+import org.opensearch.core.action.ActionResponse
+import org.opensearch.core.common.io.stream.StreamOutput
+import org.opensearch.core.common.io.stream.Writeable
+import org.opensearch.core.xcontent.ToXContent
+import org.opensearch.core.xcontent.ToXContentObject
+import org.opensearch.core.xcontent.XContentBuilder
+import org.opensearch.index.IndexSettings
+import java.time.Instant
+import java.time.ZoneOffset
+import java.time.ZonedDateTime
+import java.util.Locale
+
+class CatIndicesRequestWrapper(val pathParams: String = "") : ActionRequest() {
+    val log = LogManager.getLogger(CatIndicesRequestWrapper::class.java)
+
+    var clusterHealthRequest: ClusterHealthRequest =
+        ClusterHealthRequest().indicesOptions(IndicesOptions.lenientExpandHidden())
+    var clusterStateRequest: ClusterStateRequest =
+        ClusterStateRequest().indicesOptions(IndicesOptions.lenientExpandHidden())
+    var indexSettingsRequest: GetSettingsRequest =
+        GetSettingsRequest()
+            .indicesOptions(IndicesOptions.lenientExpandHidden())
+            .names(IndexSettings.INDEX_SEARCH_THROTTLED.key)
+    var indicesStatsRequest: IndicesStatsRequest =
+        IndicesStatsRequest().all().indicesOptions(IndicesOptions.lenientExpandHidden())
+    var indicesList = arrayOf<String>()
+
+    init {
+        if (pathParams.isNotBlank()) {
+            indicesList = pathParams.split(",").toTypedArray()
+
+            require(validate() == null) {
+                "The path parameters do not form a valid, comma-separated list of data streams, indices, or index aliases."
+            }
+
+            clusterHealthRequest = clusterHealthRequest.indices(*indicesList)
+            clusterStateRequest = clusterStateRequest.indices(*indicesList)
+            indexSettingsRequest = indexSettingsRequest.indices(*indicesList)
+            indicesStatsRequest = indicesStatsRequest.indices(*indicesList)
+        }
+    }
+
+    override fun validate(): ActionRequestValidationException? {
+        var exception: ActionRequestValidationException?
= null
+        if (pathParams.isNotBlank() && indicesList.any { !VALID_INDEX_NAME_REGEX.containsMatchIn(it) })
+            exception = ValidateActions.addValidationError(
+                "The path parameters do not form a valid, comma-separated list of data streams, indices, or index aliases.",
+                exception
+            )
+        return exception
+    }
+}
+
+class CatIndicesResponseWrapper(
+    clusterHealthResponse: ClusterHealthResponse,
+    clusterStateResponse: ClusterStateResponse,
+    indexSettingsResponse: GetSettingsResponse,
+    indicesStatsResponse: IndicesStatsResponse
+) : ActionResponse(), ToXContentObject {
+    var indexInfoList: List<IndexInfo> = listOf()
+
+    init {
+        indexInfoList = compileIndexInfo(
+            clusterHealthResponse,
+            clusterStateResponse,
+            indexSettingsResponse,
+            indicesStatsResponse
+        )
+    }
+
+    companion object {
+        const val WRAPPER_FIELD = "indices"
+    }
+
+    override fun writeTo(out: StreamOutput) {
+        out.writeList(indexInfoList)
+    }
+
+    override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder {
+        builder.startObject()
+        builder.startArray(WRAPPER_FIELD)
+        indexInfoList.forEach { it.toXContent(builder, params) }
+        builder.endArray()
+        return builder.endObject()
+    }
+
+    private fun compileIndexInfo(
+        clusterHealthResponse: ClusterHealthResponse,
+        clusterStateResponse: ClusterStateResponse,
+        indexSettingsResponse: GetSettingsResponse,
+        indicesStatsResponse: IndicesStatsResponse
+    ): List<IndexInfo> {
+        val list = mutableListOf<IndexInfo>()
+
+        val indicesSettings = indexSettingsResponse.indexToSettings
+        val indicesHealths = clusterHealthResponse.indices
+        val indicesStats = indicesStatsResponse.indices
+        val indicesMetadatas = hashMapOf<String, IndexMetadata>()
+        clusterStateResponse.state.metadata.forEach { indicesMetadatas[it.index.name] = it }
+
+        indicesSettings.forEach { (indexName, settings) ->
+            if (!indicesMetadatas.containsKey(indexName)) return@forEach
+
+            val indexMetadata = indicesMetadatas[indexName]
+            val indexState = indexMetadata?.state
+            val indexStats = indicesStats[indexName]
+            val searchThrottled = IndexSettings.INDEX_SEARCH_THROTTLED.get(settings)
+            val indexHealth = indicesHealths[indexName]
+
+            var health = ""
+            if (indexHealth != null) {
+                health = indexHealth.status.toString().lowercase(Locale.ROOT)
+            } else if (indexStats != null) {
+                health = "red*"
+            }
+
+            val primaryStats: CommonStats?
+            val totalStats: CommonStats?
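+            // If the index reports no stats (e.g. it is closed), fall back to empty CommonStats
+            // so the stringified values below render as defaults instead of failing.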
+ if (indexStats == null || indexState == IndexMetadata.State.CLOSE) { + primaryStats = CommonStats() + totalStats = CommonStats() + } else { + primaryStats = indexStats.primaries + totalStats = indexStats.total + } + + list.add( + IndexInfo( + health = health, + status = indexState.toString().lowercase(Locale.ROOT), + index = indexName, + uuid = indexMetadata?.indexUUID, + pri = "${indexHealth?.numberOfShards}", + rep = "${indexHealth?.numberOfReplicas}", + docsCount = "${primaryStats?.getDocs()?.count}", + docsDeleted = "${primaryStats?.getDocs()?.deleted}", + creationDate = "${indexMetadata?.creationDate}", + creationDateString = DateFormatter.forPattern("strict_date_time") + .format(ZonedDateTime.ofInstant(Instant.ofEpochMilli(indexMetadata!!.creationDate), ZoneOffset.UTC)), + storeSize = "${totalStats?.store?.size}", + priStoreSize = "${primaryStats?.store?.size}", + completionSize = "${totalStats?.completion?.size}", + priCompletionSize = "${primaryStats?.completion?.size}", + fieldDataMemorySize = "${totalStats?.fieldData?.memorySize}", + priFieldDataMemorySize = "${primaryStats?.fieldData?.memorySize}", + fieldDataEvictions = "${totalStats?.fieldData?.evictions}", + priFieldDataEvictions = "${primaryStats?.fieldData?.evictions}", + queryCacheMemorySize = "${totalStats?.queryCache?.memorySize}", + priQueryCacheMemorySize = "${primaryStats?.queryCache?.memorySize}", + queryCacheEvictions = "${totalStats?.queryCache?.evictions}", + priQueryCacheEvictions = "${primaryStats?.queryCache?.evictions}", + requestCacheMemorySize = "${totalStats?.requestCache?.memorySize}", + priRequestCacheMemorySize = "${primaryStats?.requestCache?.memorySize}", + requestCacheEvictions = "${totalStats?.requestCache?.evictions}", + priRequestCacheEvictions = "${primaryStats?.requestCache?.evictions}", + requestCacheHitCount = "${totalStats?.requestCache?.hitCount}", + priRequestCacheHitCount = "${primaryStats?.requestCache?.hitCount}", + requestCacheMissCount = "${totalStats?.requestCache?.missCount}", + priRequestCacheMissCount = "${primaryStats?.requestCache?.missCount}", + flushTotal = "${totalStats?.flush?.total}", + priFlushTotal = "${primaryStats?.flush?.total}", + flushTotalTime = "${totalStats?.flush?.totalTime}", + priFlushTotalTime = "${primaryStats?.flush?.totalTime}", + getCurrent = "${totalStats?.get?.current()}", + priGetCurrent = "${primaryStats?.get?.current()}", + getTime = "${totalStats?.get?.time}", + priGetTime = "${primaryStats?.get?.time}", + getTotal = "${totalStats?.get?.count}", + priGetTotal = "${primaryStats?.get?.count}", + getExistsTime = "${totalStats?.get?.existsTime}", + priGetExistsTime = "${primaryStats?.get?.existsTime}", + getExistsTotal = "${totalStats?.get?.existsCount}", + priGetExistsTotal = "${primaryStats?.get?.existsCount}", + getMissingTime = "${totalStats?.get?.missingTime}", + priGetMissingTime = "${primaryStats?.get?.missingTime}", + getMissingTotal = "${totalStats?.get?.missingCount}", + priGetMissingTotal = "${primaryStats?.get?.missingCount}", + indexingDeleteCurrent = "${totalStats?.indexing?.total?.deleteCurrent}", + priIndexingDeleteCurrent = "${primaryStats?.indexing?.total?.deleteCurrent}", + indexingDeleteTime = "${totalStats?.indexing?.total?.deleteTime}", + priIndexingDeleteTime = "${primaryStats?.indexing?.total?.deleteTime}", + indexingDeleteTotal = "${totalStats?.indexing?.total?.deleteCount}", + priIndexingDeleteTotal = "${primaryStats?.indexing?.total?.deleteCount}", + indexingIndexCurrent = "${totalStats?.indexing?.total?.indexCurrent}", + 
priIndexingIndexCurrent = "${primaryStats?.indexing?.total?.indexCurrent}", + indexingIndexTime = "${totalStats?.indexing?.total?.indexTime}", + priIndexingIndexTime = "${primaryStats?.indexing?.total?.indexTime}", + indexingIndexTotal = "${totalStats?.indexing?.total?.indexCount}", + priIndexingIndexTotal = "${primaryStats?.indexing?.total?.indexCount}", + indexingIndexFailed = "${totalStats?.indexing?.total?.indexFailedCount}", + priIndexingIndexFailed = "${primaryStats?.indexing?.total?.indexFailedCount}", + mergesCurrent = "${totalStats?.merge?.current}", + priMergesCurrent = "${primaryStats?.merge?.current}", + mergesCurrentDocs = "${totalStats?.merge?.currentNumDocs}", + priMergesCurrentDocs = "${primaryStats?.merge?.currentNumDocs}", + mergesCurrentSize = "${totalStats?.merge?.currentSize}", + priMergesCurrentSize = "${primaryStats?.merge?.currentSize}", + mergesTotal = "${totalStats?.merge?.total}", + priMergesTotal = "${primaryStats?.merge?.total}", + mergesTotalDocs = "${totalStats?.merge?.totalNumDocs}", + priMergesTotalDocs = "${primaryStats?.merge?.totalNumDocs}", + mergesTotalSize = "${totalStats?.merge?.totalSize}", + priMergesTotalSize = "${primaryStats?.merge?.totalSize}", + mergesTotalTime = "${totalStats?.merge?.totalTime}", + priMergesTotalTime = "${primaryStats?.merge?.totalTime}", + refreshTotal = "${totalStats?.refresh?.total}", + priRefreshTotal = "${primaryStats?.refresh?.total}", + refreshTime = "${totalStats?.refresh?.totalTime}", + priRefreshTime = "${primaryStats?.refresh?.totalTime}", + refreshExternalTotal = "${totalStats?.refresh?.externalTotal}", + priRefreshExternalTotal = "${primaryStats?.refresh?.externalTotal}", + refreshExternalTime = "${totalStats?.refresh?.externalTotalTime}", + priRefreshExternalTime = "${primaryStats?.refresh?.externalTotalTime}", + refreshListeners = "${totalStats?.refresh?.listeners}", + priRefreshListeners = "${primaryStats?.refresh?.listeners}", + searchFetchCurrent = "${totalStats?.search?.total?.fetchCurrent}", + priSearchFetchCurrent = "${primaryStats?.search?.total?.fetchCurrent}", + searchFetchTime = "${totalStats?.search?.total?.fetchTime}", + priSearchFetchTime = "${primaryStats?.search?.total?.fetchTime}", + searchFetchTotal = "${totalStats?.search?.total?.fetchCount}", + priSearchFetchTotal = "${primaryStats?.search?.total?.fetchCount}", + searchOpenContexts = "${totalStats?.search?.openContexts}", + priSearchOpenContexts = "${primaryStats?.search?.openContexts}", + searchQueryCurrent = "${totalStats?.search?.total?.queryCurrent}", + priSearchQueryCurrent = "${primaryStats?.search?.total?.queryCurrent}", + searchQueryTime = "${totalStats?.search?.total?.queryTime}", + priSearchQueryTime = "${primaryStats?.search?.total?.queryTime}", + searchQueryTotal = "${totalStats?.search?.total?.queryCount}", + priSearchQueryTotal = "${primaryStats?.search?.total?.queryCount}", + searchScrollCurrent = "${totalStats?.search?.total?.scrollCurrent}", + priSearchScrollCurrent = "${primaryStats?.search?.total?.scrollCurrent}", + searchScrollTime = "${totalStats?.search?.total?.scrollTime}", + priSearchScrollTime = "${primaryStats?.search?.total?.scrollTime}", + searchScrollTotal = "${totalStats?.search?.total?.scrollCount}", + priSearchScrollTotal = "${primaryStats?.search?.total?.scrollCount}", + searchPointInTimeCurrent = "${totalStats?.search?.total?.pitCurrent}", + priSearchPointInTimeCurrent = "${primaryStats?.search?.total?.pitCurrent}", + searchPointInTimeTime = "${totalStats?.search?.total?.pitTime}", + priSearchPointInTimeTime 
= "${primaryStats?.search?.total?.pitTime}", + searchPointInTimeTotal = "${totalStats?.search?.total?.pitCount}", + priSearchPointInTimeTotal = "${primaryStats?.search?.total?.pitCount}", + segmentsCount = "${totalStats?.segments?.count}", + priSegmentsCount = "${primaryStats?.segments?.count}", + segmentsMemory = "${totalStats?.segments?.zeroMemory}", + priSegmentsMemory = "${primaryStats?.segments?.zeroMemory}", + segmentsIndexWriterMemory = "${totalStats?.segments?.indexWriterMemory}", + priSegmentsIndexWriterMemory = "${primaryStats?.segments?.indexWriterMemory}", + segmentsVersionMapMemory = "${totalStats?.segments?.versionMapMemory}", + priSegmentsVersionMapMemory = "${primaryStats?.segments?.versionMapMemory}", + segmentsFixedBitsetMemory = "${totalStats?.segments?.bitsetMemory}", + priSegmentsFixedBitsetMemory = "${primaryStats?.segments?.bitsetMemory}", + warmerCurrent = "${totalStats?.warmer?.current()}", + priWarmerCurrent = "${primaryStats?.warmer?.current()}", + warmerTotal = "${totalStats?.warmer?.total()}", + priWarmerTotal = "${primaryStats?.warmer?.total()}", + warmerTotalTime = "${totalStats?.warmer?.totalTime()}", + priWarmerTotalTime = "${primaryStats?.warmer?.totalTime()}", + suggestCurrent = "${totalStats?.search?.total?.suggestCurrent}", + priSuggestCurrent = "${primaryStats?.search?.total?.suggestCurrent}", + suggestTime = "${totalStats?.search?.total?.suggestTime}", + priSuggestTime = "${primaryStats?.search?.total?.suggestTime}", + suggestTotal = "${totalStats?.search?.total?.suggestCount}", + priSuggestTotal = "${primaryStats?.search?.total?.suggestCount}", + memoryTotal = "${totalStats?.totalMemory}", + priMemoryTotal = "${primaryStats?.totalMemory}", + searchThrottled = "$searchThrottled", + ) + ) + } + + return list + } + + data class IndexInfo( + val health: String?, + val status: String?, + val index: String?, + val uuid: String?, + val pri: String?, + val rep: String?, + val docsCount: String?, + val docsDeleted: String?, + val creationDate: String?, + val creationDateString: String?, + val storeSize: String?, + val priStoreSize: String?, + val completionSize: String?, + val priCompletionSize: String?, + val fieldDataMemorySize: String?, + val priFieldDataMemorySize: String?, + val fieldDataEvictions: String?, + val priFieldDataEvictions: String?, + val queryCacheMemorySize: String?, + val priQueryCacheMemorySize: String?, + val queryCacheEvictions: String?, + val priQueryCacheEvictions: String?, + val requestCacheMemorySize: String?, + val priRequestCacheMemorySize: String?, + val requestCacheEvictions: String?, + val priRequestCacheEvictions: String?, + val requestCacheHitCount: String?, + val priRequestCacheHitCount: String?, + val requestCacheMissCount: String?, + val priRequestCacheMissCount: String?, + val flushTotal: String?, + val priFlushTotal: String?, + val flushTotalTime: String?, + val priFlushTotalTime: String?, + val getCurrent: String?, + val priGetCurrent: String?, + val getTime: String?, + val priGetTime: String?, + val getTotal: String?, + val priGetTotal: String?, + val getExistsTime: String?, + val priGetExistsTime: String?, + val getExistsTotal: String?, + val priGetExistsTotal: String?, + val getMissingTime: String?, + val priGetMissingTime: String?, + val getMissingTotal: String?, + val priGetMissingTotal: String?, + val indexingDeleteCurrent: String?, + val priIndexingDeleteCurrent: String?, + val indexingDeleteTime: String?, + val priIndexingDeleteTime: String?, + val indexingDeleteTotal: String?, + val priIndexingDeleteTotal: 
String?, + val indexingIndexCurrent: String?, + val priIndexingIndexCurrent: String?, + val indexingIndexTime: String?, + val priIndexingIndexTime: String?, + val indexingIndexTotal: String?, + val priIndexingIndexTotal: String?, + val indexingIndexFailed: String?, + val priIndexingIndexFailed: String?, + val mergesCurrent: String?, + val priMergesCurrent: String?, + val mergesCurrentDocs: String?, + val priMergesCurrentDocs: String?, + val mergesCurrentSize: String?, + val priMergesCurrentSize: String?, + val mergesTotal: String?, + val priMergesTotal: String?, + val mergesTotalDocs: String?, + val priMergesTotalDocs: String?, + val mergesTotalSize: String?, + val priMergesTotalSize: String?, + val mergesTotalTime: String?, + val priMergesTotalTime: String?, + val refreshTotal: String?, + val priRefreshTotal: String?, + val refreshTime: String?, + val priRefreshTime: String?, + val refreshExternalTotal: String?, + val priRefreshExternalTotal: String?, + val refreshExternalTime: String?, + val priRefreshExternalTime: String?, + val refreshListeners: String?, + val priRefreshListeners: String?, + val searchFetchCurrent: String?, + val priSearchFetchCurrent: String?, + val searchFetchTime: String?, + val priSearchFetchTime: String?, + val searchFetchTotal: String?, + val priSearchFetchTotal: String?, + val searchOpenContexts: String?, + val priSearchOpenContexts: String?, + val searchQueryCurrent: String?, + val priSearchQueryCurrent: String?, + val searchQueryTime: String?, + val priSearchQueryTime: String?, + val searchQueryTotal: String?, + val priSearchQueryTotal: String?, + val searchScrollCurrent: String?, + val priSearchScrollCurrent: String?, + val searchScrollTime: String?, + val priSearchScrollTime: String?, + val searchScrollTotal: String?, + val priSearchScrollTotal: String?, + val searchPointInTimeCurrent: String?, + val priSearchPointInTimeCurrent: String?, + val searchPointInTimeTime: String?, + val priSearchPointInTimeTime: String?, + val searchPointInTimeTotal: String?, + val priSearchPointInTimeTotal: String?, + val segmentsCount: String?, + val priSegmentsCount: String?, + val segmentsMemory: String?, + val priSegmentsMemory: String?, + val segmentsIndexWriterMemory: String?, + val priSegmentsIndexWriterMemory: String?, + val segmentsVersionMapMemory: String?, + val priSegmentsVersionMapMemory: String?, + val segmentsFixedBitsetMemory: String?, + val priSegmentsFixedBitsetMemory: String?, + val warmerCurrent: String?, + val priWarmerCurrent: String?, + val warmerTotal: String?, + val priWarmerTotal: String?, + val warmerTotalTime: String?, + val priWarmerTotalTime: String?, + val suggestCurrent: String?, + val priSuggestCurrent: String?, + val suggestTime: String?, + val priSuggestTime: String?, + val suggestTotal: String?, + val priSuggestTotal: String?, + val memoryTotal: String?, + val priMemoryTotal: String?, + val searchThrottled: String? 
+ ) : ToXContentObject, Writeable { + companion object { + const val HEALTH_FIELD = "health" + const val STATUS_FIELD = "status" + const val INDEX_FIELD = "index" + const val UUID_FIELD = "uuid" + const val PRI_FIELD = "pri" + const val REP_FIELD = "rep" + const val DOCS_COUNT_FIELD = "docs.count" + const val DOCS_DELETED_FIELD = "docs.deleted" + const val CREATION_DATE_FIELD = "creation.date" + const val CREATION_DATE_STRING_FIELD = "creation.date.string" + const val STORE_SIZE_FIELD = "store.size" + const val PRI_STORE_SIZE_FIELD = "pri.store.size" + const val COMPLETION_SIZE_FIELD = "completion.size" + const val PRI_COMPLETION_SIZE_FIELD = "pri.completion.size" + const val FIELD_DATA_MEMORY_SIZE_FIELD = "fielddata.memory_size" + const val PRI_FIELD_DATA_MEMORY_SIZE_FIELD = "pri.fielddata.memory_size" + const val FIELD_DATA_EVICTIONS_FIELD = "fielddata.evictions" + const val PRI_FIELD_DATA_EVICTIONS_FIELD = "pri.fielddata.evictions" + const val QUERY_CACHE_MEMORY_SIZE_FIELD = "query_cache.memory_size" + const val PRI_QUERY_CACHE_MEMORY_SIZE_FIELD = "pri.query_cache.memory_size" + const val QUERY_CACHE_EVICTIONS_FIELD = "query_cache.evictions" + const val PRI_QUERY_CACHE_EVICTIONS_FIELD = "pri.query_cache.evictions" + const val REQUEST_CACHE_MEMORY_SIZE_FIELD = "request_cache.memory_size" + const val PRI_REQUEST_CACHE_MEMORY_SIZE_FIELD = "pri.request_cache.memory_size" + const val REQUEST_CACHE_EVICTIONS_FIELD = "request_cache.evictions" + const val PRI_REQUEST_CACHE_EVICTIONS_FIELD = "pri.request_cache.evictions" + const val REQUEST_CACHE_HIT_COUNT_FIELD = "request_cache.hit_count" + const val PRI_REQUEST_CACHE_HIT_COUNT_FIELD = "pri.request_cache.hit_count" + const val REQUEST_CACHE_MISS_COUNT_FIELD = "request_cache.miss_count" + const val PRI_REQUEST_CACHE_MISS_COUNT_FIELD = "pri.request_cache.miss_count" + const val FLUSH_TOTAL_FIELD = "flush.total" + const val PRI_FLUSH_TOTAL_FIELD = "pri.flush.total" + const val FLUSH_TOTAL_TIME_FIELD = "flush.total_time" + const val PRI_FLUSH_TOTAL_TIME_FIELD = "pri.flush.total_time" + const val GET_CURRENT_FIELD = "get.current" + const val PRI_GET_CURRENT_FIELD = "pri.get.current" + const val GET_TIME_FIELD = "get.time" + const val PRI_GET_TIME_FIELD = "pri.get.time" + const val GET_TOTAL_FIELD = "get.total" + const val PRI_GET_TOTAL_FIELD = "pri.get.total" + const val GET_EXISTS_TIME_FIELD = "get.exists_time" + const val PRI_GET_EXISTS_TIME_FIELD = "pri.get.exists_time" + const val GET_EXISTS_TOTAL_FIELD = "get.exists_total" + const val PRI_GET_EXISTS_TOTAL_FIELD = "pri.get.exists_total" + const val GET_MISSING_TIME_FIELD = "get.missing_time" + const val PRI_GET_MISSING_TIME_FIELD = "pri.get.missing_time" + const val GET_MISSING_TOTAL_FIELD = "get.missing_total" + const val PRI_GET_MISSING_TOTAL_FIELD = "pri.get.missing_total" + const val INDEXING_DELETE_CURRENT_FIELD = "indexing.delete_current" + const val PRI_INDEXING_DELETE_CURRENT_FIELD = "pri.indexing.delete_current" + const val INDEXING_DELETE_TIME_FIELD = "indexing.delete_time" + const val PRI_INDEXING_DELETE_TIME_FIELD = "pri.indexing.delete_time" + const val INDEXING_DELETE_TOTAL_FIELD = "indexing.delete_total" + const val PRI_INDEXING_DELETE_TOTAL_FIELD = "pri.indexing.delete_total" + const val INDEXING_INDEX_CURRENT_FIELD = "indexing.index_current" + const val PRI_INDEXING_INDEX_CURRENT_FIELD = "pri.indexing.index_current" + const val INDEXING_INDEX_TIME_FIELD = "indexing.index_time" + const val PRI_INDEXING_INDEX_TIME_FIELD = "pri.indexing.index_time" + const val 
INDEXING_INDEX_TOTAL_FIELD = "indexing.index_total" + const val PRI_INDEXING_INDEX_TOTAL_FIELD = "pri.indexing.index_total" + const val INDEXING_INDEX_FAILED_FIELD = "indexing.index_failed" + const val PRI_INDEXING_INDEX_FAILED_FIELD = "pri.indexing.index_failed" + const val MERGES_CURRENT_FIELD = "merges.current" + const val PRI_MERGES_CURRENT_FIELD = "pri.merges.current" + const val MERGES_CURRENT_DOCS_FIELD = "merges.current_docs" + const val PRI_MERGES_CURRENT_DOCS_FIELD = "pri.merges.current_docs" + const val MERGES_CURRENT_SIZE_FIELD = "merges.current_size" + const val PRI_MERGES_CURRENT_SIZE_FIELD = "pri.merges.current_size" + const val MERGES_TOTAL_FIELD = "merges.total" + const val PRI_MERGES_TOTAL_FIELD = "pri.merges.total" + const val MERGES_TOTAL_DOCS_FIELD = "merges.total_docs" + const val PRI_MERGES_TOTAL_DOCS_FIELD = "pri.merges.total_docs" + const val MERGES_TOTAL_SIZE_FIELD = "merges.total_size" + const val PRI_MERGES_TOTAL_SIZE_FIELD = "pri.merges.total_size" + const val MERGES_TOTAL_TIME_FIELD = "merges.total_time" + const val PRI_MERGES_TOTAL_TIME_FIELD = "pri.merges.total_time" + const val REFRESH_TOTAL_FIELD = "refresh.total" + const val PRI_REFRESH_TOTAL_FIELD = "pri.refresh.total" + const val REFRESH_TIME_FIELD = "refresh.time" + const val PRI_REFRESH_TIME_FIELD = "pri.refresh.time" + const val REFRESH_EXTERNAL_TOTAL_FIELD = "refresh.external_total" + const val PRI_REFRESH_EXTERNAL_TOTAL_FIELD = "pri.refresh.external_total" + const val REFRESH_EXTERNAL_TIME_FIELD = "refresh.external_time" + const val PRI_REFRESH_EXTERNAL_TIME_FIELD = "pri.refresh.external_time" + const val REFRESH_LISTENERS_FIELD = "refresh.listeners" + const val PRI_REFRESH_LISTENERS_FIELD = "pri.refresh.listeners" + const val SEARCH_FETCH_CURRENT_FIELD = "search.fetch_current" + const val PRI_SEARCH_FETCH_CURRENT_FIELD = "pri.search.fetch_current" + const val SEARCH_FETCH_TIME_FIELD = "search.fetch_time" + const val PRI_SEARCH_FETCH_TIME_FIELD = "pri.search.fetch_time" + const val SEARCH_FETCH_TOTAL_FIELD = "search.fetch_total" + const val PRI_SEARCH_FETCH_TOTAL_FIELD = "pri.search.fetch_total" + const val SEARCH_OPEN_CONTEXTS_FIELD = "search.open_contexts" + const val PRI_SEARCH_OPEN_CONTEXTS_FIELD = "pri.search.open_contexts" + const val SEARCH_QUERY_CURRENT_FIELD = "search.query_current" + const val PRI_SEARCH_QUERY_CURRENT_FIELD = "pri.search.query_current" + const val SEARCH_QUERY_TIME_FIELD = "search.query_time" + const val PRI_SEARCH_QUERY_TIME_FIELD = "pri.search.query_time" + const val SEARCH_QUERY_TOTAL_FIELD = "search.query_total" + const val PRI_SEARCH_QUERY_TOTAL_FIELD = "pri.search.query_total" + const val SEARCH_SCROLL_CURRENT_FIELD = "search.scroll_current" + const val PRI_SEARCH_SCROLL_CURRENT_FIELD = "pri.search.scroll_current" + const val SEARCH_SCROLL_TIME_FIELD = "search.scroll_time" + const val PRI_SEARCH_SCROLL_TIME_FIELD = "pri.search.scroll_time" + const val SEARCH_SCROLL_TOTAL_FIELD = "search.scroll_total" + const val PRI_SEARCH_SCROLL_TOTAL_FIELD = "pri.search.scroll_total" + const val SEARCH_POINT_IN_TIME_CURRENT_FIELD = "search.point_in_time_current" + const val PRI_SEARCH_POINT_IN_TIME_CURRENT_FIELD = "pri.search.point_in_time_current" + const val SEARCH_POINT_IN_TIME_TIME_FIELD = "search.point_in_time_time" + const val PRI_SEARCH_POINT_IN_TIME_TIME_FIELD = "pri.search.point_in_time_time" + const val SEARCH_POINT_IN_TIME_TOTAL_FIELD = "search.point_in_time_total" + const val PRI_SEARCH_POINT_IN_TIME_TOTAL_FIELD = "pri.search.point_in_time_total" + const val 
SEGMENTS_COUNT_FIELD = "segments.count" + const val PRI_SEGMENTS_COUNT_FIELD = "pri.segments.count" + const val SEGMENTS_MEMORY_FIELD = "segments.memory" + const val PRI_SEGMENTS_MEMORY_FIELD = "pri.segments.memory" + const val SEGMENTS_INDEX_WRITER_MEMORY_FIELD = "segments.index_writer_memory" + const val PRI_SEGMENTS_INDEX_WRITER_MEMORY_FIELD = "pri.segments.index_writer_memory" + const val SEGMENTS_VERSION_MAP_MEMORY_FIELD = "segments.version_map_memory" + const val PRI_SEGMENTS_VERSION_MAP_MEMORY_FIELD = "pri.segments.version_map_memory" + const val SEGMENTS_FIXED_BITSET_MEMORY_FIELD = "segments.fixed_bitset_memory" + const val PRI_SEGMENTS_FIXED_BITSET_MEMORY_FIELD = "pri.segments.fixed_bitset_memory" + const val WARMER_CURRENT_FIELD = "warmer.current" + const val PRI_WARMER_CURRENT_FIELD = "pri.warmer.current" + const val WARMER_TOTAL_FIELD = "warmer.total" + const val PRI_WARMER_TOTAL_FIELD = "pri.warmer.total" + const val WARMER_TOTAL_TIME_FIELD = "warmer.total_time" + const val PRI_WARMER_TOTAL_TIME_FIELD = "pri.warmer.total_time" + const val SUGGEST_CURRENT_FIELD = "suggest.current" + const val PRI_SUGGEST_CURRENT_FIELD = "pri.suggest.current" + const val SUGGEST_TIME_FIELD = "suggest.time" + const val PRI_SUGGEST_TIME_FIELD = "pri.suggest.time" + const val SUGGEST_TOTAL_FIELD = "suggest.total" + const val PRI_SUGGEST_TOTAL_FIELD = "pri.suggest.total" + const val MEMORY_TOTAL_FIELD = "memory.total" + const val PRI_MEMORY_TOTAL_FIELD = "pri.memory.total" + const val SEARCH_THROTTLED_FIELD = "search.throttled" + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + .field(HEALTH_FIELD, health) + .field(STATUS_FIELD, status) + .field(INDEX_FIELD, index) + .field(UUID_FIELD, uuid) + .field(PRI_FIELD, pri) + .field(REP_FIELD, rep) + .field(DOCS_COUNT_FIELD, docsCount) + .field(DOCS_DELETED_FIELD, docsDeleted) + .field(CREATION_DATE_FIELD, creationDate) + .field(CREATION_DATE_STRING_FIELD, creationDateString) + .field(STORE_SIZE_FIELD, storeSize) + .field(PRI_STORE_SIZE_FIELD, priStoreSize) + .field(COMPLETION_SIZE_FIELD, completionSize) + .field(PRI_COMPLETION_SIZE_FIELD, priCompletionSize) + .field(FIELD_DATA_MEMORY_SIZE_FIELD, fieldDataMemorySize) + .field(PRI_FIELD_DATA_MEMORY_SIZE_FIELD, priFieldDataMemorySize) + .field(FIELD_DATA_EVICTIONS_FIELD, fieldDataEvictions) + .field(PRI_FIELD_DATA_EVICTIONS_FIELD, priFieldDataEvictions) + .field(QUERY_CACHE_MEMORY_SIZE_FIELD, queryCacheMemorySize) + .field(PRI_QUERY_CACHE_MEMORY_SIZE_FIELD, priQueryCacheMemorySize) + .field(QUERY_CACHE_EVICTIONS_FIELD, queryCacheEvictions) + .field(PRI_QUERY_CACHE_EVICTIONS_FIELD, priQueryCacheEvictions) + .field(REQUEST_CACHE_MEMORY_SIZE_FIELD, requestCacheMemorySize) + .field(PRI_REQUEST_CACHE_MEMORY_SIZE_FIELD, priRequestCacheMemorySize) + .field(REQUEST_CACHE_EVICTIONS_FIELD, requestCacheEvictions) + .field(PRI_REQUEST_CACHE_EVICTIONS_FIELD, priRequestCacheEvictions) + .field(REQUEST_CACHE_HIT_COUNT_FIELD, requestCacheHitCount) + .field(PRI_REQUEST_CACHE_HIT_COUNT_FIELD, priRequestCacheHitCount) + .field(REQUEST_CACHE_MISS_COUNT_FIELD, requestCacheMissCount) + .field(PRI_REQUEST_CACHE_MISS_COUNT_FIELD, priRequestCacheMissCount) + .field(FLUSH_TOTAL_FIELD, flushTotal) + .field(PRI_FLUSH_TOTAL_FIELD, priFlushTotal) + .field(FLUSH_TOTAL_TIME_FIELD, flushTotalTime) + .field(PRI_FLUSH_TOTAL_TIME_FIELD, priFlushTotalTime) + .field(GET_CURRENT_FIELD, getCurrent) + .field(PRI_GET_CURRENT_FIELD, priGetCurrent) + .field(GET_TIME_FIELD, 
getTime) + .field(PRI_GET_TIME_FIELD, priGetTime) + .field(GET_TOTAL_FIELD, getTotal) + .field(PRI_GET_TOTAL_FIELD, priGetTotal) + .field(GET_EXISTS_TIME_FIELD, getExistsTime) + .field(PRI_GET_EXISTS_TIME_FIELD, priGetExistsTime) + .field(GET_EXISTS_TOTAL_FIELD, getExistsTotal) + .field(PRI_GET_EXISTS_TOTAL_FIELD, priGetExistsTotal) + .field(GET_MISSING_TIME_FIELD, getMissingTime) + .field(PRI_GET_MISSING_TIME_FIELD, priGetMissingTime) + .field(GET_MISSING_TOTAL_FIELD, getMissingTotal) + .field(PRI_GET_MISSING_TOTAL_FIELD, priGetMissingTotal) + .field(INDEXING_DELETE_CURRENT_FIELD, indexingDeleteCurrent) + .field(PRI_INDEXING_DELETE_CURRENT_FIELD, priIndexingDeleteCurrent) + .field(INDEXING_DELETE_TIME_FIELD, indexingDeleteTime) + .field(PRI_INDEXING_DELETE_TIME_FIELD, priIndexingDeleteTime) + .field(INDEXING_DELETE_TOTAL_FIELD, indexingDeleteTotal) + .field(PRI_INDEXING_DELETE_TOTAL_FIELD, priIndexingDeleteTotal) + .field(INDEXING_INDEX_CURRENT_FIELD, indexingIndexCurrent) + .field(PRI_INDEXING_INDEX_CURRENT_FIELD, priIndexingIndexCurrent) + .field(INDEXING_INDEX_TIME_FIELD, indexingIndexTime) + .field(PRI_INDEXING_INDEX_TIME_FIELD, priIndexingIndexTime) + .field(INDEXING_INDEX_TOTAL_FIELD, indexingIndexTotal) + .field(PRI_INDEXING_INDEX_TOTAL_FIELD, priIndexingIndexTotal) + .field(INDEXING_INDEX_FAILED_FIELD, indexingIndexFailed) + .field(PRI_INDEXING_INDEX_FAILED_FIELD, priIndexingIndexFailed) + .field(MERGES_CURRENT_FIELD, mergesCurrent) + .field(PRI_MERGES_CURRENT_FIELD, priMergesCurrent) + .field(MERGES_CURRENT_DOCS_FIELD, mergesCurrentDocs) + .field(PRI_MERGES_CURRENT_DOCS_FIELD, priMergesCurrentDocs) + .field(MERGES_CURRENT_SIZE_FIELD, mergesCurrentSize) + .field(PRI_MERGES_CURRENT_SIZE_FIELD, priMergesCurrentSize) + .field(MERGES_TOTAL_FIELD, mergesTotal) + .field(PRI_MERGES_TOTAL_FIELD, priMergesTotal) + .field(MERGES_TOTAL_DOCS_FIELD, mergesTotalDocs) + .field(PRI_MERGES_TOTAL_DOCS_FIELD, priMergesTotalDocs) + .field(MERGES_TOTAL_SIZE_FIELD, mergesTotalSize) + .field(PRI_MERGES_TOTAL_SIZE_FIELD, priMergesTotalSize) + .field(MERGES_TOTAL_TIME_FIELD, mergesTotalTime) + .field(PRI_MERGES_TOTAL_TIME_FIELD, priMergesTotalTime) + .field(REFRESH_TOTAL_FIELD, refreshTotal) + .field(PRI_REFRESH_TOTAL_FIELD, priRefreshTotal) + .field(REFRESH_TIME_FIELD, refreshTime) + .field(PRI_REFRESH_TIME_FIELD, priRefreshTime) + .field(REFRESH_EXTERNAL_TOTAL_FIELD, refreshExternalTotal) + .field(PRI_REFRESH_EXTERNAL_TOTAL_FIELD, priRefreshExternalTotal) + .field(REFRESH_EXTERNAL_TIME_FIELD, refreshExternalTime) + .field(PRI_REFRESH_EXTERNAL_TIME_FIELD, priRefreshExternalTime) + .field(REFRESH_LISTENERS_FIELD, refreshListeners) + .field(PRI_REFRESH_LISTENERS_FIELD, priRefreshListeners) + .field(SEARCH_FETCH_CURRENT_FIELD, searchFetchCurrent) + .field(PRI_SEARCH_FETCH_CURRENT_FIELD, priSearchFetchCurrent) + .field(SEARCH_FETCH_TIME_FIELD, searchFetchTime) + .field(PRI_SEARCH_FETCH_TIME_FIELD, priSearchFetchTime) + .field(SEARCH_FETCH_TOTAL_FIELD, searchFetchTotal) + .field(PRI_SEARCH_FETCH_TOTAL_FIELD, priSearchFetchTotal) + .field(SEARCH_OPEN_CONTEXTS_FIELD, searchOpenContexts) + .field(PRI_SEARCH_OPEN_CONTEXTS_FIELD, priSearchOpenContexts) + .field(SEARCH_QUERY_CURRENT_FIELD, searchQueryCurrent) + .field(PRI_SEARCH_QUERY_CURRENT_FIELD, priSearchQueryCurrent) + .field(SEARCH_QUERY_TIME_FIELD, searchQueryTime) + .field(PRI_SEARCH_QUERY_TIME_FIELD, priSearchQueryTime) + .field(SEARCH_QUERY_TOTAL_FIELD, searchQueryTotal) + .field(PRI_SEARCH_QUERY_TOTAL_FIELD, priSearchQueryTotal) + 
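+            // Unprefixed fields aggregate across all shard copies, while the "pri."-prefixed
+            // variants cover primary shards only, mirroring the column semantics of _cat/indices.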
.field(SEARCH_SCROLL_CURRENT_FIELD, searchScrollCurrent) + .field(PRI_SEARCH_SCROLL_CURRENT_FIELD, priSearchScrollCurrent) + .field(SEARCH_SCROLL_TIME_FIELD, searchScrollTime) + .field(PRI_SEARCH_SCROLL_TIME_FIELD, priSearchScrollTime) + .field(SEARCH_SCROLL_TOTAL_FIELD, searchScrollTotal) + .field(PRI_SEARCH_SCROLL_TOTAL_FIELD, priSearchScrollTotal) + .field(SEARCH_POINT_IN_TIME_CURRENT_FIELD, searchPointInTimeCurrent) + .field(PRI_SEARCH_POINT_IN_TIME_CURRENT_FIELD, priSearchPointInTimeCurrent) + .field(SEARCH_POINT_IN_TIME_TIME_FIELD, searchPointInTimeTime) + .field(PRI_SEARCH_POINT_IN_TIME_TIME_FIELD, priSearchPointInTimeTime) + .field(SEARCH_POINT_IN_TIME_TOTAL_FIELD, searchPointInTimeTotal) + .field(PRI_SEARCH_POINT_IN_TIME_TOTAL_FIELD, priSearchPointInTimeTotal) + .field(SEGMENTS_COUNT_FIELD, segmentsCount) + .field(PRI_SEGMENTS_COUNT_FIELD, priSegmentsCount) + .field(SEGMENTS_MEMORY_FIELD, segmentsMemory) + .field(PRI_SEGMENTS_MEMORY_FIELD, priSegmentsMemory) + .field(SEGMENTS_INDEX_WRITER_MEMORY_FIELD, segmentsIndexWriterMemory) + .field(PRI_SEGMENTS_INDEX_WRITER_MEMORY_FIELD, priSegmentsIndexWriterMemory) + .field(SEGMENTS_VERSION_MAP_MEMORY_FIELD, segmentsVersionMapMemory) + .field(PRI_SEGMENTS_VERSION_MAP_MEMORY_FIELD, priSegmentsVersionMapMemory) + .field(SEGMENTS_FIXED_BITSET_MEMORY_FIELD, segmentsFixedBitsetMemory) + .field(PRI_SEGMENTS_FIXED_BITSET_MEMORY_FIELD, priSegmentsFixedBitsetMemory) + .field(WARMER_CURRENT_FIELD, warmerCurrent) + .field(PRI_WARMER_CURRENT_FIELD, priWarmerCurrent) + .field(WARMER_TOTAL_FIELD, warmerTotal) + .field(PRI_WARMER_TOTAL_FIELD, priWarmerTotal) + .field(WARMER_TOTAL_TIME_FIELD, warmerTotalTime) + .field(PRI_WARMER_TOTAL_TIME_FIELD, priWarmerTotalTime) + .field(SUGGEST_CURRENT_FIELD, suggestCurrent) + .field(PRI_SUGGEST_CURRENT_FIELD, priSuggestCurrent) + .field(SUGGEST_TIME_FIELD, suggestTime) + .field(PRI_SUGGEST_TIME_FIELD, priSuggestTime) + .field(SUGGEST_TOTAL_FIELD, suggestTotal) + .field(PRI_SUGGEST_TOTAL_FIELD, priSuggestTotal) + .field(MEMORY_TOTAL_FIELD, memoryTotal) + .field(PRI_MEMORY_TOTAL_FIELD, priMemoryTotal) + .field(SEARCH_THROTTLED_FIELD, searchThrottled) + return builder.endObject() + } + + override fun writeTo(out: StreamOutput) { + out.writeString(health) + out.writeString(status) + out.writeString(index) + out.writeString(uuid) + out.writeString(pri) + out.writeString(rep) + out.writeString(docsCount) + out.writeString(docsDeleted) + out.writeString(creationDate) + out.writeString(creationDateString) + out.writeString(storeSize) + out.writeString(priStoreSize) + out.writeString(completionSize) + out.writeString(priCompletionSize) + out.writeString(fieldDataMemorySize) + out.writeString(priFieldDataMemorySize) + out.writeString(fieldDataEvictions) + out.writeString(priFieldDataEvictions) + out.writeString(queryCacheMemorySize) + out.writeString(priQueryCacheMemorySize) + out.writeString(queryCacheEvictions) + out.writeString(priQueryCacheEvictions) + out.writeString(requestCacheMemorySize) + out.writeString(priRequestCacheMemorySize) + out.writeString(requestCacheEvictions) + out.writeString(priRequestCacheEvictions) + out.writeString(requestCacheHitCount) + out.writeString(priRequestCacheHitCount) + out.writeString(requestCacheMissCount) + out.writeString(priRequestCacheMissCount) + out.writeString(flushTotal) + out.writeString(priFlushTotal) + out.writeString(flushTotalTime) + out.writeString(priFlushTotalTime) + out.writeString(getCurrent) + out.writeString(priGetCurrent) + out.writeString(getTime) + 
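+        // Positional serialization: the write order below must exactly match the order in which
+        // the corresponding StreamInput-based reader reconstructs this object on the receiving node.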
out.writeString(priGetTime) + out.writeString(getTotal) + out.writeString(priGetTotal) + out.writeString(getExistsTime) + out.writeString(priGetExistsTime) + out.writeString(getExistsTotal) + out.writeString(priGetExistsTotal) + out.writeString(getMissingTime) + out.writeString(priGetMissingTime) + out.writeString(getMissingTotal) + out.writeString(priGetMissingTotal) + out.writeString(indexingDeleteCurrent) + out.writeString(priIndexingDeleteCurrent) + out.writeString(indexingDeleteTime) + out.writeString(priIndexingDeleteTime) + out.writeString(indexingDeleteTotal) + out.writeString(priIndexingDeleteTotal) + out.writeString(indexingIndexCurrent) + out.writeString(priIndexingIndexCurrent) + out.writeString(indexingIndexTime) + out.writeString(priIndexingIndexTime) + out.writeString(indexingIndexTotal) + out.writeString(priIndexingIndexTotal) + out.writeString(indexingIndexFailed) + out.writeString(priIndexingIndexFailed) + out.writeString(mergesCurrent) + out.writeString(priMergesCurrent) + out.writeString(mergesCurrentDocs) + out.writeString(priMergesCurrentDocs) + out.writeString(mergesCurrentSize) + out.writeString(priMergesCurrentSize) + out.writeString(mergesTotal) + out.writeString(priMergesTotal) + out.writeString(mergesTotalDocs) + out.writeString(priMergesTotalDocs) + out.writeString(mergesTotalSize) + out.writeString(priMergesTotalSize) + out.writeString(mergesTotalTime) + out.writeString(priMergesTotalTime) + out.writeString(refreshTotal) + out.writeString(priRefreshTotal) + out.writeString(refreshTime) + out.writeString(priRefreshTime) + out.writeString(refreshExternalTotal) + out.writeString(priRefreshExternalTotal) + out.writeString(refreshExternalTime) + out.writeString(priRefreshExternalTime) + out.writeString(refreshListeners) + out.writeString(priRefreshListeners) + out.writeString(searchFetchCurrent) + out.writeString(priSearchFetchCurrent) + out.writeString(searchFetchTime) + out.writeString(priSearchFetchTime) + out.writeString(searchFetchTotal) + out.writeString(priSearchFetchTotal) + out.writeString(searchOpenContexts) + out.writeString(priSearchOpenContexts) + out.writeString(searchQueryCurrent) + out.writeString(priSearchQueryCurrent) + out.writeString(searchQueryTime) + out.writeString(priSearchQueryTime) + out.writeString(searchQueryTotal) + out.writeString(priSearchQueryTotal) + out.writeString(searchScrollCurrent) + out.writeString(priSearchScrollCurrent) + out.writeString(searchScrollTime) + out.writeString(priSearchScrollTime) + out.writeString(searchScrollTotal) + out.writeString(priSearchScrollTotal) + out.writeString(searchPointInTimeCurrent) + out.writeString(priSearchPointInTimeCurrent) + out.writeString(searchPointInTimeTime) + out.writeString(priSearchPointInTimeTime) + out.writeString(searchPointInTimeTotal) + out.writeString(priSearchPointInTimeTotal) + out.writeString(segmentsCount) + out.writeString(priSegmentsCount) + out.writeString(segmentsMemory) + out.writeString(priSegmentsMemory) + out.writeString(segmentsIndexWriterMemory) + out.writeString(priSegmentsIndexWriterMemory) + out.writeString(segmentsVersionMapMemory) + out.writeString(priSegmentsVersionMapMemory) + out.writeString(segmentsFixedBitsetMemory) + out.writeString(priSegmentsFixedBitsetMemory) + out.writeString(warmerCurrent) + out.writeString(priWarmerCurrent) + out.writeString(warmerTotal) + out.writeString(priWarmerTotal) + out.writeString(warmerTotalTime) + out.writeString(priWarmerTotalTime) + out.writeString(suggestCurrent) + out.writeString(priSuggestCurrent) + 
out.writeString(suggestTime)
+        out.writeString(priSuggestTime)
+        out.writeString(suggestTotal)
+        out.writeString(priSuggestTotal)
+        out.writeString(memoryTotal)
+        out.writeString(priMemoryTotal)
+        out.writeString(searchThrottled)
+        }
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsHelpers.kt b/alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsHelpers.kt
new file mode 100644
index 000000000..12152e69d
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsHelpers.kt
@@ -0,0 +1,495 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.util.clusterMetricsMonitorHelpers
+
+import org.opensearch.action.ActionRequest
+import org.opensearch.action.ActionRequestValidationException
+import org.opensearch.action.ValidateActions
+import org.opensearch.action.admin.cluster.state.ClusterStateRequest
+import org.opensearch.action.admin.cluster.state.ClusterStateResponse
+import org.opensearch.action.admin.indices.stats.CommonStats
+import org.opensearch.action.admin.indices.stats.IndicesStatsRequest
+import org.opensearch.action.admin.indices.stats.IndicesStatsResponse
+import org.opensearch.action.admin.indices.stats.ShardStats
+import org.opensearch.alerting.util.IndexUtils.Companion.VALID_INDEX_NAME_REGEX
+import org.opensearch.cluster.routing.UnassignedInfo
+import org.opensearch.common.unit.TimeValue
+import org.opensearch.core.action.ActionResponse
+import org.opensearch.core.common.io.stream.StreamOutput
+import org.opensearch.core.common.io.stream.Writeable
+import org.opensearch.core.xcontent.ToXContent
+import org.opensearch.core.xcontent.ToXContentObject
+import org.opensearch.core.xcontent.XContentBuilder
+import org.opensearch.index.cache.query.QueryCacheStats
+import org.opensearch.index.engine.CommitStats
+import org.opensearch.index.engine.Engine
+import org.opensearch.index.engine.SegmentsStats
+import org.opensearch.index.fielddata.FieldDataStats
+import org.opensearch.index.flush.FlushStats
+import org.opensearch.index.get.GetStats
+import org.opensearch.index.merge.MergeStats
+import org.opensearch.index.refresh.RefreshStats
+import org.opensearch.index.search.stats.SearchStats
+import org.opensearch.index.seqno.SeqNoStats
+import org.opensearch.index.shard.DocsStats
+import org.opensearch.index.store.StoreStats
+import org.opensearch.search.suggest.completion.CompletionStats
+import java.time.Instant
+import java.util.Locale
+import java.util.function.Function
+
+class CatShardsRequestWrapper(val pathParams: String = "") : ActionRequest() {
+    var clusterStateRequest: ClusterStateRequest =
+        ClusterStateRequest().clear().nodes(true).routingTable(true)
+    var indicesStatsRequest: IndicesStatsRequest =
+        IndicesStatsRequest().all()
+    var indicesList = arrayOf<String>()
+
+    init {
+        if (pathParams.isNotBlank()) {
+            indicesList = pathParams.split(",").toTypedArray()
+
+            require(validate() == null) {
+                "The path parameters do not form a valid, comma-separated list of data streams, indices, or index aliases."
+            }
+
+            clusterStateRequest = clusterStateRequest.indices(*indicesList)
+            indicesStatsRequest = indicesStatsRequest.indices(*indicesList)
+        }
+    }
+
+    override fun validate(): ActionRequestValidationException? {
+        var exception: ActionRequestValidationException? = null
+        if (pathParams.isNotBlank() && indicesList.any { !VALID_INDEX_NAME_REGEX.containsMatchIn(it) })
+            exception = ValidateActions.addValidationError(
+                "The path parameters do not form a valid, comma-separated list of data streams, indices, or index aliases.",
+                exception
+            )
+        return exception
+    }
+}
+
+class CatShardsResponseWrapper(
+    stateResp: ClusterStateResponse,
+    indicesResp: IndicesStatsResponse
+) : ActionResponse(), ToXContentObject {
+    var shardInfoList: List<ShardInfo> = listOf()
+
+    init {
+        shardInfoList = compileShardInfo(stateResp, indicesResp)
+    }
+
+    companion object {
+        const val WRAPPER_FIELD = "shards"
+    }
+
+    override fun writeTo(out: StreamOutput) {
+        out.writeList(shardInfoList)
+    }
+
+    override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder {
+        builder.startObject()
+        builder.startArray(WRAPPER_FIELD)
+        shardInfoList.forEach { it.toXContent(builder, params) }
+        builder.endArray()
+        return builder.endObject()
+    }
+
+    private fun <S, T> getOrNull(stats: S?, accessor: Function<S, T?>, func: Function<T, Any?>): Any? {
+        if (stats != null) {
+            val t: T? = accessor.apply(stats)
+            if (t != null) {
+                return func.apply(t)
+            }
+        }
+        return null
+    }
+
+    private fun compileShardInfo(
+        stateResp: ClusterStateResponse,
+        indicesResp: IndicesStatsResponse
+    ): List<ShardInfo> {
+        val list = mutableListOf<ShardInfo>()
+
+        for (shard in stateResp.state.routingTable.allShards()) {
+            val shardStats = indicesResp.asMap()[shard]
+            var commonStats: CommonStats? = null
+            var commitStats: CommitStats? = null
+            if (shardStats != null) {
+                commonStats = shardStats.stats
+                commitStats = shardStats.commitStats
+            }
+
+            var shardInfo = ShardInfo(
+                index = shard.indexName,
+                shard = "${shard.id}",
+                primaryOrReplica = if (shard.primary()) "p" else "r",
+                state = shard.state().name,
+                docs = getOrNull(commonStats, CommonStats::getDocs, DocsStats::getCount)?.toString(),
+                store = getOrNull(commonStats, CommonStats::getStore, StoreStats::getSize)?.toString(),
+                id = null, // Added below
+                node = null, // Added below
+                completionSize = getOrNull(commonStats, CommonStats::getCompletion, CompletionStats::getSize)?.toString(),
+                fieldDataMemory = getOrNull(commonStats, CommonStats::getFieldData, FieldDataStats::getMemorySize)?.toString(),
+                fieldDataEvictions = getOrNull(commonStats, CommonStats::getFieldData, FieldDataStats::getEvictions)?.toString(),
+                flushTotal = getOrNull(commonStats, CommonStats::getFlush, FlushStats::getTotal)?.toString(),
+                flushTotalTime = getOrNull(commonStats, CommonStats::getFlush, FlushStats::getTotalTime)?.toString(),
+                getCurrent = getOrNull(commonStats, CommonStats::getGet, GetStats::current)?.toString(),
+                getTime = getOrNull(commonStats, CommonStats::getGet, GetStats::getTime)?.toString(),
+                getTotal = getOrNull(commonStats, CommonStats::getGet, GetStats::getCount)?.toString(),
+                getExistsTime = getOrNull(commonStats, CommonStats::getGet, GetStats::getExistsTime)?.toString(),
+                getExistsTotal = getOrNull(commonStats, CommonStats::getGet, GetStats::getExistsCount)?.toString(),
+                getMissingTime = getOrNull(commonStats, CommonStats::getGet, GetStats::getMissingTime)?.toString(),
+                getMissingTotal = getOrNull(commonStats, CommonStats::getGet, GetStats::getMissingCount)?.toString(),
+                indexingDeleteCurrent = getOrNull(commonStats, CommonStats::getIndexing, { it.total.deleteCurrent })?.toString(),
+                indexingDeleteTime = getOrNull(commonStats, CommonStats::getIndexing, { it.total.deleteTime })?.toString(),
+                indexingDeleteTotal = getOrNull(commonStats, CommonStats::getIndexing, {
it.total.deleteCount })?.toString(), + indexingIndexCurrent = getOrNull(commonStats, CommonStats::getIndexing, { it.total.indexCurrent })?.toString(), + indexingIndexTime = getOrNull(commonStats, CommonStats::getIndexing, { it.total.indexTime })?.toString(), + indexingIndexTotal = getOrNull(commonStats, CommonStats::getIndexing, { it.total.indexCount })?.toString(), + indexingIndexFailed = getOrNull(commonStats, CommonStats::getIndexing, { it.total.indexFailedCount })?.toString(), + mergesCurrent = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getCurrent)?.toString(), + mergesCurrentDocs = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getCurrentNumDocs)?.toString(), + mergesCurrentSize = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getCurrentSize)?.toString(), + mergesTotal = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getTotal)?.toString(), + mergesTotalDocs = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getTotalNumDocs)?.toString(), + mergesTotalSize = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getTotalSize)?.toString(), + mergesTotalTime = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getTotalTime)?.toString(), + queryCacheMemory = getOrNull(commonStats, CommonStats::getQueryCache, QueryCacheStats::getMemorySize)?.toString(), + queryCacheEvictions = getOrNull(commonStats, CommonStats::getQueryCache, QueryCacheStats::getEvictions)?.toString(), + recoverySourceType = null, // Added below + refreshTotal = getOrNull(commonStats, CommonStats::getRefresh, RefreshStats::getTotal)?.toString(), + refreshTime = getOrNull(commonStats, CommonStats::getRefresh, RefreshStats::getTotalTime)?.toString(), + searchFetchCurrent = getOrNull(commonStats, CommonStats::getSearch, { it.total.fetchCurrent })?.toString(), + searchFetchTime = getOrNull(commonStats, CommonStats::getSearch, { it.total.fetchTime })?.toString(), + searchFetchTotal = getOrNull(commonStats, CommonStats::getSearch, { it.total.fetchCount })?.toString(), + searchOpenContexts = getOrNull(commonStats, CommonStats::getSearch, SearchStats::getOpenContexts)?.toString(), + searchQueryCurrent = getOrNull(commonStats, CommonStats::getSearch, { it.total.queryCurrent })?.toString(), + searchQueryTime = getOrNull(commonStats, CommonStats::getSearch, { it.total.queryTime })?.toString(), + searchQueryTotal = getOrNull(commonStats, CommonStats::getSearch, { it.total.queryCount })?.toString(), + searchScrollCurrent = getOrNull(commonStats, CommonStats::getSearch, { it.total.scrollCurrent })?.toString(), + searchScrollTime = getOrNull(commonStats, CommonStats::getSearch, { it.total.scrollTime })?.toString(), + searchScrollTotal = getOrNull(commonStats, CommonStats::getSearch, { it.total.scrollCount })?.toString(), + segmentsCount = getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getCount)?.toString(), + segmentsMemory = getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getZeroMemory)?.toString(), + segmentsIndexWriterMemory = + getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getIndexWriterMemory)?.toString(), + segmentsVersionMapMemory = getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getVersionMapMemory)?.toString(), + fixedBitsetMemory = getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getBitsetMemory)?.toString(), + globalCheckpoint = getOrNull(shardStats, ShardStats::getSeqNoStats, SeqNoStats::getGlobalCheckpoint)?.toString(), + localCheckpoint = getOrNull(shardStats, 
ShardStats::getSeqNoStats, SeqNoStats::getLocalCheckpoint)?.toString(), + maxSeqNo = getOrNull(shardStats, ShardStats::getSeqNoStats, SeqNoStats::getMaxSeqNo)?.toString(), + syncId = commitStats?.userData?.get(Engine.SYNC_COMMIT_ID), + unassignedAt = null, // Added below + unassignedDetails = null, // Added below + unassignedFor = null, // Added below + unassignedReason = null // Added below + ) + + if (shard.assignedToNode()) { + val id = shard.currentNodeId() + val node = StringBuilder() + node.append(stateResp.state.nodes().get(id).name) + + if (shard.relocating()) { + val reloNodeId = shard.relocatingNodeId() + val reloName = stateResp.state.nodes().get(reloNodeId).name + node.append(" -> ") + node.append(reloNodeId) + node.append(" ") + node.append(reloName) + } + + shardInfo = shardInfo.copy( + id = id, + node = node.toString() + ) + } + + if (shard.unassignedInfo() != null) { + val unassignedTime = Instant.ofEpochMilli(shard.unassignedInfo().unassignedTimeInMillis) + shardInfo = shardInfo.copy( + unassignedReason = shard.unassignedInfo().reason.name, + unassignedAt = UnassignedInfo.DATE_TIME_FORMATTER.format(unassignedTime), + unassignedFor = + TimeValue.timeValueMillis(System.currentTimeMillis() - shard.unassignedInfo().unassignedTimeInMillis).stringRep, + unassignedDetails = shard.unassignedInfo().details + ) + } + + if (shard.recoverySource() != null) { + shardInfo = shardInfo.copy( + recoverySourceType = shard.recoverySource().type.toString().lowercase(Locale.ROOT) + ) + } + + list.add(shardInfo) + } + return list + } + + data class ShardInfo( + val index: String?, + val shard: String?, + val primaryOrReplica: String?, + val state: String?, + val docs: String?, + val store: String?, + val id: String?, + val node: String?, + val completionSize: String?, + val fieldDataMemory: String?, + val fieldDataEvictions: String?, + val flushTotal: String?, + val flushTotalTime: String?, + val getCurrent: String?, + val getTime: String?, + val getTotal: String?, + val getExistsTime: String?, + val getExistsTotal: String?, + val getMissingTime: String?, + val getMissingTotal: String?, + val indexingDeleteCurrent: String?, + val indexingDeleteTime: String?, + val indexingDeleteTotal: String?, + val indexingIndexCurrent: String?, + val indexingIndexTime: String?, + val indexingIndexTotal: String?, + val indexingIndexFailed: String?, + val mergesCurrent: String?, + val mergesCurrentDocs: String?, + val mergesCurrentSize: String?, + val mergesTotal: String?, + val mergesTotalDocs: String?, + val mergesTotalSize: String?, + val mergesTotalTime: String?, + val queryCacheMemory: String?, + val queryCacheEvictions: String?, + val recoverySourceType: String?, + val refreshTotal: String?, + val refreshTime: String?, + val searchFetchCurrent: String?, + val searchFetchTime: String?, + val searchFetchTotal: String?, + val searchOpenContexts: String?, + val searchQueryCurrent: String?, + val searchQueryTime: String?, + val searchQueryTotal: String?, + val searchScrollCurrent: String?, + val searchScrollTime: String?, + val searchScrollTotal: String?, + val segmentsCount: String?, + val segmentsMemory: String?, + val segmentsIndexWriterMemory: String?, + val segmentsVersionMapMemory: String?, + val fixedBitsetMemory: String?, + val globalCheckpoint: String?, + val localCheckpoint: String?, + val maxSeqNo: String?, + val syncId: String?, + val unassignedAt: String?, + val unassignedDetails: String?, + val unassignedFor: String?, + val unassignedReason: String? 
+ ) : ToXContentObject, Writeable { + companion object { + const val INDEX_FIELD = "index" + const val SHARD_FIELD = "shard" + const val PRIMARY_OR_REPLICA_FIELD = "primaryOrReplica" + const val STATE_FIELD = "state" + const val DOCS_FIELD = "docs" + const val STORE_FIELD = "store" + const val ID_FIELD = "id" + const val NODE_FIELD = "node" + const val COMPLETION_SIZE_FIELD = "completionSize" + const val FIELD_DATA_MEMORY_FIELD = "fielddataMemory" + const val FIELD_DATA_EVICTIONS_FIELD = "fielddataEvictions" + const val FLUSH_TOTAL_FIELD = "flushTotal" + const val FLUSH_TOTAL_TIME_FIELD = "flushTotalTime" + const val GET_CURRENT_FIELD = "getCurrent" + const val GET_TIME_FIELD = "getTime" + const val GET_TOTAL_FIELD = "getTotal" + const val GET_EXISTS_TIME_FIELD = "getExistsTime" + const val GET_EXISTS_TOTAL_FIELD = "getExistsTotal" + const val GET_MISSING_TIME_FIELD = "getMissingTime" + const val GET_MISSING_TOTAL_FIELD = "getMissingTotal" + const val INDEXING_DELETE_CURRENT_FIELD = "indexingDeleteCurrent" + const val INDEXING_DELETE_TIME_FIELD = "indexingDeleteTime" + const val INDEXING_DELETE_TOTAL_FIELD = "indexingDeleteTotal" + const val INDEXING_INDEX_CURRENT_FIELD = "indexingIndexCurrent" + const val INDEXING_INDEX_TIME_FIELD = "indexingIndexTime" + const val INDEXING_INDEX_TOTAL_FIELD = "indexingIndexTotal" + const val INDEXING_INDEX_FAILED_FIELD = "indexingIndexFailed" + const val MERGES_CURRENT_FIELD = "mergesCurrent" + const val MERGES_CURRENT_DOCS_FIELD = "mergesCurrentDocs" + const val MERGES_CURRENT_SIZE_FIELD = "mergesCurrentSize" + const val MERGES_TOTAL_FIELD = "mergesTotal" + const val MERGES_TOTAL_DOCS_FIELD = "mergesTotalDocs" + const val MERGES_TOTAL_SIZE_FIELD = "mergesTotalSize" + const val MERGES_TOTAL_TIME_FIELD = "mergesTotalTime" + const val QUERY_CACHE_MEMORY_FIELD = "queryCacheMemory" + const val QUERY_CACHE_EVICTIONS_FIELD = "queryCacheEvictions" + const val RECOVERY_SOURCE_TYPE_FIELD = "recoverysource.type" + const val REFRESH_TOTAL_FIELD = "refreshTotal" + const val REFRESH_TIME_FIELD = "refreshTime" + const val SEARCH_FETCH_CURRENT_FIELD = "searchFetchCurrent" + const val SEARCH_FETCH_TIME_FIELD = "searchFetchTime" + const val SEARCH_FETCH_TOTAL_FIELD = "searchFetchTotal" + const val SEARCH_OPEN_CONTEXTS_FIELD = "searchOpenContexts" + const val SEARCH_QUERY_CURRENT_FIELD = "searchQueryCurrent" + const val SEARCH_QUERY_TIME_FIELD = "searchQueryTime" + const val SEARCH_QUERY_TOTAL_FIELD = "searchQueryTotal" + const val SEARCH_SCROLL_CURRENT_FIELD = "searchScrollCurrent" + const val SEARCH_SCROLL_TIME_FIELD = "searchScrollTime" + const val SEARCH_SCROLL_TOTAL_FIELD = "searchScrollTotal" + const val SEGMENTS_COUNT_FIELD = "segmentsCount" + const val SEGMENTS_MEMORY_FIELD = "segmentsMemory" + const val SEGMENTS_INDEX_WRITER_MEMORY_FIELD = "segmentsIndexWriterMemory" + const val SEGMENTS_VERSION_MAP_MEMORY_FIELD = "segmentsVersionMapMemory" + const val FIXED_BITSET_MEMORY_FIELD = "fixedBitsetMemory" + const val GLOBAL_CHECKPOINT_FIELD = "globalCheckpoint" + const val LOCAL_CHECKPOINT_FIELD = "localCheckpoint" + const val MAX_SEQ_NO_FIELD = "maxSeqNo" + const val SYNC_ID_FIELD = "sync_id" + const val UNASSIGNED_AT_FIELD = "unassigned.at" + const val UNASSIGNED_DETAILS_FIELD = "unassigned.details" + const val UNASSIGNED_FOR_FIELD = "unassigned.for" + const val UNASSIGNED_REASON_FIELD = "unassigned.reason" + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.startObject() + .field(INDEX_FIELD, index) + 
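+                // Field names mirror the _cat/shards column headers, including the dotted names
+                // ("unassigned.at", "recoverysource.type"), so consumers can address the same keys
+                // they would see in the cat API output.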
.field(SHARD_FIELD, shard) + .field(PRIMARY_OR_REPLICA_FIELD, primaryOrReplica) + .field(STATE_FIELD, state) + .field(DOCS_FIELD, docs) + .field(STORE_FIELD, store) + .field(ID_FIELD, id) + .field(NODE_FIELD, node) + .field(COMPLETION_SIZE_FIELD, completionSize) + .field(FIELD_DATA_MEMORY_FIELD, fieldDataMemory) + .field(FIELD_DATA_EVICTIONS_FIELD, fieldDataEvictions) + .field(FLUSH_TOTAL_FIELD, flushTotal) + .field(FLUSH_TOTAL_TIME_FIELD, flushTotalTime) + .field(GET_CURRENT_FIELD, getCurrent) + .field(GET_TIME_FIELD, getTime) + .field(GET_TOTAL_FIELD, getTotal) + .field(GET_EXISTS_TIME_FIELD, getExistsTime) + .field(GET_EXISTS_TOTAL_FIELD, getExistsTotal) + .field(GET_MISSING_TIME_FIELD, getMissingTime) + .field(GET_MISSING_TOTAL_FIELD, getMissingTotal) + .field(INDEXING_DELETE_CURRENT_FIELD, indexingDeleteCurrent) + .field(INDEXING_DELETE_TIME_FIELD, indexingDeleteTime) + .field(INDEXING_DELETE_TOTAL_FIELD, indexingDeleteTotal) + .field(INDEXING_INDEX_CURRENT_FIELD, indexingIndexCurrent) + .field(INDEXING_INDEX_TIME_FIELD, indexingIndexTime) + .field(INDEXING_INDEX_TOTAL_FIELD, indexingIndexTotal) + .field(INDEXING_INDEX_FAILED_FIELD, indexingIndexFailed) + .field(MERGES_CURRENT_FIELD, mergesCurrent) + .field(MERGES_CURRENT_DOCS_FIELD, mergesCurrentDocs) + .field(MERGES_CURRENT_SIZE_FIELD, mergesCurrentSize) + .field(MERGES_TOTAL_FIELD, mergesTotal) + .field(MERGES_TOTAL_DOCS_FIELD, mergesTotalDocs) + .field(MERGES_TOTAL_SIZE_FIELD, mergesTotalSize) + .field(MERGES_TOTAL_TIME_FIELD, mergesTotalTime) + .field(QUERY_CACHE_MEMORY_FIELD, queryCacheMemory) + .field(QUERY_CACHE_EVICTIONS_FIELD, queryCacheEvictions) + .field(RECOVERY_SOURCE_TYPE_FIELD, recoverySourceType) + .field(REFRESH_TOTAL_FIELD, refreshTotal) + .field(REFRESH_TIME_FIELD, refreshTime) + .field(SEARCH_FETCH_CURRENT_FIELD, searchFetchCurrent) + .field(SEARCH_FETCH_TIME_FIELD, searchFetchTime) + .field(SEARCH_FETCH_TOTAL_FIELD, searchFetchTotal) + .field(SEARCH_OPEN_CONTEXTS_FIELD, searchOpenContexts) + .field(SEARCH_QUERY_CURRENT_FIELD, searchQueryCurrent) + .field(SEARCH_QUERY_TIME_FIELD, searchQueryTime) + .field(SEARCH_QUERY_TOTAL_FIELD, searchQueryTotal) + .field(SEARCH_SCROLL_CURRENT_FIELD, searchScrollCurrent) + .field(SEARCH_SCROLL_TIME_FIELD, searchScrollTime) + .field(SEARCH_SCROLL_TOTAL_FIELD, searchScrollTotal) + .field(SEGMENTS_COUNT_FIELD, segmentsCount) + .field(SEGMENTS_MEMORY_FIELD, segmentsMemory) + .field(SEGMENTS_INDEX_WRITER_MEMORY_FIELD, segmentsIndexWriterMemory) + .field(SEGMENTS_VERSION_MAP_MEMORY_FIELD, segmentsVersionMapMemory) + .field(FIXED_BITSET_MEMORY_FIELD, fixedBitsetMemory) + .field(GLOBAL_CHECKPOINT_FIELD, globalCheckpoint) + .field(LOCAL_CHECKPOINT_FIELD, localCheckpoint) + .field(MAX_SEQ_NO_FIELD, maxSeqNo) + .field(SYNC_ID_FIELD, syncId) + .field(UNASSIGNED_AT_FIELD, unassignedAt) + .field(UNASSIGNED_DETAILS_FIELD, unassignedDetails) + .field(UNASSIGNED_FOR_FIELD, unassignedFor) + .field(UNASSIGNED_REASON_FIELD, unassignedReason) + return builder.endObject() + } + + override fun writeTo(out: StreamOutput) { + out.writeString(index) + out.writeString(shard) + out.writeString(primaryOrReplica) + out.writeString(state) + out.writeString(docs) + out.writeString(store) + out.writeString(id) + out.writeString(node) + out.writeString(completionSize) + out.writeString(fieldDataMemory) + out.writeString(fieldDataEvictions) + out.writeString(flushTotal) + out.writeString(flushTotalTime) + out.writeString(getCurrent) + out.writeString(getTime) + out.writeString(getTotal) + 
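+            // Every stat is written as a string to match the text values surfaced by the cat API;
+            // a StreamInput-side reader (not shown in this diff) is assumed to consume them in
+            // this exact order.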
out.writeString(getExistsTime) + out.writeString(getExistsTotal) + out.writeString(getMissingTime) + out.writeString(getMissingTotal) + out.writeString(indexingDeleteCurrent) + out.writeString(indexingDeleteTime) + out.writeString(indexingDeleteTotal) + out.writeString(indexingIndexCurrent) + out.writeString(indexingIndexTime) + out.writeString(indexingIndexTotal) + out.writeString(indexingIndexFailed) + out.writeString(mergesCurrent) + out.writeString(mergesCurrentDocs) + out.writeString(mergesCurrentSize) + out.writeString(mergesTotal) + out.writeString(mergesTotalDocs) + out.writeString(mergesTotalSize) + out.writeString(mergesTotalTime) + out.writeString(queryCacheMemory) + out.writeString(queryCacheEvictions) + out.writeString(recoverySourceType) + out.writeString(refreshTotal) + out.writeString(refreshTime) + out.writeString(searchFetchCurrent) + out.writeString(searchFetchTime) + out.writeString(searchFetchTotal) + out.writeString(searchOpenContexts) + out.writeString(searchQueryCurrent) + out.writeString(searchQueryTime) + out.writeString(searchQueryTotal) + out.writeString(searchScrollCurrent) + out.writeString(searchScrollTime) + out.writeString(searchScrollTotal) + out.writeString(segmentsCount) + out.writeString(segmentsMemory) + out.writeString(segmentsIndexWriterMemory) + out.writeString(segmentsVersionMapMemory) + out.writeString(fixedBitsetMemory) + out.writeString(globalCheckpoint) + out.writeString(localCheckpoint) + out.writeString(maxSeqNo) + out.writeString(syncId) + out.writeString(unassignedAt) + out.writeString(unassignedDetails) + out.writeString(unassignedFor) + out.writeString(unassignedReason) + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensions.kt b/alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensions.kt new file mode 100644 index 000000000..92605c652 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensions.kt @@ -0,0 +1,171 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util.clusterMetricsMonitorHelpers + +import org.opensearch.action.admin.cluster.health.ClusterHealthRequest +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse +import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest +import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse +import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsResponse +import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest +import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse +import org.opensearch.action.admin.cluster.state.ClusterStateRequest +import org.opensearch.action.admin.cluster.state.ClusterStateResponse +import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest +import org.opensearch.action.admin.cluster.stats.ClusterStatsResponse +import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksRequest +import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksResponse +import org.opensearch.action.admin.indices.recovery.RecoveryRequest +import org.opensearch.action.admin.indices.recovery.RecoveryResponse +import 
org.opensearch.action.admin.indices.settings.get.GetSettingsResponse +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse +import org.opensearch.alerting.opensearchapi.convertToMap +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.settings.SupportedClusterMetricsSettings +import org.opensearch.alerting.settings.SupportedClusterMetricsSettings.Companion.resolveToActionRequest +import org.opensearch.client.Client +import org.opensearch.cluster.metadata.Metadata +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.support.XContentMapValues +import org.opensearch.commons.alerting.model.ClusterMetricsInput +import org.opensearch.core.action.ActionResponse + +/** + * Calls the appropriate transport action for the API requested in the [clusterMetricsInput]. + * @param clusterMetricsInput The [ClusterMetricsInput] to resolve. + * @param client The [Client] used to call the respective transport action. + * @throws IllegalArgumentException When the requested API is not supported by this feature. + */ +suspend fun executeTransportAction(clusterMetricsInput: ClusterMetricsInput, client: Client): ActionResponse { + val request = resolveToActionRequest(clusterMetricsInput) + return when (clusterMetricsInput.clusterMetricType) { + ClusterMetricsInput.ClusterMetricType.CAT_INDICES -> { + request as CatIndicesRequestWrapper + val healthResponse: ClusterHealthResponse = client.suspendUntil { admin().cluster().health(request.clusterHealthRequest, it) } + val indexSettingsResponse: GetSettingsResponse = + client.suspendUntil { admin().indices().getSettings(request.indexSettingsRequest, it) } + val indicesResponse: IndicesStatsResponse = + client.suspendUntil { admin().indices().stats(request.indicesStatsRequest, it) } + val stateResponse: ClusterStateResponse = + client.suspendUntil { admin().cluster().state(request.clusterStateRequest, it) } + return CatIndicesResponseWrapper(healthResponse, stateResponse, indexSettingsResponse, indicesResponse) + } + ClusterMetricsInput.ClusterMetricType.CAT_PENDING_TASKS -> + client.suspendUntil { + admin().cluster().pendingClusterTasks(request as PendingClusterTasksRequest, it) + } + ClusterMetricsInput.ClusterMetricType.CAT_RECOVERY -> + client.suspendUntil { admin().indices().recoveries(request as RecoveryRequest, it) } + ClusterMetricsInput.ClusterMetricType.CAT_SHARDS -> { + request as CatShardsRequestWrapper + val stateResponse: ClusterStateResponse = + client.suspendUntil { admin().cluster().state(request.clusterStateRequest, it) } + val indicesResponse: IndicesStatsResponse = + client.suspendUntil { admin().indices().stats(request.indicesStatsRequest, it) } + return CatShardsResponseWrapper(stateResponse, indicesResponse) + } + ClusterMetricsInput.ClusterMetricType.CAT_SNAPSHOTS -> + client.suspendUntil { admin().cluster().getSnapshots(request as GetSnapshotsRequest, it) } + ClusterMetricsInput.ClusterMetricType.CAT_TASKS -> + client.suspendUntil { admin().cluster().listTasks(request as ListTasksRequest, it) } + ClusterMetricsInput.ClusterMetricType.CLUSTER_HEALTH -> + client.suspendUntil { admin().cluster().health(request as ClusterHealthRequest, it) } + ClusterMetricsInput.ClusterMetricType.CLUSTER_SETTINGS -> { + val stateResponse: ClusterStateResponse = + client.suspendUntil { admin().cluster().state(request as ClusterStateRequest, it) } + val metadata: Metadata = stateResponse.state.metadata + return ClusterGetSettingsResponse(metadata.persistentSettings(), 
metadata.transientSettings(), Settings.EMPTY)
+        }
+        ClusterMetricsInput.ClusterMetricType.CLUSTER_STATS ->
+            client.suspendUntil { admin().cluster().clusterStats(request as ClusterStatsRequest, it) }
+        ClusterMetricsInput.ClusterMetricType.NODES_STATS ->
+            client.suspendUntil { admin().cluster().nodesStats(request as NodesStatsRequest, it) }
+        else -> throw IllegalArgumentException("Unsupported API request type: ${request.javaClass.name}")
+    }
+}
+
+/**
+ * Populates a [HashMap] with the values in the [ActionResponse].
+ * @return The [ActionResponse] values formatted in a [HashMap].
+ * @throws IllegalArgumentException when the [ActionResponse] is not supported by this feature.
+ */
+fun ActionResponse.toMap(): Map<String, Any> {
+    return when (this) {
+        is ClusterHealthResponse -> redactFieldsFromResponse(
+            this.convertToMap(),
+            SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CLUSTER_HEALTH.defaultPath)
+        )
+        is ClusterStatsResponse -> redactFieldsFromResponse(
+            this.convertToMap(),
+            SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CLUSTER_STATS.defaultPath)
+        )
+        is ClusterGetSettingsResponse -> redactFieldsFromResponse(
+            this.convertToMap(),
+            SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CLUSTER_SETTINGS.defaultPath)
+        )
+        is CatIndicesResponseWrapper -> redactFieldsFromResponse(
+            this.convertToMap(),
+            SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CAT_INDICES.defaultPath)
+        )
+        is CatShardsResponseWrapper -> redactFieldsFromResponse(
+            this.convertToMap(),
+            SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CAT_SHARDS.defaultPath)
+        )
+        is NodesStatsResponse -> redactFieldsFromResponse(
+            this.convertToMap(),
+            SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.NODES_STATS.defaultPath)
+        )
+        is PendingClusterTasksResponse -> redactFieldsFromResponse(
+            this.convertToMap(),
+            SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CAT_PENDING_TASKS.defaultPath)
+        )
+        is RecoveryResponse -> redactFieldsFromResponse(
+            this.convertToMap(),
+            SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CAT_RECOVERY.defaultPath)
+        )
+        is GetSnapshotsResponse -> redactFieldsFromResponse(
+            this.convertToMap(),
+            SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CAT_SNAPSHOTS.defaultPath)
+        )
+        is ListTasksResponse -> redactFieldsFromResponse(
+            this.convertToMap(),
+            SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CAT_TASKS.defaultPath)
+        )
+        else -> throw IllegalArgumentException("Unsupported ActionResponse type: ${this.javaClass.name}")
+    }
+}
+
+/**
+ * Populates a [HashMap] with only the values that support being exposed to users.
+ * @param mappedActionResponse The response from the [ClusterMetricsInput] API call.
+ * @param supportedJsonPayload The JSON payload as configured in [SupportedClusterMetricsSettings.RESOURCE_FILE].
+ * @return The response values [HashMap] without the redacted fields.
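+ *
+ * For illustration (hypothetical values): if supportedJsonPayload contains only the key
+ * "cluster_name", a response map of {"cluster_name": "c1", "nodes": {...}} is reduced to
+ * {"cluster_name": "c1"}; top-level keys absent from supportedJsonPayload are dropped,
+ * and map-valued entries are additionally filtered down to the listed sub-fields.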
+ */
+@Suppress("UNCHECKED_CAST")
+fun redactFieldsFromResponse(
+    mappedActionResponse: Map<String, Any>,
+    supportedJsonPayload: Map<String, ArrayList<String>>
+): Map<String, Any> {
+    return when {
+        supportedJsonPayload.isEmpty() -> mappedActionResponse
+        else -> {
+            val output = hashMapOf<String, Any>()
+            for ((key, value) in supportedJsonPayload) {
+                when (val mappedValue = mappedActionResponse[key]) {
+                    is Map<*, *> -> output[key] = XContentMapValues.filter(
+                        mappedActionResponse[key] as MutableMap<String, *>?,
+                        value.toTypedArray(),
+                        arrayOf()
+                    )
+                    else -> output[key] = mappedValue ?: hashMapOf()
+                }
+            }
+            output
+        }
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationConversionUtils.kt b/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationConversionUtils.kt
new file mode 100644
index 000000000..667548c60
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationConversionUtils.kt
@@ -0,0 +1,184 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.util.destinationmigration
+
+import org.apache.http.client.utils.URIBuilder
+import org.opensearch.alerting.model.destination.Destination
+import org.opensearch.alerting.model.destination.email.EmailAccount
+import org.opensearch.alerting.model.destination.email.Recipient
+import org.opensearch.alerting.util.DestinationType
+import org.opensearch.commons.notifications.model.Chime
+import org.opensearch.commons.notifications.model.ConfigType
+import org.opensearch.commons.notifications.model.Email
+import org.opensearch.commons.notifications.model.EmailGroup
+import org.opensearch.commons.notifications.model.EmailRecipient
+import org.opensearch.commons.notifications.model.HttpMethodType
+import org.opensearch.commons.notifications.model.MethodType
+import org.opensearch.commons.notifications.model.NotificationConfig
+import org.opensearch.commons.notifications.model.Slack
+import org.opensearch.commons.notifications.model.SmtpAccount
+import org.opensearch.commons.notifications.model.Webhook
+import org.opensearch.core.common.Strings
+import java.net.URI
+import java.net.URISyntaxException
+import java.util.Locale
+
+class DestinationConversionUtils {
+
+    companion object {
+
+        fun convertDestinationToNotificationConfig(destination: Destination): NotificationConfig?
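+        // Conversion summary: CHIME -> ConfigType.CHIME, SLACK -> ConfigType.SLACK,
+        // CUSTOM_WEBHOOK -> ConfigType.WEBHOOK (endpoint rebuilt via buildUri), and
+        // EMAIL -> ConfigType.EMAIL (recipients split into direct emails and email group IDs).
+        // Any other type, including SNS until it is added to the Destination data models,
+        // yields null.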
{
+            when (destination.type) {
+                DestinationType.CHIME -> {
+                    val alertChime = destination.chime ?: return null
+                    val chime = Chime(alertChime.url)
+                    val description = "Chime destination created from the Alerting plugin"
+                    return NotificationConfig(
+                        destination.name,
+                        description,
+                        ConfigType.CHIME,
+                        chime
+                    )
+                }
+                DestinationType.SLACK -> {
+                    val alertSlack = destination.slack ?: return null
+                    val slack = Slack(alertSlack.url)
+                    val description = "Slack destination created from the Alerting plugin"
+                    return NotificationConfig(
+                        destination.name,
+                        description,
+                        ConfigType.SLACK,
+                        slack
+                    )
+                }
+                // TODO: Add this back after adding SNS to Destination data models
+//                DestinationType.SNS -> {
+//                    val alertSNS = destination.sns ?: return null
+//                    val sns = Sns(alertSNS.topicARN, alertSNS.roleARN)
+//                    val description = "SNS destination created from the Alerting plugin"
+//                    return NotificationConfig(
+//                        destination.name,
+//                        description,
+//                        ConfigType.SNS,
+//                        sns
+//                    )
+//                }
+                DestinationType.CUSTOM_WEBHOOK -> {
+                    val alertWebhook = destination.customWebhook ?: return null
+                    val uri = buildUri(
+                        alertWebhook.url,
+                        alertWebhook.scheme,
+                        alertWebhook.host,
+                        alertWebhook.port,
+                        alertWebhook.path,
+                        alertWebhook.queryParams
+                    ).toString()
+                    val methodType = when (alertWebhook.method?.uppercase(Locale.ENGLISH)) {
+                        "POST" -> HttpMethodType.POST
+                        "PUT" -> HttpMethodType.PUT
+                        "PATCH" -> HttpMethodType.PATCH
+                        else -> HttpMethodType.POST
+                    }
+                    val webhook = Webhook(uri, alertWebhook.headerParams, methodType)
+                    val description = "Webhook destination created from the Alerting plugin"
+                    return NotificationConfig(
+                        destination.name,
+                        description,
+                        ConfigType.WEBHOOK,
+                        webhook
+                    )
+                }
+                DestinationType.EMAIL -> {
+                    val alertEmail = destination.email ?: return null
+                    val recipients = mutableListOf<EmailRecipient>()
+                    val emailGroupIds = mutableListOf<String>()
+                    alertEmail.recipients.forEach {
+                        if (it.type == Recipient.RecipientType.EMAIL_GROUP) {
+                            it.emailGroupID?.let { emailGroup -> emailGroupIds.add(emailGroup) }
+                        } else it.email?.let { emailRecipient -> recipients.add(EmailRecipient(emailRecipient)) }
+                    }
+
+                    val email = Email(alertEmail.emailAccountID, recipients, emailGroupIds)
+                    val description = "Email destination created from the Alerting plugin"
+                    return NotificationConfig(
+                        destination.name,
+                        description,
+                        ConfigType.EMAIL,
+                        email
+                    )
+                }
+                else -> return null
+            }
+        }
+
+        fun convertEmailAccountToNotificationConfig(emailAccount: EmailAccount): NotificationConfig {
+            val methodType = convertAlertingToNotificationMethodType(emailAccount.method)
+            val smtpAccount = SmtpAccount(emailAccount.host, emailAccount.port, methodType, emailAccount.email)
+            val description = "Email account created from the Alerting plugin"
+            return NotificationConfig(
+                emailAccount.name,
+                description,
+                ConfigType.SMTP_ACCOUNT,
+                smtpAccount
+            )
+        }
+
+        fun convertEmailGroupToNotificationConfig(
+            emailGroup: org.opensearch.alerting.model.destination.email.EmailGroup
+        ): NotificationConfig {
+            val recipients = mutableListOf<EmailRecipient>()
+            emailGroup.emails.forEach {
+                recipients.add(EmailRecipient(it.email))
+            }
+            val notificationEmailGroup = EmailGroup(recipients)
+
+            val description = "Email group created from the Alerting plugin"
+            return NotificationConfig(
+                emailGroup.name,
+                description,
+                ConfigType.EMAIL_GROUP,
+                notificationEmailGroup
+            )
+        }
+
+        private fun buildUri(
+            endpoint: String?,
+            scheme: String?,
+            host: String?,
+            port: Int,
+            path: String?,
+            queryParams: Map<String, String>
+        ): URI?
{ + return try { + if (Strings.isNullOrEmpty(endpoint)) { + if (host == null) { + throw IllegalStateException("No host was provided when endpoint was null") + } + var uriScheme = scheme + if (Strings.isNullOrEmpty(scheme)) { + uriScheme = "https" + } + val uriBuilder = URIBuilder() + if (queryParams.isNotEmpty()) { + for ((key, value) in queryParams) uriBuilder.addParameter(key, value) + } + return uriBuilder.setScheme(uriScheme).setHost(host).setPort(port).setPath(path).build() + } + URIBuilder(endpoint).build() + } catch (e: URISyntaxException) { + throw IllegalStateException("Error creating URI", e) + } + } + + fun convertAlertingToNotificationMethodType(alertMethodType: EmailAccount.MethodType): MethodType { + return when (alertMethodType) { + EmailAccount.MethodType.NONE -> MethodType.NONE + EmailAccount.MethodType.SSL -> MethodType.SSL + EmailAccount.MethodType.TLS -> MethodType.START_TLS + } + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationMigrationCoordinator.kt b/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationMigrationCoordinator.kt new file mode 100644 index 000000000..82891396e --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationMigrationCoordinator.kt @@ -0,0 +1,106 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util.destinationmigration + +import kotlinx.coroutines.CoroutineName +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.launch +import org.apache.logging.log4j.LogManager +import org.opensearch.alerting.core.ScheduledJobIndices +import org.opensearch.client.Client +import org.opensearch.client.node.NodeClient +import org.opensearch.cluster.ClusterChangedEvent +import org.opensearch.cluster.ClusterStateListener +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.lifecycle.LifecycleListener +import org.opensearch.common.unit.TimeValue +import org.opensearch.threadpool.Scheduler +import org.opensearch.threadpool.ThreadPool +import kotlin.coroutines.CoroutineContext + +class DestinationMigrationCoordinator( + private val client: Client, + private val clusterService: ClusterService, + private val threadPool: ThreadPool, + private val scheduledJobIndices: ScheduledJobIndices +) : ClusterStateListener, CoroutineScope, LifecycleListener() { + + private val logger = LogManager.getLogger(javaClass) + + override val coroutineContext: CoroutineContext + get() = Dispatchers.Default + CoroutineName("DestinationMigrationCoordinator") + + private var scheduledMigration: Scheduler.Cancellable? 
= null + + @Volatile + private var runningLock = false + + init { + clusterService.addListener(this) + clusterService.addLifecycleListener(this) + } + + override fun clusterChanged(event: ClusterChangedEvent) { + logger.info("Detected cluster change event for destination migration") + if (DestinationMigrationUtilService.finishFlag) { + logger.info("Reset destination migration process.") + scheduledMigration?.cancel() + DestinationMigrationUtilService.finishFlag = false + } + if ( + event.localNodeClusterManager() && + !runningLock && + (scheduledMigration == null || scheduledMigration!!.isCancelled) + ) { + try { + runningLock = true + initMigrateDestinations() + } finally { + runningLock = false + } + } else if (!event.localNodeClusterManager()) { + scheduledMigration?.cancel() + } + } + + private fun initMigrateDestinations() { + if (!scheduledJobIndices.scheduledJobIndexExists()) { + logger.debug("Alerting config index is not initialized") + scheduledMigration?.cancel() + return + } + + if (!clusterService.state().nodes().isLocalNodeElectedMaster) { + scheduledMigration?.cancel() + return + } + + if (DestinationMigrationUtilService.finishFlag) { + logger.info("Destination migration is already complete, cancelling migration process.") + scheduledMigration?.cancel() + return + } + + val scheduledJob = Runnable { + launch { + try { + if (DestinationMigrationUtilService.finishFlag) { + logger.info("Cancel background destination migration process.") + scheduledMigration?.cancel() + } + + logger.info("Performing migration of destination data.") + DestinationMigrationUtilService.migrateDestinations(client as NodeClient) + } catch (e: Exception) { + logger.error("Failed to migrate destination data", e) + } + } + } + + scheduledMigration = threadPool.scheduleWithFixedDelay(scheduledJob, TimeValue.timeValueMinutes(1), ThreadPool.Names.MANAGEMENT) + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilService.kt b/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilService.kt new file mode 100644 index 000000000..f4c650a9f --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilService.kt @@ -0,0 +1,228 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util.destinationmigration + +import org.apache.logging.log4j.LogManager +import org.opensearch.action.bulk.BulkRequest +import org.opensearch.action.bulk.BulkResponse +import org.opensearch.action.delete.DeleteRequest +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.model.destination.Destination +import org.opensearch.alerting.model.destination.email.EmailAccount +import org.opensearch.alerting.model.destination.email.EmailGroup +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.util.destinationmigration.DestinationConversionUtils.Companion.convertDestinationToNotificationConfig +import org.opensearch.alerting.util.destinationmigration.DestinationConversionUtils.Companion.convertEmailAccountToNotificationConfig +import org.opensearch.alerting.util.destinationmigration.DestinationConversionUtils.Companion.convertEmailGroupToNotificationConfig +import 
org.opensearch.alerting.util.destinationmigration.NotificationApiUtils.Companion.createNotificationConfig
+import org.opensearch.client.node.NodeClient
+import org.opensearch.common.xcontent.LoggingDeprecationHandler
+import org.opensearch.common.xcontent.XContentType
+import org.opensearch.commons.ConfigConstants
+import org.opensearch.commons.alerting.model.ScheduledJob
+import org.opensearch.commons.notifications.action.CreateNotificationConfigRequest
+import org.opensearch.commons.notifications.model.NotificationConfig
+import org.opensearch.commons.notifications.model.NotificationConfigInfo
+import org.opensearch.core.common.Strings
+import org.opensearch.core.rest.RestStatus
+import org.opensearch.core.xcontent.NamedXContentRegistry
+import org.opensearch.core.xcontent.XContentParser
+import org.opensearch.core.xcontent.XContentParserUtils
+import org.opensearch.index.query.QueryBuilders
+import org.opensearch.search.builder.SearchSourceBuilder
+import org.opensearch.search.fetch.subphase.FetchSourceContext
+import java.time.Instant
+
+class DestinationMigrationUtilService {
+
+    companion object {
+
+        private val logger = LogManager.getLogger(DestinationMigrationUtilService::class)
+
+        @Volatile
+        private var runningLock = false // Guards against two migrateDestinations() processes running at once
+
+        // Used in DestinationMigrationCoordinator to cancel the scheduled process
+        @Volatile
+        var finishFlag = false
+            internal set
+
+        suspend fun migrateDestinations(client: NodeClient) {
+            if (runningLock) {
+                logger.info("There is already a destination migration process running...")
+                return
+            } else if (finishFlag) {
+                logger.info("Destination migration has finished.")
+                return
+            }
+            try {
+                runningLock = true
+
+                val emailAccountsToMigrate = retrieveConfigsToMigrate(client, "email_account")
+                val emailGroupsToMigrate = retrieveConfigsToMigrate(client, "email_group")
+                val destinationsToMigrate = retrieveConfigsToMigrate(client, "destination")
+                val configsToMigrate = emailAccountsToMigrate + emailGroupsToMigrate + destinationsToMigrate
+                logger.info(
+                    "Need to migrate ${emailAccountsToMigrate.size} email accounts, " +
+                        "${emailGroupsToMigrate.size} email groups and " +
+                        "${destinationsToMigrate.size} destinations " +
+                        "(${configsToMigrate.size} configs total)"
+                )
+                if (configsToMigrate.isEmpty()) {
+                    finishFlag = true
+                    runningLock = false
+                    return
+                }
+                val migratedConfigs = createNotificationChannelIfNotExists(client, configsToMigrate)
+                logger.info("Migrated ${migratedConfigs.size} configs")
+                val failedDeleteConfigs = deleteOldDestinations(client, migratedConfigs)
+                logger.info("Failed to delete ${failedDeleteConfigs.size} configs during migration cleanup")
+            } finally {
+                runningLock = false
+            }
+        }
+
+        private suspend fun deleteOldDestinations(client: NodeClient, destinationIds: List<String>): List<String> {
+            val bulkDeleteRequest = BulkRequest().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
+            destinationIds.forEach {
+                val deleteRequest = DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, it)
+                bulkDeleteRequest.add(deleteRequest)
+            }
+
+            val failedToDeleteDestinations = mutableListOf<String>()
+            try {
+                val bulkResponse: BulkResponse = client.suspendUntil { client.bulk(bulkDeleteRequest, it) }
+                failedToDeleteDestinations.addAll(bulkResponse.items.filter { it.isFailed }.map { it.id })
+            } catch (e: Exception) {
+                logger.error("Failed to delete all destinations", e)
+                failedToDeleteDestinations.addAll(destinationIds)
+            }
+            return failedToDeleteDestinations
+        }
+
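+        // Creates a Notification config for each candidate, reusing the old Destination/EmailAccount/
+        // EmailGroup document ID as the new config ID. A "version conflict, document already exists"
+        // response is therefore treated as "already migrated", which keeps re-runs idempotent.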
+        private suspend fun createNotificationChannelIfNotExists(
+            client: NodeClient,
+            notificationConfigInfoList: List<Pair<NotificationConfigInfo, String>>
+        ): List<String> {
+            val migratedNotificationConfigs = mutableListOf<String>()
+            notificationConfigInfoList.forEach {
+                val notificationConfigInfo = it.first
+                val userStr = it.second
+                val createNotificationConfigRequest = CreateNotificationConfigRequest(
+                    notificationConfigInfo.notificationConfig,
+                    notificationConfigInfo.configId
+                )
+                try {
+                    // TODO: recreate user object to pass along the same permissions. Make sure this works when user based security is removed
+                    client.threadPool().threadContext.stashContext().use {
+                        if (userStr.isNotBlank()) {
+                            client.threadPool().threadContext
+                                .putTransient(ConfigConstants.OPENSEARCH_SECURITY_USER_INFO_THREAD_CONTEXT, userStr)
+                        }
+                        val createResponse = createNotificationConfig(client, createNotificationConfigRequest)
+                        migratedNotificationConfigs.add(createResponse.configId)
+                        logger.debug("Migrated destination: ${createResponse.configId}")
+                    }
+                } catch (e: Exception) {
+                    if (e.message?.contains("version conflict, document already exists") == true) {
+                        migratedNotificationConfigs.add(notificationConfigInfo.configId)
+                    } else {
+                        logger.warn(
+                            "Failed to migrate Destination ${notificationConfigInfo.configId} because the " +
+                                "channel could not be created in the Notification plugin.",
+                            e
+                        )
+                    }
+                }
+            }
+            return migratedNotificationConfigs
+        }
+
+        private suspend fun retrieveConfigsToMigrate(client: NodeClient, configName: String): List<Pair<NotificationConfigInfo, String>> {
+            var start = 0
+            val size = 100
+            val notificationConfigInfoList = mutableListOf<Pair<NotificationConfigInfo, String>>()
+            var hasMoreResults = true
+
+            while (hasMoreResults) {
+                val searchSourceBuilder = SearchSourceBuilder()
+                    .size(size)
+                    .from(start)
+                    .fetchSource(FetchSourceContext(true, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY))
+                    .seqNoAndPrimaryTerm(true)
+                    .version(true)
+                val queryBuilder = QueryBuilders.boolQuery()
+                    .should(QueryBuilders.existsQuery(configName))
+                searchSourceBuilder.query(queryBuilder)
+
+                val searchRequest = SearchRequest()
+                    .source(searchSourceBuilder)
+                    .indices(ScheduledJob.SCHEDULED_JOBS_INDEX)
+                val response: SearchResponse = client.suspendUntil { client.search(searchRequest, it) }
+
+                if (response.status() != RestStatus.OK) {
+                    logger.error("Failed to retrieve ${configName}s to migrate")
+                    hasMoreResults = false
+                } else {
+                    if (response.hits.hits.isEmpty()) {
+                        hasMoreResults = false
+                    }
+                    for (hit in response.hits) {
+                        val xcp = XContentType.JSON.xContent()
+                            .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, hit.sourceAsString)
+                        var notificationConfig: NotificationConfig?
= null + var userStr = "" + when (configName) { + "email_group" -> { + val emailGroup = EmailGroup.parseWithType(xcp, hit.id, hit.version) + notificationConfig = convertEmailGroupToNotificationConfig(emailGroup) + } + "email_account" -> { + val emailAccount = EmailAccount.parseWithType(xcp, hit.id, hit.version) + notificationConfig = convertEmailAccountToNotificationConfig(emailAccount) + } + "destination" -> { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, xcp.nextToken(), xcp) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + val destination = Destination.parse( + xcp, + hit.id, + hit.version, + hit.seqNo.toInt(), + hit.primaryTerm.toInt() + ) + userStr = destination.user.toString() + notificationConfig = convertDestinationToNotificationConfig(destination) + } + else -> logger.info("Unrecognized config name [$configName] to migrate") + } + + if (notificationConfig != null) { + notificationConfigInfoList.add( + Pair( + NotificationConfigInfo( + hit.id, + Instant.now(), + Instant.now(), + notificationConfig + ), + userStr + ) + ) + } + } + } + + start += size + } + + return notificationConfigInfoList + } + } +} diff --git a/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/NotificationApiUtils.kt b/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/NotificationApiUtils.kt new file mode 100644 index 000000000..9d77e5b17 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/NotificationApiUtils.kt @@ -0,0 +1,172 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util.destinationmigration + +import org.apache.logging.log4j.LogManager +import org.opensearch.OpenSearchSecurityException +import org.opensearch.OpenSearchStatusException +import org.opensearch.action.bulk.BackoffPolicy +import org.opensearch.alerting.model.destination.Destination +import org.opensearch.alerting.opensearchapi.retryForNotification +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.client.Client +import org.opensearch.client.node.NodeClient +import org.opensearch.common.unit.TimeValue +import org.opensearch.commons.ConfigConstants +import org.opensearch.commons.destination.message.LegacyBaseMessage +import org.opensearch.commons.notifications.NotificationsPluginInterface +import org.opensearch.commons.notifications.action.CreateNotificationConfigRequest +import org.opensearch.commons.notifications.action.CreateNotificationConfigResponse +import org.opensearch.commons.notifications.action.GetNotificationConfigRequest +import org.opensearch.commons.notifications.action.GetNotificationConfigResponse +import org.opensearch.commons.notifications.action.LegacyPublishNotificationRequest +import org.opensearch.commons.notifications.action.LegacyPublishNotificationResponse +import org.opensearch.commons.notifications.action.SendNotificationResponse +import org.opensearch.commons.notifications.model.ChannelMessage +import org.opensearch.commons.notifications.model.EventSource +import org.opensearch.commons.notifications.model.NotificationConfigInfo +import org.opensearch.commons.notifications.model.SeverityType +import org.opensearch.core.rest.RestStatus + +class NotificationApiUtils { + + companion object { + + private val logger = LogManager.getLogger(NotificationApiUtils::class) 
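+
+        // Calls that create Notification configs are retried with the policy below:
+        // constant backoff, 100 ms between attempts, at most two retries.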
+ + private val defaultRetryPolicy = + BackoffPolicy.constantBackoff(TimeValue.timeValueMillis(100), 2) + + /** + * Gets a NotificationConfigInfo object by ID if it exists. + */ + suspend fun getNotificationConfigInfo(client: NodeClient, id: String): NotificationConfigInfo? { + return try { + val res: GetNotificationConfigResponse = getNotificationConfig(client, GetNotificationConfigRequest(setOf(id))) + res.searchResult.objectList.firstOrNull() + } catch (e: OpenSearchSecurityException) { + throw e + } catch (e: OpenSearchStatusException) { + if (e.status() == RestStatus.NOT_FOUND) { + logger.debug("Notification config [$id] was not found") + } + null + } + } + + private suspend fun getNotificationConfig( + client: NodeClient, + getNotificationConfigRequest: GetNotificationConfigRequest + ): GetNotificationConfigResponse { + val getNotificationConfigResponse: GetNotificationConfigResponse = NotificationsPluginInterface.suspendUntil { + this.getNotificationConfig( + client, + getNotificationConfigRequest, + it + ) + } + return getNotificationConfigResponse + } + + suspend fun createNotificationConfig( + client: NodeClient, + createNotificationConfigRequest: CreateNotificationConfigRequest, + retryPolicy: BackoffPolicy = defaultRetryPolicy + ): CreateNotificationConfigResponse { + lateinit var createNotificationConfigResponse: CreateNotificationConfigResponse + val userStr = client.threadPool().threadContext + .getTransient(ConfigConstants.OPENSEARCH_SECURITY_USER_INFO_THREAD_CONTEXT) + client.threadPool().threadContext.stashContext().use { + client.threadPool().threadContext.putTransient(ConfigConstants.OPENSEARCH_SECURITY_USER_INFO_THREAD_CONTEXT, userStr) + retryPolicy.retryForNotification(logger) { + createNotificationConfigResponse = NotificationsPluginInterface.suspendUntil { + this.createNotificationConfig( + client, + createNotificationConfigRequest, + it + ) + } + } + } + return createNotificationConfigResponse + } + } +} + +/** + * Extension function for publishing a notification to a legacy destination. + * + * We now support the new channels from the Notification plugin. However, we still need to support + * the old legacy destinations that have not been migrated to Notification configs. To accommodate this even after removing the + * notification logic in Alerting, we have a separate API in the NotificationsPluginInterface that allows + * us to publish these old legacy ones directly. + */ +suspend fun LegacyBaseMessage.publishLegacyNotification(client: Client): String { + val baseMessage = this + val res: LegacyPublishNotificationResponse = NotificationsPluginInterface.suspendUntil { + this.publishLegacyNotification( + (client as NodeClient), + LegacyPublishNotificationRequest(baseMessage), + it + ) + } + validateResponseStatus(RestStatus.fromCode(res.destinationResponse.statusCode), res.destinationResponse.responseContent) + return res.destinationResponse.responseContent +} + +/** + * Extension function for publishing a notification to a channel in the Notification plugin. 
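+ * The channel is addressed by this config's ID, which is passed both as the EventSource
+ * reference and as the single entry in the channel ID list.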
+ */ +suspend fun NotificationConfigInfo.sendNotification(client: Client, title: String, compiledMessage: String): String { + val config = this + val res: SendNotificationResponse = NotificationsPluginInterface.suspendUntil { + this.sendNotification( + (client as NodeClient), + EventSource(title, config.configId, SeverityType.INFO), + ChannelMessage(compiledMessage, null, null), + listOf(config.configId), + it + ) + } + validateResponseStatus(res.getStatus(), res.notificationEvent.toString()) + return res.notificationEvent.toString() +} + +/** + * A placeholder Alerting title will be used if no subject is passed in. + */ +fun NotificationConfigInfo.getTitle(subject: String?): String { + val defaultTitle = "Alerting-Notification Action" + return if (subject.isNullOrEmpty()) defaultTitle else subject +} + +/** + * All valid response statuses. + */ +private val VALID_RESPONSE_STATUS = setOf( + RestStatus.OK.status, + RestStatus.CREATED.status, + RestStatus.ACCEPTED.status, + RestStatus.NON_AUTHORITATIVE_INFORMATION.status, + RestStatus.NO_CONTENT.status, + RestStatus.RESET_CONTENT.status, + RestStatus.PARTIAL_CONTENT.status, + RestStatus.MULTI_STATUS.status +) + +@Throws(OpenSearchStatusException::class) +fun validateResponseStatus(restStatus: RestStatus, responseContent: String) { + if (!VALID_RESPONSE_STATUS.contains(restStatus.status)) { + throw OpenSearchStatusException("Failed: $responseContent", restStatus) + } +} + +/** + * Small data class used to hold either a Destination or a Notification channel config. + * This is used since an ID being referenced in a Monitor action could be either config depending on if + * it's prior to or after migration. + */ +data class NotificationActionConfigs(val destination: Destination?, val channel: NotificationConfigInfo?) 
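Taken together, the helpers above give action execution a two-step publish path: resolve the ID to a
migrated Notification channel if one exists, otherwise fall back to the legacy publish API. A minimal
sketch of that flow (not part of this patch; the function name and call site are hypothetical, while
the helper signatures are the ones defined above):

import org.opensearch.alerting.util.destinationmigration.NotificationApiUtils
import org.opensearch.alerting.util.destinationmigration.getTitle
import org.opensearch.alerting.util.destinationmigration.publishLegacyNotification
import org.opensearch.alerting.util.destinationmigration.sendNotification
import org.opensearch.client.node.NodeClient
import org.opensearch.commons.destination.message.LegacyBaseMessage

suspend fun publishViaChannelOrLegacy(
    client: NodeClient,
    configId: String,
    legacyMessage: LegacyBaseMessage,
    subject: String?,
    message: String
): String {
    // A migrated Notification config takes precedence over a legacy destination with the same ID.
    val channel = NotificationApiUtils.getNotificationConfigInfo(client, configId)
    return channel?.let { it.sendNotification(client, it.getTitle(subject), message) }
        ?: legacyMessage.publishLegacyNotification(client)
}

This is the same precedence that WorkflowRunner.getConfigAndSendNotification applies further down in
this patch.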
diff --git a/alerting/bin/main/org/opensearch/alerting/workflow/CompositeWorkflowRunner.kt b/alerting/bin/main/org/opensearch/alerting/workflow/CompositeWorkflowRunner.kt new file mode 100644 index 000000000..94e8b9bc3 --- /dev/null +++ b/alerting/bin/main/org/opensearch/alerting/workflow/CompositeWorkflowRunner.kt @@ -0,0 +1,395 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.workflow + +import org.apache.logging.log4j.LogManager +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.search.SearchResponse +import org.opensearch.alerting.BucketLevelMonitorRunner +import org.opensearch.alerting.DocumentLevelMonitorRunner +import org.opensearch.alerting.MonitorRunnerExecutionContext +import org.opensearch.alerting.QueryLevelMonitorRunner +import org.opensearch.alerting.WorkflowMetadataService +import org.opensearch.alerting.model.ChainedAlertTriggerRunResult +import org.opensearch.alerting.model.MonitorRunResult +import org.opensearch.alerting.model.WorkflowRunResult +import org.opensearch.alerting.opensearchapi.suspendUntil +import org.opensearch.alerting.script.ChainedAlertTriggerExecutionContext +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.isDocLevelMonitor +import org.opensearch.alerting.util.isQueryLevelMonitor +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.ChainedAlertTrigger +import org.opensearch.commons.alerting.model.CompositeInput +import org.opensearch.commons.alerting.model.DataSources +import org.opensearch.commons.alerting.model.Delegate +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.commons.alerting.util.isBucketLevelMonitor +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import org.opensearch.index.query.QueryBuilders +import org.opensearch.index.query.QueryBuilders.boolQuery +import org.opensearch.index.query.QueryBuilders.existsQuery +import org.opensearch.index.query.QueryBuilders.termsQuery +import java.time.Instant +import java.time.LocalDateTime +import java.time.ZoneOffset +import java.util.UUID + +object CompositeWorkflowRunner : WorkflowRunner() { + + private val logger = LogManager.getLogger(javaClass) + + override suspend fun runWorkflow( + workflow: Workflow, + monitorCtx: MonitorRunnerExecutionContext, + periodStart: Instant, + periodEnd: Instant, + dryRun: Boolean, + ): WorkflowRunResult { + val workflowExecutionStartTime = Instant.now() + + val isTempWorkflow = dryRun || workflow.id == Workflow.NO_ID + + val executionId = generateExecutionId(isTempWorkflow, workflow) + + val (workflowMetadata, _) = WorkflowMetadataService.getOrCreateWorkflowMetadata( + workflow = workflow, + skipIndex = isTempWorkflow, + executionId = executionId + ) + var dataSources: DataSources? 
= null
+        logger.debug("Workflow ${workflow.id} in $executionId execution is running")
+        val delegates = (workflow.inputs[0] as CompositeInput).sequence.delegates.sortedBy { it.order }
+        var monitors: List<Monitor>
+
+        try {
+            monitors = monitorCtx.workflowService!!.getMonitorsById(delegates.map { it.monitorId }, delegates.size)
+        } catch (e: Exception) {
+            logger.error("Failed getting workflow delegates. Error: ${e.message}", e)
+            return WorkflowRunResult(
+                workflow.id,
+                workflow.name,
+                emptyList(),
+                workflowExecutionStartTime,
+                Instant.now(),
+                executionId,
+                AlertingException.wrap(e)
+            )
+        }
+        // Validate the monitors size
+        validateMonitorSize(delegates, monitors, workflow)
+        val monitorsById = monitors.associateBy { it.id }
+        val resultList = mutableListOf<MonitorRunResult<*>>()
+        var lastErrorDelegateRun: Exception? = null
+
+        for (delegate in delegates) {
+            var indexToDocIds = mapOf<String, List<String>>()
+            val delegateMonitor = monitorsById[delegate.monitorId]
+                ?: throw AlertingException.wrap(
+                    IllegalStateException("Delegate monitor ${delegate.monitorId} not found for the workflow ${workflow.id}")
+                )
+            if (delegate.chainedMonitorFindings != null) {
+                val chainedMonitorIds: MutableList<String> = mutableListOf()
+                if (delegate.chainedMonitorFindings!!.monitorId.isNullOrBlank()) {
+                    chainedMonitorIds.addAll(delegate.chainedMonitorFindings!!.monitorIds)
+                } else {
+                    chainedMonitorIds.add(delegate.chainedMonitorFindings!!.monitorId!!)
+                }
+                val chainedMonitors = mutableListOf<Monitor>()
+                chainedMonitorIds.forEach {
+                    val chainedMonitor = monitorsById[it]
+                        ?: throw AlertingException.wrap(
+                            IllegalStateException("Chained finding monitor $it not found for the workflow ${workflow.id}")
+                        )
+                    chainedMonitors.add(chainedMonitor)
+                }
+
+                try {
+                    indexToDocIds = monitorCtx.workflowService!!.getFindingDocIdsByExecutionId(chainedMonitors, executionId)
+                } catch (e: Exception) {
+                    logger.error("Failed to execute workflow due to failure in chained findings. Error: ${e.message}", e)
+                    return WorkflowRunResult(
+                        workflow.id, workflow.name, emptyList(), workflowExecutionStartTime, Instant.now(), executionId,
+                        AlertingException.wrap(e)
+                    )
+                }
+            }
+            val workflowRunContext = WorkflowRunContext(
+                workflowId = workflowMetadata.workflowId,
+                workflowMetadataId = workflowMetadata.id,
+                chainedMonitorId = delegate.chainedMonitorFindings?.monitorId,
+                matchingDocIdsPerIndex = indexToDocIds,
+                auditDelegateMonitorAlerts = workflow.auditDelegateMonitorAlerts ?: true
+            )
+            try {
+                dataSources = delegateMonitor.dataSources
+                val delegateRunResult =
+                    runDelegateMonitor(delegateMonitor, monitorCtx, periodStart, periodEnd, dryRun, workflowRunContext, executionId)
+                resultList.add(delegateRunResult!!)
+            } catch (ex: Exception) {
+                logger.error("Error executing workflow delegate monitor ${delegate.monitorId}", ex)
+                lastErrorDelegateRun = AlertingException.wrap(ex)
+                break
+            }
+        }
+        logger.debug("Workflow ${workflow.id} delegate monitors in execution $executionId completed")
+        // Update metadata only if the workflow is not temporary
+        if (!isTempWorkflow) {
+            WorkflowMetadataService.upsertWorkflowMetadata(
+                workflowMetadata.copy(latestRunTime = workflowExecutionStartTime, latestExecutionId = executionId),
+                true
+            )
+        }
+        val triggerResults = mutableMapOf<String, ChainedAlertTriggerRunResult>()
+        val workflowRunResult = WorkflowRunResult(
+            workflowId = workflow.id,
+            workflowName = workflow.name,
+            monitorRunResults = resultList,
+            executionStartTime = workflowExecutionStartTime,
+            executionEndTime = null,
+            executionId = executionId,
+            error = lastErrorDelegateRun,
+            triggerResults = triggerResults
+        )
+        val currentAlerts = try {
+            monitorCtx.alertIndices!!.createOrUpdateAlertIndex(dataSources!!)
+            monitorCtx.alertIndices!!.createOrUpdateInitialAlertHistoryIndex(dataSources)
+            monitorCtx.alertService!!.loadCurrentAlertsForWorkflow(workflow, dataSources)
+        } catch (e: Exception) {
+            // We can't save ERROR alerts to the index here as we don't know if there are existing ACTIVE alerts
+            val id = if (workflow.id.trim().isEmpty()) "_na_" else workflow.id
+            logger.error("Error loading alerts for workflow: $id", e)
+            return workflowRunResult.copy(error = e)
+        }
+        try {
+            monitorCtx.alertIndices!!.createOrUpdateAlertIndex(dataSources)
+            val updatedAlerts = mutableListOf<Alert>()
+            val monitorIdToAlertIdsMap = fetchAlertsGeneratedInCurrentExecution(dataSources, executionId, monitorCtx, workflow)
+            for (trigger in workflow.triggers) {
+                val currentAlert = currentAlerts[trigger]
+                val caTrigger = trigger as ChainedAlertTrigger
+                val triggerCtx = ChainedAlertTriggerExecutionContext(
+                    workflow = workflow,
+                    workflowRunResult = workflowRunResult,
+                    periodStart = workflowRunResult.executionStartTime,
+                    periodEnd = workflowRunResult.executionEndTime,
+                    trigger = caTrigger,
+                    alertGeneratingMonitors = monitorIdToAlertIdsMap.keys,
+                    monitorIdToAlertIdsMap = monitorIdToAlertIdsMap,
+                    alert = currentAlert
+                )
+                runChainedAlertTrigger(
+                    monitorCtx,
+                    workflow,
+                    trigger,
+                    executionId,
+                    triggerCtx,
+                    dryRun,
+                    triggerResults,
+                    updatedAlerts
+                )
+            }
+            if (!dryRun && workflow.id != Workflow.NO_ID && updatedAlerts.isNotEmpty()) {
+                monitorCtx.retryPolicy?.let {
+                    monitorCtx.alertService!!.saveAlerts(
+                        dataSources,
+                        updatedAlerts,
+                        it,
+                        routingId = workflow.id
+                    )
+                }
+            }
+        } catch (e: Exception) {
+            // We can't save ERROR alerts to the index here as we don't know if there are existing ACTIVE alerts
+            val id = if (workflow.id.trim().isEmpty()) "_na_" else workflow.id
+            logger.error("Error loading current chained alerts for workflow: $id", e)
+            return WorkflowRunResult(
+                workflowId = workflow.id,
+                workflowName = workflow.name,
+                monitorRunResults = emptyList(),
+                executionStartTime = workflowExecutionStartTime,
+                executionEndTime = Instant.now(),
+                executionId = executionId,
+                error = AlertingException.wrap(e),
+                triggerResults = emptyMap()
+            )
+        }
+        workflowRunResult.executionEndTime = Instant.now()
+        return workflowRunResult
+    }
+
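+    /**
+     * Dispatches a delegate to the runner that matches its monitor type: bucket-level,
+     * document-level, or query-level. Any other monitor type is rejected as unsupported.
+     */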
+    private suspend fun runDelegateMonitor(
+        delegateMonitor: Monitor,
+        monitorCtx: MonitorRunnerExecutionContext,
+        periodStart: Instant,
+        periodEnd: Instant,
+        dryRun: Boolean,
+        workflowRunContext: WorkflowRunContext,
+        executionId: String,
+    ): MonitorRunResult<*>? {
+        if (delegateMonitor.isBucketLevelMonitor()) {
+            return BucketLevelMonitorRunner.runMonitor(
+                delegateMonitor,
+                monitorCtx,
+                periodStart,
+                periodEnd,
+                dryRun,
+                workflowRunContext,
+                executionId
+            )
+        } else if (delegateMonitor.isDocLevelMonitor()) {
+            return DocumentLevelMonitorRunner.runMonitor(
+                delegateMonitor,
+                monitorCtx,
+                periodStart,
+                periodEnd,
+                dryRun,
+                workflowRunContext,
+                executionId
+            )
+        } else if (delegateMonitor.isQueryLevelMonitor()) {
+            return QueryLevelMonitorRunner.runMonitor(
+                delegateMonitor,
+                monitorCtx,
+                periodStart,
+                periodEnd,
+                dryRun,
+                workflowRunContext,
+                executionId
+            )
+        } else {
+            throw AlertingException.wrap(
+                IllegalStateException("Unsupported monitor type ${delegateMonitor.monitorType}")
+            )
+        }
+    }
+
+    fun generateExecutionId(
+        isTempWorkflow: Boolean,
+        workflow: Workflow,
+    ): String {
+        val randomPart = "_${LocalDateTime.now(ZoneOffset.UTC)}_${UUID.randomUUID()}"
+        return if (isTempWorkflow) randomPart else workflow.id.plus(randomPart)
+    }
+
+    private fun validateMonitorSize(
+        delegates: List<Delegate>,
+        monitors: List<Monitor>,
+        workflow: Workflow,
+    ) {
+        if (delegates.size != monitors.size) {
+            val diffMonitorIds = delegates.map { it.monitorId }.minus(monitors.map { it.id }.toSet()).joinToString()
+            logger.error("Delegate monitors $diffMonitorIds don't exist for the workflow ${workflow.id}")
+            throw AlertingException.wrap(
+                IllegalStateException("Delegate monitors $diffMonitorIds don't exist for the workflow ${workflow.id}")
+            )
+        }
+    }
+
+    private suspend fun runChainedAlertTrigger(
+        monitorCtx: MonitorRunnerExecutionContext,
+        workflow: Workflow,
+        trigger: ChainedAlertTrigger,
+        executionId: String,
+        triggerCtx: ChainedAlertTriggerExecutionContext,
+        dryRun: Boolean,
+        triggerResults: MutableMap<String, ChainedAlertTriggerRunResult>,
+        updatedAlerts: MutableList<Alert>,
+    ) {
+        val triggerRunResult = monitorCtx.triggerService!!.runChainedAlertTrigger(
+            workflow, trigger, triggerCtx.alertGeneratingMonitors, triggerCtx.monitorIdToAlertIdsMap
+        )
+        triggerResults[trigger.id] = triggerRunResult
+        if (monitorCtx.triggerService!!.isChainedAlertTriggerActionable(triggerCtx, triggerRunResult)) {
+            for (action in trigger.actions) {
+                triggerRunResult.actionResults[action.id] = this.runAction(action, triggerCtx, monitorCtx, workflow, dryRun)
+            }
+        }
+        val alert = monitorCtx.alertService!!.composeChainedAlert(
+            triggerCtx, executionId, workflow, triggerRunResult.associatedAlertIds.toList(), triggerRunResult
+        )
+        if (alert != null) {
+            updatedAlerts.add(alert)
+        }
+    }
+
+    private suspend fun fetchAlertsGeneratedInCurrentExecution(
+        dataSources: DataSources,
+        executionId: String,
+        monitorCtx: MonitorRunnerExecutionContext,
+        workflow: Workflow,
+    ): MutableMap<String, MutableSet<String>> {
+        try {
+            val searchRequest =
+                SearchRequest(getDelegateMonitorAlertIndex(dataSources, workflow, monitorCtx.alertIndices!!.isAlertHistoryEnabled()))
+            val queryBuilder = boolQuery()
+            queryBuilder.must(QueryBuilders.termQuery("execution_id", executionId))
+            queryBuilder.must(QueryBuilders.termQuery("state", getDelegateMonitorAlertState(workflow)))
+            val noErrorQuery = boolQuery()
+                .should(boolQuery().mustNot(existsQuery(Alert.ERROR_MESSAGE_FIELD)))
+                .should(termsQuery(Alert.ERROR_MESSAGE_FIELD, ""))
+            queryBuilder.must(noErrorQuery)
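+            // The filter above keeps only alerts from clean delegate runs: documents with no
+            // error_message field at all, or with an empty one.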
+            searchRequest.source().query(queryBuilder).size(9999)
+            val searchResponse: SearchResponse = monitorCtx.client!!.suspendUntil { monitorCtx.client!!.search(searchRequest, it) }
+            val alerts = searchResponse.hits.map { hit ->
+                val xcp = XContentHelper.createParser(
+                    monitorCtx.xContentRegistry, LoggingDeprecationHandler.INSTANCE,
+                    hit.sourceRef, XContentType.JSON
+                )
+                XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp)
+                val alert = Alert.parse(xcp, hit.id, hit.version)
+                alert
+            }
+            val map = mutableMapOf<String, MutableSet<String>>()
+            for (alert in alerts) {
+                if (map.containsKey(alert.monitorId)) {
+                    map[alert.monitorId]!!.add(alert.id)
+                } else {
+                    map[alert.monitorId] = mutableSetOf(alert.id)
+                }
+            }
+            return map
+        } catch (e: Exception) {
+            logger.error("Failed to get alerts generated by delegate monitors in current execution $executionId", e)
+            return mutableMapOf()
+        }
+    }
+
+    fun getDelegateMonitorAlertIndex(
+        dataSources: DataSources,
+        workflow: Workflow,
+        isAlertHistoryEnabled: Boolean,
+    ): String {
+        return if (workflow.triggers.isNotEmpty()) {
+            if (isAlertHistoryEnabled) {
+                dataSources.alertsHistoryIndex!!
+            } else dataSources.alertsIndex
+        } else dataSources.alertsIndex
+    }
+
+    fun getDelegateMonitorAlertState(
+        workflow: Workflow,
+    ): Alert.State {
+        return if (workflow.triggers.isNotEmpty()) {
+            Alert.State.AUDIT
+        } else Alert.State.ACTIVE
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/alerting/workflow/WorkflowRunContext.kt b/alerting/bin/main/org/opensearch/alerting/workflow/WorkflowRunContext.kt
new file mode 100644
index 000000000..14488a16a
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/workflow/WorkflowRunContext.kt
@@ -0,0 +1,15 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.workflow
+
+data class WorkflowRunContext(
+    // In case of a dry run this is a randomly generated id, while in other cases it's the workflowId
+    val workflowId: String,
+    val workflowMetadataId: String,
+    val chainedMonitorId: String?,
+    val matchingDocIdsPerIndex: Map<String, List<String>>,
+    val auditDelegateMonitorAlerts: Boolean
+)
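For orientation, this is the shape of the context each delegate receives when it is gated on an
upstream monitor's findings (all literal values below are illustrative, not taken from the patch):

val runContext = WorkflowRunContext(
    workflowId = "wf-1",                      // real runs use the workflow's ID; dry runs use a generated one
    workflowMetadataId = "wf-1-metadata",
    chainedMonitorId = "upstream-monitor-id", // monitor whose findings gate this delegate
    matchingDocIdsPerIndex = mapOf("app-logs" to listOf("docId-1", "docId-2")),
    auditDelegateMonitorAlerts = true         // delegate alerts are stored in AUDIT state
)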
diff --git a/alerting/bin/main/org/opensearch/alerting/workflow/WorkflowRunner.kt b/alerting/bin/main/org/opensearch/alerting/workflow/WorkflowRunner.kt
new file mode 100644
index 000000000..ce6ac23f9
--- /dev/null
+++ b/alerting/bin/main/org/opensearch/alerting/workflow/WorkflowRunner.kt
@@ -0,0 +1,199 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.workflow
+
+import org.opensearch.OpenSearchSecurityException
+import org.opensearch.alerting.MonitorRunnerExecutionContext
+import org.opensearch.alerting.MonitorRunnerService
+import org.opensearch.alerting.action.GetDestinationsAction
+import org.opensearch.alerting.action.GetDestinationsRequest
+import org.opensearch.alerting.action.GetDestinationsResponse
+import org.opensearch.alerting.model.ActionRunResult
+import org.opensearch.alerting.model.WorkflowRunResult
+import org.opensearch.alerting.model.destination.Destination
+import org.opensearch.alerting.opensearchapi.InjectorContextElement
+import org.opensearch.alerting.opensearchapi.suspendUntil
+import org.opensearch.alerting.opensearchapi.withClosableContext
+import org.opensearch.alerting.script.ChainedAlertTriggerExecutionContext
+import org.opensearch.alerting.util.destinationmigration.NotificationActionConfigs
+import org.opensearch.alerting.util.destinationmigration.NotificationApiUtils
+import org.opensearch.alerting.util.destinationmigration.getTitle
+import org.opensearch.alerting.util.destinationmigration.publishLegacyNotification
+import org.opensearch.alerting.util.destinationmigration.sendNotification
+import org.opensearch.alerting.util.isAllowed
+import org.opensearch.alerting.util.isTestAction
+import org.opensearch.client.node.NodeClient
+import org.opensearch.commons.alerting.model.Table
+import org.opensearch.commons.alerting.model.Workflow
+import org.opensearch.commons.alerting.model.action.Action
+import org.opensearch.commons.notifications.model.NotificationConfigInfo
+import org.opensearch.core.common.Strings
+import org.opensearch.script.Script
+import org.opensearch.script.TemplateScript
+import java.time.Instant
+
+abstract class WorkflowRunner {
+    abstract suspend fun runWorkflow(
+        workflow: Workflow,
+        monitorCtx: MonitorRunnerExecutionContext,
+        periodStart: Instant,
+        periodEnd: Instant,
+        dryRun: Boolean
+    ): WorkflowRunResult
+
+    suspend fun runAction(
+        action: Action,
+        ctx: ChainedAlertTriggerExecutionContext,
+        monitorCtx: MonitorRunnerExecutionContext,
+        workflow: Workflow,
+        dryrun: Boolean
+    ): ActionRunResult {
+        return try {
+            if (!MonitorRunnerService.isActionActionable(action, ctx.alert)) {
+                return ActionRunResult(action.id, action.name, mapOf(), true, null, null)
+            }
+            val actionOutput = mutableMapOf<String, String>()
+            actionOutput[Action.SUBJECT] = if (action.subjectTemplate != null) {
+                compileTemplate(action.subjectTemplate!!, ctx)
+            } else ""
+            actionOutput[Action.MESSAGE] = compileTemplate(action.messageTemplate, ctx)
+            if (Strings.isNullOrEmpty(actionOutput[Action.MESSAGE])) {
+                throw IllegalStateException("Message content missing in the Destination with id: ${action.destinationId}")
+            }
+            if (!dryrun) {
+                val client = monitorCtx.client
+                client!!.threadPool().threadContext.stashContext().use {
+                    withClosableContext(
+                        InjectorContextElement(
+                            workflow.id,
+                            monitorCtx.settings!!,
+                            monitorCtx.threadPool!!.threadContext,
+                            workflow.user?.roles,
+                            workflow.user
+                        )
+                    ) {
+                        actionOutput[Action.MESSAGE_ID] = getConfigAndSendNotification(
+                            action,
+                            monitorCtx,
+                            actionOutput[Action.SUBJECT],
+                            actionOutput[Action.MESSAGE]!!
+ ) + } + } + } + ActionRunResult(action.id, action.name, actionOutput, false, MonitorRunnerService.currentTime(), null) + } catch (e: Exception) { + ActionRunResult(action.id, action.name, mapOf(), false, MonitorRunnerService.currentTime(), e) + } + } + + protected suspend fun getConfigAndSendNotification( + action: Action, + monitorCtx: MonitorRunnerExecutionContext, + subject: String?, + message: String + ): String { + val config = getConfigForNotificationAction(action, monitorCtx) + if (config.destination == null && config.channel == null) { + throw IllegalStateException("Unable to find a Notification Channel or Destination config with id [${action.destinationId}]") + } + + // Adding a check on TEST_ACTION Destination type here to avoid supporting it as a LegacyBaseMessage type + // just for Alerting integration tests + if (config.destination?.isTestAction() == true) { + return "test action" + } + + if (config.destination?.isAllowed(monitorCtx.allowList) == false) { + throw IllegalStateException( + "Monitor contains a Destination type that is not allowed: ${config.destination.type}" + ) + } + + var actionResponseContent = "" + actionResponseContent = config.channel + ?.sendNotification( + monitorCtx.client!!, + config.channel.getTitle(subject), + message + ) ?: actionResponseContent + + actionResponseContent = config.destination + ?.buildLegacyBaseMessage(subject, message, monitorCtx.destinationContextFactory!!.getDestinationContext(config.destination)) + ?.publishLegacyNotification(monitorCtx.client!!) + ?: actionResponseContent + + return actionResponseContent + } + + /** + * The "destination" ID referenced in a Monitor Action could either be a Notification config or a Destination config + * depending on whether the background migration process has already migrated it from a Destination to a Notification config. + * + * To cover both of these cases, the Notification config will take precedence and if it is not found, the Destination will be retrieved. + */ + private suspend fun getConfigForNotificationAction( + action: Action, + monitorCtx: MonitorRunnerExecutionContext + ): NotificationActionConfigs { + var destination: Destination? = null + var notificationPermissionException: Exception? = null + + var channel: NotificationConfigInfo? 
= null
+        try {
+            channel = NotificationApiUtils.getNotificationConfigInfo(monitorCtx.client as NodeClient, action.destinationId)
+        } catch (e: OpenSearchSecurityException) {
+            notificationPermissionException = e
+        }
+
+        // If the channel was not found, try to retrieve the Destination
+        if (channel == null) {
+            destination = try {
+                val table = Table(
+                    "asc",
+                    "destination.name.keyword",
+                    null,
+                    1,
+                    0,
+                    null
+                )
+                val getDestinationsRequest = GetDestinationsRequest(
+                    action.destinationId,
+                    0L,
+                    null,
+                    table,
+                    "ALL"
+                )
+
+                val getDestinationsResponse: GetDestinationsResponse = monitorCtx.client!!.suspendUntil {
+                    monitorCtx.client!!.execute(GetDestinationsAction.INSTANCE, getDestinationsRequest, it)
+                }
+                getDestinationsResponse.destinations.firstOrNull()
+            } catch (e: IllegalStateException) {
+                // Catching the exception thrown when the Destination was not found so the NotificationActionConfigs object can be returned
+                null
+            } catch (e: OpenSearchSecurityException) {
+                if (notificationPermissionException != null) {
+                    throw notificationPermissionException
+                } else {
+                    throw e
+                }
+            }
+
+            if (destination == null && notificationPermissionException != null) {
+                throw notificationPermissionException
+            }
+        }
+
+        return NotificationActionConfigs(destination, channel)
+    }
+
+    internal fun compileTemplate(template: Script, ctx: ChainedAlertTriggerExecutionContext): String {
+        return MonitorRunnerService.monitorCtx.scriptService!!.compile(template, TemplateScript.CONTEXT)
+            .newInstance(template.params + mapOf("ctx" to ctx.asTemplateArg()))
+            .execute()
+    }
+}
diff --git a/alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt$1.class b/alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt$1.class
new file mode 100644
index 0000000000000000000000000000000000000000..915343f8d988ec245c45d1773483190cddb7fa00
GIT binary patch
[base85 binary literal (2105 bytes) omitted: compiled .class build artifact committed under alerting/bin/]
diff --git a/alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt$2.class b/alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt$2.class
new file mode 100644
index 0000000000000000000000000000000000000000..e1530dd85788b9009763a25c0d0120c0d9ade397
GIT binary patch
[base85 binary literal (5688 bytes) omitted: compiled .class build artifact]

diff --git a/alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt.class b/alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt.class
new file mode 100644
index 0000000000000000000000000000000000000000..ee24cb1101d5087d6966c904da1f2040b181be60
GIT binary patch
[base85 binary literal (33817 bytes) omitted: compiled .class build artifact]

diff --git a/alerting/bin/main/org/opensearch/percolator/PercolatorFieldMapperExt$Builder.class b/alerting/bin/main/org/opensearch/percolator/PercolatorFieldMapperExt$Builder.class
new file mode 100644
index 0000000000000000000000000000000000000000..c44daa1b273d2c6493f89fd87d79874d66db05b4
GIT binary patch
[base85 binary literal (9395 bytes) omitted: compiled .class build artifact]

diff --git a/alerting/bin/main/org/opensearch/percolator/PercolatorFieldMapperExt$TypeParser.class b/alerting/bin/main/org/opensearch/percolator/PercolatorFieldMapperExt$TypeParser.class
new file mode 100644
index 0000000000000000000000000000000000000000..545dd4e791524dc84769cbb98ff67c81beec7a68
GIT binary patch
[base85 binary literal (1964 bytes) omitted: compiled .class build artifact; any remaining percolator
.class binary literals in this span are elided the same way]

diff --git a/alerting/bin/test/esnode-key.pem b/alerting/bin/test/esnode-key.pem
new file mode 100644
index 000000000..4ac2cb57a
--- /dev/null
+++ b/alerting/bin/test/esnode-key.pem
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCWvn+O+rywfgMC
+ud24mAclMDfuNA/IzCKLxl5usIE/PvUm7PPfXQ14LfQhNQXqOuaD9fiVM+HO1BzK
+wmN3j4g7eHInR1cxENoNGKFa0Fr9EXnUv8sfwyobPD8NTu9eaH7T+d6f9oow+Q4n
+xb9Xin5IRR/pcJ8v7zEjcXpZaZejcSU4iVZ0PR2Di4H9rfe9SEyR5wLrsVBePB3L
+jaL1uK4bZF3n/JGgDe3BNy1PgPU+O+FCzQipBBTyJWQCjd4iTRXVbMa01PglAR85
+O9w6NXApBLyWdGRY6dGd8vMC2P4KlhnxlcgPZdglKniGTX+eTzT7Rszq77zjYrou
+PLwSh9S7AgMBAAECggEABwiohxFoEIwws8XcdKqTWsbfNTw0qFfuHLuK2Htf7IWR
+htlzn66F3F+4jnwc5IsPCoVFriCXnsEC/usHHSMTZkL+gJqxlNaGdin6DXS/aiOQ
+nb69SaQfqNmsz4ApZyxVDqsQGkK0vAhDAtQVU45gyhp/nLLmmqP8lPzMirOEodmp
+U9bA8t/ttrzng7SVAER42f6IVpW0iTKTLyFii0WZbq+ObViyqib9hVFrI6NJuQS+ +IelcZB0KsSi6rqIjXg1XXyMiIUcSlhq+GfEa18AYgmsbPwMbExate7/8Ci7ZtCbh +lx9bves2+eeqq5EMm3sMHyhdcg61yzd5UYXeZhwJkQKBgQDS9YqrAtztvLY2gMgv +d+wOjb9awWxYbQTBjx33kf66W+pJ+2j8bI/XX2CpZ98w/oq8VhMqbr9j5b8MfsrF +EoQvedA4joUo8sXd4j1mR2qKF4/KLmkgy6YYusNP2UrVSw7sh77bzce+YaVVoO/e +0wIVTHuD/QZ6fG6MasOqcbl6hwKBgQC27cQruaHFEXR/16LrMVAX+HyEEv44KOCZ +ij5OE4P7F0twb+okngG26+OJV3BtqXf0ULlXJ+YGwXCRf6zUZkld3NMy3bbKPgH6 +H/nf3BxqS2tudj7+DV52jKtisBghdvtlKs56oc9AAuwOs37DvhptBKUPdzDDqfys +Qchv5JQdLQKBgERev+pcqy2Bk6xmYHrB6wdseS/4sByYeIoi0BuEfYH4eB4yFPx6 +UsQCbVl6CKPgWyZe3ydJbU37D8gE78KfFagtWoZ56j4zMF2RDUUwsB7BNCDamce/ +OL2bCeG/Erm98cBG3lxufOX+z47I8fTNfkdY2k8UmhzoZwurLm73HJ3RAoGBAKsp +6yamuXF2FbYRhUXgjHsBbTD/vJO72/yO2CGiLRpi/5mjfkjo99269trp0C8sJSub +5PBiSuADXFsoRgUv+HI1UAEGaCTwxFTQWrRWdtgW3d0sE2EQDVWL5kmfT9TwSeat +mSoyAYR5t3tCBNkPJhbgA7pm4mASzHQ50VyxWs25AoGBAKPFx9X2oKhYQa+mW541 +bbqRuGFMoXIIcr/aeM3LayfLETi48o5NDr2NDP11j4yYuz26YLH0Dj8aKpWuehuH +uB27n6j6qu0SVhQi6mMJBe1JrKbzhqMKQjYOoy8VsC2gdj5pCUP/kLQPW7zm9diX +CiKTtKgPIeYdigor7V3AHcVT +-----END PRIVATE KEY----- diff --git a/alerting/bin/test/esnode.pem b/alerting/bin/test/esnode.pem new file mode 100644 index 000000000..7ba92534e --- /dev/null +++ b/alerting/bin/test/esnode.pem @@ -0,0 +1,28 @@ +-----BEGIN CERTIFICATE----- +MIIEyTCCA7GgAwIBAgIGAWLrc1O2MA0GCSqGSIb3DQEBCwUAMIGPMRMwEQYKCZIm +iZPyLGQBGRYDY29tMRcwFQYKCZImiZPyLGQBGRYHZXhhbXBsZTEZMBcGA1UECgwQ +RXhhbXBsZSBDb20gSW5jLjEhMB8GA1UECwwYRXhhbXBsZSBDb20gSW5jLiBSb290 +IENBMSEwHwYDVQQDDBhFeGFtcGxlIENvbSBJbmMuIFJvb3QgQ0EwHhcNMTgwNDIy +MDM0MzQ3WhcNMjgwNDE5MDM0MzQ3WjBeMRIwEAYKCZImiZPyLGQBGRYCZGUxDTAL +BgNVBAcMBHRlc3QxDTALBgNVBAoMBG5vZGUxDTALBgNVBAsMBG5vZGUxGzAZBgNV +BAMMEm5vZGUtMC5leGFtcGxlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAJa+f476vLB+AwK53biYByUwN+40D8jMIovGXm6wgT8+9Sbs899dDXgt +9CE1Beo65oP1+JUz4c7UHMrCY3ePiDt4cidHVzEQ2g0YoVrQWv0RedS/yx/DKhs8 +Pw1O715oftP53p/2ijD5DifFv1eKfkhFH+lwny/vMSNxellpl6NxJTiJVnQ9HYOL +gf2t971ITJHnAuuxUF48HcuNovW4rhtkXef8kaAN7cE3LU+A9T474ULNCKkEFPIl +ZAKN3iJNFdVsxrTU+CUBHzk73Do1cCkEvJZ0ZFjp0Z3y8wLY/gqWGfGVyA9l2CUq +eIZNf55PNPtGzOrvvONiui48vBKH1LsCAwEAAaOCAVkwggFVMIG8BgNVHSMEgbQw +gbGAFJI1DOAPHitF9k0583tfouYSl0BzoYGVpIGSMIGPMRMwEQYKCZImiZPyLGQB +GRYDY29tMRcwFQYKCZImiZPyLGQBGRYHZXhhbXBsZTEZMBcGA1UECgwQRXhhbXBs +ZSBDb20gSW5jLjEhMB8GA1UECwwYRXhhbXBsZSBDb20gSW5jLiBSb290IENBMSEw +HwYDVQQDDBhFeGFtcGxlIENvbSBJbmMuIFJvb3QgQ0GCAQEwHQYDVR0OBBYEFKyv +78ZmFjVKM9g7pMConYH7FVBHMAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/BAQDAgXg +MCAGA1UdJQEB/wQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjA1BgNVHREELjAsiAUq +AwQFBYISbm9kZS0wLmV4YW1wbGUuY29tgglsb2NhbGhvc3SHBH8AAAEwDQYJKoZI +hvcNAQELBQADggEBAIOKuyXsFfGv1hI/Lkpd/73QNqjqJdxQclX57GOMWNbOM5H0 +5/9AOIZ5JQsWULNKN77aHjLRr4owq2jGbpc/Z6kAd+eiatkcpnbtbGrhKpOtoEZy +8KuslwkeixpzLDNISSbkeLpXz4xJI1ETMN/VG8ZZP1bjzlHziHHDu0JNZ6TnNzKr +XzCGMCohFfem8vnKNnKUneMQMvXd3rzUaAgvtf7Hc2LTBlf4fZzZF1EkwdSXhaMA +1lkfHiqOBxtgeDLxCHESZ2fqgVqsWX+t3qHQfivcPW6txtDyrFPRdJOGhiMGzT/t +e/9kkAtQRgpTb3skYdIOOUOV0WGQ60kJlFhAzIs= +-----END CERTIFICATE----- diff --git a/alerting/bin/test/kirk-key.pem b/alerting/bin/test/kirk-key.pem new file mode 100644 index 000000000..bacb22c21 --- /dev/null +++ b/alerting/bin/test/kirk-key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDCwgBOoO88uMM8 +dREJsk58Yt4Jn0zwQ2wUThbvy3ICDiEWhiAhUbg6dTggpS5vWWJto9bvaaqgMVoh +ElfYHdTDncX3UQNBEP8tqzHON6BFEFSGgJRGLd6f5dri6rK32nCotYS61CFXBFxf +WumXjSukjyrcTsdkR3C5QDo2oN7F883MOQqRENPzAtZi9s3jNX48u+/e3yvJzXsB 
+GS9Qmsye6C71enbIujM4CVwDT/7a5jHuaUp6OuNCFbdRPnu/wLYwOS2/yOtzAqk7 +/PFnPCe7YOa10ShnV/jx2sAHhp7ZQBJgFkkgnIERz9Ws74Au+EbptWnsWuB+LqRL +x5G02IzpAgMBAAECggEAEzwnMkeBbqqDgyRqFbO/PgMNvD7i0b/28V0dCtCPEVY6 +klzrg3RCERP5V9AN8VVkppYjPkCzZ2A4b0JpMUu7ncOmr7HCnoSCj2IfEyePSVg+ +4OHbbcBOAoDTHiI2myM/M9++8izNS34qGV4t6pfjaDyeQQ/5cBVWNBWnKjS34S5H +rJWpAcDgxYk5/ah2Xs2aULZlXDMxbSikjrv+n4JIYTKFQo8ydzL8HQDBRmXAFLjC +gNOSHf+5u1JdpY3uPIxK1ugVf8zPZ4/OEB23j56uu7c8+sZ+kZwfRWAQmMhFVG/y +OXxoT5mOruBsAw29m2Ijtxg252/YzSTxiDqFziB/eQKBgQDjeVAdi55GW/bvhuqn +xME/An8E3hI/FyaaITrMQJUBjiCUaStTEqUgQ6A7ZfY/VX6qafOX7sli1svihrXC +uelmKrdve/CFEEqzX9JWWRiPiQ0VZD+EQRsJvX85Tw2UGvVUh6dO3UGPS0BhplMD +jeVpyXgZ7Gy5we+DWjfwhYrCmwKBgQDbLmQhRy+IdVljObZmv3QtJ0cyxxZETWzU +MKmgBFvcRw+KvNwO+Iy0CHEbDu06Uj63kzI2bK3QdINaSrjgr8iftXIQpBmcgMF+ +a1l5HtHlCp6RWd55nWQOEvn36IGN3cAaQkXuh4UYM7QfEJaAbzJhyJ+wXA3jWqUd +8bDTIAZ0ywKBgFuZ44gyTAc7S2JDa0Up90O/ZpT4NFLRqMrSbNIJg7d/m2EIRNkM +HhCzCthAg/wXGo3XYq+hCdnSc4ICCzmiEfoBY6LyPvXmjJ5VDOeWs0xBvVIK74T7 +jr7KX2wdiHNGs9pZUidw89CXVhK8nptEzcheyA1wZowbK68yamph7HHXAoGBAK3x +7D9Iyl1mnDEWPT7f1Gh9UpDm1TIRrDvd/tBihTCVKK13YsFy2d+LD5Bk0TpGyUVR +STlOGMdloFUJFh4jA3pUOpkgUr8Uo/sbYN+x6Ov3+I3sH5aupRhSURVA7YhUIz/z +tqIt5R+m8Nzygi6dkQNvf+Qruk3jw0S3ahizwsvvAoGAL7do6dTLp832wFVxkEf4 +gg1M6DswfkgML5V/7GQ3MkIX/Hrmiu+qSuHhDGrp9inZdCDDYg5+uy1+2+RBMRZ3 +vDUUacvc4Fep05zp7NcjgU5y+/HWpuKVvLIlZAO1MBY4Xinqqii6RdxukIhxw7eT +C6TPL5KAcV1R/XAihDhI18Y= +-----END PRIVATE KEY----- diff --git a/alerting/bin/test/kirk.pem b/alerting/bin/test/kirk.pem new file mode 100644 index 000000000..c32b21cd8 --- /dev/null +++ b/alerting/bin/test/kirk.pem @@ -0,0 +1,26 @@ +-----BEGIN CERTIFICATE----- +MIIEdzCCA1+gAwIBAgIGAWLrc1O4MA0GCSqGSIb3DQEBCwUAMIGPMRMwEQYKCZIm +iZPyLGQBGRYDY29tMRcwFQYKCZImiZPyLGQBGRYHZXhhbXBsZTEZMBcGA1UECgwQ +RXhhbXBsZSBDb20gSW5jLjEhMB8GA1UECwwYRXhhbXBsZSBDb20gSW5jLiBSb290 +IENBMSEwHwYDVQQDDBhFeGFtcGxlIENvbSBJbmMuIFJvb3QgQ0EwHhcNMTgwNDIy +MDM0MzQ3WhcNMjgwNDE5MDM0MzQ3WjBNMQswCQYDVQQGEwJkZTENMAsGA1UEBwwE +dGVzdDEPMA0GA1UECgwGY2xpZW50MQ8wDQYDVQQLDAZjbGllbnQxDTALBgNVBAMM +BGtpcmswggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDCwgBOoO88uMM8 +dREJsk58Yt4Jn0zwQ2wUThbvy3ICDiEWhiAhUbg6dTggpS5vWWJto9bvaaqgMVoh +ElfYHdTDncX3UQNBEP8tqzHON6BFEFSGgJRGLd6f5dri6rK32nCotYS61CFXBFxf +WumXjSukjyrcTsdkR3C5QDo2oN7F883MOQqRENPzAtZi9s3jNX48u+/e3yvJzXsB +GS9Qmsye6C71enbIujM4CVwDT/7a5jHuaUp6OuNCFbdRPnu/wLYwOS2/yOtzAqk7 +/PFnPCe7YOa10ShnV/jx2sAHhp7ZQBJgFkkgnIERz9Ws74Au+EbptWnsWuB+LqRL +x5G02IzpAgMBAAGjggEYMIIBFDCBvAYDVR0jBIG0MIGxgBSSNQzgDx4rRfZNOfN7 +X6LmEpdAc6GBlaSBkjCBjzETMBEGCgmSJomT8ixkARkWA2NvbTEXMBUGCgmSJomT +8ixkARkWB2V4YW1wbGUxGTAXBgNVBAoMEEV4YW1wbGUgQ29tIEluYy4xITAfBgNV +BAsMGEV4YW1wbGUgQ29tIEluYy4gUm9vdCBDQTEhMB8GA1UEAwwYRXhhbXBsZSBD +b20gSW5jLiBSb290IENBggEBMB0GA1UdDgQWBBRsdhuHn3MGDvZxOe22+1wliCJB +mDAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF4DAWBgNVHSUBAf8EDDAKBggr +BgEFBQcDAjANBgkqhkiG9w0BAQsFAAOCAQEAkPrUTKKn+/6g0CjhTPBFeX8mKXhG +zw5z9Oq+xnwefZwxV82E/tgFsPcwXcJIBg0f43BaVSygPiV7bXqWhxASwn73i24z +lveIR4+z56bKIhP6c3twb8WWR9yDcLu2Iroin7dYEm3dfVUrhz/A90WHr6ddwmLL +3gcFF2kBu3S3xqM5OmN/tqRXFmo+EvwrdJRiTh4Fsf0tX1ZT07rrGvBFYktK7Kma +lqDl4UDCF1UWkiiFubc0Xw+DR6vNAa99E0oaphzvCmITU1wITNnYZTKzVzQ7vUCq +kLmXOFLTcxTQpptxSo5xDD3aTpzWGCvjExCKpXQtsITUOYtZc02AGjjPOQ== +-----END CERTIFICATE----- diff --git a/alerting/bin/test/org/opensearch/alerting/ADTestHelpers.kt b/alerting/bin/test/org/opensearch/alerting/ADTestHelpers.kt new file mode 100644 index 000000000..6eda9ec30 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/ADTestHelpers.kt @@ -0,0 +1,508 @@ +/* + * Copyright 
OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ +package org.opensearch.alerting + +import org.opensearch.commons.alerting.model.Input +import org.opensearch.commons.alerting.model.IntervalSchedule +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.QueryLevelTrigger +import org.opensearch.commons.alerting.model.Schedule +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.alerting.model.Trigger +import org.opensearch.commons.authuser.User +import org.opensearch.index.query.BoolQueryBuilder +import org.opensearch.index.query.QueryBuilders +import org.opensearch.script.Script +import org.opensearch.search.aggregations.AggregationBuilders +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.test.OpenSearchTestCase +import org.opensearch.test.rest.OpenSearchRestTestCase +import java.time.Instant +import java.time.ZonedDateTime +import java.time.temporal.ChronoUnit + +const val ANOMALY_DETECTOR_INDEX = ".opendistro-anomaly-detectors" +const val ANOMALY_RESULT_INDEX = ".opendistro-anomaly-results*" + +fun anomalyDetectorIndexMapping(): String { + return """ + "properties": { + "schema_version": { + "type": "integer" + }, + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "description": { + "type": "text" + }, + "time_field": { + "type": "keyword" + }, + "indices": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "filter_query": { + "type": "object", + "enabled": false + }, + "feature_attributes": { + "type": "nested", + "properties": { + "feature_id": { + "type": "keyword", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "feature_name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "feature_enabled": { + "type": "boolean" + }, + "aggregation_query": { + "type": "object", + "enabled": false + } + } + }, + "detection_interval": { + "properties": { + "period": { + "properties": { + "interval": { + "type": "integer" + }, + "unit": { + "type": "keyword" + } + } + } + } + }, + "window_delay": { + "properties": { + "period": { + "properties": { + "interval": { + "type": "integer" + }, + "unit": { + "type": "keyword" + } + } + } + } + }, + "shingle_size": { + "type": "integer" + }, + "last_update_time": { + "type": "date", + "format": "strict_date_time||epoch_millis" + }, + "ui_metadata": { + "type": "object", + "enabled": false + }, + "user": { + "type": "nested", + "properties": { + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "backend_roles": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "roles": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "custom_attribute_names": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + } + } + }, + "category_field": { + "type": "keyword" + } + } + """ +} + +fun anomalyResultIndexMapping(): String { + return """ + "properties": { + "detector_id": { + "type": "keyword" + }, + "is_anomaly": { + "type": "boolean" + }, + "anomaly_score": { + "type": "double" + }, + "anomaly_grade": { + "type": "double" + }, + "confidence": { + "type": "double" + }, + "feature_data": { + "type": "nested", + "properties": { + "feature_id": { + "type": 
"keyword" + }, + "data": { + "type": "double" + } + } + }, + "data_start_time": { + "type": "date", + "format": "strict_date_time||epoch_millis" + }, + "data_end_time": { + "type": "date", + "format": "strict_date_time||epoch_millis" + }, + "execution_start_time": { + "type": "date", + "format": "strict_date_time||epoch_millis" + }, + "execution_end_time": { + "type": "date", + "format": "strict_date_time||epoch_millis" + }, + "error": { + "type": "text" + }, + "user": { + "type": "nested", + "properties": { + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "backend_roles": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "roles": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "custom_attribute_names": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword" + } + } + } + } + }, + "entity": { + "type": "nested", + "properties": { + "name": { + "type": "keyword" + }, + "value": { + "type": "keyword" + } + } + }, + "schema_version": { + "type": "integer" + } + } + """ +} + +fun randomAnomalyDetector(): String { + return """{ + "name" : "${OpenSearchTestCase.randomAlphaOfLength(10)}", + "description" : "${OpenSearchTestCase.randomAlphaOfLength(10)}", + "time_field" : "timestamp", + "indices" : [ + "${OpenSearchTestCase.randomAlphaOfLength(5)}" + ], + "filter_query" : { + "match_all" : { + "boost" : 1.0 + } + }, + "detection_interval" : { + "period" : { + "interval" : 1, + "unit" : "Minutes" + } + }, + "window_delay" : { + "period" : { + "interval" : 1, + "unit" : "Minutes" + } + }, + "shingle_size" : 8, + "feature_attributes" : [ + { + "feature_name" : "F1", + "feature_enabled" : true, + "aggregation_query" : { + "f_1" : { + "sum" : { + "field" : "value" + } + } + } + } + ] + } + """.trimIndent() +} + +fun randomAnomalyDetectorWithUser(backendRole: String): String { + return """{ + "name" : "${OpenSearchTestCase.randomAlphaOfLength(5)}", + "description" : "${OpenSearchTestCase.randomAlphaOfLength(10)}", + "time_field" : "timestamp", + "indices" : [ + "${OpenSearchTestCase.randomAlphaOfLength(5)}" + ], + "filter_query" : { + "match_all" : { + "boost" : 1.0 + } + }, + "detection_interval" : { + "period" : { + "interval" : 1, + "unit" : "Minutes" + } + }, + "window_delay" : { + "period" : { + "interval" : 1, + "unit" : "Minutes" + } + }, + "shingle_size" : 8, + "feature_attributes" : [ + { + "feature_name" : "F1", + "feature_enabled" : true, + "aggregation_query" : { + "f_1" : { + "sum" : { + "field" : "value" + } + } + } + } + ], + "user" : { + "name" : "${OpenSearchTestCase.randomAlphaOfLength(5)}", + "backend_roles" : [ "$backendRole" ], + "roles" : [ + "${OpenSearchTestCase.randomAlphaOfLength(5)}" + ], + "custom_attribute_names" : [ ] + } + } + """.trimIndent() +} + +fun randomAnomalyResult( + detectorId: String = OpenSearchTestCase.randomAlphaOfLength(10), + dataStartTime: Long = ZonedDateTime.now().minus(2, ChronoUnit.MINUTES).toInstant().toEpochMilli(), + dataEndTime: Long = ZonedDateTime.now().toInstant().toEpochMilli(), + featureId: String = OpenSearchTestCase.randomAlphaOfLength(5), + featureName: String = OpenSearchTestCase.randomAlphaOfLength(5), + featureData: Double = OpenSearchTestCase.randomDouble(), + executionStartTime: Long = ZonedDateTime.now().minus(10, ChronoUnit.SECONDS).toInstant().toEpochMilli(), + executionEndTime: Long = ZonedDateTime.now().toInstant().toEpochMilli(), + anomalyScore: Double = 
OpenSearchTestCase.randomDouble(), + anomalyGrade: Double = OpenSearchTestCase.randomDouble(), + confidence: Double = OpenSearchTestCase.randomDouble(), + user: User = randomUser() +): String { + return """{ + "detector_id" : "$detectorId", + "data_start_time" : $dataStartTime, + "data_end_time" : $dataEndTime, + "feature_data" : [ + { + "feature_id" : "$featureId", + "feature_name" : "$featureName", + "data" : $featureData + } + ], + "execution_start_time" : $executionStartTime, + "execution_end_time" : $executionEndTime, + "anomaly_score" : $anomalyScore, + "anomaly_grade" : $anomalyGrade, + "confidence" : $confidence, + "user" : { + "name" : "${user.name}", + "backend_roles" : [ + ${user.backendRoles.joinToString { "\"${it}\"" }} + ], + "roles" : [ + ${user.roles.joinToString { "\"${it}\"" }} + ], + "custom_attribute_names" : [ + ${user.customAttNames.joinToString { "\"${it}\"" }} + ] + } + } + """.trimIndent() +} + +fun randomAnomalyResultWithoutUser( + detectorId: String = OpenSearchTestCase.randomAlphaOfLength(10), + dataStartTime: Long = ZonedDateTime.now().minus(2, ChronoUnit.MINUTES).toInstant().toEpochMilli(), + dataEndTime: Long = ZonedDateTime.now().toInstant().toEpochMilli(), + featureId: String = OpenSearchTestCase.randomAlphaOfLength(5), + featureName: String = OpenSearchTestCase.randomAlphaOfLength(5), + featureData: Double = OpenSearchTestCase.randomDouble(), + executionStartTime: Long = ZonedDateTime.now().minus(10, ChronoUnit.SECONDS).toInstant().toEpochMilli(), + executionEndTime: Long = ZonedDateTime.now().toInstant().toEpochMilli(), + anomalyScore: Double = OpenSearchTestCase.randomDouble(), + anomalyGrade: Double = OpenSearchTestCase.randomDouble(), + confidence: Double = OpenSearchTestCase.randomDouble() +): String { + return """{ + "detector_id" : "$detectorId", + "data_start_time" : $dataStartTime, + "data_end_time" : $dataEndTime, + "feature_data" : [ + { + "feature_id" : "$featureId", + "feature_name" : "$featureName", + "data" : $featureData + } + ], + "execution_start_time" : $executionStartTime, + "execution_end_time" : $executionEndTime, + "anomaly_score" : $anomalyScore, + "anomaly_grade" : $anomalyGrade, + "confidence" : $confidence + } + """.trimIndent() +} + +fun maxAnomalyGradeSearchInput( + adResultIndex: String = ".opendistro-anomaly-results-history", + detectorId: String = OpenSearchTestCase.randomAlphaOfLength(10), + size: Int = 1 +): SearchInput { + val rangeQuery = QueryBuilders.rangeQuery("execution_end_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + val termQuery = QueryBuilders.termQuery("detector_id", detectorId) + + var boolQueryBuilder = BoolQueryBuilder() + boolQueryBuilder.filter(rangeQuery).filter(termQuery) + + val aggregationBuilder = AggregationBuilders.max("max_anomaly_grade").field("anomaly_grade") + val searchSourceBuilder = SearchSourceBuilder().query(boolQueryBuilder).aggregation(aggregationBuilder).size(size) + return SearchInput(indices = listOf(adResultIndex), query = searchSourceBuilder) +} + +fun adMonitorTrigger(): QueryLevelTrigger { + val triggerScript = """ + return ctx.results[0].aggregations.max_anomaly_grade.value != null && + ctx.results[0].aggregations.max_anomaly_grade.value > 0.7 + """.trimIndent() + return randomQueryLevelTrigger(condition = Script(triggerScript)) +} + +fun adSearchInput(detectorId: String): SearchInput { + return maxAnomalyGradeSearchInput(adResultIndex = ANOMALY_RESULT_INDEX, detectorId = detectorId, size = 10) +} + +fun randomADMonitor( + name: String = 
OpenSearchRestTestCase.randomAlphaOfLength(10),
+    user: User? = randomUser(),
+    inputs: List<Input> = listOf(adSearchInput("test_detector_id")),
+    schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES),
+    enabled: Boolean = OpenSearchTestCase.randomBoolean(),
+    triggers: List<Trigger> = (1..OpenSearchTestCase.randomInt(10)).map { randomQueryLevelTrigger() },
+    enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null,
+    lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS),
+    withMetadata: Boolean = false
+): Monitor {
+    return Monitor(
+        name = name, monitorType = Monitor.MonitorType.QUERY_LEVEL_MONITOR, enabled = enabled, inputs = inputs,
+        schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime,
+        user = user, uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf()
+    )
+}
+
+fun randomADUser(backendRole: String = OpenSearchRestTestCase.randomAlphaOfLength(10)): User {
+    return User(
+        OpenSearchRestTestCase.randomAlphaOfLength(10), listOf(backendRole),
+        listOf(OpenSearchRestTestCase.randomAlphaOfLength(10), ALL_ACCESS_ROLE), listOf("test_attr=test")
+    )
+}
diff --git a/alerting/bin/test/org/opensearch/alerting/AccessRoles.kt b/alerting/bin/test/org/opensearch/alerting/AccessRoles.kt
new file mode 100644
index 000000000..7f415a8ac
--- /dev/null
+++ b/alerting/bin/test/org/opensearch/alerting/AccessRoles.kt
@@ -0,0 +1,49 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting
+
+import org.opensearch.alerting.action.ExecuteWorkflowAction
+import org.opensearch.commons.alerting.action.AlertingActions
+
+val ALL_ACCESS_ROLE = "all_access"
+val READALL_AND_MONITOR_ROLE = "readall_and_monitor"
+val ALERTING_FULL_ACCESS_ROLE = "alerting_full_access"
+val ALERTING_READ_ONLY_ACCESS = "alerting_read_access"
+val ALERTING_NO_ACCESS_ROLE = "no_access"
+val ALERTING_GET_EMAIL_ACCOUNT_ACCESS = "alerting_get_email_account_access"
+val ALERTING_SEARCH_EMAIL_ACCOUNT_ACCESS = "alerting_search_email_account_access"
+val ALERTING_GET_EMAIL_GROUP_ACCESS = "alerting_get_email_group_access"
+val ALERTING_SEARCH_EMAIL_GROUP_ACCESS = "alerting_search_email_group_access"
+val ALERTING_INDEX_MONITOR_ACCESS = "alerting_index_monitor_access"
+val ALERTING_GET_MONITOR_ACCESS = "alerting_get_monitor_access"
+val ALERTING_GET_WORKFLOW_ACCESS = "alerting_get_workflow_access"
+val ALERTING_DELETE_WORKFLOW_ACCESS = "alerting_delete_workflow_access"
+val ALERTING_SEARCH_MONITOR_ONLY_ACCESS = "alerting_search_monitor_access"
+val ALERTING_EXECUTE_MONITOR_ACCESS = "alerting_execute_monitor_access"
+val ALERTING_EXECUTE_WORKFLOW_ACCESS = "alerting_execute_workflow_access"
+val ALERTING_DELETE_MONITOR_ACCESS = "alerting_delete_monitor_access"
+val ALERTING_GET_DESTINATION_ACCESS = "alerting_get_destination_access"
+val ALERTING_GET_ALERTS_ACCESS = "alerting_get_alerts_access"
+val ALERTING_INDEX_WORKFLOW_ACCESS = "alerting_index_workflow_access"
+
+val ROLE_TO_PERMISSION_MAPPING = mapOf(
+    ALERTING_NO_ACCESS_ROLE to "",
+    ALERTING_GET_EMAIL_ACCOUNT_ACCESS to "cluster:admin/opendistro/alerting/destination/email_account/get",
+    ALERTING_SEARCH_EMAIL_ACCOUNT_ACCESS to "cluster:admin/opendistro/alerting/destination/email_account/search",
+    ALERTING_GET_EMAIL_GROUP_ACCESS to "cluster:admin/opendistro/alerting/destination/email_group/get",
+    ALERTING_SEARCH_EMAIL_GROUP_ACCESS to "cluster:admin/opendistro/alerting/destination/email_group/search",
+    ALERTING_INDEX_MONITOR_ACCESS to "cluster:admin/opendistro/alerting/monitor/write",
+    ALERTING_GET_MONITOR_ACCESS to "cluster:admin/opendistro/alerting/monitor/get",
+    ALERTING_GET_WORKFLOW_ACCESS to AlertingActions.GET_WORKFLOW_ACTION_NAME,
+    ALERTING_SEARCH_MONITOR_ONLY_ACCESS to "cluster:admin/opendistro/alerting/monitor/search",
+    ALERTING_EXECUTE_MONITOR_ACCESS to "cluster:admin/opendistro/alerting/monitor/execute",
+    ALERTING_EXECUTE_WORKFLOW_ACCESS to ExecuteWorkflowAction.NAME,
+    ALERTING_DELETE_MONITOR_ACCESS to "cluster:admin/opendistro/alerting/monitor/delete",
+    ALERTING_GET_DESTINATION_ACCESS to "cluster:admin/opendistro/alerting/destination/get",
+    ALERTING_GET_ALERTS_ACCESS to "cluster:admin/opendistro/alerting/alerts/get",
+    ALERTING_INDEX_WORKFLOW_ACCESS to AlertingActions.INDEX_WORKFLOW_ACTION_NAME,
+    ALERTING_DELETE_WORKFLOW_ACCESS to AlertingActions.DELETE_WORKFLOW_ACTION_NAME
+)
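+
+// Illustrative usage, not part of the mapping itself: a security test can look up the
+// single permission a role is expected to grant before creating a test user for it, e.g.
+// ROLE_TO_PERMISSION_MAPPING.getValue(ALERTING_GET_MONITOR_ACCESS) returns
+// "cluster:admin/opendistro/alerting/monitor/get".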
diff --git a/alerting/bin/test/org/opensearch/alerting/AlertServiceTests.kt b/alerting/bin/test/org/opensearch/alerting/AlertServiceTests.kt
new file mode 100644
index 000000000..8583ae0db
--- /dev/null
+++ b/alerting/bin/test/org/opensearch/alerting/AlertServiceTests.kt
@@ -0,0 +1,256 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting
+
+import org.junit.Before
+import org.mockito.Mockito
+import org.opensearch.Version
+import org.opensearch.alerting.alerts.AlertIndices
+import org.opensearch.alerting.settings.AlertingSettings
+import org.opensearch.alerting.util.getBucketKeysHash
+import org.opensearch.client.Client
+import org.opensearch.cluster.node.DiscoveryNode
+import org.opensearch.cluster.service.ClusterService
+import org.opensearch.common.settings.ClusterSettings
+import org.opensearch.common.settings.Setting
+import org.opensearch.common.settings.Settings
+import org.opensearch.commons.alerting.model.AggregationResultBucket
+import org.opensearch.commons.alerting.model.Alert
+import org.opensearch.commons.alerting.model.BucketLevelTrigger
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.commons.alerting.model.action.AlertCategory
+import org.opensearch.core.xcontent.NamedXContentRegistry
+import org.opensearch.test.ClusterServiceUtils
+import org.opensearch.test.OpenSearchTestCase
+import org.opensearch.threadpool.ThreadPool
+import java.time.Instant
+import java.time.temporal.ChronoUnit
+
+class AlertServiceTests : OpenSearchTestCase() {
+
+    private lateinit var client: Client
+    private lateinit var xContentRegistry: NamedXContentRegistry
+    private lateinit var settings: Settings
+    private lateinit var threadPool: ThreadPool
+    private lateinit var clusterService: ClusterService
+
+    private lateinit var alertIndices: AlertIndices
+    private lateinit var alertService: AlertService
+
+    @Before
+    fun setup() {
+        // TODO: If more *Service unit tests are added, this configuration can be moved to some base class for each service test class to use
+        client = Mockito.mock(Client::class.java)
+        xContentRegistry = Mockito.mock(NamedXContentRegistry::class.java)
+        threadPool = Mockito.mock(ThreadPool::class.java)
+        clusterService = Mockito.mock(ClusterService::class.java)
+
+        settings = Settings.builder().build()
+        // Every alerting setting the services below read or register update consumers for
+        // must be present in ClusterSettings, or the test ClusterService rejects it as unregistered.
+        val settingSet = hashSetOf<Setting<*>>()
+        settingSet.addAll(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
+        settingSet.add(AlertingSettings.ALERT_HISTORY_ENABLED)
+        settingSet.add(AlertingSettings.ALERT_HISTORY_MAX_DOCS)
+        settingSet.add(AlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE)
+        settingSet.add(AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD)
+        settingSet.add(AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD)
+        settingSet.add(AlertingSettings.REQUEST_TIMEOUT)
+        settingSet.add(AlertingSettings.FINDING_HISTORY_ENABLED)
+        settingSet.add(AlertingSettings.FINDING_HISTORY_MAX_DOCS)
+        settingSet.add(AlertingSettings.FINDING_HISTORY_INDEX_MAX_AGE)
+        settingSet.add(AlertingSettings.FINDING_HISTORY_ROLLOVER_PERIOD)
+        settingSet.add(AlertingSettings.FINDING_HISTORY_RETENTION_PERIOD)
+        val discoveryNode = DiscoveryNode("node", buildNewFakeTransportAddress(), Version.CURRENT)
+        val clusterSettings = ClusterSettings(settings, settingSet)
+        val testClusterService = ClusterServiceUtils.createClusterService(threadPool, discoveryNode, clusterSettings)
+        clusterService = Mockito.spy(testClusterService)
+
+        alertIndices = AlertIndices(settings, client, threadPool, clusterService)
+        alertService = AlertService(client, xContentRegistry, alertIndices)
+    }
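+
+    // Categorization contract exercised by the tests below: bucket keys present in both
+    // the current alerts and the incoming aggregation result buckets are DEDUPED, keys
+    // seen only in the incoming buckets are NEW, and current alerts left unmatched after
+    // categorization are treated as completed.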
+
+    fun `test getting categorized alerts for bucket-level monitor with no current alerts`() {
+        val trigger = randomBucketLevelTrigger()
+        val monitor = randomBucketLevelMonitor(triggers = listOf(trigger))
+
+        val currentAlerts = mutableMapOf<String, Alert>()
+        val aggResultBuckets = createAggregationResultBucketsFromBucketKeys(
+            listOf(
+                listOf("a"),
+                listOf("b")
+            )
+        )
+
+        val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor(
+            monitor, trigger, currentAlerts, aggResultBuckets, emptyList(), "", null
+        )
+        // Completed Alerts are what remains in currentAlerts after categorization
+        val completedAlerts = currentAlerts.values.toList()
+        assertEquals(listOf<Alert>(), categorizedAlerts[AlertCategory.DEDUPED])
+        assertAlertsExistForBucketKeys(
+            listOf(
+                listOf("a"),
+                listOf("b")
+            ),
+            categorizedAlerts[AlertCategory.NEW] ?: error("New alerts not found")
+        )
+        assertEquals(listOf<Alert>(), completedAlerts)
+    }
+
+    fun `test getting categorized alerts for bucket-level monitor with de-duped alerts`() {
+        val trigger = randomBucketLevelTrigger()
+        val monitor = randomBucketLevelMonitor(triggers = listOf(trigger))
+
+        val currentAlerts = createCurrentAlertsFromBucketKeys(
+            monitor,
+            trigger,
+            listOf(
+                listOf("a"),
+                listOf("b")
+            )
+        )
+        val aggResultBuckets = createAggregationResultBucketsFromBucketKeys(
+            listOf(
+                listOf("a"),
+                listOf("b")
+            )
+        )
+
+        val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor(
+            monitor, trigger, currentAlerts, aggResultBuckets, emptyList(), "", null
+        )
+        // Completed Alerts are what remains in currentAlerts after categorization
+        val completedAlerts = currentAlerts.values.toList()
+        assertAlertsExistForBucketKeys(
+            listOf(
+                listOf("a"),
+                listOf("b")
+            ),
+            categorizedAlerts[AlertCategory.DEDUPED] ?: error("Deduped alerts not found")
+        )
+        assertEquals(listOf<Alert>(), categorizedAlerts[AlertCategory.NEW])
+        assertEquals(listOf<Alert>(), completedAlerts)
+    }
+
+    fun `test getting categorized alerts for bucket-level monitor with completed alerts`() {
+        val trigger = randomBucketLevelTrigger()
+        val monitor = randomBucketLevelMonitor(triggers = listOf(trigger))
+
+        val currentAlerts = createCurrentAlertsFromBucketKeys(
+            monitor,
+            trigger,
+            listOf(
+                listOf("a"),
+                listOf("b")
+            )
+        )
+        val aggResultBuckets = listOf<AggregationResultBucket>()
+
+        val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor(
+            monitor, trigger, currentAlerts, aggResultBuckets, emptyList(), "", null
+        )
+        // Completed Alerts are what remains in currentAlerts after categorization
+        val completedAlerts = currentAlerts.values.toList()
+        assertEquals(listOf<Alert>(), categorizedAlerts[AlertCategory.DEDUPED])
+        assertEquals(listOf<Alert>(), categorizedAlerts[AlertCategory.NEW])
+        assertAlertsExistForBucketKeys(
+            listOf(
+                listOf("a"),
+                listOf("b")
+            ),
+            completedAlerts
+        )
+    }
+
+    fun `test getting categorized alerts for bucket-level monitor with de-duped and completed alerts`() {
+        val trigger = randomBucketLevelTrigger()
+        val monitor = randomBucketLevelMonitor(triggers = listOf(trigger))
+
+        val currentAlerts = createCurrentAlertsFromBucketKeys(
+            monitor,
+            trigger,
+            listOf(
+                listOf("a"),
+                listOf("b")
+            )
+        )
+        val aggResultBuckets = createAggregationResultBucketsFromBucketKeys(
+            listOf(
+                listOf("b"),
+                listOf("c")
+            )
+        )
+
+        val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor(
+            monitor, trigger, currentAlerts, aggResultBuckets, emptyList(), "", null
+        )
+        // Completed Alerts are what remains in currentAlerts after categorization
+        val completedAlerts = currentAlerts.values.toList()
+        assertAlertsExistForBucketKeys(listOf(listOf("b")), categorizedAlerts[AlertCategory.DEDUPED] ?: error("Deduped alerts not found"))
+        assertAlertsExistForBucketKeys(listOf(listOf("c")), categorizedAlerts[AlertCategory.NEW] ?: error("New alerts not found"))
+        assertAlertsExistForBucketKeys(listOf(listOf("a")), completedAlerts)
+    }
+
+    fun `test getting categorized alerts for bucket-level monitor with de-duped alerts size 1`() {
+        val trigger = randomBucketLevelTrigger()
+        val monitor = randomBucketLevelMonitor(triggers = listOf(trigger))
+
+        val currentAlerts = createCurrentAlertsFromBucketKeys(
+            monitor,
+            trigger,
+            listOf(
+                listOf("a")
+            )
+        )
+        val aggResultBuckets = createAggregationResultBucketsFromBucketKeys(
+            listOf(
+                listOf("a")
+            )
+        )
+
+        val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor(
+            monitor, trigger, currentAlerts, aggResultBuckets, emptyList(), "", null
+        )
+        // Completed Alerts are what remains in currentAlerts after categorization
+        val completedAlerts = currentAlerts.values.toList()
+        assertAlertsExistForBucketKeys(listOf(listOf("a")), categorizedAlerts[AlertCategory.DEDUPED] ?: error("Deduped alerts not found"))
+        assertAlertsExistForBucketKeys(emptyList(), categorizedAlerts[AlertCategory.NEW] ?: error("New alerts not found"))
+        assertAlertsExistForBucketKeys(emptyList(), completedAlerts)
+    }
+
+    private fun createCurrentAlertsFromBucketKeys(
+        monitor: Monitor,
+        trigger: BucketLevelTrigger,
+        bucketKeysList: List<List<String>>
+    ): MutableMap<String, Alert> {
+        return bucketKeysList.map { bucketKeys ->
+            val aggResultBucket = AggregationResultBucket("parent_bucket_path", bucketKeys, mapOf())
+            val alert = Alert(
+                monitor,
+                trigger,
+                Instant.now().truncatedTo(ChronoUnit.MILLIS),
+                null,
+                actionExecutionResults = listOf(randomActionExecutionResult()),
+                aggregationResultBucket = aggResultBucket
+            )
+            aggResultBucket.getBucketKeysHash() to alert
+        }.toMap().toMutableMap()
+    }
+
+    private fun createAggregationResultBucketsFromBucketKeys(bucketKeysList: List<List<String>>): List<AggregationResultBucket> {
+        return bucketKeysList.map { AggregationResultBucket("parent_bucket_path", it, mapOf()) }
+    }
+
+    private fun assertAlertsExistForBucketKeys(bucketKeysList: List<List<String>>, alerts: List<Alert>) {
+        // Check sizes are equal first for sanity; since bucketKeysList should have unique entries,
+        // this also ensures there are no duplicates in the alerts
+        assertEquals(bucketKeysList.size, alerts.size)
+        val expectedBucketKeyHashes = bucketKeysList.map { it.joinToString(separator = "#") }.toSet()
+        alerts.forEach
{ alert -> + assertNotNull(alert.aggregationResultBucket) + assertTrue(expectedBucketKeyHashes.contains(alert.aggregationResultBucket!!.getBucketKeysHash())) + } + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/AlertingRestTestCase.kt b/alerting/bin/test/org/opensearch/alerting/AlertingRestTestCase.kt new file mode 100644 index 000000000..50cae9d8c --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/AlertingRestTestCase.kt @@ -0,0 +1,1821 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import org.apache.http.HttpEntity +import org.apache.http.HttpHeaders +import org.apache.http.entity.ContentType +import org.apache.http.entity.ContentType.APPLICATION_JSON +import org.apache.http.entity.StringEntity +import org.apache.http.message.BasicHeader +import org.junit.AfterClass +import org.junit.rules.DisableOnDebug +import org.opensearch.action.search.SearchResponse +import org.opensearch.alerting.AlertingPlugin.Companion.EMAIL_ACCOUNT_BASE_URI +import org.opensearch.alerting.AlertingPlugin.Companion.EMAIL_GROUP_BASE_URI +import org.opensearch.alerting.alerts.AlertIndices +import org.opensearch.alerting.alerts.AlertIndices.Companion.FINDING_HISTORY_WRITE_INDEX +import org.opensearch.alerting.core.settings.ScheduledJobSettings +import org.opensearch.alerting.model.destination.Chime +import org.opensearch.alerting.model.destination.CustomWebhook +import org.opensearch.alerting.model.destination.Destination +import org.opensearch.alerting.model.destination.Slack +import org.opensearch.alerting.model.destination.email.EmailAccount +import org.opensearch.alerting.model.destination.email.EmailGroup +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.settings.DestinationSettings +import org.opensearch.alerting.util.DestinationType +import org.opensearch.client.Request +import org.opensearch.client.Response +import org.opensearch.client.RestClient +import org.opensearch.client.WarningFailureException +import org.opensearch.common.UUIDs +import org.opensearch.common.io.PathUtils +import org.opensearch.common.settings.Settings +import org.opensearch.common.unit.TimeValue +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentFactory +import org.opensearch.common.xcontent.XContentFactory.jsonBuilder +import org.opensearch.common.xcontent.XContentType +import org.opensearch.common.xcontent.json.JsonXContent.jsonXContent +import org.opensearch.commons.alerting.action.GetFindingsResponse +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.BucketLevelTrigger +import org.opensearch.commons.alerting.model.ChainedAlertTrigger +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.DocumentLevelTrigger +import org.opensearch.commons.alerting.model.Finding +import org.opensearch.commons.alerting.model.FindingWithDocs +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.QueryLevelTrigger +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.commons.alerting.util.string +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import 
org.opensearch.core.xcontent.ToXContent
+import org.opensearch.core.xcontent.XContentBuilder
+import org.opensearch.core.xcontent.XContentParser
+import org.opensearch.core.xcontent.XContentParserUtils
+import org.opensearch.search.SearchModule
+import java.net.URLEncoder
+import java.nio.file.Files
+import java.time.Instant
+import java.time.ZonedDateTime
+import java.time.format.DateTimeFormatter
+import java.time.temporal.ChronoUnit
+import java.util.Locale
+import java.util.UUID
+import java.util.stream.Collectors
+import javax.management.MBeanServerInvocationHandler
+import javax.management.ObjectName
+import javax.management.remote.JMXConnectorFactory
+import javax.management.remote.JMXServiceURL
+import kotlin.collections.ArrayList
+import kotlin.collections.HashMap
+
+/**
+ * Superclass for tests that interact with an external test cluster using OpenSearch's RestClient
+ */
+abstract class AlertingRestTestCase : ODFERestTestCase() {
+
+    protected val password = "D%LMX3bo#@U3XqVQ"
+
+    protected val isDebuggingTest = DisableOnDebug(null).isDebugging
+    protected val isDebuggingRemoteCluster = System.getProperty("cluster.debug", "false")!!.toBoolean()
+    protected val numberOfNodes = System.getProperty("cluster.number_of_nodes", "1")!!.toInt()
+    protected val isMultiNode = numberOfNodes > 1
+
+    protected val statsResponseOpendistroSweeperEnabledField = "opendistro.scheduled_jobs.enabled"
+    protected val statsResponseOpenSearchSweeperEnabledField = "plugins.scheduled_jobs.enabled"
+
+    override fun xContentRegistry(): NamedXContentRegistry {
+        return NamedXContentRegistry(
+            mutableListOf(
+                Monitor.XCONTENT_REGISTRY,
+                SearchInput.XCONTENT_REGISTRY,
+                DocLevelMonitorInput.XCONTENT_REGISTRY,
+                QueryLevelTrigger.XCONTENT_REGISTRY,
+                BucketLevelTrigger.XCONTENT_REGISTRY,
+                DocumentLevelTrigger.XCONTENT_REGISTRY,
+                Workflow.XCONTENT_REGISTRY,
+                ChainedAlertTrigger.XCONTENT_REGISTRY
+            ) + SearchModule(Settings.EMPTY, emptyList()).namedXContents
+        )
+    }
+
+    fun Response.asMap(): Map<String, Any> {
+        return entityAsMap(this)
+    }
+
+    private fun createMonitorEntityWithBackendRoles(monitor: Monitor, rbacRoles: List<String>?): HttpEntity {
+        if (rbacRoles == null) {
+            return monitor.toHttpEntity()
+        }
+        val temp = monitor.toJsonString()
+        val toReplace = temp.lastIndexOf("}")
+        val rbacString = rbacRoles.joinToString { "\"$it\"" }
+        val jsonString = temp.substring(0, toReplace) + ", \"rbac_roles\": [$rbacString] }"
+        return StringEntity(jsonString, APPLICATION_JSON)
+    }
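+
+    // The entity above splices the roles into the monitor JSON just before its closing
+    // brace; for a hypothetical rbacRoles = listOf("role1", "role2"), the request body
+    // ends with: ..., "rbac_roles": ["role1", "role2"] }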
+
+    protected fun createMonitorWithClient(
+        client: RestClient,
+        monitor: Monitor,
+        rbacRoles: List<String>? = null,
+        refresh: Boolean = true,
+    ): Monitor {
+        val response = client.makeRequest(
+            "POST",
+            "$ALERTING_BASE_URI?refresh=$refresh",
+            emptyMap(),
+            createMonitorEntityWithBackendRoles(monitor, rbacRoles)
+        )
+        assertEquals("Unable to create a new monitor", RestStatus.CREATED, response.restStatus())
+
+        val monitorJson = jsonXContent.createParser(
+            NamedXContentRegistry.EMPTY,
+            LoggingDeprecationHandler.INSTANCE,
+            response.entity.content
+        ).map()
+        assertUserNull(monitorJson as HashMap<String, Any>)
+
+        return getMonitor(monitorId = monitorJson["_id"] as String)
+    }
+
+    protected fun createMonitor(monitor: Monitor, refresh: Boolean = true): Monitor {
+        return createMonitorWithClient(client(), monitor, emptyList(), refresh)
+    }
+
+    protected fun deleteMonitor(monitor: Monitor, refresh: Boolean = true): Response {
+        val response = client().makeRequest(
+            "DELETE",
+            "$ALERTING_BASE_URI/${monitor.id}?refresh=$refresh",
+            emptyMap(),
+            monitor.toHttpEntity()
+        )
+        assertEquals("Unable to delete a monitor", RestStatus.OK, response.restStatus())
+
+        return response
+    }
+
+    protected fun deleteWorkflow(workflow: Workflow, deleteDelegates: Boolean = false, refresh: Boolean = true): Response {
+        val response = client().makeRequest(
+            "DELETE",
+            "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}?refresh=$refresh&deleteDelegateMonitors=$deleteDelegates",
+            emptyMap(),
+            workflow.toHttpEntity()
+        )
+        assertEquals("Unable to delete a workflow", RestStatus.OK, response.restStatus())
+        return response
+    }
+
+    protected fun deleteWorkflowWithClient(
+        client: RestClient,
+        workflow: Workflow,
+        deleteDelegates: Boolean = false,
+        refresh: Boolean = true,
+    ): Response {
+        val response = client.makeRequest(
+            "DELETE",
+            "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}?refresh=$refresh&deleteDelegateMonitors=$deleteDelegates",
+            emptyMap(),
+            workflow.toHttpEntity()
+        )
+        assertEquals("Unable to delete a workflow", RestStatus.OK, response.restStatus())
+
+        return response
+    }
+
+    /**
+     * Destinations are now deprecated in favor of the Notification plugin's configs.
+     * This method should only be used for checking legacy behavior/Notification migration scenarios.
+     */
+    protected fun createDestination(destination: Destination = getTestDestination(), refresh: Boolean = true): Destination {
+        // Create Alerting config index if it doesn't exist to avoid mapping issues with legacy destination indexing
+        createAlertingConfigIndex()
+
+        val response = indexDocWithAdminClient(
+            ScheduledJob.SCHEDULED_JOBS_INDEX,
+            UUIDs.base64UUID(),
+            destination.toJsonStringWithType(),
+            refresh
+        )
+        val destinationJson = jsonXContent.createParser(
+            NamedXContentRegistry.EMPTY,
+            LoggingDeprecationHandler.INSTANCE,
+            response.entity.content
+        ).map()
+
+        return destination.copy(
+            id = destinationJson["_id"] as String,
+            version = (destinationJson["_version"] as Int).toLong(),
+            primaryTerm = destinationJson["_primary_term"] as Int
+        )
+    }
+
+    protected fun deleteDestination(destination: Destination = getTestDestination(), refresh: Boolean = true): Response {
+        val response = client().makeRequest(
+            "DELETE",
+            "$DESTINATION_BASE_URI/${destination.id}?refresh=$refresh",
+            emptyMap(),
+            destination.toHttpEntity()
+        )
+        assertEquals("Unable to delete destination", RestStatus.OK, response.restStatus())
+
+        return response
+    }
+
+    protected fun updateDestination(destination: Destination, refresh: Boolean = true): Destination {
+        val response = client().makeRequest(
+            "PUT",
+            "$DESTINATION_BASE_URI/${destination.id}?refresh=$refresh",
+            emptyMap(),
+            destination.toHttpEntity()
+        )
+        assertEquals("Unable to update a destination", RestStatus.OK, response.restStatus())
+        val destinationJson = jsonXContent.createParser(
+            NamedXContentRegistry.EMPTY,
+            LoggingDeprecationHandler.INSTANCE,
+            response.entity.content
+        ).map()
+        assertUserNull(destinationJson as HashMap<String, Any>)
+
+        return destination.copy(id = destinationJson["_id"] as String, version = (destinationJson["_version"] as Int).toLong())
+    }
+
+    protected fun getEmailAccount(
+        emailAccountID: String,
+        header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"),
+    ): EmailAccount {
+        val response = client().makeRequest("GET", "$EMAIL_ACCOUNT_BASE_URI/$emailAccountID", null, header)
+        assertEquals("Unable to get email account $emailAccountID", RestStatus.OK, response.restStatus())
+
+        val parser = createParser(XContentType.JSON.xContent(), response.entity.content)
+        XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser)
+
+        lateinit var id: String
+        var version: Long = 0
+        lateinit var emailAccount: EmailAccount
+
+        while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
+            parser.nextToken()
+
+            when (parser.currentName()) {
+                "_id" -> id = parser.text()
+                "_version" -> version = parser.longValue()
+                "email_account" -> emailAccount = EmailAccount.parse(parser)
+            }
+        }
+
+        return emailAccount.copy(id = id, version = version)
+    }
+
+    /**
+     * Email Accounts are now deprecated in favor of the Notification plugin's configs.
+     * This method should only be used for checking legacy behavior/Notification migration scenarios.
+ */ + protected fun createEmailAccount(emailAccount: EmailAccount = getTestEmailAccount(), refresh: Boolean = true): EmailAccount { + // Create Alerting config index if it doesn't exist to avoid mapping issues with legacy destination indexing + createAlertingConfigIndex() + + val response = indexDocWithAdminClient( + ScheduledJob.SCHEDULED_JOBS_INDEX, + UUIDs.base64UUID(), + emailAccount.toJsonStringWithType(), + refresh + ) + val emailAccountJson = jsonXContent.createParser( + NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, + response.entity.content + ).map() + return emailAccount.copy(id = emailAccountJson["_id"] as String) + } + + protected fun createRandomEmailAccount(refresh: Boolean = true): EmailAccount { + val emailAccount = randomEmailAccount() + val emailAccountID = createEmailAccount(emailAccount, refresh).id + return getEmailAccount(emailAccountID = emailAccountID) + } + + protected fun createRandomEmailAccountWithGivenName(refresh: Boolean = true, randomName: String): EmailAccount { + val emailAccount = randomEmailAccount(salt = randomName) + val emailAccountID = createEmailAccount(emailAccount, refresh).id + return getEmailAccount(emailAccountID = emailAccountID) + } + + protected fun getEmailGroup( + emailGroupID: String, + header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"), + ): EmailGroup { + val response = client().makeRequest("GET", "$EMAIL_GROUP_BASE_URI/$emailGroupID", null, header) + assertEquals("Unable to get email group $emailGroupID", RestStatus.OK, response.restStatus()) + + val parser = createParser(XContentType.JSON.xContent(), response.entity.content) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser) + + lateinit var id: String + var version: Long = 0 + lateinit var emailGroup: EmailGroup + + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + parser.nextToken() + + when (parser.currentName()) { + "_id" -> id = parser.text() + "_version" -> version = parser.longValue() + "email_group" -> emailGroup = EmailGroup.parse(parser) + } + } + + return emailGroup.copy(id = id, version = version) + } + + /** + * Email Groups are now deprecated in favor of the Notification plugin's configs. + * This method should only be used for checking legacy behavior/Notification migration scenarios. 
+     */
+    protected fun createEmailGroup(emailGroup: EmailGroup = getTestEmailGroup(), refresh: Boolean = true): EmailGroup {
+        // Create Alerting config index if it doesn't exist to avoid mapping issues with legacy destination indexing
+        createAlertingConfigIndex()
+
+        val response = indexDocWithAdminClient(
+            ScheduledJob.SCHEDULED_JOBS_INDEX,
+            UUIDs.base64UUID(),
+            emailGroup.toJsonStringWithType(),
+            refresh
+        )
+        val emailGroupJson = jsonXContent.createParser(
+            NamedXContentRegistry.EMPTY,
+            LoggingDeprecationHandler.INSTANCE,
+            response.entity.content
+        ).map()
+        return emailGroup.copy(id = emailGroupJson["_id"] as String)
+    }
+
+    protected fun createRandomEmailGroup(refresh: Boolean = true): EmailGroup {
+        val emailGroup = randomEmailGroup()
+        val emailGroupID = createEmailGroup(emailGroup, refresh).id
+        return getEmailGroup(emailGroupID = emailGroupID)
+    }
+
+    protected fun createRandomEmailGroupWithGivenName(refresh: Boolean = true, randomName: String): EmailGroup {
+        val emailGroup = randomEmailGroup(salt = randomName)
+        val emailGroupID = createEmailGroup(emailGroup, refresh).id
+        return getEmailGroup(emailGroupID = emailGroupID)
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    protected fun getDestination(destination: Destination): Map<String, Any> {
+        val response = client().makeRequest(
+            "GET",
+            "$DESTINATION_BASE_URI/${destination.id}"
+        )
+        assertEquals("Unable to get destination", RestStatus.OK, response.restStatus())
+        val destinationJson = jsonXContent.createParser(
+            NamedXContentRegistry.EMPTY,
+            LoggingDeprecationHandler.INSTANCE,
+            response.entity.content
+        ).map()
+        assertUserNull(destinationJson as HashMap<String, Any>)
+        return (destinationJson["destinations"] as List<Any>)[0] as Map<String, Any>
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    protected fun getDestinations(dataMap: Map<String, Any> = emptyMap()): List<Map<String, Any>> {
+        return getDestinations(client(), dataMap)
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    protected fun getDestinations(
+        client: RestClient,
+        dataMap: Map<String, Any> = emptyMap(),
+        header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"),
+    ): List<Map<String, Any>> {
+        var baseEndpoint = "$DESTINATION_BASE_URI?"
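+        // Parameters are appended naively as "key=value&" pairs, so a hypothetical
+        // dataMap of mapOf("destinationType" to "slack") produces
+        // "$DESTINATION_BASE_URI?destinationType=slack&"; values are not URL-encoded here.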
+        for (entry in dataMap.entries) {
+            baseEndpoint += "${entry.key}=${entry.value}&"
+        }
+
+        val response = client.makeRequest(
+            "GET",
+            baseEndpoint,
+            null,
+            header
+        )
+        assertEquals("Unable to get destinations", RestStatus.OK, response.restStatus())
+        val destinationJson = jsonXContent.createParser(
+            NamedXContentRegistry.EMPTY,
+            LoggingDeprecationHandler.INSTANCE,
+            response.entity.content
+        ).map()
+        return destinationJson["destinations"] as List<Map<String, Any>>
+    }
+
+    protected fun getTestDestination(): Destination {
+        return Destination(
+            type = DestinationType.TEST_ACTION,
+            name = "test",
+            user = randomUser(),
+            lastUpdateTime = Instant.now(),
+            chime = null,
+            slack = null,
+            customWebhook = null,
+            email = null
+        )
+    }
+
+    fun getSlackDestination(): Destination {
+        val slack = Slack("https://hooks.slack.com/services/slackId")
+        return Destination(
+            type = DestinationType.SLACK,
+            name = "test",
+            user = randomUser(),
+            lastUpdateTime = Instant.now(),
+            chime = null,
+            slack = slack,
+            customWebhook = null,
+            email = null
+        )
+    }
+
+    fun getChimeDestination(): Destination {
+        val chime = Chime("https://hooks.chime.aws/incomingwebhooks/chimeId")
+        return Destination(
+            type = DestinationType.CHIME,
+            name = "test",
+            user = randomUser(),
+            lastUpdateTime = Instant.now(),
+            chime = chime,
+            slack = null,
+            customWebhook = null,
+            email = null
+        )
+    }
+
+    fun getCustomWebhookDestination(): Destination {
+        val customWebhook = CustomWebhook(
+            "https://hooks.slack.com/services/customWebhookId",
+            null,
+            null,
+            80,
+            null,
+            null,
+            emptyMap(),
+            emptyMap(),
+            null,
+            null
+        )
+        return Destination(
+            type = DestinationType.CUSTOM_WEBHOOK,
+            name = "test",
+            user = randomUser(),
+            lastUpdateTime = Instant.now(),
+            chime = null,
+            slack = null,
+            customWebhook = customWebhook,
+            email = null
+        )
+    }
+
+    private fun getTestEmailAccount(): EmailAccount {
+        return EmailAccount(
+            name = "test",
+            email = "test@email.com",
+            host = "smtp.com",
+            port = 25,
+            method = EmailAccount.MethodType.NONE,
+            username = null,
+            password = null
+        )
+    }
+
+    private fun getTestEmailGroup(): EmailGroup {
+        return EmailGroup(
+            name = "test",
+            emails = listOf()
+        )
+    }
+
+    protected fun verifyIndexSchemaVersion(index: String, expectedVersion: Int) {
+        val indexMapping = client().getIndexMapping(index)
+        val indexName = indexMapping.keys.toList()[0]
+        val mappings = indexMapping.stringMap(indexName)?.stringMap("mappings")
+        var version = 0
+        if (mappings!!.containsKey("_meta")) {
+            val meta = mappings.stringMap("_meta")
+            if (meta!!.containsKey("schema_version")) version = meta.get("schema_version") as Int
+        }
+        assertEquals(expectedVersion, version)
+    }
+
+    protected fun createAlert(alert: Alert): Alert {
+        val response = adminClient().makeRequest(
+            "POST",
+            "/${AlertIndices.ALERT_INDEX}/_doc?refresh=true&routing=${alert.monitorId}",
+            emptyMap(),
+            alert.toHttpEntityWithUser()
+        )
+        assertEquals("Unable to create a new alert", RestStatus.CREATED, response.restStatus())
+
+        val alertJson = jsonXContent.createParser(
+            NamedXContentRegistry.EMPTY,
+            LoggingDeprecationHandler.INSTANCE,
+            response.entity.content
+        ).map()
+
+        assertNull(alertJson["monitor_user"])
+        return alert.copy(id = alertJson["_id"] as String, version = (alertJson["_version"] as Int).toLong())
+    }
(withMetadata) { + return getMonitor(monitorId = monitorId, header = BasicHeader(HttpHeaders.USER_AGENT, "OpenSearch-Dashboards")) + } + return getMonitor(monitorId = monitorId) + } + + protected fun createRandomDocumentMonitor(refresh: Boolean = false, withMetadata: Boolean = false): Monitor { + val monitor = randomDocumentLevelMonitor(withMetadata = withMetadata) + val monitorId = createMonitor(monitor, refresh).id + if (withMetadata) { + return getMonitor(monitorId = monitorId, header = BasicHeader(HttpHeaders.USER_AGENT, "OpenSearch-Dashboards")) + } + return getMonitor(monitorId = monitorId) + } + + @Suppress("UNCHECKED_CAST") + protected fun updateMonitor(monitor: Monitor, refresh: Boolean = false): Monitor { + val response = client().makeRequest( + "PUT", + "${monitor.relativeUrl()}?refresh=$refresh", + emptyMap(), + monitor.toHttpEntity() + ) + assertEquals("Unable to update a monitor", RestStatus.OK, response.restStatus()) + assertUserNull(response.asMap()["monitor"] as Map) + return getMonitor(monitorId = monitor.id) + } + + @Suppress("UNCHECKED_CAST") + protected fun updateWorkflow(workflow: Workflow, refresh: Boolean = false): Workflow { + val response = client().makeRequest( + "PUT", + "${workflow.relativeUrl()}?refresh=$refresh", + emptyMap(), + workflow.toHttpEntity() + ) + assertEquals("Unable to update a workflow", RestStatus.OK, response.restStatus()) + assertUserNull(response.asMap()["workflow"] as Map) + return getWorkflow(workflowId = workflow.id) + } + + protected fun updateMonitorWithClient( + client: RestClient, + monitor: Monitor, + rbacRoles: List = emptyList(), + refresh: Boolean = true, + ): Monitor { + val response = client.makeRequest( + "PUT", + "${monitor.relativeUrl()}?refresh=$refresh", + emptyMap(), + createMonitorEntityWithBackendRoles(monitor, rbacRoles) + ) + assertEquals("Unable to update a monitor", RestStatus.OK, response.restStatus()) + assertUserNull(response.asMap()["monitor"] as Map) + return getMonitor(monitorId = monitor.id) + } + + protected fun updateWorkflowWithClient( + client: RestClient, + workflow: Workflow, + rbacRoles: List = emptyList(), + refresh: Boolean = true, + ): Workflow { + val response = client.makeRequest( + "PUT", + "${workflow.relativeUrl()}?refresh=$refresh", + emptyMap(), + createWorkflowEntityWithBackendRoles(workflow, rbacRoles) + ) + assertEquals("Unable to update a workflow", RestStatus.OK, response.restStatus()) + assertUserNull(response.asMap()["workflow"] as Map) + return getWorkflow(workflowId = workflow.id) + } + + protected fun getMonitor(monitorId: String, header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json")): Monitor { + val response = client().makeRequest("GET", "$ALERTING_BASE_URI/$monitorId", null, header) + assertEquals("Unable to get monitor $monitorId", RestStatus.OK, response.restStatus()) + + val parser = createParser(XContentType.JSON.xContent(), response.entity.content) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser) + + lateinit var id: String + var version: Long = 0 + lateinit var monitor: Monitor + + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + parser.nextToken() + + when (parser.currentName()) { + "_id" -> id = parser.text() + "_version" -> version = parser.longValue() + "monitor" -> monitor = Monitor.parse(parser) + "associated_workflows" -> { + XContentParserUtils.ensureExpectedToken( + XContentParser.Token.START_ARRAY, + parser.currentToken(), + parser + ) + while (parser.nextToken() != 
XContentParser.Token.END_ARRAY) {
+                        // do nothing
+                    }
+                }
+            }
+        }
+
+        assertUserNull(monitor)
+        return monitor.copy(id = id, version = version)
+    }
+
+    // TODO: understand why doc alerts won't work with the normal searchAlerts function
+    protected fun searchAlertsWithFilter(
+        monitor: Monitor,
+        indices: String = AlertIndices.ALERT_INDEX,
+        refresh: Boolean = true,
+    ): List<Alert> {
+        if (refresh) refreshIndex(indices)
+
+        val request = """
+            { "version" : true,
+              "query": { "match_all": {} }
+            }
+        """.trimIndent()
+        val httpResponse = adminClient().makeRequest("GET", "/$indices/_search", StringEntity(request, APPLICATION_JSON))
+        assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus())
+
+        val searchResponse = SearchResponse.fromXContent(createParser(jsonXContent, httpResponse.entity.content))
+        return searchResponse.hits.hits.map {
+            val xcp = createParser(jsonXContent, it.sourceRef).also { it.nextToken() }
+            Alert.parse(xcp, it.id, it.version)
+        }.filter { alert -> alert.monitorId == monitor.id }
+    }
+
+    protected fun createFinding(
+        monitorId: String = "NO_ID",
+        monitorName: String = "NO_NAME",
+        index: String = "testIndex",
+        docLevelQueries: List<DocLevelQuery> = listOf(
+            DocLevelQuery(query = "test_field:\"us-west-2\"", name = "testQuery", fields = listOf())
+        ),
+        matchingDocIds: List<String>,
+    ): String {
+        val finding = Finding(
+            id = UUID.randomUUID().toString(),
+            relatedDocIds = matchingDocIds,
+            monitorId = monitorId,
+            monitorName = monitorName,
+            index = index,
+            docLevelQueries = docLevelQueries,
+            timestamp = Instant.now()
+        )
+
+        val findingStr = finding.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS).string()
+
+        indexDoc(FINDING_HISTORY_WRITE_INDEX, finding.id, findingStr)
+        return finding.id
+    }
+
+    protected fun searchFindings(
+        monitor: Monitor,
+        indices: String = AlertIndices.ALL_FINDING_INDEX_PATTERN,
+        refresh: Boolean = true,
+    ): List<Finding> {
+        if (refresh) refreshIndex(indices)
+
+        val request = """
+            { "version" : true,
+              "query": { "match_all": {} }
+            }
+        """.trimIndent()
+        val httpResponse = adminClient().makeRequest("GET", "/$indices/_search", StringEntity(request, APPLICATION_JSON))
+        assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus())
+
+        val searchResponse = SearchResponse.fromXContent(createParser(jsonXContent, httpResponse.entity.content))
+        return searchResponse.hits.hits.map {
+            val xcp = createParser(jsonXContent, it.sourceRef).also { it.nextToken() }
+            Finding.parse(xcp)
+        }.filter { finding -> finding.monitorId == monitor.id }
+    }
+
+    protected fun searchAlerts(monitor: Monitor, indices: String = AlertIndices.ALERT_INDEX, refresh: Boolean = true): List<Alert> {
+        try {
+            if (refresh) refreshIndex(indices)
+        } catch (e: Exception) {
+            logger.warn("Could not refresh index $indices because: ${e.message}")
+            return emptyList()
+        }
+
+        // If this is a test monitor (it doesn't have an ID), no alerts will be saved for it.
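+        // Alerts are indexed with the monitor ID as the routing key, so pass the same routing when the monitor has an ID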
+ val searchParams = if (monitor.id != Monitor.NO_ID) mapOf("routing" to monitor.id) else mapOf() + val request = """ + { "version" : true, + "query" : { "term" : { "${Alert.MONITOR_ID_FIELD}" : "${monitor.id}" } } + } + """.trimIndent() + val httpResponse = adminClient().makeRequest("GET", "/$indices/_search", searchParams, StringEntity(request, APPLICATION_JSON)) + assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) + + val searchResponse = SearchResponse.fromXContent(createParser(jsonXContent, httpResponse.entity.content)) + return searchResponse.hits.hits.map { + val xcp = createParser(jsonXContent, it.sourceRef).also { it.nextToken() } + Alert.parse(xcp, it.id, it.version) + } + } + + protected fun acknowledgeAlerts(monitor: Monitor, vararg alerts: Alert): Response { + val request = XContentFactory.jsonBuilder().startObject() + .array("alerts", *alerts.map { it.id }.toTypedArray()) + .endObject() + .string() + .let { StringEntity(it, APPLICATION_JSON) } + + val response = client().makeRequest( + "POST", + "${monitor.relativeUrl()}/_acknowledge/alerts?refresh=true", + emptyMap(), + request + ) + assertEquals("Acknowledge call failed.", RestStatus.OK, response.restStatus()) + return response + } + + protected fun acknowledgeChainedAlerts(workflowId: String, vararg alertId: String): Response { + val request = jsonBuilder().startObject() + .array("alerts", *alertId.map { it }.toTypedArray()) + .endObject() + .string() + .let { StringEntity(it, APPLICATION_JSON) } + + val response = client().makeRequest( + "POST", "${AlertingPlugin.WORKFLOW_BASE_URI}/$workflowId/_acknowledge/alerts", + emptyMap(), request + ) + assertEquals("Acknowledge call failed.", RestStatus.OK, response.restStatus()) + return response + } + + protected fun getAlerts( + client: RestClient, + dataMap: Map = emptyMap(), + header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"), + ): Response { + var baseEndpoint = "$ALERTING_BASE_URI/alerts?" + for (entry in dataMap.entries) { + baseEndpoint += "${entry.key}=${entry.value}&" + } + + val response = client.makeRequest("GET", baseEndpoint, null, header) + assertEquals("Get call failed.", RestStatus.OK, response.restStatus()) + return response + } + + protected fun getAlerts( + dataMap: Map = emptyMap(), + header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"), + ): Response { + return getAlerts(client(), dataMap, header) + } + + protected fun refreshIndex(index: String): Response { + val response = client().makeRequest("POST", "/$index/_refresh?expand_wildcards=all") + assertEquals("Unable to refresh index", RestStatus.OK, response.restStatus()) + return response + } + + protected fun deleteIndex(index: String): Response { + val response = adminClient().makeRequest("DELETE", "/$index") + assertEquals("Unable to delete index", RestStatus.OK, response.restStatus()) + return response + } + + protected fun executeMonitor(monitorId: String, params: Map = mutableMapOf()): Response { + return executeMonitor(client(), monitorId, params) + } + + protected fun executeWorkflow(workflowId: String, params: Map = mutableMapOf()): Response { + return executeWorkflow(client(), workflowId, params) + } + + protected fun getWorkflowAlerts( + workflowId: String, + alertId: String? 
= "", + getAssociatedAlerts: Boolean = true, + ): Response { + return getWorkflowAlerts( + client(), + mutableMapOf(Pair("workflowIds", workflowId), Pair("getAssociatedAlerts", getAssociatedAlerts), Pair("alertIds", alertId!!)) + ) + } + + protected fun getWorkflowAlerts( + client: RestClient, + dataMap: Map = emptyMap(), + header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"), + ): Response { + var baseEndpoint = "$WORKFLOW_ALERTING_BASE_URI/alerts?" + for (entry in dataMap.entries) { + baseEndpoint += "${entry.key}=${entry.value}&" + } + + val response = client.makeRequest("GET", baseEndpoint, null, header) + assertEquals("Get call failed.", RestStatus.OK, response.restStatus()) + return response + } + + protected fun executeMonitor(client: RestClient, monitorId: String, params: Map = mutableMapOf()): Response { + return client.makeRequest("POST", "$ALERTING_BASE_URI/$monitorId/_execute", params) + } + + protected fun executeWorkflow(client: RestClient, workflowId: String, params: Map = mutableMapOf()): Response { + return client.makeRequest("POST", "$WORKFLOW_ALERTING_BASE_URI/$workflowId/_execute", params) + } + + protected fun executeMonitor(monitor: Monitor, params: Map = mapOf()): Response { + return executeMonitor(client(), monitor, params) + } + + protected fun executeMonitor(client: RestClient, monitor: Monitor, params: Map = mapOf()): Response = + client.makeRequest("POST", "$ALERTING_BASE_URI/_execute", params, monitor.toHttpEntityWithUser()) + + protected fun searchFindings(params: Map = mutableMapOf()): GetFindingsResponse { + var baseEndpoint = "${AlertingPlugin.FINDING_BASE_URI}/_search?" + for (entry in params.entries) { + baseEndpoint += "${entry.key}=${entry.value}&" + } + + val response = client().makeRequest("GET", baseEndpoint) + + assertEquals("Unable to retrieve findings", RestStatus.OK, response.restStatus()) + + val parser = createParser(XContentType.JSON.xContent(), response.entity.content) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser) + + var totalFindings = 0 + val findings = mutableListOf() + + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + parser.nextToken() + + when (parser.currentName()) { + "total_findings" -> totalFindings = parser.intValue() + "findings" -> { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.currentToken(), parser) + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + findings.add(FindingWithDocs.parse(parser)) + } + } + } + } + + return GetFindingsResponse(response.restStatus(), totalFindings, findings) + } + + protected fun searchMonitors(): SearchResponse { + var baseEndpoint = "${AlertingPlugin.MONITOR_BASE_URI}/_search?" 
+ val request = """ + { "version" : true, + "query": { "match_all": {} } + } + """.trimIndent() + val httpResponse = adminClient().makeRequest("POST", baseEndpoint, StringEntity(request, APPLICATION_JSON)) + assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) + return SearchResponse.fromXContent(createParser(jsonXContent, httpResponse.entity.content)) + } + + protected fun indexDoc(index: String, id: String, doc: String, refresh: Boolean = true): Response { + return indexDoc(client(), index, id, doc, refresh) + } + + protected fun indexDocWithAdminClient(index: String, id: String, doc: String, refresh: Boolean = true): Response { + return indexDoc(adminClient(), index, id, doc, refresh) + } + + private fun indexDoc(client: RestClient, index: String, id: String, doc: String, refresh: Boolean = true): Response { + val requestBody = StringEntity(doc, APPLICATION_JSON) + val params = if (refresh) mapOf("refresh" to "true") else mapOf() + val response = client.makeRequest("POST", "$index/_doc/$id?op_type=create", params, requestBody) + assertTrue( + "Unable to index doc: '${doc.take(15)}...' to index: '$index'", + listOf(RestStatus.OK, RestStatus.CREATED).contains(response.restStatus()) + ) + return response + } + + protected fun deleteDoc(index: String, id: String, refresh: Boolean = true): Response { + val params = if (refresh) mapOf("refresh" to "true") else mapOf() + val response = client().makeRequest("DELETE", "$index/_doc/$id", params) + assertTrue("Unable to delete doc with ID $id in index: '$index'", listOf(RestStatus.OK).contains(response.restStatus())) + return response + } + + /** A test index that can be used across tests. Feel free to add new fields but don't remove any. */ + protected fun createTestIndex(index: String = randomAlphaOfLength(10).lowercase(Locale.ROOT)): String { + createIndex( + index, + Settings.EMPTY, + """ + "properties" : { + "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" }, + "test_field" : { "type" : "keyword" }, + "number" : { "type" : "keyword" } + } + """.trimIndent() + ) + return index + } + + protected fun createTestIndex(index: String, mapping: String): String { + createIndex(index, Settings.EMPTY, mapping.trimIndent()) + return index + } + + protected fun createTestIndex(index: String, mapping: String?, alias: String): String { + createIndex(index, Settings.EMPTY, mapping?.trimIndent(), alias) + return index + } + + protected fun createTestConfigIndex(index: String = "." 
+ randomAlphaOfLength(10).lowercase(Locale.ROOT)): String { + try { + createIndex( + index, + Settings.builder().build(), + """ + "properties" : { + "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" } + } + """.trimIndent() + ) + } catch (ex: WarningFailureException) { + // ignore + } + return index + } + + protected fun createTestAlias( + alias: String = randomAlphaOfLength(10).lowercase(Locale.ROOT), + numOfAliasIndices: Int = randomIntBetween(1, 10), + includeWriteIndex: Boolean = true, + ): MutableMap> { + return createTestAlias(alias = alias, indices = randomAliasIndices(alias, numOfAliasIndices, includeWriteIndex)) + } + + protected fun createTestAlias( + alias: String = randomAlphaOfLength(10).lowercase(Locale.ROOT), + indices: Map = randomAliasIndices( + alias = alias, + num = randomIntBetween(1, 10), + includeWriteIndex = true + ), + ): MutableMap> { + val indicesMap = mutableMapOf() + val indicesJson = jsonBuilder().startObject().startArray("actions") + indices.keys.map { + val indexName = createTestIndex(index = it, mapping = "") + val isWriteIndex = indices.getOrDefault(indexName, false) + indicesMap[indexName] = isWriteIndex + val indexMap = mapOf( + "add" to mapOf( + "index" to indexName, + "alias" to alias, + "is_write_index" to isWriteIndex + ) + ) + indicesJson.value(indexMap) + } + val requestBody = indicesJson.endArray().endObject().string() + client().makeRequest("POST", "/_aliases", emptyMap(), StringEntity(requestBody, APPLICATION_JSON)) + return mutableMapOf(alias to indicesMap) + } + + protected fun createDataStream(datastream: String, mappings: String?, useComponentTemplate: Boolean) { + val indexPattern = "$datastream*" + var componentTemplateMappings = "\"properties\": {" + + " \"netflow.destination_transport_port\":{ \"type\": \"long\" }," + + " \"netflow.destination_ipv4_address\":{ \"type\": \"ip\" }" + + "}" + if (mappings != null) { + componentTemplateMappings = mappings + } + if (useComponentTemplate) { + // Setup index_template + createComponentTemplateWithMappings( + "my_ds_component_template-$datastream", + componentTemplateMappings + ) + } + createComposableIndexTemplate( + "my_index_template_ds-$datastream", + listOf(indexPattern), + (if (useComponentTemplate) "my_ds_component_template-$datastream" else null), + mappings, + true, + 0 + ) + createDataStream(datastream) + } + + protected fun createDataStream(datastream: String? = randomAlphaOfLength(10).lowercase(Locale.ROOT)) { + client().makeRequest("PUT", "_data_stream/$datastream") + } + + protected fun deleteDataStream(datastream: String) { + client().makeRequest("DELETE", "_data_stream/$datastream") + } + + protected fun createIndexAlias(alias: String, mappings: String?) 
{ + val indexPattern = "$alias*" + var componentTemplateMappings = "\"properties\": {" + + " \"netflow.destination_transport_port\":{ \"type\": \"long\" }," + + " \"netflow.destination_ipv4_address\":{ \"type\": \"ip\" }" + + "}" + if (mappings != null) { + componentTemplateMappings = mappings + } + createComponentTemplateWithMappings( + "my_alias_component_template-$alias", + componentTemplateMappings + ) + createComposableIndexTemplate( + "my_index_template_alias-$alias", + listOf(indexPattern), + "my_alias_component_template-$alias", + mappings, + false, + 0 + ) + createTestIndex( + "$alias-000001", + null, + """ + "$alias": { + "is_write_index": true + } + """.trimIndent() + ) + } + + protected fun deleteIndexAlias(alias: String) { + client().makeRequest("DELETE", "$alias*/_alias/$alias") + } + + protected fun createComponentTemplateWithMappings(componentTemplateName: String, mappings: String?) { + val body = """{"template" : { "mappings": {$mappings} }}""" + client().makeRequest( + "PUT", + "_component_template/$componentTemplateName", + emptyMap(), + StringEntity(body, ContentType.APPLICATION_JSON), + BasicHeader("Content-Type", "application/json") + ) + } + + protected fun createComposableIndexTemplate( + templateName: String, + indexPatterns: List, + componentTemplateName: String?, + mappings: String?, + isDataStream: Boolean, + priority: Int + ) { + var body = "{\n" + if (isDataStream) { + body += "\"data_stream\": { }," + } + body += "\"index_patterns\": [" + + indexPatterns.stream().collect( + Collectors.joining(",", "\"", "\"") + ) + "]," + if (componentTemplateName == null) { + body += "\"template\": {\"mappings\": {$mappings}}," + } + if (componentTemplateName != null) { + body += "\"composed_of\": [\"$componentTemplateName\"]," + } + body += "\"priority\":$priority}" + client().makeRequest( + "PUT", + "_index_template/$templateName", + emptyMap(), + StringEntity(body, APPLICATION_JSON), + BasicHeader("Content-Type", "application/json") + ) + } + + protected fun getDatastreamWriteIndex(datastream: String): String { + val response = client().makeRequest("GET", "_data_stream/$datastream", emptyMap(), null) + var respAsMap = responseAsMap(response) + if (respAsMap.containsKey("data_streams")) { + respAsMap = (respAsMap["data_streams"] as ArrayList>)[0] + val indices = respAsMap["indices"] as List> + val index = indices.last() + return index["index_name"] as String + } else { + respAsMap = respAsMap[datastream] as Map + } + val indices = respAsMap["indices"] as Array + return indices.last() + } + + protected fun rolloverDatastream(datastream: String) { + client().makeRequest( + "POST", + datastream + "/_rollover", + emptyMap(), + null + ) + } + + protected fun randomAliasIndices( + alias: String, + num: Int = randomIntBetween(1, 10), + includeWriteIndex: Boolean = true, + ): Map { + val indices = mutableMapOf() + val writeIndex = randomIntBetween(0, num - 1) + for (i: Int in 0 until num) { + var indexName = randomAlphaOfLength(10).lowercase(Locale.ROOT) + while (indexName.equals(alias) || indices.containsKey(indexName)) + indexName = randomAlphaOfLength(10).lowercase(Locale.ROOT) + indices[indexName] = includeWriteIndex && i == writeIndex + } + return indices + } + + protected fun insertSampleTimeSerializedData(index: String, data: List) { + data.forEachIndexed { i, value -> + val twoMinsAgo = ZonedDateTime.now().minus(2, ChronoUnit.MINUTES).truncatedTo(ChronoUnit.MILLIS) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(twoMinsAgo) + val testDoc = """ + { + 
"test_strict_date_time": "$testTime", + "test_field": "$value", + "number": "$i" + } + """.trimIndent() + // Indexing documents with deterministic doc id to allow for easy selected deletion during testing + indexDoc(index, (i + 1).toString(), testDoc) + } + } + + protected fun deleteDataWithDocIds(index: String, docIds: List) { + docIds.forEach { + deleteDoc(index, it) + } + } + + fun putAlertMappings(mapping: String? = null) { + val mappingHack = if (mapping != null) mapping else AlertIndices.alertMapping().trimStart('{').trimEnd('}') + val encodedHistoryIndex = URLEncoder.encode(AlertIndices.ALERT_HISTORY_INDEX_PATTERN, Charsets.UTF_8.toString()) + val settings = Settings.builder().put("index.hidden", true).build() + createIndex(AlertIndices.ALERT_INDEX, settings, mappingHack) + createIndex(encodedHistoryIndex, settings, mappingHack, "\"${AlertIndices.ALERT_HISTORY_WRITE_INDEX}\" : {}") + } + + fun putFindingMappings(mapping: String? = null) { + val mappingHack = if (mapping != null) mapping else AlertIndices.findingMapping().trimStart('{').trimEnd('}') + val encodedHistoryIndex = URLEncoder.encode(AlertIndices.FINDING_HISTORY_INDEX_PATTERN, Charsets.UTF_8.toString()) + val settings = Settings.builder().put("index.hidden", true).build() +// createIndex(AlertIndices.FINDING_HISTORY_WRITE_INDEX, settings, mappingHack) + createIndex(encodedHistoryIndex, settings, mappingHack, "\"${AlertIndices.FINDING_HISTORY_WRITE_INDEX}\" : {}") + } + + fun scheduledJobMappings(): String { + return javaClass.classLoader.getResource("mappings/scheduled-jobs.json").readText() + } + + /** Creates the Alerting config index if it does not exist */ + fun createAlertingConfigIndex(mapping: String? = null) { + val indexExistsResponse = client().makeRequest("HEAD", ScheduledJob.SCHEDULED_JOBS_INDEX) + if (indexExistsResponse.restStatus() == RestStatus.NOT_FOUND) { + val mappingHack = mapping ?: scheduledJobMappings().trimStart('{').trimEnd('}') + val settings = Settings.builder().put("index.hidden", true).build() + createIndex(ScheduledJob.SCHEDULED_JOBS_INDEX, settings, mappingHack) + } + } + + protected fun Response.restStatus(): RestStatus { + return RestStatus.fromCode(this.statusLine.statusCode) + } + + protected fun Monitor.toHttpEntity(): HttpEntity { + return StringEntity(toJsonString(), APPLICATION_JSON) + } + + private fun Monitor.toJsonString(): String { + val builder = XContentFactory.jsonBuilder() + return shuffleXContent(toXContent(builder, ToXContent.EMPTY_PARAMS)).string() + } + + protected fun Monitor.toHttpEntityWithUser(): HttpEntity { + return StringEntity(toJsonStringWithUser(), APPLICATION_JSON) + } + + private fun Monitor.toJsonStringWithUser(): String { + val builder = jsonBuilder() + return shuffleXContent(toXContentWithUser(builder, ToXContent.EMPTY_PARAMS)).string() + } + + protected fun Destination.toHttpEntity(): HttpEntity { + return StringEntity(toJsonString(), APPLICATION_JSON) + } + + protected fun Destination.toJsonString(): String { + val builder = jsonBuilder() + return shuffleXContent(toXContent(builder)).string() + } + + protected fun Destination.toJsonStringWithType(): String { + val builder = jsonBuilder() + return shuffleXContent( + toXContent(builder, ToXContent.MapParams(mapOf("with_type" to "true"))) + ).string() + } + + protected fun EmailAccount.toHttpEntity(): HttpEntity { + return StringEntity(toJsonString(), APPLICATION_JSON) + } + + protected fun EmailAccount.toJsonString(): String { + val builder = jsonBuilder() + return 
shuffleXContent(toXContent(builder)).string() + } + + protected fun EmailAccount.toJsonStringWithType(): String { + val builder = jsonBuilder() + return shuffleXContent( + toXContent(builder, ToXContent.MapParams(mapOf("with_type" to "true"))) + ).string() + } + + protected fun EmailGroup.toHttpEntity(): HttpEntity { + return StringEntity(toJsonString(), APPLICATION_JSON) + } + + protected fun EmailGroup.toJsonString(): String { + val builder = jsonBuilder() + return shuffleXContent(toXContent(builder)).string() + } + + protected fun EmailGroup.toJsonStringWithType(): String { + val builder = jsonBuilder() + return shuffleXContent( + toXContent(builder, ToXContent.MapParams(mapOf("with_type" to "true"))) + ).string() + } + + protected fun Alert.toHttpEntityWithUser(): HttpEntity { + return StringEntity(toJsonStringWithUser(), APPLICATION_JSON) + } + + private fun Alert.toJsonStringWithUser(): String { + val builder = jsonBuilder() + return shuffleXContent(toXContentWithUser(builder)).string() + } + + protected fun Monitor.relativeUrl() = "$ALERTING_BASE_URI/$id" + + // Useful settings when debugging to prevent timeouts + override fun restClientSettings(): Settings { + return if (isDebuggingTest || isDebuggingRemoteCluster) { + Settings.builder() + .put(CLIENT_SOCKET_TIMEOUT, TimeValue.timeValueMinutes(10)) + .build() + } else { + super.restClientSettings() + } + } + + fun RestClient.getClusterSettings(settings: Map): Map { + val response = this.makeRequest("GET", "_cluster/settings", settings) + assertEquals(RestStatus.OK, response.restStatus()) + return response.asMap() + } + + fun RestClient.getIndexMapping(index: String): Map { + val response = this.makeRequest("GET", "$index/_mapping") + assertEquals(RestStatus.OK, response.restStatus()) + return response.asMap() + } + + fun RestClient.updateSettings(setting: String, value: Any): Map { + val settings = jsonBuilder() + .startObject() + .startObject("persistent") + .field(setting, value) + .endObject() + .endObject() + .string() + val response = this.makeRequest("PUT", "_cluster/settings", StringEntity(settings, APPLICATION_JSON)) + assertEquals(RestStatus.OK, response.restStatus()) + return response.asMap() + } + + @Suppress("UNCHECKED_CAST") + fun Map.opendistroSettings(): Map? { + val map = this as Map>>> + return map["defaults"]?.get("opendistro")?.get("alerting") + } + + @Suppress("UNCHECKED_CAST") + fun Map.stringMap(key: String): Map? 
{ + val map = this as Map> + return map[key] + } + + fun getAlertingStats(metrics: String = ""): Map { + val monitorStatsResponse = client().makeRequest("GET", "/_plugins/_alerting/stats$metrics") + val responseMap = createParser(XContentType.JSON.xContent(), monitorStatsResponse.entity.content).map() + return responseMap + } + + fun enableScheduledJob(): Response { + val updateResponse = client().makeRequest( + "PUT", + "_cluster/settings", + emptyMap(), + StringEntity( + XContentFactory.jsonBuilder().startObject().field("persistent") + .startObject().field(ScheduledJobSettings.SWEEPER_ENABLED.key, true).endObject() + .endObject().string(), + ContentType.APPLICATION_JSON + ) + ) + return updateResponse + } + + fun disableScheduledJob(): Response { + val updateResponse = client().makeRequest( + "PUT", + "_cluster/settings", + emptyMap(), + StringEntity( + XContentFactory.jsonBuilder().startObject().field("persistent") + .startObject().field(ScheduledJobSettings.SWEEPER_ENABLED.key, false).endObject() + .endObject().string(), + ContentType.APPLICATION_JSON + ) + ) + return updateResponse + } + + fun enableFilterBy() { + val updateResponse = client().makeRequest( + "PUT", + "_cluster/settings", + emptyMap(), + StringEntity( + XContentFactory.jsonBuilder().startObject().field("persistent") + .startObject().field(AlertingSettings.FILTER_BY_BACKEND_ROLES.key, true).endObject() + .endObject().string(), + ContentType.APPLICATION_JSON + ) + ) + assertEquals(updateResponse.statusLine.toString(), 200, updateResponse.statusLine.statusCode) + } + + fun disableFilterBy() { + val updateResponse = client().makeRequest( + "PUT", + "_cluster/settings", + emptyMap(), + StringEntity( + XContentFactory.jsonBuilder().startObject().field("persistent") + .startObject().field(AlertingSettings.FILTER_BY_BACKEND_ROLES.key, false).endObject() + .endObject().string(), + ContentType.APPLICATION_JSON + ) + ) + assertEquals(updateResponse.statusLine.toString(), 200, updateResponse.statusLine.statusCode) + } + + fun removeEmailFromAllowList() { + val allowedDestinations = DestinationType.values().toList() + .filter { destinationType -> destinationType != DestinationType.EMAIL } + .joinToString(prefix = "[", postfix = "]") { string -> "\"$string\"" } + client().updateSettings(DestinationSettings.ALLOW_LIST.key, allowedDestinations) + } + + fun createUser(name: String, backendRoles: Array) { + val request = Request("PUT", "/_plugins/_security/api/internalusers/$name") + val broles = backendRoles.joinToString { it -> "\"$it\"" } + var entity = " {\n" + + "\"password\": \"$password\",\n" + + "\"backend_roles\": [$broles],\n" + + "\"attributes\": {\n" + + "}} " + request.setJsonEntity(entity) + client().performRequest(request) + } + + fun patchUserBackendRoles(name: String, backendRoles: Array) { + val request = Request("PATCH", "/_plugins/_security/api/internalusers/$name") + val broles = backendRoles.joinToString { "\"$it\"" } + var entity = " [{\n" + + "\"op\": \"replace\",\n" + + "\"path\": \"/backend_roles\",\n" + + "\"value\": [$broles]\n" + + "}]" + request.setJsonEntity(entity) + client().performRequest(request) + } + + fun createIndexRole(name: String, index: String) { + val request = Request("PUT", "/_plugins/_security/api/roles/$name") + var entity = "{\n" + + "\"cluster_permissions\": [\n" + + "],\n" + + "\"index_permissions\": [\n" + + "{\n" + + "\"index_patterns\": [\n" + + "\"$index\"\n" + + "],\n" + + "\"dls\": \"\",\n" + + "\"fls\": [],\n" + + "\"masked_fields\": [],\n" + + "\"allowed_actions\": [\n" + + 
"\"crud\"\n" + + "]\n" + + "}\n" + + "],\n" + + "\"tenant_permissions\": []\n" + + "}" + request.setJsonEntity(entity) + client().performRequest(request) + } + + fun createCustomIndexRole(name: String, index: String, clusterPermissions: String?) { + val request = Request("PUT", "/_plugins/_security/api/roles/$name") + var entity = "{\n" + + "\"cluster_permissions\": [\n" + + "\"$clusterPermissions\"\n" + + "],\n" + + "\"index_permissions\": [\n" + + "{\n" + + "\"index_patterns\": [\n" + + "\"$index\"\n" + + "],\n" + + "\"dls\": \"\",\n" + + "\"fls\": [],\n" + + "\"masked_fields\": [],\n" + + "\"allowed_actions\": [\n" + + "\"crud\"\n" + + "]\n" + + "}\n" + + "],\n" + + "\"tenant_permissions\": []\n" + + "}" + request.setJsonEntity(entity) + client().performRequest(request) + } + + private fun createCustomIndexRole(name: String, index: String, clusterPermissions: List) { + val request = Request("PUT", "/_plugins/_security/api/roles/$name") + + val clusterPermissionsStr = + clusterPermissions.stream().map { p: String? -> "\"" + p + "\"" }.collect( + Collectors.joining(",") + ) + + var entity = "{\n" + + "\"cluster_permissions\": [\n" + + "$clusterPermissionsStr\n" + + "],\n" + + "\"index_permissions\": [\n" + + "{\n" + + "\"index_patterns\": [\n" + + "\"$index\"\n" + + "],\n" + + "\"dls\": \"\",\n" + + "\"fls\": [],\n" + + "\"masked_fields\": [],\n" + + "\"allowed_actions\": [\n" + + "\"crud\"\n" + + "]\n" + + "}\n" + + "],\n" + + "\"tenant_permissions\": []\n" + + "}" + request.setJsonEntity(entity) + client().performRequest(request) + } + + fun createIndexRoleWithDocLevelSecurity(name: String, index: String, dlsQuery: String, clusterPermissions: String? = "") { + val request = Request("PUT", "/_plugins/_security/api/roles/$name") + var entity = "{\n" + + "\"cluster_permissions\": [\n" + + "\"$clusterPermissions\"\n" + + "],\n" + + "\"index_permissions\": [\n" + + "{\n" + + "\"index_patterns\": [\n" + + "\"$index\"\n" + + "],\n" + + "\"dls\": \"$dlsQuery\",\n" + + "\"fls\": [],\n" + + "\"masked_fields\": [],\n" + + "\"allowed_actions\": [\n" + + "\"crud\"\n" + + "]\n" + + "}\n" + + "],\n" + + "\"tenant_permissions\": []\n" + + "}" + request.setJsonEntity(entity) + client().performRequest(request) + } + + fun createIndexRoleWithDocLevelSecurity(name: String, index: String, dlsQuery: String, clusterPermissions: List) { + val clusterPermissionsStr = + clusterPermissions.stream().map { p: String -> "\"" + getClusterPermissionsFromCustomRole(p) + "\"" }.collect( + Collectors.joining(",") + ) + + val request = Request("PUT", "/_plugins/_security/api/roles/$name") + var entity = "{\n" + + "\"cluster_permissions\": [\n" + + "$clusterPermissionsStr\n" + + "],\n" + + "\"index_permissions\": [\n" + + "{\n" + + "\"index_patterns\": [\n" + + "\"$index\"\n" + + "],\n" + + "\"dls\": \"$dlsQuery\",\n" + + "\"fls\": [],\n" + + "\"masked_fields\": [],\n" + + "\"allowed_actions\": [\n" + + "\"crud\"\n" + + "]\n" + + "}\n" + + "],\n" + + "\"tenant_permissions\": []\n" + + "}" + request.setJsonEntity(entity) + client().performRequest(request) + } + + fun createUserRolesMapping(role: String, users: Array) { + val request = Request("PUT", "/_plugins/_security/api/rolesmapping/$role") + val usersStr = users.joinToString { it -> "\"$it\"" } + var entity = "{ \n" + + " \"backend_roles\" : [ ],\n" + + " \"hosts\" : [ ],\n" + + " \"users\" : [$usersStr]\n" + + "}" + request.setJsonEntity(entity) + client().performRequest(request) + } + + fun updateRoleMapping(role: String, users: List, addUser: Boolean) { + val 
request = Request("PATCH", "/_plugins/_security/api/rolesmapping/$role") + val usersStr = users.joinToString { it -> "\"$it\"" } + + val op = if (addUser) "add" else "remove" + + val entity = "[{\n" + + " \"op\" : \"$op\",\n" + + " \"path\" : \"/users\",\n" + + " \"value\" : [$usersStr]\n" + + "}]" + + request.setJsonEntity(entity) + client().performRequest(request) + } + + fun deleteUser(name: String) { + client().makeRequest("DELETE", "/_plugins/_security/api/internalusers/$name") + } + + fun deleteRole(name: String) { + client().makeRequest("DELETE", "/_plugins/_security/api/roles/$name") + } + + fun deleteRoleMapping(name: String) { + client().makeRequest("DELETE", "/_plugins/_security/api/rolesmapping/$name") + } + + fun deleteRoleAndRoleMapping(role: String) { + deleteRoleMapping(role) + deleteRole(role) + } + + fun createUserWithTestData(user: String, index: String, role: String, backendRole: String) { + createUser(user, arrayOf(backendRole)) + createTestIndex(index) + createIndexRole(role, index) + createUserRolesMapping(role, arrayOf(user)) + } + + fun createUserWithTestDataAndCustomRole( + user: String, + index: String, + role: String, + backendRoles: List, + clusterPermissions: String?, + ) { + createUser(user, backendRoles.toTypedArray()) + createTestIndex(index) + createCustomIndexRole(role, index, clusterPermissions) + createUserRolesMapping(role, arrayOf(user)) + } + + fun createUserWithTestDataAndCustomRole( + user: String, + index: String, + role: String, + backendRoles: List, + clusterPermissions: List, + ) { + createUser(user, backendRoles.toTypedArray()) + createTestIndex(index) + createCustomIndexRole(role, index, clusterPermissions) + createUserRolesMapping(role, arrayOf(user)) + } + + fun createUserWithRoles( + user: String, + roles: List, + backendRoles: List, + isExistingRole: Boolean, + ) { + createUser(user, backendRoles.toTypedArray()) + for (role in roles) { + if (isExistingRole) { + updateRoleMapping(role, listOf(user), true) + } else { + createUserRolesMapping(role, arrayOf(user)) + } + } + } + + fun createUserWithDocLevelSecurityTestData( + user: String, + index: String, + role: String, + backendRole: String, + dlsQuery: String, + ) { + createUser(user, arrayOf(backendRole)) + createTestIndex(index) + createIndexRoleWithDocLevelSecurity(role, index, dlsQuery) + createUserRolesMapping(role, arrayOf(user)) + } + + fun createUserWithDocLevelSecurityTestDataAndCustomRole( + user: String, + index: String, + role: String, + backendRole: String, + dlsQuery: String, + clusterPermissions: String?, + ) { + createUser(user, arrayOf(backendRole)) + createTestIndex(index) + createIndexRoleWithDocLevelSecurity(role, index, dlsQuery) + createCustomIndexRole(role, index, clusterPermissions) + createUserRolesMapping(role, arrayOf(user)) + } + + fun getClusterPermissionsFromCustomRole(clusterPermissions: String): String? { + return ROLE_TO_PERMISSION_MAPPING.get(clusterPermissions) + } + + companion object { + internal interface IProxy { + val version: String? + var sessionId: String? + + fun getExecutionData(reset: Boolean): ByteArray? + fun dump(reset: Boolean) + fun reset() + } + + /* + * We need to be able to dump the jacoco coverage before the cluster shuts down. + * The new internal testing framework removed some gradle tasks we were listening to, + * to choose a good time to do it. This will dump the executionData to file after each test. 
+     * TODO: This is also currently just overwriting integTest.exec with the updated execData without
+     * resetting after writing each time. This can be improved to either write an exec file per test
+     * or by letting jacoco append to the file.
+     * */
+        @JvmStatic
+        @AfterClass
+        fun dumpCoverage() {
+            // jacoco.dir is set in opensearchplugin-coverage.gradle; if it isn't set, we don't
+            // want to collect coverage, so we can return early
+            val jacocoBuildPath = System.getProperty("jacoco.dir") ?: return
+            val serverUrl = "service:jmx:rmi:///jndi/rmi://127.0.0.1:7777/jmxrmi"
+            JMXConnectorFactory.connect(JMXServiceURL(serverUrl)).use { connector ->
+                val proxy = MBeanServerInvocationHandler.newProxyInstance(
+                    connector.mBeanServerConnection,
+                    ObjectName("org.jacoco:type=Runtime"),
+                    IProxy::class.java,
+                    false
+                )
+                proxy.getExecutionData(false)?.let {
+                    val path = PathUtils.get("$jacocoBuildPath/integTest.exec")
+                    Files.write(path, it)
+                }
+            }
+        }
+    }
+
+    protected fun createRandomWorkflow(monitorIds: List<String>, refresh: Boolean = false): Workflow {
+        val workflow = randomWorkflow(monitorIds = monitorIds)
+        return createWorkflow(workflow, refresh)
+    }
+
+    private fun createWorkflowEntityWithBackendRoles(workflow: Workflow, rbacRoles: List<String>?): HttpEntity {
+        if (rbacRoles == null) {
+            return workflow.toHttpEntity()
+        }
+        val temp = workflow.toJsonString()
+        val toReplace = temp.lastIndexOf("}")
+        val rbacString = rbacRoles.joinToString { "\"$it\"" }
+        val jsonString = temp.substring(0, toReplace) + ", \"rbac_roles\": [$rbacString] }"
+        return StringEntity(jsonString, ContentType.APPLICATION_JSON)
+    }
+
+    protected fun createWorkflowWithClient(
+        client: RestClient,
+        workflow: Workflow,
+        rbacRoles: List<String>? = null,
+        refresh: Boolean = true,
+    ): Workflow {
+        val response = client.makeRequest(
+            "POST", "$WORKFLOW_ALERTING_BASE_URI?refresh=$refresh", emptyMap(),
+            createWorkflowEntityWithBackendRoles(workflow, rbacRoles)
+        )
+        assertEquals("Unable to create a new workflow", RestStatus.CREATED, response.restStatus())
+
+        val workflowJson = jsonXContent.createParser(
+            NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE,
+            response.entity.content
+        ).map()
+        assertUserNull(workflowJson as HashMap<String, Any>)
+        return workflow.copy(id = workflowJson["_id"] as String)
+    }
+
+    protected fun createWorkflow(workflow: Workflow, refresh: Boolean = true): Workflow {
+        return createWorkflowWithClient(client(), workflow, emptyList(), refresh)
+    }
+
+    protected fun Workflow.toHttpEntity(): HttpEntity {
+        return StringEntity(toJsonString(), APPLICATION_JSON)
+    }
+
+    private fun Workflow.toJsonString(): String {
+        val builder = XContentFactory.jsonBuilder()
+        return shuffleXContent(toXContent(builder, ToXContent.EMPTY_PARAMS)).string()
+    }
+
+    protected fun getWorkflow(
+        workflowId: String,
+        header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"),
+    ): Workflow {
+        val response = client().makeRequest("GET", "$WORKFLOW_ALERTING_BASE_URI/$workflowId", null, header)
+        assertEquals("Unable to get workflow $workflowId", RestStatus.OK, response.restStatus())
+
+        val parser = createParser(XContentType.JSON.xContent(), response.entity.content)
+        XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser)
+
+        lateinit var id: String
+        var version: Long = 0
+        lateinit var workflow: Workflow
+
+        while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
+            parser.nextToken()
+
+            when (parser.currentName()) {
+                "_id" -> id = parser.text()
+                "_version" ->
version = parser.longValue() + "workflow" -> workflow = Workflow.parse(parser) + } + } + + assertUserNull(workflow) + return workflow.copy(id = id, version = version) + } + + protected fun Workflow.relativeUrl() = "$WORKFLOW_ALERTING_BASE_URI/$id" +} diff --git a/alerting/bin/test/org/opensearch/alerting/DocumentMonitorRunnerIT.kt b/alerting/bin/test/org/opensearch/alerting/DocumentMonitorRunnerIT.kt new file mode 100644 index 000000000..44454395e --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/DocumentMonitorRunnerIT.kt @@ -0,0 +1,1675 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import org.apache.http.entity.ContentType +import org.apache.http.entity.StringEntity +import org.opensearch.action.search.SearchResponse +import org.opensearch.alerting.alerts.AlertIndices.Companion.ALL_ALERT_INDEX_PATTERN +import org.opensearch.alerting.alerts.AlertIndices.Companion.ALL_FINDING_INDEX_PATTERN +import org.opensearch.client.Response +import org.opensearch.client.ResponseException +import org.opensearch.common.xcontent.json.JsonXContent +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.DataSources +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.action.ActionExecutionPolicy +import org.opensearch.commons.alerting.model.action.AlertCategory +import org.opensearch.commons.alerting.model.action.PerAlertActionScope +import org.opensearch.commons.alerting.model.action.PerExecutionActionScope +import org.opensearch.core.rest.RestStatus +import org.opensearch.script.Script +import java.time.ZonedDateTime +import java.time.format.DateTimeFormatter +import java.time.temporal.ChronoUnit.MILLIS +import java.util.Locale + +class DocumentMonitorRunnerIT : AlertingRestTestCase() { + + fun `test execute monitor with dryrun`() { + + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + val index = createTestIndex() + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + + val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id) + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))) + ) + + indexDoc(index, "1", testDoc) + + val response = executeMonitor(monitor, params = DRYRUN_MONITOR) + + val output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + + assertEquals(1, output.objectMap("trigger_results").values.size) + + for (triggerResult in output.objectMap("trigger_results").values) { + assertEquals(1, triggerResult.objectMap("action_results").values.size) + for (alertActionResult in triggerResult.objectMap("action_results").values) { + for (actionResult in alertActionResult.values) { + @Suppress("UNCHECKED_CAST") val actionOutput = (actionResult as Map>)["output"] + as Map + assertEquals("Hello ${monitor.name}", actionOutput["subject"]) + assertEquals("Hello ${monitor.name}", actionOutput["message"]) 
+ } + } + } + + val alerts = searchAlerts(monitor) + assertEquals("Alert saved for test monitor", 0, alerts.size) + } + + fun `test execute monitor returns search result with dryrun`() { + val testIndex = createTestIndex() + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger)) + + indexDoc(testIndex, "1", testDoc) + indexDoc(testIndex, "5", testDoc) + + val response = executeMonitor(monitor, params = DRYRUN_MONITOR) + + val output = entityAsMap(response) + + assertEquals(monitor.name, output["monitor_name"]) + @Suppress("UNCHECKED_CAST") + val searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + val matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) + assertTrue("Incorrect search result", matchingDocsToQuery.contains("1|$testIndex")) + assertTrue("Incorrect search result", matchingDocsToQuery.contains("5|$testIndex")) + } + + fun `test execute monitor generates alerts and findings`() { + val testIndex = createTestIndex() + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + assertNotNull(monitor.id) + + indexDoc(testIndex, "1", testDoc) + indexDoc(testIndex, "5", testDoc) + + val response = executeMonitor(monitor.id) + + val output = entityAsMap(response) + + assertEquals(monitor.name, output["monitor_name"]) + @Suppress("UNCHECKED_CAST") + val searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + val matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) + assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("1|$testIndex", "5|$testIndex"))) + + val alerts = searchAlertsWithFilter(monitor) + assertEquals("Alert saved for test monitor", 2, alerts.size) + + val findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 2, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + assertTrue("Findings saved for test monitor", findings[1].relatedDocIds.contains("5")) + } + + fun `test execute monitor with tag as trigger condition generates alerts and findings`() { + val testIndex = createTestIndex() + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc 
= """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", tags = listOf("test_tag"), fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + + val trigger = randomDocumentLevelTrigger(condition = Script("query[tag=test_tag]")) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + assertNotNull(monitor.id) + + indexDoc(testIndex, "1", testDoc) + indexDoc(testIndex, "5", testDoc) + + val response = executeMonitor(monitor.id) + + val output = entityAsMap(response) + + assertEquals(monitor.name, output["monitor_name"]) + @Suppress("UNCHECKED_CAST") + val searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + val matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) + assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("1|$testIndex", "5|$testIndex"))) + + val alerts = searchAlertsWithFilter(monitor) + assertEquals("Alert saved for test monitor", 2, alerts.size) + + val findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 2, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + assertTrue("Findings saved for test monitor", findings[1].relatedDocIds.contains("5")) + } + + fun `test execute monitor input error`() { + val testIndex = createTestIndex() + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", tags = listOf("test_tag"), fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + assertNotNull(monitor.id) + + deleteIndex(testIndex) + + val response = executeMonitor(monitor.id) + + val output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + @Suppress("UNCHECKED_CAST") + val inputResults = output.stringMap("input_results") + assertTrue("Missing monitor error message", (inputResults?.get("error") as String).isNotEmpty()) + + val alerts = searchAlerts(monitor) + assertEquals("Alert not saved", 1, alerts.size) + assertEquals("Alert status is incorrect", Alert.State.ERROR, alerts[0].state) + } + + fun `test execute monitor generates alerts and findings with per alert execution for actions`() { + val testIndex = createTestIndex() + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + + val alertCategories = AlertCategory.values() + val actionExecutionScope 
= PerAlertActionScope( + actionableAlerts = (1..randomInt(alertCategories.size)).map { alertCategories[it - 1] }.toSet() + ) + val actionExecutionPolicy = ActionExecutionPolicy(actionExecutionScope) + val actions = (0..randomInt(10)).map { + randomActionWithPolicy( + template = randomTemplateScript("Hello {{ctx.monitor.name}}"), + destinationId = createDestination().id, + actionExecutionPolicy = actionExecutionPolicy + ) + } + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = actions) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + assertNotNull(monitor.id) + + indexDoc(testIndex, "1", testDoc) + indexDoc(testIndex, "5", testDoc) + + val response = executeMonitor(monitor.id) + + val output = entityAsMap(response) + + assertEquals(monitor.name, output["monitor_name"]) + @Suppress("UNCHECKED_CAST") + val searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + val matchingDocsToQuery = searchResult[docQuery.id] as List + assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) + assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("1|$testIndex", "5|$testIndex"))) + + for (triggerResult in output.objectMap("trigger_results").values) { + assertEquals(2, triggerResult.objectMap("action_results").values.size) + for (alertActionResult in triggerResult.objectMap("action_results").values) { + assertEquals(actions.size, alertActionResult.values.size) + for (actionResult in alertActionResult.values) { + @Suppress("UNCHECKED_CAST") val actionOutput = (actionResult as Map>)["output"] + as Map + assertEquals("Hello ${monitor.name}", actionOutput["subject"]) + assertEquals("Hello ${monitor.name}", actionOutput["message"]) + } + } + } + + refreshAllIndices() + + val alerts = searchAlertsWithFilter(monitor) + assertEquals("Alert saved for test monitor", 2, alerts.size) + + val findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 2, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + assertTrue("Findings saved for test monitor", findings[1].relatedDocIds.contains("5")) + } + + fun `test execute monitor generates alerts and findings with per trigger execution for actions`() { + val testIndex = createTestIndex() + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + + val actionExecutionScope = PerExecutionActionScope() + val actionExecutionPolicy = ActionExecutionPolicy(actionExecutionScope) + val actions = (0..randomInt(10)).map { + randomActionWithPolicy( + template = randomTemplateScript("Hello {{ctx.monitor.name}}"), + destinationId = createDestination().id, + actionExecutionPolicy = actionExecutionPolicy + ) + } + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = actions) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + assertNotNull(monitor.id) + + indexDoc(testIndex, "1", testDoc) + indexDoc(testIndex, "5", testDoc) + + val response = 
executeMonitor(monitor.id)
+
+        val output = entityAsMap(response)
+
+        assertEquals(monitor.name, output["monitor_name"])
+        @Suppress("UNCHECKED_CAST")
+        val searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        val matchingDocsToQuery = searchResult[docQuery.id] as List<String>
+        assertEquals("Incorrect search result", 2, matchingDocsToQuery.size)
+        assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("1|$testIndex", "5|$testIndex")))
+
+        for (triggerResult in output.objectMap("trigger_results").values) {
+            assertEquals(2, triggerResult.objectMap("action_results").values.size)
+            for (alertActionResult in triggerResult.objectMap("action_results").values) {
+                assertEquals(actions.size, alertActionResult.values.size)
+                for (actionResult in alertActionResult.values) {
+                    @Suppress("UNCHECKED_CAST") val actionOutput = (actionResult as Map<String, Map<String, String>>)["output"]
+                        as Map<String, String>
+                    assertEquals("Hello ${monitor.name}", actionOutput["subject"])
+                    assertEquals("Hello ${monitor.name}", actionOutput["message"])
+                }
+            }
+        }
+
+        val alerts = searchAlertsWithFilter(monitor)
+        assertEquals("Alert saved for test monitor", 2, alerts.size)
+
+        val findings = searchFindings(monitor)
+        assertEquals("Findings saved for test monitor", 2, findings.size)
+        assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1"))
+        assertTrue("Findings saved for test monitor", findings[1].relatedDocIds.contains("5"))
+    }
+
+    fun `test execute monitor with wildcard index that generates alerts and findings for EQUALS query operator`() {
+        val testIndexPrefix = "test-index-${randomAlphaOfLength(10).lowercase(Locale.ROOT)}"
+        val testQueryName = "wildcard-test-query"
+        val testIndex = createTestIndex("${testIndexPrefix}1")
+        val testIndex2 = createTestIndex("${testIndexPrefix}2")
+
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc = """{
+            "message" : "This is an error from IAD region",
+            "test_strict_date_time" : "$testTime",
+            "test_field" : "us-west-2"
+        }"""
+
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = testQueryName, fields = listOf())
+        val docLevelInput = DocLevelMonitorInput("description", listOf("$testIndexPrefix*"), listOf(docQuery))
+
+        val trigger = randomDocumentLevelTrigger(condition = Script("query[name=$testQueryName]"))
+        val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger)))
+        assertNotNull(monitor.id)
+
+        indexDoc(testIndex, "1", testDoc)
+        indexDoc(testIndex2, "5", testDoc)
+
+        val response = executeMonitor(monitor.id)
+
+        val output = entityAsMap(response)
+
+        assertEquals(monitor.name, output["monitor_name"])
+        @Suppress("UNCHECKED_CAST")
+        val searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        val matchingDocsToQuery = searchResult[docQuery.id] as List<String>
+        assertEquals("Incorrect search result", 2, matchingDocsToQuery.size)
+        assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("1|$testIndex", "5|$testIndex2")))
+
+        val alerts = searchAlertsWithFilter(monitor)
+        assertEquals("Alert saved for test monitor", 2, alerts.size)
+
+        val findings = searchFindings(monitor)
+        assertEquals("Findings saved for test monitor", 2, findings.size)
+        val foundFindings = findings.filter { it.relatedDocIds.contains("1") || it.relatedDocIds.contains("5") }
+        assertEquals("Didn't find findings for docs 1 and 5", 2, foundFindings.size)
+    }
+
+    fun `test execute monitor with wildcard index that generates alerts and findings for NOT EQUALS query operator`() {
+        val testIndexPrefix = "test-index-${randomAlphaOfLength(10).lowercase(Locale.ROOT)}"
+        val testQueryName = "wildcard-test-query"
+        val testIndex = createTestIndex("${testIndexPrefix}1")
+        val testIndex2 = createTestIndex("${testIndexPrefix}2")
+
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc = """{
+            "message" : "This is an error from IAD region",
+            "test_strict_date_time" : "$testTime",
+            "test_field" : "us-west-2"
+        }"""
+
+        val docQuery = DocLevelQuery(query = "NOT (test_field:\"us-west-1\")", name = testQueryName, fields = listOf())
+        val docLevelInput = DocLevelMonitorInput("description", listOf("$testIndexPrefix*"), listOf(docQuery))
+
+        val trigger = randomDocumentLevelTrigger(condition = Script("query[name=$testQueryName]"))
+        val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger)))
+        assertNotNull(monitor.id)
+
+        indexDoc(testIndex, "1", testDoc)
+        indexDoc(testIndex2, "5", testDoc)
+
+        val response = executeMonitor(monitor.id)
+
+        val output = entityAsMap(response)
+
+        assertEquals(monitor.name, output["monitor_name"])
+        @Suppress("UNCHECKED_CAST")
+        val searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        val matchingDocsToQuery = searchResult[docQuery.id] as List<String>
+        assertEquals("Incorrect search result", 2, matchingDocsToQuery.size)
+        assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("1|$testIndex", "5|$testIndex2")))
+
+        val alerts = searchAlertsWithFilter(monitor)
+        assertEquals("Alert saved for test monitor", 2, alerts.size)
+
+        val findings = searchFindings(monitor)
+        assertEquals("Findings saved for test monitor", 2, findings.size)
+        val foundFindings = findings.filter { it.relatedDocIds.contains("1") || it.relatedDocIds.contains("5") }
+        assertEquals("Didn't find findings for docs 1 and 5", 2, foundFindings.size)
+    }
+
+    fun `test execute monitor with new index added after first execution that generates alerts and findings`() {
+        val testIndex = createTestIndex("test1")
+        val testIndex2 = createTestIndex("test2")
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc = """{
+            "message" : "This is an error from IAD region",
+            "test_strict_date_time" : "$testTime",
+            "test_field" : "us-west-2"
+        }"""
+
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery))
+
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger)))
+        assertNotNull(monitor.id)
+
+        indexDoc(testIndex, "1", testDoc)
+        indexDoc(testIndex2, "5", testDoc)
+        executeMonitor(monitor.id)
+
+        var alerts = searchAlertsWithFilter(monitor)
+        assertEquals("Alert saved for test monitor", 2, alerts.size)
+
+        var findings = searchFindings(monitor)
+        assertEquals("Findings saved for test monitor", 2, findings.size)
+
+        var foundFindings = findings.filter { it.relatedDocIds.contains("1") || it.relatedDocIds.contains("5") }
+        assertEquals("Findings saved for test monitor expected 1 and 5", 2, foundFindings.size)
+
+        // clear previous findings and alerts
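+        // deleting the finding and alert indices lets the assertions below count
+        // only documents indexed after this point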
+        deleteIndex(ALL_FINDING_INDEX_PATTERN)
+        deleteIndex(ALL_ALERT_INDEX_PATTERN)
+
+        val testIndex3 = createTestIndex("test3")
+        indexDoc(testIndex3, "10", testDoc)
+        indexDoc(testIndex, "14", testDoc)
+        indexDoc(testIndex2, "51", testDoc)
+
+        val response = executeMonitor(monitor.id)
+
+        val output = entityAsMap(response)
+
+        assertEquals(monitor.name, output["monitor_name"])
+        @Suppress("UNCHECKED_CAST")
+        val searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        val matchingDocsToQuery = searchResult[docQuery.id] as List<String>
+        assertEquals("Incorrect search result", 3, matchingDocsToQuery.size)
+        assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("14|$testIndex", "51|$testIndex2", "10|$testIndex3")))
+
+        alerts = searchAlertsWithFilter(monitor)
+        assertEquals("Alert saved for test monitor", 3, alerts.size)
+
+        findings = searchFindings(monitor)
+        assertEquals("Findings saved for test monitor", 3, findings.size)
+
+        foundFindings = findings.filter {
+            it.relatedDocIds.contains("14") || it.relatedDocIds.contains("51") || it.relatedDocIds.contains("10")
+        }
+        assertEquals("Findings saved for test monitor expected 14, 51 and 10", 3, foundFindings.size)
+    }
+
+    fun `test execute monitor with indices having fields with same name but different data types`() {
+        val testIndex = createTestIndex(
+            "test1",
+            """"properties": {
+                "source.device.port": { "type": "long" },
+                "source.device.hwd.id": { "type": "long" },
+                "nested_field": {
+                    "type": "nested",
+                    "properties": {
+                        "test1": {
+                            "type": "keyword"
+                        }
+                    }
+                },
+                "my_join_field": {
+                    "type": "join",
+                    "relations": {
+                        "question": "answer"
+                    }
+                },
+                "test_field" : { "type" : "integer" }
+            }
+            """.trimIndent()
+        )
+        var testDoc = """{
+            "source" : { "device": {"port" : 12345 } },
+            "nested_field": { "test1": "some text" },
+            "test_field": 12345
+        }"""
+
+        val docQuery1 = DocLevelQuery(
+            query = "(source.device.port:12345 AND test_field:12345) OR source.device.hwd.id:12345",
+            name = "4",
+            fields = listOf()
+        )
+        val docQuery2 = DocLevelQuery(
+            query = "(source.device.port:\"12345\" AND test_field:\"12345\") OR source.device.hwd.id:\"12345\"",
+            name = "5",
+            fields = listOf()
+        )
+        val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery1, docQuery2))
+
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger)))
+        assertNotNull(monitor.id)
+
+        indexDoc(testIndex, "1", testDoc)
+        executeMonitor(monitor.id)
+
+        var alerts = searchAlertsWithFilter(monitor)
+        assertEquals("Alert saved for test monitor", 1, alerts.size)
+
+        var findings = searchFindings(monitor)
+        assertEquals("Findings saved for test monitor", 1, findings.size)
+
+        // clear previous findings and alerts
+        deleteIndex(ALL_FINDING_INDEX_PATTERN)
+        deleteIndex(ALL_ALERT_INDEX_PATTERN)
+
+        indexDoc(testIndex, "2", testDoc)
+
+        // no fields are expanded, as only index test1 is present
+        val oldExpectedQueries = listOf(
+            "(source.device.port_test__${monitor.id}:12345 AND test_field_test__${monitor.id}:12345) OR " +
+                "source.device.hwd.id_test__${monitor.id}:12345",
+            "(source.device.port_test__${monitor.id}:\"12345\" AND test_field_test__${monitor.id}:\"12345\") " +
+                "OR source.device.hwd.id_test__${monitor.id}:\"12345\""
+        )
+
+        val request = """{
+            "size": 10,
+            "query": {
+                "match_all": {}
+            }
+        }"""
+        var httpResponse = adminClient().makeRequest(
+            "GET", "/${monitor.dataSources.queryIndex}/_search",
+            StringEntity(request, ContentType.APPLICATION_JSON)
+        )
+        assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus())
+        var searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content))
+        searchResponse.hits.forEach { hit ->
+            val query = ((hit.sourceAsMap["query"] as Map<String, Any>)["query_string"] as Map<String, Any>)["query"]
+            assertTrue(oldExpectedQueries.contains(query))
+        }
+
+        val testIndex2 = createTestIndex(
+            "test2",
+            """
+                "properties" : {
+                  "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" },
+                  "test_field" : { "type" : "keyword" },
+                  "number" : { "type" : "keyword" }
+                }
+            """.trimIndent()
+        )
+        testDoc = """{
+            "source" : { "device": {"port" : "12345" } },
+            "nested_field": { "test1": "some text" },
+            "test_field": "12345"
+        }"""
+        indexDoc(testIndex2, "1", testDoc)
+        executeMonitor(monitor.id)
+
+        // only the fields source.device.port & test_field are expanded, as they have the same name
+        // but different data types in indices test1 & test2
+        val newExpectedQueries = listOf(
+            "(source.device.port_test2_${monitor.id}:12345 AND test_field_test2_${monitor.id}:12345) " +
+                "OR source.device.hwd.id_test__${monitor.id}:12345",
+            "(source.device.port_test1_${monitor.id}:12345 AND test_field_test1_${monitor.id}:12345) " +
+                "OR source.device.hwd.id_test__${monitor.id}:12345",
+            "(source.device.port_test2_${monitor.id}:\"12345\" AND test_field_test2_${monitor.id}:\"12345\") " +
+                "OR source.device.hwd.id_test__${monitor.id}:\"12345\"",
+            "(source.device.port_test1_${monitor.id}:\"12345\" AND test_field_test1_${monitor.id}:\"12345\") " +
+                "OR source.device.hwd.id_test__${monitor.id}:\"12345\""
+        )
+
+        alerts = searchAlertsWithFilter(monitor)
+        assertEquals("Alert saved for test monitor", 2, alerts.size)
+
+        findings = searchFindings(monitor)
+        assertEquals("Findings saved for test monitor", 2, findings.size)
+
+        httpResponse = adminClient().makeRequest(
+            "GET", "/${monitor.dataSources.queryIndex}/_search",
+            StringEntity(request, ContentType.APPLICATION_JSON)
+        )
+        assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus())
+        searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content))
+        searchResponse.hits.forEach { hit ->
+            val query = ((hit.sourceAsMap["query"] as Map<String, Any>)["query_string"] as Map<String, Any>)["query"]
+            assertTrue(oldExpectedQueries.contains(query) || newExpectedQueries.contains(query))
+        }
+    }
+
+    fun `test execute monitor with indices having fields with same name but with different nesting`() {
+        val testIndex = createTestIndex(
+            "test1",
+            """"properties": {
+                "nested_field": {
+                    "type": "nested",
+                    "properties": {
+                        "test1": {
+                            "type": "keyword"
+                        }
+                    }
+                }
+            }
+            """.trimIndent()
+        )
+
+        val testIndex2 = createTestIndex(
+            "test2",
+            """"properties": {
+                "nested_field": {
+                    "properties": {
+                        "test1": {
+                            "type": "keyword"
+                        }
+                    }
+                }
+            }
+            """.trimIndent()
+        )
+        val testDoc = """{
+            "nested_field": { "test1": "12345" }
+        }"""
+
+        val docQuery = DocLevelQuery(
+            query = "nested_field.test1:\"12345\"",
+            name = "5",
+            fields = listOf()
+        )
+        val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery))
+
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger)))
+        assertNotNull(monitor.id)
+
+        indexDoc(testIndex, "1", testDoc)
+        indexDoc(testIndex2, "1", testDoc)
+
+        executeMonitor(monitor.id)
+
+        val alerts = searchAlertsWithFilter(monitor)
+        assertEquals("Alert saved for test monitor", 2, alerts.size)
+
+        val findings = searchFindings(monitor)
+        assertEquals("Findings saved for test monitor", 2, findings.size)
+
+        // since the mapping of nested_field.test1 differs across the indices, the query is expanded
+        val expectedQueries = listOf(
+            "nested_field.test1_test__${monitor.id}:\"12345\""
+        )
+
+        val request = """{
+            "size": 10,
+            "query": {
+                "match_all": {}
+            }
+        }"""
+        var httpResponse = adminClient().makeRequest(
+            "GET", "/${monitor.dataSources.queryIndex}/_search",
+            StringEntity(request, ContentType.APPLICATION_JSON)
+        )
+        assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus())
+        var searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content))
+        searchResponse.hits.forEach { hit ->
+            val query = ((hit.sourceAsMap["query"] as Map<String, Any>)["query_string"] as Map<String, Any>)["query"]
+            assertTrue(expectedQueries.contains(query))
+        }
+    }
+
+    fun `test execute monitor with indices having fields with same name but different field mappings`() {
+        val testIndex = createTestIndex(
+            "test1",
+            """"properties": {
+                "source": {
+                    "properties": {
+                        "id": {
+                            "type":"text",
+                            "analyzer":"whitespace"
+                        }
+                    }
+                },
+                "test_field" : {
+                    "type":"text",
+                    "analyzer":"whitespace"
+                }
+            }
+            """.trimIndent()
+        )
+
+        val testIndex2 = createTestIndex(
+            "test2",
+            """"properties": {
+                "source": {
+                    "properties": {
+                        "id": {
+                            "type":"text"
+                        }
+                    }
+                },
+                "test_field" : {
+                    "type":"text"
+                }
+            }
+            """.trimIndent()
+        )
+        val testDoc = """{
+            "source" : {"id" : "12345" },
+            "nested_field": { "test1": "some text" },
+            "test_field": "12345"
+        }"""
+
+        val docQuery = DocLevelQuery(
+            query = "test_field:\"12345\" AND source.id:\"12345\"",
+            name = "5",
+            fields = listOf()
+        )
+        val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery))
+
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger)))
+        assertNotNull(monitor.id)
+
+        indexDoc(testIndex, "1", testDoc)
+        indexDoc(testIndex2, "1", testDoc)
+
+        executeMonitor(monitor.id)
+
+        val alerts = searchAlertsWithFilter(monitor)
+        assertEquals("Alert saved for test monitor", 2, alerts.size)
+
+        val findings = searchFindings(monitor)
+        assertEquals("Findings saved for test monitor", 2, findings.size)
+
+        // since the mappings of source.id & test_field differ across the indices, both fields are expanded
+        val expectedQueries = listOf(
+            "test_field_test2_${monitor.id}:\"12345\" AND source.id_test2_${monitor.id}:\"12345\"",
+            "test_field_test1_${monitor.id}:\"12345\" AND source.id_test1_${monitor.id}:\"12345\""
+        )
+
+        val request = """{
+            "size": 10,
+            "query": {
+                "match_all": {}
+            }
+        }"""
+        var httpResponse = adminClient().makeRequest(
+            "GET", "/${monitor.dataSources.queryIndex}/_search",
+            StringEntity(request, ContentType.APPLICATION_JSON)
+        )
+        assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus())
+        var searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content))
+        searchResponse.hits.forEach { hit ->
+            val query = ((hit.sourceAsMap["query"] as Map<String, Any>)["query_string"] as Map<String, Any>)["query"]
+            assertTrue(expectedQueries.contains(query))
+        }
+    }
+
+    fun `test execute monitor with indices having fields with same name but different field mappings in
multiple indices`() { + val testIndex = createTestIndex( + "test1", + """"properties": { + "source": { + "properties": { + "device": { + "properties": { + "hwd": { + "properties": { + "id": { + "type":"text", + "analyzer":"whitespace" + } + } + } + } + } + } + }, + "test_field" : { + "type":"text" + } + } + """.trimIndent() + ) + + val testIndex2 = createTestIndex( + "test2", + """"properties": { + "test_field" : { + "type":"keyword" + } + } + """.trimIndent() + ) + + val testIndex4 = createTestIndex( + "test4", + """"properties": { + "source": { + "properties": { + "device": { + "properties": { + "hwd": { + "properties": { + "id": { + "type":"text" + } + } + } + } + } + } + }, + "test_field" : { + "type":"text" + } + } + """.trimIndent() + ) + + val testDoc1 = """{ + "source" : {"device" : {"hwd" : {"id" : "12345"}} }, + "nested_field": { "test1": "some text" } + }""" + val testDoc2 = """{ + "nested_field": { "test1": "some text" }, + "test_field": "12345" + }""" + + val docQuery1 = DocLevelQuery( + query = "test_field:\"12345\"", + name = "4", + fields = listOf() + ) + val docQuery2 = DocLevelQuery( + query = "source.device.hwd.id:\"12345\"", + name = "5", + fields = listOf() + ) + + val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery1, docQuery2)) + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + assertNotNull(monitor.id) + + indexDoc(testIndex4, "1", testDoc1) + indexDoc(testIndex2, "1", testDoc2) + indexDoc(testIndex, "1", testDoc1) + indexDoc(testIndex, "2", testDoc2) + + executeMonitor(monitor.id) + + val alerts = searchAlertsWithFilter(monitor) + assertEquals("Alert saved for test monitor", 4, alerts.size) + + val findings = searchFindings(monitor) + assertEquals("Findings saved for test monitor", 4, findings.size) + + val request = """{ + "size": 0, + "query": { + "match_all": {} + } + }""" + val httpResponse = adminClient().makeRequest( + "GET", "/${monitor.dataSources.queryIndex}/_search", + StringEntity(request, ContentType.APPLICATION_JSON) + ) + assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) + + val searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content)) + searchResponse.hits.totalHits?.let { assertEquals(5L, it.value) } + } + + fun `test no of queries generated for document-level monitor based on wildcard indexes`() { + val testIndex = createTestIndex("test1") + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery)) + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + assertNotNull(monitor.id) + + indexDoc(testIndex, "1", testDoc) + executeMonitor(monitor.id) + + val request = """{ + "size": 0, + "query": { + "match_all": {} + } + }""" + var httpResponse = adminClient().makeRequest( + "GET", "/${monitor.dataSources.queryIndex}/_search", + StringEntity(request, ContentType.APPLICATION_JSON) + ) + 
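+        // with a single doc-level query defined, exactly one query document is expected
+        // in the query index, even after a second index matches the wildcard (asserted below)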
+        assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus())
+
+        var searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content))
+        searchResponse.hits.totalHits?.let { assertEquals(1L, it.value) }
+
+        val testIndex2 = createTestIndex("test2")
+        indexDoc(testIndex2, "1", testDoc)
+        executeMonitor(monitor.id)
+
+        httpResponse = adminClient().makeRequest(
+            "GET", "/${monitor.dataSources.queryIndex}/_search",
+            StringEntity(request, ContentType.APPLICATION_JSON)
+        )
+        assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus())
+
+        searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content))
+        searchResponse.hits.totalHits?.let { assertEquals(1L, it.value) }
+    }
+
+    fun `test execute monitor with new index added after first execution that generates alerts and findings from new query`() {
+        val testIndex = createTestIndex("test1")
+        val testIndex2 = createTestIndex("test2")
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc = """{
+            "message" : "This is an error from IAD region",
+            "test_strict_date_time" : "$testTime",
+            "test_field" : "us-west-2"
+        }"""
+
+        val docQuery1 = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
+        val docQuery2 = DocLevelQuery(query = "test_field_new:\"us-west-2\"", name = "4", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery1, docQuery2))
+
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger)))
+        assertNotNull(monitor.id)
+
+        indexDoc(testIndex, "1", testDoc)
+        indexDoc(testIndex2, "5", testDoc)
+        executeMonitor(monitor.id)
+
+        var alerts = searchAlertsWithFilter(monitor)
+        assertEquals("Alert saved for test monitor", 2, alerts.size)
+
+        var findings = searchFindings(monitor)
+        assertEquals("Findings saved for test monitor", 2, findings.size)
+
+        var foundFindings = findings.filter { it.relatedDocIds.contains("1") || it.relatedDocIds.contains("5") }
+        assertEquals("Findings saved for test monitor expected 1 and 5", 2, foundFindings.size)
+
+        // clear previous findings and alerts
+        deleteIndex(ALL_FINDING_INDEX_PATTERN)
+        deleteIndex(ALL_ALERT_INDEX_PATTERN)
+
+        val testDocNew = """{
+            "message" : "This is an error from IAD region",
+            "test_strict_date_time" : "$testTime",
+            "test_field_new" : "us-west-2"
+        }"""
+
+        val testIndex3 = createTestIndex("test3")
+        indexDoc(testIndex3, "10", testDocNew)
+
+        val response = executeMonitor(monitor.id)
+
+        val output = entityAsMap(response)
+
+        assertEquals(monitor.name, output["monitor_name"])
+        @Suppress("UNCHECKED_CAST")
+        val searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        val matchingDocsToQuery = searchResult[docQuery2.id] as List<String>
+        assertEquals("Incorrect search result", 1, matchingDocsToQuery.size)
+        assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("10|$testIndex3")))
+
+        alerts = searchAlertsWithFilter(monitor)
+        assertEquals("Alert saved for test monitor", 1, alerts.size)
+
+        findings = searchFindings(monitor)
+        assertEquals("Findings saved for test monitor", 1, findings.size)
+
+        foundFindings = findings.filter {
+            it.relatedDocIds.contains("10")
+        }
+        assertEquals("Findings saved for test monitor expected 10", 1, foundFindings.size)
+    }
+
+    fun `test document-level monitor when alias only has write index with 0 docs`() {
+        // Monitor should execute, but create 0 findings.
+        val alias = createTestAlias(includeWriteIndex = true)
+        val aliasIndex = alias.keys.first()
+        val query = randomDocLevelQuery(tags = listOf())
+        val input = randomDocLevelMonitorInput(indices = listOf(aliasIndex), queries = listOf(query))
+        val trigger = randomDocumentLevelTrigger(condition = Script("query[id=\"${query.id}\"]"))
+        val monitor = createMonitor(randomDocumentLevelMonitor(enabled = false, inputs = listOf(input), triggers = listOf(trigger)))
+
+        val response: Response
+        try {
+            response = executeMonitor(monitor.id)
+        } catch (e: ResponseException) {
+            assertNotNull("Expected an error message: $e", e.message)
+            e.message?.let {
+                assertTrue("Unexpected exception: $e", it.contains("""reason":"no such index [.opensearch-alerting-findings]"""))
+            }
+            assertEquals(404, e.response.statusLine.statusCode)
+            return
+        }
+
+        val output = entityAsMap(response)
+        val inputResults = output.stringMap("input_results")
+        val errorMessage = inputResults?.get("error")
+        @Suppress("UNCHECKED_CAST")
+        val searchResult = (inputResults?.get("results") as List<Map<String, Any>>).firstOrNull()
+        @Suppress("UNCHECKED_CAST")
+        val findings = searchFindings()
+
+        assertEquals(monitor.name, output["monitor_name"])
+        assertNull("Unexpected monitor execution failure: $errorMessage", errorMessage)
+        findings.findings.forEach {
+            val queryIds = it.finding.docLevelQueries.map { query -> query.id }
+            assertFalse("No findings should exist with queryId ${query.id}, but found: $it", queryIds.contains(query.id))
+        }
+    }
+
+    fun `test document-level monitor when docs exist prior to monitor creation`() {
+        // FIXME: Consider renaming this test case
+        // Only new docs should create findings.
+        val alias = createTestAlias(includeWriteIndex = true)
+        val aliasIndex = alias.keys.first()
+        val indices = alias[aliasIndex]?.keys?.toList() as List<String>
+        val query = randomDocLevelQuery(tags = listOf())
+        val input = randomDocLevelMonitorInput(indices = listOf(aliasIndex), queries = listOf(query))
+        val trigger = randomDocumentLevelTrigger(condition = Script("query[id=\"${query.id}\"]"))
+
+        val preExistingDocIds = mutableSetOf<String>()
+        indices.forEach { index ->
+            val docId = index.hashCode().toString()
+            val doc = """{ "message" : "${query.query}" }"""
+            preExistingDocIds.add(docId)
+            indexDoc(index = index, id = docId, doc = doc)
+        }
+        assertEquals(indices.size, preExistingDocIds.size)
+
+        val monitor = createMonitor(randomDocumentLevelMonitor(enabled = false, inputs = listOf(input), triggers = listOf(trigger)))
+
+        val response = executeMonitor(monitor.id)
+
+        val output = entityAsMap(response)
+        val inputResults = output.stringMap("input_results")
+        val errorMessage = inputResults?.get("error")
+        @Suppress("UNCHECKED_CAST")
+        val searchResult = (inputResults?.get("results") as List<Map<String, Any>>).firstOrNull()
+        @Suppress("UNCHECKED_CAST")
+        val findings = searchFindings()
+
+        assertEquals(monitor.name, output["monitor_name"])
+        assertNull("Unexpected monitor execution failure: $errorMessage", errorMessage)
+        findings.findings.forEach {
+            val docIds = it.finding.relatedDocIds
+            assertTrue(
+                "Findings index should not contain a pre-existing doc, but found $it",
+                preExistingDocIds.intersect(docIds).isEmpty()
+            )
+        }
+    }
+
+    fun `test document-level monitor when alias indices only contain docs that match query`() {
+        // Only new docs should create findings.
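+        // Seed one doc per alias index before the monitor exists; none of these
+        // pre-existing docs should surface as findings below.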
+        val alias = createTestAlias(includeWriteIndex = true)
+        val aliasIndex = alias.keys.first()
+        val indices = alias[aliasIndex]?.keys?.toList() as List<String>
+        val query = randomDocLevelQuery(tags = listOf())
+        val input = randomDocLevelMonitorInput(indices = listOf(aliasIndex), queries = listOf(query))
+        val trigger = randomDocumentLevelTrigger(condition = Script("query[id=\"${query.id}\"]"))
+
+        val preExistingDocIds = mutableSetOf<String>()
+        indices.forEach { index ->
+            val docId = index.hashCode().toString()
+            val doc = """{ "message" : "${query.query}" }"""
+            preExistingDocIds.add(docId)
+            indexDoc(index = index, id = docId, doc = doc)
+        }
+        assertEquals(indices.size, preExistingDocIds.size)
+
+        val monitor = createMonitor(randomDocumentLevelMonitor(enabled = false, inputs = listOf(input), triggers = listOf(trigger)))
+        executeMonitor(monitor.id)
+
+        val newDocIds = mutableSetOf<String>()
+        indices.forEach { index ->
+            (1..5).map {
+                val docId = "${index.hashCode()}$it"
+                val doc = """{ "message" : "${query.query}" }"""
+                newDocIds.add(docId)
+                indexDoc(index = index, id = docId, doc = doc)
+            }
+        }
+        assertEquals(indices.size * 5, newDocIds.size)
+
+        val response = executeMonitor(monitor.id)
+
+        val output = entityAsMap(response)
+        val inputResults = output.stringMap("input_results")
+        val errorMessage = inputResults?.get("error")
+        @Suppress("UNCHECKED_CAST")
+        val searchResult = (inputResults?.get("results") as List<Map<String, Any>>).firstOrNull()
+        @Suppress("UNCHECKED_CAST")
+        val findings = searchFindings()
+
+        assertEquals(monitor.name, output["monitor_name"])
+        assertNull("Unexpected monitor execution failure: $errorMessage", errorMessage)
+        findings.findings.forEach {
+            val docIds = it.finding.relatedDocIds
+            assertTrue(
+                "Findings index should not contain a pre-existing doc, but found $it",
+                preExistingDocIds.intersect(docIds).isEmpty()
+            )
+            assertTrue("Found an unexpected finding $it", newDocIds.intersect(docIds).isNotEmpty())
+        }
+    }
+
+    fun `test document-level monitor when alias indices contain docs that do and do not match query`() {
+        // Only matching docs should create findings.
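+        // Matching and non-matching docs are indexed side by side; findings must
+        // reference only the matching ones.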
+        val alias = createTestAlias(includeWriteIndex = true)
+        val aliasIndex = alias.keys.first()
+        val indices = alias[aliasIndex]?.keys?.toList() as List<String>
+        val query = randomDocLevelQuery(tags = listOf())
+        val input = randomDocLevelMonitorInput(indices = listOf(aliasIndex), queries = listOf(query))
+        val trigger = randomDocumentLevelTrigger(condition = Script("query[id=\"${query.id}\"]"))
+
+        val preExistingDocIds = mutableSetOf<String>()
+        indices.forEach { index ->
+            val docId = index.hashCode().toString()
+            val doc = """{ "message" : "${query.query}" }"""
+            preExistingDocIds.add(docId)
+            indexDoc(index = index, id = docId, doc = doc)
+        }
+        assertEquals(indices.size, preExistingDocIds.size)
+
+        val monitor = createMonitor(randomDocumentLevelMonitor(enabled = false, inputs = listOf(input), triggers = listOf(trigger)))
+        executeMonitor(monitor.id)
+
+        val matchingDocIds = mutableSetOf<String>()
+        val nonMatchingDocIds = mutableSetOf<String>()
+        indices.forEach { index ->
+            (1..5).map {
+                val matchingDocId = "${index.hashCode()}$it"
+                val matchingDoc = """{ "message" : "${query.query}" }"""
+                indexDoc(index = index, id = matchingDocId, doc = matchingDoc)
+                matchingDocIds.add(matchingDocId)
+
+                val nonMatchingDocId = "${index.hashCode()}${it}2"
+                var nonMatchingDoc = StringBuilder(query.query).insert(2, "difference").toString()
+                nonMatchingDoc = """{ "message" : "$nonMatchingDoc" }"""
+                indexDoc(index = index, id = nonMatchingDocId, doc = nonMatchingDoc)
+                nonMatchingDocIds.add(nonMatchingDocId)
+            }
+        }
+        assertEquals(indices.size * 5, matchingDocIds.size)
+
+        val response = executeMonitor(monitor.id)
+
+        val output = entityAsMap(response)
+        val inputResults = output.stringMap("input_results")
+        val errorMessage = inputResults?.get("error")
+        @Suppress("UNCHECKED_CAST")
+        val searchResult = (inputResults?.get("results") as List<Map<String, Any>>).firstOrNull()
+        @Suppress("UNCHECKED_CAST")
+        val findings = searchFindings()
+
+        assertEquals(monitor.name, output["monitor_name"])
+        assertNull("Unexpected monitor execution failure: $errorMessage", errorMessage)
+        findings.findings.forEach {
+            val docIds = it.finding.relatedDocIds
+            assertTrue(
+                "Findings index should not contain a pre-existing doc, but found $it",
+                preExistingDocIds.intersect(docIds).isEmpty()
+            )
+            assertTrue("Found doc that doesn't match query: $it", nonMatchingDocIds.intersect(docIds).isEmpty())
+            assertTrue("Found an unexpected finding $it", matchingDocIds.intersect(docIds).isNotEmpty())
+        }
+    }
+
+    fun `test document-level monitor when datastreams contain docs that do match query`() {
+        val dataStreamName = "test-datastream"
+        createDataStream(
+            dataStreamName,
+            """
+                "properties" : {
+                  "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" },
+                  "test_field" : { "type" : "keyword" },
+                  "number" : { "type" : "keyword" }
+                }
+            """.trimIndent(),
+            false
+        )
+
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput("description", listOf(dataStreamName), listOf(docQuery))
+
+        val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id)
+        val monitor = createMonitor(
+            randomDocumentLevelMonitor(
+                inputs = listOf(docLevelInput),
+                triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action)))
+            )
+        )
+
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc = """{
+            "@timestamp": "$testTime",
+            "message" : "This is an error from IAD region",
+            "test_strict_date_time" : "$testTime",
+            "test_field" : "us-west-2"
+        }"""
+        indexDoc(dataStreamName, "1", testDoc)
+        var response = executeMonitor(monitor.id)
+        var output = entityAsMap(response)
+        var searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        var matchingDocsToQuery = searchResult[docQuery.id] as List<String>
+        assertEquals("Incorrect search result", 1, matchingDocsToQuery.size)
+
+        rolloverDatastream(dataStreamName)
+        indexDoc(dataStreamName, "2", testDoc)
+        response = executeMonitor(monitor.id)
+        output = entityAsMap(response)
+        searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        matchingDocsToQuery = searchResult[docQuery.id] as List<String>
+        assertEquals("Incorrect search result", 1, matchingDocsToQuery.size)
+
+        deleteDataStream(dataStreamName)
+    }
+
+    fun `test document-level monitor when datastreams contain docs across read-only indices that do match query`() {
+        val dataStreamName = "test-datastream"
+        createDataStream(
+            dataStreamName,
+            """
+                "properties" : {
+                  "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" },
+                  "test_field" : { "type" : "keyword" },
+                  "number" : { "type" : "keyword" }
+                }
+            """.trimIndent(),
+            false
+        )
+
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput("description", listOf(dataStreamName), listOf(docQuery))
+
+        val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id)
+        val monitor = createMonitor(
+            randomDocumentLevelMonitor(
+                inputs = listOf(docLevelInput),
+                triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action)))
+            )
+        )
+
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc = """{
+            "@timestamp": "$testTime",
+            "message" : "This is an error from IAD region",
+            "test_strict_date_time" : "$testTime",
+            "test_field" : "us-west-2"
+        }"""
+        indexDoc(dataStreamName, "1", testDoc)
+        var response = executeMonitor(monitor.id)
+        var output = entityAsMap(response)
+        var searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        var matchingDocsToQuery = searchResult[docQuery.id] as List<String>
+        assertEquals("Incorrect search result", 1, matchingDocsToQuery.size)
+
+        indexDoc(dataStreamName, "2", testDoc)
+        rolloverDatastream(dataStreamName)
+        rolloverDatastream(dataStreamName)
+        indexDoc(dataStreamName, "4", testDoc)
+        rolloverDatastream(dataStreamName)
+        response = executeMonitor(monitor.id)
+        output = entityAsMap(response)
+        searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        matchingDocsToQuery = searchResult[docQuery.id] as List<String>
+        assertEquals("Incorrect search result", 2, matchingDocsToQuery.size)
+
+        indexDoc(dataStreamName, "5", testDoc)
+        indexDoc(dataStreamName, "6", testDoc)
+        response = executeMonitor(monitor.id)
+        output = entityAsMap(response)
+        searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        matchingDocsToQuery = searchResult[docQuery.id] as List<String>
+        assertEquals("Incorrect search result", 2, matchingDocsToQuery.size)
+        deleteDataStream(dataStreamName)
+    }
+
+    fun `test document-level monitor when index alias contain docs that do match query`() {
+        val aliasName = "test-alias"
+        createIndexAlias(
+            aliasName,
+            """
+                "properties" : {
+                  "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" },
+                  "test_field" : { "type" : "keyword" },
+                  "number" : { "type" : "keyword" }
+                }
+            """.trimIndent()
+        )
+
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput("description", listOf("$aliasName"), listOf(docQuery))
+
+        val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id)
+        val monitor = createMonitor(
+            randomDocumentLevelMonitor(
+                inputs = listOf(docLevelInput),
+                triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action)))
+            )
+        )
+
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc = """{
+            "@timestamp": "$testTime",
+            "message" : "This is an error from IAD region",
+            "test_strict_date_time" : "$testTime",
+            "test_field" : "us-west-2"
+        }"""
+        indexDoc(aliasName, "1", testDoc)
+        var response = executeMonitor(monitor.id)
+        var output = entityAsMap(response)
+        var searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        var matchingDocsToQuery = searchResult[docQuery.id] as List<String>
+        assertEquals("Incorrect search result", 1, matchingDocsToQuery.size)
+
+        rolloverDatastream(aliasName)
+        indexDoc(aliasName, "2", testDoc)
+        response = executeMonitor(monitor.id)
+        output = entityAsMap(response)
+        searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        matchingDocsToQuery = searchResult[docQuery.id] as List<String>
+        assertEquals("Incorrect search result", 1, matchingDocsToQuery.size)
+
+        deleteIndexAlias(aliasName)
+    }
+
+    fun `test document-level monitor when multiple datastreams contain docs across read-only indices that do match query`() {
+        val dataStreamName1 = "test-datastream1"
+        createDataStream(
+            dataStreamName1,
+            """
+                "properties" : {
+                  "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" },
+                  "test_field" : { "type" : "keyword" },
+                  "number" : { "type" : "keyword" }
+                }
+            """.trimIndent(),
+            false
+        )
+        val dataStreamName2 = "test-datastream2"
+        createDataStream(
+            dataStreamName2,
+            """
+                "properties" : {
+                  "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" },
+                  "test_field" : { "type" : "keyword" },
+                  "number" : { "type" : "keyword" }
+                }
+            """.trimIndent(),
+            false
+        )
+
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc = """{
+            "@timestamp": "$testTime",
+            "message" : "This is an error from IAD region",
+            "test_strict_date_time" : "$testTime",
+            "test_field" : "us-west-2"
+        }"""
+        indexDoc(dataStreamName2, "-1", testDoc)
+        rolloverDatastream(dataStreamName2)
+        indexDoc(dataStreamName2, "0", testDoc)
+
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput("description", listOf("test-datastream*"), listOf(docQuery))
+
+        val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id)
+        val monitor = createMonitor(
+            randomDocumentLevelMonitor(
+                inputs = listOf(docLevelInput),
+                triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action)))
+            )
+        )
+
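+        // docs "-1" and "0" predate the monitor, so only docs indexed from here on
+        // should be counted by the executions below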
+        indexDoc(dataStreamName1, "1", testDoc)
+        indexDoc(dataStreamName2, "1", testDoc)
+        var response = executeMonitor(monitor.id)
+        var output = entityAsMap(response)
+        var searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        var matchingDocsToQuery = searchResult[docQuery.id] as List<String>
+        assertEquals("Incorrect search result", 2, matchingDocsToQuery.size)
+
+        indexDoc(dataStreamName1, "2", testDoc)
+        indexDoc(dataStreamName2, "2", testDoc)
+        rolloverDatastream(dataStreamName1)
+        rolloverDatastream(dataStreamName1)
+        rolloverDatastream(dataStreamName2)
+        indexDoc(dataStreamName1, "4", testDoc)
+        indexDoc(dataStreamName2, "4", testDoc)
+        rolloverDatastream(dataStreamName1)
+        response = executeMonitor(monitor.id)
+        output = entityAsMap(response)
+        searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        matchingDocsToQuery = searchResult[docQuery.id] as List<String>
+        assertEquals("Incorrect search result", 4, matchingDocsToQuery.size)
+
+        indexDoc(dataStreamName1, "5", testDoc)
+        indexDoc(dataStreamName1, "6", testDoc)
+        indexDoc(dataStreamName2, "5", testDoc)
+        indexDoc(dataStreamName2, "6", testDoc)
+        response = executeMonitor(monitor.id)
+        output = entityAsMap(response)
+        searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        matchingDocsToQuery = searchResult[docQuery.id] as List<String>
+        assertEquals("Incorrect search result", 4, matchingDocsToQuery.size)
+        deleteDataStream(dataStreamName1)
+        deleteDataStream(dataStreamName2)
+    }
+
+    fun `test document-level monitor ignoring old read-only indices for datastreams`() {
+        val dataStreamName = "test-datastream"
+        createDataStream(
+            dataStreamName,
+            """
+                "properties" : {
+                  "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" },
+                  "test_field" : { "type" : "keyword" },
+                  "number" : { "type" : "keyword" }
+                }
+            """.trimIndent(),
+            false
+        )
+
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc = """{
+            "@timestamp": "$testTime",
+            "message" : "This is an error from IAD region",
+            "test_strict_date_time" : "$testTime",
+            "test_field" : "us-west-2"
+        }"""
+        indexDoc(dataStreamName, "-1", testDoc)
+        rolloverDatastream(dataStreamName)
+        indexDoc(dataStreamName, "0", testDoc)
+
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput("description", listOf(dataStreamName), listOf(docQuery))
+
+        val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id)
+        val monitor = createMonitor(
+            randomDocumentLevelMonitor(
+                inputs = listOf(docLevelInput),
+                triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action)))
+            )
+        )
+
+        indexDoc(dataStreamName, "1", testDoc)
+        var response = executeMonitor(monitor.id)
+        var output = entityAsMap(response)
+        var searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        var matchingDocsToQuery = searchResult[docQuery.id] as List<String>
+        assertEquals("Incorrect search result", 1, matchingDocsToQuery.size)
+
+        rolloverDatastream(dataStreamName)
+        indexDoc(dataStreamName, "2", testDoc)
+        response = executeMonitor(monitor.id)
+        output = entityAsMap(response)
+        searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        matchingDocsToQuery = searchResult[docQuery.id] as List<String>
+        assertEquals("Incorrect search result", 1, matchingDocsToQuery.size)
+
+        deleteDataStream(dataStreamName)
+    }
+
+    fun `test execute monitor with non-null data sources`() {
+
+        val testIndex = createTestIndex()
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc = """{
+            "message" : "This is an error from IAD region",
+            "test_strict_date_time" : "$testTime",
+            "test_field" : "us-west-2"
+        }"""
+
+        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery))
+
+        val alertCategories = AlertCategory.values()
+        val actionExecutionScope = PerAlertActionScope(
+            actionableAlerts = (1..randomInt(alertCategories.size)).map { alertCategories[it - 1] }.toSet()
+        )
+        val actionExecutionPolicy = ActionExecutionPolicy(actionExecutionScope)
+        val actions = (0..randomInt(10)).map {
+            randomActionWithPolicy(
+                template = randomTemplateScript("Hello {{ctx.monitor.name}}"),
+                destinationId = createDestination().id,
+                actionExecutionPolicy = actionExecutionPolicy
+            )
+        }
+
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = actions)
+        try {
+            createMonitor(
+                randomDocumentLevelMonitor(
+                    inputs = listOf(docLevelInput),
+                    triggers = listOf(trigger),
+                    dataSources = DataSources(
+                        findingsIndex = "custom_findings_index",
+                        alertsIndex = "custom_alerts_index",
+                    )
+                )
+            )
+            fail("Expected create monitor to fail")
+        } catch (e: ResponseException) {
+            assertTrue(e.message!!.contains("illegal_argument_exception"))
+        }
+    }
+
+    fun `test execute monitor with indices removed after first run`() {
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc = """{
+            "message" : "This is an error from IAD region",
+            "test_strict_date_time" : "$testTime",
+            "test_field" : "us-west-2"
+        }"""
+
+        val index1 = createTestIndex()
+        val index2 = createTestIndex()
+        val index4 = createTestIndex()
+        val index5 = createTestIndex()
+
+        val docQuery = DocLevelQuery(query = "\"us-west-2\"", name = "3", fields = listOf())
+        var docLevelInput = DocLevelMonitorInput("description", listOf(index1, index2, index4, index5), listOf(docQuery))
+
+        val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id)
+        val monitor = createMonitor(
+            randomDocumentLevelMonitor(
+                inputs = listOf(docLevelInput),
+                triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action)))
+            )
+        )
+
+        indexDoc(index1, "1", testDoc)
+        indexDoc(index2, "1", testDoc)
+        indexDoc(index4, "1", testDoc)
+        indexDoc(index5, "1", testDoc)
+
+        var response = executeMonitor(monitor.id)
+
+        var output = entityAsMap(response)
+        assertEquals(monitor.name, output["monitor_name"])
+
+        assertEquals(1, output.objectMap("trigger_results").values.size)
+        deleteIndex(index1)
+        deleteIndex(index2)
+
+        indexDoc(index4, "2", testDoc)
+        response = executeMonitor(monitor.id)
+
+        output = entityAsMap(response)
+        assertEquals(1, output.objectMap("trigger_results").values.size)
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    /** helper that returns a field in a json map whose values are all json objects */
+    private fun Map<String, Any>.objectMap(key: String): Map<String, Map<String, Any>> {
+        return this[key] as Map<String, Map<String, Any>>
+    }
+
+    fun `test execute monitor with non-null owner`() {
+
+        val
testIndex = createTestIndex() + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + + val alertCategories = AlertCategory.values() + val actionExecutionScope = PerAlertActionScope( + actionableAlerts = (1..randomInt(alertCategories.size)).map { alertCategories[it - 1] }.toSet() + ) + val actionExecutionPolicy = ActionExecutionPolicy(actionExecutionScope) + val actions = (0..randomInt(10)).map { + randomActionWithPolicy( + template = randomTemplateScript("Hello {{ctx.monitor.name}}"), + destinationId = createDestination().id, + actionExecutionPolicy = actionExecutionPolicy + ) + } + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = actions) + try { + createMonitor( + randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + owner = "owner" + ) + ) + fail("Expected create monitor to fail") + } catch (e: ResponseException) { + assertTrue(e.message!!.contains("illegal_argument_exception")) + } + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/MonitorDataSourcesIT.kt b/alerting/bin/test/org/opensearch/alerting/MonitorDataSourcesIT.kt new file mode 100644 index 000000000..d3f166b13 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/MonitorDataSourcesIT.kt @@ -0,0 +1,5923 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import org.junit.Assert +import org.opensearch.action.DocWriteRequest +import org.opensearch.action.admin.cluster.state.ClusterStateRequest +import org.opensearch.action.admin.indices.alias.Alias +import org.opensearch.action.admin.indices.close.CloseIndexRequest +import org.opensearch.action.admin.indices.create.CreateIndexRequest +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest +import org.opensearch.action.admin.indices.get.GetIndexRequest +import org.opensearch.action.admin.indices.get.GetIndexResponse +import org.opensearch.action.admin.indices.mapping.get.GetMappingsRequest +import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest +import org.opensearch.action.admin.indices.open.OpenIndexRequest +import org.opensearch.action.admin.indices.refresh.RefreshRequest +import org.opensearch.action.bulk.BulkRequest +import org.opensearch.action.bulk.BulkResponse +import org.opensearch.action.fieldcaps.FieldCapabilitiesRequest +import org.opensearch.action.index.IndexRequest +import org.opensearch.action.search.SearchRequest +import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.alerts.AlertIndices +import org.opensearch.alerting.core.ScheduledJobIndices +import org.opensearch.alerting.model.DocumentLevelTriggerRunResult +import org.opensearch.alerting.model.WorkflowMetadata +import org.opensearch.alerting.transport.AlertingSingleNodeTestCase +import org.opensearch.alerting.util.AlertingException +import org.opensearch.alerting.util.DocLevelMonitorQueries +import org.opensearch.alerting.util.DocLevelMonitorQueries.Companion.INDEX_PATTERN_SUFFIX +import org.opensearch.alerting.workflow.CompositeWorkflowRunner +import org.opensearch.common.settings.Settings +import 
org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.action.AcknowledgeAlertRequest +import org.opensearch.commons.alerting.action.AcknowledgeAlertResponse +import org.opensearch.commons.alerting.action.AcknowledgeChainedAlertRequest +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.DeleteMonitorRequest +import org.opensearch.commons.alerting.action.GetAlertsRequest +import org.opensearch.commons.alerting.action.GetAlertsResponse +import org.opensearch.commons.alerting.action.IndexMonitorResponse +import org.opensearch.commons.alerting.action.SearchMonitorRequest +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.ChainedAlertTrigger +import org.opensearch.commons.alerting.model.ChainedMonitorFindings +import org.opensearch.commons.alerting.model.CompositeInput +import org.opensearch.commons.alerting.model.DataSources +import org.opensearch.commons.alerting.model.Delegate +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.IntervalSchedule +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.ScheduledJob.Companion.DOC_LEVEL_QUERIES_INDEX +import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.alerting.model.Table +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import org.opensearch.index.mapper.MapperService +import org.opensearch.index.query.MatchQueryBuilder +import org.opensearch.index.query.QueryBuilders +import org.opensearch.index.query.TermQueryBuilder +import org.opensearch.rest.RestRequest +import org.opensearch.script.Script +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder +import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.test.OpenSearchTestCase +import java.time.Instant +import java.time.ZonedDateTime +import java.time.format.DateTimeFormatter +import java.time.temporal.ChronoUnit +import java.time.temporal.ChronoUnit.MILLIS +import java.util.Collections +import java.util.Map +import java.util.UUID +import java.util.concurrent.ExecutionException +import java.util.concurrent.TimeUnit +import java.util.stream.Collectors + +class MonitorDataSourcesIT : AlertingSingleNodeTestCase() { + + fun `test execute monitor with dryrun`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + ) + val monitorResponse = createMonitor(monitor) + val testTime = 
DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val id = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, id, true) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + val table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 0) + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 0) + try { + client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, "wrong_alert_index")) + .get() + fail() + } catch (e: Exception) { + Assert.assertTrue(e.message!!.contains("IndexNotFoundException")) + } + } + + fun `test execute monitor with custom alerts index`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customAlertsIndex = "custom_alerts_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(alertsIndex = customAlertsIndex) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val id = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + val alerts = searchAlerts(id, customAlertsIndex) + assertEquals("Alert saved for test monitor", 1, alerts.size) + val table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, customAlertsIndex)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + val alertId = getAlertsResponse.alerts.get(0).id + val acknowledgeAlertResponse = client().execute( + AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE, + AcknowledgeAlertRequest(id, listOf(alertId), 
WriteRequest.RefreshPolicy.IMMEDIATE) + ).get() + Assert.assertEquals(acknowledgeAlertResponse.acknowledged.size, 1) + } + + fun `test mappings parsing`() { + + val index1 = "index_123" + val index2 = "index_456" + val index3 = "index_789" + val index4 = "index_012" + val q1 = DocLevelQuery(query = "properties:\"abcd\"", name = "1", fields = listOf()) + val q2 = DocLevelQuery(query = "type.properties:\"abcd\"", name = "2", fields = listOf()) + val q3 = DocLevelQuery(query = "type.something.properties:\"abcd\"", name = "3", fields = listOf()) + val q4 = DocLevelQuery(query = "type.something.properties.lastone:\"abcd\"", name = "4", fields = listOf()) + + createIndex(index1, Settings.EMPTY) + createIndex(index2, Settings.EMPTY) + createIndex(index3, Settings.EMPTY) + createIndex(index4, Settings.EMPTY) + + val m1 = """{ + "properties": { + "properties": { + "type": "keyword" + } + } + } + """.trimIndent() + client().admin().indices().putMapping(PutMappingRequest(index1).source(m1, XContentType.JSON)).get() + + val m2 = """{ + "properties": { + "type": { + "properties": { + "properties": { "type": "keyword" } + } + } + } + } + """.trimIndent() + client().admin().indices().putMapping(PutMappingRequest(index2).source(m2, XContentType.JSON)).get() + + val m3 = """{ + "properties": { + "type": { + "properties": { + "something": { + "properties" : { + "properties": { "type": "keyword" } + } + } + } + } + } + } + """.trimIndent() + client().admin().indices().putMapping(PutMappingRequest(index3).source(m3, XContentType.JSON)).get() + + val m4 = """{ + "properties": { + "type": { + "properties": { + "something": { + "properties" : { + "properties": { + "properties": { + "lastone": { "type": "keyword" } + } + } + } + } + } + } + } + } + """.trimIndent() + client().admin().indices().putMapping(PutMappingRequest(index4).source(m4, XContentType.JSON)).get() + + val docLevelInput = DocLevelMonitorInput( + "description", + listOf(index1, index2, index3, index4), + listOf(q1, q2, q3, q4) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "custom_findings_index-1" + val customQueryIndex = "custom_alerts_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + val monitorResponse = createMonitor(monitor) + + val testDoc1 = """{ + "properties": "abcd" + }""" + indexDoc(index1, "1", testDoc1) + val testDoc2 = """{ + "type.properties": "abcd" + }""" + indexDoc(index2, "1", testDoc2) + val testDoc3 = """{ + "type.something.properties": "abcd" + }""" + indexDoc(index3, "1", testDoc3) + val testDoc4 = """{ + "type.something.properties.lastone": "abcd" + }""" + indexDoc(index4, "1", testDoc4) + + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + val id = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + val table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + 
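+        // a single alert is expected for the one trigger, with one finding per index/query pair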
Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + val findings = searchFindings(id, customFindingsIndex) + assertEquals("Findings saved for test monitor", 4, findings.size) + } + + fun `test execute monitor without triggers`() { + val docQuery = DocLevelQuery(query = "eventType:\"login\"", name = "3", fields = listOf()) + + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery) + ) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "custom_findings_index-1" + val customQueryIndex = "custom_alerts_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + val monitorResponse = createMonitor(monitor) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + + val testDoc = """{ + "eventType" : "login" + }""" + indexDoc(index, "1", testDoc) + + monitor = monitorResponse!!.monitor + val id = monitorResponse.id + // Execute dry run first and expect no alerts or findings + var executeMonitorResponse = executeMonitor(monitor, id, true) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 0) + searchAlerts(id) + var table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.isEmpty()) + var findings = searchFindings(id, customFindingsIndex) + assertEquals("Findings saved for test monitor", 0, findings.size) + + // Execute real run - expect findings, but no alerts + executeMonitorResponse = executeMonitor(monitor, id, false) + + searchAlerts(id) + table = Table("asc", "id", null, 1, 0, "") + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.isEmpty()) + + findings = searchFindings(id, customFindingsIndex) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + assertEquals("Didn't match query", 1, findings[0].docLevelQueries.size) + } + + fun `test execute monitor with custom query index`() { + val q1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) + val q2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) + val q3 = DocLevelQuery(query = "source.ip.v4.v0:120", name = "5", fields = listOf()) + val q4 = DocLevelQuery(query = "alias.some.fff:\"us-west-2\"", name = "6", fields = listOf()) + val q5 = DocLevelQuery(query = "message:\"This is an error from IAD region\"", name = "7", fields = listOf()) + val q6 = DocLevelQuery(query = "f1.type.f4:\"hello\"", name = "8", fields = listOf()) + val q7 = DocLevelQuery(query = "f1.type.f2.f3:\"world\"", name = "9", fields = listOf()) + val q8 = DocLevelQuery(query = "type:\"some type\"", name = "10", fields = listOf()) + val q9 = DocLevelQuery(query = "properties:123", name = "11", fields = listOf()) + + val docLevelInput = DocLevelMonitorInput( + 
"description", + listOf(index), + listOf(q1, q2, q3, q4, q5, q6, q7, q8, q9) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "custom_findings_index-1" + val customQueryIndex = "custom_alerts_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + val monitorResponse = createMonitor(monitor) + // Trying to test here few different "nesting" situations and "wierd" characters + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v1" : 12345, + "source.ip.v6.v2" : 16645, + "source.ip.v4.v0" : 120, + "test_bad_char" : "\u0000", + "test_strict_date_time" : "$testTime", + "test_field.some_other_field" : "us-west-2", + "f1.type.f2.f3" : "world", + "f1.type.f4" : "hello", + "type" : "some type", + "properties": 123 + }""" + indexDoc(index, "1", testDoc) + client().admin().indices().putMapping( + PutMappingRequest(index).source("alias.some.fff", "type=alias,path=test_field.some_other_field") + ) + val mappings = "{\"properties\":{\"type\":{\"type\":\"text\",\"fields\":{\"keyword\":{\"type\":\"keyword\"," + + "\"ignore_above\":256}}},\"query\":{\"type\":\"text\"}}}" + val mappingsResp = client().admin().indices().putMapping( + PutMappingRequest(index).source(mappings, XContentType.JSON) + ).get() + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + val id = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + val table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + val findings = searchFindings(id, customFindingsIndex) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + assertEquals("Didn't match all 9 queries", 9, findings[0].docLevelQueries.size) + } + + fun `test execute monitor with non-flattened json doc as source`() { + val docQuery1 = DocLevelQuery(query = "source.device.port:12345 OR source.device.hwd.id:12345", name = "3", fields = listOf()) + + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery1) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "custom_findings_index-1" + val customQueryIndex = "custom_alerts_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + val monitorResponse = createMonitor(monitor) + + val mappings = """{ + "properties": { + 
"source.device.port": { "type": "long" }, + "source.device.hwd.id": { "type": "long" }, + "nested_field": { + "type": "nested", + "properties": { + "test1": { + "type": "keyword" + } + } + }, + "my_join_field": { + "type": "join", + "relations": { + "question": "answer" + } + } + } + }""" + + client().admin().indices().putMapping(PutMappingRequest(index).source(mappings, XContentType.JSON)).get() + val getFieldCapabilitiesResp = client().fieldCaps(FieldCapabilitiesRequest().indices(index).fields("*")).get() + assertTrue(getFieldCapabilitiesResp.getField("source").containsKey("object")) + assertTrue(getFieldCapabilitiesResp.getField("source.device").containsKey("object")) + assertTrue(getFieldCapabilitiesResp.getField("source.device.hwd").containsKey("object")) + // testing both, nested and flatten documents + val testDocuments = mutableListOf() + testDocuments += """{ + "source" : { "device": {"port" : 12345 } }, + "nested_field": { "test1": "some text" } + }""" + testDocuments += """{ + "source.device.port" : "12345" + }""" + testDocuments += """{ + "source.device.port" : 12345 + }""" + testDocuments += """{ + "source" : { "device": {"hwd": { "id": 12345 } } } + }""" + testDocuments += """{ + "source.device.hwd.id" : 12345 + }""" + // Document with join field + testDocuments += """{ + "source" : { "device" : { "hwd": { "id" : 12345 } } }, + "my_join_field": { "name": "question" } + }""" + // Checking if these pointless but valid documents cause any issues + testDocuments += """{ + "source" : {} + }""" + testDocuments += """{ + "source.device" : null + }""" + testDocuments += """{ + "source.device" : {} + }""" + testDocuments += """{ + "source.device.hwd" : {} + }""" + testDocuments += """{ + "source.device.hwd.id" : null + }""" + testDocuments += """{ + "some.multi.val.field" : [12345, 10, 11] + }""" + // Insert all documents + for (i in testDocuments.indices) { + indexDoc(index, "$i", testDocuments[i]) + } + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + val id = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + val table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + val findings = searchFindings(id, customFindingsIndex) + assertEquals("Findings saved for test monitor", 6, findings.size) + assertEquals("Didn't match query", 1, findings[0].docLevelQueries.size) + } + + fun `test execute monitor with custom query index old`() { + val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) + val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) + val docQuery3 = DocLevelQuery(query = "source.ip.v4.v0:120", name = "5", fields = listOf()) + val docQuery4 = DocLevelQuery(query = "alias.some.fff:\"us-west-2\"", name = "6", fields = listOf()) + val docQuery5 = DocLevelQuery(query = "message:\"This is an error from IAD region\"", name = "7", fields = listOf()) + val docQuery6 = DocLevelQuery(query = "type.subtype:\"some subtype\"", name = "8", fields = listOf()) + val docQuery7 = DocLevelQuery(query = 
"supertype.type:\"some type\"", name = "9", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery1, docQuery2, docQuery3, docQuery4, docQuery5, docQuery6, docQuery7) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "custom_findings_index-1" + val customQueryIndex = "custom_alerts_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + // Trying to test here few different "nesting" situations and "wierd" characters + val testDoc = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v1" : 12345, + "source.ip.v6.v2" : 16645, + "source.ip.v4.v0" : 120, + "test_bad_char" : "\u0000", + "test_strict_date_time" : "$testTime", + "test_field.some_other_field" : "us-west-2", + "type.subtype" : "some subtype", + "supertype.type" : "some type" + }""" + indexDoc(index, "1", testDoc) + client().admin().indices().putMapping( + PutMappingRequest(index).source("alias.some.fff", "type=alias,path=test_field.some_other_field") + ) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val id = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + val table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + val findings = searchFindings(id, customFindingsIndex) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + assertEquals("Didn't match all 7 queries", 7, findings[0].docLevelQueries.size) + } + + fun `test monitor error alert created and updated with new error`() { + val docQuery = DocLevelQuery(query = "source:12345", name = "1", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + + val testDoc = """{ + "message" : "This is an error from IAD region" + }""" + + val monitorResponse = createMonitor(monitor) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + + monitor = monitorResponse!!.monitor + val id = monitorResponse.id + + // Close index to force error alert + client().admin().indices().close(CloseIndexRequest(index)).get() + + var executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 0) + searchAlerts(id) + var 
table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + Assert.assertTrue(getAlertsResponse.alerts[0].errorMessage == "IndexClosedException[closed]") + // Reopen index + client().admin().indices().open(OpenIndexRequest(index)).get() + // Close queryIndex + client().admin().indices().close(CloseIndexRequest(DOC_LEVEL_QUERIES_INDEX + INDEX_PATTERN_SUFFIX)).get() + + indexDoc(index, "1", testDoc) + + executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 0) + searchAlerts(id) + table = Table("asc", "id", null, 10, 0, "") + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + Assert.assertTrue(getAlertsResponse.alerts[0].errorHistory[0].message == "IndexClosedException[closed]") + Assert.assertEquals(1, getAlertsResponse.alerts[0].errorHistory.size) + Assert.assertTrue(getAlertsResponse.alerts[0].errorMessage!!.contains("Failed to run percolate search")) + } + + fun `test monitor error alert created trigger run errored 2 times same error`() { + val docQuery = DocLevelQuery(query = "source:12345", name = "1", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery) + ) + val trigger = randomDocumentLevelTrigger(condition = Script("invalid script code")) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + + val monitorResponse = createMonitor(monitor) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + + monitor = monitorResponse!!.monitor + val id = monitorResponse.id + + var executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + var table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + Assert.assertTrue(getAlertsResponse.alerts[0].errorMessage!!.contains("Trigger errors")) + + val oldLastNotificationTime = getAlertsResponse.alerts[0].lastNotificationTime + + executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + table = Table("asc", "id", null, 10, 0, "") + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + Assert.assertEquals(0, getAlertsResponse.alerts[0].errorHistory.size) + Assert.assertTrue(getAlertsResponse.alerts[0].errorMessage!!.contains("Trigger 
errors")) + Assert.assertTrue(getAlertsResponse.alerts[0].lastNotificationTime!!.isAfter(oldLastNotificationTime)) + } + + fun `test monitor error alert cleared after successful monitor run`() { + val customAlertIndex = "custom-alert-index" + val customAlertHistoryIndex = "custom-alert-history-index" + val customAlertHistoryIndexPattern = "" + val docQuery = DocLevelQuery(query = "source:12345", name = "1", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + alertsIndex = customAlertIndex, + alertsHistoryIndex = customAlertHistoryIndex, + alertsHistoryIndexPattern = customAlertHistoryIndexPattern + ) + ) + + val monitorResponse = createMonitor(monitor) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + + monitor = monitorResponse!!.monitor + val id = monitorResponse.id + + // Close index to force error alert + client().admin().indices().close(CloseIndexRequest(index)).get() + + var executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 0) + searchAlerts(id) + var table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, customAlertIndex)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertEquals(1, getAlertsResponse.alerts.size) + Assert.assertTrue(getAlertsResponse.alerts[0].errorMessage == "IndexClosedException[closed]") + Assert.assertNull(getAlertsResponse.alerts[0].endTime) + + // Open index to have monitor run successfully + client().admin().indices().open(OpenIndexRequest(index)).get() + // Execute monitor again and expect successful run + executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + // Verify that alert is moved to history index + table = Table("asc", "id", null, 10, 0, "") + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, customAlertIndex)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertEquals(0, getAlertsResponse.alerts.size) + + table = Table("asc", "id", null, 10, 0, "") + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, customAlertHistoryIndex)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertEquals(1, getAlertsResponse.alerts.size) + Assert.assertTrue(getAlertsResponse.alerts[0].errorMessage == "IndexClosedException[closed]") + Assert.assertNotNull(getAlertsResponse.alerts[0].endTime) + } + + fun `test multiple monitor error alerts cleared after successful monitor run`() { + val customAlertIndex = "custom-alert-index" + val customAlertHistoryIndex = "custom-alert-history-index" + val customAlertHistoryIndexPattern = "" + val docQuery = DocLevelQuery(query = "source:12345", name = "1", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery) + ) + val trigger = 
randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + alertsIndex = customAlertIndex, + alertsHistoryIndex = customAlertHistoryIndex, + alertsHistoryIndexPattern = customAlertHistoryIndexPattern + ) + ) + + val monitorResponse = createMonitor(monitor) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + + monitor = monitorResponse!!.monitor + val monitorId = monitorResponse.id + + // Close index to force error alert + client().admin().indices().close(CloseIndexRequest(index)).get() + + var executeMonitorResponse = executeMonitor(monitor, monitorId, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 0) + // Create 10 old alerts to simulate having "old error alerts"(2.6) + for (i in 1..10) { + val startTimestamp = Instant.now().minusSeconds(3600 * 24 * i.toLong()).toEpochMilli() + val oldErrorAlertAsString = """ + {"id":"$i","version":-1,"monitor_id":"$monitorId", + "schema_version":4,"monitor_version":1,"monitor_name":"geCNcHKTlp","monitor_user":{"name":"","backend_roles":[], + "roles":[],"custom_attribute_names":[],"user_requested_tenant":null},"trigger_id":"_nnk_YcB5pHgSZwYwO2r", + "trigger_name":"NoOp trigger","finding_ids":[],"related_doc_ids":[],"state":"ERROR","error_message":"some monitor error", + "alert_history":[],"severity":"","action_execution_results":[], + "start_time":$startTimestamp,"last_notification_time":$startTimestamp,"end_time":null,"acknowledged_time":null} + """.trimIndent() + + client().index( + IndexRequest(customAlertIndex) + .id("$i") + .routing(monitorId) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .source(oldErrorAlertAsString, XContentType.JSON) + ).get() + } + var table = Table("asc", "id", null, 1000, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", monitorId, customAlertIndex)) + .get() + + Assert.assertTrue(getAlertsResponse != null) + Assert.assertEquals(1 + 10, getAlertsResponse.alerts.size) + val newErrorAlert = getAlertsResponse.alerts.firstOrNull { it.errorMessage == "IndexClosedException[closed]" } + Assert.assertNotNull(newErrorAlert) + Assert.assertNull(newErrorAlert!!.endTime) + + // Open index to have monitor run successfully + client().admin().indices().open(OpenIndexRequest(index)).get() + // Execute monitor again and expect successful run + executeMonitorResponse = executeMonitor(monitor, monitorId, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + // Verify that alert is moved to history index + table = Table("asc", "id", null, 1000, 0, "") + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", monitorId, customAlertIndex)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertEquals(0, getAlertsResponse.alerts.size) + + table = Table("asc", "id", null, 1000, 0, "") + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", monitorId, customAlertHistoryIndex)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertEquals(11, getAlertsResponse.alerts.size) + getAlertsResponse.alerts.forEach { 
alert -> assertNotNull(alert.endTime) } + } + + fun `test execute monitor with custom query index and nested mappings`() { + val docQuery1 = DocLevelQuery(query = "message:\"msg 1 2 3 4\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "custom_findings_index-1" + val customQueryIndex = "custom_alerts_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + val monitorResponse = createMonitor(monitor) + + // We are verifying here that an index with nested mappings and nested aliases + // won't break query matching + + // Create index mappings + val m: MutableMap<String, Any> = HashMap() + val m1: MutableMap<String, Any> = HashMap() + m1["title"] = Map.of("type", "text") + m1["category"] = Map.of("type", "keyword") + m["rule"] = Map.of("type", "nested", "properties", m1) + val properties = Map.of("properties", m) + + client().admin().indices().putMapping( + PutMappingRequest( + index + ).source(properties) + ).get() + + // Put alias for nested fields + val mm: MutableMap<String, Any> = HashMap() + val mm1: MutableMap<String, Any> = HashMap() + mm1["title_alias"] = Map.of("type", "alias", "path", "rule.title") + mm["rule"] = Map.of("type", "nested", "properties", mm1) + val properties1 = Map.of("properties", mm) + client().admin().indices().putMapping( + PutMappingRequest( + index + ).source(properties1) + ).get() + + val testDoc = """{ + "rule": {"title": "some_title"}, + "message": "msg 1 2 3 4" + }""" + indexDoc(index, "2", testDoc) + + client().admin().indices().putMapping( + PutMappingRequest(index).source("alias.some.fff", "type=alias,path=test_field.some_other_field") + ) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + val id = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + val table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + val findings = searchFindings(id, customFindingsIndex) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("2")) + assertEquals("Didn't match query", 1, findings[0].docLevelQueries.size) + } + + fun `test cleanup monitor on partial create monitor failure`() { + val docQuery = DocLevelQuery(query = "dnbkjndsfkjbnds:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customQueryIndex = "custom_alerts_index" + val analyzer = "dfbdfbafd" + val testDoc = """{ + "rule": {"title": "some_title"}, + "message": "msg 1 2 3 4" + }""" + indexDoc(index, "2", testDoc) + client().admin().indices() + .create( + 
CreateIndexRequest(customQueryIndex + "-000001").alias(Alias(customQueryIndex)) + .mapping( + """ + { + "_meta": { + "schema_version": 1 + }, + "properties": { + "query": { + "type": "percolator_ext" + }, + "monitor_id": { + "type": "text" + }, + "index": { + "type": "text" + } + } + } + """.trimIndent() + ) + ).get() + + client().admin().indices().close(CloseIndexRequest(customQueryIndex + "-000001")).get() + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + queryIndexMappingsByType = mapOf(Pair("text", mapOf(Pair("analyzer", analyzer)))), + ) + ) + try { + createMonitor(monitor) + fail("monitor creation should fail due to incorrect analyzer name in test setup") + } catch (e: Exception) { + Assert.assertEquals(client().search(SearchRequest(SCHEDULED_JOBS_INDEX)).get().hits.hits.size, 0) + } + } + + fun `test execute monitor without create when no monitors exists`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customQueryIndex = "custom_alerts_index" + val analyzer = "whitespace" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + queryIndexMappingsByType = mapOf(Pair("text", mapOf(Pair("analyzer", analyzer)))), + ) + ) + var executeMonitorResponse = executeMonitor(monitor, null) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + assertIndexNotExists(SCHEDULED_JOBS_INDEX) + + val createMonitorResponse = createMonitor(monitor) + + assertIndexExists(SCHEDULED_JOBS_INDEX) + + indexDoc(index, "1", testDoc) + + executeMonitorResponse = executeMonitor(monitor, createMonitorResponse?.id, dryRun = false) + + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + Assert.assertEquals( + (executeMonitorResponse.monitorRunResult.triggerResults.iterator().next().value as DocumentLevelTriggerRunResult) + .triggeredDocs.size, + 1 + ) + } + + fun `test execute monitor with custom query index and custom field mappings`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customQueryIndex = "custom_alerts_index" + val analyzer = "whitespace" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + queryIndexMappingsByType = mapOf(Pair("text", mapOf(Pair("analyzer", analyzer)))), + ) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + 
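// The custom query index should be created with the "whitespace" analyzer on its text fields; verified below via the get-mappings response + 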
assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val id = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + val mapping = client().admin().indices().getMappings(GetMappingsRequest().indices(customQueryIndex)).get() + Assert.assertTrue(mapping.toString().contains("\"analyzer\":\"$analyzer\"")) + } + + fun `test delete monitor deletes all queries and metadata too`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customQueryIndex = "custom_query_index" + val analyzer = "whitespace" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + queryIndexMappingsByType = mapOf(Pair("text", mapOf(Pair("analyzer", analyzer)))), + ) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val monitorId = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, monitorId, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(monitorId) + val clusterStateResponse = client().admin().cluster().state(ClusterStateRequest().indices(customQueryIndex).metadata(true)).get() + val mapping = client().admin().indices().getMappings(GetMappingsRequest().indices(customQueryIndex)).get() + Assert.assertTrue(mapping.toString().contains("\"analyzer\":\"$analyzer\"") == true) + // Verify queries exist + var searchResponse = client().search( + SearchRequest(customQueryIndex).source(SearchSourceBuilder().query(QueryBuilders.matchAllQuery())) + ).get() + assertNotEquals(0, searchResponse.hits.hits.size) + + deleteMonitor(monitorId) + assertIndexNotExists(customQueryIndex + "*") + assertAliasNotExists(customQueryIndex) + } + + fun `test execute monitor with custom findings index and pattern`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(findingsIndex = customFindingsIndex, findingsIndexPattern = customFindingsIndexPattern) + ) + val monitorResponse = createMonitor(monitor) + client().admin().indices().refresh(RefreshRequest("*")) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = 
"""{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val id = monitorResponse.id + var executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + + var findings = searchFindings(id, "custom_findings_index*", true) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + + indexDoc(index, "2", testDoc) + executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + findings = searchFindings(id, "custom_findings_index*", true) + assertEquals("Findings saved for test monitor", 2, findings.size) + assertTrue("Findings saved for test monitor", findings[1].relatedDocIds.contains("2")) + + val indices = getAllIndicesFromPattern("custom_findings_index*") + Assert.assertTrue(indices.isNotEmpty()) + } + + fun `test execute monitor with multiple indices in input success`() { + + val testSourceIndex1 = "test_source_index1" + val testSourceIndex2 = "test_source_index2" + + createIndex(testSourceIndex1, Settings.EMPTY) + createIndex(testSourceIndex2, Settings.EMPTY) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex1, testSourceIndex2), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(findingsIndex = customFindingsIndex, findingsIndexPattern = customFindingsIndexPattern) + ) + val monitorResponse = createMonitor(monitor) + client().admin().indices().refresh(RefreshRequest("*")) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + + indexDoc(testSourceIndex1, "1", testDoc) + indexDoc(testSourceIndex2, "1", testDoc) + + val id = monitorResponse.id + var executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + + var findings = searchFindings(id, "custom_findings_index*", true) + assertEquals("Findings saved for test monitor", 2, findings.size) + var foundFindings = findings.filter { it.relatedDocIds.contains("1") } + assertEquals("Didn't find 2 findings", 2, foundFindings.size) + + indexDoc(testSourceIndex1, "2", testDoc) + indexDoc(testSourceIndex2, "2", testDoc) + executeMonitorResponse = executeMonitor(monitor, id, false) + 
Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + findings = searchFindings(id, "custom_findings_index*", true) + assertEquals("Findings saved for test monitor", 4, findings.size) + foundFindings = findings.filter { it.relatedDocIds.contains("2") } + assertEquals("Didn't find 2 findings", 2, foundFindings.size) + + val indices = getAllIndicesFromPattern("custom_findings_index*") + Assert.assertTrue(indices.isNotEmpty()) + } + + fun `test execute monitor with multiple indices in input first index gets deleted`() { + // First input index gets deleted before the monitor runs + val testSourceIndex1 = "test_source_index1" + val testSourceIndex2 = "test_source_index2" + + createIndex(testSourceIndex1, Settings.EMPTY) + createIndex(testSourceIndex2, Settings.EMPTY) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex1, testSourceIndex2), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(findingsIndex = customFindingsIndex, findingsIndexPattern = customFindingsIndexPattern) + ) + val monitorResponse = createMonitor(monitor) + client().admin().indices().refresh(RefreshRequest("*")) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + + indexDoc(testSourceIndex2, "1", testDoc) + + client().admin().indices().delete(DeleteIndexRequest(testSourceIndex1)).get() + + val id = monitorResponse.id + var executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + + var findings = searchFindings(id, "custom_findings_index*", true) + assertEquals("Findings saved for test monitor", 1, findings.size) + var foundFindings = findings.filter { it.relatedDocIds.contains("1") } + assertEquals("Didn't find 1 finding", 1, foundFindings.size) + + indexDoc(testSourceIndex2, "2", testDoc) + executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + findings = searchFindings(id, "custom_findings_index*", true) + assertEquals("Findings saved for test monitor", 2, findings.size) + foundFindings = findings.filter { it.relatedDocIds.contains("2") } + assertEquals("Didn't find 1 finding", 1, foundFindings.size) + + val indices = getAllIndicesFromPattern("custom_findings_index*") + Assert.assertTrue(indices.isNotEmpty()) + } + + fun `test execute monitor with multiple indices in input second index gets deleted`() { + // Second input index gets deleted before the monitor runs + val testSourceIndex1 = "test_source_index1" + val testSourceIndex2 = "test_source_index2" + + createIndex(testSourceIndex1, 
Settings.EMPTY) + createIndex(testSourceIndex2, Settings.EMPTY) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex1, testSourceIndex2), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(findingsIndex = customFindingsIndex, findingsIndexPattern = customFindingsIndexPattern) + ) + val monitorResponse = createMonitor(monitor) + client().admin().indices().refresh(RefreshRequest("*")) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + + indexDoc(testSourceIndex1, "1", testDoc) + + client().admin().indices().delete(DeleteIndexRequest(testSourceIndex2)).get() + + val id = monitorResponse.id + var executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + + var findings = searchFindings(id, "custom_findings_index*", true) + assertEquals("Findings saved for test monitor", 1, findings.size) + var foundFindings = findings.filter { it.relatedDocIds.contains("1") } + assertEquals("Didn't find 1 finding", 1, foundFindings.size) + + indexDoc(testSourceIndex1, "2", testDoc) + + executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(id) + findings = searchFindings(id, "custom_findings_index*", true) + assertEquals("Findings saved for test monitor", 2, findings.size) + foundFindings = findings.filter { it.relatedDocIds.contains("2") } + assertEquals("Didn't find 1 finding", 1, foundFindings.size) + + val indices = getAllIndicesFromPattern("custom_findings_index*") + Assert.assertTrue(indices.isNotEmpty()) + } + + fun `test execute pre-existing monitor and update`() { + val request = CreateIndexRequest(SCHEDULED_JOBS_INDEX).mapping(ScheduledJobIndices.scheduledJobMappings()) + .settings(Settings.builder().put("index.hidden", true).build()) + client().admin().indices().create(request) + val monitorStringWithoutName = """ + { + "monitor": { + "type": "monitor", + "schema_version": 0, + "name": "UayEuXpZtb", + "monitor_type": "doc_level_monitor", + "user": { + "name": "", + "backend_roles": [], + "roles": [], + "custom_attribute_names": [], + "user_requested_tenant": null + }, + "enabled": true, + "enabled_time": 1662753436791, + "schedule": { + "period": { + "interval": 5, + "unit": "MINUTES" + } + }, + "inputs": [{ + "doc_level_input": { + "description": "description", + "indices": [ + "$index" + ], + "queries": [{ + "id": "63efdcce-b5a1-49f4-a25f-6b5f9496a755", + "name": "3", + "query": "test_field:\"us-west-2\"", + "tags": [] + }] + } + }], + "triggers": [{ + "document_level_trigger": { + "id": "OGnTI4MBv6qt0ATc9Phk", + "name": "mrbHRMevYI", + "severity": "1", + 
"condition": { + "script": { + "source": "return true", + "lang": "painless" + } + }, + "actions": [] + } + }], + "last_update_time": 1662753436791 + } + } + """.trimIndent() + val monitorId = "abc" + indexDoc(SCHEDULED_JOBS_INDEX, monitorId, monitorStringWithoutName) + val getMonitorResponse = getMonitorResponse(monitorId) + Assert.assertNotNull(getMonitorResponse) + Assert.assertNotNull(getMonitorResponse.monitor) + val monitor = getMonitorResponse.monitor + + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + indexDoc(index, "1", testDoc) + var executeMonitorResponse = executeMonitor(monitor!!, monitorId, false) + Assert.assertNotNull(executeMonitorResponse) + if (executeMonitorResponse != null) { + Assert.assertNotNull(executeMonitorResponse.monitorRunResult.monitorName) + } + val alerts = searchAlerts(monitorId) + assertEquals(1, alerts.size) + + val customAlertsIndex = "custom_alerts_index" + val customQueryIndex = "custom_query_index" + Assert.assertFalse(client().admin().cluster().state(ClusterStateRequest()).get().state.routingTable.hasIndex(customQueryIndex)) + val customFindingsIndex = "custom_findings_index" + val updateMonitorResponse = updateMonitor( + monitor.copy( + id = monitorId, + owner = "security_analytics_plugin", + dataSources = DataSources( + alertsIndex = customAlertsIndex, + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex + ) + ), + monitorId + ) + Assert.assertNotNull(updateMonitorResponse) + Assert.assertEquals(updateMonitorResponse!!.monitor.owner, "security_analytics_plugin") + indexDoc(index, "2", testDoc) + if (updateMonitorResponse != null) { + executeMonitorResponse = executeMonitor(updateMonitorResponse.monitor, monitorId, false) + } + val findings = searchFindings(monitorId, customFindingsIndex) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("2")) + val customAlertsIndexAlerts = searchAlerts(monitorId, customAlertsIndex) + assertEquals("Alert saved for test monitor", 1, customAlertsIndexAlerts.size) + val table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, customAlertsIndex)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", monitorId, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + val searchRequest = SearchRequest(SCHEDULED_JOBS_INDEX) + var searchMonitorResponse = + client().execute(AlertingActions.SEARCH_MONITORS_ACTION_TYPE, SearchMonitorRequest(searchRequest)) + .get() + Assert.assertEquals(searchMonitorResponse.hits.hits.size, 0) + searchRequest.source().query(MatchQueryBuilder("monitor.owner", "security_analytics_plugin")) + searchMonitorResponse = + client().execute(AlertingActions.SEARCH_MONITORS_ACTION_TYPE, SearchMonitorRequest(searchRequest)) + .get() + Assert.assertEquals(searchMonitorResponse.hits.hits.size, 1) + } + + fun `test execute pre-existing monitor without triggers`() { + val request = 
CreateIndexRequest(SCHEDULED_JOBS_INDEX).mapping(ScheduledJobIndices.scheduledJobMappings()) + .settings(Settings.builder().put("index.hidden", true).build()) + client().admin().indices().create(request) + val monitorStringWithoutName = """ + { + "monitor": { + "type": "monitor", + "schema_version": 0, + "name": "UayEuXpZtb", + "monitor_type": "doc_level_monitor", + "user": { + "name": "", + "backend_roles": [], + "roles": [], + "custom_attribute_names": [], + "user_requested_tenant": null + }, + "enabled": true, + "enabled_time": 1662753436791, + "schedule": { + "period": { + "interval": 5, + "unit": "MINUTES" + } + }, + "inputs": [{ + "doc_level_input": { + "description": "description", + "indices": [ + "$index" + ], + "queries": [{ + "id": "63efdcce-b5a1-49f4-a25f-6b5f9496a755", + "name": "3", + "query": "test_field:\"us-west-2\"", + "tags": [] + }] + } + }], + "triggers": [], + "last_update_time": 1662753436791 + } + } + """.trimIndent() + val monitorId = "abc" + indexDoc(SCHEDULED_JOBS_INDEX, monitorId, monitorStringWithoutName) + val getMonitorResponse = getMonitorResponse(monitorId) + Assert.assertNotNull(getMonitorResponse) + Assert.assertNotNull(getMonitorResponse.monitor) + val monitor = getMonitorResponse.monitor + + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + indexDoc(index, "1", testDoc) + var executeMonitorResponse = executeMonitor(monitor!!, monitorId, false) + Assert.assertNotNull(executeMonitorResponse) + if (executeMonitorResponse != null) { + Assert.assertNotNull(executeMonitorResponse.monitorRunResult.monitorName) + } + val alerts = searchAlerts(monitorId) + assertEquals(0, alerts.size) + + val findings = searchFindings(monitorId) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + } + + fun `test execute monitor with empty source index`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(findingsIndex = customFindingsIndex) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + + val monitorId = monitorResponse.id + var executeMonitorResponse = executeMonitor(monitor, monitorId, false) + + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + + refreshIndex(customFindingsIndex) + + var findings = searchFindings(monitorId, customFindingsIndex) + assertEquals("Findings saved for test monitor", 0, findings.size) + + indexDoc(index, "1", testDoc) + + executeMonitor(monitor, monitorId, false) + + 
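// Now that doc "1" is indexed, the second run should have produced a finding referencing it + 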
refreshIndex(customFindingsIndex) + + findings = searchFindings(monitorId, customFindingsIndex) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + } + + fun `test execute GetFindingsAction with monitorId param`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(findingsIndex = customFindingsIndex) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val monitorId = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, monitorId, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(monitorId) + val findings = searchFindings(monitorId, customFindingsIndex) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + // fetch findings - pass monitorId as reference to finding_index + val findingsFromAPI = getFindings(findings.get(0).id, monitorId, null) + assertEquals( + "Findings mismatch between manually searched and fetched via GetFindingsAction", + findings.get(0).id, + findingsFromAPI.get(0).id + ) + } + + fun `test execute GetFindingsAction with unknown monitorId`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(findingsIndex = customFindingsIndex) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val monitorId = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, monitorId, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + searchAlerts(monitorId) + val findings = searchFindings(monitorId, customFindingsIndex) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + // fetch findings - pass a nonexistent monitorId and expect a "Monitor not found" error + 
try { + getFindings(findings.get(0).id, "unknown_monitor_id_123456789", null) + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception does not contain 'Monitor not found' error", + it.contains("Monitor not found") + ) + } + } + } + + fun `test execute monitor with owner field`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customAlertsIndex = "custom_alerts_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(alertsIndex = customAlertsIndex), + owner = "owner" + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + Assert.assertEquals(monitor.owner, "owner") + indexDoc(index, "1", testDoc) + val id = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + val alerts = searchAlerts(id, customAlertsIndex) + assertEquals("Alert saved for test monitor", 1, alerts.size) + val table = Table("asc", "id", null, 1, 0, "") + var getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, customAlertsIndex)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + getAlertsResponse = client() + .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, null)) + .get() + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 1) + } + + fun `test execute GetFindingsAction with unknown findingIndex param`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources(findingsIndex = customFindingsIndex) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val monitorId = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, monitorId, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + 
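// The finding is verified below, then GetFindingsAction is called with a nonexistent findings index to exercise the "no such index" error path + 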
searchAlerts(monitorId) + val findings = searchFindings(monitorId, customFindingsIndex) + assertEquals("Findings saved for test monitor", 1, findings.size) + assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) + // fetch findings - pass an unknown findingIndexName (and no monitorId) and expect the request to fail with an index not found error + try { + getFindings(findings.get(0).id, null, "unknown_finding_index_123456789") + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception does not contain the index not found error", + it.contains("no such index") + ) + } + } + } + + fun `test search custom alerts history index`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customAlertsIndex = "custom_alerts_index" + val customAlertsHistoryIndex = "custom_alerts_history_index" + val customAlertsHistoryIndexPattern = "" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger1, trigger2), + dataSources = DataSources( + alertsIndex = customAlertsIndex, + alertsHistoryIndex = customAlertsHistoryIndex, + alertsHistoryIndexPattern = customAlertsHistoryIndexPattern + ) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val monitorId = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, monitorId, false) + var alertsBefore = searchAlerts(monitorId, customAlertsIndex) + Assert.assertEquals(2, alertsBefore.size) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 2) + // Remove 1 trigger from monitor to force moveAlerts call to move alerts to history index + monitor = monitor.copy(triggers = listOf(trigger1)) + updateMonitor(monitor, monitorId) + + var alerts = listOf<Alert>() + OpenSearchTestCase.waitUntil({ + alerts = searchAlerts(monitorId, customAlertsHistoryIndex) + if (alerts.size == 1) { + return@waitUntil true + } + return@waitUntil false + }, 30, TimeUnit.SECONDS) + assertEquals("Alerts from custom history index", 1, alerts.size) + }
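One pattern worth flagging between these two tests: moving alerts to the history index happens asynchronously after the monitor update, so the tests poll with OpenSearchTestCase.waitUntil rather than asserting immediately. A condensed sketch of the same shape, reusing the vals from the test above:

    var moved = listOf<Alert>()
    OpenSearchTestCase.waitUntil({
        moved = searchAlerts(monitorId, customAlertsHistoryIndex)
        moved.size == 1 // stop polling once the moved alert is visible
    }, 30, TimeUnit.SECONDS)
    assertEquals("Alerts from custom history index", 1, moved.size)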
 + fun `test search custom alerts history index after alert ack`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customAlertsIndex = "custom_alerts_index" + val customAlertsHistoryIndex = "custom_alerts_history_index" + val customAlertsHistoryIndexPattern = "" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger1, trigger2), + dataSources = DataSources( + alertsIndex = customAlertsIndex, + alertsHistoryIndex = customAlertsHistoryIndex, + alertsHistoryIndexPattern = customAlertsHistoryIndexPattern + ) + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + indexDoc(index, "1", testDoc) + val monitorId = monitorResponse.id + val executeMonitorResponse = executeMonitor(monitor, monitorId, false) + var alertsBefore = searchAlerts(monitorId, customAlertsIndex) + Assert.assertEquals(2, alertsBefore.size) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 2) + + var alerts = listOf<Alert>() + OpenSearchTestCase.waitUntil({ + alerts = searchAlerts(monitorId, customAlertsIndex) + if (alerts.size == 2) { + return@waitUntil true + } + return@waitUntil false + }, 30, TimeUnit.SECONDS) + assertEquals("Alerts from custom index", 2, alerts.size) + + val ackReq = AcknowledgeAlertRequest(monitorId, alerts.map { it.id }.toMutableList(), WriteRequest.RefreshPolicy.IMMEDIATE) + client().execute(AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE, ackReq).get() + + // verify alerts moved from alert index to alert history index + alerts = listOf() + OpenSearchTestCase.waitUntil({ + alerts = searchAlerts(monitorId, customAlertsHistoryIndex) + if (alerts.size == 2) { + return@waitUntil true + } + return@waitUntil false + }, 30, TimeUnit.SECONDS) + assertEquals("Alerts from custom history index", 2, alerts.size) + + // verify alerts deleted from alert index + alerts = listOf() + OpenSearchTestCase.waitUntil({ + alerts = searchAlerts(monitorId, customAlertsIndex) + if (alerts.isEmpty()) { + return@waitUntil true + } + return@waitUntil false + }, 30, TimeUnit.SECONDS) + assertEquals("Alerts deleted from custom alerts index", 0, alerts.size) + } + + fun `test get alerts by list of monitors containing both existent and non-existent ids`() { + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + ) + val monitorResponse = createMonitor(monitor) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_strict_date_time" : "$testTime", + "test_field" : "us-west-2" + }""" + + monitor = monitorResponse!!.monitor + + val id = monitorResponse.id + + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + ) + val monitorResponse1 = createMonitor(monitor1) + monitor1 = monitorResponse1!!.monitor + val id1 = monitorResponse1.id + indexDoc(index, "1", testDoc) + executeMonitor(monitor1, id1, false) + executeMonitor(monitor, id, false) + val alerts = searchAlerts(id) + assertEquals("Alert saved for test monitor", 1, alerts.size) + val alerts1 = searchAlerts(id1) + assertEquals("Alert saved for test monitor", 1, alerts1.size) + val table = Table("asc", "id", null, 1000, 0, "") + var getAlertsResponse = client() + .execute( + AlertingActions.GET_ALERTS_ACTION_TYPE, + 
GetAlertsRequest(table, "ALL", "ALL", null, null) + ) + .get() + + Assert.assertTrue(getAlertsResponse != null) + Assert.assertTrue(getAlertsResponse.alerts.size == 2) + + var alertsResponseForRequestWithoutCustomIndex = client() + .execute( + AlertingActions.GET_ALERTS_ACTION_TYPE, + GetAlertsRequest(table, "ALL", "ALL", null, null, monitorIds = listOf(id, id1, "1", "2")) + ) + .get() + Assert.assertTrue(alertsResponseForRequestWithoutCustomIndex != null) + Assert.assertTrue(alertsResponseForRequestWithoutCustomIndex.alerts.size == 2) + val alertIds = getAlertsResponse.alerts.stream().map { alert -> alert.id }.collect(Collectors.toList()) + var getAlertsByAlertIds = client() + .execute( + AlertingActions.GET_ALERTS_ACTION_TYPE, + GetAlertsRequest(table, "ALL", "ALL", null, null, alertIds = alertIds) + ) + .get() + Assert.assertTrue(getAlertsByAlertIds != null) + Assert.assertTrue(getAlertsByAlertIds.alerts.size == 2) + + var getAlertsByWrongAlertIds = client() + .execute( + AlertingActions.GET_ALERTS_ACTION_TYPE, + GetAlertsRequest(table, "ALL", "ALL", null, null, alertIds = listOf("1", "2")) + ) + .get() + + Assert.assertTrue(getAlertsByWrongAlertIds != null) + Assert.assertEquals(getAlertsByWrongAlertIds.alerts.size, 0) + } + + fun `test queryIndex rollover and delete monitor success`() { + + val testSourceIndex = "test_source_index" + createIndex(testSourceIndex, Settings.builder().put("index.mapping.total_fields.limit", "10000").build()) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + // This doc should create close to 10000 (limit) fields in the index mapping. It's easier to add mappings like this than via the API + val docPayload: StringBuilder = StringBuilder(100000) + docPayload.append("{") + for (i in 1..3300) { + docPayload.append(""" "id$i.somefield.somefield$i":$i,""") + } + docPayload.append("\"test_field\" : \"us-west-2\" }") + indexDoc(testSourceIndex, "1", docPayload.toString()) + // Create monitor #1 + var monitorResponse = createMonitor(monitor) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + // Execute monitor #1 + var executeMonitorResponse = executeMonitor(monitor, monitorResponse.id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + // Create monitor #2 + var monitorResponse2 = createMonitor(monitor) + assertFalse(monitorResponse2?.id.isNullOrEmpty()) + monitor = monitorResponse2!!.monitor + // Insert doc #2. This one should trigger creation of alerts during monitor exec + val testDoc = """{ + "test_field" : "us-west-2" + }""" + indexDoc(testSourceIndex, "2", testDoc) + // Execute monitor #2 + var executeMonitorResponse2 = executeMonitor(monitor, monitorResponse2.id, false) + Assert.assertEquals(executeMonitorResponse2!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse2.monitorRunResult.triggerResults.size, 1) + + refreshIndex(AlertIndices.ALERT_INDEX) + var alerts = searchAlerts(monitorResponse2.id) + Assert.assertTrue(alerts != null) + Assert.assertTrue(alerts.size == 1) + + // Both monitors used the same queryIndex alias. 
Since the source index has close to the limit number of fields in its mappings, + // we expect that creation of the second monitor will trigger a rollover of the queryIndex + var getIndexResponse: GetIndexResponse = + client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() + assertEquals(2, getIndexResponse.indices.size) + assertEquals(DOC_LEVEL_QUERIES_INDEX + "-000001", getIndexResponse.indices[0]) + assertEquals(DOC_LEVEL_QUERIES_INDEX + "-000002", getIndexResponse.indices[1]) + // Now we'll verify that execution of both monitors still works + indexDoc(testSourceIndex, "3", testDoc) + // Exec Monitor #1 + executeMonitorResponse = executeMonitor(monitor, monitorResponse.id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + refreshIndex(AlertIndices.ALERT_INDEX) + alerts = searchAlerts(monitorResponse.id) + Assert.assertTrue(alerts != null) + Assert.assertTrue(alerts.size == 2) + // Exec Monitor #2 + executeMonitorResponse = executeMonitor(monitor, monitorResponse2.id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + refreshIndex(AlertIndices.ALERT_INDEX) + alerts = searchAlerts(monitorResponse2.id) + Assert.assertTrue(alerts != null) + Assert.assertTrue(alerts.size == 2) + // Delete monitor #1 + client().execute( + AlertingActions.DELETE_MONITOR_ACTION_TYPE, DeleteMonitorRequest(monitorResponse.id, WriteRequest.RefreshPolicy.IMMEDIATE) + ).get() + // Expect first concrete queryIndex to be deleted since that one was only used by this monitor + getIndexResponse = + client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() + assertEquals(1, getIndexResponse.indices.size) + assertEquals(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "-000002", getIndexResponse.indices[0]) + // Delete monitor #2 + client().execute( + AlertingActions.DELETE_MONITOR_ACTION_TYPE, DeleteMonitorRequest(monitorResponse2.id, WriteRequest.RefreshPolicy.IMMEDIATE) + ).get() + // Expect second concrete queryIndex to be deleted since that one was only used by this monitor + getIndexResponse = + client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() + assertEquals(0, getIndexResponse.indices.size) + } + + fun `test queryIndex rollover failure source_index field count over limit`() { + + val testSourceIndex = "test_source_index" + createIndex(testSourceIndex, Settings.EMPTY) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + // This doc should create 999 fields in the mapping, only 1 field fewer than the limit + val docPayload: StringBuilder = StringBuilder(100000) + docPayload.append("{") + for (i in 1..998) { + docPayload.append(""" "id$i":$i,""") + } + docPayload.append("\"test_field\" : \"us-west-2\" }") + indexDoc(testSourceIndex, "1", docPayload.toString())
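The failure expected in the next step comes down to simple arithmetic: the payload creates 998 id fields plus test_field (999 mappings), and the queryIndex starts with a few fields of its own, so the combined count exceeds the default index.mapping.total_fields.limit of 1000. A minimal check, with the base-field count of 3 taken from the comment below rather than measured:

    val sourceFields = 998 + 1       // id1..id998 plus test_field
    val queryIndexBaseFields = 3     // per the comment that follows
    check(sourceFields + queryIndexBaseFields > 1000) // exceeds the default total-fields limit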
// Create monitor and expect failure: queryIndex has 3 fields in its mappings initially, so 999 + 3 > 1000 (the default limit) + try { + createMonitor(monitor) + fail("Monitor creation should have failed due to the field mapping limit") + } catch (e: Exception) { + assertTrue(e.message?.contains("can't process index [$testSourceIndex] due to field mapping limit") ?: false) + } + } + + fun `test queryIndex not rolling over multiple monitors`() { + val testSourceIndex = "test_source_index" + createIndex(testSourceIndex, Settings.EMPTY) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + // Create doc with 11 fields + val docPayload: StringBuilder = StringBuilder(1000) + docPayload.append("{") + for (i in 1..10) { + docPayload.append(""" "id$i":$i,""") + } + docPayload.append("\"test_field\" : \"us-west-2\" }") + indexDoc(testSourceIndex, "1", docPayload.toString()) + // Create monitor #1 + var monitorResponse = createMonitor(monitor) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + // Execute monitor #1 + var executeMonitorResponse = executeMonitor(monitor, monitorResponse.id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + // Create monitor #2 + var monitorResponse2 = createMonitor(monitor) + assertFalse(monitorResponse2?.id.isNullOrEmpty()) + monitor = monitorResponse2!!.monitor + // Insert doc #2. This one should trigger creation of alerts during monitor exec + val testDoc = """{ + "test_field" : "us-west-2" + }""" + indexDoc(testSourceIndex, "2", testDoc) + // Execute monitor #2 + var executeMonitorResponse2 = executeMonitor(monitor, monitorResponse2.id, false) + Assert.assertEquals(executeMonitorResponse2!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse2.monitorRunResult.triggerResults.size, 1) + + refreshIndex(AlertIndices.ALERT_INDEX) + var alerts = searchAlerts(monitorResponse2.id) + Assert.assertTrue(alerts != null) + Assert.assertTrue(alerts.size == 1) + + // Both monitors used the same queryIndex. 
Since the source index has well below the limit number of fields in its mappings, + // we expect only 1 backing queryIndex + val getIndexResponse: GetIndexResponse = + client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() + assertEquals(1, getIndexResponse.indices.size) + // Now we'll verify that execution of both monitors works + indexDoc(testSourceIndex, "3", testDoc) + // Exec Monitor #1 + executeMonitorResponse = executeMonitor(monitor, monitorResponse.id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + refreshIndex(AlertIndices.ALERT_INDEX) + alerts = searchAlerts(monitorResponse.id) + Assert.assertTrue(alerts != null) + Assert.assertTrue(alerts.size == 2) + // Exec Monitor #2 + executeMonitorResponse = executeMonitor(monitor, monitorResponse2.id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + refreshIndex(AlertIndices.ALERT_INDEX) + alerts = searchAlerts(monitorResponse2.id) + Assert.assertTrue(alerts != null) + Assert.assertTrue(alerts.size == 2) + } + + /** + * 1. Create monitor whose input source_index has 9000 fields in its mappings - only one such monitor fits in the queryIndex + * 2. Update the monitor and change the input source_index to a new one, also with 9000 fields in its mappings + * 3. Expect a queryIndex rollover resulting in 2 backing indices + * 4. Delete the monitor and expect that all backing indices are deleted + * */ + fun `test updating monitor no execution queryIndex rolling over`() { + val testSourceIndex1 = "test_source_index1" + val testSourceIndex2 = "test_source_index2" + createIndex(testSourceIndex1, Settings.builder().put("index.mapping.total_fields.limit", "10000").build()) + createIndex(testSourceIndex2, Settings.builder().put("index.mapping.total_fields.limit", "10000").build()) + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex1), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + )
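A brief aside on why step 3 of the scenario above expects a rollover: each source index contributes roughly 9000 field mappings to the shared queryIndex, whose own total-fields limit is raised to the source limit plus a small base count. Assuming rollover triggers when new mappings would no longer fit, the old and new input mappings cannot share one concrete index:

    val fieldsPerSourceIndex = 9000
    val queryIndexFieldLimit = 10000 + DocLevelMonitorQueries.QUERY_INDEX_BASE_FIELDS_COUNT // base count assumed small
    check(2 * fieldsPerSourceIndex > queryIndexFieldLimit) // so a second backing index must be rolled over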
// This doc should create close to 10000 (limit) fields in the index mapping. It's easier to add mappings like this than via the API + val docPayload: StringBuilder = StringBuilder(100000) + docPayload.append("{") + for (i in 1..9000) { + docPayload.append(""" "id$i":$i,""") + } + docPayload.append("\"test_field\" : \"us-west-2\" }") + // Indexing docs here as an easier means to set index mappings + indexDoc(testSourceIndex1, "1", docPayload.toString()) + indexDoc(testSourceIndex2, "1", docPayload.toString()) + // Create monitor + var monitorResponse = createMonitor(monitor) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + + // Update monitor and change input + val updatedMonitor = monitor.copy( + inputs = listOf( + DocLevelMonitorInput("description", listOf(testSourceIndex2), listOf(docQuery)) + ) + ) + updateMonitor(updatedMonitor, updatedMonitor.id) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + + // Expect the queryIndex to rollover after setting a new source_index with close to the limit number of fields in its mappings + var getIndexResponse: GetIndexResponse = + client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() + assertEquals(2, getIndexResponse.indices.size) + + deleteMonitor(updatedMonitor.id) + waitUntil { + getIndexResponse = + client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() + return@waitUntil getIndexResponse.indices.isEmpty() + } + assertEquals(0, getIndexResponse.indices.size) + } + + fun `test queryIndex gets increased max fields in mappings`() { + val testSourceIndex = "test_source_index" + createIndex(testSourceIndex, Settings.builder().put("index.mapping.total_fields.limit", "10000").build()) + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + )
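The point of this test, in one line: when a monitor's source index carries a raised index.mapping.total_fields.limit, the concrete queryIndex adopts that limit plus the plugin's base field count. That is exactly what the getSetting assertion below checks:

    val sourceLimit = 10000 // from the createIndex settings above
    val expectedQueryIndexLimit = sourceLimit + DocLevelMonitorQueries.QUERY_INDEX_BASE_FIELDS_COUNT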
// This doc should create close to 10000 (limit) fields in the index mapping. It's easier to add mappings like this than via the API + val docPayload: StringBuilder = StringBuilder(100000) + docPayload.append("{") + for (i in 1..9998) { + docPayload.append(""" "id$i":$i,""") + } + docPayload.append("\"test_field\" : \"us-west-2\" }") + // Indexing docs here as an easier means to set index mappings + indexDoc(testSourceIndex, "1", docPayload.toString()) + // Create monitor + var monitorResponse = createMonitor(monitor) + assertFalse(monitorResponse?.id.isNullOrEmpty()) + monitor = monitorResponse!!.monitor + + // Expect a single queryIndex whose max fields limit was increased to match the source index + var getIndexResponse: GetIndexResponse = + client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() + assertEquals(1, getIndexResponse.indices.size) + val field_max_limit = getIndexResponse + .getSetting(DOC_LEVEL_QUERIES_INDEX + "-000001", MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.key).toInt() + + assertEquals(10000 + DocLevelMonitorQueries.QUERY_INDEX_BASE_FIELDS_COUNT, field_max_limit) + + deleteMonitor(monitorResponse.id) + waitUntil { + getIndexResponse = + client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() + return@waitUntil getIndexResponse.indices.isEmpty() + } + assertEquals(0, getIndexResponse.indices.size) + } + + fun `test queryIndex bwc when index was not an alias`() { + createIndex(DOC_LEVEL_QUERIES_INDEX, Settings.builder().put("index.hidden", true).build()) + assertIndexExists(DOC_LEVEL_QUERIES_INDEX) + + val testSourceIndex = "test_source_index" + createIndex(testSourceIndex, Settings.EMPTY) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + // Simple single-field doc payload + val docPayload = "{\"test_field\" : \"us-west-2\" }" + // Create monitor + try { + var monitorResponse = createMonitor(monitor) + indexDoc(testSourceIndex, "1", docPayload) + var executeMonitorResponse = executeMonitor(monitor, monitorResponse!!.id, false) + Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) + Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) + refreshIndex(AlertIndices.ALERT_INDEX) + val alerts = searchAlerts(monitorResponse.id) + Assert.assertTrue(alerts != null) + Assert.assertTrue(alerts.size == 1) + // check if DOC_LEVEL_QUERIES_INDEX alias exists + assertAliasExists(DOC_LEVEL_QUERIES_INDEX) + } catch (e: Exception) { + fail("Exception happened but it shouldn't have!") + } + } + + // TODO - revisit single node integ tests setup to figure out why we cannot have multiple test classes implementing it + + fun `test execute workflow with custom alerts and finding index when bucket monitor is used in chained finding of doc monitor`() { + val query = QueryBuilders.rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + val compositeSources = listOf( + TermsValuesSourceBuilder("test_field_1").field("test_field_1") + ) + val customAlertsHistoryIndex = "custom_alerts_history_index" + val customAlertsHistoryIndexPattern = "" + val 
compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) + val input = SearchInput(indices = listOf(index), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) + // Bucket level monitor will reduce the size of matched doc ids on those that belong + // to a bucket that contains more than 1 document after term grouping + val triggerScript = """ + params.docCount > 1 + """.trimIndent() + + var trigger = randomBucketLevelTrigger() + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "composite_agg", + filter = null, + ) + ) + val bucketCustomAlertsIndex = "custom_alerts_index" + val bucketCustomFindingsIndex = "custom_findings_index" + val bucketCustomFindingsIndexPattern = "custom_findings_index-1" + + val bucketLevelMonitorResponse = createMonitor( + randomBucketLevelMonitor( + inputs = listOf(input), + enabled = false, + triggers = listOf(trigger), + dataSources = DataSources( + findingsEnabled = true, + alertsIndex = bucketCustomAlertsIndex, + findingsIndex = bucketCustomFindingsIndex, + findingsIndexPattern = bucketCustomFindingsIndexPattern + ) + ) + )!! + + val docQuery1 = DocLevelQuery(query = "test_field_1:\"test_value_2\"", name = "1", fields = listOf()) + val docQuery2 = DocLevelQuery(query = "test_field_1:\"test_value_1\"", name = "2", fields = listOf()) + val docQuery3 = DocLevelQuery(query = "test_field_1:\"test_value_3\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1, docQuery2, docQuery3)) + val docTrigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val docCustomAlertsIndex = "custom_alerts_index" + val docCustomFindingsIndex = "custom_findings_index" + val docCustomFindingsIndexPattern = "custom_findings_index-1" + var docLevelMonitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(docTrigger), + dataSources = DataSources( + alertsIndex = docCustomAlertsIndex, + findingsIndex = docCustomFindingsIndex, + findingsIndexPattern = docCustomFindingsIndexPattern + ) + ) + + val docLevelMonitorResponse = createMonitor(docLevelMonitor)!! + // 1. bucketMonitor (chainedFinding = null) 2. docMonitor (chainedFinding = bucketMonitor) + var workflow = randomWorkflow( + monitorIds = listOf(bucketLevelMonitorResponse.id, docLevelMonitorResponse.id), + enabled = false, + auditDelegateMonitorAlerts = false + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + + // Creates 5 documents + insertSampleTimeSerializedData( + index, + listOf( + "test_value_1", + "test_value_1", // adding duplicate to verify aggregation + "test_value_2", + "test_value_2", + "test_value_3" + ) + ) + + val workflowId = workflowResponse.id + // 1. bucket level monitor should reduce the doc findings to 4 (1, 2, 3, 4) + // 2. Doc level monitor will match those 4 documents although it contains rules for matching all 5 documents (docQuery3 matches the fifth) + val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! 
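An aside on the chained-finding semantics exercised here: with the five values inserted above, term grouping produces the buckets {test_value_1=[1,2], test_value_2=[3,4], test_value_3=[5]}, and the params.docCount > 1 bucket selector keeps only the first two. A minimal standalone sketch of that pruning (pruneToRepeatedBuckets is hypothetical, not plugin code):

    // Keep only docs whose term bucket holds more than one document.
    fun pruneToRepeatedBuckets(bucketDocIds: Map<String, List<String>>): List<String> =
        bucketDocIds.values.filter { it.size > 1 }.flatten()
    // {test_value_1=[1,2], test_value_2=[3,4], test_value_3=[5]} keeps docs 1..4 and drops doc 5,
    // which is why both delegates' findings below cover exactly docs 1-4.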
+ assertNotNull(executeWorkflowResponse) + + for (monitorRunResults in executeWorkflowResponse.workflowRunResult.monitorRunResults) { + if (bucketLevelMonitorResponse.monitor.name == monitorRunResults.monitorName) { + val searchResult = monitorRunResults.inputResults.results.first() + + @Suppress("UNCHECKED_CAST") + val buckets = searchResult.stringMap("aggregations")?.stringMap("composite_agg") + ?.get("buckets") as List<kotlin.collections.Map<String, Any>> + assertEquals("Incorrect search result", 3, buckets.size) + + val getAlertsResponse = assertAlerts(bucketLevelMonitorResponse.id, bucketCustomAlertsIndex, 2, workflowId) + assertAcknowledges(getAlertsResponse.alerts, bucketLevelMonitorResponse.id, 2) + assertFindings(bucketLevelMonitorResponse.id, bucketCustomFindingsIndex, 1, 4, listOf("1", "2", "3", "4")) + } else { + assertEquals(1, monitorRunResults.inputResults.results.size) + val values = monitorRunResults.triggerResults.values + assertEquals(1, values.size) + @Suppress("UNCHECKED_CAST") + val docLevelTrigger = values.iterator().next() as DocumentLevelTriggerRunResult + val triggeredDocIds = docLevelTrigger.triggeredDocs.map { it.split("|")[0] } + val expectedTriggeredDocIds = listOf("1", "2", "3", "4") + assertEquals(expectedTriggeredDocIds, triggeredDocIds.sorted()) + + val getAlertsResponse = assertAlerts(docLevelMonitorResponse.id, docCustomAlertsIndex, 4, workflowId) + assertAcknowledges(getAlertsResponse.alerts, docLevelMonitorResponse.id, 4) + assertFindings(docLevelMonitorResponse.id, docCustomFindingsIndex, 4, 4, listOf("1", "2", "3", "4")) + } + } + } + + fun `test execute workflow with custom alerts and finding index when doc level delegate is used in chained finding`() { + val docQuery1 = DocLevelQuery(query = "test_field_1:\"test_value_2\"", name = "1", fields = listOf()) + val docQuery2 = DocLevelQuery(query = "test_field_1:\"test_value_3\"", name = "2", fields = listOf()) + + var docLevelMonitor = randomDocumentLevelMonitor( + inputs = listOf(DocLevelMonitorInput("description", listOf(index), listOf(docQuery1, docQuery2))), + triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)), + dataSources = DataSources( + alertsIndex = "custom_alerts_index", + findingsIndex = "custom_findings_index", + findingsIndexPattern = "custom_findings_index-1" + ) + ) + + val docLevelMonitorResponse = createMonitor(docLevelMonitor)!! 
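The it.split("|")[0] parsing in the loop above leans on how triggered docs are serialized: each entry is assumed to be a docId|indexName pair (only the id half is ever asserted in these tests). A tiny sketch of that convention with a hypothetical entry:

    val entry = "1|test_source_index" // hypothetical example entry
    val (docId, sourceIndex) = entry.split("|", limit = 2)
    check(docId == "1") // the id half is what gets compared against expected doc ids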
+ + val query = QueryBuilders.rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + val compositeSources = listOf( + TermsValuesSourceBuilder("test_field_1").field("test_field_1") + ) + val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) + val input = SearchInput(indices = listOf(index), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) + // Bucket level monitor will reduce the size of matched doc ids on those that belong to a bucket that contains more than 1 document after term grouping + val triggerScript = """ + params.docCount > 1 + """.trimIndent() + + var trigger = randomBucketLevelTrigger() + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "composite_agg", + filter = null, + ) + ) + + val bucketLevelMonitorResponse = createMonitor( + randomBucketLevelMonitor( + inputs = listOf(input), + enabled = false, + triggers = listOf(trigger), + dataSources = DataSources( + findingsEnabled = true, + alertsIndex = "custom_alerts_index", + findingsIndex = "custom_findings_index", + findingsIndexPattern = "custom_findings_index-1" + ) + ) + )!! + + var docLevelMonitor1 = randomDocumentLevelMonitor( + // Match the documents with test_field_1: test_value_3 + inputs = listOf(DocLevelMonitorInput("description", listOf(index), listOf(docQuery2))), + triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)), + dataSources = DataSources( + findingsEnabled = true, + alertsIndex = "custom_alerts_index_1", + findingsIndex = "custom_findings_index_1", + findingsIndexPattern = "custom_findings_index_1-1" + ) + ) + + val docLevelMonitorResponse1 = createMonitor(docLevelMonitor1)!! + + val queryMonitorInput = SearchInput( + indices = listOf(index), + query = SearchSourceBuilder().query( + QueryBuilders + .rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + ) + ) + val queryTriggerScript = """ + return ctx.results[0].hits.hits.size() > 0 + """.trimIndent() + + val queryLevelTrigger = randomQueryLevelTrigger(condition = Script(queryTriggerScript)) + val queryMonitorResponse = + createMonitor(randomQueryLevelMonitor(inputs = listOf(queryMonitorInput), triggers = listOf(queryLevelTrigger)))!! + + // 1. docMonitor (chainedFinding = null) 2. bucketMonitor (chainedFinding = docMonitor) 3. docMonitor (chainedFinding = bucketMonitor) 4. queryMonitor (chainedFinding = docMonitor 3) + var workflow = randomWorkflow( + monitorIds = listOf( + docLevelMonitorResponse.id, + bucketLevelMonitorResponse.id, + docLevelMonitorResponse1.id, + queryMonitorResponse.id + ), + auditDelegateMonitorAlerts = false + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + + // Creates 5 documents + insertSampleTimeSerializedData( + index, + listOf( + "test_value_1", + "test_value_1", // adding duplicate to verify aggregation + "test_value_2", + "test_value_2", + "test_value_3", + "test_value_3" + ) + ) + + val workflowId = workflowResponse.id + // 1. Doc level monitor should reduce the doc findings to 4 (3 - test_value_2, 4 - test_value_2, 5 - test_value_3, 6 - test_value_3) + // 2. 
Bucket level monitor will fetch and match the docs from the current findings execution, although it contains rules for matching documents which have test_value_2 and test_value_3 + val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! + assertNotNull(executeWorkflowResponse) + + for (monitorRunResults in executeWorkflowResponse.workflowRunResult.monitorRunResults) { + when (monitorRunResults.monitorName) { + // Verify first doc level monitor execution, alerts and findings + docLevelMonitorResponse.monitor.name -> { + assertEquals(1, monitorRunResults.inputResults.results.size) + val values = monitorRunResults.triggerResults.values + assertEquals(1, values.size) + @Suppress("UNCHECKED_CAST") + val docLevelTrigger = values.iterator().next() as DocumentLevelTriggerRunResult + val triggeredDocIds = docLevelTrigger.triggeredDocs.map { it.split("|")[0] } + val expectedTriggeredDocIds = listOf("3", "4", "5", "6") + assertEquals(expectedTriggeredDocIds, triggeredDocIds.sorted()) + + val getAlertsResponse = + assertAlerts(docLevelMonitorResponse.id, docLevelMonitorResponse.monitor.dataSources.alertsIndex, 4, workflowId) + assertAcknowledges(getAlertsResponse.alerts, docLevelMonitorResponse.id, 4) + assertFindings( + docLevelMonitorResponse.id, + docLevelMonitorResponse.monitor.dataSources.findingsIndex, + 4, + 4, + listOf("3", "4", "5", "6") + ) + } + // Verify second bucket level monitor execution, alerts and findings + bucketLevelMonitorResponse.monitor.name -> { + val searchResult = monitorRunResults.inputResults.results.first() + + @Suppress("UNCHECKED_CAST") + val buckets = + searchResult + .stringMap("aggregations")?.stringMap("composite_agg") + ?.get("buckets") as List<kotlin.collections.Map<String, Any>> + assertEquals("Incorrect search result", 2, buckets.size) + + val getAlertsResponse = + assertAlerts( + bucketLevelMonitorResponse.id, + bucketLevelMonitorResponse.monitor.dataSources.alertsIndex, + 2, + workflowId + ) + assertAcknowledges(getAlertsResponse.alerts, bucketLevelMonitorResponse.id, 2) + assertFindings( + bucketLevelMonitorResponse.id, + bucketLevelMonitorResponse.monitor.dataSources.findingsIndex, + 1, + 4, + listOf("3", "4", "5", "6") + ) + } + // Verify third doc level monitor execution, alerts and findings + docLevelMonitorResponse1.monitor.name -> { + assertEquals(1, monitorRunResults.inputResults.results.size) + val values = monitorRunResults.triggerResults.values + assertEquals(1, values.size) + @Suppress("UNCHECKED_CAST") + val docLevelTrigger = values.iterator().next() as DocumentLevelTriggerRunResult + val triggeredDocIds = docLevelTrigger.triggeredDocs.map { it.split("|")[0] } + val expectedTriggeredDocIds = listOf("5", "6") + assertEquals(expectedTriggeredDocIds, triggeredDocIds.sorted()) + + val getAlertsResponse = + assertAlerts(docLevelMonitorResponse1.id, docLevelMonitorResponse1.monitor.dataSources.alertsIndex, 2, workflowId) + assertAcknowledges(getAlertsResponse.alerts, docLevelMonitorResponse1.id, 2) + assertFindings( + docLevelMonitorResponse1.id, + docLevelMonitorResponse1.monitor.dataSources.findingsIndex, + 2, + 2, + listOf("5", "6") + ) + } + // Verify fourth query level monitor execution + queryMonitorResponse.monitor.name -> { + assertEquals(1, monitorRunResults.inputResults.results.size) + val values = monitorRunResults.triggerResults.values + assertEquals(1, values.size) + @Suppress("UNCHECKED_CAST") + val totalHits = + ( + ( + monitorRunResults.inputResults.results[0]["hits"] as kotlin.collections.Map<String, Any> + )["total"] as kotlin.collections.Map<String, Any> + )["value"] + 
assertEquals(2, totalHits) + @Suppress("UNCHECKED_CAST") + val docIds = + ( + ( + monitorRunResults.inputResults.results[0]["hits"] as kotlin.collections.Map<String, Any> + )["hits"] as List<kotlin.collections.Map<String, String>> + ).map { it["_id"]!! } + assertEquals(listOf("5", "6"), docIds.sorted()) + } + } + } + } + + private fun assertAlerts( + monitorId: String, + customAlertsIndex: String, + alertSize: Int, + workflowId: String, + ): GetAlertsResponse { + val table = Table("asc", "id", null, alertSize, 0, "") + val getAlertsResponse = client() + .execute( + AlertingActions.GET_ALERTS_ACTION_TYPE, + GetAlertsRequest( + table, "ALL", "ALL", monitorId, customAlertsIndex, + workflowIds = listOf(workflowId) + ) + ) + .get() + assertTrue(getAlertsResponse != null) + assertTrue(getAlertsResponse.alerts.size == alertSize) + return getAlertsResponse + } + + fun `test execute workflow with custom alerts and finding index with doc level delegates`() { + val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customAlertsIndex1 = "custom_alerts_index" + val customFindingsIndex1 = "custom_findings_index" + val customFindingsIndexPattern1 = "custom_findings_index-1" + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger1), + dataSources = DataSources( + alertsIndex = customAlertsIndex1, + findingsIndex = customFindingsIndex1, + findingsIndexPattern = customFindingsIndexPattern1 + ) + ) + val monitorResponse = createMonitor(monitor1)!! + + val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) + val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2)) + val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customAlertsIndex2 = "custom_alerts_index_2" + val customFindingsIndex2 = "custom_findings_index_2" + val customFindingsIndexPattern2 = "custom_findings_index-2" + var monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput2), + triggers = listOf(trigger2), + dataSources = DataSources( + alertsIndex = customAlertsIndex2, + findingsIndex = customFindingsIndex2, + findingsIndexPattern = customFindingsIndexPattern2 + ) + ) + + val monitorResponse2 = createMonitor(monitor2)!! + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id, monitorResponse2.id), auditDelegateMonitorAlerts = false + ) + val workflowResponse = upsertWorkflow(workflow)!! 
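A note on assertFindings, which the remaining workflow tests call constantly: its body sits outside this hunk, so the following is a plausible reconstruction inferred purely from the call sites (findingSize findings whose relatedDocIds collectively cover the listed doc ids), not the actual helper:

    private fun assertFindings(
        monitorId: String,
        findingsIndex: String,
        findingSize: Int,
        matchedDocNumber: Int,
        relatedDocIds: List<String>,
    ) {
        val findings = searchFindings(monitorId, findingsIndex)
        assertEquals("Unexpected finding count", findingSize, findings.size)
        val docIds = findings.flatMap { it.relatedDocIds }
        assertEquals("Unexpected related doc count", matchedDocNumber, docIds.size)
        assertTrue("Missing related doc ids", docIds.containsAll(relatedDocIds))
    }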
+ val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + + var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 + val testDoc1 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16644, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "1", testDoc1) + + testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 and monitor2 + val testDoc2 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16645, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "2", testDoc2) + + testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Doesn't match + val testDoc3 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16645, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-east-1" + }""" + indexDoc(index, "3", testDoc3) + + val workflowId = workflowResponse.id + val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! + val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults + assertEquals(2, monitorsRunResults.size) + + assertEquals(monitor1.name, monitorsRunResults[0].monitorName) + assertEquals(1, monitorsRunResults[0].triggerResults.size) + + Assert.assertEquals(monitor2.name, monitorsRunResults[1].monitorName) + Assert.assertEquals(1, monitorsRunResults[1].triggerResults.size) + + val getAlertsResponse = assertAlerts(monitorResponse.id, customAlertsIndex1, alertSize = 2, workflowId = workflowId) + assertAcknowledges(getAlertsResponse.alerts, monitorResponse.id, 2) + assertFindings(monitorResponse.id, customFindingsIndex1, 2, 2, listOf("1", "2")) + + val getAlertsResponse2 = assertAlerts(monitorResponse2.id, customAlertsIndex2, alertSize = 1, workflowId = workflowId) + assertAcknowledges(getAlertsResponse2.alerts, monitorResponse2.id, 1) + assertFindings(monitorResponse2.id, customFindingsIndex2, 1, 1, listOf("2")) + } + + fun `test execute workflow with multiple monitors in chained monitor findings of single monitor`() { + val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customAlertsIndex1 = "custom_alerts_index" + val customFindingsIndex1 = "custom_findings_index" + val customFindingsIndexPattern1 = "custom_findings_index-1" + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger1), + enabled = false, + dataSources = DataSources( + alertsIndex = customAlertsIndex1, + findingsIndex = customFindingsIndex1, + findingsIndexPattern = customFindingsIndexPattern1 + ) + ) + val monitorResponse = createMonitor(monitor1)!! 
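The assertAlerts helper above routes every alert lookup through GetAlertsAction with a Table of paging options. The positional arguments appear to carry the following meanings; the annotations are assumptions inferred from usage in these tests, not parameter names taken from this diff:

    val table = Table(
        "asc", // sort order
        "id",  // field to sort on
        null,  // placement of docs missing the sort field
        1000,  // page size: maximum alerts returned
        0,     // paging offset
        ""     // search string; empty matches everything
    )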
+ + val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) + val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2)) + val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput2), + triggers = listOf(trigger2), + enabled = false, + dataSources = DataSources( + alertsIndex = customAlertsIndex1, + findingsIndex = customFindingsIndex1, + findingsIndexPattern = customFindingsIndexPattern1 + ) + ) + + val monitorResponse2 = createMonitor(monitor2)!! + val docQuery3 = DocLevelQuery(query = "_id:*", name = "5", fields = listOf()) + val docLevelInput3 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery3)) + val trigger3 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + var monitor3 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput3), + triggers = listOf(trigger3), + enabled = false, + dataSources = DataSources( + alertsIndex = customAlertsIndex1, + findingsIndex = customFindingsIndex1, + findingsIndexPattern = customFindingsIndexPattern1 + ) + ) + + val monitorResponse3 = createMonitor(monitor3)!! + val d1 = Delegate(1, monitorResponse.id) + val d2 = Delegate(2, monitorResponse2.id) + val d3 = Delegate( + 3, monitorResponse3.id, + ChainedMonitorFindings(null, listOf(monitorResponse.id, monitorResponse2.id)) + ) + var workflow = Workflow( + id = "", + name = "test", + enabled = false, + schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), + lastUpdateTime = Instant.now(), + enabledTime = null, + workflowType = Workflow.WorkflowType.COMPOSITE, + user = randomUser(), + inputs = listOf(CompositeInput(org.opensearch.commons.alerting.model.Sequence(listOf(d1, d2, d3)))), + version = -1L, + schemaVersion = 0, + triggers = emptyList(), + auditDelegateMonitorAlerts = false + + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + + var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 + val testDoc1 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16644, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "1", testDoc1) + + testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 and monitor2 + val testDoc2 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16645, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "2", testDoc2) + + testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor2 and monitor3 (test_field_1 is us-east-1, so monitor1 does not match) + val testDoc3 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16645, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-east-1" + }""" + indexDoc(index, "3", testDoc3) + + val workflowId = workflowResponse.id + val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! 
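Before the assertions, the Sequence built above is the chained-findings contract in miniature: the delegate order fixes execution order, and a delegate carrying ChainedMonitorFindings only evaluates documents already found by the listed upstream monitors. Reduced to its core (ids are the test's own monitor responses):

    // d3 runs third and sees only the union of monitor #1's findings (docs 1, 2) and
    // monitor #2's findings (docs 2, 3) - all of which its _id:* query then matches,
    // hence the three findings asserted for monitorResponse3 below.
    val chained = Delegate(
        3, monitorResponse3.id,
        ChainedMonitorFindings(null, listOf(monitorResponse.id, monitorResponse2.id))
    )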
+ val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults + assertEquals(3, monitorsRunResults.size) + assertFindings(monitorResponse.id, customFindingsIndex1, 2, 2, listOf("1", "2")) + assertFindings(monitorResponse2.id, customFindingsIndex1, 2, 2, listOf("2", "3")) + assertFindings(monitorResponse3.id, customFindingsIndex1, 3, 3, listOf("1", "2", "3")) + } + + fun `test execute workflows with shared doc level monitor delegate`() { + val docQuery = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customAlertsIndex = "custom_alerts_index" + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "custom_findings_index-1" + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + alertsIndex = customAlertsIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + val monitorResponse = createMonitor(monitor)!! + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id), + auditDelegateMonitorAlerts = false + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + + var workflow1 = randomWorkflow( + monitorIds = listOf(monitorResponse.id), + auditDelegateMonitorAlerts = false + ) + val workflowResponse1 = upsertWorkflow(workflow1)!! + val workflowById1 = searchWorkflow(workflowResponse1.id) + assertNotNull(workflowById1) + + var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 + val testDoc1 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16644, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "1", testDoc1) + + testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + val testDoc2 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16645, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "2", testDoc2) + + val workflowId = workflowResponse.id + val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! 
+ val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults + assertEquals(1, monitorsRunResults.size) + + assertEquals(monitor.name, monitorsRunResults[0].monitorName) + assertEquals(1, monitorsRunResults[0].triggerResults.size) + + // Assert but do not ack the alerts (in order to verify later on that all the alerts are generated) + assertAlerts(monitorResponse.id, customAlertsIndex, alertSize = 2, workflowId) + assertFindings(monitorResponse.id, customFindingsIndex, 2, 2, listOf("1", "2")) + // Verify workflow and monitor delegate metadata + val workflowMetadata = searchWorkflowMetadata(id = workflowId) + assertNotNull("Workflow metadata not initialized", workflowMetadata) + assertEquals( + "Workflow metadata execution id not correct", + executeWorkflowResponse.workflowRunResult.executionId, + workflowMetadata!!.latestExecutionId + ) + val monitorMetadataId = getDelegateMonitorMetadataId(workflowMetadata, monitorResponse) + val monitorMetadata = searchMonitorMetadata(monitorMetadataId) + assertNotNull(monitorMetadata) + + // Execute second workflow + val workflowId1 = workflowResponse1.id + val executeWorkflowResponse1 = executeWorkflow(workflowById1, workflowId1, false)!! + val monitorsRunResults1 = executeWorkflowResponse1.workflowRunResult.monitorRunResults + assertEquals(1, monitorsRunResults1.size) + + assertEquals(monitor.name, monitorsRunResults1[0].monitorName) + assertEquals(1, monitorsRunResults1[0].triggerResults.size) + + val getAlertsResponse = assertAlerts(monitorResponse.id, customAlertsIndex, alertSize = 2, workflowId1) + assertAcknowledges(getAlertsResponse.alerts, monitorResponse.id, 2) + assertFindings(monitorResponse.id, customFindingsIndex, 4, 4, listOf("1", "2", "1", "2")) + // Verify workflow and monitor delegate metadata + val workflowMetadata1 = searchWorkflowMetadata(id = workflowId1) + assertNotNull("Workflow metadata not initialized", workflowMetadata1) + assertEquals( + "Workflow metadata execution id not correct", + executeWorkflowResponse1.workflowRunResult.executionId, + workflowMetadata1!!.latestExecutionId + ) + val monitorMetadataId1 = getDelegateMonitorMetadataId(workflowMetadata1, monitorResponse) + val monitorMetadata1 = searchMonitorMetadata(monitorMetadataId1) + assertNotNull(monitorMetadata1) + // Verify that for two workflows two different doc level monitor metadata documents have been created + assertTrue("Same monitor should be used in both workflows", monitorMetadata!!.monitorId == monitorMetadata1!!.monitorId) + assertTrue(monitorMetadata.id != monitorMetadata1.id) + } + + fun `test execute workflows with shared doc level monitor delegate updating delegate datasource`() { + val docQuery = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + var monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val monitorResponse = createMonitor(monitor)!! + + val workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id), auditDelegateMonitorAlerts = false + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + + val workflow1 = randomWorkflow( + monitorIds = listOf(monitorResponse.id), auditDelegateMonitorAlerts = false + ) + val workflowResponse1 = upsertWorkflow(workflow1)!! 
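The pair of assertions in the previous test pins down the metadata model: one shared delegate monitor, but a distinct monitor-metadata document per workflow. Any id scheme that namespaces the monitor's metadata by workflow satisfies both checks; one hypothetical shape, for illustration only and not necessarily the plugin's actual format:

    fun delegateMetadataId(workflowId: String, monitorId: String) = "$workflowId-$monitorId-metadata" // hypothetical
    check(delegateMetadataId("workflow1", "monitorX") != delegateMetadataId("workflow2", "monitorX"))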
+ val workflowById1 = searchWorkflow(workflowResponse1.id) + assertNotNull(workflowById1) + + var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 + val testDoc1 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16644, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "1", testDoc1) + + testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + val testDoc2 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16645, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "2", testDoc2) + + val workflowId = workflowResponse.id + val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! + val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults + assertEquals(1, monitorsRunResults.size) + + assertEquals(monitor.name, monitorsRunResults[0].monitorName) + assertEquals(1, monitorsRunResults[0].triggerResults.size) + + assertAlerts(monitorResponse.id, AlertIndices.ALERT_INDEX, alertSize = 2, workflowId) + assertFindings(monitorResponse.id, AlertIndices.FINDING_HISTORY_WRITE_INDEX, 2, 2, listOf("1", "2")) + // Verify workflow and monitor delegate metadata + val workflowMetadata = searchWorkflowMetadata(id = workflowId) + assertNotNull("Workflow metadata not initialized", workflowMetadata) + assertEquals( + "Workflow metadata execution id not correct", + executeWorkflowResponse.workflowRunResult.executionId, + workflowMetadata!!.latestExecutionId + ) + val monitorMetadataId = getDelegateMonitorMetadataId(workflowMetadata, monitorResponse) + val monitorMetadata = searchMonitorMetadata(monitorMetadataId) + assertNotNull(monitorMetadata) + + val customAlertsIndex = "custom_alerts_index" + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "custom_findings_index-1" + val monitorId = monitorResponse.id + updateMonitor( + monitor = monitor.copy( + dataSources = DataSources( + alertsIndex = customAlertsIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ), + monitorId + ) + + // Execute second workflow + val workflowId1 = workflowResponse1.id + val executeWorkflowResponse1 = executeWorkflow(workflowById1, workflowId1, false)!! 
+ val monitorsRunResults1 = executeWorkflowResponse1.workflowRunResult.monitorRunResults + assertEquals(1, monitorsRunResults1.size) + + assertEquals(monitor.name, monitorsRunResults1[0].monitorName) + assertEquals(1, monitorsRunResults1[0].triggerResults.size) + + // Verify alerts for the custom index + val getAlertsResponse = assertAlerts(monitorResponse.id, customAlertsIndex, alertSize = 2, workflowId1) + assertAcknowledges(getAlertsResponse.alerts, monitorResponse.id, 2) + assertFindings(monitorResponse.id, customFindingsIndex, 2, 2, listOf("1", "2")) + + // Verify workflow and monitor delegate metadata + val workflowMetadata1 = searchWorkflowMetadata(id = workflowId1) + assertNotNull("Workflow metadata not initialized", workflowMetadata1) + assertEquals( + "Workflow metadata execution id not correct", + executeWorkflowResponse1.workflowRunResult.executionId, + workflowMetadata1!!.latestExecutionId + ) + val monitorMetadataId1 = getDelegateMonitorMetadataId(workflowMetadata1, monitorResponse) + val monitorMetadata1 = searchMonitorMetadata(monitorMetadataId1) + assertNotNull(monitorMetadata1) + // Verify that for two workflows two different doc level monitor metadata documents have been created + assertTrue("Same monitor should be used in both workflows", monitorMetadata!!.monitorId == monitorMetadata1!!.monitorId) + assertTrue(monitorMetadata.id != monitorMetadata1.id) + } + + fun `test execute workflow verify workflow metadata`() { + val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger1) + ) + val monitorResponse = createMonitor(monitor1)!! + + val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) + val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2)) + val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput2), + triggers = listOf(trigger2), + ) + + val monitorResponse2 = createMonitor(monitor2)!! + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id, monitorResponse2.id) + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + + var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 + val testDoc1 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16644, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "1", testDoc1) + // First execution + val workflowId = workflowResponse.id + val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! 
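This test exercises the complementary invariant to the previous two: re-running the same workflow refreshes latestExecutionId in the workflow metadata while reusing the same metadata documents. Stated as a hypothetical check over two run snapshots (the data class is illustrative, not part of the suite):

    data class RunSnapshot(val executionId: String, val workflowMetadataId: String, val monitorMetadataId: String)
    fun verifyStableMetadata(first: RunSnapshot, second: RunSnapshot) {
        check(first.executionId != second.executionId)               // a fresh execution id per run
        check(first.workflowMetadataId == second.workflowMetadataId) // same workflow metadata document
        check(first.monitorMetadataId == second.monitorMetadataId)   // same delegate metadata document
    }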
+ val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults
+ assertEquals(2, monitorsRunResults.size)
+
+ val workflowMetadata = searchWorkflowMetadata(id = workflowId)
+ assertNotNull("Workflow metadata not initialized", workflowMetadata)
+ assertEquals(
+ "Workflow metadata execution id not correct",
+ executeWorkflowResponse.workflowRunResult.executionId,
+ workflowMetadata!!.latestExecutionId
+ )
+ val monitorMetadataId = getDelegateMonitorMetadataId(workflowMetadata, monitorResponse)
+ val monitorMetadata = searchMonitorMetadata(monitorMetadataId)
+ assertNotNull(monitorMetadata)
+
+ // Second execution
+ val executeWorkflowResponse1 = executeWorkflow(workflowById, workflowId, false)!!
+ val monitorsRunResults1 = executeWorkflowResponse1.workflowRunResult.monitorRunResults
+ assertEquals(2, monitorsRunResults1.size)
+
+ val workflowMetadata1 = searchWorkflowMetadata(id = workflowId)
+ assertNotNull("Workflow metadata not initialized", workflowMetadata1)
+ assertEquals(
+ "Workflow metadata execution id not correct",
+ executeWorkflowResponse1.workflowRunResult.executionId,
+ workflowMetadata1!!.latestExecutionId
+ )
+ val monitorMetadataId1 = getDelegateMonitorMetadataId(workflowMetadata1, monitorResponse)
+ assertTrue(monitorMetadataId == monitorMetadataId1)
+ val monitorMetadata1 = searchMonitorMetadata(monitorMetadataId1)
+ assertNotNull(monitorMetadata1)
+ }
+
+ fun `test execute workflow dryrun verify workflow metadata not created`() {
+ val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf())
+ val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1))
+ val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+ var monitor1 = randomDocumentLevelMonitor(
+ inputs = listOf(docLevelInput1),
+ triggers = listOf(trigger1)
+ )
+ val monitorResponse = createMonitor(monitor1)!!
+
+ val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf())
+ val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2))
+ val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+ var monitor2 = randomDocumentLevelMonitor(
+ inputs = listOf(docLevelInput2),
+ triggers = listOf(trigger2),
+ )
+
+ val monitorResponse2 = createMonitor(monitor2)!!
+
+ var workflow = randomWorkflow(
+ monitorIds = listOf(monitorResponse.id, monitorResponse2.id)
+ )
+ val workflowResponse = upsertWorkflow(workflow)!!
+ val workflowById = searchWorkflow(workflowResponse.id)
+ assertNotNull(workflowById)
+
+ var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS))
+ // Matches monitor1
+ val testDoc1 = """{
+ "message" : "This is an error from IAD region",
+ "source.ip.v6.v2" : 16644,
+ "test_strict_date_time" : "$testTime",
+ "test_field_1" : "us-west-2"
+ }"""
+ indexDoc(index, "1", testDoc1)
+ // First execution
+ val workflowId = workflowResponse.id
+ val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, true)
+
+ assertNotNull("Workflow run result is null", executeWorkflowResponse)
+ val monitorsRunResults = executeWorkflowResponse!!.workflowRunResult.monitorRunResults
+ assertEquals(2, monitorsRunResults.size)
+
+ var exception: java.lang.Exception? = null
+ try {
+ searchWorkflowMetadata(id = workflowId)
+ } catch (ex: java.lang.Exception) {
+ exception = ex
+ }
+ // A dry run must not persist workflow metadata, so the lookup is expected to fail
+ assertTrue("Expected workflow metadata lookup to fail after dry run", exception is java.util.NoSuchElementException)
+ }
+
+ fun `test execute workflow with custom alerts and finding index with bucket and doc monitor bucket monitor used as chained finding`() {
+ val query = QueryBuilders.rangeQuery("test_strict_date_time")
+ .gt("{{period_end}}||-10d")
+ .lte("{{period_end}}")
+ .format("epoch_millis")
+ val compositeSources = listOf(
+ TermsValuesSourceBuilder("test_field_1").field("test_field_1")
+ )
+ val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources)
+ val input = SearchInput(indices = listOf(index), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg))
+ // Bucket level monitor will reduce the matched doc ids to those that belong to a bucket containing more than 1 document after term grouping
+ val triggerScript = """
+ params.docCount > 1
+ """.trimIndent()
+
+ var trigger = randomBucketLevelTrigger()
+ trigger = trigger.copy(
+ bucketSelector = BucketSelectorExtAggregationBuilder(
+ name = trigger.id,
+ bucketsPathsMap = mapOf("docCount" to "_count"),
+ script = Script(triggerScript),
+ parentBucketPath = "composite_agg",
+ filter = null,
+ )
+ )
+ val bucketCustomAlertsIndex = "custom_alerts_index"
+ val bucketCustomFindingsIndex = "custom_findings_index"
+ val bucketCustomFindingsIndexPattern = "custom_findings_index-1"
+
+ val bucketLevelMonitorResponse = createMonitor(
+ randomBucketLevelMonitor(
+ inputs = listOf(input),
+ enabled = false,
+ triggers = listOf(trigger),
+ dataSources = DataSources(
+ findingsEnabled = true,
+ alertsIndex = bucketCustomAlertsIndex,
+ findingsIndex = bucketCustomFindingsIndex,
+ findingsIndexPattern = bucketCustomFindingsIndexPattern
+ )
+ )
+ )!!
+
+ val docQuery1 = DocLevelQuery(query = "test_field_1:\"test_value_2\"", name = "1", fields = listOf())
+ val docQuery2 = DocLevelQuery(query = "test_field_1:\"test_value_1\"", name = "2", fields = listOf())
+ val docQuery3 = DocLevelQuery(query = "test_field_1:\"test_value_3\"", name = "3", fields = listOf())
+ val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1, docQuery2, docQuery3))
+ val docTrigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+ val docCustomAlertsIndex = "custom_alerts_index"
+ val docCustomFindingsIndex = "custom_findings_index"
+ val docCustomFindingsIndexPattern = "custom_findings_index-1"
+ var docLevelMonitor = randomDocumentLevelMonitor(
+ inputs = listOf(docLevelInput),
+ triggers = listOf(docTrigger),
+ dataSources = DataSources(
+ alertsIndex = docCustomAlertsIndex,
+ findingsIndex = docCustomFindingsIndex,
+ findingsIndexPattern = docCustomFindingsIndexPattern
+ )
+ )
+
+ val docLevelMonitorResponse = createMonitor(docLevelMonitor)!!
+ // 1. bucketMonitor (chainedFinding = null) 2. docMonitor (chainedFinding = bucketMonitor)
+ var workflow = randomWorkflow(
+ monitorIds = listOf(bucketLevelMonitorResponse.id, docLevelMonitorResponse.id), auditDelegateMonitorAlerts = false
+ )
+ val workflowResponse = upsertWorkflow(workflow)!!
+ val workflowById = searchWorkflow(workflowResponse.id)
+ assertNotNull(workflowById)
+
+ // Creates 5 documents
+ insertSampleTimeSerializedData(
+ index,
+ listOf(
+ "test_value_1",
+ "test_value_1", // adding duplicate to verify aggregation
+ "test_value_2",
+ "test_value_2",
+ "test_value_3"
+ )
+ )
+
+ val workflowId = workflowResponse.id
+ // 1. Bucket level monitor should reduce the doc findings to 4 (1, 2, 3, 4)
+ // 2. Doc level monitor will match those 4 documents although it contains rules for matching all 5 documents (docQuery3 matches the fifth)
+ val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!!
+ assertNotNull(executeWorkflowResponse)
+
+ for (monitorRunResults in executeWorkflowResponse.workflowRunResult.monitorRunResults) {
+ if (bucketLevelMonitorResponse.monitor.name == monitorRunResults.monitorName) {
+ val searchResult = monitorRunResults.inputResults.results.first()
+
+ @Suppress("UNCHECKED_CAST")
+ val buckets = searchResult.stringMap("aggregations")?.stringMap("composite_agg")
+ ?.get("buckets") as List<kotlin.collections.Map<String, Any>>
+ assertEquals("Incorrect search result", 3, buckets.size)
+
+ val getAlertsResponse = assertAlerts(bucketLevelMonitorResponse.id, bucketCustomAlertsIndex, alertSize = 2, workflowId)
+ assertAcknowledges(getAlertsResponse.alerts, bucketLevelMonitorResponse.id, 2)
+ assertFindings(bucketLevelMonitorResponse.id, bucketCustomFindingsIndex, 1, 4, listOf("1", "2", "3", "4"))
+ } else {
+ assertEquals(1, monitorRunResults.inputResults.results.size)
+ val values = monitorRunResults.triggerResults.values
+ assertEquals(1, values.size)
+ @Suppress("UNCHECKED_CAST")
+ val docLevelTrigger = values.iterator().next() as DocumentLevelTriggerRunResult
+ val triggeredDocIds = docLevelTrigger.triggeredDocs.map { it.split("|")[0] }
+ val expectedTriggeredDocIds = listOf("1", "2", "3", "4")
+ assertEquals(expectedTriggeredDocIds, triggeredDocIds.sorted())
+
+ val getAlertsResponse = assertAlerts(docLevelMonitorResponse.id, docCustomAlertsIndex, alertSize = 4, workflowId)
+ assertAcknowledges(getAlertsResponse.alerts, docLevelMonitorResponse.id, 4)
+ assertFindings(docLevelMonitorResponse.id, docCustomFindingsIndex, 4, 4, listOf("1", "2", "3", "4"))
+ }
+ }
+ }
+
+ fun `test chained alerts for bucket level monitors generating audit alerts custom alerts index`() {
+ val customAlertIndex = "custom-alert-index"
+ val customAlertHistoryIndex = "custom-alert-history-index"
+ val customAlertHistoryIndexPattern = ""
+ val query = QueryBuilders.rangeQuery("test_strict_date_time")
+ .gt("{{period_end}}||-10d")
+ .lte("{{period_end}}")
+ .format("epoch_millis")
+ val compositeSources = listOf(
+ TermsValuesSourceBuilder("test_field_1").field("test_field_1")
+ )
+ val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources)
+ val input = SearchInput(indices = listOf(index), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg))
+ // Bucket level monitor will reduce the matched doc ids to those that belong to a bucket containing more than 1 document after term grouping
+ val triggerScript = """
+ params.docCount > 1
+ """.trimIndent()
+
+ var trigger = randomBucketLevelTrigger()
+ trigger = trigger.copy(
+ bucketSelector = BucketSelectorExtAggregationBuilder(
+ name = trigger.id,
+ bucketsPathsMap = mapOf("docCount" to "_count"),
+ script = Script(triggerScript),
+ parentBucketPath = "composite_agg",
+ filter = null,
+ )
+ )
+
+ val bucketLevelMonitorResponse = createMonitor(
+ randomBucketLevelMonitor(
+ inputs = listOf(input),
+ enabled = false,
+ triggers = listOf(trigger),
+ dataSources = DataSources(
+ alertsIndex = customAlertIndex,
+ alertsHistoryIndexPattern = customAlertHistoryIndexPattern,
+ alertsHistoryIndex = customAlertHistoryIndex
+ )
+ )
+ )!!
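+ // Editor's illustration of the bucket selector semantics above: after the composite aggregation
+ // groups docs by test_field_1, only buckets whose "_count" satisfies params.docCount > 1 keep
+ // their alerts. With the five sample values inserted below, that selects the two duplicated
+ // buckets (the counts here are a plain-Kotlin sketch, not plugin API calls):
+ val illustrativeBucketCounts = mapOf("test_value_1" to 2, "test_value_2" to 2, "test_value_3" to 1)
+ assertEquals(setOf("test_value_1", "test_value_2"), illustrativeBucketCounts.filterValues { it > 1 }.keys)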
+ + val bucketLevelMonitorResponse2 = createMonitor( + randomBucketLevelMonitor( + inputs = listOf(input), + enabled = false, + triggers = listOf(trigger), + dataSources = DataSources( + alertsIndex = customAlertIndex, + alertsHistoryIndexPattern = customAlertHistoryIndexPattern, + alertsHistoryIndex = customAlertHistoryIndex + + ) + ) + )!! + + val andTrigger = randomChainedAlertTrigger( + name = "1And2", + condition = Script("monitor[id=${bucketLevelMonitorResponse.id}] && monitor[id=${bucketLevelMonitorResponse2.id}]") + ) + // 1. bucketMonitor (chainedFinding = null) 2. docMonitor (chainedFinding = bucketMonitor) + var workflow = randomWorkflow( + monitorIds = listOf(bucketLevelMonitorResponse.id, bucketLevelMonitorResponse2.id), + triggers = listOf(andTrigger) + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + + // Creates 5 documents + insertSampleTimeSerializedData( + index, + listOf( + "test_value_1", + "test_value_1", // adding duplicate to verify aggregation + "test_value_2", + "test_value_2", + "test_value_3" + ) + ) + + val workflowId = workflowResponse.id + // 1. bucket level monitor should reduce the doc findings to 4 (1, 2, 3, 4) + // 2. Doc level monitor will match those 4 documents although it contains rules for matching all 5 documents (docQuery3 matches the fifth) + val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! + assertNotNull(executeWorkflowResponse) + + Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults.isNotEmpty()) + Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults.containsKey(andTrigger.id)) + Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults[andTrigger.id]!!.triggered) + + val auditStateAlerts = getAuditStateAlerts( + alertsIndex = customAlertHistoryIndex, + monitorId = bucketLevelMonitorResponse.id, + executionId = executeWorkflowResponse.workflowRunResult.executionId + ) + Assert.assertEquals(auditStateAlerts.size, 2) + + val auditStateAlerts2 = getAuditStateAlerts( + alertsIndex = customAlertHistoryIndex, + monitorId = bucketLevelMonitorResponse2.id, + executionId = executeWorkflowResponse.workflowRunResult.executionId + ) + Assert.assertEquals(auditStateAlerts2.size, 2) + } + + fun `test chained alerts for bucket level monitors generating audit alerts`() { + val query = QueryBuilders.rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + val compositeSources = listOf( + TermsValuesSourceBuilder("test_field_1").field("test_field_1") + ) + val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) + val input = SearchInput(indices = listOf(index), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) + // Bucket level monitor will reduce the size of matched doc ids on those that belong to a bucket that contains more than 1 document after term grouping + val triggerScript = """ + params.docCount > 1 + """.trimIndent() + + var trigger = randomBucketLevelTrigger() + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "composite_agg", + filter = null, + ) + ) + + val bucketLevelMonitorResponse = createMonitor( + randomBucketLevelMonitor( + inputs = listOf(input), + enabled = false, + triggers = 
listOf(trigger) + ) + )!! + + val bucketLevelMonitorResponse2 = createMonitor( + randomBucketLevelMonitor( + inputs = listOf(input), + enabled = false, + triggers = listOf(trigger) + ) + )!! + + val andTrigger = randomChainedAlertTrigger( + name = "1And2", + condition = Script("monitor[id=${bucketLevelMonitorResponse.id}] && monitor[id=${bucketLevelMonitorResponse2.id}]") + ) + // 1. bucketMonitor (chainedFinding = null) 2. docMonitor (chainedFinding = bucketMonitor) + var workflow = randomWorkflow( + monitorIds = listOf(bucketLevelMonitorResponse.id, bucketLevelMonitorResponse2.id), + triggers = listOf(andTrigger) + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + + // Creates 5 documents + insertSampleTimeSerializedData( + index, + listOf( + "test_value_1", + "test_value_1", // adding duplicate to verify aggregation + "test_value_2", + "test_value_2", + "test_value_3" + ) + ) + + val workflowId = workflowResponse.id + // 1. bucket level monitor should reduce the doc findings to 4 (1, 2, 3, 4) + // 2. Doc level monitor will match those 4 documents although it contains rules for matching all 5 documents (docQuery3 matches the fifth) + val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! + assertNotNull(executeWorkflowResponse) + + Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults.isNotEmpty()) + Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults.containsKey(andTrigger.id)) + Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults[andTrigger.id]!!.triggered) + + val auditStateAlerts = getAuditStateAlerts( + alertsIndex = bucketLevelMonitorResponse.monitor.dataSources.alertsHistoryIndex, + monitorId = bucketLevelMonitorResponse.id, + executionId = executeWorkflowResponse.workflowRunResult.executionId + ) + Assert.assertEquals(auditStateAlerts.size, 2) + + val auditStateAlerts2 = getAuditStateAlerts( + alertsIndex = bucketLevelMonitorResponse.monitor.dataSources.alertsHistoryIndex, + monitorId = bucketLevelMonitorResponse2.id, + executionId = executeWorkflowResponse.workflowRunResult.executionId + ) + Assert.assertEquals(auditStateAlerts2.size, 2) + } + + fun `test execute with custom alerts and finding index with bucket and doc monitor when doc monitor is used in chained finding`() { + val docQuery1 = DocLevelQuery(query = "test_field_1:\"test_value_2\"", name = "1", fields = listOf()) + val docQuery2 = DocLevelQuery(query = "test_field_1:\"test_value_3\"", name = "2", fields = listOf()) + + var docLevelMonitor = randomDocumentLevelMonitor( + inputs = listOf(DocLevelMonitorInput("description", listOf(index), listOf(docQuery1, docQuery2))), + triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)), + dataSources = DataSources( + alertsIndex = "custom_alerts_index", + findingsIndex = "custom_findings_index", + findingsIndexPattern = "custom_findings_index-1" + ) + ) + + val docLevelMonitorResponse = createMonitor(docLevelMonitor)!! 
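+ // Editor's sketch of the chained-findings narrowing exercised by the workflow built below:
+ // each delegate only evaluates the documents surfaced by the previous delegate's findings,
+ // and the first delegate has no chained source. Names are illustrative only:
+ val illustrativeChain = listOf("docMonitor", "bucketMonitor", "docMonitor1", "queryMonitor")
+ val chainedFindingSources = illustrativeChain.mapIndexed { i, m -> m to illustrativeChain.getOrNull(i - 1) }
+ assertEquals("docMonitor" to null, chainedFindingSources.first())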
+ + val query = QueryBuilders.rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + val compositeSources = listOf( + TermsValuesSourceBuilder("test_field_1").field("test_field_1") + ) + val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) + val input = SearchInput(indices = listOf(index), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) + // Bucket level monitor will reduce the size of matched doc ids on those that belong to a bucket that contains more than 1 document after term grouping + val triggerScript = """ + params.docCount > 1 + """.trimIndent() + + var trigger = randomBucketLevelTrigger() + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "composite_agg", + filter = null, + ) + ) + + val bucketLevelMonitorResponse = createMonitor( + randomBucketLevelMonitor( + inputs = listOf(input), + enabled = false, + triggers = listOf(trigger), + dataSources = DataSources( + findingsEnabled = true, + alertsIndex = "custom_alerts_index", + findingsIndex = "custom_findings_index", + findingsIndexPattern = "custom_findings_index-1" + ) + ) + )!! + + var docLevelMonitor1 = randomDocumentLevelMonitor( + // Match the documents with test_field_1: test_value_3 + inputs = listOf(DocLevelMonitorInput("description", listOf(index), listOf(docQuery2))), + triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)), + dataSources = DataSources( + findingsEnabled = true, + alertsIndex = "custom_alerts_index_1", + findingsIndex = "custom_findings_index_1", + findingsIndexPattern = "custom_findings_index_1-1" + ) + ) + + val docLevelMonitorResponse1 = createMonitor(docLevelMonitor1)!! + + val queryMonitorInput = SearchInput( + indices = listOf(index), + query = SearchSourceBuilder().query( + QueryBuilders + .rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + ) + ) + val queryTriggerScript = """ + return ctx.results[0].hits.hits.size() > 0 + """.trimIndent() + + val queryLevelTrigger = randomQueryLevelTrigger(condition = Script(queryTriggerScript)) + val queryMonitorResponse = + createMonitor(randomQueryLevelMonitor(inputs = listOf(queryMonitorInput), triggers = listOf(queryLevelTrigger)))!! + + // 1. docMonitor (chainedFinding = null) 2. bucketMonitor (chainedFinding = docMonitor) 3. docMonitor (chainedFinding = bucketMonitor) 4. queryMonitor (chainedFinding = docMonitor 3) + var workflow = randomWorkflow( + monitorIds = listOf( + docLevelMonitorResponse.id, + bucketLevelMonitorResponse.id, + docLevelMonitorResponse1.id, + queryMonitorResponse.id + ), + auditDelegateMonitorAlerts = false + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + + // Creates 5 documents + insertSampleTimeSerializedData( + index, + listOf( + "test_value_1", + "test_value_1", // adding duplicate to verify aggregation + "test_value_2", + "test_value_2", + "test_value_3", + "test_value_3" + ) + ) + + val workflowId = workflowResponse.id + // 1. Doc level monitor should reduce the doc findings to 4 (3 - test_value_2, 4 - test_value_2, 5 - test_value_3, 6 - test_value_3) + // 2. 
Bucket level monitor will fetch the docs from the current findings execution, although it contains rules for matching documents which have test_value_2 and test_value_3
+ val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!!
+ assertNotNull(executeWorkflowResponse)
+
+ for (monitorRunResults in executeWorkflowResponse.workflowRunResult.monitorRunResults) {
+ when (monitorRunResults.monitorName) {
+ // Verify first doc level monitor execution, alerts and findings
+ docLevelMonitorResponse.monitor.name -> {
+ assertEquals(1, monitorRunResults.inputResults.results.size)
+ val values = monitorRunResults.triggerResults.values
+ assertEquals(1, values.size)
+ @Suppress("UNCHECKED_CAST")
+ val docLevelTrigger = values.iterator().next() as DocumentLevelTriggerRunResult
+ val triggeredDocIds = docLevelTrigger.triggeredDocs.map { it.split("|")[0] }
+ val expectedTriggeredDocIds = listOf("3", "4", "5", "6")
+ assertEquals(expectedTriggeredDocIds, triggeredDocIds.sorted())
+
+ val getAlertsResponse =
+ assertAlerts(
+ docLevelMonitorResponse.id,
+ docLevelMonitorResponse.monitor.dataSources.alertsIndex,
+ alertSize = 4,
+ workflowId = workflowId
+ )
+ assertAcknowledges(getAlertsResponse.alerts, docLevelMonitorResponse.id, 4)
+ assertFindings(
+ docLevelMonitorResponse.id,
+ docLevelMonitorResponse.monitor.dataSources.findingsIndex,
+ 4,
+ 4,
+ listOf("3", "4", "5", "6")
+ )
+ }
+ // Verify second bucket level monitor execution, alerts and findings
+ bucketLevelMonitorResponse.monitor.name -> {
+ val searchResult = monitorRunResults.inputResults.results.first()
+
+ @Suppress("UNCHECKED_CAST")
+ val buckets =
+ searchResult
+ .stringMap("aggregations")?.stringMap("composite_agg")
+ ?.get("buckets") as List<kotlin.collections.Map<String, Any>>
+ assertEquals("Incorrect search result", 2, buckets.size)
+
+ val getAlertsResponse =
+ assertAlerts(
+ bucketLevelMonitorResponse.id,
+ bucketLevelMonitorResponse.monitor.dataSources.alertsIndex,
+ alertSize = 2,
+ workflowId
+ )
+ assertAcknowledges(getAlertsResponse.alerts, bucketLevelMonitorResponse.id, 2)
+ assertFindings(
+ bucketLevelMonitorResponse.id,
+ bucketLevelMonitorResponse.monitor.dataSources.findingsIndex,
+ 1,
+ 4,
+ listOf("3", "4", "5", "6")
+ )
+ }
+ // Verify third doc level monitor execution, alerts and findings
+ docLevelMonitorResponse1.monitor.name -> {
+ assertEquals(1, monitorRunResults.inputResults.results.size)
+ val values = monitorRunResults.triggerResults.values
+ assertEquals(1, values.size)
+ @Suppress("UNCHECKED_CAST")
+ val docLevelTrigger = values.iterator().next() as DocumentLevelTriggerRunResult
+ val triggeredDocIds = docLevelTrigger.triggeredDocs.map { it.split("|")[0] }
+ val expectedTriggeredDocIds = listOf("5", "6")
+ assertEquals(expectedTriggeredDocIds, triggeredDocIds.sorted())
+
+ val getAlertsResponse =
+ assertAlerts(
+ docLevelMonitorResponse1.id,
+ docLevelMonitorResponse1.monitor.dataSources.alertsIndex,
+ alertSize = 2,
+ workflowId
+ )
+ assertAcknowledges(getAlertsResponse.alerts, docLevelMonitorResponse1.id, 2)
+ assertFindings(
+ docLevelMonitorResponse1.id,
+ docLevelMonitorResponse1.monitor.dataSources.findingsIndex,
+ 2,
+ 2,
+ listOf("5", "6")
+ )
+ }
+ // Verify fourth query level monitor execution
+ queryMonitorResponse.monitor.name -> {
+ assertEquals(1, monitorRunResults.inputResults.results.size)
+ val values = monitorRunResults.triggerResults.values
+ assertEquals(1, values.size)
+ @Suppress("UNCHECKED_CAST")
+ val totalHits =
+ (
+ (
+ monitorRunResults.inputResults.results[0]["hits"]
+ as kotlin.collections.Map<String, Any>
+ )["total"] as kotlin.collections.Map<String, Any>
+ )["value"]
+ assertEquals(2, totalHits)
+ @Suppress("UNCHECKED_CAST")
+ val docIds =
+ (
+ (
+ monitorRunResults.inputResults.results[0]["hits"]
+ as kotlin.collections.Map<String, Any>
+ )["hits"] as List<kotlin.collections.Map<String, Any>>
+ )
+ .map { it["_id"]!! }
+ assertEquals(listOf("5", "6"), docIds.sorted())
+ }
+ }
+ }
+ }
+
+ fun `test execute workflow input error`() {
+ val docLevelInput = DocLevelMonitorInput(
+ "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+ )
+ val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+ val monitor = randomDocumentLevelMonitor(
+ inputs = listOf(docLevelInput),
+ triggers = listOf(trigger)
+ )
+
+ val monitorResponse = createMonitor(monitor)!!
+ var workflow = randomWorkflow(
+ monitorIds = listOf(monitorResponse.id), auditDelegateMonitorAlerts = false
+ )
+ val workflowResponse = upsertWorkflow(workflow)!!
+ val workflowById = searchWorkflow(workflowResponse.id)
+ assertNotNull(workflowById)
+
+ deleteIndex(index)
+
+ val response = executeWorkflow(workflowById, workflowById!!.id, false)!!
+ val error = response.workflowRunResult.monitorRunResults[0].error
+ assertNotNull(error)
+ assertTrue(error is AlertingException)
+ assertEquals(RestStatus.INTERNAL_SERVER_ERROR, (error as AlertingException).status)
+ assertTrue(error.message!!.contains("no such index [$index]"))
+ }
+
+ fun `test execute workflow wrong workflow id`() {
+ val docLevelInput = DocLevelMonitorInput(
+ "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+ )
+ val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+ val monitor = randomDocumentLevelMonitor(
+ inputs = listOf(docLevelInput),
+ triggers = listOf(trigger)
+ )
+
+ val monitorResponse = createMonitor(monitor)!!
+
+ val workflowRequest = randomWorkflow(
+ monitorIds = listOf(monitorResponse.id)
+ )
+ val workflowResponse = upsertWorkflow(workflowRequest)!!
+ val workflowId = workflowResponse.id
+ val getWorkflowResponse = getWorkflowById(id = workflowResponse.id)
+
+ assertNotNull(getWorkflowResponse)
+ assertEquals(workflowId, getWorkflowResponse.id)
+
+ var exception: java.lang.Exception? = null
+ val badWorkflowId = getWorkflowResponse.id + "bad"
+ try {
+ executeWorkflow(id = badWorkflowId)
+ } catch (ex: java.lang.Exception) {
+ exception = ex
+ }
+ assertTrue(exception is ExecutionException)
+ assertTrue(exception!!.cause is AlertingException)
+ assertEquals(RestStatus.NOT_FOUND, (exception.cause as AlertingException).status)
+ assertEquals("Can't find workflow with id: $badWorkflowId", exception.cause!!.message)
+ }
+
+ private fun assertFindings(
+ monitorId: String,
+ customFindingsIndex: String,
+ findingSize: Int,
+ matchedQueryNumber: Int,
+ relatedDocIds: List<String>,
+ ) {
+ val findings = searchFindings(monitorId, customFindingsIndex)
+ assertEquals("Findings saved for test monitor", findingSize, findings.size)
+
+ val findingDocIds = findings.flatMap { it.relatedDocIds }
+
+ assertEquals("Didn't match $matchedQueryNumber query", matchedQueryNumber, findingDocIds.size)
+ assertTrue("Findings saved for test monitor", relatedDocIds.containsAll(findingDocIds))
+ }
+
+ private fun getAuditStateAlerts(
+ alertsIndex: String? = AlertIndices.ALERT_INDEX,
+ monitorId: String,
+ executionId: String? = null,
+ ): List<Alert> {
+ val searchRequest = SearchRequest(alertsIndex)
+ val boolQueryBuilder = QueryBuilders.boolQuery()
+ boolQueryBuilder.must(TermQueryBuilder("monitor_id", monitorId))
+ if (executionId.isNullOrEmpty() == false)
+ boolQueryBuilder.must(TermQueryBuilder("execution_id", executionId))
+ searchRequest.source().query(boolQueryBuilder)
+ val searchResponse = client().search(searchRequest).get()
+ return searchResponse.hits.map { hit ->
+ val xcp = XContentHelper.createParser(
+ xContentRegistry(), LoggingDeprecationHandler.INSTANCE,
+ hit.sourceRef, XContentType.JSON
+ )
+ XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp)
+ val alert = Alert.parse(xcp, hit.id, hit.version)
+ alert
+ }
+ }
+
+ private fun assertAlerts(
+ monitorId: String,
+ alertsIndex: String? = AlertIndices.ALERT_INDEX,
+ executionId: String? = null,
+ alertSize: Int,
+ workflowId: String,
+ ): GetAlertsResponse {
+ val alerts = searchAlerts(monitorId, alertsIndex!!, executionId = executionId)
+ assertEquals("Alert saved for test monitor", alertSize, alerts.size)
+ val table = Table("asc", "id", null, alertSize, 0, "")
+ var getAlertsResponse = client()
+ .execute(
+ AlertingActions.GET_ALERTS_ACTION_TYPE,
+ GetAlertsRequest(table, "ALL", "ALL", null, alertsIndex)
+ )
+ .get()
+ assertTrue(getAlertsResponse != null)
+ assertTrue(getAlertsResponse.alerts.size == alertSize)
+ getAlertsResponse = client()
+ .execute(
+ AlertingActions.GET_ALERTS_ACTION_TYPE,
+ GetAlertsRequest(table, "ALL", "ALL", monitorId, null, workflowIds = listOf(workflowId))
+ )
+ .get()
+ assertTrue(getAlertsResponse != null)
+ assertTrue(getAlertsResponse.alerts.size == alertSize)
+
+ return getAlertsResponse
+ }
+
+ private fun assertAcknowledges(
+ alerts: List<Alert>,
+ monitorId: String,
+ alertSize: Int,
+ ) {
+ val alertIds = alerts.map { it.id }
+ val acknowledgeAlertResponse = client().execute(
+ AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE,
+ AcknowledgeAlertRequest(monitorId, alertIds, WriteRequest.RefreshPolicy.IMMEDIATE)
+ ).get()
+
+ assertEquals(alertSize, acknowledgeAlertResponse.acknowledged.size)
+ }
+
+ private fun verifyAcknowledgeChainedAlerts(
+ alerts: List<Alert>,
+ workflowId: String,
+ alertSize: Int,
+ ) {
+ val alertIds = alerts.map { it.id }.toMutableList()
+ val acknowledgeAlertResponse = ackChainedAlerts(alertIds, workflowId)
+ assertTrue(acknowledgeAlertResponse.acknowledged.stream().map { it.id }.collect(Collectors.toList()).containsAll(alertIds))
+ assertEquals(alertSize, acknowledgeAlertResponse.acknowledged.size)
+ alertIds.add("dummy")
+ val redundantAck = ackChainedAlerts(alertIds, workflowId)
+ Assert.assertTrue(redundantAck.acknowledged.isEmpty())
+ Assert.assertTrue(redundantAck.missing.contains("dummy"))
+ alertIds.remove("dummy")
+ Assert.assertTrue(redundantAck.failed.map { it.id }.toList().containsAll(alertIds))
+ }
+
+ private fun ackChainedAlerts(alertIds: List<String>, workflowId: String): AcknowledgeAlertResponse {
+ return client().execute(
+ AlertingActions.ACKNOWLEDGE_CHAINED_ALERTS_ACTION_TYPE,
+ AcknowledgeChainedAlertRequest(workflowId, alertIds)
+ ).get()
+ }
+
+ private fun assertAuditStateAlerts(
+ monitorId: String,
+ alerts: List<Alert>,
+ ) {
+ alerts.forEach { Assert.assertEquals(it.state, Alert.State.AUDIT) }
+ val alertIds = alerts.stream().map { it.id }.collect(Collectors.toList())
+ val ack = client().execute(
+ AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE,
+ AcknowledgeAlertRequest(monitorId, alertIds, WriteRequest.RefreshPolicy.IMMEDIATE)
+
).get() + Assert.assertTrue(ack.acknowledged.isEmpty()) + Assert.assertTrue(ack.missing.containsAll(alertIds)) + Assert.assertTrue(ack.failed.isEmpty()) + } + + fun `test execute workflow with bucket-level and doc-level chained monitors`() { + createTestIndex(TEST_HR_INDEX) + + val compositeSources = listOf( + TermsValuesSourceBuilder("test_field_1").field("test_field_1") + ) + val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) + val input = SearchInput( + indices = listOf(TEST_HR_INDEX), + query = SearchSourceBuilder().size(0).query(QueryBuilders.matchAllQuery()).aggregation(compositeAgg) + ) + val triggerScript = """ + params.docCount > 0 + """.trimIndent() + + var trigger = randomBucketLevelTrigger() + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "composite_agg", + filter = null + ), + actions = listOf() + ) + val bucketMonitor = createMonitor( + randomBucketLevelMonitor( + inputs = listOf(input), + enabled = false, + triggers = listOf(trigger) + ) + ) + assertNotNull("The bucket monitor was not created", bucketMonitor) + + val docQuery1 = DocLevelQuery(query = "test_field_1:\"a\"", name = "3", fields = listOf()) + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(DocLevelMonitorInput("description", listOf(TEST_HR_INDEX), listOf(docQuery1))), + triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)) + ) + val docMonitor = createMonitor(monitor1)!! + assertNotNull("The doc level monitor was not created", docMonitor) + + val workflow = randomWorkflow(monitorIds = listOf(bucketMonitor!!.id, docMonitor.id)) + val workflowResponse = upsertWorkflow(workflow) + assertNotNull("The workflow was not created", workflowResponse) + + // Add a doc that is accessible to the user + indexDoc( + TEST_HR_INDEX, + "1", + """ + { + "test_field_1": "a", + "accessible": true + } + """.trimIndent() + ) + + // Add a second doc that is not accessible to the user + indexDoc( + TEST_HR_INDEX, + "2", + """ + { + "test_field_1": "b", + "accessible": false + } + """.trimIndent() + ) + + indexDoc( + TEST_HR_INDEX, + "3", + """ + { + "test_field_1": "c", + "accessible": true + } + """.trimIndent() + ) + + val executeResult = executeWorkflow(id = workflowResponse!!.id) + assertNotNull(executeResult) + assertEquals(2, executeResult!!.workflowRunResult.monitorRunResults.size) + } + + fun `test chained alerts for AND OR and NOT conditions with custom alerts indices`() { + val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex1 = "custom_findings_index" + val customFindingsIndexPattern1 = "custom_findings_index-1" + val customAlertsIndex = "custom_alerts_index" + val customAlertsHistoryIndex = "custom_alerts_history_index" + val customAlertsHistoryIndexPattern = "" + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger1), + dataSources = DataSources( + findingsIndex = customFindingsIndex1, + findingsIndexPattern = customFindingsIndexPattern1, + alertsIndex = customAlertsIndex, + alertsHistoryIndex = customAlertsHistoryIndex, + alertsHistoryIndexPattern = customAlertsHistoryIndexPattern + ) + ) + val monitorResponse = 
createMonitor(monitor1)!! + + val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) + val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2)) + val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex2 = "custom_findings_index_2" + val customFindingsIndexPattern2 = "custom_findings_index-2" + var monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput2), + triggers = listOf(trigger2), + dataSources = DataSources( + findingsIndex = customFindingsIndex2, + findingsIndexPattern = customFindingsIndexPattern2, + alertsIndex = customAlertsIndex, + alertsHistoryIndex = customAlertsHistoryIndex, + alertsHistoryIndexPattern = customAlertsHistoryIndexPattern + ) + ) + + val monitorResponse2 = createMonitor(monitor2)!! + val andTrigger = randomChainedAlertTrigger( + name = "1And2", + condition = Script("monitor[id=${monitorResponse.id}] && monitor[id=${monitorResponse2.id}]") + ) + val notTrigger = randomChainedAlertTrigger( + name = "Not1OrNot2", + condition = Script("!monitor[id=${monitorResponse.id}] || !monitor[id=${monitorResponse2.id}]") + ) + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id, monitorResponse2.id), + triggers = listOf(andTrigger, notTrigger) + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + val workflowId = workflowResponse.id + + var executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! + var triggerResults = executeWorkflowResponse.workflowRunResult.triggerResults + Assert.assertEquals(triggerResults.size, 2) + Assert.assertTrue(triggerResults.containsKey(andTrigger.id)) + Assert.assertTrue(triggerResults.containsKey(notTrigger.id)) + var andTriggerResult = triggerResults[andTrigger.id] + var notTriggerResult = triggerResults[notTrigger.id] + Assert.assertTrue(notTriggerResult!!.triggered) + Assert.assertFalse(andTriggerResult!!.triggered) + var res = + getWorkflowAlerts(workflowId = workflowId, alertIndex = customAlertsIndex, associatedAlertsIndex = customAlertsHistoryIndex) + var chainedAlerts = res.alerts + Assert.assertTrue(chainedAlerts.size == 1) + Assert.assertTrue(res.associatedAlerts.isEmpty()) + verifyAcknowledgeChainedAlerts(chainedAlerts, workflowId, 1) + Assert.assertTrue(chainedAlerts[0].executionId == executeWorkflowResponse.workflowRunResult.executionId) + Assert.assertTrue(chainedAlerts[0].monitorId == "") + Assert.assertTrue(chainedAlerts[0].triggerId == notTrigger.id) + var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 + val testDoc1 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16644, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "1", testDoc1) + + testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 and monitor2 + val testDoc2 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16645, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "2", testDoc2) + + testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Doesn't match + val testDoc3 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16645, + 
"test_strict_date_time" : "$testTime", + "test_field_1" : "us-east-1" + }""" + indexDoc(index, "3", testDoc3) + executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! + triggerResults = executeWorkflowResponse.workflowRunResult.triggerResults + Assert.assertEquals(triggerResults.size, 2) + Assert.assertTrue(triggerResults.containsKey(andTrigger.id)) + Assert.assertTrue(triggerResults.containsKey(notTrigger.id)) + andTriggerResult = triggerResults[andTrigger.id] + notTriggerResult = triggerResults[notTrigger.id] + Assert.assertFalse(notTriggerResult!!.triggered) + Assert.assertTrue(andTriggerResult!!.triggered) + res = getWorkflowAlerts(workflowId, alertIndex = customAlertsIndex, associatedAlertsIndex = customAlertsHistoryIndex) + chainedAlerts = res.alerts + val numChainedAlerts = 1 + Assert.assertTrue(res.associatedAlerts.isNotEmpty()) + Assert.assertTrue(chainedAlerts.size == numChainedAlerts) + Assert.assertTrue(chainedAlerts[0].executionId == executeWorkflowResponse.workflowRunResult.executionId) + Assert.assertTrue(chainedAlerts[0].monitorId == "") + Assert.assertTrue(chainedAlerts[0].triggerId == andTrigger.id) + val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults + assertEquals(2, monitorsRunResults.size) + + assertEquals(monitor1.name, monitorsRunResults[0].monitorName) + assertEquals(1, monitorsRunResults[0].triggerResults.size) + + Assert.assertEquals(monitor2.name, monitorsRunResults[1].monitorName) + Assert.assertEquals(1, monitorsRunResults[1].triggerResults.size) + + Assert.assertEquals( + monitor1.dataSources.alertsHistoryIndex, + CompositeWorkflowRunner.getDelegateMonitorAlertIndex(dataSources = monitor1.dataSources, workflow, true) + ) + val alerts = getAuditStateAlerts( + monitorId = monitorResponse.id, executionId = executeWorkflowResponse.workflowRunResult.executionId, + alertsIndex = monitor1.dataSources.alertsHistoryIndex, + ) + assertAuditStateAlerts(monitorResponse.id, alerts) + assertFindings(monitorResponse.id, customFindingsIndex1, 2, 2, listOf("1", "2")) + val associatedAlertIds = res.associatedAlerts.map { it.id }.toList() + associatedAlertIds.containsAll(alerts.map { it.id }.toList()) + val alerts1 = getAuditStateAlerts( + alertsIndex = monitor2.dataSources.alertsHistoryIndex, monitorId = monitorResponse2.id, + executionId = executeWorkflowResponse.workflowRunResult.executionId, + ) + assertAuditStateAlerts(monitorResponse2.id, alerts1) + assertFindings(monitorResponse2.id, customFindingsIndex2, 1, 1, listOf("2")) + associatedAlertIds.containsAll(alerts1.map { it.id }.toList()) + verifyAcknowledgeChainedAlerts(chainedAlerts, workflowId, numChainedAlerts) + } + + fun `test chained alerts for AND OR and NOT conditions`() { + val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex1 = "custom_findings_index" + val customFindingsIndexPattern1 = "custom_findings_index-1" + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger1), + dataSources = DataSources( + findingsIndex = customFindingsIndex1, + findingsIndexPattern = customFindingsIndexPattern1 + ) + ) + val monitorResponse = createMonitor(monitor1)!! 
+ + val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) + val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2)) + val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex2 = "custom_findings_index_2" + val customFindingsIndexPattern2 = "custom_findings_index-2" + var monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput2), + triggers = listOf(trigger2), + dataSources = DataSources( + findingsIndex = customFindingsIndex2, + findingsIndexPattern = customFindingsIndexPattern2 + ) + ) + + val monitorResponse2 = createMonitor(monitor2)!! + val andTrigger = randomChainedAlertTrigger( + name = "1And2", + condition = Script("monitor[id=${monitorResponse.id}] && monitor[id=${monitorResponse2.id}]") + ) + val notTrigger = randomChainedAlertTrigger( + name = "Not1OrNot2", + condition = Script("!monitor[id=${monitorResponse.id}] || !monitor[id=${monitorResponse2.id}]") + ) + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id, monitorResponse2.id), + triggers = listOf(andTrigger, notTrigger) + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + val workflowId = workflowResponse.id + + var executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! + var triggerResults = executeWorkflowResponse.workflowRunResult.triggerResults + Assert.assertEquals(triggerResults.size, 2) + Assert.assertTrue(triggerResults.containsKey(andTrigger.id)) + Assert.assertTrue(triggerResults.containsKey(notTrigger.id)) + var andTriggerResult = triggerResults[andTrigger.id] + var notTriggerResult = triggerResults[notTrigger.id] + Assert.assertTrue(notTriggerResult!!.triggered) + Assert.assertFalse(andTriggerResult!!.triggered) + var res = getWorkflowAlerts( + workflowId, + ) + var chainedAlerts = res.alerts + Assert.assertTrue(chainedAlerts.size == 1) + + // verify get alerts api with defaults set in query params returns only chained alerts and not audit alerts + val table = Table("asc", "id", null, 1, 0, "") + val getAlertsDefaultParamsResponse = client().execute( + AlertingActions.GET_ALERTS_ACTION_TYPE, + GetAlertsRequest( + table = table, + severityLevel = "ALL", + alertState = "ALL", + monitorId = null, + alertIndex = null, + monitorIds = null, + workflowIds = null, + alertIds = null + ) + ).get() + Assert.assertEquals(getAlertsDefaultParamsResponse.alerts.size, 1) + Assert.assertTrue(res.associatedAlerts.isEmpty()) + verifyAcknowledgeChainedAlerts(chainedAlerts, workflowId, 1) + Assert.assertTrue(chainedAlerts[0].executionId == executeWorkflowResponse.workflowRunResult.executionId) + Assert.assertTrue(chainedAlerts[0].monitorId == "") + Assert.assertTrue(chainedAlerts[0].triggerId == notTrigger.id) + var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 + val testDoc1 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16644, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "1", testDoc1) + + testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 and monitor2 + val testDoc2 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16645, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + 
indexDoc(index, "2", testDoc2)
+
+ testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS))
+ // Doesn't match
+ val testDoc3 = """{
+ "message" : "This is an error from IAD region",
+ "source.ip.v6.v2" : 16645,
+ "test_strict_date_time" : "$testTime",
+ "test_field_1" : "us-east-1"
+ }"""
+ indexDoc(index, "3", testDoc3)
+ executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!!
+ triggerResults = executeWorkflowResponse.workflowRunResult.triggerResults
+ Assert.assertEquals(triggerResults.size, 2)
+ Assert.assertTrue(triggerResults.containsKey(andTrigger.id))
+ Assert.assertTrue(triggerResults.containsKey(notTrigger.id))
+ andTriggerResult = triggerResults[andTrigger.id]
+ notTriggerResult = triggerResults[notTrigger.id]
+ Assert.assertFalse(notTriggerResult!!.triggered)
+ Assert.assertTrue(andTriggerResult!!.triggered)
+ val getAuditAlertsForMonitor1 = client().execute(
+ AlertingActions.GET_ALERTS_ACTION_TYPE,
+ GetAlertsRequest(
+ table = table,
+ severityLevel = "ALL",
+ alertState = "AUDIT",
+ monitorId = monitorResponse.id,
+ alertIndex = null,
+ monitorIds = null,
+ workflowIds = listOf(workflowId),
+ alertIds = null
+ )
+ ).get()
+ Assert.assertEquals(getAuditAlertsForMonitor1.alerts.size, 1)
+ res = getWorkflowAlerts(workflowId)
+ chainedAlerts = res.alerts
+ Assert.assertTrue(chainedAlerts.size == 1)
+ Assert.assertTrue(res.associatedAlerts.isNotEmpty())
+ Assert.assertTrue(chainedAlerts[0].executionId == executeWorkflowResponse.workflowRunResult.executionId)
+ Assert.assertTrue(chainedAlerts[0].monitorId == "")
+ Assert.assertTrue(chainedAlerts[0].triggerId == andTrigger.id)
+ val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults
+ assertEquals(2, monitorsRunResults.size)
+
+ assertEquals(monitor1.name, monitorsRunResults[0].monitorName)
+ assertEquals(1, monitorsRunResults[0].triggerResults.size)
+
+ Assert.assertEquals(monitor2.name, monitorsRunResults[1].monitorName)
+ Assert.assertEquals(1, monitorsRunResults[1].triggerResults.size)
+
+ Assert.assertEquals(
+ monitor1.dataSources.alertsHistoryIndex,
+ CompositeWorkflowRunner.getDelegateMonitorAlertIndex(dataSources = monitor1.dataSources, workflow, true)
+ )
+ val alerts = getAuditStateAlerts(
+ alertsIndex = monitor1.dataSources.alertsHistoryIndex, monitorId = monitorResponse.id,
+ executionId = executeWorkflowResponse.workflowRunResult.executionId
+ )
+ val associatedAlertIds = res.associatedAlerts.map { it.id }.toList()
+ // Verify that each audit alert is associated with the chained alert
+ Assert.assertTrue(associatedAlertIds.containsAll(alerts.map { it.id }.toList()))
+ assertAuditStateAlerts(monitorResponse.id, alerts)
+ assertFindings(monitorResponse.id, customFindingsIndex1, 2, 2, listOf("1", "2"))
+
+ val alerts1 = getAuditStateAlerts(
+ alertsIndex = monitor2.dataSources.alertsHistoryIndex, monitorId = monitorResponse2.id,
+ executionId = executeWorkflowResponse.workflowRunResult.executionId
+ )
+ Assert.assertTrue(associatedAlertIds.containsAll(alerts1.map { it.id }.toList()))
+ assertAuditStateAlerts(monitorResponse2.id, alerts1)
+ assertFindings(monitorResponse2.id, customFindingsIndex2, 1, 1, listOf("2"))
+ verifyAcknowledgeChainedAlerts(chainedAlerts, workflowId, 1)
+ // Verify that redundant executions of the workflow don't query old data again, i.e. the metadata update works correctly
+ val redundantExec = executeWorkflow(workflow)
+ Assert.assertFalse(redundantExec?.workflowRunResult!!.triggerResults[andTrigger.id]!!.triggered)
+ Assert.assertTrue(redundantExec.workflowRunResult.triggerResults[notTrigger.id]!!.triggered)
+ }
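+
+ // Editor's illustration (not the plugin's parser): chained alert conditions such as
+ // "monitor[id=A] && monitor[id=B]" reduce to boolean expressions over per-monitor trigger
+ // outcomes; the plugin evaluates them with an RPN resolver (see ChainedAlertExpressionParser).
+ // This standalone sketch only mirrors the semantics the tests above assert.
+ private fun illustrateChainedCondition(triggered: Map<String, Boolean>, first: String, second: String): Pair<Boolean, Boolean> {
+ val andResult = (triggered[first] ?: false) && (triggered[second] ?: false) // "1And2"
+ val notResult = !(triggered[first] ?: false) || !(triggered[second] ?: false) // "Not1OrNot2"
+ return andResult to notResult
+ }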
+
+ private fun getDelegateMonitorMetadataId(
+ workflowMetadata: WorkflowMetadata?,
+ monitorResponse: IndexMonitorResponse,
+ ) = "${workflowMetadata!!.id}-${monitorResponse.id}-metadata"
+
+ fun `test create workflow success`() {
+ val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())
+ val docLevelInput = DocLevelMonitorInput(
+ "description", listOf(index), listOf(docQuery1)
+ )
+ val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+ val customFindingsIndex = "custom_findings_index"
+ val customFindingsIndexPattern = "custom_findings_index-1"
+ val customQueryIndex = "custom_alerts_index"
+ val monitor1 = randomDocumentLevelMonitor(
+ inputs = listOf(docLevelInput),
+ triggers = listOf(trigger),
+ dataSources = DataSources(
+ queryIndex = customQueryIndex,
+ findingsIndex = customFindingsIndex,
+ findingsIndexPattern = customFindingsIndexPattern
+ )
+ )
+
+ val monitor2 = randomDocumentLevelMonitor(
+ inputs = listOf(docLevelInput),
+ triggers = listOf(trigger),
+ dataSources = DataSources(
+ queryIndex = customQueryIndex,
+ findingsIndex = customFindingsIndex,
+ findingsIndexPattern = customFindingsIndexPattern
+ )
+ )
+
+ val monitorResponse1 = createMonitor(monitor1)!!
+ val monitorResponse2 = createMonitor(monitor2)!!
+
+ val workflow = randomWorkflow(
+ monitorIds = listOf(monitorResponse1.id, monitorResponse2.id)
+ )
+
+ val workflowResponse = upsertWorkflow(workflow)!!
+ assertNotNull("Workflow creation failed", workflowResponse)
+ assertNotNull(workflowResponse.workflow)
+ assertNotEquals("response is missing Id", Monitor.NO_ID, workflowResponse.id)
+ assertTrue("incorrect version", workflowResponse.version > 0)
+
+ val workflowById = searchWorkflow(workflowResponse.id)!!
+ assertNotNull(workflowById)
+
+ // Verify workflow
+ assertNotEquals("response is missing Id", Monitor.NO_ID, workflowById.id)
+ assertTrue("incorrect version", workflowById.version > 0)
+ assertEquals("Workflow name not correct", workflow.name, workflowById.name)
+ assertEquals("Workflow owner not correct", workflow.owner, workflowById.owner)
+ assertEquals("Workflow input not correct", workflow.inputs, workflowById.inputs)
+
+ // Delegate verification
+ @Suppress("UNCHECKED_CAST")
+ val delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order }
+ assertEquals("Delegates size not correct", 2, delegates.size)
+
+ val delegate1 = delegates[0]
+ assertNotNull(delegate1)
+ assertEquals("Delegate1 order not correct", 1, delegate1.order)
+ assertEquals("Delegate1 id not correct", monitorResponse1.id, delegate1.monitorId)
+
+ val delegate2 = delegates[1]
+ assertNotNull(delegate2)
+ assertEquals("Delegate2 order not correct", 2, delegate2.order)
+ assertEquals("Delegate2 id not correct", monitorResponse2.id, delegate2.monitorId)
+ assertEquals(
+ "Delegate2 Chained finding not correct", monitorResponse1.id, delegate2.chainedMonitorFindings!!.monitorId
+ )
+ }
+
+ fun `test update workflow add monitor success`() {
+ val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())
+ val docLevelInput = DocLevelMonitorInput(
+ "description", listOf(index), listOf(docQuery1)
+ )
+ val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+ val customFindingsIndex = "custom_findings_index"
+ val customFindingsIndexPattern = "custom_findings_index-1"
+ val customQueryIndex = "custom_alerts_index"
+ val monitor1 = randomDocumentLevelMonitor(
+ inputs = listOf(docLevelInput),
+ triggers = listOf(trigger),
+
dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + + val monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + + val monitorResponse1 = createMonitor(monitor1)!! + val monitorResponse2 = createMonitor(monitor2)!! + + val workflow = randomWorkflow( + monitorIds = listOf(monitorResponse1.id, monitorResponse2.id) + ) + + val workflowResponse = upsertWorkflow(workflow)!! + assertNotNull("Workflow creation failed", workflowResponse) + assertNotNull(workflowResponse.workflow) + assertNotEquals("response is missing Id", Monitor.NO_ID, workflowResponse.id) + assertTrue("incorrect version", workflowResponse.version > 0) + + var workflowById = searchWorkflow(workflowResponse.id)!! + assertNotNull(workflowById) + + val monitor3 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + val monitorResponse3 = createMonitor(monitor3)!! + + val updatedWorkflowResponse = upsertWorkflow( + randomWorkflow( + monitorIds = listOf(monitorResponse1.id, monitorResponse2.id, monitorResponse3.id) + ), + workflowResponse.id, + RestRequest.Method.PUT + )!! + + assertNotNull("Workflow creation failed", updatedWorkflowResponse) + assertNotNull(updatedWorkflowResponse.workflow) + assertEquals("Workflow id changed", workflowResponse.id, updatedWorkflowResponse.id) + assertTrue("incorrect version", updatedWorkflowResponse.version > 0) + + workflowById = searchWorkflow(updatedWorkflowResponse.id)!! 
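+ // The update appended monitor3, so the persisted delegate sequence should now chain findings
+ // linearly: (order 1, monitor1, no source) -> (order 2, monitor2, chained to monitor1) ->
+ // (order 3, monitor3, chained to monitor2). An editor's sketch of that expected shape,
+ // checked in detail by the assertions below:
+ val expectedChaining = listOf(1 to null, 2 to monitorResponse1.id, 3 to monitorResponse2.id)
+ assertEquals(3, expectedChaining.size)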
+
+ // Verify workflow
+ assertNotEquals("response is missing Id", Monitor.NO_ID, workflowById.id)
+ assertTrue("incorrect version", workflowById.version > 0)
+ assertEquals("Workflow name not correct", updatedWorkflowResponse.workflow.name, workflowById.name)
+ assertEquals("Workflow owner not correct", updatedWorkflowResponse.workflow.owner, workflowById.owner)
+ assertEquals("Workflow input not correct", updatedWorkflowResponse.workflow.inputs, workflowById.inputs)
+
+ // Delegate verification
+ @Suppress("UNCHECKED_CAST")
+ val delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order }
+ assertEquals("Delegates size not correct", 3, delegates.size)
+
+ val delegate1 = delegates[0]
+ assertNotNull(delegate1)
+ assertEquals("Delegate1 order not correct", 1, delegate1.order)
+ assertEquals("Delegate1 id not correct", monitorResponse1.id, delegate1.monitorId)
+
+ val delegate2 = delegates[1]
+ assertNotNull(delegate2)
+ assertEquals("Delegate2 order not correct", 2, delegate2.order)
+ assertEquals("Delegate2 id not correct", monitorResponse2.id, delegate2.monitorId)
+ assertEquals(
+ "Delegate2 Chained finding not correct", monitorResponse1.id, delegate2.chainedMonitorFindings!!.monitorId
+ )
+
+ val delegate3 = delegates[2]
+ assertNotNull(delegate3)
+ assertEquals("Delegate3 order not correct", 3, delegate3.order)
+ assertEquals("Delegate3 id not correct", monitorResponse3.id, delegate3.monitorId)
+ assertEquals(
+ "Delegate3 Chained finding not correct", monitorResponse2.id, delegate3.chainedMonitorFindings!!.monitorId
+ )
+ }
+
+ fun `test update workflow change order of delegate monitors`() {
+ val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())
+ val docLevelInput = DocLevelMonitorInput(
+ "description", listOf(index), listOf(docQuery1)
+ )
+ val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+ val customFindingsIndex = "custom_findings_index"
+ val customFindingsIndexPattern = "custom_findings_index-1"
+ val customQueryIndex = "custom_alerts_index"
+ val monitor1 = randomDocumentLevelMonitor(
+ inputs = listOf(docLevelInput),
+ triggers = listOf(trigger),
+ dataSources = DataSources(
+ queryIndex = customQueryIndex,
+ findingsIndex = customFindingsIndex,
+ findingsIndexPattern = customFindingsIndexPattern
+ )
+ )
+
+ val monitor2 = randomDocumentLevelMonitor(
+ inputs = listOf(docLevelInput),
+ triggers = listOf(trigger),
+ dataSources = DataSources(
+ queryIndex = customQueryIndex,
+ findingsIndex = customFindingsIndex,
+ findingsIndexPattern = customFindingsIndexPattern
+ )
+ )
+
+ val monitorResponse1 = createMonitor(monitor1)!!
+ val monitorResponse2 = createMonitor(monitor2)!!
+
+ val workflow = randomWorkflow(
+ monitorIds = listOf(monitorResponse1.id, monitorResponse2.id)
+ )
+
+ val workflowResponse = upsertWorkflow(workflow)!!
+ assertNotNull("Workflow creation failed", workflowResponse)
+ assertNotNull(workflowResponse.workflow)
+ assertNotEquals("response is missing Id", Monitor.NO_ID, workflowResponse.id)
+ assertTrue("incorrect version", workflowResponse.version > 0)
+
+ var workflowById = searchWorkflow(workflowResponse.id)!!
+ assertNotNull(workflowById)
+
+ val updatedWorkflowResponse = upsertWorkflow(
+ randomWorkflow(
+ monitorIds = listOf(monitorResponse2.id, monitorResponse1.id)
+ ),
+ workflowResponse.id,
+ RestRequest.Method.PUT
+ )!!
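+ // Editor's note: reversing the delegate order also reverses the chained-findings edge;
+ // monitor2 now runs first with no chained source and monitor1 chains off monitor2's
+ // findings, which is exactly what the delegate assertions below verify.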
+ + assertNotNull("Workflow creation failed", updatedWorkflowResponse) + assertNotNull(updatedWorkflowResponse.workflow) + assertEquals("Workflow id changed", workflowResponse.id, updatedWorkflowResponse.id) + assertTrue("incorrect version", updatedWorkflowResponse.version > 0) + + workflowById = searchWorkflow(updatedWorkflowResponse.id)!! + + // Verify workflow + assertNotEquals("response is missing Id", Monitor.NO_ID, workflowById.id) + assertTrue("incorrect version", workflowById.version > 0) + assertEquals("Workflow name not correct", updatedWorkflowResponse.workflow.name, workflowById.name) + assertEquals("Workflow owner not correct", updatedWorkflowResponse.workflow.owner, workflowById.owner) + assertEquals("Workflow input not correct", updatedWorkflowResponse.workflow.inputs, workflowById.inputs) + + // Delegate verification + @Suppress("UNCHECKED_CAST") + val delegates = (workflowById.inputs as List)[0].sequence.delegates.sortedBy { it.order } + assertEquals("Delegates size not correct", 2, delegates.size) + + val delegate1 = delegates[0] + assertNotNull(delegate1) + assertEquals("Delegate1 order not correct", 1, delegate1.order) + assertEquals("Delegate1 id not correct", monitorResponse2.id, delegate1.monitorId) + + val delegate2 = delegates[1] + assertNotNull(delegate2) + assertEquals("Delegate2 order not correct", 2, delegate2.order) + assertEquals("Delegate2 id not correct", monitorResponse1.id, delegate2.monitorId) + assertEquals( + "Delegate2 Chained finding not correct", monitorResponse2.id, delegate2.chainedMonitorFindings!!.monitorId + ) + } + + fun `test update workflow remove monitor success`() { + val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery1) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "custom_findings_index-1" + val customQueryIndex = "custom_alerts_index" + val monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + + val monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + + val monitorResponse1 = createMonitor(monitor1)!! + val monitorResponse2 = createMonitor(monitor2)!! + + val workflow = randomWorkflow( + monitorIds = listOf(monitorResponse1.id, monitorResponse2.id) + ) + + val workflowResponse = upsertWorkflow(workflow)!! + assertNotNull("Workflow creation failed", workflowResponse) + assertNotNull(workflowResponse.workflow) + assertNotEquals("response is missing Id", Monitor.NO_ID, workflowResponse.id) + assertTrue("incorrect version", workflowResponse.version > 0) + + var workflowById = searchWorkflow(workflowResponse.id)!! + assertNotNull(workflowById) + + val updatedWorkflowResponse = upsertWorkflow( + randomWorkflow( + monitorIds = listOf(monitorResponse1.id) + ), + workflowResponse.id, + RestRequest.Method.PUT + )!! 
+ + assertNotNull("Workflow creation failed", updatedWorkflowResponse) + assertNotNull(updatedWorkflowResponse.workflow) + assertEquals("Workflow id changed", workflowResponse.id, updatedWorkflowResponse.id) + assertTrue("incorrect version", updatedWorkflowResponse.version > 0) + + workflowById = searchWorkflow(updatedWorkflowResponse.id)!! + + // Verify workflow + assertNotEquals("response is missing Id", Monitor.NO_ID, workflowById.id) + assertTrue("incorrect version", workflowById.version > 0) + assertEquals("Workflow name not correct", updatedWorkflowResponse.workflow.name, workflowById.name) + assertEquals("Workflow owner not correct", updatedWorkflowResponse.workflow.owner, workflowById.owner) + assertEquals("Workflow input not correct", updatedWorkflowResponse.workflow.inputs, workflowById.inputs) + + // Delegate verification + @Suppress("UNCHECKED_CAST") + val delegates = (workflowById.inputs as List)[0].sequence.delegates.sortedBy { it.order } + assertEquals("Delegates size not correct", 1, delegates.size) + + val delegate1 = delegates[0] + assertNotNull(delegate1) + assertEquals("Delegate1 order not correct", 1, delegate1.order) + assertEquals("Delegate1 id not correct", monitorResponse1.id, delegate1.monitorId) + } + + fun `test update workflow doesn't exist failure`() { + val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery1) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "custom_findings_index-1" + val customQueryIndex = "custom_alerts_index" + val monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + + val monitorResponse1 = createMonitor(monitor1)!! + + val workflow = randomWorkflow( + monitorIds = listOf(monitorResponse1.id) + ) + val workflowResponse = upsertWorkflow(workflow)!! + assertNotNull("Workflow creation failed", workflowResponse) + + try { + upsertWorkflow(workflow, "testId", RestRequest.Method.PUT) + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning GetWorkflow Action error ", + it.contains("Workflow with testId is not found") + ) + } + } + } + + fun `test get workflow`() { + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + ) + + val monitorResponse = createMonitor(monitor)!! + + val workflowRequest = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + + val workflowResponse = upsertWorkflow(workflowRequest)!! + assertNotNull("Workflow creation failed", workflowResponse) + assertNotNull(workflowResponse.workflow) + assertNotEquals("response is missing Id", Monitor.NO_ID, workflowResponse.id) + assertTrue("incorrect version", workflowResponse.version > 0) + + val getWorkflowResponse = getWorkflowById(id = workflowResponse.id) + assertNotNull(getWorkflowResponse) + + val workflowById = getWorkflowResponse.workflow!! 
+        // Verify workflow
+        assertNotEquals("response is missing Id", Monitor.NO_ID, getWorkflowResponse.id)
+        assertTrue("incorrect version", getWorkflowResponse.version > 0)
+        assertEquals("Workflow name not correct", workflowRequest.name, workflowById.name)
+        assertEquals("Workflow owner not correct", workflowRequest.owner, workflowById.owner)
+        assertEquals("Workflow input not correct", workflowRequest.inputs, workflowById.inputs)
+
+        // Delegate verification
+        @Suppress("UNCHECKED_CAST")
+        val delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order }
+        assertEquals("Delegates size not correct", 1, delegates.size)
+
+        val delegate = delegates[0]
+        assertNotNull(delegate)
+        assertEquals("Delegate order not correct", 1, delegate.order)
+        assertEquals("Delegate id not correct", monitorResponse.id, delegate.monitorId)
+    }
+
+    fun `test get workflow for invalid id monitor index doesn't exist`() {
+        // Get workflow for a non-existing workflow id
+        try {
+            getWorkflowById(id = "-1")
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning GetWorkflow Action error ",
+                    it.contains("Workflow not found")
+                )
+            }
+        }
+    }
+
+    fun `test get workflow for invalid id monitor index exists`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger),
+        )
+        createMonitor(monitor)
+        // Get workflow for a non-existing workflow id
+        try {
+            getWorkflowById(id = "-1")
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning GetWorkflow Action error ",
+                    it.contains("Workflow not found")
+                )
+            }
+        }
+    }
+
+    fun `test delete workflow keeping delegate monitor`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+
+        val monitorResponse = createMonitor(monitor)!!
+
+        val workflowRequest = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id)
+        )
+        val workflowResponse = upsertWorkflow(workflowRequest)!!
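+        // Delete only the workflow; the delegate monitor itself should survive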
+ val workflowId = workflowResponse.id + val getWorkflowResponse = getWorkflowById(id = workflowResponse.id) + + assertNotNull(getWorkflowResponse) + assertEquals(workflowId, getWorkflowResponse.id) + + deleteWorkflow(workflowId, false) + // Verify that the workflow is deleted + try { + getWorkflowById(workflowId) + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning GetWorkflow Action error ", + it.contains("Workflow not found.") + ) + } + } + // Verify that the monitor is not deleted + val existingDelegate = getMonitorResponse(monitorResponse.id) + assertNotNull(existingDelegate) + } + + fun `test delete workflow delegate monitor deleted`() { + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + + val monitorResponse = createMonitor(monitor)!! + + val workflowRequest = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + val workflowResponse = upsertWorkflow(workflowRequest)!! + val workflowId = workflowResponse.id + val getWorkflowResponse = getWorkflowById(id = workflowResponse.id) + + assertNotNull(getWorkflowResponse) + assertEquals(workflowId, getWorkflowResponse.id) + + deleteWorkflow(workflowId, true) + // Verify that the workflow is deleted + try { + getWorkflowById(workflowId) + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning GetWorkflow Action error ", + it.contains("Workflow not found.") + ) + } + } + // Verify that the monitor is deleted + try { + getMonitorResponse(monitorResponse.id) + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning GetMonitor Action error ", + it.contains("Monitor not found") + ) + } + } + } + + fun `test delete executed workflow with metadata deleted`() { + val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger1) + ) + val monitorResponse = createMonitor(monitor1)!! + + val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) + val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2)) + val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput2), + triggers = listOf(trigger2), + ) + + val monitorResponse2 = createMonitor(monitor2)!! + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id, monitorResponse2.id) + ) + val workflowResponse = upsertWorkflow(workflow)!! 
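+        // Run the workflow once below so workflow metadata and per-delegate monitor metadata documents exist before the delete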
+ val workflowById = searchWorkflow(workflowResponse.id) + assertNotNull(workflowById) + + var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) + // Matches monitor1 + val testDoc1 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16644, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "1", testDoc1) + + val workflowId = workflowResponse.id + val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! + val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults + assertEquals(2, monitorsRunResults.size) + + val workflowMetadata = searchWorkflowMetadata(workflowId) + assertNotNull(workflowMetadata) + + val monitorMetadataId1 = getDelegateMonitorMetadataId(workflowMetadata, monitorResponse) + val monitorMetadata1 = searchMonitorMetadata(monitorMetadataId1) + assertNotNull(monitorMetadata1) + + val monitorMetadataId2 = getDelegateMonitorMetadataId(workflowMetadata, monitorResponse2) + val monitorMetadata2 = searchMonitorMetadata(monitorMetadataId2) + assertNotNull(monitorMetadata2) + + assertFalse(monitorMetadata1!!.id == monitorMetadata2!!.id) + + deleteWorkflow(workflowId, true) + // Verify that the workflow is deleted + try { + getWorkflowById(workflowId) + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning GetWorkflow Action error ", + it.contains("Workflow not found.") + ) + } + } + // Verify that the workflow metadata is deleted + try { + searchWorkflowMetadata(workflowId) + fail("expected searchWorkflowMetadata method to throw exception") + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning GetMonitor Action error ", + it.contains("List is empty") + ) + } + } + // Verify that the monitors metadata are deleted + try { + searchMonitorMetadata(monitorMetadataId1) + fail("expected searchMonitorMetadata method to throw exception") + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning GetMonitor Action error ", + it.contains("List is empty") + ) + } + } + + try { + searchMonitorMetadata(monitorMetadataId2) + fail("expected searchMonitorMetadata method to throw exception") + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning GetMonitor Action error ", + it.contains("List is empty") + ) + } + } + } + + fun `test delete workflow delegate monitor part of another workflow not deleted`() { + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + + val monitorResponse = createMonitor(monitor)!! + + val workflowRequest = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + val workflowResponse = upsertWorkflow(workflowRequest)!! + val workflowId = workflowResponse.id + val getWorkflowResponse = getWorkflowById(id = workflowResponse.id) + + assertNotNull(getWorkflowResponse) + assertEquals(workflowId, getWorkflowResponse.id) + + val workflowRequest2 = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + val workflowResponse2 = upsertWorkflow(workflowRequest2)!! 
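+        // The delegate monitor is now shared by a second workflow, so deleting the first workflow together with its delegate monitors must fail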
+        val workflowId2 = workflowResponse2.id
+        val getWorkflowResponse2 = getWorkflowById(id = workflowResponse2.id)
+
+        assertNotNull(getWorkflowResponse2)
+        assertEquals(workflowId2, getWorkflowResponse2.id)
+
+        try {
+            deleteWorkflow(workflowId, true)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning GetWorkflow Action error ",
+                    it.contains("[Not allowed to delete ${monitorResponse.id} monitors")
+                )
+            }
+        }
+        val existingMonitor = getMonitorResponse(monitorResponse.id)
+        assertNotNull(existingMonitor)
+    }
+
+    fun `test trying to delete monitor that is part of workflow sequence`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+
+        val monitorResponse = createMonitor(monitor)!!
+
+        val workflowRequest = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id)
+        )
+
+        val workflowResponse = upsertWorkflow(workflowRequest)!!
+        val workflowId = workflowResponse.id
+        val getWorkflowResponse = getWorkflowById(id = workflowResponse.id)
+
+        assertNotNull(getWorkflowResponse)
+        assertEquals(workflowId, getWorkflowResponse.id)
+
+        // Verify that the monitor can't be deleted because it's included in the workflow
+        try {
+            deleteMonitor(monitorResponse.id)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning DeleteMonitor Action error ",
+                    it.contains("Monitor can't be deleted because it is a part of workflow(s)")
+                )
+            }
+        }
+    }
+
+    fun `test delete workflow for invalid id monitor index doesn't exist`() {
+        // Try deleting a non-existing workflow
+        try {
+            deleteWorkflow("-1")
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning DeleteWorkflow Action error ",
+                    it.contains("Workflow not found.")
+                )
+            }
+        }
+    }
+
+    fun `test delete workflow for invalid id monitor index exists`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger),
+        )
+        createMonitor(monitor)
+        // Try deleting a non-existing workflow
+        try {
+            deleteWorkflow("-1")
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning DeleteWorkflow Action error ",
+                    it.contains("Workflow not found.")
+                )
+            }
+        }
+    }
+
+    fun `test create workflow without delegate failure`() {
+        val workflow = randomWorkflow(
+            monitorIds = Collections.emptyList()
+        )
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Delegates list can not be empty.")
+                )
+            }
+        }
+    }
+
+    fun `test create workflow with 26 delegates failure`() {
+        val monitorsIds = mutableListOf<String>()
+        for (i in 0..25) {
+            monitorsIds.add(UUID.randomUUID().toString())
+        }
+        val workflow = randomWorkflow(
+            monitorIds = monitorsIds
+        )
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Delegates list can not be larger then 25.")
+                )
+            }
+        }
+    }
+
+    fun `test update workflow without delegate failure`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val monitor1 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+
+        val monitor2 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger),
+        )
+
+        val monitorResponse1 = createMonitor(monitor1)!!
+        val monitorResponse2 = createMonitor(monitor2)!!
+
+        var workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse1.id, monitorResponse2.id)
+        )
+
+        val workflowResponse = upsertWorkflow(workflow)!!
+        assertNotNull("Workflow creation failed", workflowResponse)
+
+        workflow = randomWorkflow(
+            id = workflowResponse.id,
+            monitorIds = Collections.emptyList()
+        )
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Delegates list can not be empty.")
+                )
+            }
+        }
+    }
+
+    fun `test create workflow duplicate delegate failure`() {
+        val workflow = randomWorkflow(
+            monitorIds = listOf("1", "1", "2")
+        )
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Duplicate delegates not allowed")
+                )
+            }
+        }
+    }
+
+    fun `test update workflow duplicate delegate failure`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+
+        val monitorResponse = createMonitor(monitor)!!
+
+        var workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id)
+        )
+
+        val workflowResponse = upsertWorkflow(workflow)!!
+        assertNotNull("Workflow creation failed", workflowResponse)
+
+        workflow = randomWorkflow(
+            id = workflowResponse.id,
+            monitorIds = listOf("1", "1", "2")
+        )
+        try {
+            upsertWorkflow(workflow)
+        } catch (e: Exception) {
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Duplicate delegates not allowed")
+                )
+            }
+        }
+    }
+
+    fun `test create workflow delegate monitor doesn't exist failure`() {
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+        val monitorResponse = createMonitor(monitor)!!
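+        // "-1" is not an existing monitor id, so indexing the workflow should fail validation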
+ + val workflow = randomWorkflow( + monitorIds = listOf("-1", monitorResponse.id) + ) + try { + upsertWorkflow(workflow) + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("are not valid monitor ids") + ) + } + } + } + + fun `test update workflow delegate monitor doesn't exist failure`() { + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val monitorResponse = createMonitor(monitor)!! + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + val workflowResponse = upsertWorkflow(workflow)!! + assertNotNull("Workflow creation failed", workflowResponse) + + workflow = randomWorkflow( + id = workflowResponse.id, + monitorIds = listOf("-1", monitorResponse.id) + ) + + try { + upsertWorkflow(workflow) + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("are not valid monitor ids") + ) + } + } + } + + fun `test create workflow sequence order not correct failure`() { + val delegates = listOf( + Delegate(1, "monitor-1"), + Delegate(1, "monitor-2"), + Delegate(2, "monitor-3") + ) + val workflow = randomWorkflowWithDelegates( + delegates = delegates + ) + try { + upsertWorkflow(workflow) + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Sequence ordering of delegate monitor shouldn't contain duplicate order values") + ) + } + } + } + + fun `test update workflow sequence order not correct failure`() { + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val monitorResponse = createMonitor(monitor)!! + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + val workflowResponse = upsertWorkflow(workflow)!! 
+ assertNotNull("Workflow creation failed", workflowResponse) + + val delegates = listOf( + Delegate(1, "monitor-1"), + Delegate(1, "monitor-2"), + Delegate(2, "monitor-3") + ) + workflow = randomWorkflowWithDelegates( + id = workflowResponse.id, + delegates = delegates + ) + try { + upsertWorkflow(workflow) + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Sequence ordering of delegate monitor shouldn't contain duplicate order values") + ) + } + } + } + + fun `test create workflow chained findings monitor not in sequence failure`() { + val delegates = listOf( + Delegate(1, "monitor-1"), + Delegate(2, "monitor-2", ChainedMonitorFindings("monitor-1")), + Delegate(3, "monitor-3", ChainedMonitorFindings("monitor-x")) + ) + val workflow = randomWorkflowWithDelegates( + delegates = delegates + ) + + try { + upsertWorkflow(workflow) + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Chained Findings Monitor monitor-x doesn't exist in sequence") + ) + } + } + } + + fun `test create workflow query monitor chained findings monitor failure`() { + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val docMonitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val docMonitorResponse = createMonitor(docMonitor)!! + + val queryMonitor = randomQueryLevelMonitor() + val queryMonitorResponse = createMonitor(queryMonitor)!! + + val workflow = randomWorkflow( + monitorIds = listOf(queryMonitorResponse.id, docMonitorResponse.id) + ) + try { + upsertWorkflow(workflow) + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Query level monitor can't be part of chained findings") + ) + } + } + } + + fun `test create workflow delegate and chained finding monitor different indices failure`() { + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val docMonitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val docMonitorResponse = createMonitor(docMonitor)!! + + val index1 = "$index-1" + createTestIndex(index1) + + val docLevelInput1 = DocLevelMonitorInput( + "description", listOf(index1), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + + val docMonitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger) + ) + val docMonitorResponse1 = createMonitor(docMonitor1)!! 
+ + val workflow = randomWorkflow( + monitorIds = listOf(docMonitorResponse1.id, docMonitorResponse.id) + ) + try { + upsertWorkflow(workflow) + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("doesn't query all of chained findings monitor's indices") + ) + } + } + } + + fun `test create workflow when monitor index not initialized failure`() { + val delegates = listOf( + Delegate(1, "monitor-1") + ) + val workflow = randomWorkflowWithDelegates( + delegates = delegates + ) + + try { + upsertWorkflow(workflow) + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Monitors not found") + ) + } + } + } + + fun `test update workflow chained findings monitor not in sequence failure`() { + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val monitorResponse = createMonitor(monitor)!! + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + val workflowResponse = upsertWorkflow(workflow)!! + assertNotNull("Workflow creation failed", workflowResponse) + + val delegates = listOf( + Delegate(1, "monitor-1"), + Delegate(2, "monitor-2", ChainedMonitorFindings("monitor-1")), + Delegate(3, "monitor-3", ChainedMonitorFindings("monitor-x")) + ) + workflow = randomWorkflowWithDelegates( + id = workflowResponse.id, + delegates = delegates + ) + + try { + upsertWorkflow(workflow) + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Chained Findings Monitor monitor-x doesn't exist in sequence") + ) + } + } + } + + fun `test create workflow chained findings order not correct failure`() { + val delegates = listOf( + Delegate(1, "monitor-1"), + Delegate(3, "monitor-2", ChainedMonitorFindings("monitor-1")), + Delegate(2, "monitor-3", ChainedMonitorFindings("monitor-2")) + ) + val workflow = randomWorkflowWithDelegates( + delegates = delegates + ) + + try { + upsertWorkflow(workflow) + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Chained Findings Monitor monitor-2 should be executed before monitor monitor-3") + ) + } + } + } + + fun `test update workflow chained findings order not correct failure`() { + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val monitorResponse = createMonitor(monitor)!! + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + val workflowResponse = upsertWorkflow(workflow)!! 
+ assertNotNull("Workflow creation failed", workflowResponse) + + val delegates = listOf( + Delegate(1, "monitor-1"), + Delegate(3, "monitor-2", ChainedMonitorFindings("monitor-1")), + Delegate(2, "monitor-3", ChainedMonitorFindings("monitor-2")) + ) + workflow = randomWorkflowWithDelegates( + delegates = delegates + ) + + try { + upsertWorkflow(workflow) + } catch (e: Exception) { + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Chained Findings Monitor monitor-2 should be executed before monitor monitor-3") + ) + } + } + } + + fun `test create workflow with chained alert triggers`() { + val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery1) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val customFindingsIndex = "custom_findings_index" + val customFindingsIndexPattern = "custom_findings_index-1" + val customQueryIndex = "custom_alerts_index" + val monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + + val monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger), + dataSources = DataSources( + queryIndex = customQueryIndex, + findingsIndex = customFindingsIndex, + findingsIndexPattern = customFindingsIndexPattern + ) + ) + + val monitorResponse1 = createMonitor(monitor1)!! + val monitorResponse2 = createMonitor(monitor2)!! + + val chainedAlertTrigger1 = randomChainedAlertTrigger( + condition = Script("monitor[id=${monitorResponse1.id}] && monitor[id=${monitorResponse2.id}") + ) + val chainedAlertTrigger2 = randomChainedAlertTrigger( + condition = Script("monitor[id=${monitorResponse1.id}] || monitor[id=${monitorResponse2.id}]") + ) + val workflow = randomWorkflow( + monitorIds = listOf(monitorResponse1.id, monitorResponse2.id), + triggers = listOf( + chainedAlertTrigger1, + chainedAlertTrigger2 + ) + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id)!! 
+ + assertEquals("Workflow input not correct", workflowById.triggers.size, 2) + assertEquals("Workflow input not correct", workflowById.triggers.get(0).name, chainedAlertTrigger1.name) + assertEquals("Workflow input not correct", workflowById.triggers.get(1).name, chainedAlertTrigger2.name) + assertEquals("Workflow input not correct", workflowById.triggers.get(0).id, chainedAlertTrigger1.id) + assertEquals("Workflow input not correct", workflowById.triggers.get(1).id, chainedAlertTrigger2.id) + assertEquals( + "Workflow input not correct", + (workflowById.triggers.get(0) as ChainedAlertTrigger).condition.idOrCode, + chainedAlertTrigger1.condition.idOrCode + ) + assertEquals( + "Workflow input not correct", + (workflowById.triggers.get(1) as ChainedAlertTrigger).condition.idOrCode, + chainedAlertTrigger2.condition.idOrCode + ) + } + + fun `test postIndex on workflow update with trigger deletion`() { + val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger1) + ) + var monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger1) + ) + val monitorResponse = createMonitor(monitor1)!! + val monitorResponse2 = createMonitor(monitor2)!! + + val andTrigger = randomChainedAlertTrigger( + name = "1And2", + condition = Script("monitor[id=${monitorResponse.id}] && monitor[id=${monitorResponse2.id}]") + ) + val notTrigger = randomChainedAlertTrigger( + name = "Not1OrNot2", + condition = Script("!monitor[id=${monitorResponse.id}] || !monitor[id=${monitorResponse2.id}]") + ) + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id, monitorResponse2.id), + triggers = listOf(andTrigger) + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc1 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16644, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "1", testDoc1) + val workflowId = workflowById!!.id + var executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! + var res = getWorkflowAlerts( + workflowId, + ) + var chainedAlerts = res.alerts + Assert.assertTrue(chainedAlerts.size == 1) + val updatedWorkflowResponse = upsertWorkflow( + workflowById.copy(triggers = listOf(notTrigger)), + workflowResponse.id, + RestRequest.Method.PUT + )!! 
+ val updatedWorkflow = searchWorkflow(workflowResponse.id) + Assert.assertTrue(updatedWorkflow!!.triggers.size == 1) + Assert.assertTrue(updatedWorkflow.triggers[0].id == notTrigger.id) + OpenSearchTestCase.waitUntil({ + val searchRequest = SearchRequest(AlertIndices.ALERT_HISTORY_ALL) + val sr = client().search(searchRequest).get() + sr.hits.hits.size == 3 + }, 5, TimeUnit.MINUTES) + val searchRequest = SearchRequest(AlertIndices.ALERT_HISTORY_ALL) + val sr = client().search(searchRequest).get() + Assert.assertTrue(sr.hits.hits.size == 3) + val alerts = sr.hits.map { hit -> + val xcp = XContentHelper.createParser( + xContentRegistry(), + LoggingDeprecationHandler.INSTANCE, + hit.sourceRef, + XContentType.JSON + ) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + val alert = Alert.parse(xcp, hit.id, hit.version) + alert + } + Assert.assertTrue(alerts.stream().anyMatch { it.state == Alert.State.DELETED && chainedAlerts[0].id == it.id }) + } + + fun `test postDelete on workflow deletion`() { + val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger1) + ) + var monitor2 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger1) + ) + val monitorResponse = createMonitor(monitor1)!! + val monitorResponse2 = createMonitor(monitor2)!! + + val andTrigger = randomChainedAlertTrigger( + name = "1And2", + condition = Script("monitor[id=${monitorResponse.id}] && monitor[id=${monitorResponse2.id}]") + ) + val notTrigger = randomChainedAlertTrigger( + name = "Not1OrNot2", + condition = Script("!monitor[id=${monitorResponse.id}] || !monitor[id=${monitorResponse2.id}]") + ) + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id, monitorResponse2.id), + triggers = listOf(andTrigger) + ) + val workflowResponse = upsertWorkflow(workflow)!! + val workflowById = searchWorkflow(workflowResponse.id) + val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) + val testDoc1 = """{ + "message" : "This is an error from IAD region", + "source.ip.v6.v2" : 16644, + "test_strict_date_time" : "$testTime", + "test_field_1" : "us-west-2" + }""" + indexDoc(index, "1", testDoc1) + val workflowId = workflowById!!.id + var executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! 
+        var res = getWorkflowAlerts(
+            workflowId,
+        )
+        var chainedAlerts = res.alerts
+        Assert.assertTrue(chainedAlerts.size == 1)
+        val deleteRes = deleteWorkflow(workflowId, false)
+        logger.info(deleteRes)
+        OpenSearchTestCase.waitUntil({
+            val searchRequest = SearchRequest(AlertIndices.ALERT_HISTORY_ALL)
+            val sr = client().search(searchRequest).get()
+            sr.hits.hits.size == 3
+        }, 5, TimeUnit.MINUTES)
+        val searchRequest = SearchRequest(AlertIndices.ALERT_HISTORY_ALL)
+        val sr = client().search(searchRequest).get()
+        Assert.assertTrue(sr.hits.hits.size == 3)
+        val alerts = sr.hits.map { hit ->
+            val xcp = XContentHelper.createParser(
+                xContentRegistry(),
+                LoggingDeprecationHandler.INSTANCE,
+                hit.sourceRef,
+                XContentType.JSON
+            )
+            XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp)
+            val alert = Alert.parse(xcp, hit.id, hit.version)
+            alert
+        }
+        Assert.assertTrue(alerts.stream().anyMatch { it.state == Alert.State.DELETED && chainedAlerts[0].id == it.id })
+    }
+
+    fun `test get chained alerts with alertId paginating for associated alerts`() {
+        val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf())
+        val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1))
+        val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        var monitor1 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput1),
+            triggers = listOf(trigger1)
+        )
+        var monitor2 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput1),
+            triggers = listOf(trigger1)
+        )
+        val monitorResponse = createMonitor(monitor1)!!
+        val monitorResponse2 = createMonitor(monitor2)!!
+
+        val andTrigger = randomChainedAlertTrigger(
+            name = "1And2",
+            condition = Script("monitor[id=${monitorResponse.id}] && monitor[id=${monitorResponse2.id}]")
+        )
+        var workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id, monitorResponse2.id),
+            triggers = listOf(andTrigger)
+        )
+        val workflowResponse = upsertWorkflow(workflow)!!
+        val workflowById = searchWorkflow(workflowResponse.id)
+        val workflowId = workflowById!!.id
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc1 = """{
+            "message" : "This is an error from IAD region",
+            "source.ip.v6.v2" : 16644,
+            "test_strict_date_time" : "$testTime",
+            "test_field_1" : "us-west-2"
+        }"""
+        var i = 1
+        val indexRequests = mutableListOf<IndexRequest>()
+        while (i++ < 300) {
+            indexRequests += IndexRequest(index).source(testDoc1, XContentType.JSON).id("$i").opType(DocWriteRequest.OpType.INDEX)
+        }
+        val bulkResponse: BulkResponse =
+            client().bulk(BulkRequest().add(indexRequests).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)).get()
+        if (bulkResponse.hasFailures()) {
+            fail("Bulk request to index to test index has failed")
+        }
+        var executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!!
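+        // Roughly 300 matching docs were bulk indexed for each delegate monitor, giving enough associated alerts to page through in blocks of 100 below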
+        var res = getWorkflowAlerts(
+            workflowId = workflowId
+        )
+        Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults[andTrigger.id]!!.triggered)
+        var chainedAlerts = res.alerts
+        Assert.assertTrue(chainedAlerts.size == 1)
+        Assert.assertEquals(res.associatedAlerts.size, 10)
+        var res100to200 = getWorkflowAlerts(
+            workflowId = workflowId,
+            alertIds = listOf(res.alerts[0].id),
+            table = Table("asc", "monitor_id", null, 100, 100, null)
+        )
+        Assert.assertEquals(res100to200.associatedAlerts.size, 100)
+        var res200to300 = getWorkflowAlerts(
+            workflowId = workflowId,
+            alertIds = listOf(res.alerts[0].id),
+            table = Table("asc", "monitor_id", null, 100, 201, null)
+        )
+        Assert.assertEquals(res200to300.associatedAlerts.size, 100)
+        var res0to99 = getWorkflowAlerts(
+            workflowId = workflowId,
+            alertIds = listOf(res.alerts[0].id),
+            table = Table("asc", "monitor_id", null, 100, 0, null)
+        )
+        Assert.assertEquals(res0to99.associatedAlerts.size, 100)
+
+        val ids100to200 = res100to200.associatedAlerts.stream().map { it.id }.collect(Collectors.toSet())
+        val idsSet0to99 = res0to99.associatedAlerts.stream().map { it.id }.collect(Collectors.toSet())
+        val idsSet200to300 = res200to300.associatedAlerts.stream().map { it.id }.collect(Collectors.toSet())
+
+        Assert.assertTrue(idsSet0to99.all { it !in ids100to200 })
+        Assert.assertTrue(idsSet0to99.all { it !in idsSet200to300 })
+        Assert.assertTrue(ids100to200.all { it !in idsSet200to300 })
+    }
+
+    fun `test existing chained alert active alert is updated on consecutive trigger condition match`() {
+        val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf())
+        val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1))
+        val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        var monitor1 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput1),
+            triggers = listOf(trigger1)
+        )
+        var monitor2 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput1),
+            triggers = listOf(trigger1)
+        )
+        val monitorResponse = createMonitor(monitor1)!!
+        val monitorResponse2 = createMonitor(monitor2)!!
+        val notTrigger = randomChainedAlertTrigger(
+            name = "Not1OrNot2",
+            condition = Script("!monitor[id=${monitorResponse.id}] || !monitor[id=${monitorResponse2.id}]")
+        )
+        var workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id, monitorResponse2.id),
+            triggers = listOf(notTrigger)
+        )
+        val workflowResponse = upsertWorkflow(workflow)!!
+        val workflowById = searchWorkflow(workflowResponse.id)
+        val workflowId = workflowById!!.id
+
+        /** no ACTIVE alert exists and chained alert trigger matches. Expect: new ACTIVE alert created**/
+        var executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!!
+        assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults[notTrigger.id]!!.triggered)
+        val workflowAlerts = getWorkflowAlerts(workflowId)
+        Assert.assertTrue(workflowAlerts.alerts.size == 1)
+        Assert.assertEquals(workflowAlerts.alerts[0].state, Alert.State.ACTIVE)
+        /** ACTIVE alert exists and chained alert trigger matched again. Expect: existing alert updated and remains in ACTIVE*/
+        var executeWorkflowResponse1 = executeWorkflow(workflowById, workflowId, false)!!
+        assertTrue(executeWorkflowResponse1.workflowRunResult.triggerResults[notTrigger.id]!!.triggered)
+        val updatedActiveAlerts = getWorkflowAlerts(workflowId)
+        Assert.assertTrue(updatedActiveAlerts.alerts.size == 1)
+        Assert.assertEquals(updatedActiveAlerts.alerts[0].state, Alert.State.ACTIVE)
+        Assert.assertTrue(updatedActiveAlerts.alerts[0].lastNotificationTime!! > workflowAlerts.alerts[0].lastNotificationTime!!)
+
+        /** Acknowledge ACTIVE alert*/
+        val ackChainedAlerts = ackChainedAlerts(updatedActiveAlerts.alerts.stream().map { it.id }.collect(Collectors.toList()), workflowId)
+        Assert.assertTrue(ackChainedAlerts.acknowledged.size == 1)
+        Assert.assertTrue(ackChainedAlerts.missing.size == 0)
+        Assert.assertTrue(ackChainedAlerts.failed.size == 0)
+
+        /** ACKNOWLEDGED alert exists and chained alert trigger matched again. Expect: existing alert updated and remains ACKNOWLEDGED*/
+        var executeWorkflowResponse2 = executeWorkflow(workflowById, workflowId, false)!!
+        assertTrue(executeWorkflowResponse2.workflowRunResult.triggerResults[notTrigger.id]!!.triggered)
+        val acknowledgedAlert = getWorkflowAlerts(workflowId, alertState = Alert.State.ACKNOWLEDGED)
+        Assert.assertTrue(acknowledgedAlert.alerts.size == 1)
+        Assert.assertEquals(acknowledgedAlert.alerts[0].state, Alert.State.ACKNOWLEDGED)
+        Assert.assertTrue(acknowledgedAlert.alerts[0].lastNotificationTime!! == updatedActiveAlerts.alerts[0].lastNotificationTime!!)
+
+        /** ACKNOWLEDGED alert exists and chained alert trigger NOT matched. Expect: ACKNOWLEDGED alert marked as COMPLETED**/
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
+        val testDoc1 = """{
+            "message" : "This is an error from IAD region",
+            "source.ip.v6.v2" : 16644,
+            "test_strict_date_time" : "$testTime",
+            "test_field_1" : "us-west-2"
+        }"""
+        indexDoc(index, "1", testDoc1)
+        var executeWorkflowResponse3 = executeWorkflow(workflowById, workflowId, false)!!
+        assertFalse(executeWorkflowResponse3.workflowRunResult.triggerResults[notTrigger.id]!!.triggered)
+        val completedAlert = getWorkflowAlerts(workflowId, alertState = Alert.State.COMPLETED)
+        Assert.assertTrue(completedAlert.alerts.size == 1)
+        Assert.assertEquals(completedAlert.alerts[0].state, Alert.State.COMPLETED)
+        Assert.assertTrue(completedAlert.alerts[0].endTime!! > acknowledgedAlert.alerts[0].lastNotificationTime!!)
+
+        /** COMPLETED state alert exists and trigger matches. Expect: new ACTIVE state chained alert created*/
+        var executeWorkflowResponse4 = executeWorkflow(workflowById, workflowId, false)!!
+        assertTrue(executeWorkflowResponse4.workflowRunResult.triggerResults[notTrigger.id]!!.triggered)
+        val newActiveAlert = getWorkflowAlerts(workflowId, alertState = Alert.State.ACTIVE)
+        Assert.assertTrue(newActiveAlert.alerts.size == 1)
+        Assert.assertEquals(newActiveAlert.alerts[0].state, Alert.State.ACTIVE)
+        Assert.assertTrue(newActiveAlert.alerts[0].lastNotificationTime!! > acknowledgedAlert.alerts[0].lastNotificationTime!!)
+        val completedAlert1 = getWorkflowAlerts(workflowId, alertState = Alert.State.COMPLETED)
+        Assert.assertTrue(completedAlert1.alerts.size == 1)
+        Assert.assertEquals(completedAlert1.alerts[0].state, Alert.State.COMPLETED)
+        Assert.assertTrue(completedAlert1.alerts[0].endTime!! > acknowledgedAlert.alerts[0].lastNotificationTime!!)
+ } +} diff --git a/alerting/bin/test/org/opensearch/alerting/MonitorRunnerServiceIT.kt b/alerting/bin/test/org/opensearch/alerting/MonitorRunnerServiceIT.kt new file mode 100644 index 000000000..72b7c0423 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/MonitorRunnerServiceIT.kt @@ -0,0 +1,2093 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import org.junit.Assert +import org.opensearch.alerting.alerts.AlertIndices +import org.opensearch.alerting.model.destination.CustomWebhook +import org.opensearch.alerting.model.destination.Destination +import org.opensearch.alerting.model.destination.email.Email +import org.opensearch.alerting.model.destination.email.Recipient +import org.opensearch.alerting.util.DestinationType +import org.opensearch.alerting.util.getBucketKeysHash +import org.opensearch.client.ResponseException +import org.opensearch.client.WarningFailureException +import org.opensearch.common.settings.Settings +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder +import org.opensearch.commons.alerting.alerts.AlertError +import org.opensearch.commons.alerting.model.ActionExecutionResult +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.Alert.State +import org.opensearch.commons.alerting.model.Alert.State.ACKNOWLEDGED +import org.opensearch.commons.alerting.model.Alert.State.ACTIVE +import org.opensearch.commons.alerting.model.Alert.State.COMPLETED +import org.opensearch.commons.alerting.model.Alert.State.ERROR +import org.opensearch.commons.alerting.model.DataSources +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.IntervalSchedule +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.alerting.model.action.ActionExecutionPolicy +import org.opensearch.commons.alerting.model.action.AlertCategory +import org.opensearch.commons.alerting.model.action.PerAlertActionScope +import org.opensearch.commons.alerting.model.action.PerExecutionActionScope +import org.opensearch.commons.alerting.model.action.Throttle +import org.opensearch.commons.authuser.User +import org.opensearch.core.rest.RestStatus +import org.opensearch.index.query.QueryBuilders +import org.opensearch.script.Script +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder +import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder +import org.opensearch.search.aggregations.bucket.terms.MultiTermsAggregationBuilder +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder +import org.opensearch.search.aggregations.metrics.CardinalityAggregationBuilder +import org.opensearch.search.aggregations.support.MultiTermsValuesSourceConfig +import org.opensearch.search.builder.SearchSourceBuilder +import java.net.URLEncoder +import java.time.Instant +import java.time.ZonedDateTime +import java.time.format.DateTimeFormatter +import java.time.temporal.ChronoUnit +import java.time.temporal.ChronoUnit.DAYS +import java.time.temporal.ChronoUnit.MILLIS +import java.time.temporal.ChronoUnit.MINUTES +import kotlin.collections.HashMap + +class MonitorRunnerServiceIT : AlertingRestTestCase() { + + fun `test execute monitor with dryrun`() { + val action = 
randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id)
+        val monitor = randomQueryLevelMonitor(
+            triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action)))
+        )
+
+        val response = executeMonitor(monitor, params = DRYRUN_MONITOR)
+
+        val output = entityAsMap(response)
+        assertEquals(monitor.name, output["monitor_name"])
+        for (triggerResult in output.objectMap("trigger_results").values) {
+            for (actionResult in triggerResult.objectMap("action_results").values) {
+                @Suppress("UNCHECKED_CAST") val actionOutput = actionResult["output"] as Map<String, String>
+                assertEquals("Hello ${monitor.name}", actionOutput["subject"])
+                assertEquals("Hello ${monitor.name}", actionOutput["message"])
+            }
+        }
+
+        val alerts = searchAlerts(monitor)
+        assertEquals("Alert saved for test monitor", 0, alerts.size)
+    }
+
+    fun `test execute monitor returns search result`() {
+        val testIndex = createTestIndex()
+        val twoMinsAgo = ZonedDateTime.now().minus(2, MINUTES).truncatedTo(MILLIS)
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(twoMinsAgo)
+        val testDoc = """{ "test_strict_date_time" : "$testTime" }"""
+        indexDoc(testIndex, "1", testDoc)
+
+        val query = QueryBuilders.rangeQuery("test_strict_date_time")
+            .gt("{{period_end}}||-10d")
+            .lte("{{period_end}}")
+            .format("epoch_millis")
+        val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().query(query))
+        val triggerScript = """
+            // make sure there is exactly one hit
+            return ctx.results[0].hits.hits.size() == 1
+        """.trimIndent()
+
+        val trigger = randomQueryLevelTrigger(condition = Script(triggerScript))
+        val monitor = randomQueryLevelMonitor(inputs = listOf(input), triggers = listOf(trigger))
+        val response = executeMonitor(monitor, params = DRYRUN_MONITOR)
+
+        val output = entityAsMap(response)
+
+        assertEquals(monitor.name, output["monitor_name"])
+        @Suppress("UNCHECKED_CAST")
+        val searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        val total = searchResult.stringMap("hits")?.get("total") as Map<String, Any>
+        assertEquals("Incorrect search result", 1, total["value"])
+    }
+
+    fun `test execute monitor not triggered`() {
+        val monitor = randomQueryLevelMonitor(triggers = listOf(randomQueryLevelTrigger(condition = NEVER_RUN)))
+
+        val response = executeMonitor(monitor)
+
+        val output = entityAsMap(response)
+        assertEquals(monitor.name, output["monitor_name"])
+        for (triggerResult in output.objectMap("trigger_results").values) {
+            assertTrue("Unexpected trigger was run", triggerResult.objectMap("action_results").isEmpty())
+        }
+
+        val alerts = searchAlerts(monitor)
+        assertEquals("Alert saved for test monitor", 0, alerts.size)
+    }
+
+    fun `test active alert is updated on each run`() {
+        val monitor = createMonitor(
+            randomQueryLevelMonitor(
+                triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN, destinationId = createDestination().id))
+            )
+        )
+
+        executeMonitor(monitor.id)
+        val firstRunAlert = searchAlerts(monitor).single()
+        verifyAlert(firstRunAlert, monitor)
+        // Runner uses ThreadPool.CachedTimeThread thread which only updates once every 200 ms. Wait a bit to
+        // see lastNotificationTime change.
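+        // (200 ms matches the cached time thread's stated update interval, so the next run sees a newer time)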
+        Thread.sleep(200)
+        executeMonitor(monitor.id)
+        val secondRunAlert = searchAlerts(monitor).single()
+        verifyAlert(secondRunAlert, monitor)
+
+        assertEquals("New alert was created, instead of updating existing alert.", firstRunAlert.id, secondRunAlert.id)
+        assertEquals("Start time shouldn't change", firstRunAlert.startTime, secondRunAlert.startTime)
+        assertNotEquals(
+            "Last notification should be different.",
+            firstRunAlert.lastNotificationTime, secondRunAlert.lastNotificationTime
+        )
+    }
+
+    fun `test execute monitor input error`() {
+        // use a non-existent index to trigger an input error
+        createIndex("foo", Settings.EMPTY)
+        val input = SearchInput(indices = listOf("foo"), query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()))
+        val monitor = createMonitor(
+            randomQueryLevelMonitor(
+                inputs = listOf(input),
+                triggers = listOf(randomQueryLevelTrigger(condition = NEVER_RUN))
+            )
+        )
+
+        deleteIndex("foo")
+        val response = executeMonitor(monitor.id)
+
+        val output = entityAsMap(response)
+        assertEquals(monitor.name, output["monitor_name"])
+        @Suppress("UNCHECKED_CAST")
+        val inputResults = output.stringMap("input_results")
+        assertTrue("Missing monitor error message", (inputResults?.get("error") as String).isNotEmpty())
+
+        val alerts = searchAlerts(monitor)
+        assertEquals("Alert not saved", 1, alerts.size)
+        verifyAlert(alerts.single(), monitor, ERROR)
+    }
+
+    fun `test execute monitor wrong monitorid`() {
+        // use a non-existent monitor id to trigger a 404.
+        createIndex("foo", Settings.EMPTY)
+        val input = SearchInput(indices = listOf("foo"), query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()))
+        val monitor = createMonitor(
+            randomQueryLevelMonitor(
+                inputs = listOf(input),
+                triggers = listOf(randomQueryLevelTrigger(condition = NEVER_RUN))
+            )
+        )
+
+        var exception: ResponseException? = null
+        try {
+            executeMonitor(monitor.id + "bad")
+        } catch (ex: ResponseException) {
+            exception = ex
+        }
+        Assert.assertEquals(404, exception?.response?.statusLine?.statusCode)
+    }
+
+    fun `test execute doclevel monitor without triggers success`() {
+ val index = "foo" + createIndex(index, Settings.EMPTY) + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "1", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) + val monitor = createMonitor( + randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf() + ) + ) + val doc = """ + { "test_field": "us-west-2" } + """.trimIndent() + indexDoc(index, "1", doc) + + val response = executeMonitor(monitor.id) + var output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + assertTrue("Unexpected monitor error message", (output["error"] as String?).isNullOrEmpty()) + assertTrue(searchFindings(monitor).size == 1) + assertTrue(searchAlerts(monitor).isEmpty()) + } + + fun `test acknowledged alert does not suppress subsequent errors`() { + val destinationId = createDestination().id + + createIndex("foo", Settings.EMPTY) + val input = SearchInput(indices = listOf("foo"), query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery())) + val monitor = createMonitor( + randomQueryLevelMonitor( + inputs = listOf(input), + triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN, destinationId = destinationId)) + ) + ) + + var response = executeMonitor(monitor.id) + + var output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + assertTrue("Unexpected monitor error message", (output["error"] as String?).isNullOrEmpty()) + val activeAlert = searchAlerts(monitor).single() + verifyAlert(activeAlert, monitor) + + // Now acknowledge the alert and delete the index to cause the next run of the monitor to fail... + acknowledgeAlerts(monitor, activeAlert) + deleteIndex("foo") + response = executeMonitor(monitor.id) + + output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + val errorAlert = searchAlerts(monitor).single { it.state == ERROR } + verifyAlert(errorAlert, monitor, ERROR) + } + + fun `test acknowledged alert is not updated unnecessarily`() { + val monitor = createMonitor( + randomQueryLevelMonitor( + triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN, destinationId = createDestination().id)) + ) + ) + executeMonitor(monitor.id) + acknowledgeAlerts(monitor, searchAlerts(monitor).single()) + val acknowledgedAlert = searchAlerts(monitor).single() + verifyAlert(acknowledgedAlert, monitor, ACKNOWLEDGED) + + // Runner uses ThreadPool.CachedTimeThread thread which only updates once every 200 ms. Wait a bit to + // let lastNotificationTime change. W/o this sleep the test can result in a false negative. 
+        Thread.sleep(200)
+        val response = executeMonitor(monitor.id)
+
+        val output = entityAsMap(response)
+        assertEquals(monitor.name, output["monitor_name"])
+        val currentAlert = searchAlerts(monitor).single()
+        assertEquals("Acknowledged alert was updated when nothing changed", currentAlert, acknowledgedAlert)
+        for (triggerResult in output.objectMap("trigger_results").values) {
+            assertTrue("Action run when alert is acknowledged.", triggerResult.objectMap("action_results").isEmpty())
+        }
+    }
+
+    fun `test alert completion`() {
+        val trigger = randomQueryLevelTrigger(condition = Script("ctx.alert == null"), destinationId = createDestination().id)
+        val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger)))
+
+        executeMonitor(monitor.id)
+        val activeAlert = searchAlerts(monitor).single()
+        verifyAlert(activeAlert, monitor)
+
+        executeMonitor(monitor.id)
+        assertTrue("There's still an active alert", searchAlerts(monitor, AlertIndices.ALERT_INDEX).isEmpty())
+        val completedAlert = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN).single()
+        verifyAlert(completedAlert, monitor, COMPLETED)
+    }
+
+    fun `test execute monitor script error`() {
+        // This painless script should cause a syntax error
+        val trigger = randomQueryLevelTrigger(condition = Script("foo bar baz"))
+        val monitor = randomQueryLevelMonitor(triggers = listOf(trigger))
+
+        val response = executeMonitor(monitor)
+
+        val output = entityAsMap(response)
+        assertEquals(monitor.name, output["monitor_name"])
+        for (triggerResult in output.objectMap("trigger_results").values) {
+            assertTrue("Missing trigger error message", (triggerResult["error"] as String).isNotEmpty())
+        }
+
+        val alerts = searchAlerts(monitor)
+        assertEquals("Alert saved for test monitor", 0, alerts.size)
+    }
+
+    fun `test execute action template error`() {
+        // Intentional syntax error in mustache template
+        val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name"))
+        val monitor = randomQueryLevelMonitor(
+            triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action)))
+        )
+
+        val response = executeMonitor(monitor)
+
+        val output = entityAsMap(response)
+        assertEquals(monitor.name, output["monitor_name"])
+        for (triggerResult in output.objectMap("trigger_results").values) {
+            for (actionResult in triggerResult.objectMap("action_results").values) {
+                assertTrue("Missing action error message", (actionResult["error"] as String).isNotEmpty())
+            }
+        }
+
+        val alerts = searchAlerts(monitor)
+        assertEquals("Alert saved for test monitor", 0, alerts.size)
+    }
+
+    fun `test execute monitor search with period`() {
+        // We can't query .opendistro-alerting-config since it's a system index. Create a test index starting with "."
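+        // createTestConfigIndex creates an index whose name starts with ".", so the ".*" indices pattern used below will match it.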
+        val testIndex = createTestConfigIndex()
+        val fiveDaysAgo = ZonedDateTime.now().minus(5, DAYS).truncatedTo(MILLIS)
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(fiveDaysAgo)
+        val testDoc = """{ "test_strict_date_time" : "$testTime" }"""
+        indexDoc(testIndex, "1", testDoc)
+
+        val query = QueryBuilders.rangeQuery("test_strict_date_time")
+            .gt("{{period_end}}||-10d")
+            .lte("{{period_end}}")
+            .format("epoch_millis")
+        val input = SearchInput(indices = listOf(".*"), query = SearchSourceBuilder().query(query))
+        val triggerScript = """
+            // make sure there is at least one monitor
+            return ctx.results[0].hits.hits.size() > 0
+        """.trimIndent()
+        val destinationId = createDestination().id
+        val trigger = randomQueryLevelTrigger(condition = Script(triggerScript), destinationId = destinationId)
+        val monitor = createMonitor(randomQueryLevelMonitor(inputs = listOf(input), triggers = listOf(trigger)))
+
+        val response = executeMonitor(monitor.id)
+
+        val output = entityAsMap(response)
+        assertEquals(monitor.name, output["monitor_name"])
+        val triggerResult = output.objectMap("trigger_results").objectMap(trigger.id)
+        assertEquals(true, triggerResult["triggered"].toString().toBoolean())
+        assertTrue("Unexpected trigger error message", triggerResult["error"]?.toString().isNullOrEmpty())
+
+        val alerts = searchAlerts(monitor)
+        assertEquals("Alert not saved", 1, alerts.size)
+        verifyAlert(alerts.single(), monitor)
+    }
+
+    fun `test execute monitor search with period date math`() {
+        val testIndex = createTestIndex()
+        val fiveDaysAgo = ZonedDateTime.now().minus(5, DAYS).truncatedTo(MILLIS)
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(fiveDaysAgo)
+        val testDoc = """{ "test_strict_date_time" : "$testTime" }"""
+        indexDoc(testIndex, "1", testDoc)
+
+        // Queries that use period_start/end should expect these values to always be formatted as 'epoch_millis'. Either
+        // the query should specify the format (like below) or the mapping for the index/field being queried should allow
+        // epoch_millis as an alternative (OpenSearch's default mapping for date fields is "strict_date_optional_time||epoch_millis")
+        val query = QueryBuilders.rangeQuery("test_strict_date_time")
+            .gt("{{period_end}}||-10d")
+            .lte("{{period_end}}")
+            .format("epoch_millis")
+        val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().query(query))
+        val triggerScript = """
+            // make sure there is exactly one hit
+            return ctx.results[0].hits.hits.size() == 1
+        """.trimIndent()
+        val trigger = randomQueryLevelTrigger(condition = Script(triggerScript))
+        val monitor = randomQueryLevelMonitor(inputs = listOf(input), triggers = listOf(trigger))
+
+        val response = executeMonitor(monitor)
+
+        val output = entityAsMap(response)
+        assertEquals(monitor.name, output["monitor_name"])
+        val triggerResult = output.objectMap("trigger_results").objectMap(trigger.id)
+        assertEquals(true, triggerResult["triggered"].toString().toBoolean())
+        assertTrue("Unexpected trigger error message", triggerResult["error"]?.toString().isNullOrEmpty())
+        assertNotEquals("period incorrect", output["period_start"], output["period_end"])
+
+        // Don't expect any alerts for this monitor as it has not been saved
+        val alerts = searchAlerts(monitor)
+        assertEquals("Alert saved for test monitor", 0, alerts.size)
+    }
+
+    fun `test execute monitor search with date math`() {
+        // Give the index name in the date math format.
+        val testIndex = "<my-index-{now/d}>"
+        // Add percent encoding for the http client to resolve the format.
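+        // A date math name like <my-index-{now/d}> resolves server-side to a concrete daily index; its angle brackets
+        // and braces are not valid in a raw URL path, hence the percent encoding below.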
+        val encodedTestIndex = createTestIndex(
+            URLEncoder.encode(testIndex, "utf-8")
+        )
+
+        val fiveDaysAgo = ZonedDateTime.now().minus(5, DAYS).truncatedTo(MILLIS)
+        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(fiveDaysAgo)
+        val testDoc = """{ "test_strict_date_time" : "$testTime" }"""
+        indexDoc(encodedTestIndex, "1", testDoc)
+
+        // Queries that use period_start/end should expect these values to always be formatted as 'epoch_millis'. Either
+        // the query should specify the format (like below) or the mapping for the index/field being queried should allow
+        // epoch_millis as an alternative (OpenSearch's default mapping for date fields is "strict_date_optional_time||epoch_millis")
+        val query = QueryBuilders.rangeQuery("test_strict_date_time")
+            .gt("{{period_end}}||-10d")
+            .lte("{{period_end}}")
+            .format("epoch_millis")
+        val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().query(query))
+        val triggerScript = """
+            // make sure there is exactly one hit
+            return ctx.results[0].hits.hits.size() == 1
+        """.trimIndent()
+        val trigger = randomQueryLevelTrigger(condition = Script(triggerScript))
+        val monitor = randomQueryLevelMonitor(inputs = listOf(input), triggers = listOf(trigger))
+
+        val response = executeMonitor(monitor, params = DRYRUN_MONITOR)
+
+        val output = entityAsMap(response)
+
+        assertEquals(monitor.name, output["monitor_name"])
+        @Suppress("UNCHECKED_CAST")
+        val searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        val total = searchResult.stringMap("hits")?.get("total") as Map<String, String>
+        assertEquals("Incorrect search result", 1, total["value"])
+    }
+
+    fun `test monitor with one bad action and one good action`() {
+        val goodAction = randomAction(
+            template = randomTemplateScript("Hello {{ctx.monitor.name}}"),
+            destinationId = createDestination().id
+        )
+        val syntaxErrorAction = randomAction(
+            name = "bad syntax",
+            template = randomTemplateScript("{{foo"),
+            destinationId = createDestination().id
+        )
+        val actions = listOf(goodAction, syntaxErrorAction)
+        val monitor = createMonitor(
+            randomQueryLevelMonitor(triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = actions)))
+        )
+
+        val output = entityAsMap(executeMonitor(monitor.id))
+
+        assertEquals(monitor.name, output["monitor_name"])
+        for (triggerResult in output.objectMap("trigger_results").values) {
+            for (actionResult in triggerResult.objectMap("action_results").values) {
+                @Suppress("UNCHECKED_CAST") val actionOutput = actionResult["output"] as Map<String, String>
+                if (actionResult["name"] == goodAction.name) {
+                    assertEquals("Hello ${monitor.name}", actionOutput["message"])
+                } else if (actionResult["name"] == syntaxErrorAction.name) {
+                    assertTrue("Missing action error message", (actionResult["error"] as String).isNotEmpty())
+                } else {
+                    fail("Unknown action: ${actionResult["name"]}")
+                }
+            }
+        }
+
+        val alerts = searchAlerts(monitor)
+        assertEquals("Alert not saved", 1, alerts.size)
+        verifyAlert(alerts.single(), monitor, ERROR)
+    }
+
+    fun `test execute monitor adds to alert error history`() {
+        putAlertMappings() // Required as we do not have a create alert API.
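+        // Each failed action run appends an AlertError to the alert's errorHistory; five entries are seeded below, so a sixth is expected after this run.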
+        // This template script has a parsing error to purposefully create an errorMessage during runMonitor
+        val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name"))
+        val trigger = randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))
+        val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger)))
+        val listOfFiveErrorMessages = (1..5).map { i -> AlertError(timestamp = Instant.now(), message = "error message $i") }
+        val activeAlert = createAlert(
+            randomAlert(monitor).copy(
+                state = ACTIVE, errorHistory = listOfFiveErrorMessages,
+                triggerId = trigger.id, triggerName = trigger.name, severity = trigger.severity
+            )
+        )
+
+        val response = executeMonitor(monitor.id)
+
+        val updatedAlert = searchAlerts(monitor).single()
+        assertEquals("Existing active alert was not updated", activeAlert.id, updatedAlert.id)
+        val output = entityAsMap(response)
+        assertEquals(monitor.name, output["monitor_name"])
+        for (triggerResult in output.objectMap("trigger_results").values) {
+            for (actionResult in triggerResult.objectMap("action_results").values) {
+                assertTrue("Missing action error message", (actionResult["error"] as String).isNotEmpty())
+            }
+        }
+        assertEquals("Wrong number of error messages in history", 6, updatedAlert.errorHistory.size)
+    }
+
+    fun `test latest error is not lost when alert is completed`() {
+        // Creates an active alert the first time it's run and completes it the second time the monitor is run.
+        val trigger = randomQueryLevelTrigger(
+            condition = Script(
+                """
+                if (ctx.alert == null) {
+                    throw new RuntimeException("foo");
+                } else {
+                    return false;
+                }
+                """.trimIndent()
+            )
+        )
+        val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger)))
+
+        executeMonitor(monitor.id)
+        val errorAlert = searchAlerts(monitor).single()
+        verifyAlert(errorAlert, monitor, ERROR)
+        executeMonitor(monitor.id)
+        val completedAlert = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN).single()
+        verifyAlert(completedAlert, monitor, COMPLETED)
+
+        assertNull("Completed alert still has error message.", completedAlert.errorMessage)
+        assertTrue("Missing error history.", completedAlert.errorHistory.isNotEmpty())
+        val latestError = completedAlert.errorHistory.single().message
+        assertTrue("Latest error is missing from history.", latestError.contains("RuntimeException(\"foo\")"))
+    }
+
+    fun `test throw script exception`() {
+        // The trigger condition references an undefined variable, so evaluating it throws a script exception on every run.
+        val trigger = randomQueryLevelTrigger(
+            condition = Script(
+                """
+                param[0]; return true
+                """.trimIndent()
+            )
+        )
+        val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger)))
+
+        executeMonitor(monitor.id)
+        val errorAlert = searchAlerts(monitor).single()
+        verifyAlert(errorAlert, monitor, ERROR)
+        executeMonitor(monitor.id)
+        assertEquals(
+            "Error does not match",
+            "Failed evaluating trigger:\nparam[0]; return true\n ^---- HERE", errorAlert.errorMessage
+        )
+    }
+
+    fun `test execute monitor limits alert error history to 10 error messages`() {
+        putAlertMappings() // Required as we do not have a create alert API.
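+        // The errorHistory on an alert is capped at 10 entries, so seeding 10 below and failing once more should keep the history at 10.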
+ // This template script has a parsing error to purposefully create an errorMessage during runMonitor + val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name")) + val trigger = randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action)) + val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger))) + val listOfTenErrorMessages = (1..10).map { i -> AlertError(timestamp = Instant.now(), message = "error message $i") } + val activeAlert = createAlert( + randomAlert(monitor).copy( + state = ACTIVE, errorHistory = listOfTenErrorMessages, + triggerId = trigger.id, triggerName = trigger.name, severity = trigger.severity + ) + ) + + val response = executeMonitor(monitor.id) + + val output = entityAsMap(response) + assertEquals(monitor.name, output["monitor_name"]) + for (triggerResult in output.objectMap("trigger_results").values) { + for (actionResult in triggerResult.objectMap("action_results").values) { + assertTrue("Missing action error message", (actionResult["error"] as String).isNotEmpty()) + } + } + val updatedAlert = searchAlerts(monitor).single() + assertEquals("Existing active alert was not updated", activeAlert.id, updatedAlert.id) + assertEquals("Wrong number of error messages in history", 10, updatedAlert.errorHistory.size) + } + + fun `test execute monitor creates alert for trigger with no actions`() { + putAlertMappings() // Required as we do not have a create alert API. + + val trigger = randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = emptyList(), destinationId = createDestination().id) + val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger))) + + executeMonitor(monitor.id) + + val alerts = searchAlerts(monitor) + assertEquals("Alert not saved", 1, alerts.size) + verifyAlert(alerts.single(), monitor, ACTIVE) + } + + fun `test execute monitor non-dryrun`() { + val monitor = createMonitor( + randomQueryLevelMonitor( + triggers = listOf( + randomQueryLevelTrigger( + condition = ALWAYS_RUN, + actions = listOf(randomAction(destinationId = createDestination().id)) + ) + ) + ) + ) + + val response = executeMonitor(monitor.id, mapOf("dryrun" to "false")) + + assertEquals("failed dryrun", RestStatus.OK, response.restStatus()) + val alerts = searchAlerts(monitor) + assertEquals("Alert not saved", 1, alerts.size) + verifyAlert(alerts.single(), monitor, ACTIVE) + } + + fun `test execute monitor with already active alert`() { + val monitor = createMonitor( + randomQueryLevelMonitor( + triggers = listOf( + randomQueryLevelTrigger( + condition = ALWAYS_RUN, + actions = listOf(randomAction(destinationId = createDestination().id)) + ) + ) + ) + ) + + val firstExecuteResponse = executeMonitor(monitor.id, mapOf("dryrun" to "false")) + + assertEquals("failed dryrun", RestStatus.OK, firstExecuteResponse.restStatus()) + val alerts = searchAlerts(monitor) + assertEquals("Alert not saved", 1, alerts.size) + verifyAlert(alerts.single(), monitor, ACTIVE) + + val secondExecuteResponse = executeMonitor(monitor.id, mapOf("dryrun" to "false")) + + assertEquals("failed dryrun", RestStatus.OK, secondExecuteResponse.restStatus()) + val newAlerts = searchAlerts(monitor) + assertEquals("Second alert not saved", 1, newAlerts.size) + verifyAlert(newAlerts.single(), monitor, ACTIVE) + } + + fun `test delete monitor with no alerts after alert indices is initialized`() { + putAlertMappings() + + val newMonitor = createMonitor( + randomQueryLevelMonitor( + triggers = listOf(randomQueryLevelTrigger(condition = NEVER_RUN, 
actions = listOf(randomAction())))
+            )
+        )
+        val deleteNewMonitorResponse = client().makeRequest("DELETE", "$ALERTING_BASE_URI/${newMonitor.id}")
+
+        assertEquals("Delete request not successful", RestStatus.OK, deleteNewMonitorResponse.restStatus())
+    }
+
+    fun `test update monitor stays on schedule`() {
+        val monitor = createMonitor(randomQueryLevelMonitor(enabled = true))
+
+        updateMonitor(monitor.copy(enabledTime = Instant.now()))
+
+        val retrievedMonitor = getMonitor(monitorId = monitor.id)
+        assertEquals("Monitor enabled time changed.", monitor.enabledTime, retrievedMonitor.enabledTime)
+    }
+
+    fun `test enabled time by disabling and re-enabling monitor`() {
+        val monitor = createMonitor(randomQueryLevelMonitor(enabled = true))
+        assertNotNull("Enabled time is null on an enabled monitor.", getMonitor(monitor.id).enabledTime)
+
+        val disabledMonitor = updateMonitor(randomQueryLevelMonitor(enabled = false).copy(id = monitor.id))
+        assertNull("Enabled time is not null on a disabled monitor.", disabledMonitor.enabledTime)
+
+        val enabledMonitor = updateMonitor(randomQueryLevelMonitor(enabled = true).copy(id = monitor.id))
+        assertNotNull("Enabled time is null on an enabled monitor.", enabledMonitor.enabledTime)
+    }
+
+    fun `test enabled time by providing enabled time`() {
+        val enabledTime = Instant.ofEpochSecond(1538164858L) // This is 2018-09-27 20:00:58 GMT
+        val monitor = createMonitor(randomQueryLevelMonitor(enabled = true, enabledTime = enabledTime))
+
+        val retrievedMonitor = getMonitor(monitorId = monitor.id)
+        assertTrue("Monitor is not enabled", retrievedMonitor.enabled)
+        assertEquals("Enabled times do not match", monitor.enabledTime, retrievedMonitor.enabledTime)
+    }
+
+    fun `test monitor with throttled action for same alert`() {
+        val actionThrottleEnabled = randomAction(
+            template = randomTemplateScript("Hello {{ctx.monitor.name}}"),
+            destinationId = createDestination().id,
+            throttleEnabled = true, throttle = Throttle(value = 5, unit = MINUTES)
+        )
+        val actionThrottleNotEnabled = randomAction(
+            template = randomTemplateScript("Hello {{ctx.monitor.name}}"),
+            destinationId = createDestination().id,
+            throttleEnabled = false, throttle = Throttle(value = 5, unit = MINUTES)
+        )
+        val actions = listOf(actionThrottleEnabled, actionThrottleNotEnabled)
+        val monitor = createMonitor(
+            randomQueryLevelMonitor(
+                triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = actions)),
+                schedule = IntervalSchedule(interval = 1, unit = MINUTES)
+            )
+        )
+        val monitorRunResultNotThrottled = entityAsMap(executeMonitor(monitor.id))
+        verifyActionThrottleResults(
+            monitorRunResultNotThrottled,
+            mutableMapOf(
+                Pair(actionThrottleEnabled.id, false),
+                Pair(actionThrottleNotEnabled.id, false)
+            )
+        )
+
+        val notThrottledAlert = searchAlerts(monitor)
+        assertEquals("1 alert should be returned", 1, notThrottledAlert.size)
+        verifyAlert(notThrottledAlert.single(), monitor, ACTIVE)
+        val notThrottledActionResults = verifyActionExecutionResultInAlert(
+            notThrottledAlert[0],
+            mutableMapOf(Pair(actionThrottleEnabled.id, 0), Pair(actionThrottleNotEnabled.id, 0))
+        )
+
+        assertEquals(2, notThrottledActionResults.size)
+        val monitorRunResultThrottled = entityAsMap(executeMonitor(monitor.id))
+        verifyActionThrottleResults(
+            monitorRunResultThrottled,
+            mutableMapOf(
+                Pair(actionThrottleEnabled.id, true),
+                Pair(actionThrottleNotEnabled.id, false)
+            )
+        )
+
+        val throttledAlert = searchAlerts(monitor)
+        assertEquals("1 alert should be returned", 1, throttledAlert.size)
+        verifyAlert(throttledAlert.single(), monitor, ACTIVE)
+        val throttledActionResults = verifyActionExecutionResultInAlert(
+            throttledAlert[0],
+            mutableMapOf(Pair(actionThrottleEnabled.id, 1), Pair(actionThrottleNotEnabled.id, 0))
+        )
+
+        assertEquals(2, throttledActionResults.size)
+
+        assertEquals(
+            notThrottledActionResults[actionThrottleEnabled.id]!!.lastExecutionTime,
+            throttledActionResults[actionThrottleEnabled.id]!!.lastExecutionTime
+        )
+    }
+
+    fun `test monitor with throttled action for different alerts`() {
+        val actionThrottleEnabled = randomAction(
+            template = randomTemplateScript("Hello {{ctx.monitor.name}}"),
+            destinationId = createDestination().id,
+            throttleEnabled = true, throttle = Throttle(value = 5, unit = MINUTES)
+        )
+        val actions = listOf(actionThrottleEnabled)
+        val trigger = randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = actions)
+        val monitor = createMonitor(
+            randomQueryLevelMonitor(
+                triggers = listOf(trigger),
+                schedule = IntervalSchedule(interval = 1, unit = ChronoUnit.MINUTES)
+            )
+        )
+        val monitorRunResult1 = entityAsMap(executeMonitor(monitor.id))
+        verifyActionThrottleResults(monitorRunResult1, mutableMapOf(Pair(actionThrottleEnabled.id, false)))
+
+        val activeAlert1 = searchAlerts(monitor)
+        assertEquals("1 alert should be returned", 1, activeAlert1.size)
+        verifyAlert(activeAlert1.single(), monitor, ACTIVE)
+        val actionResults1 = verifyActionExecutionResultInAlert(activeAlert1[0], mutableMapOf(Pair(actionThrottleEnabled.id, 0)))
+
+        Thread.sleep(200)
+        updateMonitor(monitor.copy(triggers = listOf(trigger.copy(condition = NEVER_RUN)), id = monitor.id))
+        executeMonitor(monitor.id)
+        val completedAlert = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN).single()
+        verifyAlert(completedAlert, monitor, COMPLETED)
+
+        updateMonitor(monitor.copy(triggers = listOf(trigger.copy(condition = ALWAYS_RUN)), id = monitor.id))
+        val monitorRunResult2 = entityAsMap(executeMonitor(monitor.id))
+        verifyActionThrottleResults(monitorRunResult2, mutableMapOf(Pair(actionThrottleEnabled.id, false)))
+        val activeAlert2 = searchAlerts(monitor)
+        assertEquals("1 alert should be returned", 1, activeAlert2.size)
+        assertNotEquals(activeAlert1[0].id, activeAlert2[0].id)
+
+        val actionResults2 = verifyActionExecutionResultInAlert(activeAlert2[0], mutableMapOf(Pair(actionThrottleEnabled.id, 0)))
+        assertNotEquals(
+            actionResults1[actionThrottleEnabled.id]!!.lastExecutionTime,
+            actionResults2[actionThrottleEnabled.id]!!.lastExecutionTime
+        )
+    }
+
+    fun `test execute monitor with email destination creates alert in error state`() {
+        putAlertMappings() // Required as we do not have a create alert API.
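+
+        // The test cluster has no mail server available, so sending the email should fail and the alert should be created in the ERROR state.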
+        val emailAccount = createRandomEmailAccount()
+        val emailGroup = createRandomEmailGroup()
+        val email = Email(
+            emailAccountID = emailAccount.id,
+            recipients = listOf(
+                Recipient(type = Recipient.RecipientType.EMAIL, emailGroupID = null, email = "test@email.com"),
+                Recipient(type = Recipient.RecipientType.EMAIL_GROUP, emailGroupID = emailGroup.id, email = null)
+            )
+        )
+
+        val destination = createDestination(
+            Destination(
+                type = DestinationType.EMAIL,
+                name = "testDestination",
+                user = randomUser(),
+                lastUpdateTime = Instant.now(),
+                chime = null,
+                slack = null,
+                customWebhook = null,
+                email = email
+            )
+        )
+        val action = randomAction(destinationId = destination.id)
+        val trigger = randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))
+        val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger)))
+
+        executeMonitor(monitor.id)
+
+        val alerts = searchAlerts(monitor)
+        assertEquals("Alert not saved", 1, alerts.size)
+        verifyAlert(alerts.single(), monitor, ERROR)
+        Assert.assertTrue(alerts.single().errorMessage?.contains("Failed running action") as Boolean)
+    }
+
+    /*
+    TODO: https://github.com/opensearch-project/alerting/issues/300
+    fun `test execute monitor with custom webhook destination`() {
+        val customWebhook = CustomWebhook("http://15.16.17.18", null, null, 80, null, "PUT", emptyMap(), emptyMap(), null, null)
+        val destination = createDestination(
+            Destination(
+                type = DestinationType.CUSTOM_WEBHOOK,
+                name = "testDestination",
+                user = randomUser(),
+                lastUpdateTime = Instant.now(),
+                chime = null,
+                slack = null,
+                customWebhook = customWebhook,
+                email = null
+            )
+        )
+        val action = randomAction(destinationId = destination.id)
+        val trigger = randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))
+        val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger)))
+        executeMonitor(adminClient(), monitor.id)
+
+        val alerts = searchAlerts(monitor)
+        assertEquals("Alert not saved", 1, alerts.size)
+        verifyAlert(alerts.single(), monitor, ERROR)
+        Assert.assertTrue(alerts.single().errorMessage?.contains("Connect timed out") as Boolean)
+    }
+    */
+
+    fun `test create ClusterMetricsInput monitor with ClusterHealth API`() {
+        // GIVEN
+        val path = "/_cluster/health"
+        val input = randomClusterMetricsInput(path = path)
+        val monitor = createMonitor(randomClusterMetricsMonitor(inputs = listOf(input)))
+
+        // WHEN
+        val response = executeMonitor(monitor.id)
+
+        // THEN
+        val output = entityAsMap(response)
+        val inputResults = output.stringMap("input_results")
+        val resultsContent = (inputResults?.get("results") as ArrayList<*>)[0]
+        val errorMessage = inputResults["error"]
+
+        assertEquals(monitor.name, output["monitor_name"])
+        assertTrue(
+            "Monitor results should contain cluster_name, but found: $resultsContent",
+            resultsContent.toString().contains("cluster_name")
+        )
+        assertNull("There should not be an error message, but found: $errorMessage", errorMessage)
+    }
+
+    fun `test create ClusterMetricsInput monitor with ClusterStats API`() {
+        // GIVEN
+        val path = "/_cluster/stats"
+        val input = randomClusterMetricsInput(path = path)
+        val monitor = createMonitor(randomClusterMetricsMonitor(inputs = listOf(input)))
+
+        // WHEN
+        val response = executeMonitor(monitor.id)
+
+        // THEN
+        val output = entityAsMap(response)
+        val inputResults = output.stringMap("input_results")
+        val resultsContent = (inputResults?.get("results") as ArrayList<*>)[0]
+        val errorMessage = inputResults["error"]
+
+        assertEquals(monitor.name, output["monitor_name"])
+        assertTrue(
+            "Monitor results should contain memory_size_in_bytes, but found: $resultsContent",
+            resultsContent.toString().contains("memory_size_in_bytes")
+        )
+        assertNull("There should not be an error message, but found: $errorMessage", errorMessage)
+    }
+
+    fun `test create ClusterMetricsInput monitor with alert triggered`() {
+        // GIVEN
+        putAlertMappings()
+        val trigger = randomQueryLevelTrigger(
+            condition = Script(
+                """
+                return ctx.results[0].number_of_pending_tasks >= 0
+                """.trimIndent()
+            ),
+            destinationId = createDestination().id
+        )
+        val path = "/_cluster/health"
+        val input = randomClusterMetricsInput(path = path)
+        val monitor = createMonitor(randomClusterMetricsMonitor(inputs = listOf(input), triggers = listOf(trigger)))
+
+        // WHEN
+        val response = executeMonitor(monitor.id)
+
+        // THEN
+        val output = entityAsMap(response)
+        assertEquals(monitor.name, output["monitor_name"])
+
+        val triggerResults = output.objectMap("trigger_results").values
+        for (triggerResult in triggerResults) {
+            assertTrue(
+                "This triggerResult should be triggered: $triggerResult",
+                triggerResult.objectMap("action_results").isNotEmpty()
+            )
+        }
+
+        val alerts = searchAlerts(monitor)
+        assertEquals("Alert not saved, $output", 1, alerts.size)
+        verifyAlert(alerts.single(), monitor, ACTIVE)
+    }
+
+    fun `test create ClusterMetricsInput monitor with no alert triggered`() {
+        // GIVEN
+        putAlertMappings()
+        val trigger = randomQueryLevelTrigger(
+            condition = Script(
+                """
+                return ctx.results[0].status.equals("red")
+                """.trimIndent()
+            )
+        )
+        val path = "/_cluster/stats"
+        val input = randomClusterMetricsInput(path = path)
+        val monitor = createMonitor(randomClusterMetricsMonitor(inputs = listOf(input), triggers = listOf(trigger)))
+
+        // WHEN
+        val response = executeMonitor(monitor.id)
+
+        // THEN
+        val output = entityAsMap(response)
+        assertEquals(monitor.name, output["monitor_name"])
+
+        val triggerResults = output.objectMap("trigger_results").values
+        for (triggerResult in triggerResults) {
+            assertTrue(
+                "This triggerResult should not be triggered: $triggerResult",
+                triggerResult.objectMap("action_results").isEmpty()
+            )
+        }
+
+        val alerts = searchAlerts(monitor)
+        assertEquals("Alert saved for test monitor, output: $output", 0, alerts.size)
+    }
+
+    fun `test create ClusterMetricsInput monitor for ClusterHealth API with path parameters`() {
+        // GIVEN
+        val indices = (1..5).map { createTestIndex() }.toTypedArray()
+        val pathParams = indices.joinToString(",")
+        val path = "/_cluster/health/"
+        val input = randomClusterMetricsInput(
+            path = path,
+            pathParams = pathParams
+        )
+        val monitor = createMonitor(randomClusterMetricsMonitor(inputs = listOf(input)))
+
+        // WHEN
+        val response = executeMonitor(monitor.id)
+
+        // THEN
+        val output = entityAsMap(response)
+        val inputResults = output.stringMap("input_results")
+        val resultsContent = (inputResults?.get("results") as ArrayList<*>)[0]
+        val errorMessage = inputResults["error"]
+
+        assertEquals(monitor.name, output["monitor_name"])
+        assertTrue(
+            "Monitor results should contain cluster_name, but found: $resultsContent",
+            resultsContent.toString().contains("cluster_name")
+        )
+        assertNull("There should not be an error message, but found: $errorMessage", errorMessage)
+    }
+
+    // TODO: Once an API is implemented that supports adding/removing entries on the
+    // SupportedApiSettings::supportedApiList, create a test that simulates executing
+    // a preexisting ClusterMetricsInput monitor for an API that has been removed from the supportedApiList.
+    // This will likely involve adding an API to the list before creating the monitor, and then removing
+    // the API from the list before executing the monitor.
+
+    fun `test execute monitor with custom webhook destination and denied host`() {
+
+        listOf("http://10.1.1.1", "127.0.0.1").forEach {
+            val customWebhook = CustomWebhook(it, null, null, 80, null, "PUT", emptyMap(), emptyMap(), null, null)
+            val destination = createDestination(
+                Destination(
+                    type = DestinationType.CUSTOM_WEBHOOK,
+                    name = "testDestination",
+                    user = randomUser(),
+                    lastUpdateTime = Instant.now(),
+                    chime = null,
+                    slack = null,
+                    customWebhook = customWebhook,
+                    email = null
+                )
+            )
+            val action = randomAction(destinationId = destination.id)
+            val trigger = randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))
+            val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger)))
+            executeMonitor(adminClient(), monitor.id)
+
+            val alerts = searchAlerts(monitor)
+            assertEquals("Alert not saved", 1, alerts.size)
+            verifyAlert(alerts.single(), monitor, ERROR)
+
+            Assert.assertNotNull(alerts.single().errorMessage)
+        }
+    }
+
+    fun `test execute AD monitor returns search result without user`() {
+        // TODO: change to REST API call to test security enabled case
+        if (!securityEnabled()) {
+            val user = randomUser()
+            val detectorId = randomAlphaOfLength(5)
+            prepareTestAnomalyResult(detectorId, user)
+            // for old monitors created before FGAC was enabled, the user field is empty
+            val monitor = randomADMonitor(inputs = listOf(adSearchInput(detectorId)), triggers = listOf(adMonitorTrigger()), user = null)
+            val response = executeMonitor(monitor, params = DRYRUN_MONITOR)
+            val output = entityAsMap(response)
+            @Suppress("UNCHECKED_CAST")
+            (output["trigger_results"] as HashMap<String, Any>).forEach {
+                _, v ->
+                assertTrue((v as HashMap<String, Boolean>)["triggered"] as Boolean)
+            }
+            assertEquals(monitor.name, output["monitor_name"])
+            @Suppress("UNCHECKED_CAST")
+            val searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+            @Suppress("UNCHECKED_CAST")
+            val total = searchResult.stringMap("hits")?.get("total") as Map<String, String>
+            assertEquals("Incorrect search result", 5, total["value"])
+            @Suppress("UNCHECKED_CAST")
+            val maxAnomalyGrade = searchResult.stringMap("aggregations")?.get("max_anomaly_grade") as Map<String, Double>
+            assertEquals("Incorrect search result", 0.9, maxAnomalyGrade["value"])
+        }
+    }
+
+    fun `test execute AD monitor returns search result with empty backend role`() {
+        // TODO: change to REST API call to test security enabled case
+        if (!securityEnabled()) {
+            val user = randomUser()
+            val detectorId = randomAlphaOfLength(5)
+            prepareTestAnomalyResult(detectorId, user)
+            // for old monitors created before FGAC was enabled, the user field is empty
+            val monitor = randomADMonitor(
+                inputs = listOf(adSearchInput(detectorId)), triggers = listOf(adMonitorTrigger()),
+                user = User(user.name, listOf(), user.roles, user.customAttNames)
+            )
+            val response = executeMonitor(monitor, params = DRYRUN_MONITOR)
+            val output = entityAsMap(response)
+            @Suppress("UNCHECKED_CAST")
+            (output["trigger_results"] as HashMap<String, Any>).forEach {
+                _, v ->
+                assertTrue((v as HashMap<String, Boolean>)["triggered"] as Boolean)
+            }
+            assertEquals(monitor.name, output["monitor_name"])
+            @Suppress("UNCHECKED_CAST")
+            val searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+            @Suppress("UNCHECKED_CAST")
+            val total = searchResult.stringMap("hits")?.get("total") as Map<String, String>
+            assertEquals("Incorrect search result", 5, total["value"])
total["value"]) + @Suppress("UNCHECKED_CAST") + val maxAnomalyGrade = searchResult.stringMap("aggregations")?.get("max_anomaly_grade") as Map + assertEquals("Incorrect search result", 0.9, maxAnomalyGrade["value"]) + } + } + + fun `test execute AD monitor returns search result with same backend role`() { + // TODO: change to REST API call to test security enabled case + if (!securityEnabled()) { + val detectorId = randomAlphaOfLength(5) + val user = randomUser() + prepareTestAnomalyResult(detectorId, user) + // Test monitor with same user + val monitor = randomADMonitor(inputs = listOf(adSearchInput(detectorId)), triggers = listOf(adMonitorTrigger()), user = user) + val response = executeMonitor(monitor, params = DRYRUN_MONITOR) + val output = entityAsMap(response) + @Suppress("UNCHECKED_CAST") + (output["trigger_results"] as HashMap).forEach { + _, v -> + assertTrue((v as HashMap)["triggered"] as Boolean) + } + @Suppress("UNCHECKED_CAST") + val searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + val total = searchResult.stringMap("hits")?.get("total") as Map + assertEquals("Incorrect search result", 5, total["value"]) + @Suppress("UNCHECKED_CAST") + val maxAnomalyGrade = searchResult.stringMap("aggregations")?.get("max_anomaly_grade") as Map + assertEquals("Incorrect search result", 0.9, maxAnomalyGrade["value"]) + } + } + + fun `test execute AD monitor returns no search result with different backend role`() { + // TODO: change to REST API call to test security enabled case + if (!securityEnabled()) { + val detectorId = randomAlphaOfLength(5) + val user = randomUser() + prepareTestAnomalyResult(detectorId, user) + // Test monitor with different user + val monitor = randomADMonitor( + inputs = listOf(adSearchInput(detectorId)), + triggers = listOf(adMonitorTrigger()), user = randomUser() + ) + val response = executeMonitor(monitor, params = DRYRUN_MONITOR) + val output = entityAsMap(response) + @Suppress("UNCHECKED_CAST") + (output["trigger_results"] as HashMap).forEach { + _, v -> + assertTrue((v as HashMap)["triggered"] as Boolean) + } + @Suppress("UNCHECKED_CAST") + val searchResult = (output.objectMap("input_results")["results"] as List>).first() + @Suppress("UNCHECKED_CAST") + val total = searchResult.stringMap("hits")?.get("total") as Map + assertEquals("Incorrect search result", 5, total["value"]) + } + } + + fun `test execute bucket-level monitor returns search result`() { + val testIndex = createTestIndex() + insertSampleTimeSerializedData( + testIndex, + listOf( + "test_value_1", + "test_value_1", // adding duplicate to verify aggregation + "test_value_2" + ) + ) + + val query = QueryBuilders.rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + val compositeSources = listOf( + TermsValuesSourceBuilder("test_field").field("test_field") + ) + val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) + val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) + val triggerScript = """ + params.docCount > 0 + """.trimIndent() + + var trigger = randomBucketLevelTrigger() + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "composite_agg", + filter = null + ) + ) + val monitor = 
+        val response = executeMonitor(monitor.id, params = DRYRUN_MONITOR)
+        val output = entityAsMap(response)
+
+        assertEquals(monitor.name, output["monitor_name"])
+        @Suppress("UNCHECKED_CAST")
+        val searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        val buckets = searchResult.stringMap("aggregations")?.stringMap("composite_agg")?.get("buckets") as List<Map<String, Any>>
+        assertEquals("Incorrect search result", 2, buckets.size)
+    }
+
+    fun `test execute bucket-level monitor returns search result with multi term agg`() {
+        val index = "test_index_1234"
+        indexDoc(
+            index,
+            "1",
+            """{"user_id": "1",
+                "ip_addr": "12345678",
+                "user_agent": "chrome"
+            }
+            """.trimIndent()
+        )
+        indexDoc(
+            index,
+            "2",
+            """{"user_id": "2",
+                "ip_addr": "12345678",
+                "user_agent": "chrome"
+            }
+            """.trimIndent()
+        )
+        indexDoc(
+            index,
+            "3",
+            """{"user_id": "2",
+                "ip_addr": "3443534",
+                "user_agent": "chrome"
+            }
+            """.trimIndent()
+        )
+
+        val triggerScript = """
+            params.docCount > 0
+        """.trimIndent()
+
+        var trigger = randomBucketLevelTrigger()
+        trigger = trigger.copy(
+            bucketSelector = BucketSelectorExtAggregationBuilder(
+                name = trigger.id,
+                bucketsPathsMap = mapOf("_value" to "distinct_user_count", "docCount" to "_count"),
+                script = Script(triggerScript),
+                parentBucketPath = "hot",
+                filter = null
+            )
+        )
+
+        val m = randomBucketLevelMonitor(
+            triggers = listOf(trigger),
+            inputs = listOf(
+                SearchInput(
+                    listOf(index),
+                    SearchSourceBuilder().aggregation(
+                        MultiTermsAggregationBuilder("hot")
+                            .terms(
+                                listOf(
+                                    MultiTermsValuesSourceConfig.Builder().setFieldName("ip_addr.keyword").build(),
+                                    MultiTermsValuesSourceConfig.Builder().setFieldName("user_agent.keyword").build()
+                                )
+                            )
+                            .subAggregation(CardinalityAggregationBuilder("distinct_user_count").field("user_id.keyword"))
+                    )
+                )
+            )
+        )
+        val monitor = createMonitor(m)
+        val response = executeMonitor(monitor.id, params = DRYRUN_MONITOR)
+        val output = entityAsMap(response)
+
+        assertEquals(monitor.name, output["monitor_name"])
+        @Suppress("UNCHECKED_CAST")
+        val searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
+        @Suppress("UNCHECKED_CAST")
+        val buckets = searchResult.stringMap("aggregations")?.stringMap("hot")?.get("buckets") as List<Map<String, Any>>
+        assertEquals("Incorrect search result", 2, buckets.size)
+        val distinctUserCountAgg1 = buckets.find {
+            it.get("key_as_string") == "12345678|chrome"
+        }!!.get("distinct_user_count") as Map<String, Any>
+        assertEquals(2, distinctUserCountAgg1.get("value"))
+        val distinctUserCountAgg2 = buckets.find {
+            it.get("key_as_string") == "3443534|chrome"
+        }!!.get("distinct_user_count") as Map<String, Any>
+        assertEquals(1, distinctUserCountAgg2.get("value"))
+    }
+
+    fun `test bucket-level monitor alert creation and completion`() {
+        val testIndex = createTestIndex()
+        insertSampleTimeSerializedData(
+            testIndex,
+            listOf(
+                "test_value_1",
+                "test_value_1", // adding duplicate to verify aggregation
+                "test_value_2"
+            )
+        )
+
+        val query = QueryBuilders.rangeQuery("test_strict_date_time")
+            .gt("{{period_end}}||-10d")
+            .lte("{{period_end}}")
+            .format("epoch_millis")
+        val compositeSources = listOf(
+            TermsValuesSourceBuilder("test_field").field("test_field")
+        )
+        val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources)
+        val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg))
+        val triggerScript = """
+            params.docCount > 0
+        """.trimIndent()
+
+        var trigger = randomBucketLevelTrigger()
+        trigger = trigger.copy(
+            bucketSelector = BucketSelectorExtAggregationBuilder(
+                name = trigger.id,
+                bucketsPathsMap = mapOf("docCount" to "_count"),
+                script = Script(triggerScript),
+                parentBucketPath = "composite_agg",
+                filter = null
+            )
+        )
+        val monitor = createMonitor(randomBucketLevelMonitor(inputs = listOf(input), enabled = false, triggers = listOf(trigger)))
+        executeMonitor(monitor.id)
+
+        // Check created alerts
+        var alerts = searchAlerts(monitor)
+        assertEquals("Alerts not saved", 2, alerts.size)
+        alerts.forEach {
+            // Given the random configuration of the Bucket-Level Trigger for the test, it's possible to get
+            // an action configuration that leads to no notifications (meaning the field for the Alert is null).
+            // Since testing action execution is not relevant to this test, verifyAlert is asked to ignore it.
+            verifyAlert(it, monitor, ACTIVE, expectNotification = false)
+        }
+
+        // Delete documents of a particular value
+        deleteDataWithDocIds(
+            testIndex,
+            listOf(
+                "1", // test_value_1
+                "2" // test_value_1
+            )
+        )
+
+        // Execute monitor again
+        executeMonitor(monitor.id)
+
+        // Verify expected alert was completed
+        alerts = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN)
+        val activeAlerts = alerts.filter { it.state == ACTIVE }
+        val completedAlerts = alerts.filter { it.state == COMPLETED }
+        assertEquals("Incorrect number of active alerts", 1, activeAlerts.size)
+        assertEquals("Incorrect number of completed alerts", 1, completedAlerts.size)
+    }
+
+    fun `test bucket-level monitor with acknowledged alert`() {
+        val testIndex = createTestIndex()
+        insertSampleTimeSerializedData(
+            testIndex,
+            listOf(
+                "test_value_1",
+                "test_value_2"
+            )
+        )
+
+        val query = QueryBuilders.rangeQuery("test_strict_date_time")
+            .gt("{{period_end}}||-10d")
+            .lte("{{period_end}}")
+            .format("epoch_millis")
+        val compositeSources = listOf(
+            TermsValuesSourceBuilder("test_field").field("test_field")
+        )
+        val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources)
+        val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg))
+        val triggerScript = """
+            params.docCount > 0
+        """.trimIndent()
+
+        // For the Actions, ensure that there is at least one and that any PER_ALERT actions contain NEW, DEDUPED and COMPLETED in their policies
+        // so that the assertions done later in this test don't fail.
+        // The config is being mutated this way to still maintain the randomness in configuration (like including other ActionExecutionScope).
+ val actions = randomActionsForBucketLevelTrigger(min = 1).map { + if (it.actionExecutionPolicy?.actionExecutionScope is PerAlertActionScope) { + it.copy( + actionExecutionPolicy = ActionExecutionPolicy( + PerAlertActionScope(setOf(AlertCategory.NEW, AlertCategory.DEDUPED, AlertCategory.COMPLETED)) + ) + ) + } else { + it + } + } + var trigger = randomBucketLevelTrigger(actions = actions) + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "composite_agg", + filter = null + ) + ) + val monitor = createMonitor(randomBucketLevelMonitor(inputs = listOf(input), enabled = false, triggers = listOf(trigger))) + executeMonitor(monitor.id) + + // Check created Alerts + var currentAlerts = searchAlerts(monitor) + assertEquals("Alerts not saved", 2, currentAlerts.size) + currentAlerts.forEach { + verifyAlert(it, monitor, ACTIVE) + } + + // Acknowledge one of the Alerts + val alertToAcknowledge = currentAlerts.single { it.aggregationResultBucket?.getBucketKeysHash().equals("test_value_1") } + acknowledgeAlerts(monitor, alertToAcknowledge) + currentAlerts = searchAlerts(monitor) + val acknowledgedAlert = currentAlerts.single { it.state == ACKNOWLEDGED } + val activeAlert = currentAlerts.single { it.state == ACTIVE } + + // Runner uses ThreadPool.CachedTimeThread thread which only updates once every 200 ms. Wait a bit to + // let lastNotificationTime change. W/o this sleep the test can result in a false negative. + Thread.sleep(200) + executeMonitor(monitor.id) + + // Check that the lastNotification time of the acknowledged Alert wasn't updated and the active Alert's was + currentAlerts = searchAlerts(monitor) + val acknowledgedAlert2 = currentAlerts.single { it.state == ACKNOWLEDGED } + val activeAlert2 = currentAlerts.single { it.state == ACTIVE } + assertEquals("Acknowledged alert was updated", acknowledgedAlert.lastNotificationTime, acknowledgedAlert2.lastNotificationTime) + assertTrue("Active alert was not updated", activeAlert2.lastNotificationTime!! > activeAlert.lastNotificationTime) + + // Remove data so that both Alerts are moved into completed + deleteDataWithDocIds( + testIndex, + listOf( + "1", // test_value_1 + "2" // test_value_2 + ) + ) + + // Execute Monitor and check that both Alerts were updated + Thread.sleep(200) + executeMonitor(monitor.id) + currentAlerts = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN) + val completedAlerts = currentAlerts.filter { it.state == COMPLETED } + assertEquals("Incorrect number of completed alerts", 2, completedAlerts.size) + val previouslyAcknowledgedAlert = completedAlerts.single { it.aggregationResultBucket?.getBucketKeysHash().equals("test_value_1") } + val previouslyActiveAlert = completedAlerts.single { it.aggregationResultBucket?.getBucketKeysHash().equals("test_value_2") } + assertTrue( + "Previously acknowledged alert was not updated when it moved to completed", + previouslyAcknowledgedAlert.lastNotificationTime!! > acknowledgedAlert2.lastNotificationTime + ) + assertTrue( + "Previously active alert was not updated when it moved to completed", + previouslyActiveAlert.lastNotificationTime!! 
> activeAlert2.lastNotificationTime
+        )
+    }
+
+    fun `test bucket-level monitor with findings enabled on term agg`() {
+        val testIndex = createTestIndex()
+        insertSampleTimeSerializedData(
+            testIndex,
+            listOf(
+                "test_value_1",
+                "test_value_2"
+            )
+        )
+
+        val query = QueryBuilders.rangeQuery("test_strict_date_time")
+            .gt("{{period_end}}||-10d")
+            .lte("{{period_end}}")
+            .format("epoch_millis")
+        val termAgg = TermsAggregationBuilder("test_field").field("test_field")
+        val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(termAgg))
+        val triggerScript = """
+            params.docCount > 0
+        """.trimIndent()
+
+        // For the Actions, ensure that there is at least one and that any PER_ALERT actions contain NEW, DEDUPED and COMPLETED in their policies
+        // so that the assertions done later in this test don't fail.
+        // The config is being mutated this way to still maintain the randomness in configuration (like including other ActionExecutionScope).
+        val actions = randomActionsForBucketLevelTrigger(min = 1).map {
+            if (it.actionExecutionPolicy?.actionExecutionScope is PerAlertActionScope) {
+                it.copy(
+                    actionExecutionPolicy = ActionExecutionPolicy(
+                        PerAlertActionScope(setOf(AlertCategory.NEW, AlertCategory.DEDUPED, AlertCategory.COMPLETED))
+                    )
+                )
+            } else {
+                it
+            }
+        }
+        var trigger = randomBucketLevelTrigger(actions = actions)
+        trigger = trigger.copy(
+            bucketSelector = BucketSelectorExtAggregationBuilder(
+                name = trigger.id,
+                bucketsPathsMap = mapOf("docCount" to "_count"),
+                script = Script(triggerScript),
+                parentBucketPath = "test_field",
+                filter = null
+            )
+        )
+        val monitor = createMonitor(
+            randomBucketLevelMonitor(
+                inputs = listOf(input),
+                enabled = false,
+                triggers = listOf(trigger),
+                dataSources = DataSources(findingsEnabled = true)
+            )
+        )
+        executeMonitor(monitor.id)
+
+        // Check created Alerts
+        var currentAlerts = searchAlerts(monitor)
+        assertEquals("Alerts not saved", 2, currentAlerts.size)
+        currentAlerts.forEach { alert ->
+            Assert.assertEquals("expected findings for alert", 1, alert.findingIds.size)
+        }
+        val findings = searchFindings(monitor)
+        assertEquals("Findings saved for test monitor", 1, findings.size)
+        assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1"))
+        assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("2"))
+    }
+
+    fun `test bucket-level monitor with findings enabled on composite agg`() {
+        val testIndex = createTestIndex()
+        insertSampleTimeSerializedData(
+            testIndex,
+            listOf(
+                "test_value_1",
+                "test_value_2"
+            )
+        )
+
+        val query = QueryBuilders.rangeQuery("test_strict_date_time")
+            .gt("{{period_end}}||-10d")
+            .lte("{{period_end}}")
+            .format("epoch_millis")
+        val compositeSources = listOf(
+            TermsValuesSourceBuilder("test_field").field("test_field")
+        )
+        val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources)
+        val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg))
+        val triggerScript = """
+            params.docCount > 0
+        """.trimIndent()
+
+        // For the Actions, ensure that there is at least one and that any PER_ALERT actions contain NEW, DEDUPED and COMPLETED in their policies
+        // so that the assertions done later in this test don't fail.
+        // The config is being mutated this way to still maintain the randomness in configuration (like including other ActionExecutionScope).
+        val actions = randomActionsForBucketLevelTrigger(min = 1).map {
+            if (it.actionExecutionPolicy?.actionExecutionScope is PerAlertActionScope) {
+                it.copy(
+                    actionExecutionPolicy = ActionExecutionPolicy(
+                        PerAlertActionScope(setOf(AlertCategory.NEW, AlertCategory.DEDUPED, AlertCategory.COMPLETED))
+                    )
+                )
+            } else {
+                it
+            }
+        }
+        var trigger = randomBucketLevelTrigger(actions = actions)
+        trigger = trigger.copy(
+            bucketSelector = BucketSelectorExtAggregationBuilder(
+                name = trigger.id,
+                bucketsPathsMap = mapOf("docCount" to "_count"),
+                script = Script(triggerScript),
+                parentBucketPath = "composite_agg",
+                filter = null
+            )
+        )
+        val monitor = createMonitor(
+            randomBucketLevelMonitor(
+                inputs = listOf(input),
+                enabled = false,
+                triggers = listOf(trigger),
+                dataSources = DataSources(findingsEnabled = true)
+            )
+        )
+        executeMonitor(monitor.id)
+
+        // Check created Alerts
+        var currentAlerts = searchAlerts(monitor)
+        assertEquals("Alerts not saved", 2, currentAlerts.size)
+        currentAlerts.forEach { alert ->
+            Assert.assertEquals("expected findings for alert", 1, alert.findingIds.size)
+        }
+        val findings = searchFindings(monitor)
+        assertEquals("Findings saved for test monitor", 1, findings.size)
+        assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1"))
+        assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("2"))
+    }
+
+    fun `test bucket-level monitor with findings enabled for multiple group by fields`() {
+        val testIndex = createTestIndex()
+        insertSampleTimeSerializedData(
+            testIndex,
+            listOf(
+                "test_value_1",
+                "test_value_2"
+            )
+        )
+
+        val query = QueryBuilders.rangeQuery("test_strict_date_time")
+            .gt("{{period_end}}||-10d")
+            .lte("{{period_end}}")
+            .format("epoch_millis")
+        val compositeSources = listOf(
+            TermsValuesSourceBuilder("test_field").field("test_field"),
+            TermsValuesSourceBuilder("number").field("number")
+        )
+        val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources)
+        val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg))
+        val triggerScript = """
+            params.docCount > 0
+        """.trimIndent()
+
+        // For the Actions, ensure that there is at least one and that any PER_ALERT actions contain NEW, DEDUPED and COMPLETED in their policies
+        // so that the assertions done later in this test don't fail.
+        // The config is being mutated this way to still maintain the randomness in configuration (like including other ActionExecutionScope).
+        val actions = randomActionsForBucketLevelTrigger(min = 1).map {
+            if (it.actionExecutionPolicy?.actionExecutionScope is PerAlertActionScope) {
+                it.copy(
+                    actionExecutionPolicy = ActionExecutionPolicy(
+                        PerAlertActionScope(setOf(AlertCategory.NEW, AlertCategory.DEDUPED, AlertCategory.COMPLETED))
+                    )
+                )
+            } else {
+                it
+            }
+        }
+        var trigger = randomBucketLevelTrigger(actions = actions)
+        trigger = trigger.copy(
+            bucketSelector = BucketSelectorExtAggregationBuilder(
+                name = trigger.id,
+                bucketsPathsMap = mapOf("docCount" to "_count"),
+                script = Script(triggerScript),
+                parentBucketPath = "composite_agg",
+                filter = null
+            )
+        )
+        val monitor = createMonitor(
+            randomBucketLevelMonitor(
+                inputs = listOf(input),
+                enabled = false,
+                triggers = listOf(trigger),
+                dataSources = DataSources(findingsEnabled = true)
+            )
+        )
+        executeMonitor(monitor.id)
+
+        // Check created Alerts
+        var currentAlerts = searchAlerts(monitor)
+        assertEquals("Alerts not saved", 2, currentAlerts.size)
+        currentAlerts.forEach { alert ->
+            Assert.assertEquals("expected no findings for alert", 0, alert.findingIds.size)
+        }
+        val findings = searchFindings(monitor)
+        assertEquals("Findings saved for test monitor", 0, findings.size)
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    fun `test bucket-level monitor with one good action and one bad action`() {
+        val testIndex = createTestIndex()
+        insertSampleTimeSerializedData(
+            testIndex,
+            listOf(
+                "test_value_1",
+                "test_value_1",
+                "test_value_3",
+                "test_value_2",
+                "test_value_2"
+            )
+        )
+
+        val query = QueryBuilders.rangeQuery("test_strict_date_time")
+            .gt("{{period_end}}||-10d")
+            .lte("{{period_end}}")
+            .format("epoch_millis")
+        val compositeSources = listOf(
+            TermsValuesSourceBuilder("test_field").field("test_field")
+        )
+        val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources)
+        val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg))
+        // Trigger script should only create Alerts for 'test_value_1' and 'test_value_2'
+        val triggerScript = """
+            params.docCount > 1
+        """.trimIndent()
+
+        val goodAction = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id)
+        val syntaxErrorAction = randomAction(
+            name = "bad syntax",
+            template = randomTemplateScript("{{foo"),
+            destinationId = createDestination().id
+        )
+        val actions = listOf(goodAction, syntaxErrorAction)
+
+        var trigger = randomBucketLevelTrigger(actions = actions)
+        trigger = trigger.copy(
+            bucketSelector = BucketSelectorExtAggregationBuilder(
+                name = trigger.id,
+                bucketsPathsMap = mapOf("docCount" to "_count"),
+                script = Script(triggerScript),
+                parentBucketPath = "composite_agg",
+                filter = null
+            )
+        )
+        val monitor = createMonitor(randomBucketLevelMonitor(inputs = listOf(input), enabled = false, triggers = listOf(trigger)))
+
+        val output = entityAsMap(executeMonitor(monitor.id))
+        // The 'events' in this case are the bucketKeys hashes representing the Alert events
+        val expectedEvents = setOf("test_value_1", "test_value_2")
+
+        assertEquals(monitor.name, output["monitor_name"])
+        for (triggerResult in output.objectMap("trigger_results").values) {
+            for (alertEvent in triggerResult.objectMap("action_results")) {
+                assertTrue(expectedEvents.contains(alertEvent.key))
+                val actionResults = alertEvent.value.values as Collection<Map<String, Any>>
+                for (actionResult in actionResults) {
+                    val actionOutput = actionResult["output"] as Map<String, String>
+                    if (actionResult["name"] == goodAction.name) {
goodAction.name) { + assertEquals("Hello ${monitor.name}", actionOutput["message"]) + } else if (actionResult["name"] == syntaxErrorAction.name) { + assertTrue("Missing action error message", (actionResult["error"] as String).isNotEmpty()) + } else { + fail("Unknown action: ${actionResult["name"]}") + } + } + } + } + + // Check created alerts + val alerts = searchAlerts(monitor) + assertEquals("Alerts not saved", 2, alerts.size) + alerts.forEach { + verifyAlert(it, monitor, ACTIVE) + } + } + + @Suppress("UNCHECKED_CAST") + fun `test bucket-level monitor with per execution action scope`() { + val testIndex = createTestIndex() + insertSampleTimeSerializedData( + testIndex, + listOf( + "test_value_1", + "test_value_1", + "test_value_3", + "test_value_2", + "test_value_2" + ) + ) + + val query = QueryBuilders.rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + val compositeSources = listOf( + TermsValuesSourceBuilder("test_field").field("test_field") + ) + val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) + val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) + // Trigger script should only create Alerts for 'test_value_1' and 'test_value_2' + val triggerScript = """ + params.docCount > 1 + """.trimIndent() + + val action = randomActionWithPolicy( + template = randomTemplateScript("Hello {{ctx.monitor.name}}"), + destinationId = createDestination().id, + actionExecutionPolicy = ActionExecutionPolicy(PerExecutionActionScope()) + ) + var trigger = randomBucketLevelTrigger(actions = listOf(action)) + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "composite_agg", + filter = null + ) + ) + val monitor = createMonitor(randomBucketLevelMonitor(inputs = listOf(input), enabled = false, triggers = listOf(trigger))) + + val output = entityAsMap(executeMonitor(monitor.id)) + // The 'events' in this case are the bucketKeys hashes representing the Alert events + val expectedEvents = setOf("test_value_1", "test_value_2") + + assertEquals(monitor.name, output["monitor_name"]) + for (triggerResult in output.objectMap("trigger_results").values) { + for (alertEvent in triggerResult.objectMap("action_results")) { + assertTrue(expectedEvents.contains(alertEvent.key)) + val actionResults = alertEvent.value.values as Collection> + for (actionResult in actionResults) { + val actionOutput = actionResult["output"] as Map + assertEquals("Unknown action: ${actionResult["name"]}", action.name, actionResult["name"]) + assertEquals("Hello ${monitor.name}", actionOutput["message"]) + } + } + } + + // Check created alerts + val alerts = searchAlerts(monitor) + assertEquals("Alerts not saved", 2, alerts.size) + alerts.forEach { + verifyAlert(it, monitor, ACTIVE) + } + } + + fun `test bucket-level monitor with per alert action scope saves completed alerts even if not actionable`() { + val testIndex = createTestIndex() + insertSampleTimeSerializedData( + testIndex, + listOf( + "test_value_1", + "test_value_1", + "test_value_2", + "test_value_2" + ) + ) + + val query = QueryBuilders.rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + val compositeSources = listOf( + TermsValuesSourceBuilder("test_field").field("test_field") + ) + 
val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) + val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) + val triggerScript = """ + params.docCount > 1 + """.trimIndent() + + val action = randomActionWithPolicy( + template = randomTemplateScript("Hello {{ctx.monitor.name}}"), + destinationId = createDestination().id, + actionExecutionPolicy = ActionExecutionPolicy(PerAlertActionScope(setOf(AlertCategory.DEDUPED, AlertCategory.NEW))) + ) + var trigger = randomBucketLevelTrigger(actions = listOf(action)) + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "composite_agg", + filter = null + ) + ) + val monitor = createMonitor(randomBucketLevelMonitor(inputs = listOf(input), enabled = false, triggers = listOf(trigger))) + executeMonitor(monitor.id) + + // Check created Alerts + var currentAlerts = searchAlerts(monitor) + assertEquals("Alerts not saved", 2, currentAlerts.size) + currentAlerts.forEach { + verifyAlert(it, monitor, ACTIVE) + } + + // Remove data so that both Alerts are moved into completed + deleteDataWithDocIds( + testIndex, + listOf( + "1", // test_value_1 + "2", // test_value_1 + "3", // test_value_2 + "4" // test_value_2 + ) + ) + + // Execute Monitor and check that both Alerts were moved to COMPLETED + executeMonitor(monitor.id) + currentAlerts = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN) + val completedAlerts = currentAlerts.filter { it.state == COMPLETED } + assertEquals("Incorrect number of completed alerts", 2, completedAlerts.size) + } + + @Suppress("UNCHECKED_CAST") + fun `test bucket-level monitor throttling with per alert action scope`() { + val testIndex = createTestIndex() + insertSampleTimeSerializedData( + testIndex, + listOf( + "test_value_1", + "test_value_2" + ) + ) + + val query = QueryBuilders.rangeQuery("test_strict_date_time") + .gt("{{period_end}}||-10d") + .lte("{{period_end}}") + .format("epoch_millis") + val compositeSources = listOf( + TermsValuesSourceBuilder("test_field").field("test_field") + ) + val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) + val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) + val triggerScript = """ + params.docCount > 0 + """.trimIndent() + + val actionThrottleEnabled = randomActionWithPolicy( + template = randomTemplateScript("Hello {{ctx.monitor.name}}"), + destinationId = createDestination().id, + throttleEnabled = true, + throttle = Throttle(value = 5, unit = MINUTES), + actionExecutionPolicy = ActionExecutionPolicy( + actionExecutionScope = PerAlertActionScope(setOf(AlertCategory.DEDUPED, AlertCategory.NEW)) + ) + ) + val actionThrottleNotEnabled = randomActionWithPolicy( + template = randomTemplateScript("Hello {{ctx.monitor.name}}"), + destinationId = createDestination().id, + throttleEnabled = false, + throttle = Throttle(value = 5, unit = MINUTES), + actionExecutionPolicy = ActionExecutionPolicy( + actionExecutionScope = PerAlertActionScope(setOf(AlertCategory.DEDUPED, AlertCategory.NEW)) + ) + ) + val actions = listOf(actionThrottleEnabled, actionThrottleNotEnabled) + var trigger = randomBucketLevelTrigger(actions = actions) + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, 
+                bucketsPathsMap = mapOf("docCount" to "_count"),
+                script = Script(triggerScript),
+                parentBucketPath = "composite_agg",
+                filter = null
+            )
+        )
+        val monitor = createMonitor(randomBucketLevelMonitor(inputs = listOf(input), enabled = false, triggers = listOf(trigger)))
+
+        val monitorRunResultNotThrottled = entityAsMap(executeMonitor(monitor.id))
+        verifyActionThrottleResultsForBucketLevelMonitor(
+            monitorRunResult = monitorRunResultNotThrottled,
+            expectedEvents = setOf("test_value_1", "test_value_2"),
+            expectedActionResults = mapOf(
+                Pair(actionThrottleEnabled.id, false),
+                Pair(actionThrottleNotEnabled.id, false)
+            )
+        )
+
+        val notThrottledAlerts = searchAlerts(monitor)
+        assertEquals("Alerts may not have been saved correctly", 2, notThrottledAlerts.size)
+        val previousAlertExecutionTime: MutableMap<String, MutableMap<String, Instant?>> = mutableMapOf()
+        notThrottledAlerts.forEach {
+            verifyAlert(it, monitor, ACTIVE)
+            val notThrottledActionResults = verifyActionExecutionResultInAlert(
+                it,
+                mutableMapOf(Pair(actionThrottleEnabled.id, 0), Pair(actionThrottleNotEnabled.id, 0))
+            )
+            assertEquals(2, notThrottledActionResults.size)
+            // Save the lastExecutionTimes of the actions for the Alert to be compared later against
+            // the next Monitor execution run
+            previousAlertExecutionTime[it.id] = mutableMapOf()
+            previousAlertExecutionTime[it.id]!![actionThrottleEnabled.id] =
+                notThrottledActionResults[actionThrottleEnabled.id]!!.lastExecutionTime
+            previousAlertExecutionTime[it.id]!![actionThrottleNotEnabled.id] =
+                notThrottledActionResults[actionThrottleNotEnabled.id]!!.lastExecutionTime
+        }
+
+        // Runner uses ThreadPool.CachedTimeThread thread which only updates once every 200 ms. Wait a bit to
+        // let Action executionTime change. Without this sleep the test can result in a false negative.
+        Thread.sleep(200)
+        val monitorRunResultThrottled = entityAsMap(executeMonitor(monitor.id))
+        verifyActionThrottleResultsForBucketLevelMonitor(
+            monitorRunResult = monitorRunResultThrottled,
+            expectedEvents = setOf("test_value_1", "test_value_2"),
+            expectedActionResults = mapOf(
+                Pair(actionThrottleEnabled.id, true),
+                Pair(actionThrottleNotEnabled.id, false)
+            )
+        )
+
+        val throttledAlerts = searchAlerts(monitor)
+        assertEquals("Alerts may not have been saved correctly", 2, throttledAlerts.size)
+        throttledAlerts.forEach {
+            verifyAlert(it, monitor, ACTIVE)
+            val throttledActionResults = verifyActionExecutionResultInAlert(
+                it,
+                mutableMapOf(Pair(actionThrottleEnabled.id, 1), Pair(actionThrottleNotEnabled.id, 0))
+            )
+            assertEquals(2, throttledActionResults.size)
+
+            val prevThrottledActionLastExecutionTime = previousAlertExecutionTime[it.id]!![actionThrottleEnabled.id]
+            val prevNotThrottledActionLastExecutionTime = previousAlertExecutionTime[it.id]!![actionThrottleNotEnabled.id]
+            assertEquals(
+                "Last execution time of a throttled action was updated for one of the Alerts",
+                prevThrottledActionLastExecutionTime,
+                throttledActionResults[actionThrottleEnabled.id]!!.lastExecutionTime
+            )
+            assertTrue(
+                "Last execution time of a non-throttled action was not updated for one of the Alerts",
+                throttledActionResults[actionThrottleNotEnabled.id]!!.lastExecutionTime!! > prevNotThrottledActionLastExecutionTime!!
+            )
+        }
+    }
+
+    private fun prepareTestAnomalyResult(detectorId: String, user: User) {
+        val adResultIndex = ".opendistro-anomaly-results-history-2020.10.17"
+        try {
+            createTestIndex(adResultIndex, anomalyResultIndexMapping())
+        } catch (e: Exception) {
+            // WarningFailureException is expected as we are creating a system index starting with a dot
+            assertTrue(e is WarningFailureException)
+        }
+
+        val twoMinsAgo = ZonedDateTime.now().minus(2, MINUTES).truncatedTo(MILLIS)
+        val testTime = twoMinsAgo.toEpochSecond() * 1000
+        val testResult1 = randomAnomalyResult(
+            detectorId = detectorId, executionEndTime = testTime, user = user,
+            anomalyGrade = 0.1
+        )
+        indexDoc(adResultIndex, "1", testResult1)
+        val testResult2 = randomAnomalyResult(
+            detectorId = detectorId, executionEndTime = testTime, user = user,
+            anomalyGrade = 0.8
+        )
+        indexDoc(adResultIndex, "2", testResult2)
+        val testResult3 = randomAnomalyResult(
+            detectorId = detectorId, executionEndTime = testTime, user = user,
+            anomalyGrade = 0.5
+        )
+        indexDoc(adResultIndex, "3", testResult3)
+        val testResult4 = randomAnomalyResult(
+            detectorId = detectorId, executionEndTime = testTime,
+            user = User(user.name, listOf(), user.roles, user.customAttNames),
+            anomalyGrade = 0.9
+        )
+        indexDoc(adResultIndex, "4", testResult4)
+        // User is null
+        val testResult5 = randomAnomalyResultWithoutUser(
+            detectorId = detectorId, executionEndTime = testTime,
+            anomalyGrade = 0.75
+        )
+        indexDoc(adResultIndex, "5", testResult5)
+    }
+
+    // Verifies the per-action throttled counts recorded on an Alert and returns the results keyed by action id.
+    private fun verifyActionExecutionResultInAlert(alert: Alert, expectedResult: Map<String, Int>):
+        MutableMap<String, ActionExecutionResult> {
+        val actionResult = mutableMapOf<String, ActionExecutionResult>()
+        for (result in alert.actionExecutionResults) {
+            val expected = expectedResult[result.actionId]
+            assertEquals(expected, result.throttledCount)
+            actionResult[result.actionId] = result
+        }
+        return actionResult
+    }
+
+    private fun verifyActionThrottleResults(output: MutableMap<String, Any>, expectedResult: Map<String, Boolean>) {
+        for (triggerResult in output.objectMap("trigger_results").values) {
+            for (actionResult in triggerResult.objectMap("action_results").values) {
+                val expected = expectedResult[actionResult["id"]]
+                assertEquals(expected, actionResult["throttled"])
+            }
+        }
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    private fun verifyActionThrottleResultsForBucketLevelMonitor(
+        monitorRunResult: MutableMap<String, Any>,
+        expectedEvents: Set<String>,
+        expectedActionResults: Map<String, Boolean>
+    ) {
+        for (triggerResult in monitorRunResult.objectMap("trigger_results").values) {
+            for (alertEvent in triggerResult.objectMap("action_results")) {
+                assertTrue(expectedEvents.contains(alertEvent.key))
+                val actionResults = alertEvent.value.values as Collection<Map<String, Any>>
+                for (actionResult in actionResults) {
+                    val expected = expectedActionResults[actionResult["id"]]
+                    assertEquals(expected, actionResult["throttled"])
+                }
+            }
+        }
+    }
+
+    private fun verifyAlert(
+        alert: Alert,
+        monitor: Monitor,
+        expectedState: State = ACTIVE,
+        expectNotification: Boolean = true
+    ) {
+        assertNotNull(alert.id)
+        assertNotNull(alert.startTime)
+        if (expectNotification) {
+            assertNotNull(alert.lastNotificationTime)
+        }
+        assertEquals("Alert in wrong state", expectedState, alert.state)
+        if (expectedState == ERROR) {
+            assertNotNull("Missing error message", alert.errorMessage)
+        } else {
+            assertNull("Unexpected error message", alert.errorMessage)
+        }
+        if (expectedState == COMPLETED) {
+            assertNotNull("End time missing for completed alert.", alert.endTime)
+        } else {
+            assertNull("End time set for active alert", alert.endTime)
+        }
+        assertEquals(monitor.id, alert.monitorId)
+        assertEquals(monitor.name, alert.monitorName)
+        assertEquals(monitor.version, alert.monitorVersion)
+
+        // assert trigger exists for alert
+        val trigger = monitor.triggers.single { it.id == alert.triggerId }
+        assertEquals(trigger.name, alert.triggerName)
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    /** helper that returns a field in a json map whose values are all json objects */
+    private fun Map<String, Any>.objectMap(key: String): Map<String, Map<String, Any>> {
+        return this[key] as Map<String, Map<String, Any>>
+    }
+}
diff --git a/alerting/bin/test/org/opensearch/alerting/MonitorTests.kt b/alerting/bin/test/org/opensearch/alerting/MonitorTests.kt
new file mode 100644
index 000000000..f6ed78541
--- /dev/null
+++ b/alerting/bin/test/org/opensearch/alerting/MonitorTests.kt
@@ -0,0 +1,47 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting
+
+import org.opensearch.commons.alerting.model.Trigger
+import org.opensearch.test.OpenSearchTestCase
+import java.lang.IllegalArgumentException
+import java.time.Instant
+
+class MonitorTests : OpenSearchTestCase() {
+
+    fun `test enabled time`() {
+        val monitor = randomQueryLevelMonitor()
+        val enabledMonitor = monitor.copy(enabled = true, enabledTime = Instant.now())
+        try {
+            enabledMonitor.copy(enabled = false)
+            fail("Disabling monitor with enabled time set should fail.")
+        } catch (e: IllegalArgumentException) {
+            // expected
+        }
+
+        val disabledMonitor = monitor.copy(enabled = false, enabledTime = null)
+
+        try {
+            disabledMonitor.copy(enabled = true)
+            fail("Enabling monitor without enabled time should fail.")
+        } catch (e: IllegalArgumentException) {
+            // expected
+        }
+    }
+
+    fun `test max triggers`() {
+        val monitor = randomQueryLevelMonitor()
+
+        val tooManyTriggers = mutableListOf<Trigger>()
+        for (i in 0..10) {
+            tooManyTriggers.add(randomQueryLevelTrigger())
+        }
+
+        try {
+            monitor.copy(triggers = tooManyTriggers)
+            fail("Monitor with too many triggers should be rejected.")
+        } catch (e: IllegalArgumentException) {
+            // expected
+        }
+    }
+}
diff --git a/alerting/bin/test/org/opensearch/alerting/ODFERestTestCase.kt b/alerting/bin/test/org/opensearch/alerting/ODFERestTestCase.kt
new file mode 100644
index 000000000..5cfcf430f
--- /dev/null
+++ b/alerting/bin/test/org/opensearch/alerting/ODFERestTestCase.kt
@@ -0,0 +1,146 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting
+
+import org.apache.http.HttpHost
+import org.junit.After
+import org.opensearch.client.Request
+import org.opensearch.client.RequestOptions
+import org.opensearch.client.RestClient
+import org.opensearch.client.WarningsHandler
+import org.opensearch.common.io.PathUtils
+import org.opensearch.common.settings.Settings
+import org.opensearch.commons.ConfigConstants.OPENSEARCH_SECURITY_SSL_HTTP_ENABLED
+import org.opensearch.commons.ConfigConstants.OPENSEARCH_SECURITY_SSL_HTTP_KEYSTORE_FILEPATH
+import org.opensearch.commons.ConfigConstants.OPENSEARCH_SECURITY_SSL_HTTP_KEYSTORE_KEYPASSWORD
+import org.opensearch.commons.ConfigConstants.OPENSEARCH_SECURITY_SSL_HTTP_KEYSTORE_PASSWORD
+import org.opensearch.commons.ConfigConstants.OPENSEARCH_SECURITY_SSL_HTTP_PEMCERT_FILEPATH
+import org.opensearch.commons.rest.SecureRestClientBuilder
+import org.opensearch.core.xcontent.DeprecationHandler
+import org.opensearch.core.xcontent.MediaType
+import org.opensearch.core.xcontent.NamedXContentRegistry
+import org.opensearch.test.rest.OpenSearchRestTestCase
+import java.io.IOException
+
+/**
+ * Must support the 3 scenario runs below:
+ * 1. Without Security plugin
+ * 2. With Security plugin and https
+ * 3. With Security plugin and http
+ * It's possible to have security enabled with http transport.
+ * client() -> admin user
+ * adminClient() -> adminDN/super-admin user
+ */
+
+abstract class ODFERestTestCase : OpenSearchRestTestCase() {
+
+    fun isHttps(): Boolean {
+        return System.getProperty("https", "false")!!.toBoolean()
+    }
+
+    fun securityEnabled(): Boolean {
+        return System.getProperty("security", "false")!!.toBoolean()
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    fun isNotificationPluginInstalled(): Boolean {
+        val response = entityAsMap(client().makeRequest("GET", "_nodes/plugins"))
+        val nodesInfo = response["nodes"] as Map<String, Map<String, Any>>
+        for (nodeInfo in nodesInfo.values) {
+            val plugins = nodeInfo["plugins"] as List<Map<String, Any>>
+            for (plugin in plugins) {
+                if (plugin["name"] == "opensearch-notifications") {
+                    return true
+                }
+            }
+        }
+        return false
+    }
+
+    override fun getProtocol(): String {
+        return if (isHttps()) {
+            "https"
+        } else {
+            "http"
+        }
+    }
+
+    override fun preserveIndicesUponCompletion(): Boolean {
+        return true
+    }
+
+    open fun preserveODFEIndicesAfterTest(): Boolean = false
+
+    @Throws(IOException::class)
+    @After
+    open fun wipeAllODFEIndices() {
+        if (preserveODFEIndicesAfterTest()) return
+
+        val response = client().performRequest(Request("GET", "/_cat/indices?format=json&expand_wildcards=all"))
+
+        val xContentType = MediaType.fromMediaType(response.entity.contentType.value)
+        xContentType.xContent().createParser(
+            NamedXContentRegistry.EMPTY,
+            DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
+            response.entity.content
+        ).use { parser ->
+            for (index in parser.list()) {
+                val jsonObject: Map<*, *> = index as java.util.HashMap<*, *>
+                val indexName: String = jsonObject["index"] as String
+                // deleting .opendistro_security is not allowed on the cluster
+                if (".opendistro_security" != indexName) {
+                    val request = Request("DELETE", "/$indexName")
+                    // TODO: remove PERMISSIVE option after moving system index access to REST API call
+                    val options = RequestOptions.DEFAULT.toBuilder()
+                    options.setWarningsHandler(WarningsHandler.PERMISSIVE)
+                    request.options = options.build()
+                    adminClient().performRequest(request)
+                }
+            }
+        }
+    }
+
+    /**
+     * Returns the REST client settings used for super-admin actions like cleaning up after the test has completed.
+ */ + override fun restAdminSettings(): Settings { + return Settings + .builder() + .put("http.port", 9200) + .put(OPENSEARCH_SECURITY_SSL_HTTP_ENABLED, isHttps()) + .put(OPENSEARCH_SECURITY_SSL_HTTP_PEMCERT_FILEPATH, "sample.pem") + .put(OPENSEARCH_SECURITY_SSL_HTTP_KEYSTORE_FILEPATH, "test-kirk.jks") + .put(OPENSEARCH_SECURITY_SSL_HTTP_KEYSTORE_PASSWORD, "changeit") + .put(OPENSEARCH_SECURITY_SSL_HTTP_KEYSTORE_KEYPASSWORD, "changeit") + .build() + } + + @Throws(IOException::class) + override fun buildClient(settings: Settings, hosts: Array): RestClient { + if (securityEnabled()) { + val keystore = settings.get(OPENSEARCH_SECURITY_SSL_HTTP_KEYSTORE_FILEPATH) + return when (keystore != null) { + true -> { + // create adminDN (super-admin) client + val uri = javaClass.classLoader.getResource("sample.pem").toURI() + val configPath = PathUtils.get(uri).parent.toAbsolutePath() + SecureRestClientBuilder(settings, configPath).setSocketTimeout(60000).build() + } + false -> { + // create client with passed user + val userName = System.getProperty("user") + val password = System.getProperty("password") + SecureRestClientBuilder(hosts, isHttps(), userName, password).setSocketTimeout(60000).build() + } + } + } else { + val builder = RestClient.builder(*hosts) + configureClient(builder, settings) + builder.setStrictDeprecationMode(true) + return builder.build() + } + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/TestHelpers.kt b/alerting/bin/test/org/opensearch/alerting/TestHelpers.kt new file mode 100644 index 000000000..47d9f522f --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/TestHelpers.kt @@ -0,0 +1,797 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import junit.framework.TestCase.assertNull +import org.apache.http.Header +import org.apache.http.HttpEntity +import org.opensearch.alerting.model.ActionRunResult +import org.opensearch.alerting.model.BucketLevelTriggerRunResult +import org.opensearch.alerting.model.DocumentLevelTriggerRunResult +import org.opensearch.alerting.model.InputRunResults +import org.opensearch.alerting.model.MonitorRunResult +import org.opensearch.alerting.model.QueryLevelTriggerRunResult +import org.opensearch.alerting.model.destination.email.EmailAccount +import org.opensearch.alerting.model.destination.email.EmailEntry +import org.opensearch.alerting.model.destination.email.EmailGroup +import org.opensearch.alerting.util.getBucketKeysHash +import org.opensearch.client.Request +import org.opensearch.client.RequestOptions +import org.opensearch.client.Response +import org.opensearch.client.RestClient +import org.opensearch.client.WarningsHandler +import org.opensearch.common.UUIDs +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentFactory +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtFilter +import org.opensearch.commons.alerting.model.ActionExecutionResult +import org.opensearch.commons.alerting.model.AggregationResultBucket +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.BucketLevelTrigger +import org.opensearch.commons.alerting.model.ChainedAlertTrigger +import 
org.opensearch.commons.alerting.model.ChainedMonitorFindings +import org.opensearch.commons.alerting.model.ClusterMetricsInput +import org.opensearch.commons.alerting.model.CompositeInput +import org.opensearch.commons.alerting.model.DataSources +import org.opensearch.commons.alerting.model.Delegate +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.DocumentLevelTrigger +import org.opensearch.commons.alerting.model.Finding +import org.opensearch.commons.alerting.model.Input +import org.opensearch.commons.alerting.model.IntervalSchedule +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.QueryLevelTrigger +import org.opensearch.commons.alerting.model.Schedule +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.alerting.model.Sequence +import org.opensearch.commons.alerting.model.Trigger +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.commons.alerting.model.Workflow.WorkflowType +import org.opensearch.commons.alerting.model.action.Action +import org.opensearch.commons.alerting.model.action.ActionExecutionPolicy +import org.opensearch.commons.alerting.model.action.ActionExecutionScope +import org.opensearch.commons.alerting.model.action.AlertCategory +import org.opensearch.commons.alerting.model.action.PerAlertActionScope +import org.opensearch.commons.alerting.model.action.PerExecutionActionScope +import org.opensearch.commons.alerting.model.action.Throttle +import org.opensearch.commons.alerting.util.string +import org.opensearch.commons.authuser.User +import org.opensearch.core.common.settings.SecureString +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.index.query.QueryBuilders +import org.opensearch.script.Script +import org.opensearch.script.ScriptType +import org.opensearch.search.SearchModule +import org.opensearch.search.aggregations.bucket.terms.IncludeExclude +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.test.OpenSearchTestCase.randomBoolean +import org.opensearch.test.OpenSearchTestCase.randomInt +import org.opensearch.test.OpenSearchTestCase.randomIntBetween +import org.opensearch.test.rest.OpenSearchRestTestCase +import java.time.Instant +import java.time.temporal.ChronoUnit + +fun randomQueryLevelMonitor( + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + user: User = randomUser(), + inputs: List = listOf(SearchInput(emptyList(), SearchSourceBuilder().query(QueryBuilders.matchAllQuery()))), + schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), + enabled: Boolean = randomBoolean(), + triggers: List = (1..randomInt(10)).map { randomQueryLevelTrigger() }, + enabledTime: Instant? 
= if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, + lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), + withMetadata: Boolean = false +): Monitor { + return Monitor( + name = name, monitorType = Monitor.MonitorType.QUERY_LEVEL_MONITOR, enabled = enabled, inputs = inputs, + schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, + uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() + ) +} + +// Monitor of older versions without security. +fun randomQueryLevelMonitorWithoutUser( + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + inputs: List = listOf(SearchInput(emptyList(), SearchSourceBuilder().query(QueryBuilders.matchAllQuery()))), + schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), + enabled: Boolean = randomBoolean(), + triggers: List = (1..randomInt(10)).map { randomQueryLevelTrigger() }, + enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, + lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), + withMetadata: Boolean = false +): Monitor { + return Monitor( + name = name, monitorType = Monitor.MonitorType.QUERY_LEVEL_MONITOR, enabled = enabled, inputs = inputs, + schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = null, + uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() + ) +} + +fun randomBucketLevelMonitor( + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + user: User = randomUser(), + inputs: List = listOf( + SearchInput( + emptyList(), + SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) + .aggregation(TermsAggregationBuilder("test_agg").field("test_field")) + ) + ), + schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), + enabled: Boolean = randomBoolean(), + triggers: List = (1..randomInt(10)).map { randomBucketLevelTrigger() }, + enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, + lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), + withMetadata: Boolean = false +): Monitor { + return Monitor( + name = name, monitorType = Monitor.MonitorType.BUCKET_LEVEL_MONITOR, enabled = enabled, inputs = inputs, + schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, + uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() + ) +} + +fun randomBucketLevelMonitor( + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + user: User = randomUser(), + inputs: List = listOf( + SearchInput( + emptyList(), + SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) + .aggregation(TermsAggregationBuilder("test_agg").field("test_field")) + ) + ), + schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), + enabled: Boolean = randomBoolean(), + triggers: List = (1..randomInt(10)).map { randomBucketLevelTrigger() }, + enabledTime: Instant? 
= if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, + lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), + withMetadata: Boolean = false, + dataSources: DataSources +): Monitor { + return Monitor( + name = name, monitorType = Monitor.MonitorType.BUCKET_LEVEL_MONITOR, enabled = enabled, inputs = inputs, + schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, + uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf(), + dataSources = dataSources + ) +} + +fun randomClusterMetricsMonitor( + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + user: User = randomUser(), + inputs: List = listOf(randomClusterMetricsInput()), + schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), + enabled: Boolean = randomBoolean(), + triggers: List = (1..randomInt(10)).map { randomQueryLevelTrigger() }, + enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, + lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), + withMetadata: Boolean = false +): Monitor { + return Monitor( + name = name, monitorType = Monitor.MonitorType.CLUSTER_METRICS_MONITOR, enabled = enabled, inputs = inputs, + schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, + uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() + ) +} + +fun randomDocumentLevelMonitor( + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + user: User? = randomUser(), + inputs: List = listOf(DocLevelMonitorInput("description", listOf("index"), emptyList())), + schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), + enabled: Boolean = randomBoolean(), + triggers: List = (1..randomInt(10)).map { randomQueryLevelTrigger() }, + enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, + lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), + withMetadata: Boolean = false, + owner: String? = null +): Monitor { + return Monitor( + name = name, monitorType = Monitor.MonitorType.DOC_LEVEL_MONITOR, enabled = enabled, inputs = inputs, + schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, + uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf(), owner = owner + ) +} + +fun randomDocumentLevelMonitor( + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + user: User? = randomUser(), + inputs: List = listOf(DocLevelMonitorInput("description", listOf("index"), emptyList())), + schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), + enabled: Boolean = randomBoolean(), + triggers: List = (1..randomInt(10)).map { randomQueryLevelTrigger() }, + enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, + lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), + withMetadata: Boolean = false, + dataSources: DataSources, + owner: String? 
= null +): Monitor { + return Monitor( + name = name, monitorType = Monitor.MonitorType.DOC_LEVEL_MONITOR, enabled = enabled, inputs = inputs, + schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, + uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf(), dataSources = dataSources, owner = owner + ) +} + +fun randomWorkflow( + id: String = Workflow.NO_ID, + monitorIds: List, + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + user: User? = randomUser(), + schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), + enabled: Boolean = randomBoolean(), + enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, + lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), + triggers: List = emptyList(), + auditDelegateMonitorAlerts: Boolean? = true +): Workflow { + val delegates = mutableListOf() + if (!monitorIds.isNullOrEmpty()) { + delegates.add(Delegate(1, monitorIds[0])) + for (i in 1 until monitorIds.size) { + // Order of monitors in workflow will be the same like forwarded meaning that the first monitorId will be used as second monitor chained finding + delegates.add(Delegate(i + 1, monitorIds [i], ChainedMonitorFindings(monitorIds[i - 1]))) + } + } + + return Workflow( + id = id, + name = name, + enabled = enabled, + schedule = schedule, + lastUpdateTime = lastUpdateTime, + enabledTime = enabledTime, + workflowType = WorkflowType.COMPOSITE, + user = user, + inputs = listOf(CompositeInput(Sequence(delegates))), + version = -1L, + schemaVersion = 0, + triggers = triggers, + auditDelegateMonitorAlerts = auditDelegateMonitorAlerts + ) +} + +fun randomWorkflowWithDelegates( + id: String = Workflow.NO_ID, + delegates: List, + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + user: User? = randomUser(), + schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), + enabled: Boolean = randomBoolean(), + enabledTime: Instant? 
= if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, + lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), + triggers: List = emptyList() +): Workflow { + return Workflow( + id = id, + name = name, + enabled = enabled, + schedule = schedule, + lastUpdateTime = lastUpdateTime, + enabledTime = enabledTime, + workflowType = WorkflowType.COMPOSITE, + user = user, + inputs = listOf(CompositeInput(Sequence(delegates))), + version = -1L, + schemaVersion = 0, + triggers = triggers + ) +} + +fun randomQueryLevelTrigger( + id: String = UUIDs.base64UUID(), + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + severity: String = "1", + condition: Script = randomScript(), + actions: List = mutableListOf(), + destinationId: String = "" +): QueryLevelTrigger { + return QueryLevelTrigger( + id = id, + name = name, + severity = severity, + condition = condition, + actions = if (actions.isEmpty()) (0..randomInt(10)).map { randomAction(destinationId = destinationId) } else actions + ) +} + +fun randomBucketLevelTrigger( + id: String = UUIDs.base64UUID(), + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + severity: String = "1", + bucketSelector: BucketSelectorExtAggregationBuilder = randomBucketSelectorExtAggregationBuilder(name = id), + actions: List = mutableListOf(), + destinationId: String = "" +): BucketLevelTrigger { + return BucketLevelTrigger( + id = id, + name = name, + severity = severity, + bucketSelector = bucketSelector, + actions = if (actions.isEmpty()) randomActionsForBucketLevelTrigger(destinationId = destinationId) else actions + ) +} + +fun randomActionsForBucketLevelTrigger(min: Int = 0, max: Int = 10, destinationId: String = ""): List = + (min..randomInt(max)).map { randomActionWithPolicy(destinationId = destinationId) } + +fun randomDocumentLevelTrigger( + id: String = UUIDs.base64UUID(), + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + severity: String = "1", + condition: Script = randomScript(), + actions: List = mutableListOf(), + destinationId: String = "" +): DocumentLevelTrigger { + return DocumentLevelTrigger( + id = id, + name = name, + severity = severity, + condition = condition, + actions = if (actions.isEmpty() && destinationId.isNotBlank()) { + (0..randomInt(10)).map { randomAction(destinationId = destinationId) } + } else actions + ) +} + +fun randomBucketSelectorExtAggregationBuilder( + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + bucketsPathsMap: MutableMap = mutableMapOf("avg" to "10"), + script: Script = randomBucketSelectorScript(params = bucketsPathsMap), + parentBucketPath: String = "testPath", + filter: BucketSelectorExtFilter = BucketSelectorExtFilter(IncludeExclude("foo*", "bar*")) +): BucketSelectorExtAggregationBuilder { + return BucketSelectorExtAggregationBuilder(name, bucketsPathsMap, script, parentBucketPath, filter) +} + +fun randomBucketSelectorScript( + idOrCode: String = "params.avg >= 0", + params: Map = mutableMapOf("avg" to "10") +): Script { + return Script(Script.DEFAULT_SCRIPT_TYPE, Script.DEFAULT_SCRIPT_LANG, idOrCode, emptyMap(), params) +} + +fun randomEmailAccount( + salt: String = "", + name: String = salt + OpenSearchRestTestCase.randomAlphaOfLength(10), + email: String = salt + OpenSearchRestTestCase.randomAlphaOfLength(5) + "@email.com", + host: String = salt + OpenSearchRestTestCase.randomAlphaOfLength(10), + port: Int = randomIntBetween(1, 100), + method: EmailAccount.MethodType = randomEmailAccountMethod(), + username: SecureString? 
= null, + password: SecureString? = null +): EmailAccount { + return EmailAccount( + name = name, + email = email, + host = host, + port = port, + method = method, + username = username, + password = password + ) +} + +fun randomEmailGroup( + salt: String = "", + name: String = salt + OpenSearchRestTestCase.randomAlphaOfLength(10), + emails: List = (1..randomInt(10)).map { + EmailEntry(email = salt + OpenSearchRestTestCase.randomAlphaOfLength(5) + "@email.com") + } +): EmailGroup { + return EmailGroup(name = name, emails = emails) +} + +fun randomScript(source: String = "return " + OpenSearchRestTestCase.randomBoolean().toString()): Script = Script(source) + +val ADMIN = "admin" +val ALERTING_BASE_URI = "/_plugins/_alerting/monitors" +val WORKFLOW_ALERTING_BASE_URI = "/_plugins/_alerting/workflows" +val DESTINATION_BASE_URI = "/_plugins/_alerting/destinations" +val LEGACY_OPENDISTRO_ALERTING_BASE_URI = "/_opendistro/_alerting/monitors" +val LEGACY_OPENDISTRO_DESTINATION_BASE_URI = "/_opendistro/_alerting/destinations" +val ALWAYS_RUN = Script("return true") +val NEVER_RUN = Script("return false") +val DRYRUN_MONITOR = mapOf("dryrun" to "true") +val TEST_HR_INDEX = "hr_data" +val TEST_NON_HR_INDEX = "not_hr_data" +val TEST_HR_ROLE = "hr_role" +val TEST_HR_BACKEND_ROLE = "HR" + +// Using a triple-quote string for the query so escaped quotes are kept as-is +// in the request made using triple-quote strings (i.e. createIndexRoleWithDocLevelSecurity). +// Removing the escape slash in the request causes the security API role request to fail with parsing exception. +val TERM_DLS_QUERY = """{\"term\": { \"accessible\": true}}""" + +fun randomTemplateScript( + source: String, + params: Map = emptyMap() +): Script = Script(ScriptType.INLINE, Script.DEFAULT_TEMPLATE_LANG, source, params) + +fun randomAction( + name: String = OpenSearchRestTestCase.randomUnicodeOfLength(10), + template: Script = randomTemplateScript("Hello World"), + destinationId: String = "", + throttleEnabled: Boolean = false, + throttle: Throttle = randomThrottle() +) = Action(name, destinationId, template, template, throttleEnabled, throttle, actionExecutionPolicy = null) + +fun randomActionWithPolicy( + name: String = OpenSearchRestTestCase.randomUnicodeOfLength(10), + template: Script = randomTemplateScript("Hello World"), + destinationId: String = "", + throttleEnabled: Boolean = false, + throttle: Throttle = randomThrottle(), + actionExecutionPolicy: ActionExecutionPolicy? 
= randomActionExecutionPolicy() +): Action { + return if (actionExecutionPolicy?.actionExecutionScope is PerExecutionActionScope) { + // Return null for throttle when using PerExecutionActionScope since throttling is currently not supported for it + Action(name, destinationId, template, template, throttleEnabled, null, actionExecutionPolicy = actionExecutionPolicy) + } else { + Action(name, destinationId, template, template, throttleEnabled, throttle, actionExecutionPolicy = actionExecutionPolicy) + } +} + +fun randomThrottle( + value: Int = randomIntBetween(60, 120), + unit: ChronoUnit = ChronoUnit.MINUTES +) = Throttle(value, unit) + +fun randomActionExecutionPolicy( + actionExecutionScope: ActionExecutionScope = randomActionExecutionScope() +) = ActionExecutionPolicy(actionExecutionScope) + +fun randomActionExecutionScope(): ActionExecutionScope { + return if (randomBoolean()) { + val alertCategories = AlertCategory.values() + PerAlertActionScope(actionableAlerts = (1..randomInt(alertCategories.size)).map { alertCategories[it - 1] }.toSet()) + } else { + PerExecutionActionScope() + } +} + +fun randomAlert(monitor: Monitor = randomQueryLevelMonitor()): Alert { + val trigger = randomQueryLevelTrigger() + val actionExecutionResults = mutableListOf(randomActionExecutionResult(), randomActionExecutionResult()) + return Alert( + monitor, + trigger, + Instant.now().truncatedTo(ChronoUnit.MILLIS), + null, + actionExecutionResults = actionExecutionResults + ) +} + +fun randomDocLevelQuery( + id: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + query: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + name: String = "${randomInt(5)}", + tags: List = mutableListOf(0..randomInt(10)).map { OpenSearchRestTestCase.randomAlphaOfLength(10) } +): DocLevelQuery { + return DocLevelQuery(id = id, query = query, name = name, tags = tags, fields = listOf()) +} + +fun randomDocLevelMonitorInput( + description: String = OpenSearchRestTestCase.randomAlphaOfLength(randomInt(10)), + indices: List = listOf(1..randomInt(10)).map { OpenSearchRestTestCase.randomAlphaOfLength(10) }, + queries: List = listOf(1..randomInt(10)).map { randomDocLevelQuery() } +): DocLevelMonitorInput { + return DocLevelMonitorInput(description = description, indices = indices, queries = queries) +} + +fun randomFinding( + id: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + relatedDocIds: List = listOf(OpenSearchRestTestCase.randomAlphaOfLength(10)), + monitorId: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + monitorName: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + index: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + docLevelQueries: List = listOf(randomDocLevelQuery()), + timestamp: Instant = Instant.now() +): Finding { + return Finding( + id = id, + relatedDocIds = relatedDocIds, + monitorId = monitorId, + monitorName = monitorName, + index = index, + docLevelQueries = docLevelQueries, + timestamp = timestamp + ) +} + +fun randomAlertWithAggregationResultBucket(monitor: Monitor = randomBucketLevelMonitor()): Alert { + val trigger = randomBucketLevelTrigger() + val actionExecutionResults = mutableListOf(randomActionExecutionResult(), randomActionExecutionResult()) + return Alert( + monitor, + trigger, + Instant.now().truncatedTo(ChronoUnit.MILLIS), + null, + actionExecutionResults = actionExecutionResults, + aggregationResultBucket = AggregationResultBucket( + "parent_bucket_path_1", + listOf("bucket_key_1"), + mapOf("k1" to "val1", "k2" to "val2") + ) + ) +} + +fun 
randomEmailAccountMethod(): EmailAccount.MethodType { + val methodValues = EmailAccount.MethodType.values().map { it.value } + val randomValue = methodValues[randomInt(methodValues.size - 1)] + return EmailAccount.MethodType.getByValue(randomValue)!! +} + +fun randomActionExecutionResult( + actionId: String = UUIDs.base64UUID(), + lastExecutionTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), + throttledCount: Int = randomInt() +) = ActionExecutionResult(actionId, lastExecutionTime, throttledCount) + +fun randomQueryLevelMonitorRunResult(): MonitorRunResult { + val triggerResults = mutableMapOf() + val triggerRunResult = randomQueryLevelTriggerRunResult() + triggerResults.plus(Pair("test", triggerRunResult)) + + return MonitorRunResult( + "test-monitor", + Instant.now(), + Instant.now(), + null, + randomInputRunResults(), + triggerResults + ) +} + +fun randomBucketLevelMonitorRunResult(): MonitorRunResult { + val triggerResults = mutableMapOf() + val triggerRunResult = randomBucketLevelTriggerRunResult() + triggerResults.plus(Pair("test", triggerRunResult)) + + return MonitorRunResult( + "test-monitor", + Instant.now(), + Instant.now(), + null, + randomInputRunResults(), + triggerResults + ) +} + +fun randomDocumentLevelMonitorRunResult(): MonitorRunResult { + val triggerResults = mutableMapOf() + val triggerRunResult = randomDocumentLevelTriggerRunResult() + triggerResults.plus(Pair("test", triggerRunResult)) + + return MonitorRunResult( + "test-monitor", + Instant.now(), + Instant.now(), + null, + randomInputRunResults(), + triggerResults + ) +} + +fun randomInputRunResults(): InputRunResults { + return InputRunResults(listOf(), null) +} + +fun randomQueryLevelTriggerRunResult(): QueryLevelTriggerRunResult { + val map = mutableMapOf() + map.plus(Pair("key1", randomActionRunResult())) + map.plus(Pair("key2", randomActionRunResult())) + return QueryLevelTriggerRunResult("trigger-name", true, null, map) +} + +fun randomClusterMetricsInput( + path: String = ClusterMetricsInput.ClusterMetricType.CLUSTER_HEALTH.defaultPath, + pathParams: String = "", + url: String = "" +): ClusterMetricsInput { + return ClusterMetricsInput(path, pathParams, url) +} + +fun randomBucketLevelTriggerRunResult(): BucketLevelTriggerRunResult { + val map = mutableMapOf() + map.plus(Pair("key1", randomActionRunResult())) + map.plus(Pair("key2", randomActionRunResult())) + + val aggBucket1 = AggregationResultBucket( + "parent_bucket_path_1", + listOf("bucket_key_1"), + mapOf("k1" to "val1", "k2" to "val2") + ) + val aggBucket2 = AggregationResultBucket( + "parent_bucket_path_2", + listOf("bucket_key_2"), + mapOf("k1" to "val1", "k2" to "val2") + ) + + val actionResultsMap: MutableMap> = mutableMapOf() + actionResultsMap[aggBucket1.getBucketKeysHash()] = map + actionResultsMap[aggBucket2.getBucketKeysHash()] = map + + return BucketLevelTriggerRunResult( + "trigger-name", + null, + mapOf( + aggBucket1.getBucketKeysHash() to aggBucket1, + aggBucket2.getBucketKeysHash() to aggBucket2 + ), + actionResultsMap + ) +} + +fun randomDocumentLevelTriggerRunResult(): DocumentLevelTriggerRunResult { + val map = mutableMapOf() + map.plus(Pair("key1", randomActionRunResult())) + map.plus(Pair("key2", randomActionRunResult())) + return DocumentLevelTriggerRunResult( + "trigger-name", + mutableListOf(UUIDs.randomBase64UUID().toString()), + null, + mutableMapOf(Pair("alertId", map)) + ) +} + +fun randomActionRunResult(): ActionRunResult { + val map = mutableMapOf() + map.plus(Pair("key1", "val1")) + map.plus(Pair("key2", 
"val2")) + return ActionRunResult( + "1234", + "test-action", + map, + false, + Instant.now(), + null + ) +} + +fun Alert.toJsonString(): String { + val builder = XContentFactory.jsonBuilder() + return this.toXContent(builder, ToXContent.EMPTY_PARAMS).string() +} + +fun randomUser(): User { + return User( + OpenSearchRestTestCase.randomAlphaOfLength(10), + listOf( + OpenSearchRestTestCase.randomAlphaOfLength(10), + OpenSearchRestTestCase.randomAlphaOfLength(10) + ), + listOf(OpenSearchRestTestCase.randomAlphaOfLength(10), ALL_ACCESS_ROLE), + listOf("test_attr=test") + ) +} + +fun randomUserEmpty(): User { + return User("", listOf(), listOf(), listOf()) +} + +fun EmailAccount.toJsonString(): String { + val builder = XContentFactory.jsonBuilder() + return this.toXContent(builder).string() +} + +fun EmailGroup.toJsonString(): String { + val builder = XContentFactory.jsonBuilder() + return this.toXContent(builder).string() +} + +/** + * Wrapper for [RestClient.performRequest] which was deprecated in ES 6.5 and is used in tests. This provides + * a single place to suppress deprecation warnings. This will probably need further work when the API is removed entirely + * but that's an exercise for another day. + */ +@Suppress("DEPRECATION") +fun RestClient.makeRequest( + method: String, + endpoint: String, + params: Map = emptyMap(), + entity: HttpEntity? = null, + vararg headers: Header +): Response { + val request = Request(method, endpoint) + // TODO: remove PERMISSIVE option after moving system index access to REST API call + val options = RequestOptions.DEFAULT.toBuilder() + options.setWarningsHandler(WarningsHandler.PERMISSIVE) + headers.forEach { options.addHeader(it.name, it.value) } + request.options = options.build() + params.forEach { request.addParameter(it.key, it.value) } + if (entity != null) { + request.entity = entity + } + return performRequest(request) +} + +/** + * Wrapper for [RestClient.performRequest] which was deprecated in ES 6.5 and is used in tests. This provides + * a single place to suppress deprecation warnings. This will probably need further work when the API is removed entirely + * but that's an exercise for another day. + */ +@Suppress("DEPRECATION") +fun RestClient.makeRequest( + method: String, + endpoint: String, + entity: HttpEntity? 
= null, + vararg headers: Header +): Response { + val request = Request(method, endpoint) + val options = RequestOptions.DEFAULT.toBuilder() + // TODO: remove PERMISSIVE option after moving system index access to REST API call + options.setWarningsHandler(WarningsHandler.PERMISSIVE) + headers.forEach { options.addHeader(it.name, it.value) } + request.options = options.build() + if (entity != null) { + request.entity = entity + } + return performRequest(request) +} + +fun builder(): XContentBuilder { + return XContentBuilder.builder(XContentType.JSON.xContent()) +} + +fun parser(xc: String): XContentParser { + val parser = XContentType.JSON.xContent().createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, xc) + parser.nextToken() + return parser +} + +fun xContentRegistry(): NamedXContentRegistry { + return NamedXContentRegistry( + listOf( + SearchInput.XCONTENT_REGISTRY, + DocLevelMonitorInput.XCONTENT_REGISTRY, + QueryLevelTrigger.XCONTENT_REGISTRY, + BucketLevelTrigger.XCONTENT_REGISTRY, + DocumentLevelTrigger.XCONTENT_REGISTRY + ) + SearchModule(Settings.EMPTY, emptyList()).namedXContents + ) +} + +fun assertUserNull(map: Map) { + val user = map["user"] + assertNull("User is not null", user) +} + +fun assertUserNull(monitor: Monitor) { + assertNull("User is not null", monitor.user) +} + +fun assertUserNull(workflow: Workflow) { + assertNull("User is not null", workflow.user) +} + +fun randomChainedAlertTrigger( + id: String = UUIDs.base64UUID(), + name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), + severity: String = "1", + condition: Script = randomScript(), + actions: List = mutableListOf(), + destinationId: String = "" +): ChainedAlertTrigger { + return ChainedAlertTrigger( + id = id, + name = name, + severity = severity, + condition = condition, + actions = if (actions.isEmpty() && destinationId.isNotBlank()) { + (0..randomInt(10)).map { randomAction(destinationId = destinationId) } + } else actions + ) +} diff --git a/alerting/bin/test/org/opensearch/alerting/TriggerServiceTests.kt b/alerting/bin/test/org/opensearch/alerting/TriggerServiceTests.kt new file mode 100644 index 000000000..6076ebac6 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/TriggerServiceTests.kt @@ -0,0 +1,260 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting + +import org.junit.Before +import org.mockito.Mockito +import org.opensearch.alerting.model.BucketLevelTriggerRunResult +import org.opensearch.alerting.model.InputRunResults +import org.opensearch.alerting.model.MonitorRunResult +import org.opensearch.alerting.script.BucketLevelTriggerExecutionContext +import org.opensearch.common.xcontent.XContentType +import org.opensearch.core.xcontent.DeprecationHandler +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.script.ScriptService +import org.opensearch.test.OpenSearchTestCase +import java.time.Instant + +class TriggerServiceTests : OpenSearchTestCase() { + private lateinit var scriptService: ScriptService + private lateinit var triggerService: TriggerService + + @Before + fun setup() { + scriptService = Mockito.mock(ScriptService::class.java) + triggerService = TriggerService(scriptService) + } + + fun `test run bucket level trigger with bucket key as int`() { + val bucketSelectorExtAggregationBuilder = randomBucketSelectorExtAggregationBuilder( + bucketsPathsMap = mutableMapOf("_count" to "_count", "_key" to "_key"), + script = randomScript(source = "params._count 
> 0"), + parentBucketPath = "status_code" + ) + val trigger = randomBucketLevelTrigger(bucketSelector = bucketSelectorExtAggregationBuilder) + val monitor = randomBucketLevelMonitor(triggers = listOf(trigger)) + + val inputResultsStr = "{\n" + + " \"_shards\": {\n" + + " \"total\": 1,\n" + + " \"failed\": 0,\n" + + " \"successful\": 1,\n" + + " \"skipped\": 0\n" + + " },\n" + + " \"hits\": {\n" + + " \"hits\": [\n" + + " {\n" + + " \"_index\": \"sample-http-responses\",\n" + + " \"_type\": \"http\",\n" + + " \"_source\": {\n" + + " \"status_code\": 100,\n" + + " \"http_4xx\": 0,\n" + + " \"http_3xx\": 0,\n" + + " \"http_5xx\": 0,\n" + + " \"http_2xx\": 0,\n" + + " \"timestamp\": 100000,\n" + + " \"http_1xx\": 1\n" + + " },\n" + + " \"_id\": 1,\n" + + " \"_score\": 1\n" + + " }\n" + + " ],\n" + + " \"total\": {\n" + + " \"value\": 4,\n" + + " \"relation\": \"eq\"\n" + + " },\n" + + " \"max_score\": 1\n" + + " },\n" + + " \"took\": 37,\n" + + " \"timed_out\": false,\n" + + " \"aggregations\": {\n" + + " \"status_code\": {\n" + + " \"doc_count_error_upper_bound\": 0,\n" + + " \"sum_other_doc_count\": 0,\n" + + " \"buckets\": [\n" + + " {\n" + + " \"doc_count\": 2,\n" + + " \"key\": 100\n" + + " },\n" + + " {\n" + + " \"doc_count\": 1,\n" + + " \"key\": 102\n" + + " },\n" + + " {\n" + + " \"doc_count\": 1,\n" + + " \"key\": 201\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"${trigger.id}\": {\n" + + " \"parent_bucket_path\": \"status_code\",\n" + + " \"bucket_indices\": [\n" + + " 0,\n" + + " 1,\n" + + " 2\n" + + " ]\n" + + " }\n" + + " }\n" + + "}" + + val parser = XContentType.JSON.xContent() + .createParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + inputResultsStr + ) + + val inputResults = parser.map() + + var monitorRunResult = MonitorRunResult(monitor.name, Instant.now(), Instant.now()) + monitorRunResult = monitorRunResult.copy(inputResults = InputRunResults(listOf(inputResults))) + val triggerCtx = BucketLevelTriggerExecutionContext(monitor, trigger, monitorRunResult) + + val bucketLevelTriggerRunResult = triggerService.runBucketLevelTrigger(monitor, trigger, triggerCtx) + assertNull(bucketLevelTriggerRunResult.error) + } + + fun `test run bucket level trigger with bucket key as map`() { + val bucketSelectorExtAggregationBuilder = randomBucketSelectorExtAggregationBuilder( + bucketsPathsMap = mutableMapOf("_count" to "_count", "_key" to "_key"), + script = randomScript(source = "params._count > 0"), + parentBucketPath = "status_code" + ) + val trigger = randomBucketLevelTrigger(bucketSelector = bucketSelectorExtAggregationBuilder) + val monitor = randomBucketLevelMonitor(triggers = listOf(trigger)) + + val inputResultsStr = "{\n" + + " \"_shards\": {\n" + + " \"total\": 1,\n" + + " \"failed\": 0,\n" + + " \"successful\": 1,\n" + + " \"skipped\": 0\n" + + " },\n" + + " \"hits\": {\n" + + " \"hits\": [\n" + + " {\n" + + " \"_index\": \"sample-http-responses\",\n" + + " \"_type\": \"http\",\n" + + " \"_source\": {\n" + + " \"status_code\": 100,\n" + + " \"http_4xx\": 0,\n" + + " \"http_3xx\": 0,\n" + + " \"http_5xx\": 0,\n" + + " \"http_2xx\": 0,\n" + + " \"timestamp\": 100000,\n" + + " \"http_1xx\": 1\n" + + " },\n" + + " \"_id\": 1,\n" + + " \"_score\": 1\n" + + " },\n" + + " {\n" + + " \"_index\": \"sample-http-responses\",\n" + + " \"_type\": \"http\",\n" + + " \"_source\": {\n" + + " \"status_code\": 102,\n" + + " \"http_4xx\": 0,\n" + + " \"http_3xx\": 0,\n" + + " \"http_5xx\": 0,\n" + + " \"http_2xx\": 0,\n" + + " \"timestamp\": 160000,\n" + 
+ " \"http_1xx\": 1\n" + + " },\n" + + " \"_id\": 2,\n" + + " \"_score\": 1\n" + + " },\n" + + " {\n" + + " \"_index\": \"sample-http-responses\",\n" + + " \"_type\": \"http\",\n" + + " \"_source\": {\n" + + " \"status_code\": 100,\n" + + " \"http_4xx\": 0,\n" + + " \"http_3xx\": 0,\n" + + " \"http_5xx\": 0,\n" + + " \"http_2xx\": 0,\n" + + " \"timestamp\": 220000,\n" + + " \"http_1xx\": 1\n" + + " },\n" + + " \"_id\": 4,\n" + + " \"_score\": 1\n" + + " },\n" + + " {\n" + + " \"_index\": \"sample-http-responses\",\n" + + " \"_type\": \"http\",\n" + + " \"_source\": {\n" + + " \"status_code\": 201,\n" + + " \"http_4xx\": 0,\n" + + " \"http_3xx\": 0,\n" + + " \"http_5xx\": 0,\n" + + " \"http_2xx\": 1,\n" + + " \"timestamp\": 280000,\n" + + " \"http_1xx\": 0\n" + + " },\n" + + " \"_id\": 5,\n" + + " \"_score\": 1\n" + + " }\n" + + " ],\n" + + " \"total\": {\n" + + " \"value\": 4,\n" + + " \"relation\": \"eq\"\n" + + " },\n" + + " \"max_score\": 1\n" + + " },\n" + + " \"took\": 15,\n" + + " \"timed_out\": false,\n" + + " \"aggregations\": {\n" + + " \"${trigger.id}\": {\n" + + " \"parent_bucket_path\": \"status_code\",\n" + + " \"bucket_indices\": [\n" + + " 0,\n" + + " 1,\n" + + " 2\n" + + " ]\n" + + " },\n" + + " \"status_code\": {\n" + + " \"buckets\": [\n" + + " {\n" + + " \"doc_count\": 2,\n" + + " \"key\": {\n" + + " \"status_code\": 100\n" + + " }\n" + + " },\n" + + " {\n" + + " \"doc_count\": 1,\n" + + " \"key\": {\n" + + " \"status_code\": 102\n" + + " }\n" + + " },\n" + + " {\n" + + " \"doc_count\": 1,\n" + + " \"key\": {\n" + + " \"status_code\": 201\n" + + " }\n" + + " }\n" + + " ],\n" + + " \"after_key\": {\n" + + " \"status_code\": 201\n" + + " }\n" + + " }\n" + + " }\n" + + "}" + + val parser = XContentType.JSON.xContent() + .createParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + inputResultsStr + ) + + val inputResults = parser.map() + + var monitorRunResult = MonitorRunResult(monitor.name, Instant.now(), Instant.now()) + monitorRunResult = monitorRunResult.copy(inputResults = InputRunResults(listOf(inputResults))) + val triggerCtx = BucketLevelTriggerExecutionContext(monitor, trigger, monitorRunResult) + + val bucketLevelTriggerRunResult = triggerService.runBucketLevelTrigger(monitor, trigger, triggerCtx) + assertNull(bucketLevelTriggerRunResult.error) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorActionTests.kt b/alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorActionTests.kt new file mode 100644 index 000000000..d284fc70e --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorActionTests.kt @@ -0,0 +1,15 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ +package org.opensearch.alerting.action + +import org.opensearch.test.OpenSearchTestCase + +class ExecuteMonitorActionTests : OpenSearchTestCase() { + + fun `test execute monitor action name`() { + assertNotNull(ExecuteMonitorAction.INSTANCE.name()) + assertEquals(ExecuteMonitorAction.INSTANCE.name(), ExecuteMonitorAction.NAME) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorRequestTests.kt b/alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorRequestTests.kt new file mode 100644 index 000000000..f54b6fea6 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorRequestTests.kt @@ -0,0 +1,47 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package 
org.opensearch.alerting.action + +import org.opensearch.alerting.randomQueryLevelMonitor +import org.opensearch.common.io.stream.BytesStreamOutput +import org.opensearch.common.unit.TimeValue +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.test.OpenSearchTestCase + +class ExecuteMonitorRequestTests : OpenSearchTestCase() { + + fun `test execute monitor request with id`() { + + val req = ExecuteMonitorRequest(false, TimeValue.timeValueSeconds(100L), "1234", null) + assertNotNull(req) + + val out = BytesStreamOutput() + req.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newReq = ExecuteMonitorRequest(sin) + assertEquals("1234", newReq.monitorId) + assertEquals(false, newReq.dryrun) + assertNull(newReq.monitor) + assertEquals(req.monitor, newReq.monitor) + } + + fun `test execute monitor request with monitor`() { + val monitor = randomQueryLevelMonitor().copy(inputs = listOf(SearchInput(emptyList(), SearchSourceBuilder()))) + val req = ExecuteMonitorRequest(false, TimeValue.timeValueSeconds(100L), null, monitor) + assertNotNull(req.monitor) + + val out = BytesStreamOutput() + req.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newReq = ExecuteMonitorRequest(sin) + assertNull(newReq.monitorId) + assertEquals(false, newReq.dryrun) + assertNotNull(newReq.monitor) + assertEquals(req.monitor, newReq.monitor) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorResponseTests.kt b/alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorResponseTests.kt new file mode 100644 index 000000000..10ccd7038 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorResponseTests.kt @@ -0,0 +1,42 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.junit.Assert +import org.opensearch.alerting.randomBucketLevelMonitorRunResult +import org.opensearch.alerting.randomQueryLevelMonitorRunResult +import org.opensearch.common.io.stream.BytesStreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.test.OpenSearchTestCase + +class ExecuteMonitorResponseTests : OpenSearchTestCase() { + + fun `test exec query-level monitor response`() { + val req = ExecuteMonitorResponse(randomQueryLevelMonitorRunResult()) + Assert.assertNotNull(req) + + val out = BytesStreamOutput() + req.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newReq = ExecuteMonitorResponse(sin) + assertNotNull(newReq.monitorRunResult) + assertEquals("test-monitor", newReq.monitorRunResult.monitorName) + assertNotNull(newReq.monitorRunResult.inputResults) + } + + fun `test exec bucket-level monitor response`() { + val req = ExecuteMonitorResponse(randomBucketLevelMonitorRunResult()) + Assert.assertNotNull(req) + + val out = BytesStreamOutput() + req.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newReq = ExecuteMonitorResponse(sin) + assertNotNull(newReq.monitorRunResult) + assertEquals("test-monitor", newReq.monitorRunResult.monitorName) + assertNotNull(newReq.monitorRunResult.inputResults) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetDestinationsActionTests.kt b/alerting/bin/test/org/opensearch/alerting/action/GetDestinationsActionTests.kt new file mode 100644 index 
000000000..c18e33790 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/action/GetDestinationsActionTests.kt @@ -0,0 +1,16 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.test.OpenSearchTestCase + +class GetDestinationsActionTests : OpenSearchTestCase() { + + fun `test get destinations action name`() { + assertNotNull(GetDestinationsAction.INSTANCE.name()) + assertEquals(GetDestinationsAction.INSTANCE.name(), GetDestinationsAction.NAME) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetDestinationsRequestTests.kt b/alerting/bin/test/org/opensearch/alerting/action/GetDestinationsRequestTests.kt new file mode 100644 index 000000000..7c76621f9 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/action/GetDestinationsRequestTests.kt @@ -0,0 +1,83 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.common.io.stream.BytesStreamOutput +import org.opensearch.commons.alerting.model.Table +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.search.fetch.subphase.FetchSourceContext +import org.opensearch.test.OpenSearchTestCase + +class GetDestinationsRequestTests : OpenSearchTestCase() { + + fun `test get destination request`() { + + val table = Table("asc", "sortString", null, 1, 0, "") + val req = GetDestinationsRequest("1234", 1L, FetchSourceContext.FETCH_SOURCE, table, "slack") + assertNotNull(req) + + val out = BytesStreamOutput() + req.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newReq = GetDestinationsRequest(sin) + assertEquals("1234", newReq.destinationId) + assertEquals(1L, newReq.version) + assertEquals(FetchSourceContext.FETCH_SOURCE, newReq.srcContext) + assertEquals(table, newReq.table) + assertEquals("slack", newReq.destinationType) + } + + fun `test get destination request without src context`() { + + val table = Table("asc", "sortString", null, 1, 0, "") + val req = GetDestinationsRequest("1234", 1L, null, table, "slack") + assertNotNull(req) + + val out = BytesStreamOutput() + req.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newReq = GetDestinationsRequest(sin) + assertEquals("1234", newReq.destinationId) + assertEquals(1L, newReq.version) + assertEquals(null, newReq.srcContext) + assertEquals(table, newReq.table) + assertEquals("slack", newReq.destinationType) + } + + fun `test get destination request without destinationId`() { + + val table = Table("asc", "sortString", null, 1, 0, "") + val req = GetDestinationsRequest(null, 1L, FetchSourceContext.FETCH_SOURCE, table, "slack") + assertNotNull(req) + + val out = BytesStreamOutput() + req.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newReq = GetDestinationsRequest(sin) + assertEquals(null, newReq.destinationId) + assertEquals(1L, newReq.version) + assertEquals(FetchSourceContext.FETCH_SOURCE, newReq.srcContext) + assertEquals(table, newReq.table) + assertEquals("slack", newReq.destinationType) + } + + fun `test get destination request with filter`() { + + val table = Table("asc", "sortString", null, 1, 0, "") + val req = GetDestinationsRequest(null, 1L, FetchSourceContext.FETCH_SOURCE, table, "slack") + assertNotNull(req) + + val out = BytesStreamOutput() + req.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newReq = 
GetDestinationsRequest(sin) + assertEquals(null, newReq.destinationId) + assertEquals(1L, newReq.version) + assertEquals(FetchSourceContext.FETCH_SOURCE, newReq.srcContext) + assertEquals(table, newReq.table) + assertEquals("slack", newReq.destinationType) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetDestinationsResponseTests.kt b/alerting/bin/test/org/opensearch/alerting/action/GetDestinationsResponseTests.kt new file mode 100644 index 000000000..ed837bdce --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/action/GetDestinationsResponseTests.kt @@ -0,0 +1,62 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.alerting.model.destination.Destination +import org.opensearch.alerting.model.destination.Slack +import org.opensearch.alerting.util.DestinationType +import org.opensearch.common.io.stream.BytesStreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.rest.RestStatus +import org.opensearch.test.OpenSearchTestCase +import java.time.Instant +import java.util.Collections + +class GetDestinationsResponseTests : OpenSearchTestCase() { + + fun `test get destination response with no destinations`() { + val req = GetDestinationsResponse(RestStatus.OK, 0, Collections.emptyList()) + assertNotNull(req) + + val out = BytesStreamOutput() + req.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newReq = GetDestinationsResponse(sin) + assertEquals(0, newReq.totalDestinations) + assertTrue(newReq.destinations.isEmpty()) + assertEquals(RestStatus.OK, newReq.status) + } + + fun `test get destination response with a destination`() { + val slack = Slack("url") + val destination = Destination( + "id", + 0L, + 0, + 0, + 0, + DestinationType.SLACK, + "name", + null, + Instant.MIN, + null, + slack, + null, + null + ) + + val req = GetDestinationsResponse(RestStatus.OK, 1, listOf(destination)) + assertNotNull(req) + + val out = BytesStreamOutput() + req.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newReq = GetDestinationsResponse(sin) + assertEquals(1, newReq.totalDestinations) + assertEquals(destination, newReq.destinations[0]) + assertEquals(RestStatus.OK, newReq.status) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountActionTests.kt b/alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountActionTests.kt new file mode 100644 index 000000000..cb26b182e --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountActionTests.kt @@ -0,0 +1,16 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.test.OpenSearchTestCase + +class GetEmailAccountActionTests : OpenSearchTestCase() { + + fun `test get email account name`() { + assertNotNull(GetEmailAccountAction.INSTANCE.name()) + assertEquals(GetEmailAccountAction.INSTANCE.name(), GetEmailAccountAction.NAME) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountRequestTests.kt b/alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountRequestTests.kt new file mode 100644 index 000000000..02631a38b --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountRequestTests.kt @@ -0,0 +1,45 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package 
org.opensearch.alerting.action + +import org.opensearch.common.io.stream.BytesStreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.rest.RestRequest +import org.opensearch.search.fetch.subphase.FetchSourceContext +import org.opensearch.test.OpenSearchTestCase + +class GetEmailAccountRequestTests : OpenSearchTestCase() { + + fun `test get email account request`() { + + val req = GetEmailAccountRequest("1234", 1L, RestRequest.Method.GET, FetchSourceContext.FETCH_SOURCE) + assertNotNull(req) + + val out = BytesStreamOutput() + req.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newReq = GetEmailAccountRequest(sin) + assertEquals("1234", newReq.emailAccountID) + assertEquals(1L, newReq.version) + assertEquals(RestRequest.Method.GET, newReq.method) + assertEquals(FetchSourceContext.FETCH_SOURCE, newReq.srcContext) + } + + fun `test head email account request`() { + + val req = GetEmailAccountRequest("1234", 2L, RestRequest.Method.HEAD, FetchSourceContext.FETCH_SOURCE) + assertNotNull(req) + + val out = BytesStreamOutput() + req.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newReq = GetEmailAccountRequest(sin) + assertEquals("1234", newReq.emailAccountID) + assertEquals(2L, newReq.version) + assertEquals(RestRequest.Method.HEAD, newReq.method) + assertEquals(FetchSourceContext.FETCH_SOURCE, newReq.srcContext) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountResponseTests.kt b/alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountResponseTests.kt new file mode 100644 index 000000000..ed60c3439 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountResponseTests.kt @@ -0,0 +1,47 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.alerting.randomEmailAccount +import org.opensearch.common.io.stream.BytesStreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.rest.RestStatus +import org.opensearch.test.OpenSearchTestCase + +class GetEmailAccountResponseTests : OpenSearchTestCase() { + + fun `test get email account response`() { + + val res = GetEmailAccountResponse("1234", 1L, 2L, 0L, RestStatus.OK, null) + assertNotNull(res) + + val out = BytesStreamOutput() + res.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newRes = GetEmailAccountResponse(sin) + assertEquals("1234", newRes.id) + assertEquals(1L, newRes.version) + assertEquals(RestStatus.OK, newRes.status) + assertEquals(null, newRes.emailAccount) + } + + fun `test get email account with email account`() { + + val emailAccount = randomEmailAccount(name = "test_email_account") + val res = GetEmailAccountResponse("1234", 1L, 2L, 0L, RestStatus.OK, emailAccount) + assertNotNull(res) + + val out = BytesStreamOutput() + res.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newRes = GetEmailAccountResponse(sin) + assertEquals("1234", newRes.id) + assertEquals(1L, newRes.version) + assertEquals(RestStatus.OK, newRes.status) + assertNotNull(newRes.emailAccount) + assertEquals("test_email_account", newRes.emailAccount?.name) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupActionTests.kt b/alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupActionTests.kt new file mode 100644 index 000000000..647de76c3 --- /dev/null +++ 
b/alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupActionTests.kt @@ -0,0 +1,16 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.test.OpenSearchTestCase + +class GetEmailGroupActionTests : OpenSearchTestCase() { + + fun `test get email group name`() { + assertNotNull(GetEmailGroupAction.INSTANCE.name()) + assertEquals(GetEmailGroupAction.INSTANCE.name(), GetEmailGroupAction.NAME) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupRequestTests.kt b/alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupRequestTests.kt new file mode 100644 index 000000000..7fa8b2037 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupRequestTests.kt @@ -0,0 +1,45 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.common.io.stream.BytesStreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.rest.RestRequest +import org.opensearch.search.fetch.subphase.FetchSourceContext +import org.opensearch.test.OpenSearchTestCase + +class GetEmailGroupRequestTests : OpenSearchTestCase() { + + fun `test get email group request`() { + + val req = GetEmailGroupRequest("1234", 1L, RestRequest.Method.GET, FetchSourceContext.FETCH_SOURCE) + assertNotNull(req) + + val out = BytesStreamOutput() + req.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newReq = GetEmailGroupRequest(sin) + assertEquals("1234", newReq.emailGroupID) + assertEquals(1L, newReq.version) + assertEquals(RestRequest.Method.GET, newReq.method) + assertEquals(FetchSourceContext.FETCH_SOURCE, newReq.srcContext) + } + + fun `test head email group request`() { + + val req = GetEmailGroupRequest("1234", 1L, RestRequest.Method.HEAD, FetchSourceContext.FETCH_SOURCE) + assertNotNull(req) + + val out = BytesStreamOutput() + req.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newReq = GetEmailGroupRequest(sin) + assertEquals("1234", newReq.emailGroupID) + assertEquals(1L, newReq.version) + assertEquals(RestRequest.Method.HEAD, newReq.method) + assertEquals(FetchSourceContext.FETCH_SOURCE, newReq.srcContext) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupResponseTests.kt b/alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupResponseTests.kt new file mode 100644 index 000000000..19612fe4a --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupResponseTests.kt @@ -0,0 +1,47 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.alerting.randomEmailGroup +import org.opensearch.common.io.stream.BytesStreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.rest.RestStatus +import org.opensearch.test.OpenSearchTestCase + +class GetEmailGroupResponseTests : OpenSearchTestCase() { + + fun `test get email group response`() { + + val res = GetEmailGroupResponse("1234", 1L, 2L, 0L, RestStatus.OK, null) + assertNotNull(res) + + val out = BytesStreamOutput() + res.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newRes = GetEmailGroupResponse(sin) + assertEquals("1234", newRes.id) + assertEquals(1L, newRes.version) + assertEquals(RestStatus.OK, 
newRes.status) + assertEquals(null, newRes.emailGroup) + } + + fun `test get email group with email group`() { + + val emailGroup = randomEmailGroup(name = "test-email-group") + val res = GetEmailGroupResponse("1234", 1L, 2L, 0L, RestStatus.OK, emailGroup) + assertNotNull(res) + + val out = BytesStreamOutput() + res.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newRes = GetEmailGroupResponse(sin) + assertEquals("1234", newRes.id) + assertEquals(1L, newRes.version) + assertEquals(RestStatus.OK, newRes.status) + assertNotNull(newRes.emailGroup) + assertEquals("test-email-group", newRes.emailGroup?.name) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetFindingsRequestTests.kt b/alerting/bin/test/org/opensearch/alerting/action/GetFindingsRequestTests.kt new file mode 100644 index 000000000..d1bd6f7e3 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/action/GetFindingsRequestTests.kt @@ -0,0 +1,41 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.common.io.stream.BytesStreamOutput +import org.opensearch.commons.alerting.action.GetFindingsRequest +import org.opensearch.commons.alerting.model.Table +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.test.OpenSearchTestCase + +class GetFindingsRequestTests : OpenSearchTestCase() { + + fun `test get findings request`() { + + val table = Table("asc", "sortString", null, 1, 0, "") + + val req = GetFindingsRequest("2121", table, "1", "finding_index_name") + assertNotNull(req) + + val out = BytesStreamOutput() + req.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newReq = GetFindingsRequest(sin) + + assertEquals("1", newReq.monitorId) + assertEquals("2121", newReq.findingId) + assertEquals("finding_index_name", newReq.findingIndex) + assertEquals(table, newReq.table) + } + + fun `test validate returns null`() { + val table = Table("asc", "sortString", null, 1, 0, "") + + val req = GetFindingsRequest("2121", table, "1", "active") + assertNotNull(req) + assertNull(req.validate()) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/action/SearchEmailAccountActionTests.kt b/alerting/bin/test/org/opensearch/alerting/action/SearchEmailAccountActionTests.kt new file mode 100644 index 000000000..14942c977 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/action/SearchEmailAccountActionTests.kt @@ -0,0 +1,16 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.test.OpenSearchTestCase + +class SearchEmailAccountActionTests : OpenSearchTestCase() { + + fun `test search email account action name`() { + assertNotNull(SearchEmailAccountAction.INSTANCE.name()) + assertEquals(SearchEmailAccountAction.INSTANCE.name(), SearchEmailAccountAction.NAME) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/action/SearchEmailGroupActionTests.kt b/alerting/bin/test/org/opensearch/alerting/action/SearchEmailGroupActionTests.kt new file mode 100644 index 000000000..6cd01cf9d --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/action/SearchEmailGroupActionTests.kt @@ -0,0 +1,16 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.action + +import org.opensearch.test.OpenSearchTestCase + +class SearchEmailGroupActionTests : 
OpenSearchTestCase() {
+
+ fun `test search email group action name`() {
+ assertNotNull(SearchEmailGroupAction.INSTANCE.name())
+ assertEquals(SearchEmailGroupAction.INSTANCE.name(), SearchEmailGroupAction.NAME)
+ }
+}
diff --git a/alerting/bin/test/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregationBuilderTests.kt b/alerting/bin/test/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregationBuilderTests.kt
new file mode 100644
index 000000000..60021e20b
--- /dev/null
+++ b/alerting/bin/test/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregationBuilderTests.kt
@@ -0,0 +1,56 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.aggregation.bucketselectorext
+
+import org.opensearch.alerting.AlertingPlugin
+import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder
+import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtFilter
+import org.opensearch.plugins.SearchPlugin
+import org.opensearch.script.Script
+import org.opensearch.script.ScriptType
+import org.opensearch.search.aggregations.BasePipelineAggregationTestCase
+import org.opensearch.search.aggregations.bucket.terms.IncludeExclude
+import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy
+
+class BucketSelectorExtAggregationBuilderTests : BasePipelineAggregationTestCase<BucketSelectorExtAggregationBuilder>() {
+ override fun plugins(): List<SearchPlugin> {
+ return listOf(AlertingPlugin())
+ }
+
+ override fun createTestAggregatorFactory(): BucketSelectorExtAggregationBuilder {
+ val name = randomAlphaOfLengthBetween(3, 20)
+ val bucketsPaths: MutableMap<String, String> = HashMap()
+ val numBucketPaths = randomIntBetween(1, 10)
+ for (i in 0 until numBucketPaths) {
+ bucketsPaths[randomAlphaOfLengthBetween(1, 20)] = randomAlphaOfLengthBetween(1, 40)
+ }
+ val script: Script
+ if (randomBoolean()) {
+ script = mockScript("script")
+ } else {
+ val params: MutableMap<String, Any> = HashMap()
+ if (randomBoolean()) {
+ params["foo"] = "bar"
+ }
+ val type = randomFrom(*ScriptType.values())
+ script = Script(
+ type,
+ if (type == ScriptType.STORED) null else randomFrom("my_lang", Script.DEFAULT_SCRIPT_LANG),
+ "script", params
+ )
+ }
+ val parentBucketPath = randomAlphaOfLengthBetween(3, 20)
+ val filter = BucketSelectorExtFilter(IncludeExclude("foo.*", "bar.*"))
+ val factory = BucketSelectorExtAggregationBuilder(
+ name, bucketsPaths,
+ script, parentBucketPath, filter
+ )
+ if (randomBoolean()) {
+ factory.gapPolicy(randomFrom(*GapPolicy.values()))
+ }
+ return factory
+ }
+}
diff --git a/alerting/bin/test/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregatorTests.kt b/alerting/bin/test/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregatorTests.kt
new file mode 100644
index 000000000..257a0a705
--- /dev/null
+++ b/alerting/bin/test/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregatorTests.kt
@@ -0,0 +1,371 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.aggregation.bucketselectorext
+
+import org.apache.lucene.document.Document
+import org.apache.lucene.document.SortedNumericDocValuesField
+import org.apache.lucene.document.SortedSetDocValuesField
+import org.apache.lucene.index.DirectoryReader
+import org.apache.lucene.search.MatchAllDocsQuery
+import org.apache.lucene.search.Query
+import org.apache.lucene.tests.index.RandomIndexWriter
+import org.apache.lucene.util.BytesRef
+import org.hamcrest.CoreMatchers
+import org.opensearch.common.CheckedConsumer
+import org.opensearch.common.settings.Settings
+import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder
+import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtFilter
+import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorIndices
+import org.opensearch.index.mapper.KeywordFieldMapper.KeywordFieldType
+import org.opensearch.index.mapper.MappedFieldType
+import org.opensearch.index.mapper.NumberFieldMapper
+import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType
+import org.opensearch.index.query.MatchAllQueryBuilder
+import org.opensearch.script.MockScriptEngine
+import org.opensearch.script.Script
+import org.opensearch.script.ScriptEngine
+import org.opensearch.script.ScriptModule
+import org.opensearch.script.ScriptService
+import org.opensearch.script.ScriptType
+import org.opensearch.search.aggregations.Aggregation
+import org.opensearch.search.aggregations.Aggregator
+import org.opensearch.search.aggregations.AggregatorTestCase
+import org.opensearch.search.aggregations.bucket.filter.FilterAggregationBuilder
+import org.opensearch.search.aggregations.bucket.filter.FiltersAggregationBuilder
+import org.opensearch.search.aggregations.bucket.filter.InternalFilter
+import org.opensearch.search.aggregations.bucket.filter.InternalFilters
+import org.opensearch.search.aggregations.bucket.terms.IncludeExclude
+import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder
+import org.opensearch.search.aggregations.metrics.AvgAggregationBuilder
+import org.opensearch.search.aggregations.metrics.ValueCountAggregationBuilder
+import java.io.IOException
+import java.util.Collections
+import java.util.function.Consumer
+import java.util.function.Function
+
+class BucketSelectorExtAggregatorTests : AggregatorTestCase() {
+
+ private var SCRIPTNAME = "bucket_selector_script"
+ private var paramName = "the_avg"
+ private var paramValue = 19.0
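+
+ // The mocked "bucket_selector_script" below simply reports whether the value wired in under
+ // paramName equals paramValue, so each test can pick exactly which buckets the selector keeps.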
+ override fun getMockScriptService(): ScriptService {
+
+ val scriptEngine = MockScriptEngine(
+ MockScriptEngine.NAME,
+ Collections.singletonMap(
+ SCRIPTNAME,
+ Function<Map<String, Any>, Any> { script: Map<String, Any> ->
+ script[paramName].toString().toDouble() == paramValue
+ }
+ ),
+ emptyMap()
+ )
+ val engines: Map<String, ScriptEngine> = Collections.singletonMap(scriptEngine.type, scriptEngine)
+ return ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS)
+ }
+
+ @Throws(Exception::class)
+ fun `test bucket selector script`() {
+ val fieldType: MappedFieldType = NumberFieldType("number_field", NumberFieldMapper.NumberType.INTEGER)
+ val fieldType1: MappedFieldType = KeywordFieldType("the_field")
+
+ val filters: FiltersAggregationBuilder = FiltersAggregationBuilder("placeholder", MatchAllQueryBuilder())
+ .subAggregation(
+ TermsAggregationBuilder("the_terms").field("the_field")
+ .subAggregation(AvgAggregationBuilder("the_avg").field("number_field"))
+ )
+ .subAggregation(
+ BucketSelectorExtAggregationBuilder(
+ "test_bucket_selector_ext",
+ Collections.singletonMap("the_avg", "the_avg.value"),
+ Script(ScriptType.INLINE, MockScriptEngine.NAME, SCRIPTNAME, emptyMap()),
+ "the_terms",
+ null
+ )
+ )
+ paramName = "the_avg"
+ paramValue = 19.0
+ testCase(
+ filters, MatchAllDocsQuery(),
+ CheckedConsumer<RandomIndexWriter, IOException> { iw: RandomIndexWriter ->
+ var doc = Document()
+ doc.add(SortedSetDocValuesField("the_field", BytesRef("test1")))
+ doc.add(SortedNumericDocValuesField("number_field", 20))
+ iw.addDocument(doc)
+ doc = Document()
+ doc.add(SortedSetDocValuesField("the_field", BytesRef("test2")))
+ doc.add(SortedNumericDocValuesField("number_field", 19))
+ iw.addDocument(doc)
+ },
+ Consumer<InternalFilters> { f: InternalFilters ->
+ assertThat(
+ (f.buckets[0].aggregations.get("test_bucket_selector_ext") as BucketSelectorIndices).bucketIndices[0],
+ CoreMatchers.equalTo(1)
+ )
+ },
+ fieldType, fieldType1
+ )
+ }
+
+ @Throws(Exception::class)
+ fun `test bucket selector filter include`() {
+ val fieldType: MappedFieldType = NumberFieldType("number_field", NumberFieldMapper.NumberType.INTEGER)
+ val fieldType1: MappedFieldType = KeywordFieldType("the_field")
+
+ val selectorAgg1: FiltersAggregationBuilder = FiltersAggregationBuilder("placeholder", MatchAllQueryBuilder())
+ .subAggregation(
+ TermsAggregationBuilder("the_terms").field("the_field")
+ .subAggregation(AvgAggregationBuilder("the_avg").field("number_field"))
+ )
+ .subAggregation(
+ BucketSelectorExtAggregationBuilder(
+ "test_bucket_selector_ext",
+ Collections.singletonMap("the_avg", "the_avg.value"),
+ Script(ScriptType.INLINE, MockScriptEngine.NAME, SCRIPTNAME, emptyMap()),
+ "the_terms",
+ BucketSelectorExtFilter(IncludeExclude(arrayOf("test1"), arrayOf()))
+ )
+ )
+
+ val selectorAgg2: FiltersAggregationBuilder = FiltersAggregationBuilder("placeholder", MatchAllQueryBuilder())
+ .subAggregation(
+ TermsAggregationBuilder("the_terms").field("the_field")
+ .subAggregation(AvgAggregationBuilder("the_avg").field("number_field"))
+ )
+ .subAggregation(
+ BucketSelectorExtAggregationBuilder(
+ "test_bucket_selector_ext",
+ Collections.singletonMap("the_avg", "the_avg.value"),
+ Script(ScriptType.INLINE, MockScriptEngine.NAME, SCRIPTNAME, emptyMap()),
+ "the_terms",
+ BucketSelectorExtFilter(IncludeExclude(arrayOf("test2"), arrayOf()))
+ )
+ )
+
+ paramName = "the_avg"
+ paramValue = 19.0
+
+ testCase(
+ selectorAgg1, MatchAllDocsQuery(),
+ CheckedConsumer<RandomIndexWriter, IOException> { iw: RandomIndexWriter ->
+ var doc = Document()
+ doc.add(SortedSetDocValuesField("the_field", BytesRef("test1")))
+ doc.add(SortedNumericDocValuesField("number_field", 20))
+ iw.addDocument(doc)
+ doc = Document()
+ doc.add(SortedSetDocValuesField("the_field", BytesRef("test2")))
+ doc.add(SortedNumericDocValuesField("number_field", 19))
+ iw.addDocument(doc)
+ },
+ Consumer<InternalFilters> { f: InternalFilters ->
+ assertThat(
+ (f.buckets[0].aggregations.get("test_bucket_selector_ext") as BucketSelectorIndices).bucketIndices.size,
+ CoreMatchers.equalTo(0)
+ )
+ },
+ fieldType, fieldType1
+ )
+
+ testCase(
+ selectorAgg2, MatchAllDocsQuery(),
+ CheckedConsumer<RandomIndexWriter, IOException> { iw: RandomIndexWriter ->
+ var doc = Document()
+ doc.add(SortedSetDocValuesField("the_field", BytesRef("test1")))
+ doc.add(SortedNumericDocValuesField("number_field", 20))
+ iw.addDocument(doc)
+ doc = Document()
+ doc.add(SortedSetDocValuesField("the_field", BytesRef("test2")))
+ doc.add(SortedNumericDocValuesField("number_field", 19))
+ iw.addDocument(doc)
+ },
+ Consumer<InternalFilters> { f: InternalFilters ->
+ assertThat(
+ (f.buckets[0].aggregations.get("test_bucket_selector_ext") as BucketSelectorIndices).bucketIndices[0],
+ CoreMatchers.equalTo(1)
+ )
+ },
+ fieldType, fieldType1
+ )
+ }
+
+ @Throws(Exception::class)
+ fun `test bucket selector filter exclude`() {
+ val fieldType: MappedFieldType = NumberFieldType("number_field", NumberFieldMapper.NumberType.INTEGER)
+ val fieldType1: MappedFieldType = KeywordFieldType("the_field")
KeywordFieldType("the_field") + + val selectorAgg1: FiltersAggregationBuilder = FiltersAggregationBuilder("placeholder", MatchAllQueryBuilder()) + .subAggregation( + TermsAggregationBuilder("the_terms").field("the_field") + .subAggregation(AvgAggregationBuilder("the_avg").field("number_field")) + ) + .subAggregation( + BucketSelectorExtAggregationBuilder( + "test_bucket_selector_ext", + Collections.singletonMap("the_avg", "the_avg.value"), + Script(ScriptType.INLINE, MockScriptEngine.NAME, SCRIPTNAME, emptyMap()), + "the_terms", + BucketSelectorExtFilter(IncludeExclude(arrayOf(), arrayOf("test2"))) + ) + ) + paramName = "the_avg" + paramValue = 19.0 + testCase( + selectorAgg1, MatchAllDocsQuery(), + CheckedConsumer { iw: RandomIndexWriter -> + var doc = Document() + doc.add(SortedSetDocValuesField("the_field", BytesRef("test1"))) + doc.add(SortedNumericDocValuesField("number_field", 20)) + iw.addDocument(doc) + doc = Document() + doc.add(SortedSetDocValuesField("the_field", BytesRef("test2"))) + doc.add(SortedNumericDocValuesField("number_field", 19)) + iw.addDocument(doc) + }, + Consumer { f: InternalFilters -> + assertThat( + (f.buckets[0].aggregations.get("test_bucket_selector_ext") as BucketSelectorIndices).bucketIndices.size, + CoreMatchers.equalTo(0) + ) + }, + fieldType, fieldType1 + ) + } + + @Throws(Exception::class) + fun `test bucket selector filter numeric key`() { + val fieldType: MappedFieldType = NumberFieldType("number_field", NumberFieldMapper.NumberType.INTEGER) + val fieldType1: MappedFieldType = KeywordFieldType("the_field") + + val selectorAgg1: FiltersAggregationBuilder = FiltersAggregationBuilder("placeholder", MatchAllQueryBuilder()) + .subAggregation( + TermsAggregationBuilder("number_agg").field("number_field") + .subAggregation(ValueCountAggregationBuilder("count").field("number_field")) + ) + .subAggregation( + BucketSelectorExtAggregationBuilder( + "test_bucket_selector_ext", + Collections.singletonMap("count", "count"), + Script(ScriptType.INLINE, MockScriptEngine.NAME, SCRIPTNAME, emptyMap()), + "number_agg", + BucketSelectorExtFilter(IncludeExclude(doubleArrayOf(19.0), doubleArrayOf())) + ) + ) + + paramName = "count" + paramValue = 1.0 + testCase( + selectorAgg1, MatchAllDocsQuery(), + CheckedConsumer { iw: RandomIndexWriter -> + var doc = Document() + doc.add(SortedSetDocValuesField("the_field", BytesRef("test1"))) + doc.add(SortedNumericDocValuesField("number_field", 20)) + iw.addDocument(doc) + doc = Document() + doc.add(SortedSetDocValuesField("the_field", BytesRef("test2"))) + doc.add(SortedNumericDocValuesField("number_field", 19)) + iw.addDocument(doc) + }, + Consumer { f: InternalFilters -> + assertThat( + (f.buckets[0].aggregations.get("test_bucket_selector_ext") as BucketSelectorIndices).bucketIndices[0], + CoreMatchers.equalTo(0) + ) + }, + fieldType, fieldType1 + ) + } + + @Throws(Exception::class) + fun `test bucket selector nested parent path`() { + val fieldType: MappedFieldType = NumberFieldType("number_field", NumberFieldMapper.NumberType.INTEGER) + val fieldType1: MappedFieldType = KeywordFieldType("the_field") + + val selectorAgg1: FilterAggregationBuilder = FilterAggregationBuilder("placeholder", MatchAllQueryBuilder()) + .subAggregation( + FilterAggregationBuilder("parent_agg", MatchAllQueryBuilder()) + .subAggregation( + TermsAggregationBuilder("term_agg").field("the_field") + .subAggregation(AvgAggregationBuilder("the_avg").field("number_field")) + ) + ) + .subAggregation( + BucketSelectorExtAggregationBuilder( + 
"test_bucket_selector_ext", + Collections.singletonMap("the_avg", "the_avg.value"), + Script(ScriptType.INLINE, MockScriptEngine.NAME, SCRIPTNAME, emptyMap()), + "parent_agg>term_agg", + null + ) + ) + paramName = "the_avg" + paramValue = 19.0 + testCaseInternalFilter( + selectorAgg1, MatchAllDocsQuery(), + CheckedConsumer { iw: RandomIndexWriter -> + var doc = Document() + doc.add(SortedSetDocValuesField("the_field", BytesRef("test1"))) + + doc.add(SortedNumericDocValuesField("number_field", 20)) + iw.addDocument(doc) + doc = Document() + doc.add(SortedSetDocValuesField("the_field", BytesRef("test2"))) + + doc.add(SortedNumericDocValuesField("number_field", 19)) + iw.addDocument(doc) + }, + Consumer { f: InternalFilter -> + assertThat( + (f.aggregations.get("test_bucket_selector_ext") as BucketSelectorIndices).bucketIndices[0], + CoreMatchers.equalTo(1) + ) + }, + fieldType, fieldType1 + ) + } + + @Throws(IOException::class) + private fun testCase( + aggregationBuilder: FiltersAggregationBuilder, + query: Query, + buildIndex: CheckedConsumer, + verify: Consumer, + vararg fieldType: MappedFieldType + ) { + newDirectory().use { directory -> + val indexWriter = RandomIndexWriter(random(), directory) + buildIndex.accept(indexWriter) + indexWriter.close() + DirectoryReader.open(directory).use { indexReader -> + val indexSearcher = newIndexSearcher(indexReader) + val filters: InternalFilters + filters = searchAndReduce(indexSearcher, query, aggregationBuilder, *fieldType) + verify.accept(filters) + } + } + } + + @Throws(IOException::class) + private fun testCaseInternalFilter( + aggregationBuilder: FilterAggregationBuilder, + query: Query, + buildIndex: CheckedConsumer, + verify: Consumer, + vararg fieldType: MappedFieldType + ) { + newDirectory().use { directory -> + val indexWriter = RandomIndexWriter(random(), directory) + buildIndex.accept(indexWriter) + indexWriter.close() + DirectoryReader.open(directory).use { indexReader -> + val indexSearcher = newIndexSearcher(indexReader) + val filters: InternalFilter + filters = searchAndReduce(indexSearcher, query, aggregationBuilder, *fieldType) + verify.accept(filters) + } + } + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/alerts/AlertIndicesIT.kt b/alerting/bin/test/org/opensearch/alerting/alerts/AlertIndicesIT.kt new file mode 100644 index 000000000..da402c5b7 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/alerts/AlertIndicesIT.kt @@ -0,0 +1,371 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.alerts + +import org.apache.http.entity.ContentType.APPLICATION_JSON +import org.apache.http.entity.StringEntity +import org.opensearch.action.search.SearchResponse +import org.opensearch.alerting.ALWAYS_RUN +import org.opensearch.alerting.AlertingRestTestCase +import org.opensearch.alerting.NEVER_RUN +import org.opensearch.alerting.makeRequest +import org.opensearch.alerting.randomDocumentLevelMonitor +import org.opensearch.alerting.randomDocumentLevelTrigger +import org.opensearch.alerting.randomQueryLevelMonitor +import org.opensearch.alerting.randomQueryLevelTrigger +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.common.xcontent.XContentType +import org.opensearch.common.xcontent.json.JsonXContent.jsonXContent +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.ScheduledJob +import 
org.opensearch.core.rest.RestStatus +import org.opensearch.test.OpenSearchTestCase +import java.util.concurrent.TimeUnit + +class AlertIndicesIT : AlertingRestTestCase() { + + fun `test create alert index`() { + executeMonitor(randomQueryLevelMonitor(triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN)))) + + assertIndexExists(AlertIndices.ALERT_INDEX) + assertIndexExists(AlertIndices.ALERT_HISTORY_WRITE_INDEX) + } + + fun `test create finding index`() { + val testIndex = createTestIndex() + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + + executeMonitor(monitor.id) + + assertIndexExists(AlertIndices.FINDING_HISTORY_WRITE_INDEX) + } + + fun `test update alert index mapping with new schema version`() { + wipeAllODFEIndices() + assertIndexDoesNotExist(AlertIndices.ALERT_INDEX) + assertIndexDoesNotExist(AlertIndices.ALERT_HISTORY_WRITE_INDEX) + + putAlertMappings( + AlertIndices.alertMapping().trimStart('{').trimEnd('}') + .replace("\"schema_version\": 5", "\"schema_version\": 0") + ) + assertIndexExists(AlertIndices.ALERT_INDEX) + assertIndexExists(AlertIndices.ALERT_HISTORY_WRITE_INDEX) + verifyIndexSchemaVersion(AlertIndices.ALERT_INDEX, 0) + verifyIndexSchemaVersion(AlertIndices.ALERT_HISTORY_WRITE_INDEX, 0) + wipeAllODFEIndices() + executeMonitor(createRandomMonitor()) + assertIndexExists(AlertIndices.ALERT_INDEX) + assertIndexExists(AlertIndices.ALERT_HISTORY_WRITE_INDEX) + verifyIndexSchemaVersion(ScheduledJob.SCHEDULED_JOBS_INDEX, 8) + verifyIndexSchemaVersion(AlertIndices.ALERT_INDEX, 5) + verifyIndexSchemaVersion(AlertIndices.ALERT_HISTORY_WRITE_INDEX, 5) + } + + fun `test update finding index mapping with new schema version`() { + wipeAllODFEIndices() + assertIndexDoesNotExist(AlertIndices.FINDING_HISTORY_WRITE_INDEX) + + putFindingMappings( + AlertIndices.findingMapping().trimStart('{').trimEnd('}') + .replace("\"schema_version\": 4", "\"schema_version\": 0") + ) + assertIndexExists(AlertIndices.FINDING_HISTORY_WRITE_INDEX) + verifyIndexSchemaVersion(AlertIndices.FINDING_HISTORY_WRITE_INDEX, 0) + wipeAllODFEIndices() + + val testIndex = createTestIndex() + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + executeMonitor(trueMonitor.id) + assertIndexExists(AlertIndices.FINDING_HISTORY_WRITE_INDEX) + verifyIndexSchemaVersion(ScheduledJob.SCHEDULED_JOBS_INDEX, 8) + verifyIndexSchemaVersion(AlertIndices.FINDING_HISTORY_WRITE_INDEX, 4) + } + + fun `test alert index gets recreated automatically if deleted`() { + wipeAllODFEIndices() + assertIndexDoesNotExist(AlertIndices.ALERT_INDEX) + val trueMonitor = randomQueryLevelMonitor(triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN))) + + executeMonitor(trueMonitor) + assertIndexExists(AlertIndices.ALERT_INDEX) + assertIndexExists(AlertIndices.ALERT_HISTORY_WRITE_INDEX) + wipeAllODFEIndices() + assertIndexDoesNotExist(AlertIndices.ALERT_INDEX) + 
assertIndexDoesNotExist(AlertIndices.ALERT_HISTORY_WRITE_INDEX) + + val executeResponse = executeMonitor(trueMonitor) + val xcp = createParser(XContentType.JSON.xContent(), executeResponse.entity.content) + val output = xcp.map() + assertNull("Error running a monitor after wiping alert indices", output["error"]) + } + + fun `test finding index gets recreated automatically if deleted`() { + wipeAllODFEIndices() + assertIndexDoesNotExist(AlertIndices.FINDING_HISTORY_WRITE_INDEX) + val testIndex = createTestIndex() + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + + executeMonitor(trueMonitor.id) + assertIndexExists(AlertIndices.FINDING_HISTORY_WRITE_INDEX) + wipeAllODFEIndices() + assertIndexDoesNotExist(AlertIndices.FINDING_HISTORY_WRITE_INDEX) + + createTestIndex(testIndex) + val executeResponse = executeMonitor(trueMonitor) + val xcp = createParser(XContentType.JSON.xContent(), executeResponse.entity.content) + val output = xcp.map() + assertNull("Error running a monitor after wiping finding indices", output["error"]) + } + + fun `test rollover alert history index`() { + // Update the rollover check to be every 1 second and the index max age to be 1 second + client().updateSettings(AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.key, "1s") + client().updateSettings(AlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE.key, "1s") + + val trueMonitor = randomQueryLevelMonitor(triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN))) + executeMonitor(trueMonitor) + + // Allow for a rollover index. + Thread.sleep(2000) + assertTrue("Did not find 3 alert indices", getAlertIndices().size >= 3) + } + + fun `test rollover finding history index`() { + // Update the rollover check to be every 1 second and the index max age to be 1 second + client().updateSettings(AlertingSettings.FINDING_HISTORY_ROLLOVER_PERIOD.key, "1s") + client().updateSettings(AlertingSettings.FINDING_HISTORY_INDEX_MAX_AGE.key, "1s") + + val testIndex = createTestIndex() + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + executeMonitor(trueMonitor.id) + + // Allow for a rollover index. 
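+ // With the rollover period and index max age both lowered to 1s above, at least one more
+ // finding history index should exist after the 2s sleep; >= allows for extra rollovers.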
+ Thread.sleep(2000)
+ assertTrue("Did not find 2 finding indices", getFindingIndices().size >= 2)
+ }
+
+ fun `test alert history disabled`() {
+ resetHistorySettings()
+
+ val trigger1 = randomQueryLevelTrigger(condition = ALWAYS_RUN)
+ val monitor1 = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger1)))
+ executeMonitor(monitor1.id)
+
+ // Check if alert is active
+ val activeAlert1 = searchAlerts(monitor1)
+ assertEquals("1 alert should be active", 1, activeAlert1.size)
+
+ // Change trigger and re-execute monitor to mark alert as COMPLETED
+ updateMonitor(monitor1.copy(triggers = listOf(trigger1.copy(condition = NEVER_RUN)), id = monitor1.id), true)
+ executeMonitor(monitor1.id)
+
+ val completedAlert1 = searchAlerts(monitor1, AlertIndices.ALL_ALERT_INDEX_PATTERN).single()
+ assertNotNull("Alert is not completed", completedAlert1.endTime)
+
+ assertEquals(1, getAlertHistoryDocCount())
+
+ // Disable alert history
+ client().updateSettings(AlertingSettings.ALERT_HISTORY_ENABLED.key, "false")
+
+ val trigger2 = randomQueryLevelTrigger(condition = ALWAYS_RUN)
+ val monitor2 = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger2)))
+ executeMonitor(monitor2.id)
+
+ // Check if second alert is active
+ val activeAlert2 = searchAlerts(monitor2)
+ assertEquals("1 alert should be active", 1, activeAlert2.size)
+
+ // Mark second alert as COMPLETED
+ updateMonitor(monitor2.copy(triggers = listOf(trigger2.copy(condition = NEVER_RUN)), id = monitor2.id), true)
+ executeMonitor(monitor2.id)
+
+ // For the second alert, since history is now disabled, searching for the completed alert should return an empty List
+ // since a COMPLETED alert will be removed from the alert index and not added to the history index
+ val completedAlert2 = searchAlerts(monitor2, AlertIndices.ALL_ALERT_INDEX_PATTERN)
+ assertTrue("Alert is not completed", completedAlert2.isEmpty())
+
+ // Get history entry count again and ensure the new alert was not added
+ assertEquals(1, getAlertHistoryDocCount())
+ }
+
+ fun `test short retention period`() {
+ resetHistorySettings()
+
+ // Create monitor and execute
+ val trigger = randomQueryLevelTrigger(condition = ALWAYS_RUN)
+ val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger)))
+ executeMonitor(monitor.id)
+
+ // Check if alert is active and alert index is created
+ val activeAlert = searchAlerts(monitor)
+ assertEquals("1 alert should be active", 1, activeAlert.size)
+
+ waitUntil { return@waitUntil getAlertIndices().size == 2 }
+
+ assertEquals("Did not find 2 alert indices", 2, getAlertIndices().size)
+ // History index is created but is empty
+ assertEquals(0, getAlertHistoryDocCount())
+
+ // Mark alert as COMPLETED
+ updateMonitor(monitor.copy(triggers = listOf(trigger.copy(condition = NEVER_RUN)), id = monitor.id), true)
+ executeMonitor(monitor.id)
+
+ // Verify alert is completed
+ val completedAlert = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN).single()
+ assertNotNull("Alert is not completed", completedAlert.endTime)
+
+ // The completed alert should be removed from the active alert index and added to the history index
+ assertEquals(1, getAlertHistoryDocCount())
+
+ // Update rollover check and max docs as well as decreasing the retention period
+ client().updateSettings(AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.key, "1s")
+ client().updateSettings(AlertingSettings.ALERT_HISTORY_MAX_DOCS.key, 1)
+ client().updateSettings(AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD.key, "1s")
+
+ // Give some 
time for history to be rolled over and cleared + OpenSearchTestCase.waitUntil({ + val alertIndices = getAlertIndices().size + val docCount = getAlertHistoryDocCount() + if (alertIndices > 2 || docCount > 0) { + return@waitUntil false + } + return@waitUntil true + }, 30, TimeUnit.SECONDS) + + // Given the max_docs and retention settings above, the history index will rollover and the non-write index will be deleted. + // This leaves two indices: alert index and an empty history write index + assertEquals("Did not find 2 alert indices", 2, getAlertIndices().size) + assertEquals(0, getAlertHistoryDocCount()) + } + + fun `test short finding retention period`() { + resetHistorySettings() + + // Create monitor and execute + val testIndex = createTestIndex() + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_field" : "us-west-2" + }""" + indexDoc(testIndex, "1", testDoc) + + executeMonitor(monitor.id) + + // Check if alert is active and alert index is created + val activeAlert = searchAlerts(monitor) + assertEquals("1 alert should be active", 1, activeAlert.size) + + waitUntil { return@waitUntil getAlertIndices().size == 2 } + + assertEquals("Did not find 2 alert indices", 2, getAlertIndices().size) + // History index is created but is empty + assertEquals(0, getAlertHistoryDocCount()) + + // Mark doc level alert as Acknowledged + acknowledgeAlerts(monitor, activeAlert[0]) + + // Verify alert is completed + val ackAlert = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN).single() + assertNotNull("Alert is not acknowledged", ackAlert.acknowledgedTime) + + // The completed alert should be removed from the active alert index and added to the history index + assertEquals(1, getAlertHistoryDocCount()) + + // Update rollover check and max docs as well as decreasing the retention period + client().updateSettings(AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.key, "1s") + client().updateSettings(AlertingSettings.ALERT_HISTORY_MAX_DOCS.key, 1) + client().updateSettings(AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD.key, "1s") + + // Give some time for history to be rolled over and cleared + OpenSearchTestCase.waitUntil({ + val alertIndices = getAlertIndices().size + val docCount = getAlertHistoryDocCount() + if (alertIndices > 2 || docCount > 0) { + return@waitUntil false + } + return@waitUntil true + }, 30, TimeUnit.SECONDS) + + // Given the max_docs and retention settings above, the history index will rollover and the non-write index will be deleted. 
+ // This leaves two indices: alert index and an empty history write index
+ assertEquals("Did not find 2 alert indices", 2, getAlertIndices().size)
+ assertEquals(0, getAlertHistoryDocCount())
+ }
+
+ private fun assertIndexExists(index: String) {
+ val response = client().makeRequest("HEAD", index)
+ assertEquals("Index $index does not exist.", RestStatus.OK, response.restStatus())
+ }
+
+ private fun assertIndexDoesNotExist(index: String) {
+ val response = client().makeRequest("HEAD", index)
+ assertEquals("Index $index should not exist.", RestStatus.NOT_FOUND, response.restStatus())
+ }
+
+ private fun resetHistorySettings() {
+ client().updateSettings(AlertingSettings.ALERT_HISTORY_ENABLED.key, "true")
+ client().updateSettings(AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.key, "60s")
+ client().updateSettings(AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD.key, "60s")
+ client().updateSettings(AlertingSettings.FINDING_HISTORY_ENABLED.key, "true")
+ client().updateSettings(AlertingSettings.FINDING_HISTORY_ROLLOVER_PERIOD.key, "60s")
+ client().updateSettings(AlertingSettings.FINDING_HISTORY_RETENTION_PERIOD.key, "60s")
+ }
+
+ private fun getAlertIndices(): List<String> {
+ val response = client().makeRequest("GET", "/_cat/indices/${AlertIndices.ALL_ALERT_INDEX_PATTERN}?format=json")
+ val xcp = createParser(XContentType.JSON.xContent(), response.entity.content)
+ val responseList = xcp.list()
+ val indices = mutableListOf<String>()
+ responseList.filterIsInstance<Map<String, Any>>().forEach { indices.add(it["index"] as String) }
+
+ return indices
+ }
+
+ private fun getFindingIndices(): List<String> {
+ val response = client().makeRequest("GET", "/_cat/indices/${AlertIndices.ALL_FINDING_INDEX_PATTERN}?format=json")
+ val xcp = createParser(XContentType.JSON.xContent(), response.entity.content)
+ val responseList = xcp.list()
+ val indices = mutableListOf<String>()
+ responseList.filterIsInstance<Map<String, Any>>().forEach { indices.add(it["index"] as String) }
+
+ return indices
+ }
+
+ private fun getAlertHistoryDocCount(): Long {
+ val request = """
+ {
+ "query": {
+ "match_all": {}
+ }
+ }
+ """.trimIndent()
+ val response = adminClient().makeRequest(
+ "POST", "${AlertIndices.ALERT_HISTORY_ALL}/_search", emptyMap(),
+ StringEntity(request, APPLICATION_JSON)
+ )
+ assertEquals("Request to get alert history failed", RestStatus.OK, response.restStatus())
+ return SearchResponse.fromXContent(createParser(jsonXContent, response.entity.content)).hits.totalHits!!.value
+ }
+}
diff --git a/alerting/bin/test/org/opensearch/alerting/bwc/AlertingBackwardsCompatibilityIT.kt b/alerting/bin/test/org/opensearch/alerting/bwc/AlertingBackwardsCompatibilityIT.kt
new file mode 100644
index 000000000..937be869d
--- /dev/null
+++ b/alerting/bin/test/org/opensearch/alerting/bwc/AlertingBackwardsCompatibilityIT.kt
@@ -0,0 +1,207 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.bwc
+
+import org.apache.http.entity.ContentType.APPLICATION_JSON
+import org.apache.http.entity.StringEntity
+import org.opensearch.alerting.ALERTING_BASE_URI
+import org.opensearch.alerting.AlertingRestTestCase
+import org.opensearch.alerting.makeRequest
+import org.opensearch.common.settings.Settings
+import org.opensearch.common.xcontent.XContentType
+import org.opensearch.commons.alerting.model.Monitor
+import org.opensearch.core.rest.RestStatus
+import org.opensearch.index.query.QueryBuilders
+import org.opensearch.search.builder.SearchSourceBuilder
+
+class AlertingBackwardsCompatibilityIT : AlertingRestTestCase() {
+
+ companion object {
+ private val CLUSTER_TYPE = ClusterType.parse(System.getProperty("tests.rest.bwcsuite"))
+ private val CLUSTER_NAME = System.getProperty("tests.clustername")
+ }
+
+ override fun preserveIndicesUponCompletion(): Boolean = true
+
+ override fun preserveReposUponCompletion(): Boolean = true
+
+ override fun preserveTemplatesUponCompletion(): Boolean = true
+
+ override fun preserveODFEIndicesAfterTest(): Boolean = true
+
+ override fun restClientSettings(): Settings {
+ return Settings.builder()
+ .put(super.restClientSettings())
+ // increase the timeout here to 90 seconds to handle long waits for a green
+ // cluster health. the waits for green need to be longer than a minute to
+ // account for delayed shards
+ .put(CLIENT_SOCKET_TIMEOUT, "90s")
+ .build()
+ }
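+
+ // This suite is executed once per backwards-compatibility phase; the phase (old, mixed, or
+ // upgraded cluster) comes from the "tests.rest.bwcsuite" system property parsed into CLUSTER_TYPE.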
AlertingRestTestCase() { + + companion object { + private val CLUSTER_TYPE = ClusterType.parse(System.getProperty("tests.rest.bwcsuite")) + private val CLUSTER_NAME = System.getProperty("tests.clustername") + } + + override fun preserveIndicesUponCompletion(): Boolean = true + + override fun preserveReposUponCompletion(): Boolean = true + + override fun preserveTemplatesUponCompletion(): Boolean = true + + override fun preserveODFEIndicesAfterTest(): Boolean = true + + override fun restClientSettings(): Settings { + return Settings.builder() + .put(super.restClientSettings()) + // Increase the timeout here to 90 seconds to handle long waits for a green + // cluster health. The waits for green need to be longer than a minute to + // account for delayed shards + .put(CLIENT_SOCKET_TIMEOUT, "90s") + .build() + } + + @Throws(Exception::class) + @Suppress("UNCHECKED_CAST") + fun `test backwards compatibility`() { + val uri = getPluginUri() + val responseMap = getAsMap(uri)["nodes"] as Map<String, Map<String, Any>> + for (response in responseMap.values) { + val plugins = response["plugins"] as List<Map<String, Any>> + val pluginNames = plugins.map { plugin -> plugin["name"] }.toSet() + when (CLUSTER_TYPE) { + ClusterType.OLD -> { + assertTrue(pluginNames.contains("opensearch-alerting")) + createBasicMonitor() + } + ClusterType.MIXED -> { + assertTrue(pluginNames.contains("opensearch-alerting")) + verifyMonitorExists(ALERTING_BASE_URI) + // TODO: Need to move the base URI being used here into a constant and rename ALERTING_BASE_URI to + // MONITOR_BASE_URI + verifyMonitorStats("/_plugins/_alerting") + } + ClusterType.UPGRADED -> { + assertTrue(pluginNames.contains("opensearch-alerting")) + verifyMonitorExists(ALERTING_BASE_URI) + // TODO: Change the next execution time of the Monitor manually instead since this inflates + // the test execution by a lot (might have to wait for Job Scheduler plugin integration first) + // Waiting a minute to ensure the Monitor ran again at least once before checking if the job is running + // on time + Thread.sleep(60000) + verifyMonitorStats("/_plugins/_alerting") + } + } + break + } + } + + private enum class ClusterType { + OLD, + MIXED, + UPGRADED; + + companion object { + fun parse(value: String): ClusterType { + return when (value) { + "old_cluster" -> OLD + "mixed_cluster" -> MIXED + "upgraded_cluster" -> UPGRADED + else -> throw AssertionError("Unknown cluster type: $value") + } + } + } + } + + private fun getPluginUri(): String { + return when (CLUSTER_TYPE) { + ClusterType.OLD -> "_nodes/$CLUSTER_NAME-0/plugins" + ClusterType.MIXED -> { + when (System.getProperty("tests.rest.bwcsuite_round")) { + "second" -> "_nodes/$CLUSTER_NAME-1/plugins" + "third" -> "_nodes/$CLUSTER_NAME-2/plugins" + else -> "_nodes/$CLUSTER_NAME-0/plugins" + } + } + ClusterType.UPGRADED -> "_nodes/plugins" + } + } + + @Throws(Exception::class) + private fun createBasicMonitor() { + val indexName = "test_bwc_index" + val bwcMonitorString = """ + { + "type": "monitor", + "name": "test_bwc_monitor", + "enabled": true, + "schedule": { + "period": { + "interval": 1, + "unit": "MINUTES" + } + }, + "inputs": [{ + "search": { + "indices": ["$indexName"], + "query": { + "size": 0, + "aggregations": {}, + "query": { + "match_all": {} + } + } + } + }], + "triggers": [{ + "name": "abc", + "severity": "1", + "condition": { + "script": { + "source": "ctx.results[0].hits.total.value > 100000", + "lang": "painless" + } + }, + "actions": [] + }] + } + """.trimIndent() + createIndex(indexName, Settings.EMPTY) + + val createResponse =
client().makeRequest( + method = "POST", + endpoint = "$ALERTING_BASE_URI?refresh=true", + params = emptyMap(), + entity = StringEntity(bwcMonitorString, APPLICATION_JSON) + ) + + assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus()) + val responseBody = createResponse.asMap() + val createdId = responseBody["_id"] as String + val createdVersion = responseBody["_version"] as Int + assertNotEquals("Create monitor response is missing id", Monitor.NO_ID, createdId) + assertTrue("Create monitor response has incorrect version", createdVersion > 0) + } + + @Throws(Exception::class) + @Suppress("UNCHECKED_CAST") + private fun verifyMonitorExists(uri: String) { + val search = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).toString() + val searchResponse = client().makeRequest( + "GET", + "$uri/_search", + emptyMap(), + StringEntity(search, APPLICATION_JSON) + ) + assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"]!! as Map<String, Map<String, Any>> + val numberDocsFound = hits["total"]?.get("value") + assertEquals("Unexpected number of Monitors returned", 1, numberDocsFound) + } + + @Throws(Exception::class) + @Suppress("UNCHECKED_CAST") + /** + * Monitor stats verify that the Monitor's scheduled job is running on time; a passing check does not + * necessarily mean that the Monitor execution itself succeeded. + */ + private fun verifyMonitorStats(uri: String) { + val statsResponse = client().makeRequest( + "GET", + "$uri/stats", + emptyMap() + ) + assertEquals("Monitor stats failed", RestStatus.OK, statsResponse.restStatus()) + val xcp = createParser(XContentType.JSON.xContent(), statsResponse.entity.content) + val responseMap = xcp.map() + val nodesCount = responseMap["_nodes"]!! as Map<String, Any> + val totalNodes = nodesCount["total"] + val successfulNodes = nodesCount["successful"] + val nodesOnSchedule = responseMap["nodes_on_schedule"]!!
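+ // Sketch of the stats response shape implied by the reads above (the actual response carries additional fields): { "_nodes": { "total": 3, "successful": 3 }, "nodes_on_schedule": 3, ... }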
+ assertEquals("Incorrect number of total nodes", 3, totalNodes) + assertEquals("Some nodes in stats response failed", totalNodes, successfulNodes) + assertEquals("Not all nodes are on schedule", totalNodes, nodesOnSchedule) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionParserTests.kt b/alerting/bin/test/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionParserTests.kt new file mode 100644 index 000000000..7ebc82697 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionParserTests.kt @@ -0,0 +1,84 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.chainedAlertCondition + +import org.junit.Assert +import org.opensearch.alerting.chainedAlertCondition.parsers.ChainedAlertExpressionParser +import org.opensearch.test.OpenSearchTestCase + +class ChainedAlertsExpressionParserTests : OpenSearchTestCase() { + + fun `test trigger expression posix parsing simple AND`() { + val eqString = "(monitor[id=abc] && monitor[id=xyz])" + val equation = ChainedAlertExpressionParser(eqString).parse() + val expectedEquation = "monitor[id=abc] monitor[id=xyz] && " + Assert.assertTrue(expectedEquation == equation.toString()) + } + + fun `test trigger expression posix parsing simple AND without parentheses`() { + val eqString = "monitor[id=abc] && monitor[id=xyz]" + val equation = ChainedAlertExpressionParser(eqString).parse() + val expectedEquation = "monitor[id=abc] monitor[id=xyz] && " + Assert.assertTrue(expectedEquation == equation.toString()) + } + + fun `test trigger expression posix parsing multiple AND`() { + val eqString = "(monitor[id=abc] && monitor[id=def]) && monitor[id=ghi]" + val equation = ChainedAlertExpressionParser(eqString).parse() + Assert.assertEquals("monitor[id=abc] monitor[id=def] && monitor[id=ghi] && ", equation.toString()) + } + + fun `test trigger expression posix parsing multiple AND with parenthesis`() { + val eqString = "(monitor[id=sigma-123] && monitor[id=sigma-456]) && (monitor[id=sigma-789] && monitor[id=id-2aw34])" + val equation = ChainedAlertExpressionParser(eqString).parse() + Assert.assertEquals( + "monitor[id=sigma-123] monitor[id=sigma-456] && monitor[id=sigma-789] monitor[id=id-2aw34] && && ", + equation.toString() + ) + } + + fun `test trigger expression posix parsing simple OR`() { + val eqString = "(monitor[id=sigma-123] || monitor[id=sigma-456])" + val equation = ChainedAlertExpressionParser(eqString).parse() + Assert.assertEquals("monitor[id=sigma-123] monitor[id=sigma-456] || ", equation.toString()) + } + + fun `test trigger expression posix parsing multiple OR`() { + val eqString = "(monitor[id=sigma-123] || monitor[id=sigma-456]) || monitor[id=sigma-789]" + val equation = ChainedAlertExpressionParser(eqString).parse() + Assert.assertEquals("monitor[id=sigma-123] monitor[id=sigma-456] || monitor[id=sigma-789] || ", equation.toString()) + } + + fun `test trigger expression posix parsing multiple OR with parenthesis`() { + val eqString = "(monitor[id=sigma-123] || monitor[id=sigma-456]) || (monitor[id=sigma-789] || monitor[id=id-2aw34])" + val equation = ChainedAlertExpressionParser(eqString).parse() + Assert.assertEquals( + "monitor[id=sigma-123] monitor[id=sigma-456] || monitor[id=sigma-789] monitor[id=id-2aw34] || || ", + equation.toString() + ) + } + + fun `test trigger expression posix parsing simple NOT`() { + val eqString = "(monitor[id=sigma-123] || 
!monitor[id=sigma-456])" + val equation = ChainedAlertExpressionParser(eqString).parse() + Assert.assertEquals("monitor[id=sigma-123] monitor[id=sigma-456] ! || ", equation.toString()) + } + + fun `test trigger expression postfix parsing multiple NOT`() { + val eqString = "(monitor[id=sigma-123] && !monitor[tag=tag-456]) && !(monitor[id=sigma-789])" + val equation = ChainedAlertExpressionParser(eqString).parse() + Assert.assertEquals("monitor[id=sigma-123] monitor[tag=tag-456] ! && monitor[id=sigma-789] ! && ", equation.toString()) + } + + fun `test trigger expression postfix parsing multiple operators with parenthesis`() { + val eqString = "(monitor[id=sigma-123] && monitor[tag=sev1]) || !(!monitor[id=sigma-789] || monitor[id=id-2aw34])" + val equation = ChainedAlertExpressionParser(eqString).parse() + Assert.assertEquals( + "monitor[id=sigma-123] monitor[tag=sev1] && monitor[id=sigma-789] ! monitor[id=id-2aw34] || ! || ", + equation.toString() + ) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionResolveTests.kt b/alerting/bin/test/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionResolveTests.kt new file mode 100644 index 000000000..a0851d58d --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionResolveTests.kt @@ -0,0 +1,118 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.chainedAlertCondition + +import org.junit.Assert +import org.opensearch.alerting.chainedAlertCondition.parsers.ChainedAlertExpressionParser +import org.opensearch.test.OpenSearchTestCase + +class ChainedAlertsExpressionResolveTests : OpenSearchTestCase() { + + fun `test chained alert trigger expression evaluation simple AND`() { + val eqString = "(monitor[id=123] && monitor[id=456])" + val equation = ChainedAlertExpressionParser(eqString).parse() + Assert.assertEquals("monitor[id=123] monitor[id=456] && ", equation.toString()) + val alertGeneratingMonitors: Set<String> = setOf( + "123", + "456" + ) + Assert.assertTrue(equation.evaluate(alertGeneratingMonitors)) + val alertGeneratingMonitors2: Set<String> = setOf( + "123", + "789" + ) + Assert.assertFalse(equation.evaluate(alertGeneratingMonitors2)) + } + + fun `test chained alert trigger expression evaluation AND with NOT`() { + val eqString = "(monitor[id=123] && !monitor[id=456])" + val equation = ChainedAlertExpressionParser(eqString).parse() + Assert.assertEquals("monitor[id=123] monitor[id=456] !
&& ", equation.toString()) + val alertGeneratingMonitors: Set = setOf( + "123", + "456" + ) + Assert.assertFalse(equation.evaluate(alertGeneratingMonitors)) + val alertGeneratingMonitors1: Set = setOf( + "123", + "223" + ) + Assert.assertTrue(equation.evaluate(alertGeneratingMonitors1)) + } + + fun `test chained alert trigger expression evaluation simple OR`() { + val eqString = "(monitor[id=123] || monitor[id=456])" + val equation = ChainedAlertExpressionParser(eqString).parse() + Assert.assertEquals("monitor[id=123] monitor[id=456] || ", equation.toString()) + val alertGeneratingMonitors: Set = setOf( + "123", + "456" + ) + Assert.assertTrue(equation.evaluate(alertGeneratingMonitors)) + val alertGeneratingMonitors2: Set = setOf( + "234", + "567" + ) + Assert.assertFalse(equation.evaluate(alertGeneratingMonitors2)) + } + + fun `test chained alert trigger expression evaluation OR with NOT`() { + val eqString = "(monitor[id=123] || !monitor[id=456])" + val equation = ChainedAlertExpressionParser(eqString).parse() + Assert.assertEquals("monitor[id=123] monitor[id=456] ! || ", equation.toString()) + val alertGeneratingMonitors: Set = setOf( + "123", + "456" + ) + Assert.assertTrue(equation.evaluate(alertGeneratingMonitors)) + val alertGeneratingMonitors2: Set = setOf( + "456" + ) + Assert.assertFalse(equation.evaluate(alertGeneratingMonitors2)) + } + + fun `test chained alert trigger expression evaluation simple NOT`() { + val eqString = "!(monitor[id=456])" + val equation = ChainedAlertExpressionParser(eqString).parse() + Assert.assertEquals("monitor[id=456] ! ", equation.toString()) + val alertGeneratingMonitors: Set = setOf( + "123" + ) + Assert.assertTrue(equation.evaluate(alertGeneratingMonitors)) + val alertGeneratingMonitors2: Set = setOf( + "456" + ) + Assert.assertFalse(equation.evaluate(alertGeneratingMonitors2)) + } + + fun `test chained alert trigger expression evaluation with multiple operators with parenthesis`() { + val eqString = "(monitor[id=123] && monitor[id=456]) || !(!monitor[id=789] || monitor[id=abc])" + val equation = ChainedAlertExpressionParser(eqString).parse() + Assert.assertEquals( + "monitor[id=123] monitor[id=456] && monitor[id=789] ! monitor[id=abc] || ! 
|| ", + equation.toString() + ) + // part 1 evaluates, part 2 evaluates + val alertGeneratingMonitors1: Set = setOf( + "123", + "456", + "789", + "abc" + ) + Assert.assertTrue(equation.evaluate(alertGeneratingMonitors1)) + // part 1 not evaluates, part 2 not evaluates + val alertGeneratingMonitors2: Set = setOf( + "789", + "abc" + ) + Assert.assertFalse(equation.evaluate(alertGeneratingMonitors2)) + // part 1 not evaluates, part 2 evaluates + val alertGeneratingMonitors3: Set = setOf( + "789" + ) + Assert.assertTrue(equation.evaluate(alertGeneratingMonitors3)) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/model/AlertTests.kt b/alerting/bin/test/org/opensearch/alerting/model/AlertTests.kt new file mode 100644 index 000000000..08fba74cb --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/model/AlertTests.kt @@ -0,0 +1,62 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.junit.Assert +import org.opensearch.alerting.randomAlert +import org.opensearch.alerting.randomAlertWithAggregationResultBucket +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.test.OpenSearchTestCase + +class AlertTests : OpenSearchTestCase() { + fun `test alert as template args`() { + val alert = randomAlert().copy(acknowledgedTime = null, lastNotificationTime = null) + + val templateArgs = alert.asTemplateArg() + + assertEquals("Template args id does not match", templateArgs[Alert.ALERT_ID_FIELD], alert.id) + assertEquals("Template args version does not match", templateArgs[Alert.ALERT_VERSION_FIELD], alert.version) + assertEquals("Template args state does not match", templateArgs[Alert.STATE_FIELD], alert.state.toString()) + assertEquals("Template args error message does not match", templateArgs[Alert.ERROR_MESSAGE_FIELD], alert.errorMessage) + assertEquals("Template args acknowledged time does not match", templateArgs[Alert.ACKNOWLEDGED_TIME_FIELD], null) + assertEquals("Template args end time does not", templateArgs[Alert.END_TIME_FIELD], alert.endTime?.toEpochMilli()) + assertEquals("Template args start time does not", templateArgs[Alert.START_TIME_FIELD], alert.startTime.toEpochMilli()) + assertEquals("Template args last notification time does not match", templateArgs[Alert.LAST_NOTIFICATION_TIME_FIELD], null) + assertEquals("Template args severity does not match", templateArgs[Alert.SEVERITY_FIELD], alert.severity) + } + + fun `test agg alert as template args`() { + val alert = randomAlertWithAggregationResultBucket().copy(acknowledgedTime = null, lastNotificationTime = null) + + val templateArgs = alert.asTemplateArg() + + assertEquals("Template args id does not match", templateArgs[Alert.ALERT_ID_FIELD], alert.id) + assertEquals("Template args version does not match", templateArgs[Alert.ALERT_VERSION_FIELD], alert.version) + assertEquals("Template args state does not match", templateArgs[Alert.STATE_FIELD], alert.state.toString()) + assertEquals("Template args error message does not match", templateArgs[Alert.ERROR_MESSAGE_FIELD], alert.errorMessage) + assertEquals("Template args acknowledged time does not match", templateArgs[Alert.ACKNOWLEDGED_TIME_FIELD], null) + assertEquals("Template args end time does not", templateArgs[Alert.END_TIME_FIELD], alert.endTime?.toEpochMilli()) + assertEquals("Template args start time does not", templateArgs[Alert.START_TIME_FIELD], alert.startTime.toEpochMilli()) + assertEquals("Template args last notification time does not match", 
templateArgs[Alert.LAST_NOTIFICATION_TIME_FIELD], null) + assertEquals("Template args severity does not match", templateArgs[Alert.SEVERITY_FIELD], alert.severity) + Assert.assertEquals( + "Template args bucketKeys do not match", + templateArgs[Alert.BUCKET_KEYS], alert.aggregationResultBucket?.bucketKeys?.joinToString(",") + ) + Assert.assertEquals( + "Template args parentBucketPath does not match", + templateArgs[Alert.PARENTS_BUCKET_PATH], alert.aggregationResultBucket?.parentBucketPath + ) + } + + fun `test alert acknowledged`() { + val ackAlert = randomAlert().copy(state = Alert.State.ACKNOWLEDGED) + assertTrue("Alert is not acknowledged", ackAlert.isAcknowledged()) + + val activeAlert = randomAlert().copy(state = Alert.State.ACTIVE) + assertFalse("Alert is acknowledged", activeAlert.isAcknowledged()) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/model/DestinationTests.kt b/alerting/bin/test/org/opensearch/alerting/model/DestinationTests.kt new file mode 100644 index 000000000..7dac05b2a --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/model/DestinationTests.kt @@ -0,0 +1,310 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.alerting.ADMIN +import org.opensearch.alerting.model.destination.Chime +import org.opensearch.alerting.model.destination.CustomWebhook +import org.opensearch.alerting.model.destination.Destination +import org.opensearch.alerting.model.destination.Slack +import org.opensearch.alerting.model.destination.email.Email +import org.opensearch.alerting.model.destination.email.Recipient +import org.opensearch.alerting.parser +import org.opensearch.alerting.randomUser +import org.opensearch.alerting.util.DestinationType +import org.opensearch.common.io.stream.BytesStreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.test.OpenSearchTestCase +import java.time.Instant + +class DestinationTests : OpenSearchTestCase() { + + fun `test chime destination`() { + val chime = Chime("http://abc.com") + assertEquals("Url is manipulated", chime.url, "http://abc.com") + } + + fun `test chime destination without url`() { + try { + Chime("") + fail("Creating a chime destination with empty url did not fail.") + } catch (ignored: IllegalArgumentException) { + } + } + + fun `test slack destination`() { + val slack = Slack("http://abc.com") + assertEquals("Url is manipulated", slack.url, "http://abc.com") + } + + fun `test slack destination without url`() { + try { + Slack("") + fail("Creating a slack destination with empty url did not fail.") + } catch (ignored: IllegalArgumentException) { + } + } + + fun `test email destination without recipients`() { + try { + Email("", emptyList()) + fail("Creating an email destination with empty recipients did not fail.") + } catch (ignored: IllegalArgumentException) { + } + } + + fun `test email recipient with valid email`() { + Recipient( + Recipient.RecipientType.EMAIL, + null, + "test@email.com" + ) + } + + fun `test email recipient with invalid email fails`() { + try { + Recipient( + Recipient.RecipientType.EMAIL, + null, + "invalid@email" + ) + fail("Creating an email recipient with an invalid email did not fail.") + } catch (ignored: IllegalArgumentException) { + } + } + + fun `test custom webhook destination with url and no host`() { + val customWebhook = CustomWebhook("http://abc.com", null, null, -1, null, null, emptyMap(), emptyMap(), null, null) + assertEquals("Url is
manipulated", customWebhook.url, "http://abc.com") + } + + fun `test custom webhook destination with host and no url`() { + try { + val customWebhook = CustomWebhook(null, null, "abc.com", 80, null, null, emptyMap(), emptyMap(), null, null) + assertEquals("host is manipulated", customWebhook.host, "abc.com") + } catch (ignored: IllegalArgumentException) { + } + } + + fun `test custom webhook destination with url and host`() { + // In this case, url will be given priority + val customWebhook = CustomWebhook("http://abc.com", null, null, -1, null, null, emptyMap(), emptyMap(), null, null) + assertEquals("Url is manipulated", customWebhook.url, "http://abc.com") + } + + fun `test custom webhook destination with no url and no host`() { + try { + CustomWebhook("", null, null, 80, null, null, emptyMap(), emptyMap(), null, null) + fail("Creating a custom webhook destination with empty url did not fail.") + } catch (ignored: IllegalArgumentException) { + } + } + + fun `test chime destination create using stream`() { + val chimeDest = Destination( + "1234", 0L, 1, 1, 1, DestinationType.CHIME, "TestChimeDest", + randomUser(), Instant.now(), Chime("test.com"), null, null, null + ) + + val out = BytesStreamOutput() + chimeDest.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newDest = Destination.readFrom(sin) + + assertNotNull(newDest) + assertEquals("1234", newDest.id) + assertEquals(0, newDest.version) + assertEquals(1, newDest.schemaVersion) + assertEquals(DestinationType.CHIME, newDest.type) + assertEquals("TestChimeDest", newDest.name) + assertNotNull(newDest.lastUpdateTime) + assertNotNull(newDest.chime) + assertNull(newDest.slack) + assertNull(newDest.customWebhook) + assertNull(newDest.email) + } + + fun `test slack destination create using stream`() { + val slackDest = Destination( + "2345", 1L, 2, 1, 1, DestinationType.SLACK, "TestSlackDest", + randomUser(), Instant.now(), null, Slack("mytest.com"), null, null + ) + + val out = BytesStreamOutput() + slackDest.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newDest = Destination.readFrom(sin) + + assertNotNull(newDest) + assertEquals("2345", newDest.id) + assertEquals(1, newDest.version) + assertEquals(2, newDest.schemaVersion) + assertEquals(DestinationType.SLACK, newDest.type) + assertEquals("TestSlackDest", newDest.name) + assertNotNull(newDest.lastUpdateTime) + assertNull(newDest.chime) + assertNotNull(newDest.slack) + assertNull(newDest.customWebhook) + assertNull(newDest.email) + } + + fun `test customwebhook destination create using stream`() { + val customWebhookDest = Destination( + "2345", + 1L, + 2, + 1, + 1, + DestinationType.SLACK, + "TestSlackDest", + randomUser(), + Instant.now(), + null, + null, + CustomWebhook( + "test.com", + "schema", + "localhost", + 162, + "/tmp/", + "POST", + mutableMapOf(), + mutableMapOf(), + ADMIN, + ADMIN + ), + null + ) + val out = BytesStreamOutput() + customWebhookDest.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newDest = Destination.readFrom(sin) + + assertNotNull(newDest) + assertEquals("2345", newDest.id) + assertEquals(1, newDest.version) + assertEquals(2, newDest.schemaVersion) + assertEquals(DestinationType.SLACK, newDest.type) + assertEquals("TestSlackDest", newDest.name) + assertNotNull(newDest.lastUpdateTime) + assertNull(newDest.chime) + assertNull(newDest.slack) + assertNotNull(newDest.customWebhook) + assertNull(newDest.email) + } + + fun `test customwebhook destination create using 
stream with optionals`() { + val customWebhookDest = Destination( + "2345", + 1L, + 2, + 1, + 1, + DestinationType.SLACK, + "TestSlackDest", + randomUser(), + Instant.now(), + null, + null, + CustomWebhook( + "test.com", + null, + "localhost", + 162, + null, + "POST", + mutableMapOf(), + mutableMapOf(), + null, + null + ), + null + ) + val out = BytesStreamOutput() + customWebhookDest.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newDest = Destination.readFrom(sin) + + assertNotNull(newDest) + assertEquals("2345", newDest.id) + assertEquals(1, newDest.version) + assertEquals(2, newDest.schemaVersion) + assertEquals(DestinationType.SLACK, newDest.type) + assertEquals("TestSlackDest", newDest.name) + assertNotNull(newDest.lastUpdateTime) + assertNull(newDest.chime) + assertNull(newDest.slack) + assertNotNull(newDest.customWebhook) + assertNull(newDest.email) + } + + fun `test email destination create using stream`() { + val recipients = listOf( + Recipient( + Recipient.RecipientType.EMAIL, + null, + "test@email.com" + ) + ) + val mailDest = Destination( + "2345", + 1L, + 2, + 1, + 1, + DestinationType.EMAIL, + "TestEmailDest", + randomUser(), + Instant.now(), + null, + null, + null, + Email("3456", recipients) + ) + + val out = BytesStreamOutput() + mailDest.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newDest = Destination.readFrom(sin) + + assertNotNull(newDest) + assertEquals("2345", newDest.id) + assertEquals(1, newDest.version) + assertEquals(2, newDest.schemaVersion) + assertEquals(DestinationType.EMAIL, newDest.type) + assertEquals("TestEmailDest", newDest.name) + assertNotNull(newDest.lastUpdateTime) + assertNull(newDest.chime) + assertNull(newDest.slack) + assertNull(newDest.customWebhook) + assertNotNull(newDest.email) + + assertEquals("3456", newDest.email!!.emailAccountID) + assertEquals(recipients, newDest.email!!.recipients) + } + + fun `test chime destination without user`() { + val userString = "{\"type\":\"chime\",\"name\":\"TestChimeDest\",\"schema_version\":1," + + "\"last_update_time\":1600063313658,\"chime\":{\"url\":\"test.com\"}}" + val parsedDest = Destination.parse(parser(userString)) + assertNull(parsedDest.user) + } + + fun `test chime destination with user`() { + val userString = "{\"type\":\"chime\",\"name\":\"TestChimeDest\",\"user\":{\"name\":\"joe\",\"backend_roles\"" + + ":[\"ops\",\"backup\"],\"roles\":[\"ops_role, backup_role\"],\"custom_attribute_names\":[\"test_attr=test\"]}," + + "\"schema_version\":1,\"last_update_time\":1600063313658,\"chime\":{\"url\":\"test.com\"}}" + val parsedDest = Destination.parse(parser(userString)) + assertNotNull(parsedDest.user) + } + + fun `test chime destination with user as null`() { + val userString = "{\"type\":\"chime\",\"name\":\"TestChimeDest\",\"user\":null,\"schema_version\":1," + + "\"last_update_time\":1600063313658,\"chime\":{\"url\":\"test.com\"}}" + val parsedDest = Destination.parse(parser(userString)) + assertNull(parsedDest.user) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/model/EmailAccountTests.kt b/alerting/bin/test/org/opensearch/alerting/model/EmailAccountTests.kt new file mode 100644 index 000000000..d3e436378 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/model/EmailAccountTests.kt @@ -0,0 +1,61 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.alerting.model.destination.email.EmailAccount 
+import org.opensearch.test.OpenSearchTestCase + +class EmailAccountTests : OpenSearchTestCase() { + + fun `test email account`() { + val emailAccount = EmailAccount( + name = "test", + email = "test@email.com", + host = "smtp.com", + port = 25, + method = EmailAccount.MethodType.NONE, + username = null, + password = null + ) + assertEquals("Email account name was changed", emailAccount.name, "test") + assertEquals("Email account email was changed", emailAccount.email, "test@email.com") + assertEquals("Email account host was changed", emailAccount.host, "smtp.com") + assertEquals("Email account port was changed", emailAccount.port, 25) + assertEquals("Email account method was changed", emailAccount.method, EmailAccount.MethodType.NONE) + } + + fun `test email account with invalid name`() { + try { + EmailAccount( + name = "invalid-name", + email = "test@email.com", + host = "smtp.com", + port = 25, + method = EmailAccount.MethodType.NONE, + username = null, + password = null + ) + fail("Creating an email account with an invalid name did not fail.") + } catch (ignored: IllegalArgumentException) { + } + } + + fun `test email account with invalid email`() { + try { + EmailAccount( + name = "test", + email = "test@.com", + host = "smtp.com", + port = 25, + method = EmailAccount.MethodType.NONE, + username = null, + password = null + ) + fail("Creating an email account with an invalid email did not fail.") + } catch (ignored: IllegalArgumentException) { + } + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/model/EmailGroupTests.kt b/alerting/bin/test/org/opensearch/alerting/model/EmailGroupTests.kt new file mode 100644 index 000000000..a0c215059 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/model/EmailGroupTests.kt @@ -0,0 +1,60 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.alerting.model.destination.email.EmailEntry +import org.opensearch.alerting.model.destination.email.EmailGroup +import org.opensearch.test.OpenSearchTestCase + +class EmailGroupTests : OpenSearchTestCase() { + + fun `test email group`() { + val emailGroup = EmailGroup( + name = "test", + emails = listOf(EmailEntry("test@email.com")) + ) + assertEquals("Email group name was changed", emailGroup.name, "test") + assertEquals("Email group emails count was changed", emailGroup.emails.size, 1) + assertEquals("Email group email entry was changed", emailGroup.emails[0].email, "test@email.com") + } + + fun `test email group get emails as list of string`() { + val emailGroup = EmailGroup( + name = "test", + emails = listOf( + EmailEntry("test@email.com"), + EmailEntry("test2@email.com") + ) + ) + + assertEquals( + "List of email strings does not match email entries", + listOf("test@email.com", "test2@email.com"), emailGroup.getEmailsAsListOfString() + ) + } + + fun `test email group with invalid name fails`() { + try { + EmailGroup( + name = "invalid name", + emails = listOf(EmailEntry("test@email.com")) + ) + fail("Creating an email group with an invalid name did not fail.") + } catch (ignored: IllegalArgumentException) { + } + } + + fun `test email group with invalid email fails`() { + try { + EmailGroup( + name = "test", + emails = listOf(EmailEntry("invalid.com")) + ) + fail("Creating an email group with an invalid email did not fail.") + } catch (ignored: IllegalArgumentException) { + } + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/model/FindingTests.kt 
b/alerting/bin/test/org/opensearch/alerting/model/FindingTests.kt new file mode 100644 index 000000000..f77ca3ddc --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/model/FindingTests.kt @@ -0,0 +1,40 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.alerting.randomFinding +import org.opensearch.commons.alerting.model.Finding +import org.opensearch.test.OpenSearchTestCase + +class FindingTests : OpenSearchTestCase() { + fun `test finding asTemplateArgs`() { + // GIVEN + val finding = randomFinding() + + // WHEN + val templateArgs = finding.asTemplateArg() + + // THEN + assertEquals("Template args 'id' field does not match:", templateArgs[Finding.FINDING_ID_FIELD], finding.id) + assertEquals( + "Template args 'relatedDocIds' field does not match:", + templateArgs[Finding.RELATED_DOC_IDS_FIELD], + finding.relatedDocIds + ) + assertEquals("Template args 'monitorId' field does not match:", templateArgs[Finding.MONITOR_ID_FIELD], finding.monitorId) + assertEquals( + "Template args 'monitorName' field does not match:", + templateArgs[Finding.MONITOR_NAME_FIELD], + finding.monitorName + ) + assertEquals("Template args 'queries' field does not match:", templateArgs[Finding.QUERIES_FIELD], finding.docLevelQueries) + assertEquals( + "Template args 'timestamp' field does not match:", + templateArgs[Finding.TIMESTAMP_FIELD], + finding.timestamp.toEpochMilli() + ) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/model/WriteableTests.kt b/alerting/bin/test/org/opensearch/alerting/model/WriteableTests.kt new file mode 100644 index 000000000..6851c471d --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/model/WriteableTests.kt @@ -0,0 +1,126 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.alerting.model.destination.email.EmailAccount +import org.opensearch.alerting.model.destination.email.EmailGroup +import org.opensearch.alerting.randomActionRunResult +import org.opensearch.alerting.randomBucketLevelMonitorRunResult +import org.opensearch.alerting.randomBucketLevelTriggerRunResult +import org.opensearch.alerting.randomDocumentLevelMonitorRunResult +import org.opensearch.alerting.randomDocumentLevelTriggerRunResult +import org.opensearch.alerting.randomEmailAccount +import org.opensearch.alerting.randomEmailGroup +import org.opensearch.alerting.randomInputRunResults +import org.opensearch.alerting.randomQueryLevelMonitorRunResult +import org.opensearch.alerting.randomQueryLevelTriggerRunResult +import org.opensearch.common.io.stream.BytesStreamOutput +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.test.OpenSearchTestCase + +class WriteableTests : OpenSearchTestCase() { + + fun `test actionrunresult as stream`() { + val actionRunResult = randomActionRunResult() + val out = BytesStreamOutput() + actionRunResult.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newActionRunResult = ActionRunResult(sin) + assertEquals("Round tripping ActionRunResult doesn't work", actionRunResult, newActionRunResult) + } + + fun `test query-level triggerrunresult as stream`() { + val runResult = randomQueryLevelTriggerRunResult() + val out = BytesStreamOutput() + runResult.writeTo(out) + val sin = 
StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newRunResult = QueryLevelTriggerRunResult(sin) + assertEquals("Round tripping QueryLevelTriggerRunResult doesn't work", runResult, newRunResult) + } + + fun `test bucket-level triggerrunresult as stream`() { + val runResult = randomBucketLevelTriggerRunResult() + val out = BytesStreamOutput() + runResult.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newRunResult = BucketLevelTriggerRunResult(sin) + assertEquals("Round tripping BucketLevelTriggerRunResult doesn't work", runResult, newRunResult) + } + + fun `test doc-level triggerrunresult as stream`() { + val runResult = randomDocumentLevelTriggerRunResult() + val out = BytesStreamOutput() + runResult.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newRunResult = DocumentLevelTriggerRunResult(sin) + assertEquals("Round tripping DocumentLevelTriggerRunResult doesn't work", runResult, newRunResult) + } + + fun `test inputrunresult as stream`() { + val runResult = randomInputRunResults() + val out = BytesStreamOutput() + runResult.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newRunResult = InputRunResults.readFrom(sin) + assertEquals("Round tripping InputRunResults doesn't work", runResult, newRunResult) + } + + fun `test query-level monitorrunresult as stream`() { + val runResult = randomQueryLevelMonitorRunResult() + val out = BytesStreamOutput() + runResult.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newRunResult = MonitorRunResult<QueryLevelTriggerRunResult>(sin) + assertEquals("Round tripping MonitorRunResult doesn't work", runResult, newRunResult) + } + + fun `test bucket-level monitorrunresult as stream`() { + val runResult = randomBucketLevelMonitorRunResult() + val out = BytesStreamOutput() + runResult.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newRunResult = MonitorRunResult<BucketLevelTriggerRunResult>(sin) + assertEquals("Round tripping MonitorRunResult doesn't work", runResult, newRunResult) + } + + fun `test doc-level monitorrunresult as stream`() { + val runResult = randomDocumentLevelMonitorRunResult() + val out = BytesStreamOutput() + runResult.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newRunResult = MonitorRunResult<DocumentLevelTriggerRunResult>(sin) + assertEquals("Round tripping MonitorRunResult doesn't work", runResult, newRunResult) + } + + fun `test searchinput as stream`() { + val input = SearchInput(emptyList(), SearchSourceBuilder()) + val out = BytesStreamOutput() + input.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newInput = SearchInput(sin) + assertEquals("Round tripping SearchInput doesn't work", input, newInput) + } + + fun `test emailaccount as stream`() { + val emailAccount = randomEmailAccount() + val out = BytesStreamOutput() + emailAccount.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newEmailAccount = EmailAccount.readFrom(sin) + assertEquals("Round tripping EmailAccount doesn't work", emailAccount, newEmailAccount) + } + + fun `test emailgroup as stream`() { + val emailGroup = randomEmailGroup() + val out = BytesStreamOutput() + emailGroup.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newEmailGroup = EmailGroup.readFrom(sin) + assertEquals("Round tripping EmailGroup doesn't work", emailGroup, newEmailGroup) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/model/XContentTests.kt b/alerting/bin/test/org/opensearch/alerting/model/XContentTests.kt
new file mode 100644 index 000000000..7d07af331 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/model/XContentTests.kt @@ -0,0 +1,94 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.model + +import org.opensearch.alerting.builder +import org.opensearch.alerting.model.destination.email.EmailAccount +import org.opensearch.alerting.model.destination.email.EmailGroup +import org.opensearch.alerting.parser +import org.opensearch.alerting.randomActionExecutionResult +import org.opensearch.alerting.randomAlert +import org.opensearch.alerting.randomEmailAccount +import org.opensearch.alerting.randomEmailGroup +import org.opensearch.alerting.toJsonString +import org.opensearch.common.xcontent.XContentFactory +import org.opensearch.commons.alerting.model.ActionExecutionResult +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.util.string +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.test.OpenSearchTestCase + +class XContentTests : OpenSearchTestCase() { + + fun `test alert parsing`() { + val alert = randomAlert() + + val alertString = alert.toXContentWithUser(builder()).string() + val parsedAlert = Alert.parse(parser(alertString)) + + assertEquals("Round tripping alert doesn't work", alert, parsedAlert) + } + + fun `test alert parsing without user`() { + val alertStr = "{\"id\":\"\",\"version\":-1,\"monitor_id\":\"\",\"schema_version\":0,\"monitor_version\":1," + + "\"monitor_name\":\"ARahqfRaJG\",\"trigger_id\":\"fhe1-XQBySl0wQKDBkOG\",\"trigger_name\":\"ffELMuhlro\"," + + "\"state\":\"ACTIVE\",\"error_message\":null,\"alert_history\":[],\"severity\":\"1\",\"action_execution_results\"" + + ":[{\"action_id\":\"ghe1-XQBySl0wQKDBkOG\",\"last_execution_time\":1601917224583,\"throttled_count\":-1478015168}," + + "{\"action_id\":\"gxe1-XQBySl0wQKDBkOH\",\"last_execution_time\":1601917224583,\"throttled_count\":-768533744}]," + + "\"start_time\":1601917224599,\"last_notification_time\":null,\"end_time\":null,\"acknowledged_time\":null}" + val parsedAlert = Alert.parse(parser(alertStr)) + assertNull(parsedAlert.monitorUser) + } + + fun `test alert parsing with user as null`() { + val alertStr = "{\"id\":\"\",\"version\":-1,\"monitor_id\":\"\",\"schema_version\":0,\"monitor_version\":1,\"monitor_user\":null," + + "\"monitor_name\":\"ARahqfRaJG\",\"trigger_id\":\"fhe1-XQBySl0wQKDBkOG\",\"trigger_name\":\"ffELMuhlro\"," + + "\"state\":\"ACTIVE\",\"error_message\":null,\"alert_history\":[],\"severity\":\"1\",\"action_execution_results\"" + + ":[{\"action_id\":\"ghe1-XQBySl0wQKDBkOG\",\"last_execution_time\":1601917224583,\"throttled_count\":-1478015168}," + + "{\"action_id\":\"gxe1-XQBySl0wQKDBkOH\",\"last_execution_time\":1601917224583,\"throttled_count\":-768533744}]," + + "\"start_time\":1601917224599,\"last_notification_time\":null,\"end_time\":null,\"acknowledged_time\":null}" + val parsedAlert = Alert.parse(parser(alertStr)) + assertNull(parsedAlert.monitorUser) + } + + fun `test action execution result parsing`() { + val actionExecutionResult = randomActionExecutionResult() + + val actionExecutionResultString = actionExecutionResult.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() + val parsedActionExecutionResultString = ActionExecutionResult.parse(parser(actionExecutionResultString)) + + assertEquals("Round tripping ActionExecutionResult doesn't work", actionExecutionResult, parsedActionExecutionResultString) + } + + fun `test email account parsing`() {
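+ // Round trip: serialize a random EmailAccount to its JSON form and parse it back.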
val emailAccount = randomEmailAccount() + + val emailAccountString = emailAccount.toJsonString() + val parsedEmailAccount = EmailAccount.parse(parser(emailAccountString)) + assertEquals("Round tripping EmailAccount doesn't work", emailAccount, parsedEmailAccount) + } + + fun `test email group parsing`() { + val emailGroup = randomEmailGroup() + + val emailGroupString = emailGroup.toJsonString() + val parsedEmailGroup = EmailGroup.parse(parser(emailGroupString)) + assertEquals("Round tripping EmailGroup doesn't work", emailGroup, parsedEmailGroup) + } + + fun `test MonitorMetadata`() { + val monitorMetadata = MonitorMetadata( + id = "monitorId-metadata", + monitorId = "monitorId", + lastActionExecutionTimes = emptyList(), + lastRunContext = emptyMap(), + sourceToQueryIndexMapping = mutableMapOf() + ) + val monitorMetadataString = monitorMetadata.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS).string() + val parsedMonitorMetadata = MonitorMetadata.parse(parser(monitorMetadataString)) + assertEquals("Round tripping MonitorMetadata doesn't work", monitorMetadata, parsedMonitorMetadata) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/DestinationRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/DestinationRestApiIT.kt new file mode 100644 index 000000000..00e7c197d --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/resthandler/DestinationRestApiIT.kt @@ -0,0 +1,189 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.junit.Assert +import org.opensearch.alerting.AlertingRestTestCase +import org.opensearch.alerting.model.destination.Chime +import org.opensearch.alerting.model.destination.CustomWebhook +import org.opensearch.alerting.model.destination.Destination +import org.opensearch.alerting.model.destination.Slack +import org.opensearch.alerting.model.destination.email.Email +import org.opensearch.alerting.model.destination.email.Recipient +import org.opensearch.alerting.randomUser +import org.opensearch.alerting.util.DestinationType +import org.opensearch.test.junit.annotations.TestLogging +import java.time.Instant + +@TestLogging("level:DEBUG", reason = "Debug for tests.") +@Suppress("UNCHECKED_CAST") +class DestinationRestApiIT : AlertingRestTestCase() { + + fun `test creating a chime destination`() { + val chime = Chime("http://abc.com") + val destination = Destination( + type = DestinationType.CHIME, + name = "test", + user = randomUser(), + lastUpdateTime = Instant.now(), + chime = chime, + slack = null, + customWebhook = null, + email = null + ) + val createdDestination = createDestination(destination = destination) + assertEquals("Incorrect destination name", createdDestination.name, "test") + assertEquals("Incorrect destination type", createdDestination.type, DestinationType.CHIME) + Assert.assertNotNull("chime object should not be null", createdDestination.chime) + } + + fun `test creating a custom webhook destination with url`() { + val customWebhook = CustomWebhook("http://abc.com", null, null, 80, null, "PUT", emptyMap(), emptyMap(), null, null) + val destination = Destination( + type = DestinationType.CUSTOM_WEBHOOK, + name = "test", + user = randomUser(), + lastUpdateTime = Instant.now(), + chime = null, + slack = null, + customWebhook = customWebhook, + email = null + ) + val createdDestination = createDestination(destination = destination) + assertEquals("Incorrect destination name", createdDestination.name, 
"test") + assertEquals("Incorrect destination type", createdDestination.type, DestinationType.CUSTOM_WEBHOOK) + Assert.assertNotNull("custom webhook object should not be null", createdDestination.customWebhook) + } + + fun `test creating a custom webhook destination with host`() { + val customWebhook = CustomWebhook( + "", "http", "abc.com", 80, "a/b/c", "PATCH", + mapOf("foo" to "1", "bar" to "2"), mapOf("h1" to "1", "h2" to "2"), null, null + ) + val destination = Destination( + type = DestinationType.CUSTOM_WEBHOOK, + name = "test", + user = randomUser(), + lastUpdateTime = Instant.now(), + chime = null, + slack = null, + customWebhook = customWebhook, + email = null + ) + val createdDestination = createDestination(destination = destination) + assertEquals("Incorrect destination name", createdDestination.name, "test") + assertEquals("Incorrect destination type", createdDestination.type, DestinationType.CUSTOM_WEBHOOK) + assertEquals("Incorrect destination host", createdDestination.customWebhook?.host, "abc.com") + assertEquals("Incorrect destination port", createdDestination.customWebhook?.port, 80) + assertEquals("Incorrect destination path", createdDestination.customWebhook?.path, "a/b/c") + assertEquals("Incorrect destination scheme", createdDestination.customWebhook?.scheme, "http") + assertEquals("Incorrect destination method", createdDestination.customWebhook?.method, "PATCH") + Assert.assertNotNull("custom webhook object should not be null", createdDestination.customWebhook) + } + + fun `test creating an email destination`() { + val recipient = Recipient(type = Recipient.RecipientType.EMAIL, emailGroupID = null, email = "test@email.com") + val email = Email("", listOf(recipient)) + val destination = Destination( + type = DestinationType.EMAIL, + name = "test", + user = randomUser(), + lastUpdateTime = Instant.now(), + chime = null, + slack = null, + customWebhook = null, + email = email + ) + + val createdDestination = createDestination(destination = destination) + Assert.assertNotNull("Email object should not be null", createdDestination.email) + assertEquals("Incorrect destination name", createdDestination.name, "test") + assertEquals("Incorrect destination type", createdDestination.type, DestinationType.EMAIL) + assertEquals( + "Incorrect email destination recipient type", + createdDestination.email?.recipients?.get(0)?.type, + Recipient.RecipientType.EMAIL + ) + assertEquals( + "Incorrect email destination recipient email", + createdDestination.email?.recipients?.get(0)?.email, + "test@email.com" + ) + } + + fun `test get destination`() { + val destination = createDestination() + val getDestinationResponse = getDestination(destination) + assertEquals(destination.id, getDestinationResponse["id"]) + assertEquals(destination.type.value, getDestinationResponse["type"]) + assertEquals(destination.seqNo, getDestinationResponse["seq_no"]) + assertEquals(destination.lastUpdateTime.toEpochMilli(), getDestinationResponse["last_update_time"]) + assertEquals(destination.primaryTerm, getDestinationResponse["primary_term"]) + } + + fun `test get destinations with slack destination type`() { + val slack = Slack("url") + val dest = Destination( + type = DestinationType.SLACK, + name = "testSlack", + user = randomUser(), + lastUpdateTime = Instant.now(), + chime = null, + slack = slack, + customWebhook = null, + email = null + ) + + val inputMap = HashMap() + inputMap["missing"] = "_last" + inputMap["destinationType"] = "slack" + + val destination = createDestination(dest) + val destination2 
= createDestination() + val getDestinationsResponse = getDestinations(inputMap) + + assertEquals(1, getDestinationsResponse.size) + val getDestinationResponse = getDestinationsResponse[0] + + assertEquals(destination.id, getDestinationResponse["id"]) + assertNotEquals(destination2.id, getDestinationResponse["id"]) + assertEquals(destination.type.value, getDestinationResponse["type"]) + assertEquals(destination.seqNo, getDestinationResponse["seq_no"]) + assertEquals(destination.lastUpdateTime.toEpochMilli(), getDestinationResponse["last_update_time"]) + assertEquals(destination.primaryTerm, getDestinationResponse["primary_term"]) + } + + fun `test get destinations matching a given name`() { + val slack = Slack("url") + val dest = Destination( + type = DestinationType.SLACK, + name = "testSlack", + user = randomUser(), + lastUpdateTime = Instant.now(), + chime = null, + slack = slack, + customWebhook = null, + email = null + ) + + val inputMap = HashMap<String, Any>() + inputMap["searchString"] = "testSlack" + + val destination = createDestination(dest) + val destination2 = createDestination() + val getDestinationsResponse = getDestinations(inputMap) + + assertEquals(1, getDestinationsResponse.size) + val getDestinationResponse = getDestinationsResponse[0] + + assertEquals(destination.id, getDestinationResponse["id"]) + assertNotEquals(destination2.id, getDestinationResponse["id"]) + assertEquals(destination.type.value, getDestinationResponse["type"]) + assertEquals(destination.seqNo, getDestinationResponse["seq_no"]) + assertEquals(destination.lastUpdateTime.toEpochMilli(), getDestinationResponse["last_update_time"]) + assertEquals(destination.primaryTerm, getDestinationResponse["primary_term"]) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/EmailAccountRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/EmailAccountRestApiIT.kt new file mode 100644 index 000000000..fc730f20b --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/resthandler/EmailAccountRestApiIT.kt @@ -0,0 +1,180 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.http.entity.ContentType +import org.apache.http.nio.entity.NStringEntity +import org.opensearch.alerting.AlertingPlugin.Companion.EMAIL_ACCOUNT_BASE_URI +import org.opensearch.alerting.AlertingRestTestCase +import org.opensearch.alerting.makeRequest +import org.opensearch.alerting.model.destination.email.EmailAccount +import org.opensearch.alerting.randomEmailAccount +import org.opensearch.client.ResponseException +import org.opensearch.common.xcontent.XContentType +import org.opensearch.core.rest.RestStatus +import org.opensearch.index.query.QueryBuilders +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.test.OpenSearchTestCase +import org.opensearch.test.junit.annotations.TestLogging + +@TestLogging("level:DEBUG", reason = "Debug for tests.") +@Suppress("UNCHECKED_CAST") +class EmailAccountRestApiIT : AlertingRestTestCase() { + + fun `test creating an email account`() { + val emailAccount = EmailAccount( + name = "test", + email = "test@email.com", + host = "smtp.com", + port = 25, + method = EmailAccount.MethodType.NONE, + username = null, + password = null + ) + val createdEmailAccount = createEmailAccount(emailAccount = emailAccount) + assertEquals("Incorrect email account name", createdEmailAccount.name, "test") + assertEquals("Incorrect email account email", createdEmailAccount.email,
"test@email.com") + assertEquals("Incorrect email account host", createdEmailAccount.host, "smtp.com") + assertEquals("Incorrect email account port", createdEmailAccount.port, 25) + assertEquals("Incorrect email account method", createdEmailAccount.method, EmailAccount.MethodType.NONE) + } + + fun `test creating an email account with PUT fails`() { + try { + val emailAccount = randomEmailAccount() + client().makeRequest("PUT", EMAIL_ACCOUNT_BASE_URI, emptyMap(), emailAccount.toHttpEntity()) + fail("Expected 405 Method Not Allowed response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.METHOD_NOT_ALLOWED, e.response.restStatus()) + } + } + + fun `test creating an email account when email destination is disallowed fails`() { + try { + removeEmailFromAllowList() + createRandomEmailAccount() + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } + } + + fun `test getting an email account`() { + val emailAccount = createRandomEmailAccount() + val storedEmailAccount = getEmailAccount(emailAccount.id) + assertEquals("Indexed and retrieved email account differ", emailAccount, storedEmailAccount) + } + + fun `test getting an email account that doesn't exist`() { + try { + getEmailAccount(randomAlphaOfLength(20)) + fail("Expected response exception") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.NOT_FOUND, e.response.restStatus()) + } + } + + fun `test getting an email account when email destination is disallowed fails`() { + val emailAccount = createRandomEmailAccount() + + try { + removeEmailFromAllowList() + getEmailAccount(emailAccount.id) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } + } + + fun `test checking if an email account exists`() { + val emailAccount = createRandomEmailAccount() + + val headResponse = client().makeRequest("HEAD", "$EMAIL_ACCOUNT_BASE_URI/${emailAccount.id}") + assertEquals("Unable to HEAD email account", RestStatus.OK, headResponse.restStatus()) + assertNull("Response contains unexpected body", headResponse.entity) + } + + fun `test checking if a non-existent email account exists`() { + val headResponse = client().makeRequest("HEAD", "$EMAIL_ACCOUNT_BASE_URI/foobar") + assertEquals("Unexpected status", RestStatus.NOT_FOUND, headResponse.restStatus()) + } + + fun `test querying an email account that exists`() { + val emailAccount = createRandomEmailAccount() + + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", emailAccount.id)).toString() + val searchResponse = client().makeRequest( + "GET", + "$EMAIL_ACCOUNT_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search email account failed", RestStatus.OK, searchResponse.restStatus()) + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"]!! 
as Map<String, Map<String, Any>> + val numberOfDocsFound = hits["total"]?.get("value") + assertEquals("Email account not found during search", 1, numberOfDocsFound) + } + + fun `test querying an email account that exists with POST`() { + val emailAccount = createRandomEmailAccount() + + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", emailAccount.id)).toString() + val searchResponse = client().makeRequest( + "POST", + "$EMAIL_ACCOUNT_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search email account failed", RestStatus.OK, searchResponse.restStatus()) + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"]!! as Map<String, Map<String, Any>> + val numberOfDocsFound = hits["total"]?.get("value") + assertEquals("Email account not found during search", 1, numberOfDocsFound) + } + + fun `test querying an email account that doesn't exist`() { + // Create a random email account to create the ScheduledJob index. Otherwise the test will fail with a 404 index not found error. + createRandomEmailAccount() + val search = SearchSourceBuilder() + .query( + QueryBuilders.termQuery( + OpenSearchTestCase.randomAlphaOfLength(5), + OpenSearchTestCase.randomAlphaOfLength(5) + ) + ).toString() + + val searchResponse = client().makeRequest( + "GET", + "$EMAIL_ACCOUNT_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search email account failed", RestStatus.OK, searchResponse.restStatus()) + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"]!! as Map<String, Map<String, Any>> + val numberOfDocsFound = hits["total"]?.get("value") + assertEquals("Email account found during search when no document was present", 0, numberOfDocsFound) + } + + fun `test querying an email account when email destination is disallowed fails`() { + val emailAccount = createRandomEmailAccount() + + try { + removeEmailFromAllowList() + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", emailAccount.id)).toString() + client().makeRequest( + "GET", + "$EMAIL_ACCOUNT_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/EmailGroupRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/EmailGroupRestApiIT.kt new file mode 100644 index 000000000..94c880c61 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/resthandler/EmailGroupRestApiIT.kt @@ -0,0 +1,173 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.http.entity.ContentType +import org.apache.http.nio.entity.NStringEntity +import org.opensearch.alerting.AlertingPlugin.Companion.EMAIL_GROUP_BASE_URI +import org.opensearch.alerting.AlertingRestTestCase +import org.opensearch.alerting.makeRequest +import org.opensearch.alerting.model.destination.email.EmailEntry +import org.opensearch.alerting.model.destination.email.EmailGroup +import org.opensearch.alerting.randomEmailGroup +import org.opensearch.client.ResponseException +import org.opensearch.common.xcontent.XContentType +import org.opensearch.core.rest.RestStatus +import org.opensearch.index.query.QueryBuilders +import
org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.test.OpenSearchTestCase +import org.opensearch.test.junit.annotations.TestLogging + +@TestLogging("level:DEBUG", reason = "Debug for tests.") +@Suppress("UNCHECKED_CAST") +class EmailGroupRestApiIT : AlertingRestTestCase() { + + fun `test creating an email group`() { + val emailGroup = EmailGroup( + name = "test", + emails = listOf(EmailEntry("test@email.com")) + ) + val createdEmailGroup = createEmailGroup(emailGroup = emailGroup) + assertEquals("Incorrect email group name", createdEmailGroup.name, "test") + assertEquals("Incorrect email group email entry", createdEmailGroup.emails[0].email, "test@email.com") + } + + fun `test creating an email group with PUT fails`() { + try { + val emailGroup = randomEmailGroup() + client().makeRequest("PUT", EMAIL_GROUP_BASE_URI, emptyMap(), emailGroup.toHttpEntity()) + fail("Expected 405 Method Not Allowed response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.METHOD_NOT_ALLOWED, e.response.restStatus()) + } + } + + fun `test creating an email group when email destination is disallowed fails`() { + try { + removeEmailFromAllowList() + createRandomEmailGroup() + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } + } + + fun `test getting an email group`() { + val emailGroup = createRandomEmailGroup() + val storedEmailGroup = getEmailGroup(emailGroup.id) + assertEquals("Indexed and retrieved email group differ", emailGroup, storedEmailGroup) + } + + fun `test getting an email group that doesn't exist`() { + try { + getEmailGroup(randomAlphaOfLength(20)) + fail("Expected response exception") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.NOT_FOUND, e.response.restStatus()) + } + } + + fun `test getting an email group when email destination is disallowed fails`() { + val emailGroup = createRandomEmailGroup() + + try { + removeEmailFromAllowList() + getEmailGroup(emailGroup.id) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } + } + + fun `test checking if an email group exists`() { + val emailGroup = createRandomEmailGroup() + + val headResponse = client().makeRequest("HEAD", "$EMAIL_GROUP_BASE_URI/${emailGroup.id}") + assertEquals("Unable to HEAD email group", RestStatus.OK, headResponse.restStatus()) + assertNull("Response contains unexpected body", headResponse.entity) + } + + fun `test checking if a non-existent email group exists`() { + val headResponse = client().makeRequest("HEAD", "$EMAIL_GROUP_BASE_URI/foobar") + assertEquals("Unexpected status", RestStatus.NOT_FOUND, headResponse.restStatus()) + } + + fun `test querying an email group that exists`() { + val emailGroup = createRandomEmailGroup() + + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", emailGroup.id)).toString() + val searchResponse = client().makeRequest( + "GET", + "$EMAIL_GROUP_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search email group failed", RestStatus.OK, searchResponse.restStatus()) + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"]!! 
as Map<String, Map<String, Any>> + val numberOfDocsFound = hits["total"]?.get("value") + assertEquals("Email group not found during search", 1, numberOfDocsFound) + } + + fun `test querying an email group that exists with POST`() { + val emailGroup = createRandomEmailGroup() + + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", emailGroup.id)).toString() + val searchResponse = client().makeRequest( + "POST", + "$EMAIL_GROUP_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search email group failed", RestStatus.OK, searchResponse.restStatus()) + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"]!! as Map<String, Map<String, Any>> + val numberOfDocsFound = hits["total"]?.get("value") + assertEquals("Email group not found during search", 1, numberOfDocsFound) + } + + fun `test querying an email group that doesn't exist`() { + // Create a random email group to create the ScheduledJob index. Otherwise the test will fail with a 404 index not found error. + createRandomEmailGroup() + val search = SearchSourceBuilder() + .query( + QueryBuilders.termQuery( + OpenSearchTestCase.randomAlphaOfLength(5), + OpenSearchTestCase.randomAlphaOfLength(5) + ) + ).toString() + + val searchResponse = client().makeRequest( + "GET", + "$EMAIL_GROUP_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search email group failed", RestStatus.OK, searchResponse.restStatus()) + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"]!! as Map<String, Map<String, Any>> + val numberOfDocsFound = hits["total"]?.get("value") + assertEquals("Email group found during search when no document was present", 0, numberOfDocsFound) + } + + fun `test querying an email group when email destination is disallowed fails`() { + val emailGroup = createRandomEmailGroup() + + try { + removeEmailFromAllowList() + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", emailGroup.id)).toString() + client().makeRequest( + "GET", + "$EMAIL_GROUP_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/FindingsRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/FindingsRestApiIT.kt new file mode 100644 index 000000000..1839bc807 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/resthandler/FindingsRestApiIT.kt @@ -0,0 +1,210 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.opensearch.alerting.ALWAYS_RUN +import org.opensearch.alerting.AlertingRestTestCase +import org.opensearch.alerting.randomDocumentLevelMonitor +import org.opensearch.alerting.randomDocumentLevelTrigger +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.test.junit.annotations.TestLogging + +@TestLogging("level:DEBUG", reason = "Debug for tests.") +@Suppress("UNCHECKED_CAST") +class FindingsRestApiIT : AlertingRestTestCase() { + + fun `test find Finding where doc is not retrieved`() { + val testIndex = createTestIndex() + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", 
name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + executeMonitor(trueMonitor.id, mapOf(Pair("dryrun", "true"))) + createFinding(matchingDocIds = listOf("someId")) + val response = searchFindings() + assertEquals(1, response.totalFindings) + assertEquals(1, response.findings[0].documents.size) + assertFalse(response.findings[0].documents[0].found) + } + + fun `test find Finding where doc is retrieved`() { + val testIndex = createTestIndex() + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_field" : "us-west-2" + }""" + indexDoc(testIndex, "someId", testDoc) + val testDoc2 = """{ + "message" : "This is an error2 from IAD region", + "test_field" : "us-west-3" + }""" + indexDoc(testIndex, "someId2", testDoc2) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + executeMonitor(trueMonitor.id, mapOf(Pair("dryrun", "true"))) + + val findingWith1 = createFinding(matchingDocIds = listOf("someId"), index = testIndex) + val findingWith2 = createFinding(matchingDocIds = listOf("someId", "someId2"), index = testIndex) + val response = searchFindings() + assertEquals(2, response.totalFindings) + for (findingWithDoc in response.findings) { + if (findingWithDoc.finding.id == findingWith1) { + assertEquals(1, findingWithDoc.documents.size) + assertTrue(findingWithDoc.documents[0].found) + assertEquals(testDoc, findingWithDoc.documents[0].document) + } else if (findingWithDoc.finding.id == findingWith2) { + assertEquals(2, findingWithDoc.documents.size) + assertTrue(findingWithDoc.documents[0].found) + assertTrue(findingWithDoc.documents[1].found) + assertEquals(testDoc, findingWithDoc.documents[0].document) + assertEquals(testDoc2, findingWithDoc.documents[1].document) + } else { + fail("Found a finding that should not have been retrieved") + } + } + } + + fun `test find Finding for specific finding by id`() { + val testIndex = createTestIndex() + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_field" : "us-west-2" + }""" + indexDoc(testIndex, "someId", testDoc) + val testDoc2 = """{ + "message" : "This is an error2 from IAD region", + "test_field" : "us-west-3" + }""" + indexDoc(testIndex, "someId2", testDoc2) + + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + executeMonitor(trueMonitor.id, mapOf(Pair("dryrun", "true"))) + + createFinding(matchingDocIds = listOf("someId"), index = testIndex) + val findingId = createFinding(matchingDocIds = listOf("someId", "someId2"), index = testIndex) + val response = searchFindings(mapOf(Pair("findingId", findingId))) + assertEquals(1, response.totalFindings) + assertEquals(findingId, 
response.findings[0].finding.id) + assertEquals(2, response.findings[0].documents.size) + assertTrue(response.findings[0].documents[0].found) + assertTrue(response.findings[0].documents[1].found) + assertEquals(testDoc, response.findings[0].documents[0].document) + assertEquals(testDoc2, response.findings[0].documents[1].document) + } + + fun `test find Finding by tag`() { + val testIndex = createTestIndex() + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_field" : "us-west-2" + }""" + indexDoc(testIndex, "someId", testDoc) + val testDoc2 = """{ + "message" : "This is an error2 from IAD region", + "test_field" : "us-west-3" + }""" + indexDoc(testIndex, "someId2", testDoc2) + + val docLevelQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "realQuery", fields = listOf(), tags = listOf("sigma")) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docLevelQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + executeMonitor(trueMonitor.id, mapOf(Pair("dryrun", "true"))) + + createFinding(matchingDocIds = listOf("someId"), index = testIndex) + val findingId = createFinding( + matchingDocIds = listOf("someId", "someId2"), + index = testIndex, + docLevelQueries = listOf(docLevelQuery) + ) + val response = searchFindings(mapOf(Pair("searchString", "sigma"))) + assertEquals(1, response.totalFindings) + assertEquals(findingId, response.findings[0].finding.id) + assertEquals(2, response.findings[0].documents.size) + assertTrue(response.findings[0].documents[0].found) + assertTrue(response.findings[0].documents[1].found) + assertEquals(testDoc, response.findings[0].documents[0].document) + assertEquals(testDoc2, response.findings[0].documents[1].document) + } + + fun `test find Finding by name`() { + val testIndex = createTestIndex() + val testDoc = """{ + "message" : "This is an error from IAD region", + "test_field" : "us-west-2" + }""" + indexDoc(testIndex, "someId", testDoc) + val testDoc2 = """{ + "message" : "This is an error2 from IAD region", + "test_field" : "us-west-3" + }""" + indexDoc(testIndex, "someId2", testDoc2) + + val docLevelQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "realQuery", fields = listOf(), tags = listOf("sigma")) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docLevelQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + executeMonitor(trueMonitor.id, mapOf(Pair("dryrun", "true"))) + + createFinding(matchingDocIds = listOf("someId"), index = testIndex) + val findingId = createFinding( + matchingDocIds = listOf("someId", "someId2"), + index = testIndex, + docLevelQueries = listOf(docLevelQuery) + ) + val response = searchFindings(mapOf(Pair("searchString", "realQuery"))) + assertEquals(1, response.totalFindings) + assertEquals(findingId, response.findings[0].finding.id) + assertEquals(2, response.findings[0].documents.size) + assertTrue(response.findings[0].documents[0].found) + assertTrue(response.findings[0].documents[1].found) + assertEquals(testDoc, response.findings[0].documents[0].document) + assertEquals(testDoc2, response.findings[0].documents[1].document) + } + + fun `test find Finding by monitor id`() { + val testIndex = createTestIndex() + val 
testDoc = """{ + "message" : "This is an error from IAD region", + "test_field" : "us-west-2" + }""" + indexDoc(testIndex, "someId", testDoc) + val testDoc2 = """{ + "message" : "This is an error2 from IAD region", + "test_field" : "us-west-3" + }""" + indexDoc(testIndex, "someId2", testDoc2) + + val docLevelQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "realQuery", fields = listOf(), tags = listOf("sigma")) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docLevelQuery)) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + executeMonitor(trueMonitor.id, mapOf(Pair("dryrun", "true"))) + + createFinding(matchingDocIds = listOf("someId"), index = testIndex) + val findingId = createFinding( + monitorId = "monitorToFind", + matchingDocIds = listOf("someId", "someId2"), + index = testIndex, + docLevelQueries = listOf(docLevelQuery) + ) + val response = searchFindings(mapOf(Pair("searchString", "monitorToFind"))) + assertEquals(1, response.totalFindings) + assertEquals(findingId, response.findings[0].finding.id) + assertEquals(2, response.findings[0].documents.size) + assertTrue(response.findings[0].documents[0].found) + assertTrue(response.findings[0].documents[1].found) + assertEquals(testDoc, response.findings[0].documents[0].document) + assertEquals(testDoc2, response.findings[0].documents[1].document) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt new file mode 100644 index 000000000..c4ed196c7 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt @@ -0,0 +1,1412 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ +package org.opensearch.alerting.resthandler + +import org.apache.http.HttpHeaders +import org.apache.http.entity.ContentType +import org.apache.http.message.BasicHeader +import org.apache.http.nio.entity.NStringEntity +import org.opensearch.alerting.ALERTING_BASE_URI +import org.opensearch.alerting.ALWAYS_RUN +import org.opensearch.alerting.ANOMALY_DETECTOR_INDEX +import org.opensearch.alerting.AlertingRestTestCase +import org.opensearch.alerting.LEGACY_OPENDISTRO_ALERTING_BASE_URI +import org.opensearch.alerting.alerts.AlertIndices +import org.opensearch.alerting.anomalyDetectorIndexMapping +import org.opensearch.alerting.core.settings.ScheduledJobSettings +import org.opensearch.alerting.makeRequest +import org.opensearch.alerting.model.destination.Chime +import org.opensearch.alerting.model.destination.Destination +import org.opensearch.alerting.randomADMonitor +import org.opensearch.alerting.randomAction +import org.opensearch.alerting.randomAlert +import org.opensearch.alerting.randomAnomalyDetector +import org.opensearch.alerting.randomAnomalyDetectorWithUser +import org.opensearch.alerting.randomBucketLevelMonitor +import org.opensearch.alerting.randomBucketLevelTrigger +import org.opensearch.alerting.randomDocumentLevelMonitor +import org.opensearch.alerting.randomDocumentLevelTrigger +import org.opensearch.alerting.randomQueryLevelMonitor +import org.opensearch.alerting.randomQueryLevelTrigger +import org.opensearch.alerting.randomThrottle +import org.opensearch.alerting.randomUser +import org.opensearch.alerting.settings.AlertingSettings +import org.opensearch.alerting.toJsonString 
+import org.opensearch.alerting.util.DestinationType +import org.opensearch.client.ResponseException +import org.opensearch.client.WarningFailureException +import org.opensearch.common.unit.TimeValue +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.CronSchedule +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.DocumentLevelTrigger +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.QueryLevelTrigger +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.core.common.bytes.BytesReference +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import org.opensearch.index.query.QueryBuilders +import org.opensearch.script.Script +import org.opensearch.search.aggregations.AggregationBuilders +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.search.sort.SortOrder +import org.opensearch.test.OpenSearchTestCase +import org.opensearch.test.junit.annotations.TestLogging +import org.opensearch.test.rest.OpenSearchRestTestCase +import java.time.Instant +import java.time.ZoneId +import java.time.temporal.ChronoUnit +import java.util.concurrent.TimeUnit + +@TestLogging("level:DEBUG", reason = "Debug for tests.") +@Suppress("UNCHECKED_CAST") +class MonitorRestApiIT : AlertingRestTestCase() { + + val USE_TYPED_KEYS = ToXContent.MapParams(mapOf("with_type" to "true")) + + @Throws(Exception::class) + fun `test plugin is loaded`() { + val response = entityAsMap(OpenSearchRestTestCase.client().makeRequest("GET", "_nodes/plugins")) + val nodesInfo = response["nodes"] as Map<String, Map<String, Any>> + for (nodeInfo in nodesInfo.values) { + val plugins = nodeInfo["plugins"] as List<Map<String, Any>> + for (plugin in plugins) { + if (plugin["name"] == "opensearch-alerting") { + return + } + } + } + fail("Plugin not installed") + } + + fun `test parsing monitor as a scheduled job`() { + val monitor = createRandomMonitor() + + val builder = monitor.toXContentWithUser(XContentBuilder.builder(XContentType.JSON.xContent()), USE_TYPED_KEYS) + val string = BytesReference.bytes(builder).utf8ToString() + val xcp = createParser(XContentType.JSON.xContent(), string) + val scheduledJob = ScheduledJob.parse(xcp, monitor.id, monitor.version) + assertEquals(monitor, scheduledJob) + } + + @Throws(Exception::class) + fun `test creating a monitor`() { + val monitor = randomQueryLevelMonitor() + + val createResponse = client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + + assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus()) + val responseBody = createResponse.asMap() + val createdId = responseBody["_id"] as String + val createdVersion = responseBody["_version"] as Int + assertNotEquals("response is missing Id", Monitor.NO_ID, createdId) + assertTrue("incorrect version", createdVersion > 0) + assertEquals("Incorrect Location header", "$ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) + } + + @Throws(Exception::class) + fun `test creating a bucket monitor`() { + val monitor = randomBucketLevelMonitor() + + val createResponse = client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + + 
assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus()) + val responseBody = createResponse.asMap() + val createdId = responseBody["_id"] as String + val createdVersion = responseBody["_version"] as Int + assertNotEquals("response is missing Id", Monitor.NO_ID, createdId) + assertTrue("incorrect version", createdVersion > 0) + assertEquals("Incorrect Location header", "$ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) + } + + fun `test creating a monitor with legacy ODFE`() { + val monitor = randomQueryLevelMonitor() + val createResponse = client().makeRequest("POST", LEGACY_OPENDISTRO_ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus()) + val responseBody = createResponse.asMap() + val createdId = responseBody["_id"] as String + val createdVersion = responseBody["_version"] as Int + assertNotEquals("response is missing Id", Monitor.NO_ID, createdId) + assertTrue("incorrect version", createdVersion > 0) + } + + fun `test creating a monitor with action threshold greater than max threshold`() { + val monitor = randomMonitorWithThrottle(100000, ChronoUnit.MINUTES) + + try { + client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + } + } + + fun `test creating a monitor with action threshold less than min threshold`() { + val monitor = randomMonitorWithThrottle(-1) + + try { + client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + } + } + + fun `test creating a monitor with updating action threshold`() { + adminClient().updateSettings("plugins.alerting.action_throttle_max_value", TimeValue.timeValueHours(1)) + + val monitor = randomMonitorWithThrottle(2, ChronoUnit.HOURS) + + try { + client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + } + adminClient().updateSettings("plugins.alerting.action_throttle_max_value", TimeValue.timeValueHours(24)) + } + + fun `test creating a monitor with PUT fails`() { + try { + val monitor = randomQueryLevelMonitor() + client().makeRequest("PUT", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + fail("Expected 405 Method Not Allowed response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.METHOD_NOT_ALLOWED, e.response.restStatus()) + } + } + + fun `test creating a monitor with illegal index name`() { + try { + val si = SearchInput(listOf("_#*IllegalIndexCharacters"), SearchSourceBuilder().query(QueryBuilders.matchAllQuery())) + val monitor = randomQueryLevelMonitor() + client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.copy(inputs = listOf(si)).toHttpEntity()) + } catch (e: ResponseException) { + // When an index with invalid name is mentioned, instead of returning invalid_index_name_exception security plugin throws security_exception. + // Refer: https://github.com/opendistro-for-elasticsearch/security/issues/718 + // Without security plugin we get BAD_REQUEST correctly. With security_plugin we get INTERNAL_SERVER_ERROR, till above issue is fixed. 
+ assertTrue( + "Unexpected status", + listOf(RestStatus.BAD_REQUEST, RestStatus.FORBIDDEN).contains(e.response.restStatus()) + ) + } + } + + fun `test creating an AD monitor without detector index`() { + try { + val monitor = randomADMonitor() + + client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + } catch (e: ResponseException) { + // When a user creates an AD monitor without the detector index, an index not found exception will be thrown + assertTrue("Unexpected error", e.message!!.contains("Configured indices are not found")) + assertTrue( + "Unexpected status", + listOf(RestStatus.NOT_FOUND).contains(e.response.restStatus()) + ) + } + } + + fun `test creating an AD monitor with detector index created but no detectors`() { + createAnomalyDetectorIndex() + try { + val monitor = randomADMonitor() + client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + } catch (e: ResponseException) { + // When a user creates an AD monitor with no detectors, an exception will be thrown + assertTrue("Unexpected error", e.message!!.contains("User has no available detectors")) + assertTrue( + "Unexpected status", + listOf(RestStatus.NOT_FOUND).contains(e.response.restStatus()) + ) + } + } + + fun `test creating an AD monitor with no detector has monitor backend role`() { + if (!securityEnabled()) { + createAnomalyDetectorIndex() + // TODO: change to REST API call to test security enabled case + indexDoc(ANOMALY_DETECTOR_INDEX, "1", randomAnomalyDetector()) + indexDoc(ANOMALY_DETECTOR_INDEX, "2", randomAnomalyDetectorWithUser(randomAlphaOfLength(5))) + try { + val monitor = randomADMonitor() + client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + } catch (e: ResponseException) { + // When a user creates an AD monitor with no detector that has the monitor's backend role, an exception will be thrown + assertTrue("Unexpected error", e.message!!.contains("User has no available detectors")) + assertTrue( + "Unexpected status", + listOf(RestStatus.NOT_FOUND).contains(e.response.restStatus()) + ) + } + } + } + + /* + fun `test creating an AD monitor with detector has monitor backend role`() { + createAnomalyDetectorIndex() + val backendRole = "test-role" + val user = randomADUser(backendRole) + indexDoc(ANOMALY_DETECTOR_INDEX, "1", randomAnomalyDetector()) + indexDoc(ANOMALY_DETECTOR_INDEX, "2", randomAnomalyDetectorWithUser(randomAlphaOfLength(5))) + indexDoc(ANOMALY_DETECTOR_INDEX, "3", randomAnomalyDetectorWithUser(backendRole = backendRole), refresh = true) + + val monitor = randomADMonitor(user = user) + val createResponse = client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus()) + val responseBody = createResponse.asMap() + val createdId = responseBody["_id"] as String + val createdVersion = responseBody["_version"] as Int + assertNotEquals("response is missing Id", Monitor.NO_ID, createdId) + assertTrue("incorrect version", createdVersion > 0) + assertEquals("Incorrect Location header", "$ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) + }*/ + + private fun createAnomalyDetectorIndex() { + try { + createTestIndex(ANOMALY_DETECTOR_INDEX, anomalyDetectorIndexMapping()) + } catch (e: Exception) { + // WarningFailureException is expected as we are creating a system index starting with a dot + assertTrue(e is WarningFailureException) + } + } + + /* Enable this test case after checking for disallowed destination during Monitor creation is added in + 
fun `test creating a monitor with a disallowed destination type fails`() { + try { + // Create a Chime Destination + val chime = Chime("http://abc.com") + val destination = Destination( + type = DestinationType.CHIME, + name = "test", + user = randomUser(), + lastUpdateTime = Instant.now(), + chime = chime, + slack = null, + customWebhook = null, + email = null + ) + val chimeDestination = createDestination(destination = destination) + + // Remove Chime from the allow_list + val allowedDestinations = DestinationType.values().toList() + .filter { destinationType -> destinationType != DestinationType.CHIME } + .joinToString(prefix = "[", postfix = "]") { string -> "\"$string\"" } + client().updateSettings(DestinationSettings.ALLOW_LIST.key, allowedDestinations) + + createMonitor(randomQueryLevelMonitor(triggers = listOf(randomQueryLevelTrigger(destinationId = chimeDestination.id)))) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } + } + */ + + @Throws(Exception::class) + fun `test updating search for a monitor`() { + val monitor = createRandomMonitor() + + val updatedSearch = SearchInput( + emptyList(), + SearchSourceBuilder().query(QueryBuilders.termQuery("foo", "bar")) + ) + val updateResponse = client().makeRequest( + "PUT", + monitor.relativeUrl(), + emptyMap(), + monitor.copy(inputs = listOf(updatedSearch)).toHttpEntity() + ) + + assertEquals("Update monitor failed", RestStatus.OK, updateResponse.restStatus()) + val responseBody = updateResponse.asMap() + assertEquals("Updated monitor id doesn't match", monitor.id, responseBody["_id"] as String) + assertEquals("Version not incremented", (monitor.version + 1).toInt(), responseBody["_version"] as Int) + + val updatedMonitor = getMonitor(monitor.id) + assertEquals("Monitor search not updated", listOf(updatedSearch), updatedMonitor.inputs) + } + + @Throws(Exception::class) + fun `test updating conditions for a monitor`() { + val monitor = createRandomMonitor() + + val updatedTriggers = listOf( + QueryLevelTrigger( + name = "foo", + severity = "1", + condition = Script("return true"), + actions = emptyList() + ) + ) + val updateResponse = client().makeRequest( + "PUT", + monitor.relativeUrl(), + emptyMap(), + monitor.copy(triggers = updatedTriggers).toHttpEntity() + ) + + assertEquals("Update monitor failed", RestStatus.OK, updateResponse.restStatus()) + val responseBody = updateResponse.asMap() + assertEquals("Updated monitor id doesn't match", monitor.id, responseBody["_id"] as String) + assertEquals("Version not incremented", (monitor.version + 1).toInt(), responseBody["_version"] as Int) + + val updatedMonitor = getMonitor(monitor.id) + assertEquals("Monitor trigger not updated", updatedTriggers, updatedMonitor.triggers) + } + + @Throws(Exception::class) + fun `test updating schedule for a monitor`() { + val monitor = createRandomMonitor() + + val updatedSchedule = CronSchedule(expression = "0 9 * * *", timezone = ZoneId.of("UTC")) + val updateResponse = client().makeRequest( + "PUT", + monitor.relativeUrl(), + emptyMap(), + monitor.copy(schedule = updatedSchedule).toHttpEntity() + ) + + assertEquals("Update monitor failed", RestStatus.OK, updateResponse.restStatus()) + val responseBody = updateResponse.asMap() + assertEquals("Updated monitor id doesn't match", monitor.id, responseBody["_id"] as String) + assertEquals("Version not incremented", (monitor.version + 1).toInt(), responseBody["_version"] as Int) + + val 
updatedMonitor = getMonitor(monitor.id) + assertEquals("Monitor trigger not updated", updatedSchedule, updatedMonitor.schedule) + } + + @Throws(Exception::class) + fun `test getting a monitor`() { + val monitor = createRandomMonitor() + + val storedMonitor = getMonitor(monitor.id) + + assertEquals("Indexed and retrieved monitor differ", monitor, storedMonitor) + } + + @Throws(Exception::class) + fun `test getting a monitor that doesn't exist`() { + try { + getMonitor(randomAlphaOfLength(20)) + fail("expected response exception") + } catch (e: ResponseException) { + assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) + } + } + + @Throws(Exception::class) + fun `test checking if a monitor exists`() { + val monitor = createRandomMonitor() + + val headResponse = client().makeRequest("HEAD", monitor.relativeUrl()) + assertEquals("Unable to HEAD monitor", RestStatus.OK, headResponse.restStatus()) + assertNull("Response contains unexpected body", headResponse.entity) + } + + fun `test checking if a non-existent monitor exists`() { + val headResponse = client().makeRequest("HEAD", "$ALERTING_BASE_URI/foobarbaz") + assertEquals("Unexpected status", RestStatus.NOT_FOUND, headResponse.restStatus()) + } + + @Throws(Exception::class) + fun `test deleting a monitor`() { + val monitor = createRandomMonitor() + + val deleteResponse = client().makeRequest("DELETE", monitor.relativeUrl()) + assertEquals("Delete failed", RestStatus.OK, deleteResponse.restStatus()) + + val getResponse = client().makeRequest("HEAD", monitor.relativeUrl()) + assertEquals("Deleted monitor still exists", RestStatus.NOT_FOUND, getResponse.restStatus()) + } + + @Throws(Exception::class) + fun `test deleting a monitor that doesn't exist`() { + try { + client().makeRequest("DELETE", "$ALERTING_BASE_URI/foobarbaz") + fail("expected 404 ResponseException") + } catch (e: ResponseException) { + assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) + } + } + + fun `test getting UI metadata monitor not from OpenSearch Dashboards`() { + val monitor = createRandomMonitor(withMetadata = true) + val getMonitor = getMonitor(monitorId = monitor.id) + assertEquals( + "UI Metadata returned but request did not come from OpenSearch Dashboards.", + getMonitor.uiMetadata, + mapOf() + ) + } + + fun `test getting UI metadata monitor from OpenSearch Dashboards`() { + val monitor = createRandomMonitor(refresh = true, withMetadata = true) + val header = BasicHeader(HttpHeaders.USER_AGENT, "OpenSearch-Dashboards") + val getMonitor = getMonitor(monitorId = monitor.id, header = header) + assertEquals("", monitor.uiMetadata, getMonitor.uiMetadata) + } + + fun `test query a monitor that exists`() { + val monitor = createRandomMonitor(true) + + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() + val searchResponse = client().makeRequest( + "GET", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"]!! 
as Map<String, Map<String, Any>> + val numberDocsFound = hits["total"]?.get("value") + assertEquals("Monitor not found during search", 1, numberDocsFound) + } + + fun `test query a monitor that exists POST`() { + val monitor = createRandomMonitor(true) + + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() + val searchResponse = client().makeRequest( + "POST", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"]!! as Map<String, Map<String, Any>> + val numberDocsFound = hits["total"]?.get("value") + assertEquals("Monitor not found during search", 1, numberDocsFound) + } + + fun `test query a monitor that doesn't exist`() { + // Create a random monitor to create the ScheduledJob index. Otherwise the test will fail with a 404 index not found error. + createRandomMonitor(refresh = true) + val search = SearchSourceBuilder().query( + QueryBuilders.termQuery( + OpenSearchTestCase.randomAlphaOfLength(5), + OpenSearchTestCase.randomAlphaOfLength(5) + ) + ).toString() + + val searchResponse = client().makeRequest( + "GET", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"]!! as Map<String, Map<String, Any>> + val numberDocsFound = hits["total"]?.get("value") + assertEquals("Monitor found during search when no document present.", 0, numberDocsFound) + } + + fun `test query a monitor with UI metadata from OpenSearch Dashboards`() { + val monitor = createRandomMonitor(refresh = true, withMetadata = true) + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() + val header = BasicHeader(HttpHeaders.USER_AGENT, "OpenSearch-Dashboards") + val searchResponse = client().makeRequest( + "GET", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON), + header + ) + assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) + + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"] as Map<String, Map<String, Any>> + val numberDocsFound = hits["total"]?.get("value") + assertEquals("Monitor not found during search", 1, numberDocsFound) + + val searchHits = hits["hits"] as List<Any> + val hit = searchHits[0] as Map<String, Any> + val monitorHit = hit["_source"] as Map<String, Any> + assertNotNull( + "UI Metadata not returned from search even though the request came from OpenSearch Dashboards", + monitorHit[Monitor.UI_METADATA_FIELD] + ) + } + + fun `test query a monitor with UI metadata as user`() { + val monitor = createRandomMonitor(refresh = true, withMetadata = true) + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() + val searchResponse = client().makeRequest( + "GET", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) + + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"] as Map<String, Map<String, Any>> + val numberDocsFound = hits["total"]?.get("value") + assertEquals("Monitor not found during search", 1, numberDocsFound) + + 
val searchHits = hits["hits"] as List<Any> + val hit = searchHits[0] as Map<String, Any> + val monitorHit = hit["_source"] as Map<String, Any> + assertNull( + "UI Metadata returned from search but request did not come from OpenSearchDashboards", + monitorHit[Monitor.UI_METADATA_FIELD] + ) + } + + fun `test acknowledge all alert states`() { + putAlertMappings() // Required as we do not have a create alert API. + val monitor = createRandomMonitor(refresh = true) + val acknowledgedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) + val completedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.COMPLETED)) + val errorAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) + val activeAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) + val invalidAlert = randomAlert(monitor).copy(id = "foobar") + + val response = acknowledgeAlerts(monitor, acknowledgedAlert, completedAlert, errorAlert, activeAlert, invalidAlert) + val responseMap = response.asMap() + + val activeAlertAcknowledged = searchAlerts(monitor).single { it.id == activeAlert.id } + assertNotNull("Unsuccessful acknowledgement", responseMap["success"] as List<String>) + assertTrue("Alert not in acknowledged response", responseMap["success"].toString().contains(activeAlert.id)) + assertEquals("Alert not acknowledged.", Alert.State.ACKNOWLEDGED, activeAlertAcknowledged.state) + assertNotNull("Alert acknowledged time is NULL", activeAlertAcknowledged.acknowledgedTime) + + val failedResponseList = responseMap["failed"].toString() + assertTrue("Alert in state ${acknowledgedAlert.state} not found in failed list", failedResponseList.contains(acknowledgedAlert.id)) + assertTrue("Alert in state ${errorAlert.state} not found in failed list", failedResponseList.contains(errorAlert.id)) + assertTrue("Alert in state ${completedAlert.state} not found in failed list", failedResponseList.contains(completedAlert.id)) + assertTrue("Invalid alert not found in failed list", failedResponseList.contains(invalidAlert.id)) + assertFalse("Alert in state ${activeAlert.state} found in failed list", failedResponseList.contains(activeAlert.id)) + } + + fun `test acknowledging more than 10 alerts at once`() { + // GIVEN + putAlertMappings() // Required as we do not have a create alert API. + val monitor = createRandomMonitor(refresh = true) + val alertsToAcknowledge = (1..15).map { createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) }.toTypedArray() + + // WHEN + val response = acknowledgeAlerts(monitor, *alertsToAcknowledge) + + // THEN + val responseMap = response.asMap() + val expectedAcknowledgedCount = alertsToAcknowledge.size + + val acknowledgedAlerts = responseMap["success"] as List<String> + assertTrue( + "Expected $expectedAcknowledgedCount alerts to be acknowledged successfully.", + acknowledgedAlerts.size == expectedAcknowledgedCount + ) + + val acknowledgedAlertsList = acknowledgedAlerts.toString() + alertsToAcknowledge.forEach { alert -> + assertTrue("Alert with ID ${alert.id} not found in acknowledged list.", acknowledgedAlertsList.contains(alert.id)) + } + + val failedResponse = responseMap["failed"] as List<String> + assertTrue("Expected 0 alerts to fail acknowledgment.", failedResponse.isEmpty()) + } + + fun `test acknowledging more than 10 alerts at once, including acknowledged alerts`() { + // GIVEN + putAlertMappings() // Required as we do not have a create alert API. 
+ val monitor = createRandomMonitor(refresh = true) + val alertsGroup1 = (1..15).map { createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) }.toTypedArray() + acknowledgeAlerts(monitor, *alertsGroup1) // Acknowledging the first array of alerts. + + val alertsGroup2 = (1..15).map { createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) }.toTypedArray() + + // Creating an array of alerts that includes alerts that have been already acknowledged, and new alerts. + val alertsToAcknowledge = arrayOf(*alertsGroup1, *alertsGroup2) + + // WHEN + val response = acknowledgeAlerts(monitor, *alertsToAcknowledge) + + // THEN + val responseMap = response.asMap() + val expectedAcknowledgedCount = alertsToAcknowledge.size - alertsGroup1.size + + val acknowledgedAlerts = responseMap["success"] as List<String> + assertTrue( + "Expected $expectedAcknowledgedCount alerts to be acknowledged successfully.", + acknowledgedAlerts.size == expectedAcknowledgedCount + ) + + val acknowledgedAlertsList = acknowledgedAlerts.toString() + alertsGroup2.forEach { alert -> + assertTrue("Alert with ID ${alert.id} not found in acknowledged list.", acknowledgedAlertsList.contains(alert.id)) + } + alertsGroup1.forEach { alert -> + assertFalse("Alert with ID ${alert.id} found in acknowledged list.", acknowledgedAlertsList.contains(alert.id)) + } + + val failedResponse = responseMap["failed"] as List<String> + assertTrue("Expected ${alertsGroup1.size} alerts to fail acknowledgment.", failedResponse.size == alertsGroup1.size) + + val failedResponseList = failedResponse.toString() + alertsGroup1.forEach { alert -> + assertTrue("Alert with ID ${alert.id} not found in failed list.", failedResponseList.contains(alert.id)) + } + alertsGroup2.forEach { alert -> + assertFalse("Alert with ID ${alert.id} found in failed list.", failedResponseList.contains(alert.id)) + } + } + + @Throws(Exception::class) + fun `test acknowledging 0 alerts`() { + // GIVEN + putAlertMappings() // Required as we do not have a create alert API. + val monitor = createRandomMonitor(refresh = true) + val alertsToAcknowledge = arrayOf<Alert>() + + // WHEN & THEN + try { + acknowledgeAlerts(monitor, *alertsToAcknowledge) + fail("Expected acknowledgeAlerts to throw an exception.") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + } + } + + fun `test get all alerts in all states`() { + putAlertMappings() // Required as we do not have a create alert API. 
+ val monitor = createRandomMonitor(refresh = true) + val acknowledgedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) + val completedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.COMPLETED)) + val errorAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) + val activeAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) + val invalidAlert = randomAlert(monitor).copy(id = "foobar") + + val inputMap = HashMap<String, Any>() + inputMap["missing"] = "_last" + + val responseMap = getAlerts(inputMap).asMap() + val alerts = responseMap["alerts"].toString() + + assertEquals(4, responseMap["totalAlerts"]) + assertTrue("Acknowledged alert with id, ${acknowledgedAlert.id}, not found in alert list", alerts.contains(acknowledgedAlert.id)) + assertTrue("Completed alert with id, ${completedAlert.id}, not found in alert list", alerts.contains(completedAlert.id)) + assertTrue("Error alert with id, ${errorAlert.id}, not found in alert list", alerts.contains(errorAlert.id)) + assertTrue("Active alert with id, ${activeAlert.id}, not found in alert list", alerts.contains(activeAlert.id)) + assertFalse("Invalid alert with id, ${invalidAlert.id}, found in alert list", alerts.contains(invalidAlert.id)) + } + + fun `test get all alerts with active states`() { + putAlertMappings() // Required as we do not have a create alert API. + val monitor = createRandomMonitor(refresh = true) + val acknowledgedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) + val completedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.COMPLETED)) + val errorAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) + val activeAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) + val invalidAlert = randomAlert(monitor).copy(id = "foobar") + + val inputMap = HashMap<String, Any>() + inputMap["alertState"] = Alert.State.ACTIVE.name + + val responseMap = getAlerts(inputMap).asMap() + val alerts = responseMap["alerts"].toString() + + assertEquals(1, responseMap["totalAlerts"]) + assertFalse("Acknowledged alert with id, ${acknowledgedAlert.id}, found in alert list", alerts.contains(acknowledgedAlert.id)) + assertFalse("Completed alert with id, ${completedAlert.id}, found in alert list", alerts.contains(completedAlert.id)) + assertFalse("Error alert with id, ${errorAlert.id}, found in alert list", alerts.contains(errorAlert.id)) + assertTrue("Active alert with id, ${activeAlert.id}, not found in alert list", alerts.contains(activeAlert.id)) + assertFalse("Invalid alert with id, ${invalidAlert.id}, found in alert list", alerts.contains(invalidAlert.id)) + } + + fun `test get all alerts with severity 1`() { + putAlertMappings() // Required as we do not have a create alert API. 
+ val monitor = createRandomMonitor(refresh = true) + val acknowledgedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED, severity = "1")) + val completedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.COMPLETED, severity = "3")) + val errorAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR, severity = "1")) + val activeAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE, severity = "2")) + + val inputMap = HashMap<String, Any>() + inputMap["severityLevel"] = "1" + + val responseMap = getAlerts(inputMap).asMap() + val alerts = responseMap["alerts"].toString() + + assertEquals(2, responseMap["totalAlerts"]) + assertTrue( + "Acknowledged sev 1 alert with id, ${acknowledgedAlert.id}, not found in alert list", + alerts.contains(acknowledgedAlert.id) + ) + assertFalse("Completed sev 3 alert with id, ${completedAlert.id}, found in alert list", alerts.contains(completedAlert.id)) + assertTrue("Error sev 1 alert with id, ${errorAlert.id}, not found in alert list", alerts.contains(errorAlert.id)) + assertFalse("Active sev 2 alert with id, ${activeAlert.id}, found in alert list", alerts.contains(activeAlert.id)) + } + + fun `test get all alerts for a specific monitor by id`() { + putAlertMappings() // Required as we do not have a create alert API. + val monitor = createRandomMonitor(refresh = true) + val monitor2 = createRandomMonitor(refresh = true) + val acknowledgedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) + val completedAlert = createAlert(randomAlert(monitor2).copy(state = Alert.State.COMPLETED)) + val errorAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) + val activeAlert = createAlert(randomAlert(monitor2).copy(state = Alert.State.ACTIVE)) + + val inputMap = HashMap<String, Any>() + inputMap["monitorId"] = monitor.id + + val responseMap = getAlerts(inputMap).asMap() + val alerts = responseMap["alerts"].toString() + + assertEquals(2, responseMap["totalAlerts"]) + assertTrue( + "Acknowledged alert for chosen monitor with id, ${acknowledgedAlert.id}, not found in alert list", + alerts.contains(acknowledgedAlert.id) + ) + assertFalse("Completed sev 3 alert with id, ${completedAlert.id}, found in alert list", alerts.contains(completedAlert.id)) + assertTrue("Error alert for chosen monitor with id, ${errorAlert.id}, not found in alert list", alerts.contains(errorAlert.id)) + assertFalse("Active alert sev 2 with id, ${activeAlert.id}, found in alert list", alerts.contains(activeAlert.id)) + } + + fun `test get alerts by searching monitor name`() { + putAlertMappings() // Required as we do not have a create alert API. 
+ + val monitor = createRandomMonitor(refresh = true) + val monitor2 = createRandomMonitor(refresh = true) + val acknowledgedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) + val completedAlert = createAlert(randomAlert(monitor2).copy(state = Alert.State.COMPLETED)) + val errorAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) + val activeAlert = createAlert(randomAlert(monitor2).copy(state = Alert.State.ACTIVE)) + + val inputMap = HashMap<String, Any>() + inputMap["searchString"] = monitor.name + + val responseMap = getAlerts(inputMap).asMap() + val alerts = responseMap["alerts"].toString() + + assertEquals(2, responseMap["totalAlerts"]) + assertTrue( + "Acknowledged alert for matching monitor with id, ${acknowledgedAlert.id}, not found in alert list", + alerts.contains(acknowledgedAlert.id) + ) + assertFalse("Completed sev 3 alert with id, ${completedAlert.id}, found in alert list", alerts.contains(completedAlert.id)) + assertTrue("Error alert for matching monitor with id, ${errorAlert.id}, not found in alert list", alerts.contains(errorAlert.id)) + assertFalse("Active alert sev 2 with id, ${activeAlert.id}, found in alert list", alerts.contains(activeAlert.id)) + } + + fun `test mappings after monitor creation`() { + createRandomMonitor(refresh = true) + + val response = client().makeRequest("GET", "/${ScheduledJob.SCHEDULED_JOBS_INDEX}/_mapping") + val parserMap = createParser(XContentType.JSON.xContent(), response.entity.content).map() as Map<String, Map<String, Any>> + val mappingsMap = parserMap[ScheduledJob.SCHEDULED_JOBS_INDEX]!!["mappings"] as Map<String, Any> + val expected = createParser( + XContentType.JSON.xContent(), + javaClass.classLoader.getResource("mappings/scheduled-jobs.json").readText() + ) + val expectedMap = expected.map() + + assertEquals("Mappings are different", expectedMap, mappingsMap) + } + + fun `test delete monitor moves alerts`() { + client().updateSettings(ScheduledJobSettings.SWEEPER_ENABLED.key, true) + putAlertMappings() + val monitor = createRandomMonitor(true) + val alert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) + refreshIndex("*") + val deleteResponse = client().makeRequest("DELETE", "$ALERTING_BASE_URI/${monitor.id}") + assertEquals("Delete request not successful", RestStatus.OK, deleteResponse.restStatus()) + + // Wait 5 seconds for event to be processed and alerts moved + Thread.sleep(5000) + + val alerts = searchAlerts(monitor) + assertEquals("Active alert was not deleted", 0, alerts.size) + + val historyAlerts = searchAlerts(monitor, AlertIndices.ALERT_HISTORY_WRITE_INDEX) + assertEquals("Alert was not moved to history", 1, historyAlerts.size) + assertEquals( + "Alert data incorrect", + alert.copy(state = Alert.State.DELETED).toJsonString(), + historyAlerts.single().toJsonString() + ) + } + + fun `test delete trigger moves alerts then try to search alert by monitorId to find alert in history index`() { + client().updateSettings(ScheduledJobSettings.SWEEPER_ENABLED.key, true) + putAlertMappings() + val trigger = randomQueryLevelTrigger() + val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger))) + val alert = createAlert(randomAlert(monitor).copy(triggerId = trigger.id, state = Alert.State.ACTIVE)) + refreshIndex("*") + val updatedMonitor = monitor.copy(triggers = emptyList()) + val updateResponse = client().makeRequest( + "PUT", + "$ALERTING_BASE_URI/${monitor.id}", + emptyMap(), + updatedMonitor.toHttpEntity() + ) + assertEquals("Update request not successful", RestStatus.OK, 
updateResponse.restStatus()) + + // Wait 5 seconds for event to be processed and alerts moved + Thread.sleep(5000) + + val alerts = searchAlerts(monitor) + assertEquals("Active alert was not deleted", 0, alerts.size) + + // Find alert by id and make sure it checks the history of alerts as well + val inputMap = HashMap<String, Any>() + inputMap["monitorId"] = monitor.id + val responseMap = getAlerts(inputMap).asMap() + + assertEquals(1, responseMap["totalAlerts"]) + } + + fun `test delete trigger moves alerts`() { + client().updateSettings(ScheduledJobSettings.SWEEPER_ENABLED.key, true) + putAlertMappings() + val trigger = randomQueryLevelTrigger() + val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger))) + val alert = createAlert(randomAlert(monitor).copy(triggerId = trigger.id, state = Alert.State.ACTIVE)) + refreshIndex("*") + val updatedMonitor = monitor.copy(triggers = emptyList()) + val updateResponse = client().makeRequest( + "PUT", + "$ALERTING_BASE_URI/${monitor.id}", + emptyMap(), + updatedMonitor.toHttpEntity() + ) + assertEquals("Update request not successful", RestStatus.OK, updateResponse.restStatus()) + + // Wait 5 seconds for event to be processed and alerts moved + Thread.sleep(5000) + + val alerts = searchAlerts(monitor) + assertEquals("Active alert was not deleted", 0, alerts.size) + + val historyAlerts = searchAlerts(monitor, AlertIndices.ALERT_HISTORY_WRITE_INDEX) + assertEquals("Alert was not moved to history", 1, historyAlerts.size) + assertEquals( + "Alert data incorrect", + alert.copy(state = Alert.State.DELETED).toJsonString(), + historyAlerts.single().toJsonString() + ) + } + + fun `test delete trigger moves alerts only for deleted trigger`() { + client().updateSettings(ScheduledJobSettings.SWEEPER_ENABLED.key, true) + putAlertMappings() + val triggerToDelete = randomQueryLevelTrigger() + val triggerToKeep = randomQueryLevelTrigger() + val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(triggerToDelete, triggerToKeep))) + val alertKeep = createAlert(randomAlert(monitor).copy(triggerId = triggerToKeep.id, state = Alert.State.ACTIVE)) + val alertDelete = createAlert(randomAlert(monitor).copy(triggerId = triggerToDelete.id, state = Alert.State.ACTIVE)) + refreshIndex("*") + val updatedMonitor = monitor.copy(triggers = listOf(triggerToKeep)) + val updateResponse = client().makeRequest( + "PUT", + "$ALERTING_BASE_URI/${monitor.id}", + emptyMap(), + updatedMonitor.toHttpEntity() + ) + assertEquals("Update request not successful", RestStatus.OK, updateResponse.restStatus()) + // Wait until postIndex hook is executed due to monitor update + waitUntil({ + val alerts = searchAlerts(monitor) + if (alerts.size == 1) { + return@waitUntil true + } + return@waitUntil false + }, 60, TimeUnit.SECONDS) + val alerts = searchAlerts(monitor) + // We have two alerts from above, 1 for each trigger, there should be only 1 left in active index + assertEquals("One alert should be in active index", 1, alerts.size) + assertEquals("Wrong alert in active index", alertKeep.toJsonString(), alerts.single().toJsonString()) + + waitUntil({ + val alerts = searchAlerts(monitor, AlertIndices.ALERT_HISTORY_WRITE_INDEX) + if (alerts.size == 1) { + return@waitUntil true + } + return@waitUntil false + }, 60, TimeUnit.SECONDS) + + val historyAlerts = searchAlerts(monitor, AlertIndices.ALERT_HISTORY_WRITE_INDEX) + // Only alertDelete should have been moved to history index + assertEquals("One alert should be in history index", 1, historyAlerts.size) + assertEquals( + "Alert data 
incorrect", + alertDelete.copy(state = Alert.State.DELETED).toJsonString(), + historyAlerts.single().toJsonString() + ) + } + + fun `test update monitor with wrong version`() { + val monitor = createRandomMonitor(refresh = true) + try { + client().makeRequest( + "PUT", + "${monitor.relativeUrl()}?refresh=true&if_seq_no=1234&if_primary_term=1234", + emptyMap(), + monitor.toHttpEntity() + ) + fail("expected 409 ResponseException") + } catch (e: ResponseException) { + assertEquals(RestStatus.CONFLICT, e.response.restStatus()) + } + } + + fun `test monitor stats disable plugin`() { + // Disable the Monitor plugin. + disableScheduledJob() + + val responseMap = getAlertingStats() + assertAlertingStatsSweeperEnabled(responseMap, false) + assertEquals("Scheduled job index exists but there are no scheduled jobs.", false, responseMap["scheduled_job_index_exists"]) + val _nodes = responseMap["_nodes"] as Map + validateAlertingStatsNodeResponse(_nodes) + } + + fun `test monitor stats when disabling and re-enabling scheduled jobs with existing monitor`() { + // Enable Monitor jobs + enableScheduledJob() + val monitorId = createMonitor(randomQueryLevelMonitor(enabled = true), refresh = true).id + + var alertingStats = getAlertingStats() + assertAlertingStatsSweeperEnabled(alertingStats, true) + assertEquals("Scheduled job index does not exist", true, alertingStats["scheduled_job_index_exists"]) + assertEquals("Scheduled job index is not yellow", "yellow", alertingStats["scheduled_job_index_status"]) + assertEquals("Nodes are not on schedule", numberOfNodes, alertingStats["nodes_on_schedule"]) + + val _nodes = alertingStats["_nodes"] as Map + validateAlertingStatsNodeResponse(_nodes) + + assertTrue( + "Monitor [$monitorId] was not found scheduled based on the alerting stats response: $alertingStats", + isMonitorScheduled(monitorId, alertingStats) + ) + + // Disable Monitor jobs + disableScheduledJob() + + alertingStats = getAlertingStats() + assertAlertingStatsSweeperEnabled(alertingStats, false) + assertFalse( + "Monitor [$monitorId] was still scheduled based on the alerting stats response: $alertingStats", + isMonitorScheduled(monitorId, alertingStats) + ) + + // Re-enable Monitor jobs + enableScheduledJob() + + // Sleep briefly so sweep can reschedule the Monitor + Thread.sleep(2000) + + alertingStats = getAlertingStats() + assertAlertingStatsSweeperEnabled(alertingStats, true) + assertTrue( + "Monitor [$monitorId] was not re-scheduled based on the alerting stats response: $alertingStats", + isMonitorScheduled(monitorId, alertingStats) + ) + } + + fun `test monitor stats no jobs`() { + // Enable the Monitor plugin. + enableScheduledJob() + + val responseMap = getAlertingStats() + assertAlertingStatsSweeperEnabled(responseMap, true) + assertEquals("Scheduled job index exists but there are no scheduled jobs.", false, responseMap["scheduled_job_index_exists"]) + val _nodes = responseMap["_nodes"] as Map + validateAlertingStatsNodeResponse(_nodes) + } + + fun `test monitor stats jobs`() { + // Enable the Monitor plugin. 
+ enableScheduledJob() + createRandomMonitor(refresh = true) + + val responseMap = getAlertingStats() + assertAlertingStatsSweeperEnabled(responseMap, true) + assertEquals("Scheduled job index does not exist", true, responseMap["scheduled_job_index_exists"]) + assertEquals("Scheduled job index is not yellow", "yellow", responseMap["scheduled_job_index_status"]) + assertEquals("Nodes are not on schedule", numberOfNodes, responseMap["nodes_on_schedule"]) + + val _nodes = responseMap["_nodes"] as Map + validateAlertingStatsNodeResponse(_nodes) + } + + @Throws(Exception::class) + fun `test max number of monitors`() { + client().updateSettings(AlertingSettings.ALERTING_MAX_MONITORS.key, "1") + + createRandomMonitor(refresh = true) + try { + createRandomMonitor(refresh = true) + fail("Request should be rejected as there are too many monitors.") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + } + } + + fun `test monitor specific metric`() { + // Enable the Monitor plugin. + enableScheduledJob() + createRandomMonitor(refresh = true) + + val responseMap = getAlertingStats("/jobs_info") + assertAlertingStatsSweeperEnabled(responseMap, true) + assertEquals("Scheduled job index does not exist", true, responseMap["scheduled_job_index_exists"]) + assertEquals("Scheduled job index is not yellow", "yellow", responseMap["scheduled_job_index_status"]) + assertEquals("Nodes not on schedule", numberOfNodes, responseMap["nodes_on_schedule"]) + + val _nodes = responseMap["_nodes"] as Map + validateAlertingStatsNodeResponse(_nodes) + } + + fun `test monitor stats incorrect metric`() { + try { + getAlertingStats("/foobarzzz") + fail("Incorrect stats metric should have failed") + } catch (e: ResponseException) { + assertEquals("Failed", RestStatus.BAD_REQUEST, e.response.restStatus()) + } + } + + fun `test monitor stats _all and other metric`() { + try { + getAlertingStats("/_all,jobs_info") + fail("Incorrect stats metric should have failed") + } catch (e: ResponseException) { + assertEquals("Failed", RestStatus.BAD_REQUEST, e.response.restStatus()) + } + } + + private fun randomMonitorWithThrottle(value: Int, unit: ChronoUnit = ChronoUnit.MINUTES): Monitor { + val throttle = randomThrottle(value, unit) + val action = randomAction().copy(throttle = throttle) + val trigger = randomQueryLevelTrigger(actions = listOf(action)) + return randomQueryLevelMonitor(triggers = listOf(trigger)) + } + + @Throws(Exception::class) + fun `test search monitors only`() { + // 1. create monitor + val monitor = randomQueryLevelMonitor() + val createResponse = client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus()) + + // 2. create destination + val chime = Chime("http://abc.com") + val destination = Destination( + type = DestinationType.CHIME, + name = "test", + user = randomUser(), + lastUpdateTime = Instant.now(), + chime = chime, + slack = null, + customWebhook = null, + email = null + ) + createDestination(destination) + + // 3. search - must return only monitors. 
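+ // Note (editor): the destination created in step 2 lives in the same config index, so the single-hit assertion below verifies that the _search endpoint filters out non-monitor documents.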
+ val search = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).toString() + val searchResponse = client().makeRequest( + "GET", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"]!! as Map> + val numberDocsFound = hits["total"]?.get("value") + assertEquals("Destination objects are also returned by /_search.", 1, numberDocsFound) + + val searchHits = hits["hits"] as List + val hit = searchHits[0] as Map + val monitorHit = hit["_source"] as Map + assertEquals("Type is not monitor", monitorHit[Monitor.TYPE_FIELD], "monitor") + } + + @Throws(Exception::class) + fun `test search monitor with alerting indices only`() { + // 1. search - must return error as invalid index is passed + val search = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).toString() + val params: MutableMap = HashMap() + params["index"] = "data-logs" + try { + client().makeRequest( + "GET", + "$ALERTING_BASE_URI/_search", + params, + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + } + } + + @Throws(Exception::class) + fun `test creating a document monitor`() { + val testIndex = createTestIndex() + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + + val createResponse = client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + + assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus()) + val responseBody = createResponse.asMap() + val createdId = responseBody["_id"] as String + val createdVersion = responseBody["_version"] as Int + assertNotEquals("response is missing Id", Monitor.NO_ID, createdId) + assertTrue("incorrect version", createdVersion > 0) + val actualLocation = createResponse.getHeader("Location") + assertEquals("Incorrect Location header", "$ALERTING_BASE_URI/$createdId", actualLocation) + } + + @Throws(Exception::class) + fun `test getting a document level monitor`() { + val testIndex = createTestIndex() + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = createMonitor( + randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger), user = null) + ) + + val storedMonitor = getMonitor(monitor.id) + + assertEquals("Indexed and retrieved monitor differ", monitor, storedMonitor) + } + + @Throws(Exception::class) + fun `test updating conditions for a doc-level monitor`() { + val testIndex = createTestIndex() + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = 
createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + + val updatedTriggers = listOf( + DocumentLevelTrigger( + name = "foo", + severity = "1", + condition = Script("return true"), + actions = emptyList() + ) + ) + val updateResponse = client().makeRequest( + "PUT", + monitor.relativeUrl(), + emptyMap(), + monitor.copy(triggers = updatedTriggers).toHttpEntity() + ) + + assertEquals("Update monitor failed", RestStatus.OK, updateResponse.restStatus()) + val responseBody = updateResponse.asMap() + assertEquals("Updated monitor id doesn't match", monitor.id, responseBody["_id"] as String) + assertEquals("Version not incremented", (monitor.version + 1).toInt(), responseBody["_version"] as Int) + + val updatedMonitor = getMonitor(monitor.id) + assertEquals("Monitor trigger not updated", updatedTriggers, updatedMonitor.triggers) + } + + @Throws(Exception::class) + fun `test deleting a document level monitor`() { + val testIndex = createTestIndex() + val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) + + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) + + val deleteResponse = client().makeRequest("DELETE", monitor.relativeUrl()) + assertEquals("Delete failed", RestStatus.OK, deleteResponse.restStatus()) + + val getResponse = client().makeRequest("HEAD", monitor.relativeUrl()) + assertEquals("Deleted monitor still exists", RestStatus.NOT_FOUND, getResponse.restStatus()) + } + + fun `test creating a document monitor with error trigger`() { + val trigger = randomQueryLevelTrigger() + try { + val monitor = randomDocumentLevelMonitor(triggers = listOf(trigger)) + client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + fail("Monitor with illegal trigger should be rejected.") + } catch (e: IllegalArgumentException) { + assertEquals( + "a document monitor with error trigger", + "Incompatible trigger [${trigger.id}] for monitor type [${Monitor.MonitorType.DOC_LEVEL_MONITOR}]", + e.message + ) + } + } + + fun `test creating a query monitor with error trigger`() { + val trigger = randomBucketLevelTrigger() + try { + val monitor = randomQueryLevelMonitor(triggers = listOf(trigger)) + client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + fail("Monitor with illegal trigger should be rejected.") + } catch (e: IllegalArgumentException) { + assertEquals( + "a query monitor with error trigger", + "Incompatible trigger [${trigger.id}] for monitor type [${Monitor.MonitorType.QUERY_LEVEL_MONITOR}]", + e.message + ) + } + } + + /** + * This use case is needed by the frontend plugin for displaying alert counts on the Monitors list page. 
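+ * The test seeds a random number of alerts in each state per monitor, then checks the aggregation buckets against those counts.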
+ * https://github.com/opensearch-project/alerting-dashboards-plugin/blob/main/server/services/MonitorService.js#L235 + */ + fun `test get acknowledged, active, error, and ignored alerts counts`() { + putAlertMappings() + val monitorAlertCounts = hashMapOf>() + val numMonitors = randomIntBetween(1, 10) + repeat(numMonitors) { + val monitor = createRandomMonitor(refresh = true) + + val numAcknowledgedAlerts = randomIntBetween(1, 10) + val numActiveAlerts = randomIntBetween(1, 10) + var numCompletedAlerts = randomIntBetween(1, 10) + val numErrorAlerts = randomIntBetween(1, 10) + val numIgnoredAlerts = randomIntBetween(1, numCompletedAlerts) + numCompletedAlerts -= numIgnoredAlerts + + val alertCounts = hashMapOf( + Alert.State.ACKNOWLEDGED.name to numAcknowledgedAlerts, + Alert.State.ACTIVE.name to numActiveAlerts, + Alert.State.COMPLETED.name to numCompletedAlerts, + Alert.State.ERROR.name to numErrorAlerts, + "IGNORED" to numIgnoredAlerts + ) + monitorAlertCounts[monitor.id] = alertCounts + + repeat(numAcknowledgedAlerts) { + createAlert(randomAlert(monitor).copy(acknowledgedTime = Instant.now(), state = Alert.State.ACKNOWLEDGED)) + } + repeat(numActiveAlerts) { + createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) + } + repeat(numCompletedAlerts) { + createAlert(randomAlert(monitor).copy(acknowledgedTime = Instant.now(), state = Alert.State.COMPLETED)) + } + repeat(numErrorAlerts) { + createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) + } + repeat(numIgnoredAlerts) { + createAlert(randomAlert(monitor).copy(acknowledgedTime = null, state = Alert.State.COMPLETED)) + } + } + + val sourceBuilder = SearchSourceBuilder() + .size(0) + .query(QueryBuilders.termsQuery("monitor_id", monitorAlertCounts.keys)) + .aggregation( + AggregationBuilders + .terms("uniq_monitor_ids").field("monitor_id") + .subAggregation(AggregationBuilders.filter("active", QueryBuilders.termQuery("state", "ACTIVE"))) + .subAggregation(AggregationBuilders.filter("acknowledged", QueryBuilders.termQuery("state", "ACKNOWLEDGED"))) + .subAggregation(AggregationBuilders.filter("errors", QueryBuilders.termQuery("state", "ERROR"))) + .subAggregation( + AggregationBuilders.filter( + "ignored", + QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery("state", "COMPLETED")) + .mustNot(QueryBuilders.existsQuery("acknowledged_time")) + ) + ) + .subAggregation(AggregationBuilders.max("last_notification_time").field("last_notification_time")) + .subAggregation( + AggregationBuilders.topHits("latest_alert") + .size(1) + .sort("start_time", SortOrder.DESC) + .fetchSource(arrayOf("last_notification_time", "trigger_name"), null) + ) + ) + + val searchResponse = client().makeRequest( + "GET", + "$ALERTING_BASE_URI/_search", + hashMapOf("index" to AlertIndices.ALL_ALERT_INDEX_PATTERN), + NStringEntity(sourceBuilder.toString(), ContentType.APPLICATION_JSON) + ) + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content).map() + val aggregations = (xcp["aggregations"]!! as Map>) + val uniqMonitorIds = aggregations["uniq_monitor_ids"]!! + val buckets = uniqMonitorIds["buckets"]!! as ArrayList> + + assertEquals("Incorrect number of monitors returned", monitorAlertCounts.keys.size, buckets.size) + buckets.forEach { bucket -> + val id = bucket["key"]!! + val monitorCounts = monitorAlertCounts[id]!! + + val acknowledged = (bucket["acknowledged"]!! as Map)["doc_count"]!! 
+ assertEquals( + "Incorrect ${Alert.State.ACKNOWLEDGED} count returned for monitor $id", + monitorCounts[Alert.State.ACKNOWLEDGED.name], acknowledged + ) + + val active = (bucket["active"]!! as Map<String, Any>)["doc_count"]!! + assertEquals( + "Incorrect ${Alert.State.ACTIVE} count returned for monitor $id", + monitorCounts[Alert.State.ACTIVE.name], active + ) + + val errors = (bucket["errors"]!! as Map<String, Any>)["doc_count"]!! + assertEquals( + "Incorrect ${Alert.State.ERROR} count returned for monitor $id", + monitorCounts[Alert.State.ERROR.name], errors + ) + + val ignored = (bucket["ignored"]!! as Map<String, Any>)["doc_count"]!! + assertEquals( + "Incorrect IGNORED count returned for monitor $id", + monitorCounts["IGNORED"], ignored + ) + } + } + + private fun validateAlertingStatsNodeResponse(nodesResponse: Map<String, Any>) { + assertEquals("Incorrect number of nodes", numberOfNodes, nodesResponse["total"]) + assertEquals("Failed nodes found during monitor stats call", 0, nodesResponse["failed"]) + assertEquals("More than $numberOfNodes successful nodes", numberOfNodes, nodesResponse["successful"]) + } + + private fun isMonitorScheduled(monitorId: String, alertingStatsResponse: Map<String, Any>): Boolean { + val nodesInfo = alertingStatsResponse["nodes"] as Map<String, Any> + for (nodeId in nodesInfo.keys) { + val nodeInfo = nodesInfo[nodeId] as Map<String, Any> + val jobsInfo = nodeInfo["jobs_info"] as Map<String, Any> + if (jobsInfo.keys.contains(monitorId)) { + return true + } + } + + return false + } + + private fun assertAlertingStatsSweeperEnabled(alertingStatsResponse: Map<String, Any>, expected: Boolean) { + assertEquals( + "Legacy scheduled job enabled field is not set to $expected", + expected, + alertingStatsResponse[statsResponseOpendistroSweeperEnabledField] + ) + assertEquals( + "Scheduled job is not ${if (expected) "enabled" else "disabled"}", + expected, + alertingStatsResponse[statsResponseOpenSearchSweeperEnabledField] + ) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/SecureDestinationRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/SecureDestinationRestApiIT.kt new file mode 100644 index 000000000..ecfdd6b4f --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/resthandler/SecureDestinationRestApiIT.kt @@ -0,0 +1,159 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.http.HttpHeaders +import org.apache.http.message.BasicHeader +import org.junit.After +import org.junit.Before +import org.junit.BeforeClass +import org.opensearch.alerting.ALERTING_GET_DESTINATION_ACCESS +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.AlertingRestTestCase +import org.opensearch.alerting.TEST_HR_BACKEND_ROLE +import org.opensearch.alerting.TEST_HR_INDEX +import org.opensearch.alerting.TEST_HR_ROLE +import org.opensearch.alerting.makeRequest +import org.opensearch.alerting.model.destination.Chime +import org.opensearch.alerting.model.destination.Destination +import org.opensearch.alerting.model.destination.Slack +import org.opensearch.alerting.randomUser +import org.opensearch.alerting.util.DestinationType +import org.opensearch.client.RestClient +import org.opensearch.commons.rest.SecureRestClientBuilder +import org.opensearch.core.rest.RestStatus +import org.opensearch.test.junit.annotations.TestLogging +import java.time.Instant + +@TestLogging("level:DEBUG", reason = "Debug for tests.") +@Suppress("UNCHECKED_CAST") +class SecureDestinationRestApiIT : AlertingRestTestCase() { + + companion object { + + 
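+ // Note (editor): these security ITs only run when the suite is started with -Dsecurity=true; the assumeTrue below skips them otherwise.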
@BeforeClass + @JvmStatic + fun setup() { + // things to execute once and keep around for the class + org.junit.Assume.assumeTrue(System.getProperty("security", "false")!!.toBoolean()) + } + } + + val user = "userA" + var userClient: RestClient? = null + + @Before + fun create() { + if (userClient == null) { + createUser(user, arrayOf()) + userClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), user, password).setSocketTimeout(60000).build() + } + } + + @After + fun cleanup() { + userClient?.close() + deleteUser(user) + } + + fun `test create destination with disable filter by`() { + disableFilterBy() + + val chime = Chime("http://abc.com") + val destination = Destination( + type = DestinationType.CHIME, + name = "test", + user = randomUser(), + lastUpdateTime = Instant.now(), + chime = chime, + slack = null, + customWebhook = null, + email = null + ) + val createdDestination = createDestination(destination = destination) + assertEquals("Incorrect destination name", createdDestination.name, "test") + assertEquals("Incorrect destination type", createdDestination.type, DestinationType.CHIME) + } + + fun `test get destinations with a destination type and disable filter by`() { + disableFilterBy() + val slack = Slack("url") + val destination = Destination( + type = DestinationType.SLACK, + name = "testSlack", + user = randomUser(), + lastUpdateTime = Instant.now(), + chime = null, + slack = slack, + customWebhook = null, + email = null + ) + + // 1. create a destination as admin user + createDestination(destination, true) + + val inputMap = HashMap() + inputMap["missing"] = "_last" + inputMap["destinationType"] = "slack" + + // 2. get destinations as admin user + val adminResponse = getDestinations(client(), inputMap) + assertEquals(1, adminResponse.size) + } + + fun `test get destinations with a destination type and filter by`() { + enableFilterBy() + + val slack = Slack("url") + val destination = Destination( + type = DestinationType.SLACK, + name = "testSlack", + user = randomUser(), + lastUpdateTime = Instant.now(), + chime = null, + slack = slack, + customWebhook = null, + email = null + ) + + // 1. create a destination as admin user + createDestination(destination, true) + + val inputMap = HashMap() + inputMap["missing"] = "_last" + inputMap["destinationType"] = "slack" + + // 2. 
get destinations as admin user + val adminResponse = getDestinations(client(), inputMap) + assertEquals(1, adminResponse.size) + } + + // Destination related tests + + fun `test get destination with an user with get destination role`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_GET_DESTINATION_ACCESS) + ) + + createDestination(getTestDestination()) + + try { + val getDestinationResponse = userClient?.makeRequest( + "GET", + AlertingPlugin.DESTINATION_BASE_URI, + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get destination failed", RestStatus.OK, getDestinationResponse?.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt new file mode 100644 index 000000000..00b5475c5 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt @@ -0,0 +1,179 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.http.HttpHeaders +import org.apache.http.entity.ContentType +import org.apache.http.entity.StringEntity +import org.apache.http.message.BasicHeader +import org.junit.After +import org.junit.Before +import org.junit.BeforeClass +import org.opensearch.alerting.ALERTING_GET_EMAIL_ACCOUNT_ACCESS +import org.opensearch.alerting.ALERTING_NO_ACCESS_ROLE +import org.opensearch.alerting.ALERTING_SEARCH_EMAIL_ACCOUNT_ACCESS +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.AlertingRestTestCase +import org.opensearch.alerting.TEST_HR_BACKEND_ROLE +import org.opensearch.alerting.TEST_HR_INDEX +import org.opensearch.alerting.TEST_HR_ROLE +import org.opensearch.alerting.makeRequest +import org.opensearch.client.ResponseException +import org.opensearch.client.RestClient +import org.opensearch.commons.rest.SecureRestClientBuilder +import org.opensearch.core.rest.RestStatus + +val SEARCH_EMAIL_ACCOUNT_DSL = """ + { + "from": 0, + "size": 20, + "sort": { "email_group.name.keyword": "desc" }, + "query": { + "bool": { + "must": { + "match_all": {} + } + } + } + } +""".trimIndent() + +class SecureEmailAccountRestApiIT : AlertingRestTestCase() { + + companion object { + + @BeforeClass + @JvmStatic + fun setup() { + // things to execute once and keep around for the class + org.junit.Assume.assumeTrue(System.getProperty("security", "false")!!.toBoolean()) + } + } + + val user = "userB" + var userClient: RestClient? = null + + @Before + fun create() { + if (userClient == null) { + createUser(user, arrayOf()) + userClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), user, password).setSocketTimeout(60000).build() + } + } + + @After + fun cleanup() { + userClient?.close() + deleteUser(user) + } + + // Email account related tests. 
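+    // Editor's sketch (not part of the original change): the email account tests below repeat one
+    // request/assert pattern for the success path; a hypothetical helper like this could express it once.
+    // The forbidden path throws ResponseException instead of returning, so it is asserted separately.
+    private fun assertGetReturns(client: RestClient?, url: String, expected: RestStatus) {
+        val response = client?.makeRequest(
+            "GET",
+            url,
+            null,
+            BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json")
+        )
+        assertEquals("Unexpected status for GET $url", expected, response?.restStatus())
+    }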
+ + fun `test get email accounts with an user with get email account role`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_GET_EMAIL_ACCOUNT_ACCESS) + ) + + val emailAccount = createRandomEmailAccountWithGivenName(true, randomAlphaOfLength(5)) + + try { + val emailAccountResponse = userClient?.makeRequest( + "GET", + "${AlertingPlugin.EMAIL_ACCOUNT_BASE_URI}/${emailAccount.id}", + StringEntity( + emailAccount.toJsonString(), + ContentType.APPLICATION_JSON + ), + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + + assertEquals("Get Email failed", RestStatus.OK, emailAccountResponse?.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test search email accounts with an user with search email account role`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_SEARCH_EMAIL_ACCOUNT_ACCESS) + ) + + createRandomEmailAccountWithGivenName(true, randomAlphaOfLength(10)) + + try { + val searchEmailAccountResponse = userClient?.makeRequest( + "POST", + "${AlertingPlugin.EMAIL_ACCOUNT_BASE_URI}/_search", + StringEntity(SEARCH_EMAIL_ACCOUNT_DSL, ContentType.APPLICATION_JSON), + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Search Email failed", RestStatus.OK, searchEmailAccountResponse?.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + /* + TODO: https://github.com/opensearch-project/alerting/issues/300 + */ + fun `test get email accounts with an user without get email account role`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) + ) + val emailAccount = createRandomEmailAccountWithGivenName(true, randomAlphaOfLength(5)) + try { + userClient?.makeRequest( + "GET", + "${AlertingPlugin.EMAIL_ACCOUNT_BASE_URI}/${emailAccount.id}", + StringEntity( + emailAccount.toJsonString(), + ContentType.APPLICATION_JSON + ), + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + fun `test search email accounts with an user without search email account role`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) + ) + createRandomEmailAccountWithGivenName(true, randomAlphaOfLength(5)) + try { + userClient?.makeRequest( + "POST", + "${AlertingPlugin.EMAIL_ACCOUNT_BASE_URI}/_search", + StringEntity(SEARCH_EMAIL_ACCOUNT_DSL, ContentType.APPLICATION_JSON), + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt new file mode 100644 index 000000000..617f652ee --- /dev/null +++ 
b/alerting/bin/test/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt @@ -0,0 +1,128 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.http.HttpHeaders +import org.apache.http.entity.ContentType +import org.apache.http.entity.StringEntity +import org.apache.http.message.BasicHeader +import org.junit.After +import org.junit.Before +import org.junit.BeforeClass +import org.opensearch.alerting.ALERTING_GET_EMAIL_GROUP_ACCESS +import org.opensearch.alerting.ALERTING_SEARCH_EMAIL_GROUP_ACCESS +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.AlertingRestTestCase +import org.opensearch.alerting.TEST_HR_BACKEND_ROLE +import org.opensearch.alerting.TEST_HR_INDEX +import org.opensearch.alerting.TEST_HR_ROLE +import org.opensearch.alerting.makeRequest +import org.opensearch.client.RestClient +import org.opensearch.commons.rest.SecureRestClientBuilder +import org.opensearch.core.rest.RestStatus +import org.opensearch.test.junit.annotations.TestLogging + +val SEARCH_EMAIL_GROUP_DSL = """ + { + "from": 0, + "size": 20, + "sort": { "email_group.name.keyword": "desc" }, + "query": { + "bool": { + "must": { + "match_all": {} + } + } + } + } +""".trimIndent() + +@TestLogging("level:DEBUG", reason = "Debug for tests.") +@Suppress("UNCHECKED_CAST") +class SecureEmailGroupsRestApiIT : AlertingRestTestCase() { + companion object { + + @BeforeClass + @JvmStatic + fun setup() { + // things to execute once and keep around for the class + org.junit.Assume.assumeTrue(System.getProperty("security", "false")!!.toBoolean()) + } + } + + val user = "userC" + var userClient: RestClient? = null + + @Before + fun create() { + if (userClient == null) { + createUser(user, arrayOf()) + userClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), user, password).setSocketTimeout(60000).build() + } + } + + @After + fun cleanup() { + userClient?.close() + deleteUser(user) + } + + // Email groups related tests. 
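+    // Editor's sketch (not part of the original change): unlike the email account suite, this class
+    // covers only the permitted paths. A negative-permission case mirroring that suite might look like
+    // the following; it assumes ALERTING_NO_ACCESS_ROLE and org.opensearch.client.ResponseException
+    // are imported here as well.
+    fun `test get email groups with a user without get email group role`() {
+        createUserWithTestDataAndCustomRole(
+            user,
+            TEST_HR_INDEX,
+            TEST_HR_ROLE,
+            listOf(TEST_HR_BACKEND_ROLE),
+            getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE)
+        )
+        val emailGroup = createRandomEmailGroupWithGivenName(true, randomAlphaOfLength(5))
+        try {
+            userClient?.makeRequest(
+                "GET",
+                "${AlertingPlugin.EMAIL_GROUP_BASE_URI}/${emailGroup.id}",
+                null,
+                BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json")
+            )
+            fail("Expected 403 FORBIDDEN response")
+        } catch (e: ResponseException) {
+            assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus())
+        } finally {
+            deleteRoleAndRoleMapping(TEST_HR_ROLE)
+        }
+    }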
+ + fun `test get email groups with an user with get email group role`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_GET_EMAIL_GROUP_ACCESS) + ) + + val emailGroup = createRandomEmailGroupWithGivenName(true, randomAlphaOfLength(5)) + + try { + val getEmailGroupResponse = userClient?.makeRequest( + "GET", + "${AlertingPlugin.EMAIL_GROUP_BASE_URI}/${emailGroup.id}", + StringEntity( + emailGroup.toJsonString(), + ContentType.APPLICATION_JSON + ), + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get Email Group failed", RestStatus.OK, getEmailGroupResponse?.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test search email groups with an user with search email group role`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_SEARCH_EMAIL_GROUP_ACCESS) + ) + + createRandomEmailGroupWithGivenName(true, randomAlphaOfLength(10)) + + try { + val searchEmailGroupsResponse = userClient?.makeRequest( + "POST", + "${AlertingPlugin.EMAIL_GROUP_BASE_URI}/_search", + StringEntity( + SEARCH_EMAIL_GROUP_DSL, + ContentType.APPLICATION_JSON + ), + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Search Email Group failed", RestStatus.OK, searchEmailGroupsResponse?.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/SecureMonitorRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/SecureMonitorRestApiIT.kt new file mode 100644 index 000000000..86207bab8 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/resthandler/SecureMonitorRestApiIT.kt @@ -0,0 +1,1577 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.http.HttpHeaders +import org.apache.http.entity.ContentType +import org.apache.http.entity.StringEntity +import org.apache.http.message.BasicHeader +import org.apache.http.nio.entity.NStringEntity +import org.junit.After +import org.junit.Before +import org.junit.BeforeClass +import org.opensearch.alerting.ADMIN +import org.opensearch.alerting.ALERTING_BASE_URI +import org.opensearch.alerting.ALERTING_DELETE_MONITOR_ACCESS +import org.opensearch.alerting.ALERTING_EXECUTE_MONITOR_ACCESS +import org.opensearch.alerting.ALERTING_FULL_ACCESS_ROLE +import org.opensearch.alerting.ALERTING_GET_ALERTS_ACCESS +import org.opensearch.alerting.ALERTING_GET_MONITOR_ACCESS +import org.opensearch.alerting.ALERTING_INDEX_MONITOR_ACCESS +import org.opensearch.alerting.ALERTING_NO_ACCESS_ROLE +import org.opensearch.alerting.ALERTING_READ_ONLY_ACCESS +import org.opensearch.alerting.ALERTING_SEARCH_MONITOR_ONLY_ACCESS +import org.opensearch.alerting.ALL_ACCESS_ROLE +import org.opensearch.alerting.ALWAYS_RUN +import org.opensearch.alerting.AlertingRestTestCase +import org.opensearch.alerting.DRYRUN_MONITOR +import org.opensearch.alerting.READALL_AND_MONITOR_ROLE +import org.opensearch.alerting.TERM_DLS_QUERY +import org.opensearch.alerting.TEST_HR_BACKEND_ROLE +import org.opensearch.alerting.TEST_HR_INDEX +import org.opensearch.alerting.TEST_HR_ROLE +import org.opensearch.alerting.TEST_NON_HR_INDEX +import org.opensearch.alerting.assertUserNull +import org.opensearch.alerting.makeRequest 
+import org.opensearch.alerting.randomAction +import org.opensearch.alerting.randomAlert +import org.opensearch.alerting.randomBucketLevelMonitor +import org.opensearch.alerting.randomBucketLevelTrigger +import org.opensearch.alerting.randomDocumentLevelMonitor +import org.opensearch.alerting.randomQueryLevelMonitor +import org.opensearch.alerting.randomQueryLevelTrigger +import org.opensearch.alerting.randomTemplateScript +import org.opensearch.client.Response +import org.opensearch.client.ResponseException +import org.opensearch.client.RestClient +import org.opensearch.common.settings.Settings +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentType +import org.opensearch.common.xcontent.json.JsonXContent +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.authuser.User +import org.opensearch.commons.rest.SecureRestClientBuilder +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.index.query.QueryBuilders +import org.opensearch.script.Script +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder +import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.test.junit.annotations.TestLogging + +@TestLogging("level:DEBUG", reason = "Debug for tests.") +@Suppress("UNCHECKED_CAST") +class SecureMonitorRestApiIT : AlertingRestTestCase() { + + companion object { + + @BeforeClass + @JvmStatic + fun setup() { + // things to execute once and keep around for the class + org.junit.Assume.assumeTrue(System.getProperty("security", "false")!!.toBoolean()) + } + } + + val user = "userD" + var userClient: RestClient? = null + + @Before + fun create() { + if (userClient == null) { + createUser(user, arrayOf()) + userClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), user, password).setSocketTimeout(60000).build() + } + } + + @After + fun cleanup() { + userClient?.close() + deleteUser(user) + } + + // Create Monitor related security tests + fun `test create monitor with an user with alerting role`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_INDEX_MONITOR_ACCESS) + ) + try { + // randomMonitor has a dummy user, api ignores the User passed as part of monitor, it picks user info from the logged-in user. 
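+ // (editor) assertUserNull below then verifies that the create response does not echo that resolved user back to the caller.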
+ val monitor = randomQueryLevelMonitor().copy( + inputs = listOf( + SearchInput( + indices = listOf(TEST_HR_INDEX), + query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) + ) + ) + ) + val createResponse = userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + assertEquals("Create monitor failed", RestStatus.CREATED, createResponse?.restStatus()) + + assertUserNull(createResponse?.asMap()!!["monitor"] as HashMap) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + /* + TODO: https://github.com/opensearch-project/alerting/issues/300 + */ + fun `test create monitor with an user without alerting role`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) + ) + try { + val monitor = randomQueryLevelMonitor().copy( + inputs = listOf( + SearchInput( + indices = listOf(TEST_HR_INDEX), + query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) + ) + ) + ) + userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test create monitor with an user with read-only role`() { + createUserWithTestData(user, TEST_HR_INDEX, TEST_HR_ROLE, TEST_HR_BACKEND_ROLE) + createUserRolesMapping(ALERTING_READ_ONLY_ACCESS, arrayOf(user)) + + try { + val monitor = randomQueryLevelMonitor().copy( + inputs = listOf( + SearchInput( + indices = listOf(TEST_HR_INDEX), + query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) + ) + ) + ) + userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteRoleMapping(ALERTING_READ_ONLY_ACCESS) + } + } + + fun `test query monitors with an user with only search monitor cluster permission`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_SEARCH_MONITOR_ONLY_ACCESS) + ) + val monitor = createRandomMonitor(true) + + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() + val searchResponse = client().makeRequest( + "GET", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + + assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"]!! 
as Map> + val numberDocsFound = hits["total"]?.get("value") + assertEquals("Monitor not found during search", 1, numberDocsFound) + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + + /* + TODO: https://github.com/opensearch-project/alerting/issues/300 + */ + fun `test query monitors with an user without search monitor cluster permission`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) + ) + try { + val monitor = randomQueryLevelMonitor().copy( + inputs = listOf( + SearchInput( + indices = listOf(TEST_HR_INDEX), + query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) + ) + ) + ) + userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test create monitor with an user without index read role`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_INDEX_MONITOR_ACCESS) + ) + try { + val monitor = randomQueryLevelMonitor().copy( + inputs = listOf( + SearchInput( + indices = listOf(TEST_NON_HR_INDEX), + query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) + ) + ) + ) + val createResponse = userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + assertEquals("Create monitor failed", RestStatus.CREATED, createResponse?.restStatus()) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test create monitor with disable filter by`() { + disableFilterBy() + val monitor = randomQueryLevelMonitor() + val createResponse = client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus()) + assertUserNull(createResponse.asMap()["monitor"] as HashMap) + } + + fun `test get monitor with an user with get monitor role`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) + ) + + val monitor = createRandomMonitor(true) + + try { + val getMonitorResponse = userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${monitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + /* + TODO: https://github.com/opensearch-project/alerting/issues/300 + */ + fun `test get monitor with an user without get monitor role`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) + ) + + val monitor = createRandomMonitor(true) + + try { + userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${monitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + 
assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun getDocs(response: Response?): Any? { + val hits = createParser( + XContentType.JSON.xContent(), + response?.entity?.content + ).map()["hits"]!! as Map> + return hits["total"]?.get("value") + } + + // Query Monitors related security tests + fun `test update monitor with disable filter by`() { + disableFilterBy() + val monitor = randomQueryLevelMonitor(enabled = true) + + val createdMonitor = createMonitor(monitor = monitor) + + assertNotNull("The monitor was not created", createdMonitor) + assertTrue("The monitor was not enabled", createdMonitor.enabled) + + val monitorV2 = createdMonitor.copy(enabled = false, enabledTime = null) + val updatedMonitor = updateMonitor(monitor = monitorV2) + + assertFalse("The monitor was not disabled", updatedMonitor.enabled) + } + + fun `test update monitor with enable filter by`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + val createdMonitor = createMonitor(monitor = monitor) + + assertNotNull("The monitor was not created", createdMonitor) + assertTrue("The monitor was not enabled", createdMonitor.enabled) + + val monitorV2 = createdMonitor.copy(enabled = false, enabledTime = null) + val updatedMonitor = updateMonitor(monitor = monitorV2) + + assertFalse("The monitor was not disabled", updatedMonitor.enabled) + } + + fun `test create monitor with enable filter by with a user have access and without role has no access`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) + assertNotNull("The monitor was not created", createdMonitor) + + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + + // getUser should have access to the monitor + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role2"), + getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) + ) + val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) + .setSocketTimeout(60000).build() + + val getMonitorResponse = getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) + + // Remove backend role and ensure no access is granted after + patchUserBackendRoles(getUser, arrayOf("role1")) + try { + getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } 
finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteUser(getUser) + getUserClient?.close() + } + } + + fun `test create monitor with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + try { + createMonitorWithClient(userClient!!, monitor = monitor, listOf()) + fail("Expected exception since a non-admin user is trying to create a monitor with no backend roles") + } catch (e: ResponseException) { + assertEquals("Create monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test create monitor as admin with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(client(), monitor = monitor, listOf()) + assertNotNull("The monitor was not created", createdMonitor) + + try { + userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test create monitor with enable filter by with roles user has no access and throw exception`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + try { + createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role1", "role2")) + fail("Expected create monitor to fail as user does not have role1 backend role") + } catch (e: ResponseException) { + assertEquals("Create monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test create monitor as admin with enable filter by with a user have access and without role has no access`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + val createdMonitor = 
createMonitorWithClient(client(), monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role1", "role2")) + assertNotNull("The monitor was not created", createdMonitor) + + // user should have access to the admin monitor + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) + ) + + val getMonitorResponse = userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) + + // Remove good backend role and ensure no access is granted after + patchUserBackendRoles(user, arrayOf("role5")) + try { + userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test update monitor with enable filter by with removing a permission`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) + assertNotNull("The monitor was not created", createdMonitor) + + // getUser should have access to the monitor + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role2"), + getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) + ) + val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) + .setSocketTimeout(60000).build() + + val getMonitorResponse = getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) + + // Remove backend role from monitor + val updatedMonitor = updateMonitorWithClient(userClient!!, createdMonitor, listOf(TEST_HR_BACKEND_ROLE)) + + // getUser should no longer have access + try { + getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${updatedMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteUser(getUser) + getUserClient?.close() + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update monitor with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = 
randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf("role2")) + assertNotNull("The monitor was not created", createdMonitor) + + try { + updateMonitorWithClient(userClient!!, createdMonitor, listOf()) + } catch (e: ResponseException) { + assertEquals("Update monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update monitor as admin with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(client(), monitor = monitor, listOf(TEST_HR_BACKEND_ROLE)) + assertNotNull("The monitor was not created", createdMonitor) + + val getMonitorResponse = userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) + + val updatedMonitor = updateMonitorWithClient(client(), createdMonitor, listOf()) + + try { + userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${updatedMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update monitor with enable filter by with updating with a permission user has no access to and throw exception`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) + assertNotNull("The monitor was not created", createdMonitor) + + // getUser should have access to the monitor + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role2"), + getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) + ) + val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) + .setSocketTimeout(60000).build() + + val getMonitorResponse = getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get monitor failed", RestStatus.OK, 
getMonitorResponse?.restStatus()) + + try { + updateMonitorWithClient(userClient!!, createdMonitor, listOf(TEST_HR_BACKEND_ROLE, "role1")) + fail("Expected update monitor to fail as user doesn't have access to role1") + } catch (e: ResponseException) { + assertEquals("Update monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteUser(getUser) + getUserClient?.close() + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update monitor as another user with enable filter by with removing a permission and adding permission`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE)) + assertNotNull("The monitor was not created", createdMonitor) + + // Remove backend role from monitor with new user and add role5 + val updateUser = "updateUser" + createUserWithRoles( + updateUser, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role5"), + false + ) + + val updateUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), updateUser, password) + .setSocketTimeout(60000).build() + val updatedMonitor = updateMonitorWithClient(updateUserClient, createdMonitor, listOf("role5")) + + // old user should no longer have access + try { + userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${updatedMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteUser(updateUser) + updateUserClient?.close() + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update monitor as admin with enable filter by with removing a permission`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) + assertNotNull("The monitor was not created", createdMonitor) + + // getUser should have access to the monitor + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role1", "role2"), + getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) + ) + val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) + .setSocketTimeout(60000).build() + + val getMonitorResponse = getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${createdMonitor.id}", + null, + 
BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) + + // Remove backend role from monitor + val updatedMonitor = updateMonitorWithClient(client(), createdMonitor, listOf("role4")) + + // original user should no longer have access + try { + userClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${updatedMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + + // get user should no longer have access + try { + getUserClient?.makeRequest( + "GET", + "$ALERTING_BASE_URI/${updatedMonitor.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteUser(getUser) + getUserClient?.close() + } + } + + fun `test delete monitor with disable filter by`() { + disableFilterBy() + val monitor = randomQueryLevelMonitor(enabled = true) + + val createdMonitor = createMonitor(monitor = monitor) + + assertNotNull("The monitor was not created", createdMonitor) + assertTrue("The monitor was not enabled", createdMonitor.enabled) + + deleteMonitor(monitor = createdMonitor) + + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", createdMonitor.id)).toString() + // search as "admin" - must get 0 docs + val adminSearchResponse = client().makeRequest( + "POST", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) + + val adminHits = createParser( + XContentType.JSON.xContent(), + adminSearchResponse.entity.content + ).map()["hits"]!! as Map<String, Map<String, Any>> + val adminDocsFound = adminHits["total"]?.get("value") + assertEquals("Monitor found during search", 0, adminDocsFound) + } + + fun `test delete monitor with enable filter by`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + val createdMonitor = createMonitor(monitor = monitor) + + assertNotNull("The monitor was not created", createdMonitor) + assertTrue("The monitor was not enabled", createdMonitor.enabled) + + deleteMonitor(monitor = createdMonitor) + + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", createdMonitor.id)).toString() + // search as "admin" - must get 0 docs + val adminSearchResponse = client().makeRequest( + "POST", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) + + val adminHits = createParser( + XContentType.JSON.xContent(), + adminSearchResponse.entity.content + ).map()["hits"]!! as Map<String, Map<String, Any>> + val adminDocsFound = adminHits["total"]?.get("value") + assertEquals("Monitor found during search", 0, adminDocsFound) + } + + /* + TODO: https://github.com/opensearch-project/alerting/issues/300 + */ + fun `test query monitors with disable filter by`() { + disableFilterBy() + + // creates monitor as "admin" user. + val monitor = createRandomMonitor(true) + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() + + // search as "admin" - must get 1 docs + val adminSearchResponse = client().makeRequest( + "POST", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) + assertEquals("Monitor not found during search", 1, getDocs(adminSearchResponse)) + + // search as userOne without alerting roles - must return 403 Forbidden + try { + userClient?.makeRequest( + "POST", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + fail("Expected 403 FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } + // add alerting roles and search as userOne - must return 1 docs + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_SEARCH_MONITOR_ONLY_ACCESS) + ) + try { + val userOneSearchResponse = userClient?.makeRequest( + "POST", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search monitor failed", RestStatus.OK, userOneSearchResponse?.restStatus()) + assertEquals("Monitor not found during search", 1, getDocs(userOneSearchResponse)) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test query monitors with enable filter by`() { + enableFilterBy() + + // creates monitor as "admin" user. 
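+ // Even after userOne is mapped to the full-access role below, filter by restricts search results to monitors owned by the user's backend roles, so the search is expected to return 0 hits for this admin-created monitor.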
+ val monitor = createRandomMonitor(true) + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() + + // search as "admin" - must get 1 docs + val adminSearchResponse = client().makeRequest( + "POST", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) + assertEquals("Monitor not found during search", 1, getDocs(adminSearchResponse)) + + // search as userOne without alerting roles - must return 403 Forbidden + try { + userClient?.makeRequest( + "POST", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + fail("Expected 403 FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } + + // add alerting roles and search as userOne - must return 0 docs + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf(user)) + try { + val userOneSearchResponse = userClient?.makeRequest( + "POST", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search monitor failed", RestStatus.OK, userOneSearchResponse?.restStatus()) + assertEquals("Monitor not found during search", 0, getDocs(userOneSearchResponse)) + } finally { + deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) + } + } + + fun `test execute monitor with an user with execute monitor access`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_EXECUTE_MONITOR_ACCESS) + ) + + val monitor = createRandomMonitor(true) + + try { + val executeMonitorResponse = userClient?.makeRequest( + "POST", + "$ALERTING_BASE_URI/${monitor.id}/_execute", + mutableMapOf() + ) + assertEquals("Execute monitor failed", RestStatus.OK, executeMonitorResponse?.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + /* + TODO: https://github.com/opensearch-project/alerting/issues/300 + */ + fun `test execute monitor with an user without execute monitor access`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) + ) + + val monitor = createRandomMonitor(true) + + try { + userClient?.makeRequest( + "POST", + "$ALERTING_BASE_URI/${monitor.id}/_execute", + mutableMapOf() + ) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Execute monitor failed", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test delete monitor with an user with delete monitor access`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_DELETE_MONITOR_ACCESS) + ) + + val monitor = createRandomMonitor(true) + val refresh = true + + try { + val deleteMonitorResponse = userClient?.makeRequest( + "DELETE", + "$ALERTING_BASE_URI/${monitor.id}?refresh=$refresh", + emptyMap(), + monitor.toHttpEntity() + ) + assertEquals("Delete monitor failed", RestStatus.OK, deleteMonitorResponse?.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + /* + TODO: https://github.com/opensearch-project/alerting/issues/300 + */
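+ // Counterpart to the test above: the same DELETE call should be rejected with 403 when the user's custom role carries no delete monitor cluster permission.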
+ fun `test delete monitor with an user without delete monitor access`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) + ) + + val monitor = createRandomMonitor(true) + val refresh = true + + try { + userClient?.makeRequest( + "DELETE", + "$ALERTING_BASE_URI/${monitor.id}?refresh=$refresh", + emptyMap(), + monitor.toHttpEntity() + ) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Delete monitor failed", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test query all alerts in all states with disabled filter by`() { + disableFilterBy() + putAlertMappings() + val monitor = createRandomMonitor(refresh = true) + createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) + createAlert(randomAlert(monitor).copy(state = Alert.State.COMPLETED)) + createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) + createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) + randomAlert(monitor).copy(id = "foobar") + + val inputMap = HashMap<String, Any>() + inputMap["missing"] = "_last" + + // search as "admin" - must get 4 docs + val adminResponseMap = getAlerts(client(), inputMap).asMap() + assertEquals(4, adminResponseMap["totalAlerts"]) + + // search as userOne without alerting roles - must return 403 Forbidden + try { + getAlerts(userClient as RestClient, inputMap).asMap() + fail("Expected 403 FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } + + // add alerting roles and search as userOne - must return 4 docs since filter by is disabled + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf(user)) + try { + val responseMap = getAlerts(userClient as RestClient, inputMap).asMap() + assertEquals(4, responseMap["totalAlerts"]) + } finally { + deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) + } + } + + fun `test query all alerts in all states with filter by`() { + enableFilterBy() + putAlertMappings() + val adminUser = User(ADMIN, listOf(ADMIN), listOf(ALL_ACCESS_ROLE), listOf()) + var monitor = createRandomMonitor(refresh = true).copy(user = adminUser) + createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) + createAlert(randomAlert(monitor).copy(state = Alert.State.COMPLETED)) + createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) + createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) + randomAlert(monitor).copy(id = "foobar") + + val inputMap = HashMap<String, Any>() + inputMap["missing"] = "_last" + + // search as "admin" - must get 4 docs + val adminResponseMap = getAlerts(client(), inputMap).asMap() + assertEquals(4, adminResponseMap["totalAlerts"]) + + // search as userOne without alerting roles - must return 403 Forbidden + try { + getAlerts(userClient as RestClient, inputMap).asMap() + fail("Expected 403 FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } + // add alerting roles and search as userOne - must return 0 docs + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf(user)) + try { + val responseMap = getAlerts(userClient as RestClient, inputMap).asMap() + assertEquals(0, responseMap["totalAlerts"]) + } finally { + deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) + } + } + + fun `test query all alerts in all states with filter by1`() { + enableFilterBy() + 
putAlertMappings() + val adminUser = User(ADMIN, listOf(ADMIN), listOf(ALL_ACCESS_ROLE), listOf()) + var monitor = createRandomMonitor(refresh = true).copy(user = adminUser) + createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) + createAlert(randomAlert(monitor).copy(state = Alert.State.COMPLETED)) + createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) + createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) + randomAlert(monitor).copy(id = "foobar") + + val inputMap = HashMap<String, Any>() + inputMap["missing"] = "_last" + inputMap["monitorId"] = monitor.id + + // search as "admin" - must get 4 docs + val adminResponseMap = getAlerts(client(), inputMap).asMap() + assertEquals(4, adminResponseMap["totalAlerts"]) + + // search as userOne without alerting roles - must return 403 Forbidden + try { + getAlerts(userClient as RestClient, inputMap).asMap() + fail("Expected 403 FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } + // map userOne to the full access role with the admin backend role and search again - must return 4 docs + createUserWithRoles(user, listOf(ALERTING_FULL_ACCESS_ROLE), listOf(ADMIN), false) + try { + val responseMap = getAlerts(userClient as RestClient, inputMap).asMap() + assertEquals(4, responseMap["totalAlerts"]) + } finally { + deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) + } + } + + fun `test get alerts with an user with get alerts role`() { + putAlertMappings() + val ackAlertsUser = User(ADMIN, listOf(ADMIN), listOf(ALERTING_GET_ALERTS_ACCESS), listOf()) + var monitor = createRandomMonitor(refresh = true).copy(user = ackAlertsUser) + createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) + createAlert(randomAlert(monitor).copy(state = Alert.State.COMPLETED)) + createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) + createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) + randomAlert(monitor).copy(id = "foobar") + + val inputMap = HashMap<String, Any>() + inputMap["missing"] = "_last" + + // search as "admin" - must get 4 docs + val adminResponseMap = getAlerts(client(), inputMap).asMap() + assertEquals(4, adminResponseMap["totalAlerts"]) + + // add get alerts permission and search as userOne - must return 4 docs + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_GET_ALERTS_ACCESS) + ) + try { + val responseMap = getAlerts(userClient as RestClient, inputMap).asMap() + assertEquals(4, responseMap["totalAlerts"]) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + // Execute Monitor related security tests + + fun `test execute monitor with elevate permissions`() { + val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id) + val inputs = listOf( + SearchInput( + indices = listOf(TEST_NON_HR_INDEX), + query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) + ) + ) + val monitor = randomQueryLevelMonitor( + triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))), + inputs = inputs + ) + + // Make sure that elevating the permissions fails the execution. 
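+ // The monitor document is assigned the admin user below, but execution through userClient still runs with the calling user's permissions, so the search input is expected to fail with a security error instead of reading TEST_NON_HR_INDEX.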
+ val adminUser = User(ADMIN, listOf(ADMIN), listOf(ALL_ACCESS_ROLE), listOf()) + var modifiedMonitor = monitor.copy(user = adminUser) + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf(user)) + + try { + val response = executeMonitor(userClient as RestClient, modifiedMonitor, params = DRYRUN_MONITOR) + val output = entityAsMap(response) + val inputResults = output.stringMap("input_results") + assertTrue("Missing monitor error message", (inputResults?.get("error") as String).isNotEmpty()) + assertTrue((inputResults.get("error") as String).contains("no permissions for [indices:data/read/search]")) + } finally { + deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) + } + } + + fun `test admin all access with enable filter by`() { + enableFilterBy() + createUserWithTestData(user, TEST_HR_INDEX, TEST_HR_ROLE, TEST_HR_BACKEND_ROLE) + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf(user)) + try { + // randomMonitor has a dummy user, api ignores the User passed as part of monitor, it picks user info from the logged-in user. + val monitor = randomQueryLevelMonitor().copy( + inputs = listOf( + SearchInput( + indices = listOf(TEST_HR_INDEX), + query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) + ) + ) + ) + + val createResponse = userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + assertEquals("Create monitor failed", RestStatus.CREATED, createResponse?.restStatus()) + val monitorJson = JsonXContent.jsonXContent.createParser( + NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, + createResponse?.entity?.content + ).map() + + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitorJson["_id"])).toString() + + // search as "admin" - must get 1 docs + val adminSearchResponse = client().makeRequest( + "POST", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) + assertEquals("Monitor not found during search", 1, getDocs(adminSearchResponse)) + + // get as "admin" - must get 1 docs + val id: String = monitorJson["_id"] as String + val adminGetResponse = client().makeRequest( + "GET", + "$ALERTING_BASE_URI/$id", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Get monitor failed", RestStatus.OK, adminGetResponse.restStatus()) + + // delete as "admin" + val adminDeleteResponse = client().makeRequest( + "DELETE", + "$ALERTING_BASE_URI/$id", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Delete monitor failed", RestStatus.OK, adminDeleteResponse.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) + } + } + + /* + TODO: https://github.com/opensearch-project/alerting/issues/300 + */ + fun `test execute query-level monitor with user having partial index permissions`() { + createUser(user, arrayOf(TEST_HR_BACKEND_ROLE)) + createTestIndex(TEST_HR_INDEX) + createIndexRoleWithDocLevelSecurity( + TEST_HR_ROLE, + TEST_HR_INDEX, + TERM_DLS_QUERY, + getClusterPermissionsFromCustomRole(ALERTING_INDEX_MONITOR_ACCESS) + ) + createUserRolesMapping(TEST_HR_ROLE, arrayOf(user)) + + // Add a doc that is accessible to the user + indexDoc( + TEST_HR_INDEX, + "1", + """ + { + "test_field": "a", + "accessible": true + } + """.trimIndent() + ) + + // Add a second doc that is not accessible to the user + indexDoc( + TEST_HR_INDEX, + "2", + """ + { + 
"test_field": "b", + "accessible": false + } + """.trimIndent() + ) + + val input = SearchInput(indices = listOf(TEST_HR_INDEX), query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery())) + val triggerScript = """ + // make sure there is exactly one hit + return ctx.results[0].hits.hits.size() == 1 + """.trimIndent() + + val trigger = randomQueryLevelTrigger(condition = Script(triggerScript)).copy(actions = listOf()) + val monitor = createMonitorWithClient( + userClient!!, + randomQueryLevelMonitor(inputs = listOf(input), triggers = listOf(trigger)) + ) + + try { + executeMonitor(monitor.id) + val alerts = searchAlerts(monitor) + assertEquals("Incorrect number of alerts", 1, alerts.size) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test execute bucket-level monitor with user having partial index permissions`() { + createUser(user, arrayOf(TEST_HR_BACKEND_ROLE)) + createTestIndex(TEST_HR_INDEX) + createIndexRoleWithDocLevelSecurity( + TEST_HR_ROLE, + TEST_HR_INDEX, + TERM_DLS_QUERY, + getClusterPermissionsFromCustomRole(ALERTING_INDEX_MONITOR_ACCESS) + ) + createUserRolesMapping(TEST_HR_ROLE, arrayOf(user)) + + // Add a doc that is accessible to the user + indexDoc( + TEST_HR_INDEX, + "1", + """ + { + "test_field": "a", + "accessible": true + } + """.trimIndent() + ) + + // Add a second doc that is not accessible to the user + indexDoc( + TEST_HR_INDEX, + "2", + """ + { + "test_field": "b", + "accessible": false + } + """.trimIndent() + ) + + val compositeSources = listOf( + TermsValuesSourceBuilder("test_field").field("test_field") + ) + val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) + val input = SearchInput( + indices = listOf(TEST_HR_INDEX), + query = SearchSourceBuilder().size(0).query(QueryBuilders.matchAllQuery()).aggregation(compositeAgg) + ) + val triggerScript = """ + params.docCount > 0 + """.trimIndent() + + var trigger = randomBucketLevelTrigger() + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "composite_agg", + filter = null + ), + actions = listOf() + ) + val monitor = createMonitorWithClient( + userClient!!, + randomBucketLevelMonitor(inputs = listOf(input), enabled = false, triggers = listOf(trigger)) + ) + + try { + executeMonitor(monitor.id) + val alerts = searchAlerts(monitor) + assertEquals("Incorrect number of alerts", 1, alerts.size) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + /** + * We want to verify that user roles/permissions do not affect clean up of monitors during partial monitor creation failure + */ + fun `test create monitor failure clean up with a user without delete monitor access`() { + enableFilterBy() + createUser(user, listOf(TEST_HR_BACKEND_ROLE, "role2").toTypedArray()) + createTestIndex(TEST_HR_INDEX) + createCustomIndexRole( + ALERTING_INDEX_MONITOR_ACCESS, + TEST_HR_INDEX, + getClusterPermissionsFromCustomRole(ALERTING_INDEX_MONITOR_ACCESS) + ) + createUserWithRoles( + user, + listOf(ALERTING_INDEX_MONITOR_ACCESS, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + val docLevelQueryIndex = ".opensearch-alerting-queries-000001" + createIndex( + docLevelQueryIndex, Settings.EMPTY, + """ + "properties" : { + "query": { + "type": "percolator_ext" + }, + "monitor_id": { + "type": "text" + }, + "index": { + "type": "text" + } + } + } + """.trimIndent(), + 
".opensearch-alerting-queries" + ) + closeIndex(docLevelQueryIndex) // close index to simulate doc level query indexing failure + try { + val monitor = randomDocumentLevelMonitor( + withMetadata = false, + triggers = listOf(), + inputs = listOf(DocLevelMonitorInput("description", listOf(TEST_HR_INDEX), emptyList())) + ) + userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) + fail("Monitor creation should have failed due to error in indexing doc level queries") + } catch (e: ResponseException) { + val search = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).size(10).toString() + val searchResponse = client().makeRequest( + "GET", "$ALERTING_BASE_URI/_search", + emptyMap(), + StringEntity(search, ContentType.APPLICATION_JSON) + ) + val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) + val hits = xcp.map()["hits"]!! as Map<String, Map<String, Any>> + val numberDocsFound = hits["total"]?.get("value") + assertEquals("Monitors found. Clean up unsuccessful", 0, numberDocsFound) + } finally { + deleteRoleAndRoleMapping(ALERTING_INDEX_MONITOR_ACCESS) + } + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/SecureWorkflowRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/SecureWorkflowRestApiIT.kt new file mode 100644 index 000000000..6d0112c52 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/resthandler/SecureWorkflowRestApiIT.kt @@ -0,0 +1,1421 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.apache.http.HttpHeaders +import org.apache.http.entity.ContentType +import org.apache.http.message.BasicHeader +import org.apache.http.nio.entity.NStringEntity +import org.junit.After +import org.junit.Before +import org.junit.BeforeClass +import org.opensearch.alerting.ALERTING_BASE_URI +import org.opensearch.alerting.ALERTING_DELETE_WORKFLOW_ACCESS +import org.opensearch.alerting.ALERTING_EXECUTE_WORKFLOW_ACCESS +import org.opensearch.alerting.ALERTING_FULL_ACCESS_ROLE +import org.opensearch.alerting.ALERTING_GET_WORKFLOW_ACCESS +import org.opensearch.alerting.ALERTING_INDEX_MONITOR_ACCESS +import org.opensearch.alerting.ALERTING_INDEX_WORKFLOW_ACCESS +import org.opensearch.alerting.ALERTING_NO_ACCESS_ROLE +import org.opensearch.alerting.ALERTING_READ_ONLY_ACCESS +import org.opensearch.alerting.ALWAYS_RUN +import org.opensearch.alerting.AlertingRestTestCase +import org.opensearch.alerting.READALL_AND_MONITOR_ROLE +import org.opensearch.alerting.TERM_DLS_QUERY +import org.opensearch.alerting.TEST_HR_BACKEND_ROLE +import org.opensearch.alerting.TEST_HR_INDEX +import org.opensearch.alerting.TEST_HR_ROLE +import org.opensearch.alerting.TEST_NON_HR_INDEX +import org.opensearch.alerting.WORKFLOW_ALERTING_BASE_URI +import org.opensearch.alerting.assertUserNull +import org.opensearch.alerting.makeRequest +import org.opensearch.alerting.randomBucketLevelMonitor +import org.opensearch.alerting.randomBucketLevelTrigger +import org.opensearch.alerting.randomDocLevelQuery +import org.opensearch.alerting.randomDocumentLevelMonitor +import org.opensearch.alerting.randomDocumentLevelTrigger +import org.opensearch.alerting.randomQueryLevelMonitor +import org.opensearch.alerting.randomWorkflow +import org.opensearch.client.Response +import org.opensearch.client.ResponseException +import org.opensearch.client.RestClient +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import 
org.opensearch.common.xcontent.XContentType +import org.opensearch.common.xcontent.json.JsonXContent +import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder +import org.opensearch.commons.alerting.model.DataSources +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.rest.SecureRestClientBuilder +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.index.query.QueryBuilders +import org.opensearch.script.Script +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder +import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.test.junit.annotations.TestLogging +import java.time.Instant + +@TestLogging("level:DEBUG", reason = "Debug for tests.") +@Suppress("UNCHECKED_CAST") +class SecureWorkflowRestApiIT : AlertingRestTestCase() { + + companion object { + + @BeforeClass + @JvmStatic + fun setup() { + // things to execute once and keep around for the class + org.junit.Assume.assumeTrue(System.getProperty("security", "false")!!.toBoolean()) + } + } + + val user = "userD" + var userClient: RestClient? = null + + @Before + fun create() { + if (userClient == null) { + createUser(user, arrayOf()) + userClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), user, password).setSocketTimeout(60000).build() + } + } + + @After + fun cleanup() { + userClient?.close() + deleteUser(user) + } + + // Create Workflow related security tests + fun `test create workflow with an user with alerting role`() { + val clusterPermissions = listOf( + getClusterPermissionsFromCustomRole(ALERTING_INDEX_WORKFLOW_ACCESS) + ) + + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + clusterPermissions + ) + try { + val monitor = createMonitor( + randomQueryLevelMonitor( + inputs = listOf(SearchInput(listOf(TEST_HR_INDEX), SearchSourceBuilder().query(QueryBuilders.matchAllQuery()))), + ), + true + ) + + val workflow = randomWorkflow( + monitorIds = listOf(monitor.id) + ) + + val createResponse = userClient?.makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) + assertEquals("Create workflow failed", RestStatus.CREATED, createResponse?.restStatus()) + + assertUserNull(createResponse?.asMap()!!["workflow"] as HashMap<String, Any>) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test create workflow with an user without alerting role`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) + ) + try { + val monitor = createRandomMonitor(true) + + val workflow = randomWorkflow( + monitorIds = listOf(monitor.id) + ) + + userClient?.makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test create workflow with an user with read-only role`() { + createUserWithTestData(user, TEST_HR_INDEX, TEST_HR_ROLE, 
TEST_HR_BACKEND_ROLE) + createUserRolesMapping(ALERTING_READ_ONLY_ACCESS, arrayOf(user)) + + try { + val monitor = createRandomMonitor(true) + val workflow = randomWorkflow( + monitorIds = listOf(monitor.id) + ) + userClient?.makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteRoleMapping(ALERTING_READ_ONLY_ACCESS) + } + } + + fun `test create workflow with delegate with an user without index read role`() { + createTestIndex(TEST_NON_HR_INDEX) + val clusterPermissions = listOf( + getClusterPermissionsFromCustomRole(ALERTING_INDEX_WORKFLOW_ACCESS) + ) + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + clusterPermissions + ) + try { + val query = randomDocLevelQuery(tags = listOf()) + val triggers = listOf(randomDocumentLevelTrigger(condition = Script("query[id=\"${query.id}\"]"))) + + val monitor = createMonitor( + randomDocumentLevelMonitor( + inputs = listOf( + DocLevelMonitorInput( + indices = listOf(TEST_NON_HR_INDEX), + queries = listOf(query) + ) + ), + triggers = triggers + ), + true + ) + + val workflow = randomWorkflow( + monitorIds = listOf(monitor.id) + ) + + userClient?.makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteIndex(TEST_NON_HR_INDEX) + } + } + + fun `test create workflow with disable filter by`() { + disableFilterBy() + val monitor = createRandomMonitor(true) + val workflow = randomWorkflow( + monitorIds = listOf(monitor.id) + ) + val createResponse = client().makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) + assertEquals("Create workflow failed", RestStatus.CREATED, createResponse.restStatus()) + assertUserNull(createResponse.asMap()["workflow"] as HashMap<String, Any>) + } + + fun `test get workflow with an user with get workflow role`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) + ) + + val monitor = createRandomMonitor(true) + val workflow = createWorkflow(randomWorkflow(monitorIds = listOf(monitor.id))) + + try { + val getWorkflowResponse = userClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get workflow failed", RestStatus.OK, getWorkflowResponse?.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + /* + TODO: https://github.com/opensearch-project/alerting/issues/300 + */ + fun `test get workflow with an user without get monitor role`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) + ) + + val monitor = createRandomMonitor(true) + val workflow = createWorkflow(randomWorkflow(monitorIds = listOf(monitor.id))) + + try { + userClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected 403 Method 
FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun getDocs(response: Response?): Any? { + val hits = createParser( + XContentType.JSON.xContent(), + response?.entity?.content + ).map()["hits"]!! as Map> + return hits["total"]?.get("value") + } + + // Query Monitors related security tests + fun `test update workflow with disable filter by`() { + disableFilterBy() + + val createdMonitor = createMonitor(monitor = randomQueryLevelMonitor(enabled = true)) + val createdWorkflow = createWorkflow( + randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true, enabledTime = Instant.now()) + ) + + assertNotNull("The workflow was not created", createdWorkflow) + assertTrue("The workflow was not enabled", createdWorkflow.enabled) + + val workflowV2 = createdWorkflow.copy(enabled = false, enabledTime = null) + val updatedWorkflow = updateWorkflow(workflowV2) + + assertFalse("The monitor was not disabled", updatedWorkflow.enabled) + } + + fun `test update workflow with enable filter by`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + + val createdMonitor = createMonitorWithClient( + client = client(), + monitor = randomQueryLevelMonitor(enabled = true), + rbacRoles = listOf("admin") + ) + val createdWorkflow = createWorkflow( + randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true, enabledTime = Instant.now()) + ) + + assertNotNull("The workflow was not created", createdWorkflow) + assertTrue("The workflow was not enabled", createdWorkflow.enabled) + + val workflowV2 = createdWorkflow.copy(enabled = false, enabledTime = null) + val updatedWorkflow = updateWorkflow(workflow = workflowV2) + + assertFalse("The monitor was not disabled", updatedWorkflow.enabled) + } + + fun `test create workflow with enable filter by with a user have access and without role has no access`() { + enableFilterBy() + if (!isHttps()) { + return + } + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient( + userClient!!, + monitor = randomQueryLevelMonitor(enabled = true), + listOf(TEST_HR_BACKEND_ROLE, "role2") + ) + + assertNotNull("The monitor was not created", createdMonitor) + + val createdWorkflow = createWorkflowWithClient( + userClient!!, + workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true), + listOf(TEST_HR_BACKEND_ROLE, "role2") + ) + assertNotNull("The workflow was not created", createdWorkflow) + + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + + // getUser should have access to the monitor + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role2"), + getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) + ) + val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) + .setSocketTimeout(60000).build() + + val getWorkflowResponse = getUserClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get workflow 
failed", RestStatus.OK, getWorkflowResponse?.restStatus()) + + // Remove backend role and ensure no access is granted after + patchUserBackendRoles(getUser, arrayOf("role1")) + try { + getUserClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) + deleteUser(getUser) + getUserClient?.close() + } + } + + fun `test create workflow with enable filter by with a user with a backend role doesn't have access to monitor`() { + enableFilterBy() + if (!isHttps()) { + return + } + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient( + userClient!!, + monitor = randomQueryLevelMonitor(enabled = true), + listOf("role2") + ) + + assertNotNull("The monitor was not created", createdMonitor) + + val userWithDifferentRole = "role3User" + + createUserWithRoles( + userWithDifferentRole, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role3"), + false + ) + + val userWithDifferentRoleClient = SecureRestClientBuilder( + clusterHosts.toTypedArray(), isHttps(), userWithDifferentRole, password + ) + .setSocketTimeout(60000).build() + + try { + createWorkflowWithClient( + userWithDifferentRoleClient!!, + workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true), + listOf("role3") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Create workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) + deleteUser(userWithDifferentRole) + userWithDifferentRoleClient?.close() + } + } + + fun `test create workflow with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = createMonitor(randomQueryLevelMonitor(enabled = true)) + + val workflow = randomWorkflow(monitorIds = listOf(monitor.id)) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + try { + createWorkflowWithClient(userClient!!, workflow, listOf()) + fail("Expected exception since a non-admin user is trying to create a workflow with no backend roles") + } catch (e: ResponseException) { + assertEquals("Create workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test create workflow as admin with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + 
listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitor(monitor = monitor) + val createdWorkflow = createWorkflow(randomWorkflow(monitorIds = listOf(createdMonitor.id))) + assertNotNull("The workflow was not created", createdWorkflow) + + try { + + userClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test create workflow with enable filter by with roles user has no access and throw exception`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = createMonitor(randomQueryLevelMonitor(enabled = true)) + val workflow = randomWorkflow(monitorIds = listOf(monitor.id)) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + try { + createWorkflowWithClient(userClient!!, workflow = workflow, listOf(TEST_HR_BACKEND_ROLE, "role1", "role2")) + fail("Expected create workflow to fail as user does not have role1 backend role") + } catch (e: ResponseException) { + assertEquals("Create workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test create workflow as admin with enable filter by with a user have access and without role has no access`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + val createdMonitor = createMonitorWithClient(client(), monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role1", "role2")) + val createdWorkflow = createWorkflowWithClient( + client(), + randomWorkflow(monitorIds = listOf(createdMonitor.id)), + listOf(TEST_HR_BACKEND_ROLE, "role1", "role2") + ) + assertNotNull("The workflow was not created", createdWorkflow) + + // user should have access to the admin monitor + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) + ) + + val getWorkflowResponse = userClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get workflow failed", RestStatus.OK, getWorkflowResponse?.restStatus()) + + // Remove good backend role and ensure no access is granted after + patchUserBackendRoles(user, arrayOf("role5")) + try { + userClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally 
{ + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test update workflow with enable filter by with removing a permission`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, randomQueryLevelMonitor(), listOf(TEST_HR_BACKEND_ROLE, "role2")) + val createdWorkflow = createWorkflowWithClient( + client = userClient!!, workflow = randomWorkflow(enabled = true, monitorIds = listOf(createdMonitor.id)), + rbacRoles = listOf(TEST_HR_BACKEND_ROLE, "role2") + ) + assertNotNull("The workflow was not created", createdWorkflow) + + // getUser should have access to the workflow + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role2"), + getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) + ) + val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) + .setSocketTimeout(60000).build() + + val getWorkflowResponse = getUserClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get workflow failed", RestStatus.OK, getWorkflowResponse?.restStatus()) + + // Remove backend role from the workflow + val updatedWorkflow = updateWorkflowWithClient(userClient!!, createdWorkflow, listOf(TEST_HR_BACKEND_ROLE)) + + // getUser should no longer have access + try { + getUserClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${updatedWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteUser(getUser) + getUserClient?.close() + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update workflow with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf("role2")) + assertNotNull("The monitor was not created", createdMonitor) + + val createdWorkflow = createWorkflowWithClient( + userClient!!, + randomWorkflow(monitorIds = listOf(createdMonitor.id)), + listOf("role2") + ) + + assertNotNull("The workflow was not created", createdWorkflow) + + try { + updateWorkflowWithClient(userClient!!, createdWorkflow, listOf()) + fail("Expected 403 FORBIDDEN response since a non-admin user cannot remove all backend roles") + } catch (e: ResponseException) { + assertEquals("Update workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test 
update workflow as admin with enable filter by with no backend roles`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + val createdMonitorResponse = createMonitor(monitor, true) + assertNotNull("The monitor was not created", createdMonitorResponse) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + val workflow = randomWorkflow( + monitorIds = listOf(createdMonitorResponse.id) + ) + + val createdWorkflow = createWorkflowWithClient( + client(), + workflow = workflow, + rbacRoles = listOf(TEST_HR_BACKEND_ROLE) + ) + + assertNotNull("The workflow was not created", createdWorkflow) + + val getWorkflowResponse = userClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get workflow failed", RestStatus.OK, getWorkflowResponse?.restStatus()) + + val updatedWorkflow = updateWorkflowWithClient(client(), createdWorkflow, listOf()) + + try { + userClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${updatedWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update workflow with enable filter by with updating with a permission user has no access to and throw exception`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) + assertNotNull("The monitor was not created", createdMonitor) + + val createdWorkflow = createWorkflowWithClient( + userClient!!, + workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id)), listOf(TEST_HR_BACKEND_ROLE, "role2") + ) + + assertNotNull("The workflow was not created", createdWorkflow) + + // getUser should have access to the monitor + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role2"), + getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) + ) + val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) + .setSocketTimeout(60000).build() + + val getWorkflowResponse = getUserClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get workflow failed", RestStatus.OK, getWorkflowResponse?.restStatus()) + + try { + updateWorkflowWithClient(userClient!!, createdWorkflow, listOf(TEST_HR_BACKEND_ROLE, "role1")) + fail("Expected update workflow to fail as user doesn't have access to 
role1") + } catch (e: ResponseException) { + assertEquals("Update workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteUser(getUser) + getUserClient?.close() + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update workflow as another user with enable filter by with removing a permission and adding permission`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE)) + assertNotNull("The monitor was not created", createdMonitor) + + val createdWorkflow = createWorkflowWithClient( + userClient!!, + workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true) + ) + + assertNotNull("The workflow was not created", createdWorkflow) + + // Remove backend role from workflow with new user and add role5 + val updateUser = "updateUser" + createUserWithRoles( + updateUser, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role5"), + false + ) + + val updateUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), updateUser, password) + .setSocketTimeout(60000).build() + val updatedWorkflow = updateWorkflowWithClient(updateUserClient, createdWorkflow, listOf("role5")) + + // old user should no longer have access + try { + userClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${updatedWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteUser(updateUser) + updateUserClient?.close() + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + } + + fun `test update workflow as admin with enable filter by with removing a permission`() { + enableFilterBy() + if (!isHttps()) { + // if security is disabled and filter by is enabled, we can't create monitor + // refer: `test create monitor with enable filter by` + return + } + val monitor = randomQueryLevelMonitor(enabled = true) + + createUserWithRoles( + user, + listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), + listOf(TEST_HR_BACKEND_ROLE, "role2"), + false + ) + + val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) + assertNotNull("The monitor was not created", createdMonitor) + + val createdWorkflow = createWorkflowWithClient( + userClient!!, + workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id)), + listOf(TEST_HR_BACKEND_ROLE, "role2") + ) + assertNotNull("The workflow was not created", createdWorkflow) + + // getUser should have access to the monitor + val getUser = "getUser" + createUserWithTestDataAndCustomRole( + getUser, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf("role1", "role2"), + 
getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) + ) + val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) + .setSocketTimeout(60000).build() + + val getWorkflowResponse = getUserClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + assertEquals("Get workflow failed", RestStatus.OK, getWorkflowResponse?.restStatus()) + + // Remove backend role from monitor + val updatedWorkflow = updateWorkflowWithClient(client(), createdWorkflow, listOf("role4")) + + // original user should no longer have access + try { + userClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${updatedWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) + createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) + } + + // get user should no longer have access + try { + getUserClient?.makeRequest( + "GET", + "$WORKFLOW_ALERTING_BASE_URI/${updatedWorkflow.id}", + null, + BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") + ) + fail("Expected Forbidden exception") + } catch (e: ResponseException) { + assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + deleteUser(getUser) + getUserClient?.close() + } + } + + fun `test delete workflow with disable filter by`() { + disableFilterBy() + val monitor = randomQueryLevelMonitor(enabled = true) + + val createdMonitor = createMonitor(monitor = monitor) + val createdWorkflow = createWorkflow(workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true)) + + assertNotNull("The workflow was not created", createdWorkflow) + assertTrue("The workflow was not enabled", createdWorkflow.enabled) + + deleteWorkflow(workflow = createdWorkflow, deleteDelegates = true) + + val searchMonitor = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", createdMonitor.id)).toString() + // Verify if the delegate monitors are deleted + // search as "admin" - must get 0 docs + val adminMonitorSearchResponse = client().makeRequest( + "POST", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(searchMonitor, ContentType.APPLICATION_JSON) + ) + assertEquals("Search monitor failed", RestStatus.OK, adminMonitorSearchResponse.restStatus()) + + val adminMonitorHits = createParser( + XContentType.JSON.xContent(), + adminMonitorSearchResponse.entity.content + ).map()["hits"]!! 
as Map<String, Map<String, Any>>
+        val adminMonitorDocsFound = adminMonitorHits["total"]?.get("value")
+        assertEquals("Monitor found during search", 0, adminMonitorDocsFound)
+
+        // Verify workflow deletion
+        try {
+            client().makeRequest(
+                "GET",
+                "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}",
+                emptyMap(),
+                null
+            )
+            fail("Workflow found during search")
+        } catch (e: ResponseException) {
+            assertEquals("Get workflow failed", RestStatus.NOT_FOUND.status, e.response.statusLine.statusCode)
+        }
+    }
+
+    fun `test delete workflow with enable filter by`() {
+        enableFilterBy()
+        if (!isHttps()) {
+            // if security is disabled and filter by is enabled, we can't create monitor
+            // refer: `test create monitor with enable filter by`
+            return
+        }
+        val createdMonitor = createMonitorWithClient(
+            monitor = randomQueryLevelMonitor(),
+            client = client(),
+            rbacRoles = listOf("admin")
+        )
+
+        assertNotNull("The monitor was not created", createdMonitor)
+
+        val createdWorkflow = createWorkflow(workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true))
+        assertNotNull("The workflow was not created", createdWorkflow)
+        assertTrue("The workflow was not enabled", createdWorkflow.enabled)
+
+        deleteWorkflow(workflow = createdWorkflow, true)
+
+        // Verify underlying delegates deletion
+        val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", createdMonitor.id)).toString()
+        // search as "admin" - must get 0 docs
+        val adminSearchResponse = client().makeRequest(
+            "POST",
+            "$ALERTING_BASE_URI/_search",
+            emptyMap(),
+            NStringEntity(search, ContentType.APPLICATION_JSON)
+        )
+        assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus())
+
+        val adminHits = createParser(
+            XContentType.JSON.xContent(),
+            adminSearchResponse.entity.content
+        ).map()["hits"]!! as Map<String, Map<String, Any>>
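+        // Editor's aside: an illustrative, side-effect-free sketch, not part of the original change.
+        // The map walk on the next line assumes the standard search response layout, roughly
+        // { "hits": { "total": { "value": <count>, "relation": "eq" }, "hits": [ ... ] } },
+        // so "total" -> "value" is the number of matching documents; a zero-hit total would look like:
+        val sketchedEmptyTotal: Map<String, Any> = mapOf("value" to 0, "relation" to "eq")
+        assertEquals(0, sketchedEmptyTotal["value"])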
+        val adminDocsFound = adminHits["total"]?.get("value")
+        assertEquals("Monitor found during search", 0, adminDocsFound)
+
+        // Verify workflow deletion
+        try {
+            client().makeRequest(
+                "GET",
+                "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}",
+                emptyMap(),
+                null
+            )
+            fail("Workflow found during search")
+        } catch (e: ResponseException) {
+            assertEquals("Get workflow failed", RestStatus.NOT_FOUND.status, e.response.statusLine.statusCode)
+        }
+    }
+
+    fun `test delete workflow with enable filter with user that doesn't have delete_monitor cluster privilege failed`() {
+        enableFilterBy()
+        if (!isHttps()) {
+            // if security is disabled and filter by is enabled, we can't create monitor
+            // refer: `test create monitor with enable filter by`
+            return
+        }
+        createUserWithRoles(
+            user,
+            listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE),
+            listOf(TEST_HR_BACKEND_ROLE, "role2"),
+            false
+        )
+
+        val deleteUser = "deleteUser"
+        createUserWithTestDataAndCustomRole(
+            deleteUser,
+            TEST_HR_INDEX,
+            TEST_HR_ROLE,
+            listOf("role1", "role3"),
+            listOf(
+                getClusterPermissionsFromCustomRole(ALERTING_DELETE_WORKFLOW_ACCESS),
+                getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS)
+            )
+        )
+        val deleteUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), deleteUser, password)
+            .setSocketTimeout(60000).build()
+
+        try {
+            val createdMonitor = createMonitorWithClient(userClient!!, monitor = randomQueryLevelMonitor())
+
+            assertNotNull("The monitor was not created", createdMonitor)
+
+            val createdWorkflow = createWorkflowWithClient(
+                client = userClient!!,
+                workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true)
+            )
+            assertNotNull("The workflow was not created", createdWorkflow)
+            assertTrue("The workflow was not enabled", createdWorkflow.enabled)
+
+            try {
+                deleteWorkflowWithClient(deleteUserClient, workflow = createdWorkflow, true)
+                fail("Expected Forbidden exception")
+            } catch (e: ResponseException) {
+                assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode)
+            }
+            patchUserBackendRoles(deleteUser, arrayOf("role2"))
+
+            val response = deleteWorkflowWithClient(deleteUserClient!!, workflow = createdWorkflow, true)
+            assertEquals("Delete workflow failed", RestStatus.OK, response?.restStatus())
+        } finally {
+            deleteRoleAndRoleMapping(TEST_HR_ROLE)
+            deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE)
+            deleteUser(deleteUser)
+            deleteUserClient?.close()
+        }
+    }
+
+    fun `test execute workflow with an user with execute workflow access`() {
+        createUserWithTestDataAndCustomRole(
+            user,
+            TEST_HR_INDEX,
+            TEST_HR_ROLE,
+            listOf(TEST_HR_BACKEND_ROLE),
+            getClusterPermissionsFromCustomRole(ALERTING_EXECUTE_WORKFLOW_ACCESS)
+        )
+
+        val monitor = createRandomMonitor(true)
+        val workflow = createRandomWorkflow(listOf(monitor.id), true)
+
+        try {
+            val executeWorkflowResponse = userClient?.makeRequest(
+                "POST",
+                "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}/_execute",
+                mutableMapOf()
+            )
+            assertEquals("Executing workflow failed", RestStatus.OK, executeWorkflowResponse?.restStatus())
+        } finally {
+            deleteRoleAndRoleMapping(TEST_HR_ROLE)
+        }
+    }
+
+    fun `test execute workflow with an user without execute workflow access`() {
+        createUserWithTestDataAndCustomRole(
+            user,
+            TEST_HR_INDEX,
+            TEST_HR_ROLE,
+            listOf(TEST_HR_BACKEND_ROLE),
+            getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE)
+        )
+
+        val monitor = createRandomMonitor(true)
+        val workflow =
createRandomWorkflow(listOf(monitor.id), true) + + try { + userClient?.makeRequest( + "POST", + "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}/_execute", + mutableMapOf() + ) + fail("Expected 403 Method FORBIDDEN response") + } catch (e: ResponseException) { + assertEquals("Execute workflow failed", RestStatus.FORBIDDEN, e.response.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test delete workflow with an user with delete workflow access`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_DELETE_WORKFLOW_ACCESS) + ) + + val monitor = createRandomMonitor(true) + val workflow = createRandomWorkflow(monitorIds = listOf(monitor.id)) + val refresh = true + + try { + val deleteWorkflowResponse = userClient?.makeRequest( + "DELETE", + "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}?refresh=$refresh", + emptyMap(), + monitor.toHttpEntity() + ) + assertEquals("DELETE workflow failed", RestStatus.OK, deleteWorkflowResponse?.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } + + fun `test delete workflow with deleting delegates with an user with delete workflow access`() { + createUserWithTestDataAndCustomRole( + user, + TEST_HR_INDEX, + TEST_HR_ROLE, + listOf(TEST_HR_BACKEND_ROLE), + getClusterPermissionsFromCustomRole(ALERTING_DELETE_WORKFLOW_ACCESS) + ) + + val monitor = createRandomMonitor(true) + val workflow = createRandomWorkflow(monitorIds = listOf(monitor.id)) + + try { + val deleteWorkflowResponse = deleteWorkflowWithClient( + userClient!!, + workflow, + deleteDelegates = true, + refresh = true + ) + assertEquals("DELETE workflow failed", RestStatus.OK, deleteWorkflowResponse?.restStatus()) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + // Verify delegate deletion + val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() + // search as "admin" - must get 0 docs + val adminSearchResponse = client().makeRequest( + "POST", + "$ALERTING_BASE_URI/_search", + emptyMap(), + NStringEntity(search, ContentType.APPLICATION_JSON) + ) + assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) + + val adminHits = createParser( + XContentType.JSON.xContent(), + adminSearchResponse.entity.content + ).map()["hits"]!! 
as Map<String, Map<String, Any>>
+        val adminDocsFound = adminHits["total"]?.get("value")
+        assertEquals("Monitor found during search", 0, adminDocsFound)
+    }
+
+    fun `test delete workflow with an user without delete monitor access`() {
+        createUserWithTestDataAndCustomRole(
+            user,
+            TEST_HR_INDEX,
+            TEST_HR_ROLE,
+            listOf(TEST_HR_BACKEND_ROLE),
+            getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE)
+        )
+
+        val monitor = createRandomMonitor(true)
+        val workflow = createRandomWorkflow(monitorIds = listOf(monitor.id))
+
+        try {
+            userClient?.makeRequest(
+                "DELETE",
+                "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}?refresh=true",
+                emptyMap(),
+                monitor.toHttpEntity()
+            )
+            fail("Expected 403 Method FORBIDDEN response")
+        } catch (e: ResponseException) {
+            assertEquals("DELETE workflow failed", RestStatus.FORBIDDEN, e.response.restStatus())
+        } finally {
+            deleteRoleAndRoleMapping(TEST_HR_ROLE)
+        }
+    }
+
+    fun `test admin all access with enable filter by`() {
+        enableFilterBy()
+        createUserWithTestData(user, TEST_HR_INDEX, TEST_HR_ROLE, TEST_HR_BACKEND_ROLE)
+        createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf(user))
+        try {
+            // randomMonitor has a dummy user, api ignores the User passed as part of monitor, it picks user info from the logged-in user.
+            val monitor = randomQueryLevelMonitor().copy(
+                inputs = listOf(
+                    SearchInput(
+                        indices = listOf(TEST_HR_INDEX),
+                        query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery())
+                    )
+                )
+            )
+
+            val createResponse = userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity())
+            assertEquals("Create monitor failed", RestStatus.CREATED, createResponse?.restStatus())
+            val monitorJson = JsonXContent.jsonXContent.createParser(
+                NamedXContentRegistry.EMPTY,
+                LoggingDeprecationHandler.INSTANCE,
+                createResponse?.entity?.content
+            ).map()
+            val monitorId = monitorJson["_id"] as String
+
+            val workflow = randomWorkflow(monitorIds = listOf(monitorId))
+            val createWorkflowResponse = userClient?.makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity())
+            assertEquals("Create workflow failed", RestStatus.CREATED, createWorkflowResponse?.restStatus())
+
+            val workflowJson = JsonXContent.jsonXContent.createParser(
+                NamedXContentRegistry.EMPTY,
+                LoggingDeprecationHandler.INSTANCE,
+                createWorkflowResponse?.entity?.content
+            ).map()
+
+            val id: String = workflowJson["_id"] as String
+            val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", id)).toString()
+
+            // get as "admin" - must get 1 docs
+            val adminGetResponse = client().makeRequest(
+                "GET",
+                "$WORKFLOW_ALERTING_BASE_URI/$id",
+                emptyMap(),
+                NStringEntity(search, ContentType.APPLICATION_JSON)
+            )
+            assertEquals("Get workflow failed", RestStatus.OK, adminGetResponse.restStatus())
+
+            // delete as "admin"
+            val adminDeleteResponse = client().makeRequest(
+                "DELETE",
+                "$WORKFLOW_ALERTING_BASE_URI/$id",
+                emptyMap(),
+                NStringEntity(search, ContentType.APPLICATION_JSON)
+            )
+            assertEquals("Delete workflow failed", RestStatus.OK, adminDeleteResponse.restStatus())
+        } finally {
+            deleteRoleAndRoleMapping(TEST_HR_ROLE)
+            deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE)
+        }
+    }
+
+    fun `test execute workflow with bucket-level and doc-level chained monitors with user having partial index permissions`() {
+        createUser(user, arrayOf(TEST_HR_BACKEND_ROLE))
+        createTestIndex(TEST_HR_INDEX)
+
+        createIndexRoleWithDocLevelSecurity(
+            TEST_HR_ROLE,
+            TEST_HR_INDEX,
+            TERM_DLS_QUERY,
+            listOf(ALERTING_INDEX_WORKFLOW_ACCESS, ALERTING_INDEX_MONITOR_ACCESS)
+ ) + createUserRolesMapping(TEST_HR_ROLE, arrayOf(user)) + + // Add a doc that is accessible to the user + indexDoc( + TEST_HR_INDEX, + "1", + """ + { + "test_field": "a", + "accessible": true + } + """.trimIndent() + ) + + // Add a second doc that is not accessible to the user + indexDoc( + TEST_HR_INDEX, + "2", + """ + { + "test_field": "b", + "accessible": false + } + """.trimIndent() + ) + + indexDoc( + TEST_HR_INDEX, + "3", + """ + { + "test_field": "c", + "accessible": true + } + """.trimIndent() + ) + + val compositeSources = listOf( + TermsValuesSourceBuilder("test_field").field("test_field") + ) + val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) + val input = SearchInput( + indices = listOf(TEST_HR_INDEX), + query = SearchSourceBuilder().size(0).query(QueryBuilders.matchAllQuery()).aggregation(compositeAgg) + ) + val triggerScript = """ + params.docCount > 0 + """.trimIndent() + + var trigger = randomBucketLevelTrigger() + trigger = trigger.copy( + bucketSelector = BucketSelectorExtAggregationBuilder( + name = trigger.id, + bucketsPathsMap = mapOf("docCount" to "_count"), + script = Script(triggerScript), + parentBucketPath = "composite_agg", + filter = null + ), + actions = listOf() + ) + val bucketMonitor = createMonitorWithClient( + userClient!!, + randomBucketLevelMonitor( + inputs = listOf(input), + enabled = false, + triggers = listOf(trigger), + dataSources = DataSources(findingsEnabled = true) + ) + ) + assertNotNull("The bucket monitor was not created", bucketMonitor) + + val docQuery1 = DocLevelQuery(query = "test_field:\"a\"", name = "3", fields = listOf()) + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(DocLevelMonitorInput("description", listOf(TEST_HR_INDEX), listOf(docQuery1))), + triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)) + ) + val docMonitor = createMonitorWithClient(userClient!!, monitor1)!! 
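+        // Editor's aside: an illustrative, side-effect-free sketch, not part of the original change.
+        // The randomWorkflow(...) helper used below is assumed to wrap its monitor ids in a
+        // CompositeInput whose sequence chains each delegate to the previous one's findings,
+        // roughly equivalent to (fully qualified to avoid extra imports):
+        val sketchedSequence = org.opensearch.commons.alerting.model.Sequence(
+            delegates = listOf(
+                org.opensearch.commons.alerting.model.Delegate(1, bucketMonitor.id),
+                org.opensearch.commons.alerting.model.Delegate(
+                    2, docMonitor.id,
+                    org.opensearch.commons.alerting.model.ChainedMonitorFindings(bucketMonitor.id)
+                )
+            )
+        )
+        assertEquals("Sketch should order the bucket monitor first", 2, sketchedSequence.delegates.size)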
+ assertNotNull("The doc level monitor was not created", docMonitor) + + val workflow = randomWorkflow(monitorIds = listOf(bucketMonitor.id, docMonitor.id)) + val workflowResponse = createWorkflowWithClient(userClient!!, workflow) + assertNotNull("The workflow was not created", workflowResponse) + + try { + executeWorkflow(workflowId = workflowResponse.id) + val bucketAlerts = searchAlerts(bucketMonitor) + assertEquals("Incorrect number of alerts", 0, bucketAlerts.size) + + val docAlerts = searchAlerts(docMonitor) + assertEquals("Incorrect number of alerts", 0, docAlerts.size) + } finally { + deleteRoleAndRoleMapping(TEST_HR_ROLE) + } + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/WorkflowRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/WorkflowRestApiIT.kt new file mode 100644 index 000000000..8c073c4b6 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/resthandler/WorkflowRestApiIT.kt @@ -0,0 +1,1188 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.resthandler + +import org.junit.Assert +import org.opensearch.alerting.ALWAYS_RUN +import org.opensearch.alerting.AlertingRestTestCase +import org.opensearch.alerting.WORKFLOW_ALERTING_BASE_URI +import org.opensearch.alerting.makeRequest +import org.opensearch.alerting.randomBucketLevelMonitor +import org.opensearch.alerting.randomChainedAlertTrigger +import org.opensearch.alerting.randomDocumentLevelMonitor +import org.opensearch.alerting.randomDocumentLevelTrigger +import org.opensearch.alerting.randomQueryLevelMonitor +import org.opensearch.alerting.randomQueryLevelTrigger +import org.opensearch.alerting.randomUser +import org.opensearch.alerting.randomWorkflow +import org.opensearch.alerting.randomWorkflowWithDelegates +import org.opensearch.client.ResponseException +import org.opensearch.commons.alerting.model.ChainedAlertTrigger +import org.opensearch.commons.alerting.model.ChainedMonitorFindings +import org.opensearch.commons.alerting.model.CompositeInput +import org.opensearch.commons.alerting.model.Delegate +import org.opensearch.commons.alerting.model.DocLevelMonitorInput +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.commons.alerting.model.IntervalSchedule +import org.opensearch.commons.alerting.model.Monitor +import org.opensearch.commons.alerting.model.SearchInput +import org.opensearch.commons.alerting.model.Workflow +import org.opensearch.core.rest.RestStatus +import org.opensearch.index.query.QueryBuilders +import org.opensearch.script.Script +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.test.junit.annotations.TestLogging +import java.time.Instant +import java.time.temporal.ChronoUnit +import java.util.Collections +import java.util.Locale +import java.util.UUID + +@TestLogging("level:DEBUG", reason = "Debug for tests.") +@Suppress("UNCHECKED_CAST") +class WorkflowRestApiIT : AlertingRestTestCase() { + + fun `test create workflow success`() { + val index = createTestIndex() + val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery1) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val 
monitorResponse = createMonitor(monitor)
+
+        val workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id)
+        )
+
+        val createResponse = client().makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity())
+
+        assertEquals("Create workflow failed", RestStatus.CREATED, createResponse.restStatus())
+
+        val responseBody = createResponse.asMap()
+        val createdId = responseBody["_id"] as String
+        val createdVersion = responseBody["_version"] as Int
+
+        assertNotEquals("response is missing Id", Workflow.NO_ID, createdId)
+        assertTrue("incorrect version", createdVersion > 0)
+        assertEquals("Incorrect Location header", "$WORKFLOW_ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location"))
+    }
+
+    fun `test create workflow with different monitor types success`() {
+        val index = createTestIndex()
+        val docQuery = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(docQuery)
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+        val docLevelMonitorResponse = createMonitor(monitor)
+
+        val bucketLevelMonitor = randomBucketLevelMonitor(
+            inputs = listOf(
+                SearchInput(
+                    listOf(index),
+                    SearchSourceBuilder().query(QueryBuilders.matchAllQuery())
+                        .aggregation(TermsAggregationBuilder("test_agg").field("test_field"))
+                )
+            )
+        )
+        val bucketLevelMonitorResponse = createMonitor(bucketLevelMonitor)
+
+        val workflow = randomWorkflow(
+            monitorIds = listOf(docLevelMonitorResponse.id, bucketLevelMonitorResponse.id),
+            triggers = listOf(
+                randomChainedAlertTrigger(condition = Script("trigger1")),
+                randomChainedAlertTrigger(condition = Script("trigger2"))
+            )
+        )
+
+        val createResponse = client().makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity())
+
+        assertEquals("Create workflow failed", RestStatus.CREATED, createResponse.restStatus())
+
+        val responseBody = createResponse.asMap()
+        val createdId = responseBody["_id"] as String
+        val createdVersion = responseBody["_version"] as Int
+
+        assertNotEquals("response is missing Id", Workflow.NO_ID, createdId)
+        assertTrue("incorrect version", createdVersion > 0)
+        assertEquals("Incorrect Location header", "$WORKFLOW_ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location"))
+
+        val workflowById = getWorkflow(createdId)
+        assertNotNull(workflowById)
+
+        // Verify workflow
+        assertNotEquals("response is missing Id", Monitor.NO_ID, workflowById.id)
+        assertTrue("incorrect version", workflowById.version > 0)
+        assertEquals("Workflow name not correct", workflow.name, workflowById.name)
+        assertEquals("Workflow owner not correct", workflow.owner, workflowById.owner)
+        assertEquals("Workflow input not correct", workflow.inputs, workflowById.inputs)
+
+        // Delegate verification
+        @Suppress("UNCHECKED_CAST")
+        val delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order }
+        assertEquals("Delegates size not correct", 2, delegates.size)
+
+        val delegate1 = delegates[0]
+        assertNotNull(delegate1)
+        assertEquals("Delegate1 order not correct", 1, delegate1.order)
+        assertEquals("Delegate1 id not correct", docLevelMonitorResponse.id, delegate1.monitorId)
+
+        val delegate2 = delegates[1]
+        assertNotNull(delegate2)
+        assertEquals("Delegate2 order not correct", 2, delegate2.order)
+        assertEquals("Delegate2 id not correct",
bucketLevelMonitorResponse.id, delegate2.monitorId) + assertEquals( + "Delegate2 Chained finding not correct", docLevelMonitorResponse.id, delegate2.chainedMonitorFindings!!.monitorId + ) + + assertEquals(workflowById.triggers.size, 2) + assertTrue(workflowById.triggers[0] is ChainedAlertTrigger) + assertTrue(workflowById.triggers[1] is ChainedAlertTrigger) + assertTrue((workflowById.triggers[0] as ChainedAlertTrigger).condition == Script("trigger1")) + assertTrue((workflowById.triggers[1] as ChainedAlertTrigger).condition == Script("trigger2")) + } + + fun `test create workflow without delegate failure`() { + val workflow = randomWorkflow( + monitorIds = Collections.emptyList() + ) + try { + createWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Delegates list can not be empty.") + ) + } + } + } + + fun `test create workflow duplicate delegate failure`() { + val workflow = randomWorkflow( + monitorIds = listOf("1", "1", "2") + ) + try { + createWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Duplicate delegates not allowed") + ) + } + } + } + + fun `test create workflow delegate monitor doesn't exist failure`() { + val index = createTestIndex() + val docQuery = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(docQuery) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val docLevelMonitorResponse = createMonitor(monitor) + + val workflow = randomWorkflow( + monitorIds = listOf("-1", docLevelMonitorResponse.id) + ) + try { + createWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("are not valid monitor ids") + ) + } + } + } + + fun `test create workflow sequence order not correct failure`() { + val delegates = listOf( + Delegate(1, "monitor-1"), + Delegate(1, "monitor-2"), + Delegate(2, "monitor-3") + ) + val workflow = randomWorkflowWithDelegates( + delegates = delegates + ) + try { + createWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Sequence ordering of delegate monitor shouldn't contain duplicate order values") + ) + } + } + } + + fun `test create workflow chained findings monitor not in sequence failure`() { + val delegates = listOf( + Delegate(1, "monitor-1"), + Delegate(2, "monitor-2", ChainedMonitorFindings("monitor-1")), + Delegate(3, "monitor-3", ChainedMonitorFindings("monitor-x")) + ) + val workflow = randomWorkflowWithDelegates( + delegates = delegates + ) + + try { + createWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not 
returning IndexWorkflow Action error ", + it.contains("Chained Findings Monitor monitor-x doesn't exist in sequence") + ) + } + } + } + + fun `test create workflow chained findings order not correct failure`() { + val delegates = listOf( + Delegate(1, "monitor-1"), + Delegate(3, "monitor-2", ChainedMonitorFindings("monitor-1")), + Delegate(2, "monitor-3", ChainedMonitorFindings("monitor-2")) + ) + val workflow = randomWorkflowWithDelegates( + delegates = delegates + ) + + try { + createWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Chained Findings Monitor monitor-2 should be executed before monitor monitor-3") + ) + } + } + } + + fun `test create workflow when monitor index not initialized failure`() { + val delegates = listOf( + Delegate(1, "monitor-1") + ) + val workflow = randomWorkflowWithDelegates( + delegates = delegates + ) + + try { + createWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.NOT_FOUND, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Monitors not found") + ) + } + } + } + + fun `test create workflow delegate and chained finding monitor different indices failure`() { + val index = randomAlphaOfLength(10).lowercase(Locale.ROOT) + createTestIndex(index) + + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val docMonitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val docMonitorResponse = createMonitor(docMonitor) + + val index1 = "$index-1" + createTestIndex(index1) + + val docLevelInput1 = DocLevelMonitorInput( + "description", listOf(index1), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + + val docMonitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger) + ) + val docMonitorResponse1 = createMonitor(docMonitor1) + + val workflow = randomWorkflow( + monitorIds = listOf(docMonitorResponse1.id, docMonitorResponse.id) + ) + try { + createWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("doesn't query all of chained findings monitor's indices") + ) + } + } + } + + fun `test create workflow query monitor chained findings monitor failure`() { + val index = createTestIndex() + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val docMonitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val docMonitorResponse = createMonitor(docMonitor) + + val queryMonitor = randomQueryLevelMonitor() + val queryMonitorResponse = createMonitor(queryMonitor) + + val workflow = randomWorkflow( + monitorIds = listOf(queryMonitorResponse.id, docMonitorResponse.id) + ) + try { + createWorkflow(workflow) + } catch (e: 
ResponseException) {
+            assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus())
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Query level monitor can't be part of chained findings")
+                )
+            }
+        }
+    }
+
+    fun `test create workflow with 26 delegates failure`() {
+        val monitorsIds = mutableListOf<String>()
+        for (i in 0..25) {
+            monitorsIds.add(UUID.randomUUID().toString())
+        }
+        val workflow = randomWorkflow(
+            monitorIds = monitorsIds
+        )
+        try {
+            createWorkflow(workflow)
+        } catch (e: ResponseException) {
+            assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus())
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning IndexWorkflow Action error ",
+                    it.contains("Delegates list can not be larger then 25.")
+                )
+            }
+        }
+    }
+
+    fun `test update workflow add monitor success`() {
+        val index = createTestIndex()
+        val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(docQuery1)
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+        val monitorResponse = createMonitor(monitor)
+
+        val workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id)
+        )
+
+        val createResponse = client().makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity())
+
+        assertEquals("Create workflow failed", RestStatus.CREATED, createResponse.restStatus())
+
+        val responseBody = createResponse.asMap()
+        val createdId = responseBody["_id"] as String
+        val createdVersion = responseBody["_version"] as Int
+
+        assertNotEquals("response is missing Id", Workflow.NO_ID, createdId)
+        assertTrue("incorrect version", createdVersion > 0)
+        assertEquals("Incorrect Location header", "$WORKFLOW_ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location"))
+
+        val monitor2 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+
+        val monitorResponse2 = createMonitor(monitor2)
+
+        val updatedWorkflow = randomWorkflow(
+            id = createdId,
+            monitorIds = listOf(monitorResponse.id, monitorResponse2.id)
+        )
+
+        val updateResponse = client().makeRequest("PUT", updatedWorkflow.relativeUrl(), emptyMap(), updatedWorkflow.toHttpEntity())
+
+        assertEquals("Update workflow failed", RestStatus.OK, updateResponse.restStatus())
+
+        val updateResponseBody = updateResponse.asMap()
+        val updatedId = updateResponseBody["_id"] as String
+        val updatedVersion = updateResponseBody["_version"] as Int
+
+        assertNotEquals("response is missing Id", Workflow.NO_ID, updatedId)
+        assertTrue("incorrect version", updatedVersion > 0)
+
+        val workflowById = getWorkflow(updatedId)
+        assertNotNull(workflowById)
+        // Delegate verification
+        @Suppress("UNCHECKED_CAST")
+        val delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order }
+        assertEquals("Delegates size not correct", 2, delegates.size)
+
+        val delegate1 = delegates[0]
+        assertNotNull(delegate1)
+        assertEquals("Delegate1 order not correct", 1, delegate1.order)
+        assertEquals("Delegate1 id not correct", monitorResponse.id, delegate1.monitorId)
+
+        val delegate2 = delegates[1]
+        assertNotNull(delegate2)
+        assertEquals("Delegate2 order not correct", 2, delegate2.order)
+        assertEquals("Delegate2 id not correct", monitorResponse2.id, delegate2.monitorId)
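+        // Editor's aside: an illustrative, side-effect-free sketch, not part of the original change.
+        // The assertions around this point rely on the convention that a delegate appended via
+        // update is chained to the previous delegate's findings, i.e. the expected
+        // (monitorId, chainedTo) pairs here are:
+        val expectedChain: List<Pair<String, String?>> = listOf(
+            monitorResponse.id to null,
+            monitorResponse2.id to monitorResponse.id
+        )
+        assertEquals("Sketch should mirror the two delegates asserted here", 2, expectedChain.size)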
+        assertEquals(
+            "Delegate2 Chained finding not correct", monitorResponse.id, delegate2.chainedMonitorFindings!!.monitorId
+        )
+    }
+
+    fun `test update workflow remove monitor success`() {
+        val index = createTestIndex()
+        val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(docQuery1)
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+        val monitorResponse = createMonitor(monitor)
+
+        val monitor2 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+
+        val monitorResponse2 = createMonitor(monitor2)
+
+        val workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id, monitorResponse2.id)
+        )
+
+        val createResponse = client().makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity())
+
+        assertEquals("Create workflow failed", RestStatus.CREATED, createResponse.restStatus())
+
+        val responseBody = createResponse.asMap()
+        val createdId = responseBody["_id"] as String
+        val createdVersion = responseBody["_version"] as Int
+
+        assertNotEquals("response is missing Id", Workflow.NO_ID, createdId)
+        assertTrue("incorrect version", createdVersion > 0)
+        assertEquals("Incorrect Location header", "$WORKFLOW_ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location"))
+
+        var workflowById = getWorkflow(createdId)
+        assertNotNull(workflowById)
+        // Delegate verification
+        @Suppress("UNCHECKED_CAST")
+        var delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order }
+        assertEquals("Delegates size not correct", 2, delegates.size)
+
+        val updatedWorkflow = randomWorkflow(
+            id = createdId,
+            monitorIds = listOf(monitorResponse.id)
+        )
+
+        val updateResponse = client().makeRequest("PUT", updatedWorkflow.relativeUrl(), emptyMap(), updatedWorkflow.toHttpEntity())
+
+        assertEquals("Update workflow failed", RestStatus.OK, updateResponse.restStatus())
+
+        val updateResponseBody = updateResponse.asMap()
+        val updatedId = updateResponseBody["_id"] as String
+        val updatedVersion = updateResponseBody["_version"] as Int
+
+        assertNotEquals("response is missing Id", Workflow.NO_ID, updatedId)
+        assertTrue("incorrect version", updatedVersion > 0)
+
+        workflowById = getWorkflow(updatedId)
+        assertNotNull(workflowById)
+        // Delegate verification
+        @Suppress("UNCHECKED_CAST")
+        delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order }
+        assertEquals("Delegates size not correct", 1, delegates.size)
+
+        val delegate1 = delegates[0]
+        assertNotNull(delegate1)
+        assertEquals("Delegate1 order not correct", 1, delegate1.order)
+        assertEquals("Delegate1 id not correct", monitorResponse.id, delegate1.monitorId)
+    }
+
+    fun `test update workflow change order of delegate monitors`() {
+        val index = createTestIndex()
+        val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(docQuery1)
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+        val monitor1 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+
+        val monitor2 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger)
+        )
+
+        val monitorResponse1 = createMonitor(monitor1)
+        val monitorResponse2 = createMonitor(monitor2)
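+        // Editor's aside (illustrative; not part of the original change): the workflow below is
+        // created with monitorIds = [monitor1, monitor2], which is assumed to yield Delegate(1, m1)
+        // and Delegate(2, m2, ChainedMonitorFindings(m1)). The update later in this test reverses
+        // the id list, so the assertions at the end expect both the order values and the chained
+        // findings to swap accordingly.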
+
+        val workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse1.id, monitorResponse2.id)
+        )
+
+        val workflowResponse = createWorkflow(workflow)
+        assertNotNull("Workflow creation failed", workflowResponse)
+        assertNotNull(workflow)
+        assertNotEquals("response is missing Id", Monitor.NO_ID, workflowResponse.id)
+
+        var workflowById = getWorkflow(workflowResponse.id)
+        assertNotNull(workflowById)
+
+        val updatedWorkflowResponse = updateWorkflow(
+            randomWorkflow(
+                id = workflowById.id,
+                monitorIds = listOf(monitorResponse2.id, monitorResponse1.id)
+            )
+        )
+
+        assertNotNull("Workflow creation failed", updatedWorkflowResponse)
+        assertNotNull(updatedWorkflowResponse)
+        assertEquals(
+            "Workflow id changed",
+            workflowResponse.id,
+            updatedWorkflowResponse.id
+        )
+        assertTrue("incorrect version", updatedWorkflowResponse.version > 0)
+
+        workflowById = getWorkflow(updatedWorkflowResponse.id)
+
+        // Verify workflow
+        assertNotEquals("response is missing Id", Monitor.NO_ID, workflowById.id)
+        assertTrue("incorrect version", workflowById.version > 0)
+        assertEquals(
+            "Workflow name not correct",
+            updatedWorkflowResponse.name,
+            workflowById.name
+        )
+        assertEquals(
+            "Workflow owner not correct",
+            updatedWorkflowResponse.owner,
+            workflowById.owner
+        )
+        assertEquals(
+            "Workflow input not correct",
+            updatedWorkflowResponse.inputs,
+            workflowById.inputs
+        )
+
+        // Delegate verification
+        @Suppress("UNCHECKED_CAST")
+        val delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order }
+        assertEquals("Delegates size not correct", 2, delegates.size)
+
+        val delegate1 = delegates[0]
+        assertNotNull(delegate1)
+        assertEquals("Delegate1 order not correct", 1, delegate1.order)
+        assertEquals("Delegate1 id not correct", monitorResponse2.id, delegate1.monitorId)
+
+        val delegate2 = delegates[1]
+        assertNotNull(delegate2)
+        assertEquals("Delegate2 order not correct", 2, delegate2.order)
+        assertEquals("Delegate2 id not correct", monitorResponse1.id, delegate2.monitorId)
+        assertEquals(
+            "Delegate2 Chained finding not correct", monitorResponse2.id, delegate2.chainedMonitorFindings!!.monitorId
+        )
+    }
+
+    fun `test update workflow doesn't exist failure`() {
+        val index = createTestIndex()
+        val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(docQuery1)
+        )
+        val monitor1 = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN))
+        )
+
+        val monitorResponse1 = createMonitor(monitor1)
+
+        val workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse1.id)
+        )
+        val workflowResponse = createWorkflow(workflow)
+        assertNotNull("Workflow creation failed", workflowResponse)
+
+        try {
+            updateWorkflow(workflow.copy(id = "testId"))
+        } catch (e: ResponseException) {
+            assertEquals("Unexpected status", RestStatus.NOT_FOUND, e.response.restStatus())
+            e.message?.let {
+                assertTrue(
+                    "Exception not returning GetWorkflow Action error ",
+                    it.contains("Workflow with testId is not found")
+                )
+            }
+        }
+        val updatedWorkflow = updateWorkflow(workflowResponse.copy(enabled = true, enabledTime = Instant.now()))
+        assertNotNull(updatedWorkflow)
+        val getWorkflow = getWorkflow(workflowId = updatedWorkflow.id)
+        assertTrue(getWorkflow.enabled)
+    }
+
+    fun `test update workflow duplicate delegate failure`() {
+        val
index = createTestIndex() + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + + val monitorResponse = createMonitor(monitor) + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + + val workflowResponse = createWorkflow(workflow) + assertNotNull("Workflow creation failed", workflowResponse) + + workflow = randomWorkflow( + id = workflowResponse.id, + monitorIds = listOf("1", "1", "2") + ) + try { + updateWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Duplicate delegates not allowed") + ) + } + } + } + + fun `test update workflow delegate monitor doesn't exist failure`() { + val index = createTestIndex() + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val monitorResponse = createMonitor(monitor) + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + val workflowResponse = createWorkflow(workflow) + assertNotNull("Workflow creation failed", workflowResponse) + + workflow = randomWorkflow( + id = workflowResponse.id, + monitorIds = listOf("-1", monitorResponse.id) + ) + + try { + updateWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("are not valid monitor ids") + ) + } + } + } + + fun `test update workflow sequence order not correct failure`() { + val index = createTestIndex() + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val monitorResponse = createMonitor(monitor) + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + val workflowResponse = createWorkflow(workflow) + assertNotNull("Workflow creation failed", workflowResponse) + + val delegates = listOf( + Delegate(1, "monitor-1"), + Delegate(1, "monitor-2"), + Delegate(2, "monitor-3") + ) + workflow = randomWorkflowWithDelegates( + id = workflowResponse.id, + delegates = delegates + ) + try { + updateWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Sequence ordering of delegate monitor shouldn't contain duplicate order values") + ) + } + } + } + + fun `test update workflow chained findings monitor not in sequence failure`() { + val index = createTestIndex() + val docLevelInput = DocLevelMonitorInput( + 
"description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val monitorResponse = createMonitor(monitor) + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + val workflowResponse = createWorkflow(workflow) + assertNotNull("Workflow creation failed", workflowResponse) + + val delegates = listOf( + Delegate(1, "monitor-1"), + Delegate(2, "monitor-2", ChainedMonitorFindings("monitor-1")), + Delegate(3, "monitor-3", ChainedMonitorFindings("monitor-x")) + ) + workflow = randomWorkflowWithDelegates( + id = workflowResponse.id, + delegates = delegates + ) + + try { + updateWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Chained Findings Monitor monitor-x doesn't exist in sequence") + ) + } + } + } + + fun `test update workflow chained findings order not correct failure`() { + val index = createTestIndex() + val docLevelInput = DocLevelMonitorInput( + "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) + ) + val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + + val monitor = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput), + triggers = listOf(trigger) + ) + val monitorResponse = createMonitor(monitor) + + var workflow = randomWorkflow( + monitorIds = listOf(monitorResponse.id) + ) + val workflowResponse = createWorkflow(workflow) + assertNotNull("Workflow creation failed", workflowResponse) + + val delegates = listOf( + Delegate(1, "monitor-1"), + Delegate(3, "monitor-2", ChainedMonitorFindings("monitor-1")), + Delegate(2, "monitor-3", ChainedMonitorFindings("monitor-2")) + ) + workflow = randomWorkflowWithDelegates( + id = workflowResponse.id, + delegates = delegates + ) + + try { + updateWorkflow(workflow) + } catch (e: ResponseException) { + assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning IndexWorkflow Action error ", + it.contains("Chained Findings Monitor monitor-2 should be executed before monitor monitor-3") + ) + } + } + } + + @Throws(Exception::class) + fun `test getting a workflow`() { + val query = randomQueryLevelMonitor() + val monitor = createMonitor(query) + val storedMonitor = getMonitor(monitor.id) + + assertEquals("Indexed and retrieved monitor differ", monitor, storedMonitor) + + val workflow = createRandomWorkflow(monitorIds = listOf(monitor.id)) + + val storedWorkflow = getWorkflow(workflow.id) + + assertEquals("Indexed and retrieved workflow differ", workflow.id, storedWorkflow.id) + val delegates = (storedWorkflow.inputs[0] as CompositeInput).sequence.delegates + assertEquals("Delegate list not correct", 1, delegates.size) + assertEquals("Delegate order id not correct", 1, delegates[0].order) + assertEquals("Delegate id list not correct", monitor.id, delegates[0].monitorId) + } + + @Throws(Exception::class) + fun `test getting a workflow that doesn't exist`() { + try { + getWorkflow(randomAlphaOfLength(20)) + fail("expected response exception") + } catch (e: ResponseException) { + assertEquals(RestStatus.NOT_FOUND, 
e.response.restStatus()) + } + } + + fun `test delete workflow`() { + val query = randomQueryLevelMonitor() + val monitor = createMonitor(query) + + val workflowRequest = randomWorkflow( + monitorIds = listOf(monitor.id) + ) + val workflowResponse = createWorkflow(workflowRequest) + val workflowId = workflowResponse.id + val getWorkflowResponse = getWorkflow(workflowResponse.id) + + assertNotNull(getWorkflowResponse) + assertEquals(workflowId, getWorkflowResponse.id) + + client().makeRequest("DELETE", getWorkflowResponse.relativeUrl()) + + // Verify that the workflow is deleted + try { + getWorkflow(workflowId) + } catch (e: ResponseException) { + assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning GetWorkflow Action error ", + it.contains("Workflow not found.") + ) + } + } + } + + fun `test delete workflow delete delegate monitors`() { + val query = randomQueryLevelMonitor() + val monitor = createMonitor(query) + + val workflowRequest = randomWorkflow( + monitorIds = listOf(monitor.id) + ) + val workflowResponse = createWorkflow(workflowRequest) + val workflowId = workflowResponse.id + val getWorkflowResponse = getWorkflow(workflowResponse.id) + + assertNotNull(getWorkflowResponse) + assertEquals(workflowId, getWorkflowResponse.id) + + client().makeRequest("DELETE", getWorkflowResponse.relativeUrl().plus("?deleteDelegateMonitors=true")) + + // Verify that the workflow is deleted + try { + getWorkflow(workflowId) + } catch (e: ResponseException) { + assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning GetWorkflow Action error ", + it.contains("Workflow not found.") + ) + } + } + + // Verify that delegate monitor is deleted + try { + getMonitor(monitor.id) + } catch (e: ResponseException) { + assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning GetWorkflow Action error ", + it.contains("Monitor not found.") + ) + } + } + } + + fun `test delete workflow preserve delegate monitors`() { + val query = randomQueryLevelMonitor() + val monitor = createMonitor(query) + + val workflowRequest = randomWorkflow( + monitorIds = listOf(monitor.id) + ) + val workflowResponse = createWorkflow(workflowRequest) + val workflowId = workflowResponse.id + val getWorkflowResponse = getWorkflow(workflowResponse.id) + + assertNotNull(getWorkflowResponse) + assertEquals(workflowId, getWorkflowResponse.id) + + client().makeRequest("DELETE", getWorkflowResponse.relativeUrl().plus("?deleteDelegateMonitors=false")) + + // Verify that the workflow is deleted + try { + getWorkflow(workflowId) + } catch (e: ResponseException) { + assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) + e.message?.let { + assertTrue( + "Exception not returning GetWorkflow Action error ", + it.contains("Workflow not found.") + ) + } + } + + // Verify that delegate monitor is not deleted + val delegateMonitor = getMonitor(monitor.id) + assertNotNull(delegateMonitor) + } + + @Throws(Exception::class) + fun `test deleting a workflow that doesn't exist`() { + try { + client().makeRequest("DELETE", "$WORKFLOW_ALERTING_BASE_URI/foobarbaz") + fail("expected 404 ResponseException") + } catch (e: ResponseException) { + assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) + } + } + + fun `test chained alerts and audit alerts for workflows with query level monitor`() { + val index = createTestIndex() + val docQuery1 = DocLevelQuery(query = 
"test_field:\"test_value_1\"", name = "3", fields = listOf()) + val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) + val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) + var monitor1 = randomDocumentLevelMonitor( + inputs = listOf(docLevelInput1), + triggers = listOf(trigger1), + enabled = false + ) + val monitorResponse = createMonitor(monitor1)!! + var monitor2 = randomQueryLevelMonitor( + triggers = listOf(randomQueryLevelTrigger(condition = Script("return true"))), + enabled = false + ) + + val monitorResponse2 = createMonitor(monitor2)!! + val andTrigger = randomChainedAlertTrigger( + name = "1And2", + condition = Script("monitor[id=${monitorResponse.id}] && monitor[id=${monitorResponse2.id}]") + ) + + val workflow = Workflow( + id = "", + version = 2, + name = "test", + enabled = false, + schedule = IntervalSchedule(5, ChronoUnit.MINUTES), + lastUpdateTime = Instant.now(), + enabledTime = null, + workflowType = Workflow.WorkflowType.COMPOSITE, + user = randomUser(), + schemaVersion = -1, + inputs = listOf( + CompositeInput( + org.opensearch.commons.alerting.model.Sequence( + delegates = listOf( + Delegate(1, monitorResponse.id), + Delegate(2, monitorResponse2.id) + ) + ) + ) + ), + owner = "alerting", + triggers = listOf(andTrigger) + ) + val workflowById = createWorkflow(workflow) + assertNotNull(workflowById) + val workflowId = workflowById.id + + insertSampleTimeSerializedData( + index, + listOf( + "test_value_1" + ) + ) + val searchMonitorResponse = searchMonitors() + logger.error(searchMonitorResponse) + val jobsList = searchMonitorResponse.hits.toList() + var numMonitors = 0 + var numWorkflows = 0 + jobsList.forEach { + val map = it.sourceAsMap + if (map["type"] == "workflow") numWorkflows++ + else if (map["type"] == "monitor") numMonitors++ + } + Assert.assertEquals(numMonitors, 2) + Assert.assertEquals(numWorkflows, 1) + val response = executeWorkflow(workflowId = workflowId, params = emptyMap()) + val executeWorkflowResponse = entityAsMap(response) + logger.info(executeWorkflowResponse) + val executionId = executeWorkflowResponse["execution_id"] + Assert.assertTrue(executeWorkflowResponse.containsKey("trigger_results")) + val workflowTriggerResults = executeWorkflowResponse["trigger_results"] as Map + assertEquals(workflowTriggerResults.size, 1) + assertTrue( + (workflowTriggerResults[andTrigger.id] as Map)["triggered"] as Boolean + ) + val res = getWorkflowAlerts(workflowId = workflowId, getAssociatedAlerts = true) + val getWorkflowAlerts = entityAsMap(res) + Assert.assertTrue(getWorkflowAlerts.containsKey("alerts")) + Assert.assertTrue(getWorkflowAlerts.containsKey("associatedAlerts")) + val alerts = getWorkflowAlerts["alerts"] as List> + assertEquals(alerts.size, 1) + Assert.assertEquals(alerts[0]["execution_id"], executionId) + Assert.assertEquals(alerts[0]["workflow_id"], workflowId) + Assert.assertEquals(alerts[0]["monitor_id"], "") + val associatedAlerts = getWorkflowAlerts["associatedAlerts"] as List> + assertEquals(associatedAlerts.size, 2) + + val res1 = getWorkflowAlerts(workflowId = workflowId, alertId = alerts[0]["id"].toString(), getAssociatedAlerts = true) + val getWorkflowAlerts1 = entityAsMap(res1) + Assert.assertTrue(getWorkflowAlerts1.containsKey("alerts")) + Assert.assertTrue(getWorkflowAlerts1.containsKey("associatedAlerts")) + val alerts1 = getWorkflowAlerts1["alerts"] as List> + assertEquals(alerts1.size, 1) + Assert.assertEquals(alerts1[0]["execution_id"], executionId) + 
+
+        val getAlertsRes = getAlerts()
+        val getAlertsMap = getAlertsRes.asMap()
+        Assert.assertTrue(getAlertsMap.containsKey("alerts"))
+        val getAlertsAlerts = (getAlertsMap["alerts"] as ArrayList<Map<String, Any>>)
+        assertEquals(getAlertsAlerts.size, 1)
+        Assert.assertEquals(getAlertsAlerts[0]["execution_id"], executionId)
+        Assert.assertEquals(getAlertsAlerts[0]["workflow_id"], workflowId)
+        Assert.assertEquals(getAlertsAlerts[0]["monitor_id"], "")
+        Assert.assertEquals(getAlertsAlerts[0]["id"], alerts1[0]["id"])
+
+        val ackRes = acknowledgeChainedAlerts(workflowId, alerts1[0]["id"].toString())
+        val acknowledgeChainedAlertsResponse = entityAsMap(ackRes)
+        val acknowledged = acknowledgeChainedAlertsResponse["success"] as List<String>
+        Assert.assertEquals(acknowledged[0], alerts1[0]["id"])
+    }
+
+    fun `test run workflow as scheduled job success`() {
+        val index = createTestIndex()
+        val docQuery1 = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
+        val docLevelInput = DocLevelMonitorInput(
+            "description", listOf(index), listOf(docQuery1)
+        )
+        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
+
+        val monitor = randomDocumentLevelMonitor(
+            inputs = listOf(docLevelInput),
+            triggers = listOf(trigger),
+            enabled = false
+        )
+        val monitorResponse = createMonitor(monitor)
+
+        val workflow = randomWorkflow(
+            monitorIds = listOf(monitorResponse.id),
+            enabled = true,
+            schedule = IntervalSchedule(1, ChronoUnit.MINUTES)
+        )
+
+        val createResponse = client().makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity())
+
+        assertEquals("Create workflow failed", RestStatus.CREATED, createResponse.restStatus())
+
+        val responseBody = createResponse.asMap()
+        val createdId = responseBody["_id"] as String
+        val createdVersion = responseBody["_version"] as Int
+
+        assertNotEquals("response is missing Id", Workflow.NO_ID, createdId)
+        assertTrue("incorrect version", createdVersion > 0)
+        assertEquals("Incorrect Location header", "$WORKFLOW_ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location"))
+
+        val testDoc = """{
+            "message" : "This is an error from IAD region",
+            "test_field" : "us-west-2"
+        }"""
+
+        indexDoc(index, "1", testDoc)
+        Thread.sleep(80000)
+
+        val findings = searchFindings(monitor.copy(id = monitorResponse.id))
+        assertEquals("Findings saved for test monitor", 1, findings.size)
+    }
+}
diff --git a/alerting/bin/test/org/opensearch/alerting/settings/AlertingSettingsTests.kt b/alerting/bin/test/org/opensearch/alerting/settings/AlertingSettingsTests.kt
new file mode 100644
index 000000000..6ee8c4997
--- /dev/null
+++ b/alerting/bin/test/org/opensearch/alerting/settings/AlertingSettingsTests.kt
@@ -0,0 +1,189 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.settings
+
+import org.junit.Before
+import org.opensearch.alerting.AlertingPlugin
+import org.opensearch.alerting.core.settings.LegacyOpenDistroScheduledJobSettings
+import org.opensearch.alerting.core.settings.ScheduledJobSettings
+import org.opensearch.common.settings.Settings
+import org.opensearch.common.unit.TimeValue
+import org.opensearch.test.OpenSearchTestCase
+import java.util.concurrent.TimeUnit
+
+class AlertingSettingsTests : OpenSearchTestCase() {
+
+    private lateinit
var plugin: AlertingPlugin + + @Before + fun setup() { + plugin = AlertingPlugin() + } + + fun `test all opendistro settings returned`() { + val settings = plugin.settings + assertTrue( + "Legacy Settings are not returned", + settings.containsAll( + listOf( + LegacyOpenDistroAlertingSettings.ALERTING_MAX_MONITORS, + LegacyOpenDistroAlertingSettings.INPUT_TIMEOUT, + LegacyOpenDistroAlertingSettings.INDEX_TIMEOUT, + LegacyOpenDistroAlertingSettings.BULK_TIMEOUT, + LegacyOpenDistroAlertingSettings.ALERT_BACKOFF_MILLIS, + LegacyOpenDistroAlertingSettings.ALERT_BACKOFF_COUNT, + LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_MILLIS, + LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_COUNT, + LegacyOpenDistroAlertingSettings.ALERT_HISTORY_ENABLED, + LegacyOpenDistroAlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD, + LegacyOpenDistroAlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE, + LegacyOpenDistroAlertingSettings.ALERT_HISTORY_MAX_DOCS, + LegacyOpenDistroAlertingSettings.ALERT_HISTORY_RETENTION_PERIOD, + LegacyOpenDistroAlertingSettings.REQUEST_TIMEOUT, + LegacyOpenDistroAlertingSettings.MAX_ACTION_THROTTLE_VALUE, + LegacyOpenDistroAlertingSettings.FILTER_BY_BACKEND_ROLES, + LegacyOpenDistroScheduledJobSettings.SWEEP_PERIOD, + LegacyOpenDistroScheduledJobSettings.SWEEP_PAGE_SIZE, + LegacyOpenDistroScheduledJobSettings.SWEEP_BACKOFF_RETRY_COUNT, + LegacyOpenDistroScheduledJobSettings.SWEEP_BACKOFF_MILLIS, + LegacyOpenDistroScheduledJobSettings.SWEEPER_ENABLED, + LegacyOpenDistroScheduledJobSettings.REQUEST_TIMEOUT + ) + ) + ) + } + + fun `test all opensearch settings returned`() { + val settings = plugin.settings + assertTrue( + "Opensearch settings not returned", + settings.containsAll( + listOf( + DestinationSettings.ALLOW_LIST, + DestinationSettings.HOST_DENY_LIST, + AlertingSettings.ALERTING_MAX_MONITORS, + AlertingSettings.INPUT_TIMEOUT, + AlertingSettings.INDEX_TIMEOUT, + AlertingSettings.BULK_TIMEOUT, + AlertingSettings.ALERT_BACKOFF_MILLIS, + AlertingSettings.ALERT_BACKOFF_COUNT, + AlertingSettings.MOVE_ALERTS_BACKOFF_MILLIS, + AlertingSettings.MOVE_ALERTS_BACKOFF_COUNT, + AlertingSettings.ALERT_HISTORY_ENABLED, + AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD, + AlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE, + AlertingSettings.ALERT_HISTORY_MAX_DOCS, + AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD, + AlertingSettings.REQUEST_TIMEOUT, + AlertingSettings.MAX_ACTION_THROTTLE_VALUE, + AlertingSettings.FILTER_BY_BACKEND_ROLES, + ScheduledJobSettings.SWEEP_PERIOD, + ScheduledJobSettings.SWEEP_PAGE_SIZE, + ScheduledJobSettings.SWEEP_BACKOFF_RETRY_COUNT, + ScheduledJobSettings.SWEEP_BACKOFF_MILLIS, + ScheduledJobSettings.SWEEPER_ENABLED, + ScheduledJobSettings.REQUEST_TIMEOUT + ) + ) + ) + } + + fun `test opendistro settings fallback`() { + assertEquals( + AlertingSettings.MOVE_ALERTS_BACKOFF_COUNT.get(Settings.EMPTY), + LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_COUNT.get(Settings.EMPTY) + ) + assertEquals( + ScheduledJobSettings.REQUEST_TIMEOUT.get(Settings.EMPTY), + LegacyOpenDistroScheduledJobSettings.REQUEST_TIMEOUT.get(Settings.EMPTY) + ) + } + + fun `test settings get Value`() { + val settings = Settings.builder().put("plugins.alerting.move_alerts_backoff_count", 1).build() + assertEquals(AlertingSettings.MOVE_ALERTS_BACKOFF_COUNT.get(settings), 1) + assertEquals(LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_COUNT.get(settings), 3) + val scheduledJobSettings = Settings.builder().put("plugins.scheduled_jobs.enabled", false).build() + 
assertEquals(ScheduledJobSettings.SWEEPER_ENABLED.get(scheduledJobSettings), false) + assertEquals(LegacyOpenDistroScheduledJobSettings.SWEEPER_ENABLED.get(scheduledJobSettings), true) + } + + fun `test settings get value with legacy Fallback`() { + val settings = Settings.builder() + .put("opendistro.alerting.monitor.max_monitors", 1000) + .put("opendistro.alerting.input_timeout", TimeValue.timeValueSeconds(30)) + .put("opendistro.alerting.index_timeout", TimeValue.timeValueSeconds(60)) + .put("opendistro.alerting.bulk_timeout", TimeValue.timeValueSeconds(120)) + .put("opendistro.alerting.alert_backoff_millis", TimeValue.timeValueMillis(50)) + .put("opendistro.alerting.alert_backoff_count", 2) + .put("opendistro.alerting.move_alerts_backoff_millis", TimeValue.timeValueMillis(250)) + .put("opendistro.alerting.move_alerts_backoff_count", 3) + .put("opendistro.alerting.alert_history_enabled", true) + .put("opendistro.alerting.alert_history_rollover_period", TimeValue.timeValueHours(12)) + .put("opendistro.alerting.alert_history_max_age", TimeValue(30, TimeUnit.DAYS)) + .put("opendistro.alerting.alert_history_max_docs", 1000L) + .put("opendistro.alerting.alert_history_retention_period", TimeValue(60, TimeUnit.DAYS)) + .put("opendistro.alerting.request_timeout", TimeValue.timeValueSeconds(10)) + .put("opendistro.alerting.action_throttle_max_value", TimeValue.timeValueHours(24)) + .put("opendistro.alerting.filter_by_backend_roles", false) + .put("opendistro.scheduled_jobs.enabled", false) + .put("opendistro.scheduled_jobs.request_timeout", TimeValue.timeValueSeconds(10)) + .put("opendistro.scheduled_jobs.sweeper.backoff_millis", TimeValue.timeValueMillis(50)) + .put("opendistro.scheduled_jobs.retry_count", 3) + .put("opendistro.scheduled_jobs.sweeper.period", TimeValue.timeValueMinutes(5)) + .put("opendistro.scheduled_jobs.sweeper.page_size", 100).build() + + assertEquals(AlertingSettings.ALERTING_MAX_MONITORS.get(settings), 1000) + assertEquals(AlertingSettings.INPUT_TIMEOUT.get(settings), TimeValue.timeValueSeconds(30)) + assertEquals(AlertingSettings.INDEX_TIMEOUT.get(settings), TimeValue.timeValueSeconds(60)) + assertEquals(AlertingSettings.BULK_TIMEOUT.get(settings), TimeValue.timeValueSeconds(120)) + assertEquals(AlertingSettings.ALERT_BACKOFF_MILLIS.get(settings), TimeValue.timeValueMillis(50)) + assertEquals(AlertingSettings.ALERT_BACKOFF_COUNT.get(settings), 2) + assertEquals(AlertingSettings.MOVE_ALERTS_BACKOFF_MILLIS.get(settings), TimeValue.timeValueMillis(250)) + assertEquals(AlertingSettings.MOVE_ALERTS_BACKOFF_COUNT.get(settings), 3) + assertEquals(AlertingSettings.ALERT_HISTORY_ENABLED.get(settings), true) + assertEquals(AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.get(settings), TimeValue.timeValueHours(12)) + assertEquals(AlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE.get(settings), TimeValue(30, TimeUnit.DAYS)) + assertEquals(AlertingSettings.ALERT_HISTORY_MAX_DOCS.get(settings), 1000L) + assertEquals(AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD.get(settings), TimeValue(60, TimeUnit.DAYS)) + assertEquals(AlertingSettings.REQUEST_TIMEOUT.get(settings), TimeValue.timeValueSeconds(10)) + assertEquals(AlertingSettings.MAX_ACTION_THROTTLE_VALUE.get(settings), TimeValue.timeValueHours(24)) + assertEquals(AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings), false) + assertEquals(ScheduledJobSettings.SWEEPER_ENABLED.get(settings), false) + assertEquals(ScheduledJobSettings.REQUEST_TIMEOUT.get(settings), TimeValue.timeValueSeconds(10)) + 
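+        // How the fallback asserted in this test is wired: each plugins.* setting is declared
+        // with its legacy opendistro.* setting as the fallback, so reading the new key returns
+        // the legacy value when only the legacy key is set. A minimal sketch, assuming the
+        // standard OpenSearch Setting API (not necessarily the plugin's exact declaration):
+        //
+        //     val SWEEPER_ENABLED: Setting<Boolean> = Setting.boolSetting(
+        //         "plugins.scheduled_jobs.enabled",
+        //         LegacyOpenDistroScheduledJobSettings.SWEEPER_ENABLED, // legacy setting as fallback
+        //         Setting.Property.NodeScope,
+        //         Setting.Property.Dynamic
+        //     )
+        //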
assertEquals(ScheduledJobSettings.SWEEP_BACKOFF_MILLIS.get(settings), TimeValue.timeValueMillis(50)) + assertEquals(ScheduledJobSettings.SWEEP_BACKOFF_RETRY_COUNT.get(settings), 3) + assertEquals(ScheduledJobSettings.SWEEP_PERIOD.get(settings), TimeValue.timeValueMinutes(5)) + assertEquals(ScheduledJobSettings.SWEEP_PAGE_SIZE.get(settings), 100) + + assertSettingDeprecationsAndWarnings( + arrayOf( + LegacyOpenDistroAlertingSettings.ALERTING_MAX_MONITORS, + LegacyOpenDistroAlertingSettings.INPUT_TIMEOUT, + LegacyOpenDistroAlertingSettings.INDEX_TIMEOUT, + LegacyOpenDistroAlertingSettings.BULK_TIMEOUT, + LegacyOpenDistroAlertingSettings.ALERT_BACKOFF_MILLIS, + LegacyOpenDistroAlertingSettings.ALERT_BACKOFF_COUNT, + LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_MILLIS, + LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_COUNT, + LegacyOpenDistroAlertingSettings.ALERT_HISTORY_ENABLED, + LegacyOpenDistroAlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD, + LegacyOpenDistroAlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE, + LegacyOpenDistroAlertingSettings.ALERT_HISTORY_MAX_DOCS, + LegacyOpenDistroAlertingSettings.ALERT_HISTORY_RETENTION_PERIOD, + LegacyOpenDistroAlertingSettings.REQUEST_TIMEOUT, + LegacyOpenDistroAlertingSettings.MAX_ACTION_THROTTLE_VALUE, + LegacyOpenDistroAlertingSettings.FILTER_BY_BACKEND_ROLES, + LegacyOpenDistroScheduledJobSettings.SWEEPER_ENABLED, + LegacyOpenDistroScheduledJobSettings.REQUEST_TIMEOUT, + LegacyOpenDistroScheduledJobSettings.SWEEP_BACKOFF_MILLIS, + LegacyOpenDistroScheduledJobSettings.SWEEP_BACKOFF_RETRY_COUNT, + LegacyOpenDistroScheduledJobSettings.SWEEP_PAGE_SIZE, + LegacyOpenDistroScheduledJobSettings.SWEEP_PERIOD + ) + ) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/settings/DestinationSettingsTests.kt b/alerting/bin/test/org/opensearch/alerting/settings/DestinationSettingsTests.kt new file mode 100644 index 000000000..2e96c1fad --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/settings/DestinationSettingsTests.kt @@ -0,0 +1,73 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.settings + +import org.junit.Before +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.common.settings.Settings +import org.opensearch.test.OpenSearchTestCase + +class DestinationSettingsTests : OpenSearchTestCase() { + private lateinit var plugin: AlertingPlugin + + @Before + fun setup() { + plugin = AlertingPlugin() + } + + fun `test all opendistro destination settings returned`() { + val settings = plugin.settings + assertTrue( + "Legacy Settings are not returned", + settings.containsAll( + listOf( + LegacyOpenDistroDestinationSettings.ALLOW_LIST, + LegacyOpenDistroDestinationSettings.HOST_DENY_LIST + ) + ) + ) + } + + fun `test all opensearch destination settings returned`() { + val settings = plugin.settings + assertTrue( + "Opensearch settings not returned", + settings.containsAll( + listOf( + DestinationSettings.ALLOW_LIST, + DestinationSettings.HOST_DENY_LIST + ) + ) + ) + } + + fun `test opendistro settings fallback`() { + assertEquals( + DestinationSettings.ALLOW_LIST.get(Settings.EMPTY), + LegacyOpenDistroDestinationSettings.ALLOW_LIST.get(Settings.EMPTY) + ) + assertEquals( + DestinationSettings.HOST_DENY_LIST.get(Settings.EMPTY), + LegacyOpenDistroDestinationSettings.HOST_DENY_LIST.get(Settings.EMPTY) + ) + } + + fun `test settings get Value with legacy fallback`() { + val settings = Settings.builder() + 
.putList("opendistro.alerting.destination.allow_list", listOf("1")) + .putList("opendistro.destination.host.deny_list", emptyList()).build() + + assertEquals(DestinationSettings.ALLOW_LIST.get(settings), listOf("1")) + assertEquals(DestinationSettings.HOST_DENY_LIST.get(settings), emptyList()) + + assertSettingDeprecationsAndWarnings( + arrayOf( + LegacyOpenDistroDestinationSettings.ALLOW_LIST, + LegacyOpenDistroDestinationSettings.HOST_DENY_LIST + ) + ) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/transport/AlertingSingleNodeTestCase.kt b/alerting/bin/test/org/opensearch/alerting/transport/AlertingSingleNodeTestCase.kt new file mode 100644 index 000000000..f1f8882f7 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/transport/AlertingSingleNodeTestCase.kt @@ -0,0 +1,503 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.transport + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope +import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest +import org.opensearch.action.admin.indices.get.GetIndexRequest +import org.opensearch.action.admin.indices.get.GetIndexRequestBuilder +import org.opensearch.action.admin.indices.get.GetIndexResponse +import org.opensearch.action.admin.indices.refresh.RefreshAction +import org.opensearch.action.admin.indices.refresh.RefreshRequest +import org.opensearch.action.support.IndicesOptions +import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.AlertingPlugin +import org.opensearch.alerting.action.ExecuteMonitorAction +import org.opensearch.alerting.action.ExecuteMonitorRequest +import org.opensearch.alerting.action.ExecuteMonitorResponse +import org.opensearch.alerting.action.ExecuteWorkflowAction +import org.opensearch.alerting.action.ExecuteWorkflowRequest +import org.opensearch.alerting.action.ExecuteWorkflowResponse +import org.opensearch.alerting.alerts.AlertIndices +import org.opensearch.alerting.model.MonitorMetadata +import org.opensearch.alerting.model.WorkflowMetadata +import org.opensearch.common.settings.Settings +import org.opensearch.common.unit.TimeValue +import org.opensearch.common.xcontent.XContentFactory +import org.opensearch.common.xcontent.XContentType +import org.opensearch.common.xcontent.json.JsonXContent +import org.opensearch.commons.alerting.action.AlertingActions +import org.opensearch.commons.alerting.action.DeleteMonitorRequest +import org.opensearch.commons.alerting.action.DeleteWorkflowRequest +import org.opensearch.commons.alerting.action.GetFindingsRequest +import org.opensearch.commons.alerting.action.GetFindingsResponse +import org.opensearch.commons.alerting.action.GetMonitorRequest +import org.opensearch.commons.alerting.action.GetWorkflowAlertsRequest +import org.opensearch.commons.alerting.action.GetWorkflowAlertsResponse +import org.opensearch.commons.alerting.action.GetWorkflowRequest +import org.opensearch.commons.alerting.action.GetWorkflowResponse +import org.opensearch.commons.alerting.action.IndexMonitorRequest +import org.opensearch.commons.alerting.action.IndexMonitorResponse +import org.opensearch.commons.alerting.action.IndexWorkflowRequest +import org.opensearch.commons.alerting.action.IndexWorkflowResponse +import org.opensearch.commons.alerting.model.Alert +import org.opensearch.commons.alerting.model.Finding +import org.opensearch.commons.alerting.model.Monitor +import 
+import org.opensearch.commons.alerting.model.Table
+import org.opensearch.commons.alerting.model.Workflow
+import org.opensearch.core.xcontent.XContentBuilder
+import org.opensearch.core.xcontent.XContentParser
+import org.opensearch.index.IndexService
+import org.opensearch.index.query.BoolQueryBuilder
+import org.opensearch.index.query.TermQueryBuilder
+import org.opensearch.index.reindex.ReindexPlugin
+import org.opensearch.index.seqno.SequenceNumbers
+import org.opensearch.join.ParentJoinPlugin
+import org.opensearch.painless.PainlessPlugin
+import org.opensearch.plugins.Plugin
+import org.opensearch.rest.RestRequest
+import org.opensearch.script.mustache.MustachePlugin
+import org.opensearch.search.builder.SearchSourceBuilder
+import org.opensearch.search.fetch.subphase.FetchSourceContext
+import org.opensearch.test.OpenSearchSingleNodeTestCase
+import java.time.Instant
+import java.time.ZonedDateTime
+import java.time.format.DateTimeFormatter
+import java.time.temporal.ChronoUnit
+import java.util.Locale
+import java.util.concurrent.TimeUnit
+
+/**
+ * A test case that keeps a singleton node started for all tests, which can be used to get
+ * references to Guice injectors in unit tests.
+ */
+@ThreadLeakScope(ThreadLeakScope.Scope.NONE)
+abstract class AlertingSingleNodeTestCase : OpenSearchSingleNodeTestCase() {
+
+    protected val index: String = randomAlphaOfLength(10).lowercase(Locale.ROOT)
+
+    override fun setUp() {
+        super.setUp()
+        createTestIndex()
+    }
+
+    protected fun getAllIndicesFromPattern(pattern: String): List<String> {
+        val getIndexResponse = (
+            client().admin().indices().prepareGetIndex()
+                .setIndices(pattern) as GetIndexRequestBuilder
+            ).get() as GetIndexResponse
+        return getIndexResponse.indices().toList()
+    }
+
+    protected fun executeMonitor(monitor: Monitor, id: String?, dryRun: Boolean = true): ExecuteMonitorResponse? {
+        val request = ExecuteMonitorRequest(dryRun, TimeValue(Instant.now().toEpochMilli()), id, monitor)
+        return client().execute(ExecuteMonitorAction.INSTANCE, request).get()
+    }
+
+    protected fun insertSampleTimeSerializedData(index: String, data: List<String>) {
+        data.forEachIndexed { i, value ->
+            val twoMinsAgo = ZonedDateTime.now().minus(2, ChronoUnit.MINUTES).truncatedTo(ChronoUnit.MILLIS)
+            val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(twoMinsAgo)
+            val testDoc = """
+                {
+                  "test_strict_date_time": "$testTime",
+                  "test_field_1": "$value",
+                  "number": "$i"
+                }
+            """.trimIndent()
+            // Indexing documents with deterministic doc id to allow for easy selected deletion during testing
+            indexDoc(index, (i + 1).toString(), testDoc)
+        }
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    fun Map<String, Any>.stringMap(key: String): Map<String, Any>? {
+        val map = this as Map<String, Map<String, Any>>
+        return map[key]
+    }
+
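+    // Usage sketch for the helpers above (hypothetical values): index a few time-serialized
+    // docs, then drill into a nested object of a fetched _source map with stringMap:
+    //
+    //     insertSampleTimeSerializedData(index, listOf("Error", "Error detected"))  // docs "1" and "2"
+    //     val source: Map<String, Any> = hitSourceAsMap   // hypothetical fetched _source map
+    //     val user = source.stringMap("user")             // nested "user" object, or null
+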
+    /** A test index that can be used across tests. Feel free to add new fields but don't remove any. */
+    protected fun createTestIndex() {
+        val mapping = XContentFactory.jsonBuilder()
+        mapping.startObject()
+            .startObject("properties")
+            .startObject("test_strict_date_time")
+            .field("type", "date")
+            .field("format", "strict_date_time")
+            .endObject()
+            .startObject("test_field_1")
+            .field("type", "keyword")
+            .endObject()
+            .endObject()
+            .endObject()
+
+        createIndex(
+            index, Settings.EMPTY, mapping
+        )
+    }
+
+    protected fun createTestIndex(index: String) {
+        val mapping = XContentFactory.jsonBuilder()
+        mapping.startObject()
+            .startObject("properties")
+            .startObject("test_strict_date_time")
+            .field("type", "date")
+            .field("format", "strict_date_time")
+            .endObject()
+            .startObject("test_field_1")
+            .field("type", "keyword")
+            .endObject()
+            .endObject()
+            .endObject()
+
+        createIndex(
+            index, Settings.EMPTY, mapping
+        )
+    }
+
+    private fun createIndex(
+        index: String?,
+        settings: Settings?,
+        mappings: XContentBuilder?,
+    ): IndexService? {
+        val createIndexRequestBuilder = client().admin().indices().prepareCreate(index).setSettings(settings)
+        if (mappings != null) {
+            createIndexRequestBuilder.setMapping(mappings)
+        }
+        return this.createIndex(index, createIndexRequestBuilder)
+    }
+
+    protected fun indexDoc(index: String, id: String, doc: String) {
+        client().prepareIndex(index).setId(id)
+            .setSource(doc, XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get()
+    }
+
+    protected fun assertIndexExists(index: String) {
+        val getIndexResponse =
+            client().admin().indices().getIndex(
+                GetIndexRequest().indices(index).indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_HIDDEN)
+            ).get()
+        assertTrue(getIndexResponse.indices.size > 0)
+    }
+
+    protected fun assertIndexNotExists(index: String) {
+        val getIndexResponse =
+            client().admin().indices().getIndex(
+                GetIndexRequest().indices(index).indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_HIDDEN)
+            ).get()
+        assertFalse(getIndexResponse.indices.size > 0)
+    }
+
+    protected fun assertAliasNotExists(alias: String) {
+        val aliasesResponse = client().admin().indices().getAliases(GetAliasesRequest()).get()
+        aliasesResponse.aliases.values.forEach { aliasInfos ->
+            aliasInfos.forEach { aliasInfo ->
+                if (aliasInfo.alias == alias) {
+                    fail("alias exists, but it shouldn't")
+                }
+            }
+        }
+    }
+
+    protected fun assertAliasExists(alias: String) {
+        val aliasesResponse = client().admin().indices().getAliases(GetAliasesRequest()).get()
+        aliasesResponse.aliases.values.forEach { aliasInfos ->
+            aliasInfos.forEach { aliasInfo ->
+                if (aliasInfo.alias == alias) {
+                    return
+                }
+            }
+        }
+        fail("alias doesn't exist, but it should")
+    }
+
+    protected fun createMonitor(monitor: Monitor): IndexMonitorResponse? {
+        val request = IndexMonitorRequest(
+            monitorId = Monitor.NO_ID,
+            seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO,
+            primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM,
+            refreshPolicy = WriteRequest.RefreshPolicy.parse("true"),
+            method = RestRequest.Method.POST,
+            monitor = monitor
+        )
+        return client().execute(AlertingActions.INDEX_MONITOR_ACTION_TYPE, request).actionGet()
+    }
+    protected fun updateMonitor(monitor: Monitor, monitorId: String): IndexMonitorResponse? {
+        val request = IndexMonitorRequest(
+            monitorId = monitorId,
+            seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO,
+            primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM,
+            refreshPolicy = WriteRequest.RefreshPolicy.parse("true"),
+            method = RestRequest.Method.PUT,
+            monitor = monitor
+        )
+        return client().execute(AlertingActions.INDEX_MONITOR_ACTION_TYPE, request).actionGet()
+    }
+
+    protected fun deleteMonitor(monitorId: String): Boolean {
+        client().execute(
+            AlertingActions.DELETE_MONITOR_ACTION_TYPE, DeleteMonitorRequest(monitorId, WriteRequest.RefreshPolicy.IMMEDIATE)
+        ).get()
+        return true
+    }
+
+    protected fun searchAlerts(
+        monitorId: String,
+        indices: String = AlertIndices.ALERT_INDEX,
+        refresh: Boolean = true,
+        executionId: String? = null,
+    ): List<Alert> {
+        try {
+            if (refresh) refreshIndex(indices)
+        } catch (e: Exception) {
+            logger.warn("Could not refresh index $indices because: ${e.message}")
+            return emptyList()
+        }
+        val ssb = SearchSourceBuilder()
+        ssb.version(true)
+        val bqb = BoolQueryBuilder()
+        bqb.must(TermQueryBuilder(Alert.MONITOR_ID_FIELD, monitorId))
+        if (!executionId.isNullOrEmpty()) {
+            bqb.must(TermQueryBuilder(Alert.EXECUTION_ID_FIELD, executionId))
+        }
+        ssb.query(bqb)
+        val searchResponse = client().prepareSearch(indices).setRouting(monitorId).setSource(ssb).get()
+
+        return searchResponse.hits.hits.map {
+            val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() }
+            Alert.parse(xcp, it.id, it.version)
+        }
+    }
+
+    protected fun getWorkflowAlerts(
+        workflowId: String,
+        getAssociatedAlerts: Boolean? = true,
+        alertState: Alert.State? = Alert.State.ACTIVE,
+        alertIndex: String? = "",
+        associatedAlertsIndex: String? = "",
+        alertIds: List<String>? = emptyList(),
+        table: Table? = Table("asc", "monitor_id", null, 100, 0, null),
+    ): GetWorkflowAlertsResponse {
+        return client().execute(
+            AlertingActions.GET_WORKFLOW_ALERTS_ACTION_TYPE,
+            GetWorkflowAlertsRequest(
+                table = table!!,
+                severityLevel = "ALL",
+                alertState = alertState!!.name,
+                alertIndex = alertIndex,
+                associatedAlertsIndex = associatedAlertsIndex,
+                monitorIds = emptyList(),
+                workflowIds = listOf(workflowId),
+                alertIds = alertIds,
+                getAssociatedAlerts = getAssociatedAlerts!!
+            )
+        ).get()
+    }
+
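+    // Usage sketch (hypothetical ids): fetch the ACTIVE chained alerts of one workflow together
+    // with the audit alerts of its delegate monitors. The accessors shown follow the
+    // GetWorkflowAlertsResponse model, but treat the exact property names as assumptions:
+    //
+    //     val response = getWorkflowAlerts(workflowId = "wf-1", getAssociatedAlerts = true)
+    //     val chainedAlerts = response.alerts            // workflow-level (chained) alerts
+    //     val auditAlerts = response.associatedAlerts    // delegate monitor alerts
+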
+    protected fun refreshIndex(index: String) {
+        client().execute(RefreshAction.INSTANCE, RefreshRequest(index)).get()
+    }
+
+    protected fun searchFindings(
+        id: String,
+        indices: String = AlertIndices.ALL_FINDING_INDEX_PATTERN,
+        refresh: Boolean = true,
+    ): List<Finding> {
+        if (refresh) refreshIndex(indices)
+
+        val ssb = SearchSourceBuilder()
+        ssb.version(true)
+        ssb.query(TermQueryBuilder(Alert.MONITOR_ID_FIELD, id))
+        val searchResponse = client().prepareSearch(indices).setRouting(id).setSource(ssb).get()
+
+        return searchResponse.hits.hits.map {
+            val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() }
+            Finding.parse(xcp)
+        }.filter { finding -> finding.monitorId == id }
+    }
+
+    protected fun getFindings(
+        findingId: String,
+        monitorId: String?,
+        findingIndexName: String?,
+    ): List<Finding> {
+
+        val getFindingsRequest = GetFindingsRequest(
+            findingId,
+            Table("asc", "monitor_id", null, 100, 0, null),
+            monitorId,
+            findingIndexName
+        )
+        val getFindingsResponse: GetFindingsResponse = client().execute(AlertingActions.GET_FINDINGS_ACTION_TYPE, getFindingsRequest).get()
+
+        return getFindingsResponse.findings.map { it.finding }.toList()
+    }
+
+    protected fun getMonitorResponse(
+        monitorId: String,
+        version: Long = 1L,
+        fetchSourceContext: FetchSourceContext = FetchSourceContext.FETCH_SOURCE,
+    ) = client().execute(
+        AlertingActions.GET_MONITOR_ACTION_TYPE,
+        GetMonitorRequest(monitorId, version, RestRequest.Method.GET, fetchSourceContext)
+    ).get()
+
+    override fun getPlugins(): List<Class<out Plugin>> {
+        return listOf(
+            AlertingPlugin::class.java,
+            ReindexPlugin::class.java,
+            MustachePlugin::class.java,
+            PainlessPlugin::class.java,
+            ParentJoinPlugin::class.java
+        )
+    }
+
+    protected fun deleteIndex(index: String) {
+        val response = client().admin().indices().delete(DeleteIndexRequest(index)).get()
+        assertTrue("Unable to delete index", response.isAcknowledged())
+    }
+
+    override fun resetNodeAfterTest(): Boolean {
+        return false
+    }
+
+    // merged WorkflowSingleNodeTestCase with this class as we are seeing test setup failures
+    // when multiple test classes implement AlertingSingleNodeTestCase or its child class
+    protected fun searchWorkflow(
+        id: String,
+        indices: String = ScheduledJob.SCHEDULED_JOBS_INDEX,
+        refresh: Boolean = true,
+    ): Workflow? {
+        try {
+            if (refresh) refreshIndex(indices)
+        } catch (e: Exception) {
+            logger.warn("Could not refresh index $indices because: ${e.message}")
+            return null
+        }
+        val ssb = SearchSourceBuilder()
+        ssb.version(true)
+        ssb.query(TermQueryBuilder("_id", id))
+        val searchResponse = client().prepareSearch(indices).setRouting(id).setSource(ssb).get()
+
+        return searchResponse.hits.hits.map { it ->
+            val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() }
+            lateinit var workflow: Workflow
+            while (xcp.nextToken() != XContentParser.Token.END_OBJECT) {
+                xcp.nextToken()
+                when (xcp.currentName()) {
+                    "workflow" -> workflow = Workflow.parse(xcp)
+                }
+            }
+            workflow.copy(id = it.id, version = it.version)
+        }.first()
+    }
+
+    protected fun searchWorkflowMetadata(
+        id: String,
+        indices: String = ScheduledJob.SCHEDULED_JOBS_INDEX,
+        refresh: Boolean = true,
+    ): WorkflowMetadata?
{ + try { + if (refresh) refreshIndex(indices) + } catch (e: Exception) { + logger.warn("Could not refresh index $indices because: ${e.message}") + return null + } + val ssb = SearchSourceBuilder() + ssb.version(true) + ssb.query(TermQueryBuilder("workflow_metadata.workflow_id", id)) + val searchResponse = client().prepareSearch(indices).setRouting(id).setSource(ssb).get() + + return searchResponse.hits.hits.map { it -> + val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() } + lateinit var workflowMetadata: WorkflowMetadata + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + xcp.nextToken() + when (xcp.currentName()) { + "workflow_metadata" -> workflowMetadata = WorkflowMetadata.parse(xcp) + } + } + workflowMetadata.copy(id = it.id) + }.first() + } + + protected fun searchMonitorMetadata( + id: String, + indices: String = ScheduledJob.SCHEDULED_JOBS_INDEX, + refresh: Boolean = true, + ): MonitorMetadata? { + try { + if (refresh) refreshIndex(indices) + } catch (e: Exception) { + logger.warn("Could not refresh index $indices because: ${e.message}") + return null + } + val ssb = SearchSourceBuilder() + ssb.version(true) + ssb.query(TermQueryBuilder("_id", id)) + val searchResponse = client().prepareSearch(indices).setRouting(id).setSource(ssb).get() + + return searchResponse.hits.hits.map { it -> + val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() } + lateinit var monitorMetadata: MonitorMetadata + while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { + xcp.nextToken() + when (xcp.currentName()) { + "metadata" -> monitorMetadata = MonitorMetadata.parse(xcp) + } + } + monitorMetadata.copy(id = it.id) + }.first() + } + + protected fun upsertWorkflow( + workflow: Workflow, + id: String = Workflow.NO_ID, + method: RestRequest.Method = RestRequest.Method.POST, + ): IndexWorkflowResponse? { + val request = IndexWorkflowRequest( + workflowId = id, + seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO, + primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM, + refreshPolicy = WriteRequest.RefreshPolicy.parse("true"), + method = method, + workflow = workflow + ) + + return client().execute(AlertingActions.INDEX_WORKFLOW_ACTION_TYPE, request).actionGet() + } + + protected fun getWorkflowById(id: String): GetWorkflowResponse { + return client().execute( + AlertingActions.GET_WORKFLOW_ACTION_TYPE, + GetWorkflowRequest(id, RestRequest.Method.GET) + ).get() + } + + protected fun deleteWorkflow(workflowId: String, deleteDelegateMonitors: Boolean? = null) { + client().execute( + AlertingActions.DELETE_WORKFLOW_ACTION_TYPE, + DeleteWorkflowRequest(workflowId, deleteDelegateMonitors) + ).get() + } + + protected fun executeWorkflow(workflow: Workflow? = null, id: String? = null, dryRun: Boolean = true): ExecuteWorkflowResponse? 
+    {
+        val request = ExecuteWorkflowRequest(dryRun, TimeValue(Instant.now().toEpochMilli()), id, workflow)
+        return client().execute(ExecuteWorkflowAction.INSTANCE, request).get()
+    }
+
+    override fun nodeSettings(): Settings {
+        return Settings.builder()
+            .put(super.nodeSettings())
+            .put("opendistro.scheduled_jobs.sweeper.period", TimeValue(5, TimeUnit.SECONDS))
+            .put("opendistro.scheduled_jobs.enabled", true)
+            .build()
+    }
+}
diff --git a/alerting/bin/test/org/opensearch/alerting/triggeraction/TriggerExpressionParserTests.kt b/alerting/bin/test/org/opensearch/alerting/triggeraction/TriggerExpressionParserTests.kt
new file mode 100644
index 000000000..d3f4613fe
--- /dev/null
+++ b/alerting/bin/test/org/opensearch/alerting/triggeraction/TriggerExpressionParserTests.kt
@@ -0,0 +1,76 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.triggeraction
+
+import org.junit.Assert
+import org.opensearch.alerting.triggercondition.parsers.TriggerExpressionParser
+import org.opensearch.test.OpenSearchTestCase
+
+class TriggerExpressionParserTests : OpenSearchTestCase() {
+
+    fun `test trigger expression posix parsing simple AND`() {
+        val eqString = "(query[name=sigma-123] && query[name=sigma-456])"
+        val equation = TriggerExpressionParser(eqString).parse()
+        Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] && ", equation.toString())
+    }
+
+    fun `test trigger expression posix parsing multiple AND`() {
+        val eqString = "(query[name=sigma-123] && query[name=sigma-456]) && query[name=sigma-789]"
+        val equation = TriggerExpressionParser(eqString).parse()
+        Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] && query[name=sigma-789] && ", equation.toString())
+    }
+
+    fun `test trigger expression posix parsing multiple AND with parenthesis`() {
+        val eqString = "(query[name=sigma-123] && query[name=sigma-456]) && (query[name=sigma-789] && query[name=id-2aw34])"
+        val equation = TriggerExpressionParser(eqString).parse()
+        Assert.assertEquals(
+            "query[name=sigma-123] query[name=sigma-456] && query[name=sigma-789] query[name=id-2aw34] && && ",
+            equation.toString()
+        )
+    }
+
+    fun `test trigger expression posix parsing simple OR`() {
+        val eqString = "(query[name=sigma-123] || query[name=sigma-456])"
+        val equation = TriggerExpressionParser(eqString).parse()
+        Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] || ", equation.toString())
+    }
+
+    fun `test trigger expression posix parsing multiple OR`() {
+        val eqString = "(query[name=sigma-123] || query[name=sigma-456]) || query[name=sigma-789]"
+        val equation = TriggerExpressionParser(eqString).parse()
+        Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] || query[name=sigma-789] || ", equation.toString())
+    }
+
+    fun `test trigger expression posix parsing multiple OR with parenthesis`() {
+        val eqString = "(query[name=sigma-123] || query[name=sigma-456]) || (query[name=sigma-789] || query[name=id-2aw34])"
+        val equation = TriggerExpressionParser(eqString).parse()
+        Assert.assertEquals(
+            "query[name=sigma-123] query[name=sigma-456] || query[name=sigma-789] query[name=id-2aw34] || || ",
+            equation.toString()
+        )
+    }
+
+    fun `test trigger expression posix parsing simple NOT`() {
+        val eqString = "(query[name=sigma-123] || !query[name=sigma-456])"
+        val equation = TriggerExpressionParser(eqString).parse()
+        Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] ! || ", equation.toString())
+    }
|| ", equation.toString()) + } + + fun `test trigger expression posix parsing multiple NOT`() { + val eqString = "(query[name=sigma-123] && !query[tag=tag-456]) && !(query[name=sigma-789])" + val equation = TriggerExpressionParser(eqString).parse() + Assert.assertEquals("query[name=sigma-123] query[tag=tag-456] ! && query[name=sigma-789] ! && ", equation.toString()) + } + + fun `test trigger expression posix parsing multiple operators with parenthesis`() { + val eqString = "(query[name=sigma-123] && query[tag=sev1]) || !(!query[name=sigma-789] || query[name=id-2aw34])" + val equation = TriggerExpressionParser(eqString).parse() + Assert.assertEquals( + "query[name=sigma-123] query[tag=sev1] && query[name=sigma-789] ! query[name=id-2aw34] || ! || ", + equation.toString() + ) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/triggeraction/TriggerExpressionResolverTests.kt b/alerting/bin/test/org/opensearch/alerting/triggeraction/TriggerExpressionResolverTests.kt new file mode 100644 index 000000000..1dd19d9d1 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/triggeraction/TriggerExpressionResolverTests.kt @@ -0,0 +1,124 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.triggeraction + +import org.junit.Assert +import org.opensearch.alerting.triggercondition.parsers.TriggerExpressionParser +import org.opensearch.commons.alerting.model.DocLevelQuery +import org.opensearch.test.OpenSearchTestCase + +class TriggerExpressionResolverTests : OpenSearchTestCase() { + + fun `test trigger expression evaluation simple AND`() { + val eqString = "(query[name=sigma-123] && query[name=sigma-456])" + val equation = TriggerExpressionParser(eqString).parse() + val queryToDocIds = mutableMapOf>() + queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") + queryToDocIds[DocLevelQuery("", "sigma-456", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") + Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] && ", equation.toString()) + Assert.assertEquals(mutableSetOf("1", "2", "3"), equation.evaluate(queryToDocIds)) + } + + fun `test trigger expression evaluation simple AND scenario2`() { + val eqString = "(query[name=sigma-123] && query[id=id1456])" + val equation = TriggerExpressionParser(eqString).parse() + val queryToDocIds = mutableMapOf>() + queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("6", "3", "7") + queryToDocIds[DocLevelQuery("id1456", "", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") + Assert.assertEquals("query[name=sigma-123] query[id=id1456] && ", equation.toString()) + Assert.assertEquals(mutableSetOf("3"), equation.evaluate(queryToDocIds)) + } + + fun `test trigger expression evaluation simple AND scenario3`() { + val eqString = "(query[name=sigma-123] && query[tag=sev2])" + val equation = TriggerExpressionParser(eqString).parse() + val queryToDocIds = mutableMapOf>() + queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("6", "8", "7") + queryToDocIds[DocLevelQuery("", "", listOf(), "", mutableListOf("tag=sev2"))] = mutableSetOf("1", "2", "3") + Assert.assertEquals("query[name=sigma-123] query[tag=sev2] && ", equation.toString()) + Assert.assertEquals(emptySet(), equation.evaluate(queryToDocIds)) + } + + fun `test trigger expression evaluation simple OR`() { + val eqString = "(query[name=sigma-123] || query[name=sigma-456])" + val equation = 
+    fun `test trigger expression evaluation simple OR scenario2`() {
+        val eqString = "(query[name=sigma-123] || query[id=id1456])"
+        val equation = TriggerExpressionParser(eqString).parse()
+        val queryToDocIds = mutableMapOf<DocLevelQuery, Set<String>>()
+        queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("6", "3", "7")
+        queryToDocIds[DocLevelQuery("id1456", "", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3")
+        Assert.assertEquals("query[name=sigma-123] query[id=id1456] || ", equation.toString())
+        Assert.assertEquals(mutableSetOf("6", "3", "7", "1", "2", "3"), equation.evaluate(queryToDocIds))
+    }
+
+    fun `test trigger expression evaluation simple OR scenario3`() {
+        val eqString = "(query[name=sigma-123] || query[tag=sev2])"
+        val equation = TriggerExpressionParser(eqString).parse()
+        val queryToDocIds = mutableMapOf<DocLevelQuery, Set<String>>()
+        queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("6", "8", "7")
+        queryToDocIds[DocLevelQuery("", "", listOf(), "", mutableListOf("tag=sev2"))] = emptySet()
+        Assert.assertEquals("query[name=sigma-123] query[tag=sev2] || ", equation.toString())
+        Assert.assertEquals(mutableSetOf("6", "8", "7"), equation.evaluate(queryToDocIds))
+    }
+
+    fun `test trigger expression evaluation simple NOT`() {
+        val eqString = "!(query[name=sigma-456])"
+        val equation = TriggerExpressionParser(eqString).parse()
+        val queryToDocIds = mutableMapOf<DocLevelQuery, Set<String>>()
+        queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3")
+        queryToDocIds[DocLevelQuery("", "sigma-456", listOf(), "", emptyList())] = mutableSetOf("4", "5", "6")
+        Assert.assertEquals("query[name=sigma-456] ! ", equation.toString())
+        Assert.assertEquals(mutableSetOf("1", "2", "3"), equation.evaluate(queryToDocIds))
+    }
+
+    fun `test trigger expression evaluation AND with NOT`() {
+        val eqString = "(query[name=sigma-123] && !query[name=sigma-456])"
+        val equation = TriggerExpressionParser(eqString).parse()
+        val queryToDocIds = mutableMapOf<DocLevelQuery, Set<String>>()
+        queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3", "11")
+        queryToDocIds[DocLevelQuery("", "sigma-456", listOf(), "", emptyList())] = mutableSetOf("3", "4", "5")
+        queryToDocIds[DocLevelQuery("id_new", "", listOf(), "", emptyList())] = mutableSetOf("11", "12", "13")
+        Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] ! && ", equation.toString())
+        Assert.assertEquals(mutableSetOf("1", "2", "11"), equation.evaluate(queryToDocIds))
+    }
&& ", equation.toString()) + Assert.assertEquals(mutableSetOf("1", "2", "11"), equation.evaluate(queryToDocIds)) + } + + fun `test trigger expression evaluation OR with NOT`() { + val eqString = "(query[name=sigma-123] || !query[id=id1456])" + val equation = TriggerExpressionParser(eqString).parse() + val queryToDocIds = mutableMapOf>() + queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("6", "3", "7") + queryToDocIds[DocLevelQuery("id1456", "", listOf(), "", emptyList())] = mutableSetOf("11", "12", "15") + queryToDocIds[DocLevelQuery("id_new", "", listOf(), "", emptyList())] = mutableSetOf("11", "12", "13") + Assert.assertEquals("query[name=sigma-123] query[id=id1456] ! || ", equation.toString()) + Assert.assertEquals(mutableSetOf("6", "3", "7", "13"), equation.evaluate(queryToDocIds)) + } + + fun `test trigger expression evaluation with multiple operators with parenthesis`() { + val eqString = "(query[name=sigma-123] && query[tag=sev1]) || !(!query[name=sigma-789] || query[id=id-2aw34])" + val equation = TriggerExpressionParser(eqString).parse() + + val queryToDocIds = mutableMapOf>() + queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") + queryToDocIds[DocLevelQuery("id_random1", "", listOf(), "", mutableListOf("sev1"))] = mutableSetOf("2", "3", "4") + queryToDocIds[DocLevelQuery("", "sigma-789", listOf(), "", emptyList())] = mutableSetOf("11", "12", "13") + queryToDocIds[DocLevelQuery("id-2aw34", "", listOf(), "", emptyList())] = mutableSetOf("13", "14", "15") + + Assert.assertEquals( + "query[name=sigma-123] query[tag=sev1] && query[name=sigma-789] ! query[id=id-2aw34] || ! || ", + equation.toString() + ) + + Assert.assertEquals(mutableSetOf("2", "3", "11", "12"), equation.evaluate(queryToDocIds)) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/util/AggregationQueryRewriterTests.kt b/alerting/bin/test/org/opensearch/alerting/util/AggregationQueryRewriterTests.kt new file mode 100644 index 000000000..f596f1b92 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/util/AggregationQueryRewriterTests.kt @@ -0,0 +1,335 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util + +import org.junit.Assert +import org.opensearch.action.search.SearchResponse +import org.opensearch.alerting.model.InputRunResults +import org.opensearch.alerting.model.TriggerAfterKey +import org.opensearch.alerting.randomBucketLevelTrigger +import org.opensearch.alerting.randomBucketSelectorExtAggregationBuilder +import org.opensearch.alerting.randomQueryLevelTrigger +import org.opensearch.cluster.ClusterModule +import org.opensearch.common.CheckedFunction +import org.opensearch.common.xcontent.json.JsonXContent +import org.opensearch.commons.alerting.model.Trigger +import org.opensearch.core.ParseField +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.search.aggregations.Aggregation +import org.opensearch.search.aggregations.AggregationBuilder +import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder +import org.opensearch.search.aggregations.bucket.composite.ParsedComposite +import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder +import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder +import org.opensearch.search.builder.SearchSourceBuilder +import 
+import org.opensearch.test.OpenSearchTestCase
+import java.io.IOException
+
+class AggregationQueryRewriterTests : OpenSearchTestCase() {
+
+    fun `test RewriteQuery empty previous result`() {
+        val triggers: MutableList<Trigger> = mutableListOf()
+        for (i in 0 until 10) {
+            triggers.add(randomBucketLevelTrigger())
+        }
+        val queryBuilder = SearchSourceBuilder()
+        val termAgg: AggregationBuilder = TermsAggregationBuilder("testPath").field("sports")
+        queryBuilder.aggregation(termAgg)
+        val prevResult = null
+        AggregationQueryRewriter.rewriteQuery(queryBuilder, prevResult, triggers)
+        Assert.assertEquals(queryBuilder.aggregations().pipelineAggregatorFactories.size, 10)
+    }
+
+    fun `skip test RewriteQuery with non-empty previous result`() {
+        val triggers: MutableList<Trigger> = mutableListOf()
+        for (i in 0 until 10) {
+            triggers.add(randomBucketLevelTrigger())
+        }
+        val queryBuilder = SearchSourceBuilder()
+        val termAgg: AggregationBuilder = CompositeAggregationBuilder(
+            "testPath",
+            listOf(TermsValuesSourceBuilder("k1"), TermsValuesSourceBuilder("k2"))
+        )
+        queryBuilder.aggregation(termAgg)
+        val aggTriggersAfterKey = mutableMapOf<String, TriggerAfterKey>()
+        for (trigger in triggers) {
+            aggTriggersAfterKey[trigger.id] = TriggerAfterKey(hashMapOf(Pair("k1", "v1"), Pair("k2", "v2")), false)
+        }
+        val prevResult = InputRunResults(emptyList(), null, aggTriggersAfterKey)
+        AggregationQueryRewriter.rewriteQuery(queryBuilder, prevResult, triggers)
+        Assert.assertEquals(queryBuilder.aggregations().pipelineAggregatorFactories.size, 10)
+        queryBuilder.aggregations().aggregatorFactories.forEach {
+            if (it.name.equals("testPath")) {
+//                val compAgg = it as CompositeAggregationBuilder
+                // TODO: This is calling forbidden API and causing build failures, need to find an alternative
+                //  instead of trying to access private member variables
+//                val afterField = CompositeAggregationBuilder::class.java.getDeclaredField("after")
+//                afterField.isAccessible = true
+//                Assert.assertEquals(afterField.get(compAgg), hashMapOf(Pair("k1", "v1"), Pair("k2", "v2")))
+            }
+        }
+    }
+
+    fun `test RewriteQuery with non aggregation trigger`() {
+        val triggers: MutableList<Trigger> = mutableListOf()
+        for (i in 0 until 10) {
+            triggers.add(randomQueryLevelTrigger())
+        }
+        val queryBuilder = SearchSourceBuilder()
+        val termAgg: AggregationBuilder = TermsAggregationBuilder("testPath").field("sports")
+        queryBuilder.aggregation(termAgg)
+        val prevResult = null
+        AggregationQueryRewriter.rewriteQuery(queryBuilder, prevResult, triggers)
+        Assert.assertEquals(queryBuilder.aggregations().pipelineAggregatorFactories.size, 0)
+    }
+
+    fun `test after keys from search response`() {
+        val responseContent = """
+        {
+          "took" : 97,
+          "timed_out" : false,
+          "_shards" : {
+            "total" : 3,
+            "successful" : 3,
+            "skipped" : 0,
+            "failed" : 0
+          },
+          "hits" : {
+            "total" : {
+              "value" : 20,
+              "relation" : "eq"
+            },
+            "max_score" : null,
+            "hits" : [ ]
+          },
+          "aggregations" : {
+            "composite#testPath" : {
+              "after_key" : {
+                "sport" : "Basketball"
+              },
+              "buckets" : [
+                {
+                  "key" : {
+                    "sport" : "Basketball"
+                  },
+                  "doc_count" : 5
+                }
+              ]
+            }
+          }
+        }
+        """.trimIndent()
+
+        val aggTriggers: MutableList<Trigger> = mutableListOf(randomBucketLevelTrigger())
+        val tradTriggers: MutableList<Trigger> = mutableListOf(randomQueryLevelTrigger())
+
+        val searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, responseContent))
+        val afterKeys = AggregationQueryRewriter.getAfterKeysFromSearchResponse(searchResponse, aggTriggers, null)
+        Assert.assertEquals(afterKeys[aggTriggers[0].id]?.afterKey, hashMapOf(Pair("sport", "Basketball")))
+
+        val afterKeys2 = AggregationQueryRewriter.getAfterKeysFromSearchResponse(searchResponse, tradTriggers, null)
+        Assert.assertEquals(afterKeys2.size, 0)
+    }
+
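+    // Pagination sketch: a composite aggregation resumes from the previous page by setting its
+    // "after" key to the last response's after_key. This uses the standard
+    // CompositeAggregationBuilder API; the rewriter's internals may differ:
+    //
+    //     val composite = CompositeAggregationBuilder(
+    //         "testPath",
+    //         listOf(TermsValuesSourceBuilder("sport").field("sport"))
+    //     )
+    //     composite.aggregateAfter(mapOf("sport" to "Basketball")) // resume after this bucket
+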
hashMapOf(Pair("sport", "Basketball"))) + + val afterKeys2 = AggregationQueryRewriter.getAfterKeysFromSearchResponse(searchResponse, tradTriggers, null) + Assert.assertEquals(afterKeys2.size, 0) + } + + fun `test after keys from search responses for multiple bucket paths and different page counts`() { + val firstResponseContent = """ + { + "took" : 0, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 4675, + "relation" : "eq" + }, + "max_score" : null, + "hits" : [ ] + }, + "aggregations" : { + "composite2#smallerResults" : { + "after_key" : { + "category" : "Women's Shoes" + }, + "buckets" : [ + { + "key" : { + "category" : "Women's Shoes" + }, + "doc_count" : 1136 + } + ] + }, + "composite3#largerResults" : { + "after_key" : { + "user" : "abigail" + }, + "buckets" : [ + { + "key" : { + "user" : "abd" + }, + "doc_count" : 188 + }, + { + "key" : { + "user" : "abigail" + }, + "doc_count" : 128 + } + ] + } + } + } + """.trimIndent() + + val secondResponseContent = """ + { + "took" : 0, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 4675, + "relation" : "eq" + }, + "max_score" : null, + "hits" : [ ] + }, + "aggregations" : { + "composite2#smallerResults" : { + "buckets" : [ ] + }, + "composite3#largerResults" : { + "after_key" : { + "user" : "boris" + }, + "buckets" : [ + { + "key" : { + "user" : "betty" + }, + "doc_count" : 148 + }, + { + "key" : { + "user" : "boris" + }, + "doc_count" : 74 + } + ] + } + } + } + """.trimIndent() + + val thirdResponseContent = """ + { + "took" : 0, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 4675, + "relation" : "eq" + }, + "max_score" : null, + "hits" : [ ] + }, + "aggregations" : { + "composite2#smallerResults" : { + "buckets" : [ ] + }, + "composite3#largerResults" : { + "buckets" : [ ] + } + } + } + """.trimIndent() + + val bucketLevelTriggers: MutableList = mutableListOf( + randomBucketLevelTrigger(bucketSelector = randomBucketSelectorExtAggregationBuilder(parentBucketPath = "smallerResults")), + randomBucketLevelTrigger(bucketSelector = randomBucketSelectorExtAggregationBuilder(parentBucketPath = "largerResults")) + ) + + var searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, firstResponseContent)) + val afterKeys = AggregationQueryRewriter.getAfterKeysFromSearchResponse(searchResponse, bucketLevelTriggers, null) + assertEquals(hashMapOf(Pair("category", "Women's Shoes")), afterKeys[bucketLevelTriggers[0].id]?.afterKey) + assertEquals(false, afterKeys[bucketLevelTriggers[0].id]?.lastPage) + assertEquals(hashMapOf(Pair("user", "abigail")), afterKeys[bucketLevelTriggers[1].id]?.afterKey) + assertEquals(false, afterKeys[bucketLevelTriggers[1].id]?.lastPage) + + searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, secondResponseContent)) + val afterKeys2 = AggregationQueryRewriter.getAfterKeysFromSearchResponse(searchResponse, bucketLevelTriggers, afterKeys) + assertEquals(hashMapOf(Pair("category", "Women's Shoes")), afterKeys2[bucketLevelTriggers[0].id]?.afterKey) + assertEquals(true, afterKeys2[bucketLevelTriggers[0].id]?.lastPage) + assertEquals(hashMapOf(Pair("user", "boris")), afterKeys2[bucketLevelTriggers[1].id]?.afterKey) + assertEquals(false, 
+    override fun xContentRegistry(): NamedXContentRegistry {
+        val entries = ClusterModule.getNamedXWriteables()
+        entries.add(
+            NamedXContentRegistry.Entry(
+                Aggregation::class.java,
+                ParseField(CompositeAggregationBuilder.NAME),
+                CheckedFunction<XContentParser, ParsedComposite, IOException> { parser: XContentParser? ->
+                    ParsedComposite.fromXContent(
+                        parser,
+                        "testPath"
+                    )
+                }
+            )
+        )
+        entries.add(
+            NamedXContentRegistry.Entry(
+                Aggregation::class.java,
+                ParseField(CompositeAggregationBuilder.NAME + "2"),
+                CheckedFunction<XContentParser, ParsedComposite, IOException> { parser: XContentParser? ->
+                    ParsedComposite.fromXContent(
+                        parser,
+                        "smallerResults"
+                    )
+                }
+            )
+        )
+        entries.add(
+            NamedXContentRegistry.Entry(
+                Aggregation::class.java,
+                ParseField(CompositeAggregationBuilder.NAME + "3"),
+                CheckedFunction<XContentParser, ParsedComposite, IOException> { parser: XContentParser? ->
+                    ParsedComposite.fromXContent(
+                        parser,
+                        "largerResults"
+                    )
+                }
+            )
+        )
+        return NamedXContentRegistry(entries)
+    }
+}
diff --git a/alerting/bin/test/org/opensearch/alerting/util/AnomalyDetectionUtilsTests.kt b/alerting/bin/test/org/opensearch/alerting/util/AnomalyDetectionUtilsTests.kt
new file mode 100644
index 000000000..3555a5c38
--- /dev/null
+++ b/alerting/bin/test/org/opensearch/alerting/util/AnomalyDetectionUtilsTests.kt
@@ -0,0 +1,162 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.util
+
+import org.opensearch.alerting.ANOMALY_RESULT_INDEX
+import org.opensearch.alerting.randomQueryLevelMonitor
+import org.opensearch.commons.alerting.model.Input
+import org.opensearch.commons.alerting.model.SearchInput
+import org.opensearch.commons.authuser.User
+import org.opensearch.core.common.io.stream.StreamOutput
+import org.opensearch.core.xcontent.ToXContent
+import org.opensearch.core.xcontent.XContentBuilder
+import org.opensearch.index.query.QueryBuilders
+import org.opensearch.search.builder.SearchSourceBuilder
+import org.opensearch.test.OpenSearchTestCase
+
+class AnomalyDetectionUtilsTests : OpenSearchTestCase() {
+
+    fun `test is ad monitor`() {
+        val monitor = randomQueryLevelMonitor(
+            inputs = listOf(
+                SearchInput(
+                    listOf(ANOMALY_RESULT_INDEX),
+                    SearchSourceBuilder().query(QueryBuilders.matchAllQuery())
+                )
+            )
+        )
+        assertTrue(isADMonitor(monitor))
+    }
+
+    fun `test not ad monitor if monitor have no inputs`() {
+        val monitor = randomQueryLevelMonitor(
+            inputs = listOf()
+        )
+        assertFalse(isADMonitor(monitor))
+    }
+
+    fun `test not ad monitor if monitor input is not search input`() {
+        val monitor = randomQueryLevelMonitor(
+            inputs = listOf(object : Input {
+                override fun name(): String {
+                    TODO("Not yet implemented")
+                }
+
+                override fun writeTo(out: StreamOutput?)
{ + TODO("Not yet implemented") + } + + override fun toXContent(builder: XContentBuilder?, params: ToXContent.Params?): XContentBuilder { + TODO("Not yet implemented") + } + }) + ) + assertFalse(isADMonitor(monitor)) + } + + fun `test not ad monitor if monitor input has more than 1 indices`() { + val monitor = randomQueryLevelMonitor( + inputs = listOf( + SearchInput( + listOf(randomAlphaOfLength(5), randomAlphaOfLength(5)), + SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) + ) + ) + ) + assertFalse(isADMonitor(monitor)) + } + + fun `test not ad monitor if monitor input's index name is not AD result index`() { + val monitor = randomQueryLevelMonitor( + inputs = listOf(SearchInput(listOf(randomAlphaOfLength(5)), SearchSourceBuilder().query(QueryBuilders.matchAllQuery()))) + ) + assertFalse(isADMonitor(monitor)) + } + + fun `test add user role filter with null user`() { + val searchSourceBuilder = SearchSourceBuilder() + addUserBackendRolesFilter(null, searchSourceBuilder) + assertEquals( + "{\"query\":{\"bool\":{\"must_not\":[{\"nested\":{\"query\":{\"exists\":{\"field\":\"user\",\"boost\":1.0}}," + + "\"path\":\"user\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"adjust_pure_negative\":true," + + "\"boost\":1.0}}}", + searchSourceBuilder.toString() + ) + } + + fun `test add user role filter with user with empty name`() { + val searchSourceBuilder = SearchSourceBuilder() + addUserBackendRolesFilter(User("", mutableListOf(), mutableListOf(), mutableListOf()), searchSourceBuilder) + assertEquals( + "{\"query\":{\"bool\":{\"must_not\":[{\"nested\":{\"query\":{\"exists\":{\"field\":\"user\",\"boost\":1.0}}," + + "\"path\":\"user\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"adjust_pure_negative\":true," + + "\"boost\":1.0}}}", + searchSourceBuilder.toString() + ) + } + + fun `test add user role filter with null user backend role`() { + val searchSourceBuilder = SearchSourceBuilder() + addUserBackendRolesFilter( + User( + randomAlphaOfLength(5), + null, + listOf(randomAlphaOfLength(5)), + listOf(randomAlphaOfLength(5)) + ), + searchSourceBuilder + ) + assertEquals( + "{\"query\":{\"bool\":{\"must\":[{\"nested\":{\"query\":{\"exists\":{\"field\":\"user\",\"boost\":1.0}}," + + "\"path\":\"user\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"must_not\":[{\"nested\":" + + "{\"query\":{\"exists\":{\"field\":\"user.backend_roles.keyword\",\"boost\":1.0}},\"path\":\"user\",\"ignore_unmapped\"" + + ":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"adjust_pure_negative\":true,\"boost\":1.0}}}", + searchSourceBuilder.toString() + ) + } + + fun `test add user role filter with empty user backend role`() { + val searchSourceBuilder = SearchSourceBuilder() + addUserBackendRolesFilter( + User( + randomAlphaOfLength(5), + listOf(), + listOf(randomAlphaOfLength(5)), + listOf(randomAlphaOfLength(5)) + ), + searchSourceBuilder + ) + assertEquals( + "{\"query\":{\"bool\":{\"must\":[{\"nested\":{\"query\":{\"exists\":{\"field\":\"user\",\"boost\":1.0}}," + + "\"path\":\"user\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"must_not\":[{\"nested\":" + + "{\"query\":{\"exists\":{\"field\":\"user.backend_roles.keyword\",\"boost\":1.0}},\"path\":\"user\",\"ignore_unmapped\"" + + ":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"adjust_pure_negative\":true,\"boost\":1.0}}}", + searchSourceBuilder.toString() + ) + } + + fun `test add user role filter with normal user backend role`() { + val searchSourceBuilder = 
+            SearchSourceBuilder()
+        val backendRole1 = randomAlphaOfLength(5)
+        val backendRole2 = randomAlphaOfLength(5)
+        addUserBackendRolesFilter(
+            User(
+                randomAlphaOfLength(5),
+                listOf(backendRole1, backendRole2),
+                listOf(randomAlphaOfLength(5)),
+                listOf(randomAlphaOfLength(5))
+            ),
+            searchSourceBuilder
+        )
+        assertEquals(
+            "{\"query\":{\"bool\":{\"must\":[{\"nested\":{\"query\":{\"terms\":{\"user.backend_roles.keyword\":" +
+                "[\"$backendRole1\",\"$backendRole2\"]," +
+                "\"boost\":1.0}},\"path\":\"user\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}}]," +
+                "\"adjust_pure_negative\":true,\"boost\":1.0}}}",
+            searchSourceBuilder.toString()
+        )
+    }
+}
diff --git a/alerting/bin/test/org/opensearch/alerting/util/IndexUtilsTests.kt b/alerting/bin/test/org/opensearch/alerting/util/IndexUtilsTests.kt
new file mode 100644
index 000000000..e4db20639
--- /dev/null
+++ b/alerting/bin/test/org/opensearch/alerting/util/IndexUtilsTests.kt
@@ -0,0 +1,91 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.util
+
+import org.opensearch.alerting.parser
+import org.opensearch.cluster.metadata.IndexMetadata
+import org.opensearch.test.OpenSearchTestCase
+import java.lang.NumberFormatException
+import kotlin.test.assertFailsWith
+
+class IndexUtilsTests : OpenSearchTestCase() {
+
+    fun `test get schema version`() {
+        val message = "{\"user\":{ \"name\":\"test\"},\"_meta\":{\"schema_version\": 1}}"
+
+        val schemaVersion = IndexUtils.getSchemaVersion(message)
+        assertEquals(1, schemaVersion)
+    }
+
+    fun `test get schema version without _meta`() {
+        val message = "{\"user\":{ \"name\":\"test\"}}"
+
+        val schemaVersion = IndexUtils.getSchemaVersion(message)
+        assertEquals(0, schemaVersion)
+    }
+
+    fun `test get schema version without schema_version`() {
+        val message = "{\"user\":{ \"name\":\"test\"},\"_meta\":{\"test\": 1}}"
+
+        val schemaVersion = IndexUtils.getSchemaVersion(message)
+        assertEquals(0, schemaVersion)
+    }
+
+    fun `test get schema version with negative schema_version`() {
+        val message = "{\"user\":{ \"name\":\"test\"},\"_meta\":{\"schema_version\": -1}}"
+
+        assertFailsWith(IllegalArgumentException::class, "Expected IllegalArgumentException") {
+            IndexUtils.getSchemaVersion(message)
+        }
+    }
+
+    fun `test get schema version with wrong schema_version`() {
+        val message = "{\"user\":{ \"name\":\"test\"},\"_meta\":{\"schema_version\": \"wrong\"}}"
+
+        assertFailsWith(NumberFormatException::class, "Expected NumberFormatException") {
+            IndexUtils.getSchemaVersion(message)
+        }
+    }
+
+    fun `test should update index without original version`() {
+        val indexContent = "{\"testIndex\":{\"settings\":{\"index\":{\"creation_date\":\"1558407515699\"," +
+            "\"number_of_shards\":\"1\",\"number_of_replicas\":\"1\",\"uuid\":\"t-VBBW6aR6KpJ3XP5iISOA\"," +
+            "\"version\":{\"created\":\"6040399\"},\"provided_name\":\"data_test\"}},\"mapping_version\":123," +
+            "\"settings_version\":123,\"mappings\":{\"_doc\":{\"properties\":{\"name\":{\"type\":\"keyword\"}}}}}}"
+        val newMapping = "{\"_meta\":{\"schema_version\":10},\"properties\":{\"name\":{\"type\":\"keyword\"}}}"
+        val index: IndexMetadata = IndexMetadata.fromXContent(parser(indexContent))
+
+        val shouldUpdateIndex = IndexUtils.shouldUpdateIndex(index, newMapping)
+        assertTrue(shouldUpdateIndex)
+    }
+
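+    // The update decision being tested: shouldUpdateIndex compares the _meta.schema_version of
+    // the live mapping (0 when _meta is absent, as in the test above) against the new mapping's
+    // version. A minimal sketch of that comparison, assuming XContent-style source maps:
+    //
+    //     val liveMeta = liveMapping["_meta"] as? Map<*, *>
+    //     val liveVersion = (liveMeta?.get("schema_version") as? Number)?.toInt() ?: 0
+    //     val newVersion = ... // parsed the same way from the new mapping
+    //     val shouldUpdate = newVersion > liveVersion
+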
"\"number_of_shards\":\"1\",\"number_of_replicas\":\"1\",\"uuid\":\"t-VBBW6aR6KpJ3XP5iISOA\"," + + "\"version\":{\"created\":\"6040399\"},\"provided_name\":\"data_test\"}},\"mapping_version\":123," + + "\"settings_version\":123,\"mappings\":{\"_doc\":{\"_meta\":{\"schema_version\":1},\"properties\":" + + "{\"name\":{\"type\":\"keyword\"}}}}}}" + val newMapping = "{\"_meta\":{\"schema_version\":10},\"properties\":{\"name\":{\"type\":\"keyword\"}}}" + val index: IndexMetadata = IndexMetadata.fromXContent(parser(indexContent)) + + val shouldUpdateIndex = IndexUtils.shouldUpdateIndex(index, newMapping) + assertTrue(shouldUpdateIndex) + } + + fun `test should update index with same version`() { + val indexContent = "{\"testIndex\":{\"settings\":{\"index\":{\"creation_date\":\"1558407515699\"," + + "\"number_of_shards\":\"1\",\"number_of_replicas\":\"1\",\"uuid\":\"t-VBBW6aR6KpJ3XP5iISOA\"," + + "\"version\":{\"created\":\"6040399\"},\"provided_name\":\"data_test\"}},\"mapping_version\":\"1\"," + + "\"settings_version\":\"1\",\"aliases_version\":\"1\",\"mappings\":" + + "{\"_doc\":{\"_meta\":{\"schema_version\":1},\"properties\":{\"name\":{\"type\":\"keyword\"}}}}}}" + val newMapping = "{\"_meta\":{\"schema_version\":1},\"properties\":{\"name\":{\"type\":\"keyword\"}}}" + val xContentParser = parser(indexContent) + val index: IndexMetadata = IndexMetadata.fromXContent(xContentParser) + + val shouldUpdateIndex = IndexUtils.shouldUpdateIndex(index, newMapping) + assertFalse(shouldUpdateIndex) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesWrappersIT.kt b/alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesWrappersIT.kt new file mode 100644 index 000000000..9712b4213 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesWrappersIT.kt @@ -0,0 +1,173 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util.clusterMetricsMonitorHelpers + +import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.randomClusterMetricsInput +import org.opensearch.alerting.util.clusterMetricsMonitorHelpers.CatIndicesResponseWrapper.Companion.WRAPPER_FIELD +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.ClusterMetricsInput +import org.opensearch.core.common.Strings +import org.opensearch.test.OpenSearchSingleNodeTestCase + +class CatIndicesWrappersIT : OpenSearchSingleNodeTestCase() { + private val path = ClusterMetricsInput.ClusterMetricType.CAT_INDICES.defaultPath + + fun `test CatIndicesRequestWrapper validate valid pathParams`() { + // GIVEN + val pathParams = "index1,index-name-2,index-3" + + // WHEN + val requestWrapper = CatIndicesRequestWrapper(pathParams = pathParams) + + // THEN + assertEquals(3, requestWrapper.clusterHealthRequest.indices().size) + assertEquals(3, requestWrapper.clusterStateRequest.indices().size) + assertEquals(3, requestWrapper.indexSettingsRequest.indices().size) + assertEquals(3, requestWrapper.indicesStatsRequest.indices().size) + } + + fun `test CatIndicesRequestWrapper validate without providing pathParams`() { + // GIVEN & WHEN + val requestWrapper = CatIndicesRequestWrapper() + + // THEN + assertNull(requestWrapper.clusterHealthRequest.indices()) + assertEquals(Strings.EMPTY_ARRAY, requestWrapper.clusterStateRequest.indices()) + assertEquals(Strings.EMPTY_ARRAY, 
requestWrapper.indexSettingsRequest.indices()) + assertNull(requestWrapper.indicesStatsRequest.indices()) + } + + fun `test CatIndicesRequestWrapper validate blank pathParams`() { + // GIVEN + val pathParams = " " + + // WHEN + val requestWrapper = CatIndicesRequestWrapper(pathParams = pathParams) + + // THEN + assertNull(requestWrapper.clusterHealthRequest.indices()) + assertEquals(Strings.EMPTY_ARRAY, requestWrapper.clusterStateRequest.indices()) + assertEquals(Strings.EMPTY_ARRAY, requestWrapper.indexSettingsRequest.indices()) + assertNull(requestWrapper.indicesStatsRequest.indices()) + } + + fun `test CatIndicesRequestWrapper validate empty pathParams`() { + // GIVEN + val pathParams = "" + + // WHEN + val requestWrapper = CatIndicesRequestWrapper(pathParams = pathParams) + + // THEN + assertNull(requestWrapper.clusterHealthRequest.indices()) + assertEquals(Strings.EMPTY_ARRAY, requestWrapper.clusterStateRequest.indices()) + assertEquals(Strings.EMPTY_ARRAY, requestWrapper.indexSettingsRequest.indices()) + assertNull(requestWrapper.indicesStatsRequest.indices()) + } + + fun `test CatIndicesRequestWrapper validate invalid pathParams`() { + // GIVEN + val pathParams = "_index1,index^2" + + // WHEN & THEN + assertThrows(IllegalArgumentException::class.java) { CatIndicesRequestWrapper(pathParams = pathParams) } + } + + suspend fun `test CatIndicesResponseWrapper returns with only indices in pathParams`() { + // GIVEN + val testIndices = (1..5).map { + "test-index${randomAlphaOfLength(10).lowercase()}" to randomIntBetween(1, 10) + }.toMap() + + testIndices.forEach { (indexName, docCount) -> + repeat(docCount) { + val docId = (it + 1).toString() + val docMessage = """ + { + "message": "$indexName doc num $docId" + } + """.trimIndent() + indexDoc(indexName, docId, docMessage) + } + } + + /* + Creating a subset of indices to use for the pathParams to test that all indices on the cluster ARE NOT returned. 
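+        The subList(1, testIndices.size - 1) call below drops the first and the last test
+        index, so the assertions exercise both included and excluded indices.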
+ */ + val pathParamsIndices = testIndices.keys.toList().subList(1, testIndices.size - 1) + val pathParams = pathParamsIndices.joinToString(",") + val input = randomClusterMetricsInput(path = path, pathParams = pathParams) + + // WHEN + val responseMap = (executeTransportAction(input, client())).toMap() + + // THEN + val shards = responseMap[WRAPPER_FIELD] as List> + val returnedIndices = + shards.map { (it[CatIndicesResponseWrapper.IndexInfo.INDEX_FIELD] as String) to it }.toMap() + + assertEquals(pathParamsIndices.size, returnedIndices.keys.size) + testIndices.forEach { (indexName, docCount) -> + if (pathParamsIndices.contains(indexName)) { + assertEquals( + indexName, + returnedIndices[indexName]?.get(CatIndicesResponseWrapper.IndexInfo.INDEX_FIELD) as String + ) + assertEquals( + docCount.toString(), + returnedIndices[indexName]?.get(CatIndicesResponseWrapper.IndexInfo.DOCS_COUNT_FIELD) as String + ) + } + } + } + + suspend fun `test CatIndicesResponseWrapper returns with all indices when empty pathParams`() { + // GIVEN + val testIndices = (1..5).map { + "test-index${randomAlphaOfLength(10).lowercase()}" to randomIntBetween(1, 10) + }.toMap() + + testIndices.forEach { (indexName, docCount) -> + repeat(docCount) { + val docId = (it + 1).toString() + val docMessage = """ + { + "message": "$indexName doc num $docId" + } + """.trimIndent() + indexDoc(indexName, docId, docMessage) + } + } + + val input = randomClusterMetricsInput(path = path) + + // WHEN + val responseMap = (executeTransportAction(input, client())).toMap() + + // THEN + val shards = responseMap[WRAPPER_FIELD] as List> + val returnedIndices = + shards.map { (it[CatIndicesResponseWrapper.IndexInfo.INDEX_FIELD] as String) to it }.toMap() + + assertEquals(testIndices.size, returnedIndices.keys.size) + testIndices.forEach { (indexName, docCount) -> + assertEquals( + indexName, + returnedIndices[indexName]?.get(CatIndicesResponseWrapper.IndexInfo.INDEX_FIELD) as String + ) + assertEquals( + docCount.toString(), + returnedIndices[indexName]?.get(CatIndicesResponseWrapper.IndexInfo.DOCS_COUNT_FIELD) as String + ) + } + } + + private fun indexDoc(index: String, id: String, doc: String) { + client().prepareIndex(index).setId(id) + .setSource(doc, XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get() + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsWrappersIT.kt b/alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsWrappersIT.kt new file mode 100644 index 000000000..c8b5db561 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsWrappersIT.kt @@ -0,0 +1,165 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util.clusterMetricsMonitorHelpers + +import org.opensearch.action.support.WriteRequest +import org.opensearch.alerting.randomClusterMetricsInput +import org.opensearch.alerting.util.clusterMetricsMonitorHelpers.CatShardsResponseWrapper.Companion.WRAPPER_FIELD +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.ClusterMetricsInput +import org.opensearch.core.common.Strings +import org.opensearch.test.OpenSearchSingleNodeTestCase + +class CatShardsWrappersIT : OpenSearchSingleNodeTestCase() { + private val path = ClusterMetricsInput.ClusterMetricType.CAT_SHARDS.defaultPath + + fun `test CatShardsRequestWrapper validate valid pathParams`() { + // GIVEN 
+ val pathParams = "index1,index_2,index-3" + + // WHEN + val requestWrapper = CatShardsRequestWrapper(pathParams = pathParams) + + // THEN + assertEquals(3, requestWrapper.clusterStateRequest.indices().size) + assertEquals(3, requestWrapper.indicesStatsRequest.indices().size) + } + + fun `test CatShardsRequestWrapper validate without providing pathParams`() { + // GIVEN & WHEN + val requestWrapper = CatShardsRequestWrapper() + + // THEN + assertEquals(Strings.EMPTY_ARRAY, requestWrapper.clusterStateRequest.indices()) + assertNull(requestWrapper.indicesStatsRequest.indices()) + } + + fun `test CatShardsRequestWrapper validate blank pathParams`() { + // GIVEN + val pathParams = " " + + // WHEN + val requestWrapper = CatShardsRequestWrapper(pathParams = pathParams) + + // THEN + assertEquals(Strings.EMPTY_ARRAY, requestWrapper.clusterStateRequest.indices()) + assertNull(requestWrapper.indicesStatsRequest.indices()) + } + + fun `test CatShardsRequestWrapper validate empty pathParams`() { + // GIVEN + val pathParams = "" + + // WHEN + val requestWrapper = CatShardsRequestWrapper(pathParams = pathParams) + + // THEN + assertEquals(Strings.EMPTY_ARRAY, requestWrapper.clusterStateRequest.indices()) + assertNull(requestWrapper.indicesStatsRequest.indices()) + } + + fun `test CatShardsRequestWrapper validate invalid pathParams`() { + // GIVEN + val pathParams = "_index1,index^2" + + // WHEN & THEN + assertThrows(IllegalArgumentException::class.java) { CatShardsRequestWrapper(pathParams = pathParams) } + } + + suspend fun `test CatShardsResponseWrapper returns with only indices in pathParams`() { + // GIVEN + val testIndices = (1..5).map { + "test-index${randomAlphaOfLength(10).lowercase()}" to randomIntBetween(1, 10) + }.toMap() + + testIndices.forEach { (indexName, docCount) -> + repeat(docCount) { + val docId = (it + 1).toString() + val docMessage = """ + { + "message": "$indexName doc num $docId" + } + """.trimIndent() + indexDoc(indexName, docId, docMessage) + } + } + + /* + Creating a subset of indices to use for the pathParams to test that all indices on the cluster ARE NOT returned. 
+ */ + val pathParamsIndices = testIndices.keys.toList().subList(1, testIndices.size - 1) + val pathParams = pathParamsIndices.joinToString(",") + val input = randomClusterMetricsInput(path = path, pathParams = pathParams) + + // WHEN + val responseMap = (executeTransportAction(input, client())).toMap() + + // THEN + val shards = responseMap[WRAPPER_FIELD] as List> + val returnedIndices = + shards.map { (it[CatShardsResponseWrapper.ShardInfo.INDEX_FIELD] as String) to it }.toMap() + + assertEquals(pathParamsIndices.size, returnedIndices.keys.size) + testIndices.forEach { (indexName, docCount) -> + if (pathParamsIndices.contains(indexName)) { + assertEquals( + indexName, + returnedIndices[indexName]?.get(CatShardsResponseWrapper.ShardInfo.INDEX_FIELD) as String + ) + assertEquals( + docCount.toString(), + returnedIndices[indexName]?.get(CatShardsResponseWrapper.ShardInfo.DOCS_FIELD) as String + ) + } + } + } + + suspend fun `test CatShardsResponseWrapper returns with all indices when empty pathParams`() { + // GIVEN + val testIndices = (1..5).map { + "test-index${randomAlphaOfLength(10).lowercase()}" to randomIntBetween(1, 10) + }.toMap() + + testIndices.forEach { (indexName, docCount) -> + repeat(docCount) { + val docId = (it + 1).toString() + val docMessage = """ + { + "message": "$indexName doc num $docId" + } + """.trimIndent() + indexDoc(indexName, docId, docMessage) + } + } + + val input = randomClusterMetricsInput(path = path) + + // WHEN + val responseMap = (executeTransportAction(input, client())).toMap() + + // THEN + val shards = responseMap[WRAPPER_FIELD] as List> + val returnedIndices = + shards.map { (it[CatShardsResponseWrapper.ShardInfo.INDEX_FIELD] as String) to it }.toMap() + + assertEquals(testIndices.size, returnedIndices.keys.size) + testIndices.forEach { (indexName, docCount) -> + assertEquals( + indexName, + returnedIndices[indexName]?.get(CatShardsResponseWrapper.ShardInfo.INDEX_FIELD) as String + ) + assertEquals( + docCount.toString(), + returnedIndices[indexName]?.get(CatShardsResponseWrapper.ShardInfo.DOCS_FIELD) as String + ) + } + } + + private fun indexDoc(index: String, id: String, doc: String) { + client().prepareIndex(index).setId(id) + .setSource(doc, XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get() + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensionsTests.kt b/alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensionsTests.kt new file mode 100644 index 000000000..bfe5b8dce --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensionsTests.kt @@ -0,0 +1,122 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util.clusterMetricsMonitorHelpers + +import org.opensearch.test.OpenSearchTestCase + +class SupportedClusterMetricsSettingsExtensionsTests : OpenSearchTestCase() { + private var expectedResponse = hashMapOf() + private var mappedResponse = hashMapOf() + private var supportedJsonPayload = hashMapOf>() + + fun `test redactFieldsFromResponse with non-empty supportedJsonPayload`() { + // GIVEN + mappedResponse = hashMapOf( + ( + "pathRoot1" to hashMapOf( + ("pathRoot1_subPath1" to 11), + ( + "pathRoot1_subPath2" to hashMapOf( + ("pathRoot1_subPath2_subPath1" to 121), + ( + "pathRoot1_subPath2_subPath2" to hashMapOf( + 
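+                        /* supportedJsonPayload's dot-delimited entries act as include-paths into this
+                           nested map, and an empty path list (see "pathRoot3" below) keeps that whole
+                           subtree. A hedged sketch of the lookup being exercised (helper is hypothetical):
+
+                               fun includePath(tree: Map<String, Any?>, path: List<String>): Any? =
+                                   if (path.size == 1) tree[path.first()]
+                                   else (tree[path.first()] as? Map<String, Any?>)?.let { includePath(it, path.drop(1)) }
+                        */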
("pathRoot1_subPath2_subPath2_subPath1" to 1221) + ) + ) + ) + ) + ) + ), + ("pathRoot2" to hashMapOf(("pathRoot2_subPath1" to 21), ("pathRoot2_subPath2" to setOf(221, 222, "223string")))), + ("pathRoot3" to hashMapOf(("pathRoot3_subPath1" to 31), ("pathRoot3_subPath2" to setOf(321, 322, "323string")))) + ) + + supportedJsonPayload = hashMapOf( + ( + "pathRoot1" to arrayListOf( + "pathRoot1_subPath1", + "pathRoot1_subPath2.pathRoot1_subPath2_subPath2.pathRoot1_subPath2_subPath2_subPath1" + ) + ), + ("pathRoot2" to arrayListOf("pathRoot2_subPath2")), + ("pathRoot3" to arrayListOf()) + ) + + expectedResponse = hashMapOf( + ( + "pathRoot1" to hashMapOf( + ("pathRoot1_subPath1" to 11), + ( + "pathRoot1_subPath2" to hashMapOf( + ( + "pathRoot1_subPath2_subPath2" to hashMapOf( + ("pathRoot1_subPath2_subPath2_subPath1" to 1221) + ) + ) + ) + ) + ) + ), + ("pathRoot2" to hashMapOf(("pathRoot2_subPath2" to setOf(221, 222, "223string")))), + ("pathRoot3" to hashMapOf(("pathRoot3_subPath1" to 31), ("pathRoot3_subPath2" to setOf(321, 322, "323string")))) + ) + + // WHEN + val result = redactFieldsFromResponse(mappedResponse, supportedJsonPayload) + + // THEN + assertEquals(expectedResponse, result) + } + + fun `test redactFieldsFromResponse with empty supportedJsonPayload`() { + // GIVEN + mappedResponse = hashMapOf( + ( + "pathRoot1" to hashMapOf( + ("pathRoot1_subPath1" to 11), + ( + "pathRoot1_subPath2" to hashMapOf( + ("pathRoot1_subPath2_subPath1" to 121), + ( + "pathRoot1_subPath2_subPath2" to hashMapOf( + ("pathRoot1_subPath2_subPath2_subPath1" to 1221) + ) + ) + ) + ) + ) + ), + ("pathRoot2" to hashMapOf(("pathRoot2_subPath1" to 21), ("pathRoot2_subPath2" to setOf(221, 222, "223string")))), + ("pathRoot3" to 3) + ) + + expectedResponse = hashMapOf( + ( + "pathRoot1" to hashMapOf( + ("pathRoot1_subPath1" to 11), + ( + "pathRoot1_subPath2" to hashMapOf( + ("pathRoot1_subPath2_subPath1" to 121), + ( + "pathRoot1_subPath2_subPath2" to hashMapOf( + ("pathRoot1_subPath2_subPath2_subPath1" to 1221) + ) + ) + ) + ) + ) + ), + ("pathRoot2" to hashMapOf(("pathRoot2_subPath1" to 21), ("pathRoot2_subPath2" to setOf(221, 222, "223string")))), + ("pathRoot3" to 3) + ) + + // WHEN + val result = redactFieldsFromResponse(mappedResponse, supportedJsonPayload) + + // THEN + assertEquals(expectedResponse, result) + } +} diff --git a/alerting/bin/test/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilServiceIT.kt b/alerting/bin/test/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilServiceIT.kt new file mode 100644 index 000000000..f9c40e465 --- /dev/null +++ b/alerting/bin/test/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilServiceIT.kt @@ -0,0 +1,104 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.util.destinationmigration + +import org.opensearch.alerting.AlertingRestTestCase +import org.opensearch.alerting.makeRequest +import org.opensearch.alerting.model.destination.Destination +import org.opensearch.alerting.model.destination.email.Email +import org.opensearch.alerting.model.destination.email.EmailAccount +import org.opensearch.alerting.model.destination.email.EmailEntry +import org.opensearch.alerting.model.destination.email.EmailGroup +import org.opensearch.alerting.model.destination.email.Recipient +import org.opensearch.alerting.randomUser +import org.opensearch.alerting.util.DestinationType +import org.opensearch.client.ResponseException +import 
org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX +import org.opensearch.core.rest.RestStatus +import java.time.Instant +import java.util.UUID + +class DestinationMigrationUtilServiceIT : AlertingRestTestCase() { + + fun `test migrateData`() { + if (isNotificationPluginInstalled()) { + // Create alerting config index + createRandomMonitor() + + val emailAccount = EmailAccount( + name = "test", + email = "test@email.com", + host = "smtp.com", + port = 25, + method = EmailAccount.MethodType.NONE, + username = null, + password = null + ) + val emailAccountDoc = "{\"email_account\" : ${emailAccount.toJsonString()}}" + val emailGroup = EmailGroup( + name = "test", + emails = listOf(EmailEntry("test@email.com")) + ) + val emailGroupDoc = "{\"email_group\" : ${emailGroup.toJsonString()}}" + val emailAccountId = UUID.randomUUID().toString() + val emailGroupId = UUID.randomUUID().toString() + indexDocWithAdminClient(SCHEDULED_JOBS_INDEX, emailAccountId, emailAccountDoc) + indexDocWithAdminClient(SCHEDULED_JOBS_INDEX, emailGroupId, emailGroupDoc) + + val recipient = Recipient(Recipient.RecipientType.EMAIL, null, "test@email.com") + val email = Email(emailAccountId, listOf(recipient)) + val emailDest = Destination( + id = UUID.randomUUID().toString(), + type = DestinationType.EMAIL, + name = "test", + user = randomUser(), + lastUpdateTime = Instant.now(), + chime = null, + slack = null, + customWebhook = null, + email = email + ) + val slackDestination = getSlackDestination().copy(id = UUID.randomUUID().toString()) + val chimeDestination = getChimeDestination().copy(id = UUID.randomUUID().toString()) + val customWebhookDestination = getCustomWebhookDestination().copy(id = UUID.randomUUID().toString()) + + val destinations = listOf(emailDest, slackDestination, chimeDestination, customWebhookDestination) + + val ids = mutableListOf(emailAccountId, emailGroupId) + for (destination in destinations) { + val dest = """ + { + "destination" : ${destination.toJsonString()} + } + """.trimIndent() + indexDocWithAdminClient(SCHEDULED_JOBS_INDEX, destination.id, dest) + ids.add(destination.id) + } + + // Create cluster change event and wait for migration service to complete migrating data over + client().updateSettings("indices.recovery.max_bytes_per_sec", "40mb") + Thread.sleep(120000) + + for (id in ids) { + val response = client().makeRequest( + "GET", + "_plugins/_notifications/configs/$id" + ) + assertEquals(RestStatus.OK, response.restStatus()) + + try { + client().makeRequest( + "GET", + ".opendistro-alerting-config/_doc/$id" + ) + fail("Expecting ResponseException") + } catch (e: ResponseException) { + assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) + } + } + } + } +} diff --git a/alerting/bin/test/plugin-security.policy b/alerting/bin/test/plugin-security.policy new file mode 100644 index 000000000..bcee5e9e6 --- /dev/null +++ b/alerting/bin/test/plugin-security.policy @@ -0,0 +1,8 @@ +grant { + // needed to find the classloader to load whitelisted classes. 
+ permission java.lang.RuntimePermission "createClassLoader"; + permission java.lang.RuntimePermission "getClassLoader"; + + permission java.net.SocketPermission "*", "connect,resolve"; + permission java.net.NetPermission "getProxySelector"; +}; diff --git a/alerting/bin/test/root-ca.pem b/alerting/bin/test/root-ca.pem new file mode 100644 index 000000000..4015d866e --- /dev/null +++ b/alerting/bin/test/root-ca.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIBATANBgkqhkiG9w0BAQsFADCBjzETMBEGCgmSJomT8ixk +ARkWA2NvbTEXMBUGCgmSJomT8ixkARkWB2V4YW1wbGUxGTAXBgNVBAoMEEV4YW1w +bGUgQ29tIEluYy4xITAfBgNVBAsMGEV4YW1wbGUgQ29tIEluYy4gUm9vdCBDQTEh +MB8GA1UEAwwYRXhhbXBsZSBDb20gSW5jLiBSb290IENBMB4XDTE4MDQyMjAzNDM0 +NloXDTI4MDQxOTAzNDM0NlowgY8xEzARBgoJkiaJk/IsZAEZFgNjb20xFzAVBgoJ +kiaJk/IsZAEZFgdleGFtcGxlMRkwFwYDVQQKDBBFeGFtcGxlIENvbSBJbmMuMSEw +HwYDVQQLDBhFeGFtcGxlIENvbSBJbmMuIFJvb3QgQ0ExITAfBgNVBAMMGEV4YW1w +bGUgQ29tIEluYy4gUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAK/u+GARP5innhpXK0c0q7s1Su1VTEaIgmZr8VWI6S8amf5cU3ktV7WT9SuV +TsAm2i2A5P+Ctw7iZkfnHWlsC3HhPUcd6mvzGZ4moxnamM7r+a9otRp3owYoGStX +ylVTQusAjbq9do8CMV4hcBTepCd+0w0v4h6UlXU8xjhj1xeUIz4DKbRgf36q0rv4 +VIX46X72rMJSETKOSxuwLkov1ZOVbfSlPaygXIxqsHVlj1iMkYRbQmaTib6XWHKf +MibDaqDejOhukkCjzpptGZOPFQ8002UtTTNv1TiaKxkjMQJNwz6jfZ53ws3fh1I0 +RWT6WfM4oeFRFnyFRmc4uYTUgAkCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAf +BgNVHSMEGDAWgBSSNQzgDx4rRfZNOfN7X6LmEpdAczAdBgNVHQ4EFgQUkjUM4A8e +K0X2TTnze1+i5hKXQHMwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4IB +AQBoQHvwsR34hGO2m8qVR9nQ5Klo5HYPyd6ySKNcT36OZ4AQfaCGsk+SecTi35QF +RHL3g2qffED4tKR0RBNGQSgiLavmHGCh3YpDupKq2xhhEeS9oBmQzxanFwWFod4T +nnsG2cCejyR9WXoRzHisw0KJWeuNlwjUdJY0xnn16srm1zL/M/f0PvCyh9HU1mF1 +ivnOSqbDD2Z7JSGyckgKad1Omsg/rr5XYtCeyJeXUPcmpeX6erWJJNTUh6yWC/hY +G/dFC4xrJhfXwz6Z0ytUygJO32bJG4Np2iGAwvvgI9EfxzEv/KP+FGrJOvQJAq4/ +BU36ZAa80W/8TBnqZTkNnqZV +-----END CERTIFICATE----- diff --git a/alerting/bin/test/sample.pem b/alerting/bin/test/sample.pem new file mode 100644 index 000000000..a1fc20a77 --- /dev/null +++ b/alerting/bin/test/sample.pem @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEPDCCAySgAwIBAgIUZjrlDPP8azRDPZchA/XEsx0X2iIwDQYJKoZIhvcNAQEL +BQAwgY8xEzARBgoJkiaJk/IsZAEZFgNjb20xFzAVBgoJkiaJk/IsZAEZFgdleGFt +cGxlMRkwFwYDVQQKDBBFeGFtcGxlIENvbSBJbmMuMSEwHwYDVQQLDBhFeGFtcGxl +IENvbSBJbmMuIFJvb3QgQ0ExITAfBgNVBAMMGEV4YW1wbGUgQ29tIEluYy4gUm9v +dCBDQTAeFw0yMzA4MjkwNDIzMTJaFw0zMzA4MjYwNDIzMTJaMFcxCzAJBgNVBAYT +AmRlMQ0wCwYDVQQHDAR0ZXN0MQ0wCwYDVQQKDARub2RlMQ0wCwYDVQQLDARub2Rl +MRswGQYDVQQDDBJub2RlLTAuZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCm93kXteDQHMAvbUPNPW5pyRHKDD42XGWSgq0k1D29C/Ud +yL21HLzTJa49ZU2ldIkSKs9JqbkHdyK0o8MO6L8dotLoYbxDWbJFW8bp1w6tDTU0 +HGkn47XVu3EwbfrTENg3jFu+Oem6a/501SzITzJWtS0cn2dIFOBimTVpT/4Zv5qr +XA6Cp4biOmoTYWhi/qQl8d0IaADiqoZ1MvZbZ6x76qTrRAbg+UWkpTEXoH1xTc8n +dibR7+HP6OTqCKvo1NhE8uP4pY+fWd6b6l+KLo3IKpfTbAIJXIO+M67FLtWKtttD +ao94B069skzKk6FPgW/OZh6PRCD0oxOavV+ld2SjAgMBAAGjgcYwgcMwRwYDVR0R +BEAwPogFKgMEBQWCEm5vZGUtMC5leGFtcGxlLmNvbYIJbG9jYWxob3N0hxAAAAAA +AAAAAAAAAAAAAAABhwR/AAABMAsGA1UdDwQEAwIF4DAdBgNVHSUEFjAUBggrBgEF +BQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU0/qDQaY10jIo +wCjLUpz/HfQXyt8wHwYDVR0jBBgwFoAUF4ffoFrrZhKn1dD4uhJFPLcrAJwwDQYJ +KoZIhvcNAQELBQADggEBAD2hkndVih6TWxoe/oOW0i2Bq7ScNO/n7/yHWL04HJmR +MaHv/Xjc8zLFLgHuHaRvC02ikWIJyQf5xJt0Oqu2GVbqXH9PBGKuEP2kCsRRyU27 +zTclAzfQhqmKBTYQ/3lJ3GhRQvXIdYTe+t4aq78TCawp1nSN+vdH/1geG6QjMn5N +1FU8tovDd4x8Ib/0dv8RJx+n9gytI8n/giIaDCEbfLLpe4EkV5e5UNpOnRgJjjuy 
+vtZutc81TQnzBtkS9XuulovDE0qI+jQrKkKu8xgGLhgH0zxnPkKtUg2I3Aq6zl1L
+zYkEOUF8Y25J6WeY88Yfnc0iigI+Pnz5NK8R9GL7TYo=
-----END CERTIFICATE-----
diff --git a/alerting/bin/test/test-kirk.jks b/alerting/bin/test/test-kirk.jks
new file mode 100644
index 0000000000000000000000000000000000000000..6dbc51e714784fa58a4209c75deab8b9ed1698ff
GIT binary patch
literal 4504
[4504-byte binary keystore blob elided; the encoded literal is not meaningful as reviewable text]
literal 0
HcmV?d00001
diff --git a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt
index bdaa7e1cc..7359d60ea 100644
--- a/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt
+++ b/alerting/src/main/kotlin/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt
@@ -26,6 +26,7 @@ import org.opensearch.commons.alerting.model.Workflow
 import org.opensearch.commons.authuser.User
 import org.opensearch.commons.utils.recreateObject
 import org.opensearch.core.action.ActionListener
+import org.opensearch.core.common.io.stream.NamedWriteableRegistry
 import org.opensearch.index.query.BoolQueryBuilder
 import org.opensearch.index.query.ExistsQueryBuilder
 import org.opensearch.index.query.MatchQueryBuilder
@@ -40,8 +41,8 @@ class TransportSearchMonitorAction @Inject constructor(
     val settings: Settings,
     val client: Client,
     clusterService: ClusterService,
-    actionFilters: ActionFilters
-
+    actionFilters: ActionFilters,
+    val namedWriteableRegistry: NamedWriteableRegistry
 ) : HandledTransportAction<ActionRequest, SearchResponse>(
     AlertingActions.SEARCH_MONITORS_ACTION_NAME, transportService, actionFilters, ::SearchMonitorRequest
 ),
@@ -54,7 +55,7 @@ class TransportSearchMonitorAction @Inject constructor(
     override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener<SearchResponse>) {
         val transformedRequest = request as?
SearchMonitorRequest - ?: recreateObject(request) { + ?: recreateObject(request, namedWriteableRegistry) { SearchMonitorRequest(it) } diff --git a/core/bin/main/mappings/doc-level-queries.json b/core/bin/main/mappings/doc-level-queries.json new file mode 100644 index 000000000..7f0602df7 --- /dev/null +++ b/core/bin/main/mappings/doc-level-queries.json @@ -0,0 +1,16 @@ +{ + "_meta": { + "schema_version": 1 + }, + "properties": { + "query": { + "type": "percolator_ext" + }, + "monitor_id": { + "type": "text" + }, + "index": { + "type": "text" + } + } +} \ No newline at end of file diff --git a/core/bin/main/mappings/scheduled-jobs.json b/core/bin/main/mappings/scheduled-jobs.json new file mode 100644 index 000000000..2651c862e --- /dev/null +++ b/core/bin/main/mappings/scheduled-jobs.json @@ -0,0 +1,694 @@ +{ + "_meta" : { + "schema_version": 8 + }, + "properties": { + "monitor": { + "dynamic": "false", + "properties": { + "schema_version": { + "type": "integer" + }, + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "owner": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "monitor_type": { + "type": "keyword" + }, + "user": { + "properties": { + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "backend_roles": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "roles": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "custom_attribute_names": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + } + } + }, + "type": { + "type": "keyword" + }, + "enabled": { + "type": "boolean" + }, + "enabled_time": { + "type": "date", + "format": "strict_date_time||epoch_millis" + }, + "last_update_time": { + "type": "date", + "format": "strict_date_time||epoch_millis" + }, + "schedule": { + "properties": { + "period": { + "properties": { + "interval": { + "type": "integer" + }, + "unit": { + "type": "keyword" + } + } + }, + "cron": { + "properties": { + "expression": { + "type": "text" + }, + "timezone": { + "type": "keyword" + } + } + } + } + }, + "inputs": { + "type": "nested", + "properties": { + "search": { + "properties": { + "indices": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "query": { + "type": "object", + "enabled": false + } + } + } + } + }, + "data_sources": { + "properties": { + "alerts_index": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "findings_index": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "query_index": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "query_index_mapping": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } + }, + "group_by_fields": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "triggers": { + "type": "nested", + "properties": { + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "min_time_between_executions": { + "type": "integer" + }, + "condition": { + "type": "object", + "enabled": false + }, + "actions": { + "type": "nested", + "properties": { + 
"name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "destination_id": { + "type": "keyword" + }, + "subject_template": { + "type": "object", + "enabled": false + }, + "message_template": { + "type": "object", + "enabled": false + }, + "throttle_enabled": { + "type": "boolean" + }, + "throttle": { + "properties": { + "value": { + "type": "integer" + }, + "unit": { + "type": "keyword" + } + } + } + } + }, + "query_level_trigger": { + "properties": { + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "min_time_between_executions": { + "type": "integer" + }, + "condition": { + "type": "object", + "enabled": false + }, + "actions": { + "type": "nested", + "properties": { + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "destination_id": { + "type": "keyword" + }, + "subject_template": { + "type": "object", + "enabled": false + }, + "message_template": { + "type": "object", + "enabled": false + }, + "throttle_enabled": { + "type": "boolean" + }, + "throttle": { + "properties": { + "value": { + "type": "integer" + }, + "unit": { + "type": "keyword" + } + } + } + } + } + } + } + } + }, + "ui_metadata": { + "type": "object", + "enabled": false + } + } + }, + "workflow": { + "dynamic": "false", + "properties": { + "schema_version": { + "type": "integer" + }, + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "owner": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "workflow_type": { + "type": "keyword" + }, + "user": { + "properties": { + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "backend_roles": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "roles": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "custom_attribute_names": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + } + } + }, + "type": { + "type": "keyword" + }, + "enabled": { + "type": "boolean" + }, + "audit_delegate_monitor_alerts": { + "type": "boolean" + }, + "enabled_time": { + "type": "date", + "format": "strict_date_time||epoch_millis" + }, + "last_update_time": { + "type": "date", + "format": "strict_date_time||epoch_millis" + }, + "schedule": { + "properties": { + "period": { + "properties": { + "interval": { + "type": "integer" + }, + "unit": { + "type": "keyword" + } + } + }, + "cron": { + "properties": { + "expression": { + "type": "text" + }, + "timezone": { + "type": "keyword" + } + } + } + } + }, + "inputs": { + "type": "nested", + "properties": { + "composite_input": { + "type": "nested", + "properties": { + "sequence": { + "properties": { + "delegates": { + "type": "nested", + "properties": { + "order": { + "type": "integer" + }, + "monitor_id": { + "type": "keyword" + }, + "chained_monitor_findings": { + "properties": { + "monitor_id": { + "type": "keyword" + } + } + } + } + } + } + } + } + } + } + }, + "group_by_fields": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } + }, + "destination": { + "dynamic": "false", + "properties": { + "schema_version": { + "type": "integer" + }, + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + 
"ignore_above": 256 + } + } + }, + "user": { + "properties": { + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "backend_roles": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "roles": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + }, + "custom_attribute_names": { + "type" : "text", + "fields" : { + "keyword" : { + "type" : "keyword" + } + } + } + } + }, + "type": { + "type": "keyword" + }, + "last_update_time": { + "type": "date", + "format": "strict_date_time||epoch_millis" + }, + "chime": { + "properties": { + "url": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } + }, + "slack": { + "properties": { + "url": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } + }, + "custom_webhook": { + "properties": { + "url": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "scheme": { + "type": "keyword" + }, + "host": { + "type": "text" + }, + "port": { + "type": "integer" + }, + "path": { + "type": "keyword" + }, + "query_params": { + "type": "object", + "enabled": false + }, + "header_params": { + "type": "object", + "enabled": false + }, + "username": { + "type": "text" + }, + "password": { + "type": "text" + } + } + }, + "email": { + "properties": { + "email_account_id": { + "type": "keyword" + }, + "recipients": { + "type": "nested", + "properties": { + "type": { + "type": "keyword" + }, + "email_group_id": { + "type": "keyword" + }, + "email": { + "type": "text" + } + } + } + } + } + } + }, + "email_account": { + "properties": { + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "host": { + "type": "text" + }, + "port": { + "type": "integer" + }, + "method": { + "type": "text" + }, + "from": { + "type": "text" + } + } + }, + "email_group": { + "properties": { + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "emails": { + "type": "nested", + "properties": { + "email": { + "type": "text" + } + } + } + } + }, + "metadata" : { + "properties": { + "monitor_id": { + "type": "keyword" + }, + "last_action_execution_times": { + "type": "nested", + "properties": { + "action_id": { + "type": "keyword" + }, + "execution_time": { + "type": "date", + "format": "strict_date_time||epoch_millis" + } + } + }, + "last_run_context": { + "type": "object", + "enabled": false + }, + "source_to_query_index_mapping": { + "type": "object", + "enabled": false + } + } + }, + "workflow_metadata" : { + "properties": { + "workflow_id": { + "type": "keyword" + }, + "monitor_ids": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 1000 + } + } + }, + "latest_run_time": { + "type": "date", + "format": "strict_date_time||epoch_millis" + }, + "latest_execution_id": { + "type": "keyword" + } + } + } + } +} diff --git a/core/bin/main/org/opensearch/alerting/core/JobRunner.kt b/core/bin/main/org/opensearch/alerting/core/JobRunner.kt new file mode 100644 index 000000000..c251c8c6a --- /dev/null +++ b/core/bin/main/org/opensearch/alerting/core/JobRunner.kt @@ -0,0 +1,17 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.core + +import 
org.opensearch.commons.alerting.model.ScheduledJob +import java.time.Instant + +interface JobRunner { + fun postDelete(jobId: String) + + fun postIndex(job: ScheduledJob) + + fun runJob(job: ScheduledJob, periodStart: Instant, periodEnd: Instant) +} diff --git a/core/bin/main/org/opensearch/alerting/core/JobSweeper.kt b/core/bin/main/org/opensearch/alerting/core/JobSweeper.kt new file mode 100644 index 000000000..6ba910707 --- /dev/null +++ b/core/bin/main/org/opensearch/alerting/core/JobSweeper.kt @@ -0,0 +1,512 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.core + +import org.apache.logging.log4j.LogManager +import org.opensearch.action.bulk.BackoffPolicy +import org.opensearch.action.search.SearchRequest +import org.opensearch.alerting.core.schedule.JobScheduler +import org.opensearch.alerting.core.settings.ScheduledJobSettings.Companion.REQUEST_TIMEOUT +import org.opensearch.alerting.core.settings.ScheduledJobSettings.Companion.SWEEPER_ENABLED +import org.opensearch.alerting.core.settings.ScheduledJobSettings.Companion.SWEEP_BACKOFF_MILLIS +import org.opensearch.alerting.core.settings.ScheduledJobSettings.Companion.SWEEP_BACKOFF_RETRY_COUNT +import org.opensearch.alerting.core.settings.ScheduledJobSettings.Companion.SWEEP_PAGE_SIZE +import org.opensearch.alerting.core.settings.ScheduledJobSettings.Companion.SWEEP_PERIOD +import org.opensearch.alerting.opensearchapi.firstFailureOrNull +import org.opensearch.alerting.opensearchapi.retry +import org.opensearch.client.Client +import org.opensearch.cluster.ClusterChangedEvent +import org.opensearch.cluster.ClusterStateListener +import org.opensearch.cluster.routing.IndexShardRoutingTable +import org.opensearch.cluster.routing.Murmur3HashFunction +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.lifecycle.LifecycleListener +import org.opensearch.common.logging.Loggers +import org.opensearch.common.lucene.uid.Versions +import org.opensearch.common.settings.Settings +import org.opensearch.common.unit.TimeValue +import org.opensearch.common.util.concurrent.OpenSearchExecutors +import org.opensearch.common.xcontent.LoggingDeprecationHandler +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.core.common.Strings +import org.opensearch.core.common.bytes.BytesReference +import org.opensearch.core.index.shard.ShardId +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.xcontent.NamedXContentRegistry +import org.opensearch.core.xcontent.XContentParser +import org.opensearch.core.xcontent.XContentParserUtils +import org.opensearch.index.engine.Engine +import org.opensearch.index.query.BoolQueryBuilder +import org.opensearch.index.query.QueryBuilders +import org.opensearch.index.shard.IndexingOperationListener +import org.opensearch.search.builder.SearchSourceBuilder +import org.opensearch.search.sort.FieldSortBuilder +import org.opensearch.threadpool.Scheduler +import org.opensearch.threadpool.ThreadPool +import java.util.TreeMap +import java.util.concurrent.ConcurrentHashMap +import java.util.concurrent.Executors + +typealias JobId = String +typealias JobVersion = Long + +/** + * 'Sweeping' is the process of listening for new and updated [ScheduledJob]s and deciding if they should be scheduled for + * execution on this node. 
The [JobSweeper] runs on every node, sweeping the active shards that are present on that node.
+ *
+ * A [consistent hash][ShardNodes] is used to distribute jobs across all nodes that contain an active instance of the same shard.
+ * This minimizes any interruptions in job execution when the cluster configuration changes.
+ *
+ * There are two types of sweeps:
+ * - *Full sweeps* occur when the [routing table][IndexShardRoutingTable] for the shard changes (e.g. a replica has been
+ *   added or removed). The full sweep re-reads all jobs in the shard, deciding which ones to run locally. All full sweeps
+ *   happen asynchronously in the background in a serial manner. See the [sweepAllShards] method.
+ * - *Single job sweeps* occur when a new version of the job is indexed or deleted. An [IndexingOperationListener] listens
+ *   for index changes and synchronously schedules or removes the job from the scheduler.
+ */
+class JobSweeper(
+    private val settings: Settings,
+    private val client: Client,
+    private val clusterService: ClusterService,
+    private val threadPool: ThreadPool,
+    private val xContentRegistry: NamedXContentRegistry,
+    private val scheduler: JobScheduler,
+    private val sweepableJobTypes: List<String>
+) : ClusterStateListener, IndexingOperationListener, LifecycleListener() {
+    private val logger = LogManager.getLogger(javaClass)
+
+    private val fullSweepExecutor = Executors.newSingleThreadExecutor(OpenSearchExecutors.daemonThreadFactory("opendistro_job_sweeper"))
+
+    private val sweptJobs = ConcurrentHashMap<ShardId, ConcurrentHashMap<JobId, JobVersion>>()
+
+    private var scheduledFullSweep: Scheduler.Cancellable? = null
+
+    @Volatile private var lastFullSweepTimeNano = System.nanoTime()
+
+    @Volatile private var requestTimeout = REQUEST_TIMEOUT.get(settings)
+
+    @Volatile private var sweepPeriod = SWEEP_PERIOD.get(settings)
+
+    @Volatile private var sweeperEnabled = SWEEPER_ENABLED.get(settings)
+
+    @Volatile private var sweepPageSize = SWEEP_PAGE_SIZE.get(settings)
+
+    @Volatile private var sweepBackoffMillis = SWEEP_BACKOFF_MILLIS.get(settings)
+
+    @Volatile private var sweepBackoffRetryCount = SWEEP_BACKOFF_RETRY_COUNT.get(settings)
+
+    @Volatile private var sweepSearchBackoff = BackoffPolicy.exponentialBackoff(sweepBackoffMillis, sweepBackoffRetryCount)
+
+    init {
+        clusterService.addListener(this)
+        clusterService.addLifecycleListener(this)
+        clusterService.clusterSettings.addSettingsUpdateConsumer(SWEEP_PERIOD) {
+            // if the sweep period changes, restart the background sweep with the new period
+            logger.debug("Reinitializing background full sweep with period: ${sweepPeriod.minutes()}")
+            sweepPeriod = it
+            initBackgroundSweep()
+        }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(SWEEPER_ENABLED) {
+            sweeperEnabled = it
+            if (!sweeperEnabled) disable() else enable()
+        }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(SWEEP_BACKOFF_MILLIS) {
+            sweepBackoffMillis = it
+            sweepSearchBackoff = BackoffPolicy.exponentialBackoff(sweepBackoffMillis, sweepBackoffRetryCount)
+        }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(SWEEP_BACKOFF_RETRY_COUNT) {
+            sweepBackoffRetryCount = it
+            sweepSearchBackoff = BackoffPolicy.exponentialBackoff(sweepBackoffMillis, sweepBackoffRetryCount)
+        }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(SWEEP_PAGE_SIZE) { sweepPageSize = it }
+        clusterService.clusterSettings.addSettingsUpdateConsumer(REQUEST_TIMEOUT) { requestTimeout = it }
+    }
+
+    override fun afterStart() {
+        initBackgroundSweep()
+    }
+
+    override fun beforeStop() {
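+        // Stop issuing new full sweeps; any sweep already submitted finishes on fullSweepExecutor, which is shut down in beforeClose()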
+        scheduledFullSweep?.cancel()
+    }
+
+    override fun beforeClose() {
+        fullSweepExecutor.shutdown()
+    }
+
+    /**
+     * Initiates a full sweep of all local shards when the index routing table is changed (e.g. when the node joins
+     * the cluster, or a replica is added, removed or promoted to primary).
+     *
+     * This callback won't be invoked concurrently since cluster state changes are applied serially to the node
+     * in the order they occur on the cluster manager. However, we can't block this callback for the duration of a full
+     * sweep, so we perform the sweep in the background on the single-threaded executor [fullSweepExecutor].
+     */
+    override fun clusterChanged(event: ClusterChangedEvent) {
+        if (!isSweepingEnabled()) return
+
+        if (!event.indexRoutingTableChanged(ScheduledJob.SCHEDULED_JOBS_INDEX)) return
+
+        logger.debug("Scheduled Jobs routing table changed. Running full sweep...")
+        fullSweepExecutor.submit {
+            sweepAllShards()
+        }
+    }
+
+    /**
+     * This callback is invoked when a new job (or a new version of a job) is indexed. If the job is assigned to this node
+     * it is scheduled. Relies on all indexing operations using optimistic concurrency control to ensure that stale versions
+     * of jobs are not scheduled. The job is scheduled only if its type is one of the [sweepableJobTypes].
+     */
+    override fun postIndex(shardId: ShardId, index: Engine.Index, result: Engine.IndexResult) {
+        if (!isSweepingEnabled()) return
+
+        if (result.resultType != Engine.Result.Type.SUCCESS) {
+            val shardJobs = sweptJobs[shardId] ?: emptyMap()
+            val currentVersion = shardJobs[index.id()] ?: Versions.NOT_FOUND
+            logger.debug("Indexing failed for ScheduledJob: ${index.id()}. Continuing with current version $currentVersion")
+            return
+        }
+
+        if (isOwningNode(shardId, index.id())) {
+            val xcp = XContentHelper.createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, index.source(), XContentType.JSON)
+            if (isSweepableJobType(xcp)) {
+                val job = parseAndSweepJob(xcp, shardId, index.id(), result.version, index.source(), true)
+                if (job != null) scheduler.postIndex(job)
+            } else {
+                logger.debug("Not a valid job type in document ${index.id()} to sweep.")
+            }
+        }
+    }
+
+    /**
+     * This callback is invoked when a job is deleted from a shard. The job is descheduled. Relies on all delete operations
+     * using optimistic concurrency control to ensure that stale versions of jobs are not scheduled.
+     */
+    override fun postDelete(shardId: ShardId, delete: Engine.Delete, result: Engine.DeleteResult) {
+        if (!isSweepingEnabled()) return
+
+        if (result.resultType != Engine.Result.Type.SUCCESS) {
+            val shardJobs = sweptJobs[shardId] ?: emptyMap()
+            val currentVersion = shardJobs[delete.id()] ?: Versions.NOT_FOUND
+            logger.debug("Deletion failed for ScheduledJob: ${delete.id()}. Continuing with current version $currentVersion")
+            return
+        }
+
+        if (isOwningNode(shardId, delete.id())) {
+            if (scheduler.scheduledJobs().contains(delete.id())) {
+                sweep(shardId, delete.id(), result.version, null)
+            }
+            scheduler.postDelete(delete.id())
+        }
+    }
+
+    fun enable() {
+        // initialize the background sweep
+        initBackgroundSweep()
+        // set the sweeperEnabled flag to true to make the listeners aware of this setting
+        sweeperEnabled = true
+    }
+
+    fun disable() {
+        // cancel the background sweep
+        scheduledFullSweep?.cancel()
+        // deschedule existing jobs on this node
+        logger.info("Descheduling all jobs as sweeping is disabled")
+        scheduler.deschedule(scheduler.scheduledJobs())
+        // set the sweeperEnabled flag to false to make the listeners aware of this setting
+        sweeperEnabled = false
+    }
+
+    fun isSweepingEnabled(): Boolean {
+        // Although this is a single-line check, it is kept as a separate function so the
+        // decision of whether to proceed with sweeping stays in one place
+        return sweeperEnabled == true
+    }
+
+    private fun initBackgroundSweep() {
+        // if sweeping is disabled, the background sweep should not be triggered
+        if (!isSweepingEnabled()) return
+
+        // cancel the existing background thread if present
+        scheduledFullSweep?.cancel()
+
+        // Manually sweep all shards before scheduling the background sweep so it picks up any changes immediately,
+        // since the first run of a task submitted with scheduleWithFixedDelay() happens after the interval has passed.
+        logger.debug("Performing sweep of scheduled jobs.")
+        fullSweepExecutor.submit {
+            sweepAllShards()
+        }
+
+        // Set up an anti-entropy/self-healing background sweep, in case a sweep that was triggered by an event fails.
+        val scheduledSweep = Runnable {
+            val elapsedTime = getFullSweepElapsedTime()
+
+            // Rate limit to at most one full sweep per sweep period.
+            // Scheduled runs may wake up a few milliseconds early; the delta below gives the
+            // check enough buffer that a slightly early wake-up still triggers a sweep.
+            val delta = sweepPeriod.millis - elapsedTime.millis
+            if (delta < 20L) { // give a 20ms buffer
+                fullSweepExecutor.submit {
+                    logger.debug("Performing background sweep of scheduled jobs.")
+                    sweepAllShards()
+                }
+            }
+        }
+        scheduledFullSweep = threadPool.scheduleWithFixedDelay(scheduledSweep, sweepPeriod, ThreadPool.Names.SAME)
+    }
+
+    private fun sweepAllShards() {
+        val clusterState = clusterService.state()
+        if (!clusterState.routingTable.hasIndex(ScheduledJob.SCHEDULED_JOBS_INDEX)) {
+            scheduler.deschedule(scheduler.scheduledJobs())
+            sweptJobs.clear()
+            lastFullSweepTimeNano = System.nanoTime()
+            return
+        }
+
+        // Find all shards that are currently assigned to this node.
+        val localNodeId = clusterState.nodes.localNodeId
+        val localShards = clusterState.routingTable.allShards(ScheduledJob.SCHEDULED_JOBS_INDEX)
+            // Find all active shards
+            .filter { it.active() }
+            // group by shardId
+            .groupBy { it.shardId() }
+            // assigned to the local node
+            .filter { (_, shards) -> shards.any { it.currentNodeId() == localNodeId } }
+
+        // Remove all jobs on shards that are no longer assigned to this node.
+        val removedShards = sweptJobs.keys - localShards.keys
+        removedShards.forEach { shardId ->
+            val shardJobs = sweptJobs.remove(shardId) ?: emptyMap()
+            scheduler.deschedule(shardJobs.keys)
+        }
+
+        // resweep all shards that are assigned to this node
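+        // For each shard, per-job ownership is decided by the ShardNodes consistent hash built from the nodes hosting an active copy of that shard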
+ localShards.forEach { (shardId, shards) -> + try { + sweepShard(shardId, ShardNodes(localNodeId, shards.map { it.currentNodeId() })) + } catch (e: Exception) { + val shardLogger = Loggers.getLogger(javaClass, shardId) + shardLogger.error("Error while sweeping shard $shardId", e) + } + } + lastFullSweepTimeNano = System.nanoTime() + } + + private fun sweepShard(shardId: ShardId, shardNodes: ShardNodes, startAfter: String = "") { + val logger = Loggers.getLogger(javaClass, shardId) + logger.debug("Sweeping shard $shardId") + + // Remove any jobs that are currently scheduled that are no longer owned by this node + val currentJobs = sweptJobs.getOrPut(shardId) { ConcurrentHashMap() } + currentJobs.keys.filterNot { shardNodes.isOwningNode(it) }.forEach { + scheduler.deschedule(it) + currentJobs.remove(it) + } + + // sweep the shard for new and updated jobs. Uses a search after query to paginate, assuming that any concurrent + // updates and deletes are handled by the index operation listener. + var searchAfter: String? = startAfter + while (searchAfter != null) { + val boolQueryBuilder = BoolQueryBuilder() + sweepableJobTypes.forEach { boolQueryBuilder.should(QueryBuilders.existsQuery(it)) } + val jobSearchRequest = SearchRequest() + .indices(ScheduledJob.SCHEDULED_JOBS_INDEX) + .preference("_shards:${shardId.id}|_only_local") + .source( + SearchSourceBuilder.searchSource() + .version(true) + .sort( + FieldSortBuilder("_id") + .unmappedType("keyword") + .missing("_last") + ) + .searchAfter(arrayOf(searchAfter)) + .size(sweepPageSize) + .query(boolQueryBuilder) + ) + + val response = sweepSearchBackoff.retry { + client.search(jobSearchRequest).actionGet(requestTimeout) + } + if (response.status() != RestStatus.OK) { + logger.error("Error sweeping shard $shardId.", response.firstFailureOrNull()) + return + } + for (hit in response.hits) { + if (shardNodes.isOwningNode(hit.id)) { + val xcp = XContentHelper.createParser( + xContentRegistry, + LoggingDeprecationHandler.INSTANCE, + hit.sourceRef, + XContentType.JSON + ) + parseAndSweepJob(xcp, shardId, hit.id, hit.version, hit.sourceRef) + } + } + searchAfter = response.hits.lastOrNull()?.id + } + } + + private fun sweep( + shardId: ShardId, + jobId: JobId, + newVersion: JobVersion, + job: ScheduledJob?, + failedToParse: Boolean = false + ) { + sweptJobs.getOrPut(shardId) { ConcurrentHashMap() } + // Use [compute] to update atomically in case another thread concurrently indexes/deletes the same job + .compute(jobId) { _, currentVersion -> + val jobCurrentlyScheduled = scheduler.scheduledJobs().contains(jobId) + + if (newVersion <= (currentVersion ?: Versions.NOT_FOUND)) { + if (unchangedJobToBeRescheduled(newVersion, currentVersion, jobCurrentlyScheduled, job)) { + logger.debug("Not skipping job $jobId since it is an unchanged job slated to be rescheduled") + } else { + logger.debug("Skipping job $jobId, $newVersion <= $currentVersion") + return@compute currentVersion + } + } + + // deschedule the currently scheduled version + if (jobCurrentlyScheduled) { + scheduler.deschedule(jobId) + } + + if (failedToParse) { + return@compute currentVersion + } + if (job != null) { + if (job.enabled) { + scheduler.schedule(job) + } + return@compute newVersion + } else { + return@compute null + } + } + } + + /* + * During the job sweep, normally jobs where the currentVersion is equal to the newVersion are skipped since + * there was no change. 
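+ * (For example, a background full sweep that re-reads version 7 of a job this node already has scheduled at version 7 leaves it untouched.)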
+ * + * However, there exists an edge-case where a job could have been de-scheduled by flipping [SWEEPER_ENABLED] + * to false and then not have undergone any changes when the sweeper is re-enabled. In this case, the job should + * not be skipped so it can be re-scheduled. This utility method checks for this condition so the sweep() method + * can account for it. + */ + private fun unchangedJobToBeRescheduled( + newVersion: JobVersion, + currentVersion: JobVersion?, + jobCurrentlyScheduled: Boolean, + job: ScheduledJob? + ): Boolean { + // newVersion should not be [Versions.NOT_FOUND] here since it's passed in from existing search hits + // or successful doc delete operations + val versionWasUnchanged = newVersion == (currentVersion ?: Versions.NOT_FOUND) + val jobEnabled = job?.enabled ?: false + + return versionWasUnchanged && !jobCurrentlyScheduled && jobEnabled + } + + private fun parseAndSweepJob( + xcp: XContentParser, + shardId: ShardId, + jobId: JobId, + jobVersion: JobVersion, + jobSource: BytesReference, + typeIsParsed: Boolean = false + ): ScheduledJob? { + return try { + val job = parseScheduledJob(xcp, jobId, jobVersion, typeIsParsed) + sweep(shardId, jobId, jobVersion, job) + job + } catch (e: Exception) { + logger.warn( + "Unable to parse ScheduledJob source: {}", + Strings.cleanTruncate(jobSource.utf8ToString(), 1000) + ) + sweep(shardId, jobId, jobVersion, null, true) + null + } + } + + private fun parseScheduledJob(xcp: XContentParser, jobId: JobId, jobVersion: JobVersion, typeIsParsed: Boolean): ScheduledJob { + return if (typeIsParsed) { + ScheduledJob.parse(xcp, xcp.currentName(), jobId, jobVersion) + } else { + ScheduledJob.parse(xcp, jobId, jobVersion) + } + } + + private fun getFullSweepElapsedTime(): TimeValue { + return TimeValue.timeValueNanos(System.nanoTime() - lastFullSweepTimeNano) + } + + fun getJobSweeperMetrics(): JobSweeperMetrics { + if (!isSweepingEnabled()) { + return JobSweeperMetrics(-1, true) + } + val elapsedTime = getFullSweepElapsedTime() + return JobSweeperMetrics(elapsedTime.millis, elapsedTime.millis <= sweepPeriod.millis) + } + + private fun isSweepableJobType(xcp: XContentParser): Boolean { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) + XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, xcp.nextToken(), xcp) + val jobType = xcp.currentName() + return sweepableJobTypes.contains(jobType) + } + + private fun isOwningNode(shardId: ShardId, jobId: JobId): Boolean { + val localNodeId = clusterService.localNode().id + val shardNodeIds = clusterService.state().routingTable.shardRoutingTable(shardId) + .filter { it.active() } + .map { it.currentNodeId() } + val shardNodes = ShardNodes(localNodeId, shardNodeIds) + return shardNodes.isOwningNode(jobId) + } +} + +/** + * A group of nodes in the cluster that contain active instances of a single OpenSearch shard. This uses a consistent hash to divide + * the jobs indexed in that shard amongst the nodes such that each job is "owned" by exactly one of the nodes. + * The local node must have an active instance of the shard. + * + * Implementation notes: This class is not thread safe. It uses the same [hash function][Murmur3HashFunction] that OpenSearch uses + * for routing. For each real node `100` virtual nodes are added to provide a good distribution. 
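+ *
+ * As a rough standalone sketch of the idea (hypothetical, not part of this class):
+ * ```
+ * val ring = TreeMap<Int, String>()
+ * for (node in listOf("node-a", "node-b")) {
+ *     repeat(100) { i -> ring[Murmur3HashFunction.hash(node + i)] = node }
+ * }
+ * // A job is owned by the first virtual node at a hash above its own, wrapping around the ring.
+ * val owner = (ring.higherEntry(Murmur3HashFunction.hash("job-1")) ?: ring.firstEntry()).value
+ * ```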
+ */
+private class ShardNodes(val localNodeId: String, activeShardNodeIds: Collection<String>) {
+
+    private val circle = TreeMap<Int, String>()
+
+    companion object {
+        private const val VIRTUAL_NODE_COUNT = 100
+    }
+
+    init {
+        for (node in activeShardNodeIds) {
+            for (i in 0 until VIRTUAL_NODE_COUNT) {
+                circle[Murmur3HashFunction.hash(node + i)] = node
+            }
+        }
+    }
+
+    fun isOwningNode(id: JobId): Boolean {
+        if (circle.isEmpty()) {
+            return false
+        }
+        val hash = Murmur3HashFunction.hash(id)
+        val nodeId = (circle.higherEntry(hash) ?: circle.firstEntry()).value
+        return (localNodeId == nodeId)
+    }
+}
diff --git a/core/bin/main/org/opensearch/alerting/core/JobSweeperMetrics.kt b/core/bin/main/org/opensearch/alerting/core/JobSweeperMetrics.kt
new file mode 100644
index 000000000..9a10586d1
--- /dev/null
+++ b/core/bin/main/org/opensearch/alerting/core/JobSweeperMetrics.kt
@@ -0,0 +1,29 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.core
+
+import org.opensearch.core.common.io.stream.StreamInput
+import org.opensearch.core.common.io.stream.StreamOutput
+import org.opensearch.core.common.io.stream.Writeable
+import org.opensearch.core.xcontent.ToXContent
+import org.opensearch.core.xcontent.ToXContentFragment
+import org.opensearch.core.xcontent.XContentBuilder
+
+data class JobSweeperMetrics(val lastFullSweepTimeMillis: Long, val fullSweepOnTime: Boolean) : ToXContentFragment, Writeable {
+
+    constructor(si: StreamInput) : this(si.readLong(), si.readBoolean())
+
+    override fun writeTo(out: StreamOutput) {
+        out.writeLong(lastFullSweepTimeMillis)
+        out.writeBoolean(fullSweepOnTime)
+    }
+
+    override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder {
+        builder.field("last_full_sweep_time_millis", lastFullSweepTimeMillis)
+        builder.field("full_sweep_on_time", fullSweepOnTime)
+        return builder
+    }
+}
diff --git a/core/bin/main/org/opensearch/alerting/core/ScheduledJobIndices.kt b/core/bin/main/org/opensearch/alerting/core/ScheduledJobIndices.kt
new file mode 100644
index 000000000..a71a7e64f
--- /dev/null
+++ b/core/bin/main/org/opensearch/alerting/core/ScheduledJobIndices.kt
@@ -0,0 +1,65 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.core
+
+import org.opensearch.action.admin.indices.create.CreateIndexRequest
+import org.opensearch.action.admin.indices.create.CreateIndexResponse
+import org.opensearch.client.AdminClient
+import org.opensearch.cluster.health.ClusterIndexHealth
+import org.opensearch.cluster.service.ClusterService
+import org.opensearch.common.settings.Settings
+import org.opensearch.commons.alerting.model.ScheduledJob
+import org.opensearch.core.action.ActionListener
+
+/**
+ * Initialize the OpenSearch components required to run [ScheduledJobs].
+ *
+ * [initScheduledJobIndex] is called before indexing a new scheduled job. It verifies that the index exists before
+ * allowing the index to go through. This is to ensure the correct mappings exist for [ScheduledJob].
+ */
+class ScheduledJobIndices(private val client: AdminClient, private val clusterService: ClusterService) {
+
+    companion object {
+        @JvmStatic
+        fun scheduledJobMappings(): String {
+            return ScheduledJobIndices::class.java.classLoader.getResource("mappings/scheduled-jobs.json").readText()
+        }
+    }
+    /**
+     * Initialize the indices required for scheduled jobs.
+ * First check if the index exists, and if not create the index with the provided callback listeners. + * + * @param actionListener A callback listener for the index creation call. Generally in the form of onSuccess, onFailure + */ + fun initScheduledJobIndex(actionListener: ActionListener) { + if (!scheduledJobIndexExists()) { + var indexRequest = CreateIndexRequest(ScheduledJob.SCHEDULED_JOBS_INDEX) + .mapping(scheduledJobMappings()) + .settings(Settings.builder().put("index.hidden", true).build()) + client.indices().create(indexRequest, actionListener) + } + } + + fun scheduledJobIndexExists(): Boolean { + val clusterState = clusterService.state() + return clusterState.routingTable.hasIndex(ScheduledJob.SCHEDULED_JOBS_INDEX) + } + + /** + * Check if the index exists. If the index does not exist, return null. + */ + fun scheduledJobIndexHealth(): ClusterIndexHealth? { + var indexHealth: ClusterIndexHealth? = null + + if (scheduledJobIndexExists()) { + val indexRoutingTable = clusterService.state().routingTable.index(ScheduledJob.SCHEDULED_JOBS_INDEX) + val indexMetaData = clusterService.state().metadata().index(ScheduledJob.SCHEDULED_JOBS_INDEX) + + indexHealth = ClusterIndexHealth(indexMetaData, indexRoutingTable) + } + return indexHealth + } +} diff --git a/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobStats.kt b/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobStats.kt new file mode 100644 index 000000000..07792d553 --- /dev/null +++ b/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobStats.kt @@ -0,0 +1,88 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.core.action.node + +import org.opensearch.action.support.nodes.BaseNodeResponse +import org.opensearch.alerting.core.JobSweeperMetrics +import org.opensearch.alerting.core.resthandler.RestScheduledJobStatsHandler +import org.opensearch.alerting.core.schedule.JobSchedulerMetrics +import org.opensearch.cluster.node.DiscoveryNode +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentFragment +import org.opensearch.core.xcontent.XContentBuilder +import java.util.Locale + +/** + * Scheduled job stat that will be generated by each node. + */ +class ScheduledJobStats : BaseNodeResponse, ToXContentFragment { + + enum class ScheduleStatus(val status: String) { + RED("red"), + GREEN("green"); + + override fun toString(): String { + return status + } + } + + var status: ScheduleStatus + var jobSweeperMetrics: JobSweeperMetrics? = null + var jobInfos: Array? = null + + constructor(si: StreamInput) : super(si) { + this.status = si.readEnum(ScheduleStatus::class.java) + this.jobSweeperMetrics = si.readOptionalWriteable { JobSweeperMetrics(it) } + this.jobInfos = si.readOptionalArray({ sti: StreamInput -> JobSchedulerMetrics(sti) }, { size -> arrayOfNulls(size) }) + } + + constructor( + node: DiscoveryNode, + status: ScheduleStatus, + jobSweeperMetrics: JobSweeperMetrics?, + jobsInfo: Array? 
+ ) : super(node) { + this.status = status + this.jobSweeperMetrics = jobSweeperMetrics + this.jobInfos = jobsInfo + } + + companion object { + @JvmStatic + fun readScheduledJobStatus(si: StreamInput) = ScheduledJobStats(si) + } + + override fun writeTo(out: StreamOutput) { + super.writeTo(out) + out.writeEnum(status) + out.writeOptionalWriteable(jobSweeperMetrics) + out.writeOptionalArray(jobInfos) + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.field("name", node.name) + builder.field("schedule_status", status) + builder.field("roles", node.roles.map { it.roleName().uppercase(Locale.getDefault()) }) + if (jobSweeperMetrics != null) { + builder.startObject(RestScheduledJobStatsHandler.JOB_SCHEDULING_METRICS) + jobSweeperMetrics!!.toXContent(builder, params) + builder.endObject() + } + + if (jobInfos != null) { + builder.startObject(RestScheduledJobStatsHandler.JOBS_INFO) + for (job in jobInfos!!) { + builder.startObject(job.scheduledJobId) + job.toXContent(builder, params) + builder.endObject() + } + builder.endObject() + } + return builder + } +} diff --git a/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsAction.kt b/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsAction.kt new file mode 100644 index 000000000..698c6c44e --- /dev/null +++ b/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsAction.kt @@ -0,0 +1,25 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.core.action.node + +import org.opensearch.action.ActionType +import org.opensearch.core.common.io.stream.Writeable + +class ScheduledJobsStatsAction : ActionType(NAME, reader) { + companion object { + val INSTANCE = ScheduledJobsStatsAction() + const val NAME = "cluster:admin/opendistro/_scheduled_jobs/stats" + + val reader = Writeable.Reader { + val response = ScheduledJobsStatsResponse(it) + response + } + } + + override fun getResponseReader(): Writeable.Reader { + return reader + } +} diff --git a/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsRequest.kt b/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsRequest.kt new file mode 100644 index 000000000..6a82e8204 --- /dev/null +++ b/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsRequest.kt @@ -0,0 +1,45 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.core.action.node + +import org.opensearch.action.support.nodes.BaseNodesRequest +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import java.io.IOException + +/** + * A request to get node (cluster) level ScheduledJobsStatus. + * By default all the parameters will be true. 
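+ *
+ * A minimal usage sketch (the node id is hypothetical):
+ * ```
+ * val request = ScheduledJobsStatsRequest(arrayOf("node-1"))
+ * request.clear()          // turn every metric off ...
+ * request.jobsInfo = true  // ... then opt back in to jobs_info only
+ * ```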
+ */
+class ScheduledJobsStatsRequest : BaseNodesRequest<ScheduledJobsStatsRequest> {
+    var jobSchedulingMetrics: Boolean = true
+    var jobsInfo: Boolean = true
+
+    constructor(si: StreamInput) : super(si) {
+        jobSchedulingMetrics = si.readBoolean()
+        jobsInfo = si.readBoolean()
+    }
+    constructor(nodeIds: Array<String>) : super(*nodeIds)
+
+    @Throws(IOException::class)
+    override fun writeTo(out: StreamOutput) {
+        super.writeTo(out)
+        out.writeBoolean(jobSchedulingMetrics)
+        out.writeBoolean(jobsInfo)
+    }
+
+    fun all(): ScheduledJobsStatsRequest {
+        jobSchedulingMetrics = true
+        jobsInfo = true
+        return this
+    }
+
+    fun clear(): ScheduledJobsStatsRequest {
+        jobSchedulingMetrics = false
+        jobsInfo = false
+        return this
+    }
+}
diff --git a/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsResponse.kt b/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsResponse.kt
new file mode 100644
index 000000000..edfcc0cce
--- /dev/null
+++ b/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsResponse.kt
@@ -0,0 +1,78 @@
+/*
+ * Copyright OpenSearch Contributors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package org.opensearch.alerting.core.action.node
+
+import org.opensearch.action.FailedNodeException
+import org.opensearch.action.support.nodes.BaseNodesResponse
+import org.opensearch.alerting.core.settings.LegacyOpenDistroScheduledJobSettings
+import org.opensearch.alerting.core.settings.ScheduledJobSettings
+import org.opensearch.cluster.ClusterName
+import org.opensearch.cluster.health.ClusterIndexHealth
+import org.opensearch.core.common.io.stream.StreamInput
+import org.opensearch.core.common.io.stream.StreamOutput
+import org.opensearch.core.xcontent.ToXContent
+import org.opensearch.core.xcontent.ToXContentFragment
+import org.opensearch.core.xcontent.XContentBuilder
+
+/**
+ * ScheduledJobsStatsResponse is a class that contains the responses from every node.
+ */
+class ScheduledJobsStatsResponse : BaseNodesResponse<ScheduledJobStats>, ToXContentFragment {
+
+    private var scheduledJobEnabled: Boolean = false
+    private var indexExists: Boolean? = null
+    private var indexHealth: ClusterIndexHealth? = null
+
+    constructor(si: StreamInput) : super(si) {
+        this.scheduledJobEnabled = si.readBoolean()
+        this.indexExists = si.readBoolean()
+        this.indexHealth = si.readOptionalWriteable { ClusterIndexHealth(si) }
+    }
+
+    constructor(
+        clusterName: ClusterName,
+        nodeResponses: List<ScheduledJobStats>,
+        failures: List<FailedNodeException>,
+        scheduledJobEnabled: Boolean,
+        indexExists: Boolean,
+        indexHealth: ClusterIndexHealth?
+ ) : super(clusterName, nodeResponses, failures) { + this.scheduledJobEnabled = scheduledJobEnabled + this.indexExists = indexExists + this.indexHealth = indexHealth + } + + override fun writeNodesTo( + out: StreamOutput, + nodes: MutableList + ) { + out.writeList(nodes) + } + + override fun readNodesFrom(si: StreamInput): MutableList { + return si.readList { ScheduledJobStats.readScheduledJobStatus(it) } + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + builder.field(LegacyOpenDistroScheduledJobSettings.SWEEPER_ENABLED.key, scheduledJobEnabled) + builder.field(ScheduledJobSettings.SWEEPER_ENABLED.key, scheduledJobEnabled) + builder.field("scheduled_job_index_exists", indexExists) + builder.field("scheduled_job_index_status", indexHealth?.status?.name?.lowercase()) + val nodesOnSchedule = nodes.count { it.status == ScheduledJobStats.ScheduleStatus.GREEN } + val nodesNotOnSchedule = nodes.count { it.status == ScheduledJobStats.ScheduleStatus.RED } + builder.field("nodes_on_schedule", nodesOnSchedule) + builder.field("nodes_not_on_schedule", nodesNotOnSchedule) + builder.startObject("nodes") + for (scheduledJobStatus in nodes) { + builder.startObject(scheduledJobStatus.node.id) + scheduledJobStatus.toXContent(builder, params) + builder.endObject() + } + builder.endObject() + + return builder + } +} diff --git a/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsTransportAction.kt b/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsTransportAction.kt new file mode 100644 index 000000000..ac6f8f3a1 --- /dev/null +++ b/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsTransportAction.kt @@ -0,0 +1,139 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.core.action.node + +import org.apache.logging.log4j.LogManager +import org.opensearch.action.FailedNodeException +import org.opensearch.action.support.ActionFilters +import org.opensearch.action.support.nodes.BaseNodeRequest +import org.opensearch.action.support.nodes.TransportNodesAction +import org.opensearch.alerting.core.JobSweeper +import org.opensearch.alerting.core.JobSweeperMetrics +import org.opensearch.alerting.core.ScheduledJobIndices +import org.opensearch.alerting.core.schedule.JobScheduler +import org.opensearch.alerting.core.schedule.JobSchedulerMetrics +import org.opensearch.cluster.health.ClusterIndexHealth +import org.opensearch.cluster.service.ClusterService +import org.opensearch.common.inject.Inject +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.threadpool.ThreadPool +import org.opensearch.transport.TransportService +import java.io.IOException + +private val log = LogManager.getLogger(ScheduledJobsStatsTransportAction::class.java) + +class ScheduledJobsStatsTransportAction : TransportNodesAction { + + private val jobSweeper: JobSweeper + private val jobScheduler: JobScheduler + private val scheduledJobIndices: ScheduledJobIndices + + @Inject + constructor( + threadPool: ThreadPool, + clusterService: ClusterService, + transportService: TransportService, + actionFilters: ActionFilters, + jobSweeper: JobSweeper, + jobScheduler: JobScheduler, + scheduledJobIndices: ScheduledJobIndices + ) : super( + ScheduledJobsStatsAction.NAME, + threadPool, + clusterService, + transportService, + actionFilters, + { ScheduledJobsStatsRequest(it) }, + { 
ScheduledJobStatusRequest(it) }, + ThreadPool.Names.MANAGEMENT, + ScheduledJobStats::class.java + ) { + this.jobSweeper = jobSweeper + this.jobScheduler = jobScheduler + this.scheduledJobIndices = scheduledJobIndices + } + + override fun newNodeRequest(request: ScheduledJobsStatsRequest): ScheduledJobStatusRequest { + return ScheduledJobStatusRequest(request) + } + + override fun newNodeResponse(si: StreamInput): ScheduledJobStats { + return ScheduledJobStats(si) + } + + override fun newResponse( + request: ScheduledJobsStatsRequest, + responses: MutableList, + failures: MutableList + ): ScheduledJobsStatsResponse { + val scheduledJobEnabled = jobSweeper.isSweepingEnabled() + val scheduledJobIndexExist = scheduledJobIndices.scheduledJobIndexExists() + val indexHealth: ClusterIndexHealth? = if (scheduledJobIndexExist) scheduledJobIndices.scheduledJobIndexHealth() else null + + return ScheduledJobsStatsResponse( + clusterService.clusterName, + responses, + failures, + scheduledJobEnabled, + scheduledJobIndexExist, + indexHealth + ) + } + + override fun nodeOperation(request: ScheduledJobStatusRequest): ScheduledJobStats { + return createScheduledJobStatus(request.request) + } + + private fun createScheduledJobStatus( + scheduledJobsStatusRequest: ScheduledJobsStatsRequest + ): ScheduledJobStats { + val jobSweeperMetrics = jobSweeper.getJobSweeperMetrics() + val jobSchedulerMetrics = jobScheduler.getJobSchedulerMetric() + + val status: ScheduledJobStats.ScheduleStatus = evaluateStatus(jobSchedulerMetrics, jobSweeperMetrics) + return ScheduledJobStats( + this.transportService.localNode, + status, + if (scheduledJobsStatusRequest.jobSchedulingMetrics) jobSweeperMetrics else null, + if (scheduledJobsStatusRequest.jobsInfo) jobSchedulerMetrics.toTypedArray() else null + ) + } + + private fun evaluateStatus( + jobsInfo: List, + jobSweeperMetrics: JobSweeperMetrics + ): ScheduledJobStats.ScheduleStatus { + val allJobsRunningOnTime = jobsInfo.all { it.runningOnTime } + if (allJobsRunningOnTime && jobSweeperMetrics.fullSweepOnTime) { + return ScheduledJobStats.ScheduleStatus.GREEN + } + log.info("Jobs Running on time: $allJobsRunningOnTime, Sweeper on time: ${jobSweeperMetrics.fullSweepOnTime}") + return ScheduledJobStats.ScheduleStatus.RED + } + + class ScheduledJobStatusRequest : BaseNodeRequest { + + lateinit var request: ScheduledJobsStatsRequest + + constructor() : super() + + constructor(si: StreamInput) : super(si) { + request = ScheduledJobsStatsRequest(si) + } + + constructor(request: ScheduledJobsStatsRequest) : super() { + this.request = request + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + super.writeTo(out) + request.writeTo(out) + } + } +} diff --git a/core/bin/main/org/opensearch/alerting/core/resthandler/RestScheduledJobStatsHandler.kt b/core/bin/main/org/opensearch/alerting/core/resthandler/RestScheduledJobStatsHandler.kt new file mode 100644 index 000000000..c4f800ab3 --- /dev/null +++ b/core/bin/main/org/opensearch/alerting/core/resthandler/RestScheduledJobStatsHandler.kt @@ -0,0 +1,121 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.core.resthandler + +import org.opensearch.alerting.core.action.node.ScheduledJobsStatsAction +import org.opensearch.alerting.core.action.node.ScheduledJobsStatsRequest +import org.opensearch.client.node.NodeClient +import org.opensearch.core.common.Strings +import org.opensearch.rest.BaseRestHandler +import org.opensearch.rest.RestHandler 
+import org.opensearch.rest.RestHandler.Route +import org.opensearch.rest.RestRequest +import org.opensearch.rest.RestRequest.Method.GET +import org.opensearch.rest.action.RestActions +import java.util.Locale +import java.util.TreeSet + +/** + * RestScheduledJobStatsHandler is handler for getting ScheduledJob Stats. + */ +class RestScheduledJobStatsHandler(private val path: String) : BaseRestHandler() { + + companion object { + const val JOB_SCHEDULING_METRICS: String = "job_scheduling_metrics" + const val JOBS_INFO: String = "jobs_info" + private val METRICS = mapOf Unit>( + JOB_SCHEDULING_METRICS to { it -> it.jobSchedulingMetrics = true }, + JOBS_INFO to { it -> it.jobsInfo = true } + ) + } + + override fun getName(): String { + return "${path}_jobs_stats" + } + + override fun routes(): List { + return listOf() + } + + override fun replacedRoutes(): MutableList { + return mutableListOf( + RestHandler.ReplacedRoute( + GET, + "/_plugins/$path/{nodeId}/stats/", + GET, + "/_opendistro/$path/{nodeId}/stats/" + ), + RestHandler.ReplacedRoute( + GET, + "/_plugins/$path/{nodeId}/stats/{metric}", + GET, + "/_opendistro/$path/{nodeId}/stats/{metric}" + ), + RestHandler.ReplacedRoute( + GET, + "/_plugins/$path/stats/", + GET, + "/_opendistro/$path/stats/" + ), + RestHandler.ReplacedRoute( + GET, + "/_plugins/$path/stats/{metric}", + GET, + "/_opendistro/$path/stats/{metric}" + ) + ) + } + + override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { + val scheduledJobNodesStatsRequest = getRequest(request) + return RestChannelConsumer { channel -> + client.execute( + ScheduledJobsStatsAction.INSTANCE, + scheduledJobNodesStatsRequest, + RestActions.NodesResponseRestListener(channel) + ) + } + } + + private fun getRequest(request: RestRequest): ScheduledJobsStatsRequest { + val nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")) + val metrics = Strings.tokenizeByCommaToSet(request.param("metric")) + val scheduledJobsStatsRequest = ScheduledJobsStatsRequest(nodesIds) + scheduledJobsStatsRequest.timeout(request.param("timeout")) + + if (metrics.isEmpty()) { + return scheduledJobsStatsRequest + } else if (metrics.size == 1 && metrics.contains("_all")) { + scheduledJobsStatsRequest.all() + } else if (metrics.contains("_all")) { + throw IllegalArgumentException( + String.format( + Locale.ROOT, + "request [%s] contains _all and individual metrics [%s]", + request.path(), + request.param("metric") + ) + ) + } else { + // use a sorted set so the unrecognized parameters appear in a reliable sorted order + scheduledJobsStatsRequest.clear() + val invalidMetrics = TreeSet() + for (metric in metrics) { + val handler = METRICS[metric] + if (handler != null) { + handler.invoke(scheduledJobsStatsRequest) + } else { + invalidMetrics.add(metric) + } + } + + if (!invalidMetrics.isEmpty()) { + throw IllegalArgumentException(unrecognized(request, invalidMetrics, METRICS.keys, "metric")) + } + } + return scheduledJobsStatsRequest + } +} diff --git a/core/bin/main/org/opensearch/alerting/core/schedule/JobScheduler.kt b/core/bin/main/org/opensearch/alerting/core/schedule/JobScheduler.kt new file mode 100644 index 000000000..a4a729121 --- /dev/null +++ b/core/bin/main/org/opensearch/alerting/core/schedule/JobScheduler.kt @@ -0,0 +1,228 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.core.schedule + +import org.apache.logging.log4j.LogManager +import org.opensearch.alerting.core.JobRunner +import 
org.opensearch.common.unit.TimeValue
+import org.opensearch.commons.alerting.model.ScheduledJob
+import org.opensearch.threadpool.Scheduler
+import org.opensearch.threadpool.ThreadPool
+import java.time.Duration
+import java.time.Instant
+import java.util.concurrent.ConcurrentHashMap
+import java.util.concurrent.TimeUnit
+import java.util.stream.Collectors
+
+/**
+ * JobScheduler is a class for scheduling and descheduling ScheduledJobs. This class keeps a list of the ScheduledJob ids that are currently scheduled.
+ *
+ * JobScheduler is unaware of the ScheduledJob version, and it is up to callers to deschedule the older version of a ScheduledJob and then schedule the new version.
+ */
+class JobScheduler(private val threadPool: ThreadPool, private val jobRunner: JobRunner) {
+    private val logger = LogManager.getLogger(JobScheduler::class.java)
+
+    /**
+     * Map of ScheduledJob id to the [ScheduledJobInfo] of that ScheduledJob.
+     */
+    private val scheduledJobIdToInfo = ConcurrentHashMap<String, ScheduledJobInfo>()
+
+    /**
+     * Schedules the jobs in [jobsToSchedule] for execution.
+     *
+     * @return List of jobs that could not be scheduled
+     */
+    fun schedule(vararg jobsToSchedule: ScheduledJob): List<ScheduledJob> {
+        return jobsToSchedule.filter {
+            !this.schedule(it)
+        }
+    }
+
+    /**
+     * Schedules a single [scheduledJob].
+     *
+     * [schedule] does not check for a new version of the ScheduledJob.
+     * The caller should be aware of updates to the [ScheduledJob] and, if the job version is updated, must first call [deschedule] followed by [schedule].
+     *
+     * [schedule] is considered successful when
+     * 1. The cron expression is out of schedule, e.g. a date in the past such as the year 2016.
+     * 2. The schedule already exists. This keeps the function idempotent.
+     * 3. We are able to schedule the job via [ThreadPool.schedule].
+     *
+     * [schedule] is considered unsuccessful when
+     * 1. The schedule is disabled.
+     * 2. In the rare race condition where the scheduledJob is already marked [ScheduledJobInfo.descheduled] at the time of the [ThreadPool.schedule] call.
+     * 3. Any unexpected failure occurs.
+     *
+     * @return true if the ScheduledJob is scheduled successfully;
+     *         false otherwise.
+     */
+    fun schedule(scheduledJob: ScheduledJob): Boolean {
+        logger.info("Scheduling jobId : ${scheduledJob.id}, name: ${scheduledJob.name}")
+
+        if (!scheduledJob.enabled) {
+            // do not schedule a disabled ScheduledJob. The caller should also check this before calling this function.
+            return false
+        }
+
+        val scheduledJobInfo = scheduledJobIdToInfo.getOrPut(scheduledJob.id) {
+            ScheduledJobInfo(scheduledJob.id, scheduledJob)
+        }
+        if (scheduledJobInfo.scheduledCancellable != null) {
+            // This means that the given ScheduledJob already has a schedule running. We should not schedule it again.
+            return true
+        }
+
+        // Start the first schedule.
+        return this.reschedule(scheduledJob, scheduledJobInfo)
+    }
+
+    /**
+     * Deschedules the jobs with the given ScheduledJob [ids].
+     *
+     * The caller should retry any [deschedule] calls that failed.
+     *
+     * @return List of job ids that failed to deschedule.
+     */
+    fun deschedule(ids: Collection<String>): List<String> {
+        return ids.filter {
+            !this.deschedule(it)
+        }.also {
+            if (it.isNotEmpty()) {
+                logger.error("Unable to deschedule jobs $it")
+            }
+        }
+    }
+
+    /**
+     * Mark the scheduledJob as descheduled and try to cancel any future schedule for the given scheduledJob id.
+     *
+     * [deschedule] is considered successful when
+     * 1. The ScheduledJob id does not exist.
+     * 2. The ScheduledJob is complete.
+     * 3. The ScheduledJob is not complete and is successfully cancelled.
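+     *
+     * For example (the job id is hypothetical):
+     * ```
+     * if (!jobScheduler.deschedule("job-1")) {
+     *     // cancellation raced with an in-flight run; retry later
+     * }
+     * ```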
+     *
+     * The caller should retry if [deschedule] of a ScheduledJob fails.
+     *
+     * @return true if the job is successfully descheduled;
+     *         false otherwise.
+     */
+    fun deschedule(id: String): Boolean {
+        val scheduledJobInfo = scheduledJobIdToInfo[id]
+        if (scheduledJobInfo == null) {
+            logger.info("JobId $id does not exist.")
+            return true
+        } else {
+            logger.info("Descheduling jobId : $id")
+            scheduledJobInfo.descheduled = true
+            scheduledJobInfo.actualPreviousExecutionTime = null
+            scheduledJobInfo.expectedNextExecutionTime = null
+            var result = true
+            val scheduledFuture = scheduledJobInfo.scheduledCancellable
+
+            if (scheduledFuture != null && !scheduledFuture.isCancelled) {
+                result = scheduledFuture.cancel()
+            }
+
+            if (result) {
+                // If we have successfully descheduled the job, remove it from the info map.
+                scheduledJobIdToInfo.remove(scheduledJobInfo.scheduledJobId, scheduledJobInfo)
+            }
+            return result
+        }
+    }
+
+    /**
+     * @return the set of job ids that are currently scheduled.
+     */
+    fun scheduledJobs(): Set<String> {
+        return scheduledJobIdToInfo.keys
+    }
+
+    private fun reschedule(scheduleJob: ScheduledJob, scheduledJobInfo: ScheduledJobInfo): Boolean {
+        if (scheduleJob.enabledTime == null) {
+            logger.info("${scheduleJob.name} has no enabled time. This job should never have been scheduled.")
+            return false
+        }
+        scheduledJobInfo.expectedNextExecutionTime = scheduleJob.schedule.getExpectedNextExecutionTime(
+            scheduleJob.enabledTime!!, scheduledJobInfo.expectedNextExecutionTime
+        )
+
+        // Validate that there is a next execution that needs to happen,
+        // e.g. a cron job that is expected to run on the 30th of February (which doesn't exist): "0/5 * 30 2 *"
+        if (scheduledJobInfo.expectedNextExecutionTime == null) {
+            logger.info("${scheduleJob.name} has no next execution time.")
+            return true
+        }
+
+        val duration = Duration.between(Instant.now(), scheduledJobInfo.expectedNextExecutionTime)
+
+        // Create an anonymous runnable.
+        val runnable = Runnable {
+            // Check again whether the scheduled job has been marked descheduled.
+            if (scheduledJobInfo.descheduled) {
+                return@Runnable // skip running the job if it is marked descheduled.
+            }
+
+            // The order of operations in here matters: we specifically call getPeriodEndingAt before reschedule,
+            // because reschedule updates expectedNextExecutionTime to the next one, which would throw off startTime/endTime.
+            val (startTime, endTime) = scheduleJob.schedule.getPeriodEndingAt(scheduledJobInfo.expectedNextExecutionTime)
+            scheduledJobInfo.actualPreviousExecutionTime = Instant.now()
+
+            this.reschedule(scheduleJob, scheduledJobInfo)
+
+            jobRunner.runJob(scheduleJob, startTime, endTime)
+        }
+
+        // Check the descheduled flag as close as possible to the point where we actually schedule the job.
+        // This way we can minimize race conditions.
+        if (scheduledJobInfo.descheduled) {
+            // Do not reschedule if the schedule has been marked descheduled.
+            return false
+        }
+
+        // Finally schedule the job in the ThreadPool with the next time to execute.
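+        // Note: the descheduled flag is also re-checked inside the runnable above, since a deschedule
+        // can still race in between this check and the ThreadPool.schedule() call below.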
+ val scheduledCancellable = threadPool.schedule(runnable, TimeValue(duration.toNanos(), TimeUnit.NANOSECONDS), ThreadPool.Names.SAME) + scheduledJobInfo.scheduledCancellable = scheduledCancellable + + return true + } + + fun getJobSchedulerMetric(): List { + return scheduledJobIdToInfo.entries.stream() + .map { entry -> + JobSchedulerMetrics( + entry.value.scheduledJobId, + entry.value.actualPreviousExecutionTime?.toEpochMilli(), + entry.value.scheduledJob.schedule.runningOnTime(entry.value.actualPreviousExecutionTime) + ) + } + .collect(Collectors.toList()) + } + + fun postIndex(job: ScheduledJob) { + jobRunner.postIndex(job) + } + + fun postDelete(jobId: String) { + jobRunner.postDelete(jobId) + } + + /** + * ScheduledJobInfo which we can use to check if the job should be descheduled. + * Some Idea for more use of this class is + * 1. Total number of runs. + * 2. Tracking of number of failed runs (helps to control error handling.) + */ + private data class ScheduledJobInfo( + val scheduledJobId: String, + val scheduledJob: ScheduledJob, + var descheduled: Boolean = false, + var actualPreviousExecutionTime: Instant? = null, + var expectedNextExecutionTime: Instant? = null, + var scheduledCancellable: Scheduler.ScheduledCancellable? = null + ) +} diff --git a/core/bin/main/org/opensearch/alerting/core/schedule/JobSchedulerMetrics.kt b/core/bin/main/org/opensearch/alerting/core/schedule/JobSchedulerMetrics.kt new file mode 100644 index 000000000..dff1ecd52 --- /dev/null +++ b/core/bin/main/org/opensearch/alerting/core/schedule/JobSchedulerMetrics.kt @@ -0,0 +1,48 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.core.schedule + +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.common.io.stream.Writeable +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.ToXContentFragment +import org.opensearch.core.xcontent.XContentBuilder +import java.time.Instant + +class JobSchedulerMetrics : ToXContentFragment, Writeable { + val scheduledJobId: String + val lastExecutionTime: Long? 
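+    /** Whether the last run happened on schedule, as computed from the job's schedule and its last execution time. */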
+ val runningOnTime: Boolean + + constructor(scheduledJobId: String, lastExecutionTime: Long?, runningOnTime: Boolean) { + this.scheduledJobId = scheduledJobId + this.lastExecutionTime = lastExecutionTime + this.runningOnTime = runningOnTime + } + + constructor(si: StreamInput) { + scheduledJobId = si.readString() + lastExecutionTime = si.readOptionalLong() + runningOnTime = si.readBoolean() + } + + override fun writeTo(out: StreamOutput) { + out.writeString(scheduledJobId) + out.writeOptionalLong(lastExecutionTime) + out.writeBoolean(runningOnTime) + } + + override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { + if (lastExecutionTime != null) + builder.timeField( + "last_execution_time", "last_execution_time_in_millis", + Instant.ofEpochMilli(lastExecutionTime).toEpochMilli() + ) + builder.field("running_on_time", runningOnTime) + return builder + } +} diff --git a/core/bin/main/org/opensearch/alerting/core/settings/LegacyOpenDistroScheduledJobSettings.kt b/core/bin/main/org/opensearch/alerting/core/settings/LegacyOpenDistroScheduledJobSettings.kt new file mode 100644 index 000000000..3a37ff97f --- /dev/null +++ b/core/bin/main/org/opensearch/alerting/core/settings/LegacyOpenDistroScheduledJobSettings.kt @@ -0,0 +1,49 @@ +package org.opensearch.alerting.core.settings + +import org.opensearch.common.settings.Setting +import org.opensearch.common.unit.TimeValue + +/** + * Legacy Opendistro settings used for [ScheduledJob]'s. These include back off settings, retry counts, timeouts etc... + */ + +class LegacyOpenDistroScheduledJobSettings { + + companion object { + val SWEEPER_ENABLED = Setting.boolSetting( + "opendistro.scheduled_jobs.enabled", + true, + Setting.Property.NodeScope, Setting.Property.Dynamic, Setting.Property.Deprecated + ) + + val REQUEST_TIMEOUT = Setting.positiveTimeSetting( + "opendistro.scheduled_jobs.request_timeout", + TimeValue.timeValueSeconds(10), + Setting.Property.NodeScope, Setting.Property.Dynamic, Setting.Property.Deprecated + ) + + val SWEEP_BACKOFF_MILLIS = Setting.positiveTimeSetting( + "opendistro.scheduled_jobs.sweeper.backoff_millis", + TimeValue.timeValueMillis(50), + Setting.Property.NodeScope, Setting.Property.Dynamic, Setting.Property.Deprecated + ) + + val SWEEP_BACKOFF_RETRY_COUNT = Setting.intSetting( + "opendistro.scheduled_jobs.retry_count", + 3, + Setting.Property.NodeScope, Setting.Property.Dynamic, Setting.Property.Deprecated + ) + + val SWEEP_PERIOD = Setting.positiveTimeSetting( + "opendistro.scheduled_jobs.sweeper.period", + TimeValue.timeValueMinutes(5), + Setting.Property.NodeScope, Setting.Property.Dynamic, Setting.Property.Deprecated + ) + + val SWEEP_PAGE_SIZE = Setting.intSetting( + "opendistro.scheduled_jobs.sweeper.page_size", + 100, + Setting.Property.NodeScope, Setting.Property.Dynamic, Setting.Property.Deprecated + ) + } +} diff --git a/core/bin/main/org/opensearch/alerting/core/settings/ScheduledJobSettings.kt b/core/bin/main/org/opensearch/alerting/core/settings/ScheduledJobSettings.kt new file mode 100644 index 000000000..6bdb18bec --- /dev/null +++ b/core/bin/main/org/opensearch/alerting/core/settings/ScheduledJobSettings.kt @@ -0,0 +1,51 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.core.settings + +import org.opensearch.common.settings.Setting + +/** + * settings used for [ScheduledJob]'s. These include back off settings, retry counts, timeouts etc... 
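+ *
+ * Each plugins.* setting here declares its legacy opendistro.* counterpart as a fallback. A sketch of
+ * consuming one of them (the clusterService wiring is assumed, not part of this file):
+ * ```
+ * @Volatile private var sweepPeriod = ScheduledJobSettings.SWEEP_PERIOD.get(settings)
+ * clusterService.clusterSettings.addSettingsUpdateConsumer(ScheduledJobSettings.SWEEP_PERIOD) {
+ *     sweepPeriod = it
+ * }
+ * ```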
+ */ +class ScheduledJobSettings { + + companion object { + val SWEEPER_ENABLED = Setting.boolSetting( + "plugins.scheduled_jobs.enabled", + LegacyOpenDistroScheduledJobSettings.SWEEPER_ENABLED, + Setting.Property.NodeScope, Setting.Property.Dynamic + ) + val REQUEST_TIMEOUT = Setting.positiveTimeSetting( + "plugins.scheduled_jobs.request_timeout", + LegacyOpenDistroScheduledJobSettings.REQUEST_TIMEOUT, + Setting.Property.NodeScope, Setting.Property.Dynamic + ) + + val SWEEP_BACKOFF_MILLIS = Setting.positiveTimeSetting( + "plugins.scheduled_jobs.sweeper.backoff_millis", + LegacyOpenDistroScheduledJobSettings.SWEEP_BACKOFF_MILLIS, + Setting.Property.NodeScope, Setting.Property.Dynamic + ) + + val SWEEP_BACKOFF_RETRY_COUNT = Setting.intSetting( + "plugins.scheduled_jobs.retry_count", + LegacyOpenDistroScheduledJobSettings.SWEEP_BACKOFF_RETRY_COUNT, + Setting.Property.NodeScope, Setting.Property.Dynamic + ) + + val SWEEP_PERIOD = Setting.positiveTimeSetting( + "plugins.scheduled_jobs.sweeper.period", + LegacyOpenDistroScheduledJobSettings.SWEEP_PERIOD, + Setting.Property.NodeScope, Setting.Property.Dynamic + ) + + val SWEEP_PAGE_SIZE = Setting.intSetting( + "plugins.scheduled_jobs.sweeper.page_size", + LegacyOpenDistroScheduledJobSettings.SWEEP_PAGE_SIZE, + Setting.Property.NodeScope, Setting.Property.Dynamic + ) + } +} diff --git a/core/bin/main/org/opensearch/alerting/opensearchapi/OpenSearchExtensions.kt b/core/bin/main/org/opensearch/alerting/opensearchapi/OpenSearchExtensions.kt new file mode 100644 index 000000000..3e87f207f --- /dev/null +++ b/core/bin/main/org/opensearch/alerting/opensearchapi/OpenSearchExtensions.kt @@ -0,0 +1,207 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.opensearchapi + +import kotlinx.coroutines.CoroutineScope +import kotlinx.coroutines.ThreadContextElement +import kotlinx.coroutines.delay +import kotlinx.coroutines.withContext +import org.apache.logging.log4j.Logger +import org.opensearch.OpenSearchException +import org.opensearch.action.bulk.BackoffPolicy +import org.opensearch.action.search.SearchResponse +import org.opensearch.action.search.ShardSearchFailure +import org.opensearch.client.OpenSearchClient +import org.opensearch.common.settings.Settings +import org.opensearch.common.util.concurrent.ThreadContext +import org.opensearch.common.xcontent.XContentHelper +import org.opensearch.common.xcontent.XContentType +import org.opensearch.commons.InjectSecurity +import org.opensearch.commons.authuser.User +import org.opensearch.commons.notifications.NotificationsPluginInterface +import org.opensearch.core.action.ActionListener +import org.opensearch.core.rest.RestStatus +import org.opensearch.core.rest.RestStatus.BAD_GATEWAY +import org.opensearch.core.rest.RestStatus.GATEWAY_TIMEOUT +import org.opensearch.core.rest.RestStatus.SERVICE_UNAVAILABLE +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.index.query.BoolQueryBuilder +import org.opensearch.index.query.QueryBuilders +import org.opensearch.search.builder.SearchSourceBuilder +import kotlin.coroutines.CoroutineContext +import kotlin.coroutines.resume +import kotlin.coroutines.resumeWithException +import kotlin.coroutines.suspendCoroutine + +/** Convert an object to maps and lists representation */ +fun ToXContent.convertToMap(): Map { + val bytesReference = XContentHelper.toXContent(this, XContentType.JSON, false) + return XContentHelper.convertToMap(bytesReference, false, XContentType.JSON).v2() +} + +/** 
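+ * A retry helper for blocking request lambdas. A usage sketch (the policy values, client, request,
+ * and timeout names here are hypothetical, not part of this file):
+ * ```
+ * val backoff = BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(50), 3)
+ * val response = backoff.retry { client.search(searchRequest).actionGet(requestTimeout) }
+ * ```
+ *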
+ * Backs off and retries a lambda that makes a request. This should not be called on any of the [standard][ThreadPool]
+ * executors since those executors are not meant to be blocked by sleeping.
+ */
+fun <T> BackoffPolicy.retry(block: () -> T): T {
+    val iter = iterator()
+    do {
+        try {
+            return block()
+        } catch (e: OpenSearchException) {
+            if (iter.hasNext() && e.isRetriable()) {
+                Thread.sleep(iter.next().millis)
+            } else {
+                throw e
+            }
+        }
+    } while (true)
+}
+
+/**
+ * Backs off and retries a lambda that makes a request. This retries on any Exception unless it detects the
+ * Notification plugin is not installed.
+ *
+ * @param logger - logger used to log intermediate failures
+ * @param block - the block of code to retry. This should be a suspend function.
+ */
+suspend fun <T> BackoffPolicy.retryForNotification(
+    logger: Logger,
+    block: suspend () -> T
+): T {
+    val iter = iterator()
+    do {
+        try {
+            return block()
+        } catch (e: java.lang.Exception) {
+            val isMissingNotificationPlugin = e.message?.contains("failed to find action") ?: false
+            if (isMissingNotificationPlugin) {
+                throw OpenSearchException("Notification plugin is not installed. Please install the Notification plugin.", e)
+            } else if (iter.hasNext()) {
+                val backoff = iter.next()
+                logger.warn("Notification operation failed. Retrying in $backoff.", e)
+                delay(backoff.millis)
+            } else {
+                throw e
+            }
+        }
+    } while (true)
+}
+
+/**
+ * Retries the given [block] of code as specified by the receiver [BackoffPolicy], if [block] throws an [OpenSearchException]
+ * that is retriable (502, 503, 504).
+ *
+ * If all retries fail the final exception will be rethrown. Exceptions caught during intermediate retries are
+ * logged as warnings to [logger]. Similar to [org.opensearch.action.bulk.Retry], except this retries on
+ * 502, 503, 504 error codes as well as 429.
+ *
+ * @param logger - logger used to log intermediate failures
+ * @param retryOn - any additional [RestStatus] values that should be retried
+ * @param block - the block of code to retry. This should be a suspend function.
+ */
+suspend fun <T> BackoffPolicy.retry(
+    logger: Logger,
+    retryOn: List<RestStatus> = emptyList(),
+    block: suspend () -> T
+): T {
+    val iter = iterator()
+    do {
+        try {
+            return block()
+        } catch (e: OpenSearchException) {
+            if (iter.hasNext() && (e.isRetriable() || retryOn.contains(e.status()))) {
+                val backoff = iter.next()
+                logger.warn("Operation failed. Retrying in $backoff.", e)
+                delay(backoff.millis)
+            } else {
+                throw e
+            }
+        }
+    } while (true)
+}
+
+/**
+ * Retries on 502, 503 and 504 per elastic client's behavior: https://github.com/elastic/elasticsearch-net/issues/2061
+ * 429 must be retried manually as it's not clear if it's ok to retry for requests other than Bulk requests.
+ */
+fun OpenSearchException.isRetriable(): Boolean {
+    return (status() in listOf(BAD_GATEWAY, SERVICE_UNAVAILABLE, GATEWAY_TIMEOUT))
+}
+
+fun SearchResponse.firstFailureOrNull(): ShardSearchFailure? {
+    return shardFailures?.getOrNull(0)
+}
+
+fun addFilter(user: User, searchSourceBuilder: SearchSourceBuilder, fieldName: String) {
+    val filterBackendRoles = QueryBuilders.termsQuery(fieldName, user.backendRoles)
+    val queryBuilder = searchSourceBuilder.query() as BoolQueryBuilder
+    searchSourceBuilder.query(queryBuilder.filter(filterBackendRoles))
+}
+
+/**
+ * Converts [OpenSearchClient] methods that take a callback into a kotlin suspending function.
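+ *
+ * For example (the client and index name are hypothetical):
+ * ```
+ * val response: SearchResponse = client.suspendUntil { search(SearchRequest("my-index"), it) }
+ * ```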
+ * + * @param block - a block of code that is passed an [ActionListener] that should be passed to the OpenSearch client API. + */ +suspend fun C.suspendUntil(block: C.(ActionListener) -> Unit): T = + suspendCoroutine { cont -> + block(object : ActionListener { + override fun onResponse(response: T) = cont.resume(response) + + override fun onFailure(e: Exception) = cont.resumeWithException(e) + }) + } + +/** + * Converts [NotificationsPluginInterface] methods that take a callback into a kotlin suspending function. + * + * @param block - a block of code that is passed an [ActionListener] that should be passed to the NotificationsPluginInterface API. + */ +suspend fun NotificationsPluginInterface.suspendUntil(block: NotificationsPluginInterface.(ActionListener) -> Unit): T = + suspendCoroutine { cont -> + block(object : ActionListener { + override fun onResponse(response: T) = cont.resume(response) + + override fun onFailure(e: Exception) = cont.resumeWithException(e) + }) + } + +class InjectorContextElement( + id: String, + settings: Settings, + threadContext: ThreadContext, + private val roles: List?, + private val user: User? = null +) : ThreadContextElement { + + companion object Key : CoroutineContext.Key + override val key: CoroutineContext.Key<*> + get() = Key + + var rolesInjectorHelper = InjectSecurity(id, settings, threadContext) + + override fun updateThreadContext(context: CoroutineContext) { + rolesInjectorHelper.injectRoles(roles) + // This is from where plugins extract backend roles. It should be passed when calling APIs of other plugins + rolesInjectorHelper.injectUserInfo(user) + } + + override fun restoreThreadContext(context: CoroutineContext, oldState: Unit) { + rolesInjectorHelper.close() + } +} + +suspend fun withClosableContext( + context: InjectorContextElement, + block: suspend CoroutineScope.() -> T +): T { + try { + return withContext(context) { block() } + } finally { + context.rolesInjectorHelper.close() + } +} diff --git a/core/bin/main/settings/doc-level-queries.json b/core/bin/main/settings/doc-level-queries.json new file mode 100644 index 000000000..c5cbfa445 --- /dev/null +++ b/core/bin/main/settings/doc-level-queries.json @@ -0,0 +1,10 @@ +{ + "index": { + "mapping": { + "total_fields": { + "limit": 10000 + } + }, + "hidden": true + } +} \ No newline at end of file diff --git a/core/bin/test/org/opensearch/alerting/core/WriteableTests.kt b/core/bin/test/org/opensearch/alerting/core/WriteableTests.kt new file mode 100644 index 000000000..f48ffa370 --- /dev/null +++ b/core/bin/test/org/opensearch/alerting/core/WriteableTests.kt @@ -0,0 +1,26 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.core + +import org.joda.time.DateTime +import org.junit.Test +import org.opensearch.alerting.core.schedule.JobSchedulerMetrics +import org.opensearch.common.io.stream.BytesStreamOutput +import org.opensearch.core.common.io.stream.StreamInput +import org.opensearch.test.OpenSearchTestCase.assertEquals + +class WriteableTests { + + @Test + fun `test jobschedule metrics as stream`() { + val metrics = JobSchedulerMetrics("test", DateTime.now().millis, false) + val out = BytesStreamOutput() + metrics.writeTo(out) + val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) + val newMetrics = JobSchedulerMetrics(sin) + assertEquals("Round tripping metrics doesn't work", metrics.scheduledJobId, newMetrics.scheduledJobId) + } +} diff --git 
a/core/bin/test/org/opensearch/alerting/core/model/MockScheduledJob.kt b/core/bin/test/org/opensearch/alerting/core/model/MockScheduledJob.kt new file mode 100644 index 000000000..08e3fb8c4 --- /dev/null +++ b/core/bin/test/org/opensearch/alerting/core/model/MockScheduledJob.kt @@ -0,0 +1,33 @@ +package org.opensearch.alerting.core.model + +import org.opensearch.commons.alerting.model.Schedule +import org.opensearch.commons.alerting.model.ScheduledJob +import org.opensearch.core.common.io.stream.StreamOutput +import org.opensearch.core.xcontent.ToXContent +import org.opensearch.core.xcontent.XContentBuilder +import java.io.IOException +import java.time.Instant + +class MockScheduledJob( + override val id: String, + override val version: Long, + override val name: String, + override val type: String, + override val enabled: Boolean, + override val schedule: Schedule, + override var lastUpdateTime: Instant, + override val enabledTime: Instant? +) : ScheduledJob { + override fun fromDocument(id: String, version: Long): ScheduledJob { + TODO("not implemented") + } + + override fun toXContent(builder: XContentBuilder?, params: ToXContent.Params?): XContentBuilder { + TODO("not implemented") + } + + @Throws(IOException::class) + override fun writeTo(out: StreamOutput) { + TODO("not implemented") + } +} diff --git a/core/bin/test/org/opensearch/alerting/core/schedule/JobSchedulerTest.kt b/core/bin/test/org/opensearch/alerting/core/schedule/JobSchedulerTest.kt new file mode 100644 index 000000000..a0453e935 --- /dev/null +++ b/core/bin/test/org/opensearch/alerting/core/schedule/JobSchedulerTest.kt @@ -0,0 +1,190 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.core.schedule + +import org.junit.Before +import org.opensearch.alerting.core.model.MockScheduledJob +import org.opensearch.common.settings.Settings +import org.opensearch.commons.alerting.model.CronSchedule +import org.opensearch.commons.alerting.model.IntervalSchedule +import org.opensearch.threadpool.ThreadPool +import java.time.Instant +import java.time.ZoneId +import java.time.temporal.ChronoUnit +import kotlin.test.Test +import kotlin.test.assertEquals +import kotlin.test.assertFalse +import kotlin.test.assertTrue + +class JobSchedulerTest { + + private var testSettings: Settings = Settings.builder().put("node.name", "node-0").build() + private val testThreadPool = ThreadPool(testSettings) + private var jobRunner: MockJobRunner = MockJobRunner() + private var jobScheduler: JobScheduler = JobScheduler(ThreadPool(testSettings), jobRunner) + + @Before + fun `setup`() { + jobRunner = MockJobRunner() + jobScheduler = JobScheduler(ThreadPool(testSettings), jobRunner) + } + + @Test + fun `schedule and deschedule`() { + val mockScheduledJob = MockScheduledJob( + "mockScheduledJob-id", + 1L, + "mockScheduledJob-name", + "MockScheduledJob", + true, + IntervalSchedule(1, ChronoUnit.MINUTES), + Instant.now(), + Instant.now() + ) + + assertTrue(jobScheduler.schedule(mockScheduledJob)) + + assertEquals(setOf("mockScheduledJob-id"), jobScheduler.scheduledJobs(), "List of ScheduledJobs are not the same.") + assertEquals(0, jobRunner.numberOfRun, "Number of JobRunner ran is wrong.") + assertTrue(jobScheduler.deschedule("mockScheduledJob-id"), "Descheduling should be true.") + } + + @Test + fun `schedule cron past year`() { + // This is to run cron in Feb 30 which we should never run. 
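+        // Cron fields are minute hour day-of-month month day-of-week, so "0/5 * 30 2 *" means every
+        // 5 minutes on February 30th, a date that never exists, so no next execution time is computed.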
+ val cronExpression = "0/5 * 30 2 *" + val jobRunner = MockJobRunner() + val jobScheduler = JobScheduler(testThreadPool, jobRunner) + val mockScheduledJob = MockScheduledJob( + "mockScheduledJob-id", + 1L, + "mockScheduledJob-name", + "MockScheduledJob", + true, + CronSchedule(cronExpression, ZoneId.of("UTC")), + Instant.now(), + Instant.now() + ) + + assertTrue(jobScheduler.schedule(mockScheduledJob)) + assertEquals(setOf("mockScheduledJob-id"), jobScheduler.scheduledJobs(), "List of ScheduledJobs are not the same.") + + assertEquals(0, jobRunner.numberOfRun, "Number of JobRunner ran is wrong.") + + assertTrue(jobScheduler.deschedule("mockScheduledJob-id"), "Descheduling should be true.") + } + + @Test + fun `schedule disabled`() { + val cronExpression = "0/5 * * * *" + val jobRunner = MockJobRunner() + val jobScheduler = JobScheduler(testThreadPool, jobRunner) + val mockScheduledJob = MockScheduledJob( + "mockScheduledJob-id", + 1L, + "mockScheduledJob-name", + "MockScheduledJob", + false, + CronSchedule(cronExpression, ZoneId.of("UTC")), + Instant.now(), + Instant.now() + ) + + assertFalse(jobScheduler.schedule(mockScheduledJob), "We should return false if we try to schedule disabled schedule.") + assertEquals(setOf(), jobScheduler.scheduledJobs(), "List of ScheduledJobs are not the same.") + } + + @Test + fun `deschedule non existing schedule`() { + val cronExpression = "0/5 * * * *" + val jobRunner = MockJobRunner() + val jobScheduler = JobScheduler(testThreadPool, jobRunner) + val mockScheduledJob = MockScheduledJob( + "mockScheduledJob-id", + 1L, + "mockScheduledJob-name", + "MockScheduledJob", + true, + CronSchedule(cronExpression, ZoneId.of("UTC")), + Instant.now(), + Instant.now() + ) + + assertTrue(jobScheduler.schedule(mockScheduledJob)) + assertEquals(setOf("mockScheduledJob-id"), jobScheduler.scheduledJobs(), "List of ScheduledJobs are not the same.") + + assertEquals(0, jobRunner.numberOfRun, "Number of JobRunner ran is wrong.") + + assertTrue(jobScheduler.deschedule("mockScheduledJob-invalid"), "Descheduling should be true.") + assertTrue(jobScheduler.deschedule("mockScheduledJob-id"), "Descheduling should be true.") + } + + @Test + fun `schedule multiple jobs`() { + val cronExpression = "0/5 * * * *" + val mockScheduledJob1 = MockScheduledJob( + "mockScheduledJob-1", + 1L, + "mockScheduledJob-name", + "MockScheduledJob", + true, + CronSchedule(cronExpression, ZoneId.of("UTC")), + Instant.now(), + Instant.now() + ) + val mockScheduledJob2 = MockScheduledJob( + "mockScheduledJob-2", + 1L, + "mockScheduledJob-name", + "MockScheduledJob", + true, + CronSchedule(cronExpression, ZoneId.of("UTC")), + Instant.now(), + Instant.now() + ) + + assertTrue(jobScheduler.schedule(mockScheduledJob1, mockScheduledJob2).isEmpty()) + } + + @Test + fun `schedule null enabled time job`() { + val cronExpression = "0/5 * * * *" + val mockScheduledJob2 = MockScheduledJob( + "mockScheduledJob-2", 1L, "mockScheduledJob-name", "MockScheduledJob", true, + CronSchedule(cronExpression, ZoneId.of("UTC")), Instant.now(), null + ) + + assertFalse(jobScheduler.schedule(mockScheduledJob2)) + } + + @Test + fun `schedule disabled job`() { + val cronExpression = "0/5 * * * *" + val mockScheduledJob1 = MockScheduledJob( + "mockScheduledJob-1", 1L, "mockScheduledJob-name", "MockScheduledJob", false, + CronSchedule(cronExpression, ZoneId.of("UTC")), Instant.now(), Instant.now() + ) + + assertFalse(jobScheduler.schedule(mockScheduledJob1)) + } + + @Test + fun `run Job`() { + val cronExpression = "0/5 * * * *" + 
val mockScheduledJob = MockScheduledJob( + "mockScheduledJob-id", + 1L, + "mockScheduledJob-name", + "MockScheduledJob", + true, + CronSchedule(cronExpression, ZoneId.of("UTC")), + Instant.now(), + Instant.now() + ) + + jobRunner.runJob(mockScheduledJob, Instant.now(), Instant.now()) + } +} diff --git a/core/bin/test/org/opensearch/alerting/core/schedule/MockJobRunner.kt b/core/bin/test/org/opensearch/alerting/core/schedule/MockJobRunner.kt new file mode 100644 index 000000000..15fe770b9 --- /dev/null +++ b/core/bin/test/org/opensearch/alerting/core/schedule/MockJobRunner.kt @@ -0,0 +1,31 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.alerting.core.schedule + +import org.opensearch.alerting.core.JobRunner +import org.opensearch.commons.alerting.model.ScheduledJob +import java.time.Instant + +class MockJobRunner : JobRunner { + var numberOfRun: Int = 0 + private set + var numberOfIndex: Int = 0 + private set + var numberOfDelete: Int = 0 + private set + + override fun postDelete(jobId: String) { + numberOfDelete++ + } + + override fun postIndex(job: ScheduledJob) { + numberOfIndex++ + } + + override fun runJob(job: ScheduledJob, periodStart: Instant, periodEnd: Instant) { + numberOfRun++ + } +} From 7d525d29f981b4ed286897255a2887f6576e702a Mon Sep 17 00:00:00 2001 From: Tyler Ohlsen Date: Mon, 22 Jan 2024 17:32:36 +0000 Subject: [PATCH 2/3] remove bin files Signed-off-by: Tyler Ohlsen --- alerting/bin/main/DUMMY-FILE | 1 - ....opensearch.painless.spi.PainlessExtension | 4 - .../org/opensearch/alerting/AlertService.kt | 892 --- .../org/opensearch/alerting/AlertingPlugin.kt | 387 -- .../alerting/BucketLevelMonitorRunner.kt | 503 -- .../alerting/DocumentLevelMonitorRunner.kt | 804 --- .../org/opensearch/alerting/InputService.kt | 226 - .../alerting/MonitorMetadataService.kt | 274 - .../org/opensearch/alerting/MonitorRunner.kt | 194 - .../alerting/MonitorRunnerExecutionContext.kt | 51 - .../alerting/MonitorRunnerService.kt | 353 - .../alerting/QueryLevelMonitorRunner.kt | 101 - .../org/opensearch/alerting/TriggerService.kt | 186 - .../alerting/WorkflowMetadataService.kt | 174 - .../opensearch/alerting/WorkflowService.kt | 144 - .../alerting/action/ExecuteMonitorAction.kt | 15 - .../alerting/action/ExecuteMonitorRequest.kt | 60 - .../alerting/action/ExecuteMonitorResponse.kt | 39 - .../alerting/action/ExecuteWorkflowAction.kt | 15 - .../alerting/action/ExecuteWorkflowRequest.kt | 70 - .../action/ExecuteWorkflowResponse.kt | 39 - .../alerting/action/GetDestinationsAction.kt | 15 - .../alerting/action/GetDestinationsRequest.kt | 61 - .../action/GetDestinationsResponse.kt | 66 - .../alerting/action/GetEmailAccountAction.kt | 15 - .../alerting/action/GetEmailAccountRequest.kt | 60 - .../action/GetEmailAccountResponse.kt | 86 - .../alerting/action/GetEmailGroupAction.kt | 15 - .../alerting/action/GetEmailGroupRequest.kt | 60 - .../alerting/action/GetEmailGroupResponse.kt | 86 - .../action/SearchEmailAccountAction.kt | 16 - .../alerting/action/SearchEmailGroupAction.kt | 16 - .../alerting/alerts/AlertIndices.kt | 588 -- .../opensearch/alerting/alerts/AlertMover.kt | 250 - .../alerting/alerts/alert_mapping.json | 174 - .../alerting/alerts/finding_mapping.json | 71 - .../parsers/ChainedAlertExpressionParser.kt | 53 - .../ChainedAlertExpressionRPNBaseParser.kt | 114 - .../parsers/ExpressionParser.kt | 12 - .../resolvers/ChainedAlertRPNResolver.kt | 110 - .../ChainedAlertTriggerExpression.kt | 32 - 
.../resolvers/ChainedAlertTriggerResolver.kt | 11 - .../tokens/CAExpressionOperator.kt | 20 - .../tokens/CAExpressionToken.kt | 11 - .../tokens/ChainedAlertExpressionConstant.kt | 24 - .../tokens/ExpressionToken.kt | 8 - .../alerting/model/AlertingConfigAccessor.kt | 63 - .../model/BucketLevelTriggerRunResult.kt | 58 - .../model/ChainedAlertTriggerRunResult.kt | 69 - .../model/DocumentExecutionContext.kt | 14 - .../model/DocumentLevelTriggerRunResult.kt | 52 - .../alerting/model/MonitorMetadata.kt | 198 - .../alerting/model/MonitorRunResult.kt | 216 - .../model/QueryLevelTriggerRunResult.kt | 66 - .../alerting/model/TriggerRunResult.kt | 55 - .../alerting/model/WorkflowMetadata.kt | 105 - .../alerting/model/WorkflowRunResult.kt | 82 - .../alerting/model/destination/Chime.kt | 74 - .../model/destination/CustomWebhook.kt | 143 - .../alerting/model/destination/Destination.kt | 308 - .../model/destination/DestinationContext.kt | 17 - .../destination/DestinationContextFactory.kt | 80 - .../alerting/model/destination/SNS.kt | 63 - .../alerting/model/destination/Slack.kt | 74 - .../alerting/model/destination/email/Email.kt | 188 - .../model/destination/email/EmailAccount.kt | 175 - .../model/destination/email/EmailGroup.kt | 190 - .../alerting/org.opensearch.alerting.txt | 52 - .../resthandler/AsyncActionHandler.kt | 17 - .../resthandler/RestAcknowledgeAlertAction.kt | 92 - .../RestAcknowledgeChainedAlertsAction.kt | 82 - .../resthandler/RestDeleteMonitorAction.kt | 66 - .../resthandler/RestDeleteWorkflowAction.kt | 60 - .../resthandler/RestExecuteMonitorAction.kt | 77 - .../resthandler/RestExecuteWorkflowAction.kt | 59 - .../resthandler/RestGetAlertsAction.kt | 82 - .../resthandler/RestGetDestinationsAction.kt | 96 - .../resthandler/RestGetEmailAccountAction.kt | 68 - .../resthandler/RestGetEmailGroupAction.kt | 68 - .../resthandler/RestGetFindingsAction.kt | 67 - .../resthandler/RestGetMonitorAction.kt | 75 - .../resthandler/RestGetWorkflowAction.kt | 59 - .../RestGetWorkflowAlertsAction.kt | 92 - .../resthandler/RestIndexMonitorAction.kt | 174 - .../resthandler/RestIndexWorkflowAction.kt | 99 - .../RestSearchEmailAccountAction.kt | 108 - .../resthandler/RestSearchEmailGroupAction.kt | 109 - .../resthandler/RestSearchMonitorAction.kt | 136 - .../BucketLevelTriggerExecutionContext.kt | 51 - .../ChainedAlertTriggerExecutionContext.kt | 40 - .../DocumentLevelTriggerExecutionContext.kt | 44 - .../QueryLevelTriggerExecutionContext.kt | 50 - .../script/TriggerExecutionContext.kt | 43 - .../alerting/script/TriggerScript.kt | 46 - .../alerting/service/DeleteMonitorService.kt | 186 - .../alerting/settings/AlertingSettings.kt | 180 - .../alerting/settings/DestinationSettings.kt | 109 - .../LegacyOpenDistroAlertingSettings.kt | 157 - .../LegacyOpenDistroDestinationSettings.kt | 99 - .../SupportedClusterMetricsSettings.kt | 154 - .../settings/supported_json_payloads.json | 12 - .../transport/SecureTransportAction.kt | 139 - .../TransportAcknowledgeAlertAction.kt | 268 - .../TransportAcknowledgeChainedAlertAction.kt | 296 - .../transport/TransportDeleteMonitorAction.kt | 141 - .../TransportDeleteWorkflowAction.kt | 327 - .../TransportExecuteMonitorAction.kt | 161 - .../TransportExecuteWorkflowAction.kt | 131 - .../transport/TransportGetAlertsAction.kt | 273 - .../TransportGetDestinationsAction.kt | 171 - .../TransportGetEmailAccountAction.kt | 122 - .../transport/TransportGetEmailGroupAction.kt | 122 - .../transport/TransportGetFindingsAction.kt | 230 - .../transport/TransportGetMonitorAction.kt | 194 - 
.../transport/TransportGetWorkflowAction.kt | 148 - .../TransportGetWorkflowAlertsAction.kt | 274 - .../transport/TransportIndexMonitorAction.kt | 732 -- .../transport/TransportIndexWorkflowAction.kt | 796 --- .../TransportSearchEmailAccountAction.kt | 73 - .../TransportSearchEmailGroupAction.kt | 73 - .../transport/TransportSearchMonitorAction.kt | 128 - .../parsers/ExpressionParser.kt | 12 - .../parsers/TriggerExpressionParser.kt | 53 - .../parsers/TriggerExpressionRPNBaseParser.kt | 116 - .../resolvers/TriggerExpression.kt | 32 - .../resolvers/TriggerExpressionRPNResolver.kt | 103 - .../resolvers/TriggerExpressionResolver.kt | 12 - .../tokens/ExpressionToken.kt | 8 - .../tokens/TriggerExpressionConstant.kt | 26 - .../tokens/TriggerExpressionOperator.kt | 20 - .../tokens/TriggerExpressionToken.kt | 11 - .../alerting/util/AggregationQueryRewriter.kt | 114 - .../alerting/util/AlertingException.kt | 73 - .../opensearch/alerting/util/AlertingUtils.kt | 141 - .../alerting/util/AnomalyDetectionUtils.kt | 68 - .../alerting/util/DestinationType.kt | 18 - .../alerting/util/DocLevelMonitorQueries.kt | 608 -- .../opensearch/alerting/util/IndexUtils.kt | 200 - .../alerting/util/RestHandlerUtils.kt | 29 - .../alerting/util/ScheduledJobUtils.kt | 70 - .../CatIndicesHelpers.kt | 859 --- .../CatShardsHelpers.kt | 495 -- ...pportedClusterMetricsSettingsExtensions.kt | 171 - .../DestinationConversionUtils.kt | 184 - .../DestinationMigrationCoordinator.kt | 106 - .../DestinationMigrationUtilService.kt | 228 - .../NotificationApiUtils.kt | 172 - .../workflow/CompositeWorkflowRunner.kt | 395 -- .../alerting/workflow/WorkflowRunContext.kt | 15 - .../alerting/workflow/WorkflowRunner.kt | 199 - .../PercolateQueryBuilderExt$1.class | Bin 2105 -> 0 bytes .../PercolateQueryBuilderExt$2.class | Bin 5688 -> 0 bytes .../percolator/PercolateQueryBuilderExt.class | Bin 33817 -> 0 bytes .../PercolatorFieldMapperExt$Builder.class | Bin 9395 -> 0 bytes ...orFieldMapperExt$PercolatorFieldType.class | Bin 11742 -> 0 bytes .../PercolatorFieldMapperExt$TypeParser.class | Bin 1964 -> 0 bytes .../percolator/PercolatorFieldMapperExt.class | Bin 17553 -> 0 bytes .../percolator/PercolatorPluginExt.class | Bin 4265 -> 0 bytes alerting/bin/test/esnode-key.pem | 28 - alerting/bin/test/esnode.pem | 28 - alerting/bin/test/kirk-key.pem | 28 - alerting/bin/test/kirk.pem | 26 - .../org/opensearch/alerting/ADTestHelpers.kt | 508 -- .../org/opensearch/alerting/AccessRoles.kt | 49 - .../opensearch/alerting/AlertServiceTests.kt | 256 - .../alerting/AlertingRestTestCase.kt | 1821 ----- .../alerting/DocumentMonitorRunnerIT.kt | 1675 ----- .../alerting/MonitorDataSourcesIT.kt | 5923 ----------------- .../alerting/MonitorRunnerServiceIT.kt | 2093 ------ .../org/opensearch/alerting/MonitorTests.kt | 47 - .../opensearch/alerting/ODFERestTestCase.kt | 146 - .../org/opensearch/alerting/TestHelpers.kt | 797 --- .../alerting/TriggerServiceTests.kt | 260 - .../action/ExecuteMonitorActionTests.kt | 15 - .../action/ExecuteMonitorRequestTests.kt | 47 - .../action/ExecuteMonitorResponseTests.kt | 42 - .../action/GetDestinationsActionTests.kt | 16 - .../action/GetDestinationsRequestTests.kt | 83 - .../action/GetDestinationsResponseTests.kt | 62 - .../action/GetEmailAccountActionTests.kt | 16 - .../action/GetEmailAccountRequestTests.kt | 45 - .../action/GetEmailAccountResponseTests.kt | 47 - .../action/GetEmailGroupActionTests.kt | 16 - .../action/GetEmailGroupRequestTests.kt | 45 - .../action/GetEmailGroupResponseTests.kt | 47 - 
.../action/GetFindingsRequestTests.kt | 41 - .../action/SearchEmailAccountActionTests.kt | 16 - .../action/SearchEmailGroupActionTests.kt | 16 - ...ucketSelectorExtAggregationBuilderTests.kt | 56 - .../BucketSelectorExtAggregatorTests.kt | 371 -- .../alerting/alerts/AlertIndicesIT.kt | 371 -- .../bwc/AlertingBackwardsCompatibilityIT.kt | 207 - .../ChainedAlertsExpressionParserTests.kt | 84 - .../ChainedAlertsExpressionResolveTests.kt | 118 - .../opensearch/alerting/model/AlertTests.kt | 62 - .../alerting/model/DestinationTests.kt | 310 - .../alerting/model/EmailAccountTests.kt | 61 - .../alerting/model/EmailGroupTests.kt | 60 - .../opensearch/alerting/model/FindingTests.kt | 40 - .../alerting/model/WriteableTests.kt | 126 - .../alerting/model/XContentTests.kt | 94 - .../resthandler/DestinationRestApiIT.kt | 189 - .../resthandler/EmailAccountRestApiIT.kt | 180 - .../resthandler/EmailGroupRestApiIT.kt | 173 - .../alerting/resthandler/FindingsRestApiIT.kt | 210 - .../alerting/resthandler/MonitorRestApiIT.kt | 1412 ---- .../resthandler/SecureDestinationRestApiIT.kt | 159 - .../SecureEmailAccountRestApiIT.kt | 179 - .../resthandler/SecureEmailGroupsRestApiIT.kt | 128 - .../resthandler/SecureMonitorRestApiIT.kt | 1577 ----- .../resthandler/SecureWorkflowRestApiIT.kt | 1421 ---- .../alerting/resthandler/WorkflowRestApiIT.kt | 1188 ---- .../settings/AlertingSettingsTests.kt | 189 - .../settings/DestinationSettingsTests.kt | 73 - .../transport/AlertingSingleNodeTestCase.kt | 503 -- .../TriggerExpressionParserTests.kt | 76 - .../TriggerExpressionResolverTests.kt | 124 - .../util/AggregationQueryRewriterTests.kt | 335 - .../util/AnomalyDetectionUtilsTests.kt | 162 - .../alerting/util/IndexUtilsTests.kt | 91 - .../CatIndicesWrappersIT.kt | 173 - .../CatShardsWrappersIT.kt | 165 - ...edClusterMetricsSettingsExtensionsTests.kt | 122 - .../DestinationMigrationUtilServiceIT.kt | 104 - alerting/bin/test/plugin-security.policy | 8 - alerting/bin/test/root-ca.pem | 24 - alerting/bin/test/sample.pem | 25 - alerting/bin/test/test-kirk.jks | Bin 4504 -> 0 bytes 228 files changed, 46231 deletions(-) delete mode 100644 alerting/bin/main/DUMMY-FILE delete mode 100644 alerting/bin/main/META-INF/services/org.opensearch.painless.spi.PainlessExtension delete mode 100644 alerting/bin/main/org/opensearch/alerting/AlertService.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/AlertingPlugin.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/BucketLevelMonitorRunner.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/DocumentLevelMonitorRunner.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/InputService.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/MonitorMetadataService.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/MonitorRunner.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/MonitorRunnerExecutionContext.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/MonitorRunnerService.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/QueryLevelMonitorRunner.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/TriggerService.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/WorkflowMetadataService.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/WorkflowService.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorRequest.kt delete mode 
100644 alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorResponse.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowRequest.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowResponse.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/action/GetDestinationsAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/action/GetDestinationsRequest.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/action/GetDestinationsResponse.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountRequest.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountResponse.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupRequest.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupResponse.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/action/SearchEmailAccountAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/action/SearchEmailGroupAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/alerts/AlertIndices.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/alerts/AlertMover.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/alerts/alert_mapping.json delete mode 100644 alerting/bin/main/org/opensearch/alerting/alerts/finding_mapping.json delete mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionParser.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionRPNBaseParser.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ExpressionParser.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertRPNResolver.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerExpression.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerResolver.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionOperator.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionToken.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/ChainedAlertExpressionConstant.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/ExpressionToken.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/AlertingConfigAccessor.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/BucketLevelTriggerRunResult.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/ChainedAlertTriggerRunResult.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/DocumentExecutionContext.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/DocumentLevelTriggerRunResult.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/MonitorMetadata.kt delete mode 100644 
alerting/bin/main/org/opensearch/alerting/model/MonitorRunResult.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/QueryLevelTriggerRunResult.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/TriggerRunResult.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/WorkflowMetadata.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/WorkflowRunResult.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/Chime.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/CustomWebhook.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/Destination.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/DestinationContext.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/DestinationContextFactory.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/SNS.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/Slack.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/email/Email.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/email/EmailAccount.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/model/destination/email/EmailGroup.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/org.opensearch.alerting.txt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/AsyncActionHandler.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestAcknowledgeAlertAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestAcknowledgeChainedAlertsAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestDeleteMonitorAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestDeleteWorkflowAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestExecuteMonitorAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestExecuteWorkflowAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestGetAlertsAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestGetDestinationsAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestGetEmailAccountAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestGetEmailGroupAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestGetFindingsAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestGetMonitorAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestGetWorkflowAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestGetWorkflowAlertsAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestIndexWorkflowAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchEmailAccountAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchEmailGroupAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt delete mode 100644 
alerting/bin/main/org/opensearch/alerting/script/BucketLevelTriggerExecutionContext.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/script/ChainedAlertTriggerExecutionContext.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/script/DocumentLevelTriggerExecutionContext.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/script/QueryLevelTriggerExecutionContext.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/script/TriggerExecutionContext.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/script/TriggerScript.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/service/DeleteMonitorService.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/settings/AlertingSettings.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/settings/DestinationSettings.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/settings/LegacyOpenDistroAlertingSettings.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/settings/LegacyOpenDistroDestinationSettings.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/settings/SupportedClusterMetricsSettings.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/settings/supported_json_payloads.json delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/SecureTransportAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportAcknowledgeChainedAlertAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportDeleteWorkflowAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportExecuteMonitorAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportExecuteWorkflowAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportGetAlertsAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportGetDestinationsAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportGetEmailAccountAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportGetEmailGroupAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportGetFindingsAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportGetMonitorAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportGetWorkflowAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportGetWorkflowAlertsAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportIndexWorkflowAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportSearchEmailAccountAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportSearchEmailGroupAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/ExpressionParser.kt delete mode 100644 
alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionParser.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionRPNBaseParser.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpression.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionRPNResolver.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionResolver.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/ExpressionToken.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionConstant.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionOperator.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionToken.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/util/AggregationQueryRewriter.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/util/AlertingException.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/util/AlertingUtils.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/util/AnomalyDetectionUtils.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/util/DestinationType.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/util/DocLevelMonitorQueries.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/util/IndexUtils.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/util/RestHandlerUtils.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/util/ScheduledJobUtils.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesHelpers.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsHelpers.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensions.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationConversionUtils.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationMigrationCoordinator.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilService.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/util/destinationmigration/NotificationApiUtils.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/workflow/CompositeWorkflowRunner.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/workflow/WorkflowRunContext.kt delete mode 100644 alerting/bin/main/org/opensearch/alerting/workflow/WorkflowRunner.kt delete mode 100644 alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt$1.class delete mode 100644 alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt$2.class delete mode 100644 alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt.class delete mode 100644 alerting/bin/main/org/opensearch/percolator/PercolatorFieldMapperExt$Builder.class delete mode 100644 alerting/bin/main/org/opensearch/percolator/PercolatorFieldMapperExt$PercolatorFieldType.class delete mode 100644 alerting/bin/main/org/opensearch/percolator/PercolatorFieldMapperExt$TypeParser.class delete mode 100644 
alerting/bin/main/org/opensearch/percolator/PercolatorFieldMapperExt.class delete mode 100644 alerting/bin/main/org/opensearch/percolator/PercolatorPluginExt.class delete mode 100644 alerting/bin/test/esnode-key.pem delete mode 100644 alerting/bin/test/esnode.pem delete mode 100644 alerting/bin/test/kirk-key.pem delete mode 100644 alerting/bin/test/kirk.pem delete mode 100644 alerting/bin/test/org/opensearch/alerting/ADTestHelpers.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/AccessRoles.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/AlertServiceTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/AlertingRestTestCase.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/DocumentMonitorRunnerIT.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/MonitorDataSourcesIT.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/MonitorRunnerServiceIT.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/MonitorTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/ODFERestTestCase.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/TestHelpers.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/TriggerServiceTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorActionTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorRequestTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorResponseTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetDestinationsActionTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetDestinationsRequestTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetDestinationsResponseTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountActionTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountRequestTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountResponseTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupActionTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupRequestTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupResponseTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/action/GetFindingsRequestTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/action/SearchEmailAccountActionTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/action/SearchEmailGroupActionTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregationBuilderTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregatorTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/alerts/AlertIndicesIT.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/bwc/AlertingBackwardsCompatibilityIT.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionParserTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionResolveTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/model/AlertTests.kt delete mode 100644 
alerting/bin/test/org/opensearch/alerting/model/DestinationTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/model/EmailAccountTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/model/EmailGroupTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/model/FindingTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/model/WriteableTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/model/XContentTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/DestinationRestApiIT.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/EmailAccountRestApiIT.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/EmailGroupRestApiIT.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/FindingsRestApiIT.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/SecureDestinationRestApiIT.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/SecureMonitorRestApiIT.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/SecureWorkflowRestApiIT.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/resthandler/WorkflowRestApiIT.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/settings/AlertingSettingsTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/settings/DestinationSettingsTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/transport/AlertingSingleNodeTestCase.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/triggeraction/TriggerExpressionParserTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/triggeraction/TriggerExpressionResolverTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/util/AggregationQueryRewriterTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/util/AnomalyDetectionUtilsTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/util/IndexUtilsTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesWrappersIT.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsWrappersIT.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensionsTests.kt delete mode 100644 alerting/bin/test/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilServiceIT.kt delete mode 100644 alerting/bin/test/plugin-security.policy delete mode 100644 alerting/bin/test/root-ca.pem delete mode 100644 alerting/bin/test/sample.pem delete mode 100644 alerting/bin/test/test-kirk.jks diff --git a/alerting/bin/main/DUMMY-FILE b/alerting/bin/main/DUMMY-FILE deleted file mode 100644 index 74623997f..000000000 --- a/alerting/bin/main/DUMMY-FILE +++ /dev/null @@ -1 +0,0 @@ -THIS IS A DUMMY FILE \ No newline at end of file diff --git a/alerting/bin/main/META-INF/services/org.opensearch.painless.spi.PainlessExtension b/alerting/bin/main/META-INF/services/org.opensearch.painless.spi.PainlessExtension deleted file mode 100644 index 3a1412058..000000000 --- 
a/alerting/bin/main/META-INF/services/org.opensearch.painless.spi.PainlessExtension +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright OpenSearch Contributors -# SPDX-License-Identifier: Apache-2.0 - -org.opensearch.alerting.AlertingPlugin \ No newline at end of file diff --git a/alerting/bin/main/org/opensearch/alerting/AlertService.kt b/alerting/bin/main/org/opensearch/alerting/AlertService.kt deleted file mode 100644 index 05e35c1b7..000000000 --- a/alerting/bin/main/org/opensearch/alerting/AlertService.kt +++ /dev/null @@ -1,892 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import org.apache.logging.log4j.LogManager -import org.opensearch.ExceptionsHelper -import org.opensearch.action.DocWriteRequest -import org.opensearch.action.bulk.BackoffPolicy -import org.opensearch.action.bulk.BulkRequest -import org.opensearch.action.bulk.BulkResponse -import org.opensearch.action.delete.DeleteRequest -import org.opensearch.action.index.IndexRequest -import org.opensearch.action.index.IndexResponse -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.action.support.WriteRequest -import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.model.ActionRunResult -import org.opensearch.alerting.model.ChainedAlertTriggerRunResult -import org.opensearch.alerting.model.QueryLevelTriggerRunResult -import org.opensearch.alerting.opensearchapi.firstFailureOrNull -import org.opensearch.alerting.opensearchapi.retry -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.script.ChainedAlertTriggerExecutionContext -import org.opensearch.alerting.script.DocumentLevelTriggerExecutionContext -import org.opensearch.alerting.script.QueryLevelTriggerExecutionContext -import org.opensearch.alerting.util.IndexUtils -import org.opensearch.alerting.util.MAX_SEARCH_SIZE -import org.opensearch.alerting.util.getBucketKeysHash -import org.opensearch.alerting.workflow.WorkflowRunContext -import org.opensearch.client.Client -import org.opensearch.common.unit.TimeValue -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentFactory -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.alerts.AlertError -import org.opensearch.commons.alerting.model.ActionExecutionResult -import org.opensearch.commons.alerting.model.AggregationResultBucket -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.BucketLevelTrigger -import org.opensearch.commons.alerting.model.DataSources -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.NoOpTrigger -import org.opensearch.commons.alerting.model.Trigger -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.commons.alerting.model.action.AlertCategory -import org.opensearch.core.action.ActionListener -import org.opensearch.core.common.bytes.BytesReference -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils -import org.opensearch.index.VersionType -import org.opensearch.index.query.QueryBuilders -import org.opensearch.index.reindex.BulkByScrollResponse -import 
org.opensearch.index.reindex.DeleteByQueryAction -import org.opensearch.index.reindex.DeleteByQueryRequestBuilder -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.search.sort.SortOrder -import java.time.Instant -import java.util.UUID -import java.util.concurrent.TimeUnit -import kotlin.coroutines.resume -import kotlin.coroutines.resumeWithException -import kotlin.coroutines.suspendCoroutine - -/** Service that handles CRUD operations for alerts */ -class AlertService( - val client: Client, - val xContentRegistry: NamedXContentRegistry, - val alertIndices: AlertIndices -) { - - companion object { - const val MAX_BUCKET_LEVEL_MONITOR_ALERT_SEARCH_COUNT = 500 - const val ERROR_ALERT_ID_PREFIX = "error-alert" - - val ALERTS_SEARCH_TIMEOUT = TimeValue(5, TimeUnit.MINUTES) - } - - private val logger = LogManager.getLogger(AlertService::class.java) - - suspend fun loadCurrentAlertsForWorkflow(workflow: Workflow, dataSources: DataSources): Map { - val searchAlertsResponse: SearchResponse = searchAlerts( - workflow = workflow, - size = workflow.triggers.size * 2, // We expect there to be only a single in-progress alert so fetch 2 to check - dataSources = dataSources - ) - - val foundAlerts = searchAlertsResponse.hits.map { Alert.parse(contentParser(it.sourceRef), it.id, it.version) } - .groupBy { it.triggerId } - foundAlerts.values.forEach { alerts -> - if (alerts.size > 1) { - logger.warn("Found multiple alerts for same trigger: $alerts") - } - } - - return workflow.triggers.associateWith { trigger -> - foundAlerts[trigger.id]?.firstOrNull() - } - } - - suspend fun loadCurrentAlertsForQueryLevelMonitor(monitor: Monitor, workflowRunContext: WorkflowRunContext?): Map { - val searchAlertsResponse: SearchResponse = searchAlerts( - monitor = monitor, - size = monitor.triggers.size * 2, // We expect there to be only a single in-progress alert so fetch 2 to check - workflowRunContext - ) - - val foundAlerts = searchAlertsResponse.hits.map { Alert.parse(contentParser(it.sourceRef), it.id, it.version) } - .groupBy { it.triggerId } - foundAlerts.values.forEach { alerts -> - if (alerts.size > 1) { - logger.warn("Found multiple alerts for same trigger: $alerts") - } - } - - return monitor.triggers.associateWith { trigger -> - foundAlerts[trigger.id]?.firstOrNull() - } - } - - suspend fun loadCurrentAlertsForBucketLevelMonitor( - monitor: Monitor, - workflowRunContext: WorkflowRunContext?, - ): Map> { - val searchAlertsResponse: SearchResponse = searchAlerts( - monitor = monitor, - // TODO: This should be limited based on a circuit breaker that limits Alerts - size = MAX_BUCKET_LEVEL_MONITOR_ALERT_SEARCH_COUNT, - workflowRunContext = workflowRunContext - ) - - val foundAlerts = searchAlertsResponse.hits.map { Alert.parse(contentParser(it.sourceRef), it.id, it.version) } - .groupBy { it.triggerId } - - return monitor.triggers.associateWith { trigger -> - // Default to an empty map if there are no Alerts found for a Trigger to make Alert categorization logic easier - ( - foundAlerts[trigger.id]?.mapNotNull { alert -> - alert.aggregationResultBucket?.let { it.getBucketKeysHash() to alert } - }?.toMap()?.toMutableMap() ?: mutableMapOf() - ) - } - } - - fun composeQueryLevelAlert( - ctx: QueryLevelTriggerExecutionContext, - result: QueryLevelTriggerRunResult, - alertError: AlertError?, - executionId: String, - workflorwRunContext: WorkflowRunContext? - ): Alert? 
{ - val currentTime = Instant.now() - val currentAlert = ctx.alert - - val updatedActionExecutionResults = mutableListOf() - val currentActionIds = mutableSetOf() - if (currentAlert != null) { - // update current alert's action execution results - for (actionExecutionResult in currentAlert.actionExecutionResults) { - val actionId = actionExecutionResult.actionId - currentActionIds.add(actionId) - val actionRunResult = result.actionResults[actionId] - when { - actionRunResult == null -> updatedActionExecutionResults.add(actionExecutionResult) - actionRunResult.throttled -> - updatedActionExecutionResults.add( - actionExecutionResult.copy( - throttledCount = actionExecutionResult.throttledCount + 1 - ) - ) - else -> updatedActionExecutionResults.add(actionExecutionResult.copy(lastExecutionTime = actionRunResult.executionTime)) - } - } - // add action execution results which not exist in current alert - updatedActionExecutionResults.addAll( - result.actionResults.filter { !currentActionIds.contains(it.key) } - .map { ActionExecutionResult(it.key, it.value.executionTime, if (it.value.throttled) 1 else 0) } - ) - } else { - updatedActionExecutionResults.addAll( - result.actionResults.map { - ActionExecutionResult(it.key, it.value.executionTime, if (it.value.throttled) 1 else 0) - } - ) - } - - // Merge the alert's error message to the current alert's history - val updatedHistory = currentAlert?.errorHistory.update(alertError) - return if (alertError == null && !result.triggered) { - currentAlert?.copy( - state = Alert.State.COMPLETED, - endTime = currentTime, - errorMessage = null, - errorHistory = updatedHistory, - actionExecutionResults = updatedActionExecutionResults, - schemaVersion = IndexUtils.alertIndexSchemaVersion - ) - } else if (alertError == null && currentAlert?.isAcknowledged() == true) { - null - } else if (currentAlert != null) { - val alertState = if (alertError == null) Alert.State.ACTIVE else Alert.State.ERROR - currentAlert.copy( - state = alertState, - lastNotificationTime = currentTime, - errorMessage = alertError?.message, - errorHistory = updatedHistory, - actionExecutionResults = updatedActionExecutionResults, - schemaVersion = IndexUtils.alertIndexSchemaVersion, - ) - } else { - val alertState = if (workflorwRunContext?.auditDelegateMonitorAlerts == true) { - Alert.State.AUDIT - } else if (alertError == null) Alert.State.ACTIVE - else Alert.State.ERROR - Alert( - monitor = ctx.monitor, trigger = ctx.trigger, startTime = currentTime, - lastNotificationTime = currentTime, state = alertState, errorMessage = alertError?.message, - errorHistory = updatedHistory, actionExecutionResults = updatedActionExecutionResults, - schemaVersion = IndexUtils.alertIndexSchemaVersion, executionId = executionId, - workflowId = workflorwRunContext?.workflowId ?: "" - ) - } - } - - // TODO: clean this up so it follows the proper alert management for doc monitors - fun composeDocLevelAlert( - findings: List, - relatedDocIds: List, - ctx: DocumentLevelTriggerExecutionContext, - alertError: AlertError?, - executionId: String, - workflorwRunContext: WorkflowRunContext? 
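The branch ladder above is the whole query-level alert lifecycle. As a reading aid, a hypothetical distillation of that decision into a single function (nextState is not a helper in this file; a null result means nothing is written, either because no alert was open or because an acknowledged alert is deliberately left untouched):

    fun nextState(error: AlertError?, triggered: Boolean, current: Alert?, audit: Boolean): Alert.State? = when {
        error == null && !triggered ->
            if (current != null) Alert.State.COMPLETED else null // resolve the open alert, if any
        error == null && current?.isAcknowledged() == true ->
            null                                                 // acknowledged alerts are not rewritten
        current != null ->
            if (error == null) Alert.State.ACTIVE else Alert.State.ERROR
        audit ->
            Alert.State.AUDIT                                    // delegate monitor running inside a workflow
        else ->
            if (error == null) Alert.State.ACTIVE else Alert.State.ERROR
    }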
- ): Alert { - val currentTime = Instant.now() - - val alertState = if (workflorwRunContext?.auditDelegateMonitorAlerts == true) { - Alert.State.AUDIT - } else if (alertError == null) { - Alert.State.ACTIVE - } else { - Alert.State.ERROR - } - return Alert( - id = UUID.randomUUID().toString(), monitor = ctx.monitor, trigger = ctx.trigger, startTime = currentTime, - lastNotificationTime = currentTime, state = alertState, errorMessage = alertError?.message, - schemaVersion = IndexUtils.alertIndexSchemaVersion, findingIds = findings, relatedDocIds = relatedDocIds, - executionId = executionId, workflowId = workflorwRunContext?.workflowId ?: "" - ) - } - - fun composeMonitorErrorAlert( - id: String, - monitor: Monitor, - alertError: AlertError, - executionId: String?, - workflowRunContext: WorkflowRunContext? - ): Alert { - val currentTime = Instant.now() - val alertState = if (workflowRunContext?.auditDelegateMonitorAlerts == true) { - Alert.State.AUDIT - } else { - Alert.State.ERROR - } - return Alert( - id = id, monitor = monitor, trigger = NoOpTrigger(), startTime = currentTime, - lastNotificationTime = currentTime, state = alertState, errorMessage = alertError.message, - schemaVersion = IndexUtils.alertIndexSchemaVersion, executionId = executionId, workflowId = workflowRunContext?.workflowId ?: "" - ) - } - - fun composeChainedAlert( - ctx: ChainedAlertTriggerExecutionContext, - executionId: String, - workflow: Workflow, - associatedAlertIds: List, - result: ChainedAlertTriggerRunResult, - alertError: AlertError? = null, - ): Alert? { - - val currentTime = Instant.now() - val currentAlert = ctx.alert - - val updatedActionExecutionResults = mutableListOf() - val currentActionIds = mutableSetOf() - if (currentAlert != null) { - // update current alert's action execution results - for (actionExecutionResult in currentAlert.actionExecutionResults) { - val actionId = actionExecutionResult.actionId - currentActionIds.add(actionId) - val actionRunResult = result.actionResults[actionId] - when { - actionRunResult == null -> updatedActionExecutionResults.add(actionExecutionResult) - actionRunResult.throttled -> - updatedActionExecutionResults.add( - actionExecutionResult.copy( - throttledCount = actionExecutionResult.throttledCount + 1 - ) - ) - - else -> updatedActionExecutionResults.add(actionExecutionResult.copy(lastExecutionTime = actionRunResult.executionTime)) - } - } - // add action execution results which not exist in current alert - updatedActionExecutionResults.addAll( - result.actionResults.filter { !currentActionIds.contains(it.key) } - .map { ActionExecutionResult(it.key, it.value.executionTime, if (it.value.throttled) 1 else 0) } - ) - } else { - updatedActionExecutionResults.addAll( - result.actionResults.map { - ActionExecutionResult(it.key, it.value.executionTime, if (it.value.throttled) 1 else 0) - } - ) - } - - // Merge the alert's error message to the current alert's history - val updatedHistory = currentAlert?.errorHistory.update(alertError) - return if (alertError == null && !result.triggered) { - currentAlert?.copy( - state = Alert.State.COMPLETED, - endTime = currentTime, - errorMessage = null, - errorHistory = updatedHistory, - actionExecutionResults = updatedActionExecutionResults, - schemaVersion = IndexUtils.alertIndexSchemaVersion - ) - } else if (alertError == null && currentAlert?.isAcknowledged() == true) { - null - } else if (currentAlert != null) { - val alertState = Alert.State.ACTIVE - currentAlert.copy( - state = alertState, - lastNotificationTime = currentTime, 
- errorMessage = alertError?.message, - errorHistory = updatedHistory, - actionExecutionResults = updatedActionExecutionResults, - schemaVersion = IndexUtils.alertIndexSchemaVersion, - ) - } else { - if (alertError == null) Alert.State.ACTIVE - else Alert.State.ERROR - Alert( - startTime = Instant.now(), - lastNotificationTime = currentTime, - state = Alert.State.ACTIVE, - errorMessage = null, schemaVersion = IndexUtils.alertIndexSchemaVersion, - chainedAlertTrigger = ctx.trigger, - executionId = executionId, - workflow = workflow, - associatedAlertIds = associatedAlertIds - ) - } - } - - fun updateActionResultsForBucketLevelAlert( - currentAlert: Alert, - actionResults: Map, - alertError: AlertError? - ): Alert { - val updatedActionExecutionResults = mutableListOf() - val currentActionIds = mutableSetOf() - // Update alert's existing action execution results - for (actionExecutionResult in currentAlert.actionExecutionResults) { - val actionId = actionExecutionResult.actionId - currentActionIds.add(actionId) - val actionRunResult = actionResults[actionId] - when { - actionRunResult == null -> updatedActionExecutionResults.add(actionExecutionResult) - actionRunResult.throttled -> - updatedActionExecutionResults.add( - actionExecutionResult.copy( - throttledCount = actionExecutionResult.throttledCount + 1 - ) - ) - else -> updatedActionExecutionResults.add(actionExecutionResult.copy(lastExecutionTime = actionRunResult.executionTime)) - } - } - - // Add action execution results not currently present in the alert - updatedActionExecutionResults.addAll( - actionResults.filter { !currentActionIds.contains(it.key) } - .map { ActionExecutionResult(it.key, it.value.executionTime, if (it.value.throttled) 1 else 0) } - ) - - val updatedErrorHistory = currentAlert.errorHistory.update(alertError) - return if (alertError == null) { - currentAlert.copy(errorHistory = updatedErrorHistory, actionExecutionResults = updatedActionExecutionResults) - } else { - currentAlert.copy( - state = Alert.State.ERROR, - errorMessage = alertError.message, - errorHistory = updatedErrorHistory, - actionExecutionResults = updatedActionExecutionResults - ) - } - } - - // TODO: Can change the parameters to use ctx: BucketLevelTriggerExecutionContext instead of monitor/trigger and - // result: AggTriggerRunResult for aggResultBuckets - // TODO: Can refactor this method to use Sets instead which can cleanup some of the categorization logic (like getting completed alerts) - fun getCategorizedAlertsForBucketLevelMonitor( - monitor: Monitor, - trigger: BucketLevelTrigger, - currentAlerts: MutableMap, - aggResultBuckets: List, - findings: List, - executionId: String, - workflorwRunContext: WorkflowRunContext? 
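The action-result merge above now appears three times in this file (query-level, chained, and bucket-level alerts). Stripped to its essentials, the pattern is the sketch below; mergeActionResults is a hypothetical name, while ActionExecutionResult and ActionRunResult are the types used above:

    fun mergeActionResults(
        previous: List<ActionExecutionResult>,
        latest: Map<String, ActionRunResult>
    ): List<ActionExecutionResult> {
        val seen = previous.map { it.actionId }.toSet()
        val carriedForward = previous.map { prior ->
            val run = latest[prior.actionId]
            when {
                run == null -> prior                                                    // action did not run this time
                run.throttled -> prior.copy(throttledCount = prior.throttledCount + 1)  // count the throttle
                else -> prior.copy(lastExecutionTime = run.executionTime)               // refresh the timestamp
            }
        }
        return carriedForward + latest.filterKeys { it !in seen }
            .map { ActionExecutionResult(it.key, it.value.executionTime, if (it.value.throttled) 1 else 0) }
    }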
- ): Map> { - val dedupedAlerts = mutableListOf() - val newAlerts = mutableListOf() - val currentTime = Instant.now() - - aggResultBuckets.forEach { aggAlertBucket -> - val currentAlert = currentAlerts[aggAlertBucket.getBucketKeysHash()] - if (currentAlert != null) { - // De-duped Alert - dedupedAlerts.add(currentAlert.copy(aggregationResultBucket = aggAlertBucket)) - - // Remove de-duped Alert from currentAlerts since it is no longer a candidate for a potentially completed Alert - currentAlerts.remove(aggAlertBucket.getBucketKeysHash()) - } else { - // New Alert - val alertState = if (workflorwRunContext?.auditDelegateMonitorAlerts == true) { - Alert.State.AUDIT - } else Alert.State.ACTIVE - val newAlert = Alert( - monitor = monitor, trigger = trigger, startTime = currentTime, - lastNotificationTime = currentTime, state = alertState, errorMessage = null, - errorHistory = mutableListOf(), actionExecutionResults = mutableListOf(), - schemaVersion = IndexUtils.alertIndexSchemaVersion, aggregationResultBucket = aggAlertBucket, - findingIds = findings, executionId = executionId, workflowId = workflorwRunContext?.workflowId ?: "" - ) - newAlerts.add(newAlert) - } - } - - return mapOf( - AlertCategory.DEDUPED to dedupedAlerts, - AlertCategory.NEW to newAlerts - ) - } - - fun convertToCompletedAlerts(currentAlerts: Map?): List { - val currentTime = Instant.now() - return currentAlerts?.map { - it.value.copy( - state = Alert.State.COMPLETED, - endTime = currentTime, - errorMessage = null, - schemaVersion = IndexUtils.alertIndexSchemaVersion - ) - } ?: listOf() - } - - suspend fun upsertMonitorErrorAlert( - monitor: Monitor, - errorMessage: String, - executionId: String?, - workflowRunContext: WorkflowRunContext?, - ) { - val newErrorAlertId = "$ERROR_ALERT_ID_PREFIX-${monitor.id}-${UUID.randomUUID()}" - - val searchRequest = SearchRequest(monitor.dataSources.alertsIndex) - .source( - SearchSourceBuilder() - .sort(Alert.START_TIME_FIELD, SortOrder.DESC) - .query( - QueryBuilders.boolQuery() - .must(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitor.id)) - .must(QueryBuilders.termQuery(Alert.STATE_FIELD, Alert.State.ERROR.name)) - ) - ) - val searchResponse: SearchResponse = client.suspendUntil { search(searchRequest, it) } - - var alert = - composeMonitorErrorAlert(newErrorAlertId, monitor, AlertError(Instant.now(), errorMessage), executionId, workflowRunContext) - - if (searchResponse.hits.totalHits.value > 0L) { - if (searchResponse.hits.totalHits.value > 1L) { - logger.warn("There are [${searchResponse.hits.totalHits.value}] error alerts for monitor [${monitor.id}]") - } - // Deserialize first/latest Alert - val hit = searchResponse.hits.hits[0] - val xcp = contentParser(hit.sourceRef) - val existingErrorAlert = Alert.parse(xcp, hit.id, hit.version) - - val currentTime = Instant.now() - alert = if (alert.errorMessage != existingErrorAlert.errorMessage) { - var newErrorHistory = existingErrorAlert.errorHistory.update( - AlertError(existingErrorAlert.startTime, existingErrorAlert.errorMessage!!) 
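A compressed view of the bucket categorization just above: a bucket whose key hash matches an open alert de-dupes into it (and consumes it from currentAlerts), an unmatched bucket becomes a new alert, and anything still left in currentAlerts afterwards is what convertToCompletedAlerts closes out. The helper name below is hypothetical:

    fun splitBuckets(
        currentAlerts: MutableMap<String, Alert>,
        buckets: List<AggregationResultBucket>
    ): Pair<List<Alert>, List<AggregationResultBucket>> {
        val deduped = mutableListOf<Alert>()
        val fresh = mutableListOf<AggregationResultBucket>()
        for (bucket in buckets) {
            val existing = currentAlerts.remove(bucket.getBucketKeysHash())
            if (existing != null) {
                deduped.add(existing.copy(aggregationResultBucket = bucket)) // DEDUPED
            } else {
                fresh.add(bucket)                                            // becomes a NEW alert
            }
        }
        return deduped to fresh // leftovers in currentAlerts -> COMPLETED candidates
    }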
- ) - alert.copy( - id = existingErrorAlert.id, - errorHistory = newErrorHistory, - startTime = currentTime, - lastNotificationTime = currentTime - ) - } else { - existingErrorAlert.copy(lastNotificationTime = currentTime) - } - } - - val alertIndexRequest = IndexRequest(monitor.dataSources.alertsIndex) - .routing(alert.monitorId) - .source(alert.toXContentWithUser(XContentFactory.jsonBuilder())) - .opType(DocWriteRequest.OpType.INDEX) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .id(alert.id) - - val indexResponse: IndexResponse = client.suspendUntil { index(alertIndexRequest, it) } - logger.debug("Monitor error Alert successfully upserted. Op result: ${indexResponse.result}") - } - - suspend fun clearMonitorErrorAlert(monitor: Monitor) { - val currentTime = Instant.now() - try { - val searchRequest = SearchRequest("${monitor.dataSources.alertsIndex}") - .source( - SearchSourceBuilder() - .size(MAX_SEARCH_SIZE) - .sort(Alert.START_TIME_FIELD, SortOrder.DESC) - .query( - QueryBuilders.boolQuery() - .must(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitor.id)) - .must(QueryBuilders.termQuery(Alert.STATE_FIELD, Alert.State.ERROR.name)) - ) - - ) - searchRequest.cancelAfterTimeInterval = ALERTS_SEARCH_TIMEOUT - val searchResponse: SearchResponse = client.suspendUntil { search(searchRequest, it) } - // If there's no error alert present, there's nothing to clear. We can stop here. - if (searchResponse.hits.totalHits.value == 0L) { - return - } - - val indexRequests = mutableListOf() - searchResponse.hits.hits.forEach { hit -> - if (searchResponse.hits.totalHits.value > 1L) { - logger.warn("Found [${searchResponse.hits.totalHits.value}] error alerts for monitor [${monitor.id}] while clearing") - } - // Deserialize first/latest Alert - val xcp = contentParser(hit.sourceRef) - val existingErrorAlert = Alert.parse(xcp, hit.id, hit.version) - - val updatedAlert = existingErrorAlert.copy( - endTime = currentTime - ) - - indexRequests += IndexRequest(monitor.dataSources.alertsIndex) - .routing(monitor.id) - .id(updatedAlert.id) - .source(updatedAlert.toXContentWithUser(XContentFactory.jsonBuilder())) - .opType(DocWriteRequest.OpType.INDEX) - } - - val bulkResponse: BulkResponse = client.suspendUntil { - bulk(BulkRequest().add(indexRequests).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), it) - } - if (bulkResponse.hasFailures()) { - bulkResponse.items.forEach { item -> - if (item.isFailed) { - logger.debug("Failed clearing error alert ${item.id} of monitor [${monitor.id}]") - } - } - } else { - logger.debug("[${bulkResponse.items.size}] Error Alerts successfully cleared. End time set to: $currentTime") - } - } catch (e: Exception) { - logger.error("Error clearing monitor error alerts for monitor [${monitor.id}]: ${ExceptionsHelper.detailedMessage(e)}") - } - } - - /** - * Moves already cleared "error alerts" to history index. - * Error Alert is cleared when endTime timestamp is set, on first successful run after failed run - * */ - suspend fun moveClearedErrorAlertsToHistory(monitorId: String, alertIndex: String, alertHistoryIndex: String) { - try { - val searchRequest = SearchRequest(alertIndex) - .source( - SearchSourceBuilder() - .size(MAX_SEARCH_SIZE) - .query( - QueryBuilders.boolQuery() - .must(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitorId)) - .must(QueryBuilders.termQuery(Alert.STATE_FIELD, Alert.State.ERROR.name)) - .must(QueryBuilders.existsQuery(Alert.END_TIME_FIELD)) - ) - .version(true) // Do we need this? 
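client.suspendUntil, used for every search and index call in this file, is the bridge between the client's callback-style API and coroutines. It is presumably implemented along these lines (a sketch inferred from the explicit suspendCoroutine/ActionListener usage nearby, not the actual opensearchapi source):

    suspend fun <T> Client.suspendUntil(block: Client.(ActionListener<T>) -> Unit): T =
        suspendCoroutine { cont ->
            block(object : ActionListener<T> {
                override fun onResponse(response: T) = cont.resume(response)
                override fun onFailure(e: Exception) = cont.resumeWithException(e)
            })
        }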
- ) - searchRequest.cancelAfterTimeInterval = ALERTS_SEARCH_TIMEOUT - val searchResponse: SearchResponse = client.suspendUntil { search(searchRequest, it) } - - if (searchResponse.hits.totalHits.value == 0L) { - return - } - - // Copy to history index - - val copyRequests = mutableListOf() - - searchResponse.hits.hits.forEach { hit -> - - val xcp = contentParser(hit.sourceRef) - val alert = Alert.parse(xcp, hit.id, hit.version) - - copyRequests.add( - IndexRequest(alertHistoryIndex) - .routing(alert.monitorId) - .source(hit.sourceRef, XContentType.JSON) - .version(hit.version) - .versionType(VersionType.EXTERNAL_GTE) - .id(hit.id) - .timeout(MonitorRunnerService.monitorCtx.indexTimeout) - ) - } - - val bulkResponse: BulkResponse = client.suspendUntil { - bulk(BulkRequest().add(copyRequests).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), it) - } - if (bulkResponse.hasFailures()) { - bulkResponse.items.forEach { item -> - if (item.isFailed) { - logger.error("Failed copying error alert [${item.id}] to history index [$alertHistoryIndex]") - } - } - return - } - - // Delete from alertIndex - - val alertIds = searchResponse.hits.hits.map { it.id } - - val deleteResponse: BulkByScrollResponse = suspendCoroutine { cont -> - DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE) - .source(alertIndex) - .filter(QueryBuilders.termsQuery("_id", alertIds)) - .refresh(true) - .timeout(ALERTS_SEARCH_TIMEOUT) - .execute( - object : ActionListener { - override fun onResponse(response: BulkByScrollResponse) = cont.resume(response) - override fun onFailure(t: Exception) = cont.resumeWithException(t) - } - ) - } - deleteResponse.bulkFailures.forEach { - logger.error("Failed deleting alert while moving cleared alerts: [${it.id}] cause: [${it.cause}] ") - } - } catch (e: Exception) { - logger.error("Failed moving cleared error alerts to history index: ${ExceptionsHelper.detailedMessage(e)}") - } - } - - suspend fun saveAlerts( - dataSources: DataSources, - alerts: List, - retryPolicy: BackoffPolicy, - allowUpdatingAcknowledgedAlert: Boolean = false, - routingId: String // routing is mandatory and set as monitor id. for workflow chained alerts we pass workflow id as routing - ) { - val alertsIndex = dataSources.alertsIndex - val alertsHistoryIndex = dataSources.alertsHistoryIndex - - var requestsToRetry = alerts.flatMap { alert -> - // We don't want to set the version when saving alerts because the MonitorRunner has first priority when writing alerts. - // In the rare event that a user acknowledges an alert between when it's read and when it's written - // back we're ok if that acknowledgement is lost. It's easier to get the user to retry than for the runner to - // spend time reloading the alert and writing it back. 
- when (alert.state) { - Alert.State.ACTIVE, Alert.State.ERROR -> { - listOf>( - IndexRequest(alertsIndex) - .routing(routingId) - .source(alert.toXContentWithUser(XContentFactory.jsonBuilder())) - .id(if (alert.id != Alert.NO_ID) alert.id else null) - ) - } - Alert.State.ACKNOWLEDGED -> { - // Allow ACKNOWLEDGED Alerts to be updated for Bucket-Level Monitors since de-duped Alerts can be ACKNOWLEDGED - // and updated by the MonitorRunner - if (allowUpdatingAcknowledgedAlert) { - listOf>( - IndexRequest(alertsIndex) - .routing(routingId) - .source(alert.toXContentWithUser(XContentFactory.jsonBuilder())) - .id(if (alert.id != Alert.NO_ID) alert.id else null) - ) - } else { - throw IllegalStateException("Unexpected attempt to save ${alert.state} alert: $alert") - } - } - Alert.State.AUDIT -> { - val index = if (alertIndices.isAlertHistoryEnabled()) { - dataSources.alertsHistoryIndex - } else dataSources.alertsIndex - listOf>( - IndexRequest(index) - .routing(routingId) - .source(alert.toXContentWithUser(XContentFactory.jsonBuilder())) - .id(if (alert.id != Alert.NO_ID) alert.id else null) - ) - } - Alert.State.DELETED -> { - throw IllegalStateException("Unexpected attempt to save ${alert.state} alert: $alert") - } - Alert.State.COMPLETED -> { - listOfNotNull>( - DeleteRequest(alertsIndex, alert.id) - .routing(routingId), - // Only add completed alert to history index if history is enabled - if (alertIndices.isAlertHistoryEnabled()) { - IndexRequest(alertsHistoryIndex) - .routing(routingId) - .source(alert.toXContentWithUser(XContentFactory.jsonBuilder())) - .id(alert.id) - } else null - ) - } - } - } - - if (requestsToRetry.isEmpty()) return - // Retry Bulk requests if there was any 429 response - retryPolicy.retry(logger, listOf(RestStatus.TOO_MANY_REQUESTS)) { - val bulkRequest = BulkRequest().add(requestsToRetry).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - val bulkResponse: BulkResponse = client.suspendUntil { client.bulk(bulkRequest, it) } - val failedResponses = (bulkResponse.items ?: arrayOf()).filter { it.isFailed } - requestsToRetry = failedResponses.filter { it.status() == RestStatus.TOO_MANY_REQUESTS } - .map { bulkRequest.requests()[it.itemId] as IndexRequest } - - if (requestsToRetry.isNotEmpty()) { - val retryCause = failedResponses.first { it.status() == RestStatus.TOO_MANY_REQUESTS }.failure.cause - throw ExceptionsHelper.convertToOpenSearchException(retryCause) - } - } - } - - /** - * This is a separate method created specifically for saving new Alerts during the Bucket-Level Monitor run. - * Alerts are saved in two batches during the execution of an Bucket-Level Monitor, once before the Actions are executed - * and once afterwards. This method saves Alerts to the monitor's alertIndex but returns the same Alerts with their document IDs. - * - * The Alerts are required with their indexed ID so that when the new Alerts are updated after the Action execution, - * the ID is available for the index request so that the existing Alert can be updated, instead of creating a duplicate Alert document. 
- */ - suspend fun saveNewAlerts(dataSources: DataSources, alerts: List, retryPolicy: BackoffPolicy): List { - val savedAlerts = mutableListOf() - var alertsBeingIndexed = alerts - var requestsToRetry: MutableList = alerts.map { alert -> - if (alert.state != Alert.State.ACTIVE && alert.state != Alert.State.AUDIT) { - throw IllegalStateException("Unexpected attempt to save new alert [$alert] with state [${alert.state}]") - } - if (alert.id != Alert.NO_ID) { - throw IllegalStateException("Unexpected attempt to save new alert [$alert] with an existing alert ID [${alert.id}]") - } - val alertIndex = if (alert.state == Alert.State.AUDIT && alertIndices.isAlertHistoryEnabled()) { - dataSources.alertsHistoryIndex - } else dataSources.alertsIndex - IndexRequest(alertIndex) - .routing(alert.monitorId) - .source(alert.toXContentWithUser(XContentFactory.jsonBuilder())) - }.toMutableList() - - if (requestsToRetry.isEmpty()) return listOf() - - // Retry Bulk requests if there was any 429 response. - // The responses of a bulk request will be in the same order as the individual requests. - // If the index request succeeded for an Alert, the document ID from the response is taken and saved in the Alert. - // If the index request is to be retried, the Alert is saved separately as well so that its relative ordering is maintained in - // relation to index request in the retried bulk request for when it eventually succeeds. - retryPolicy.retry(logger, listOf(RestStatus.TOO_MANY_REQUESTS)) { - val bulkRequest = BulkRequest().add(requestsToRetry).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - val bulkResponse: BulkResponse = client.suspendUntil { client.bulk(bulkRequest, it) } - // TODO: This is only used to retrieve the retryCause, could instead fetch it from the bulkResponse iteration below - val failedResponses = (bulkResponse.items ?: arrayOf()).filter { it.isFailed } - - requestsToRetry = mutableListOf() - val alertsBeingRetried = mutableListOf() - bulkResponse.items.forEach { item -> - if (item.isFailed) { - // TODO: What if the failure cause was not TOO_MANY_REQUESTS, should these be saved and logged? - if (item.status() == RestStatus.TOO_MANY_REQUESTS) { - requestsToRetry.add(bulkRequest.requests()[item.itemId] as IndexRequest) - alertsBeingRetried.add(alertsBeingIndexed[item.itemId]) - } - } else { - // The ID of the BulkItemResponse in this case is the document ID resulting from the DocWriteRequest operation - savedAlerts.add(alertsBeingIndexed[item.itemId].copy(id = item.id)) - } - } - - alertsBeingIndexed = alertsBeingRetried - - if (requestsToRetry.isNotEmpty()) { - val retryCause = failedResponses.first { it.status() == RestStatus.TOO_MANY_REQUESTS }.failure.cause - throw ExceptionsHelper.convertToOpenSearchException(retryCause) - } - } - - return savedAlerts - } - - private fun contentParser(bytesReference: BytesReference): XContentParser { - val xcp = XContentHelper.createParser( - xContentRegistry, - LoggingDeprecationHandler.INSTANCE, - bytesReference, - XContentType.JSON - ) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - return xcp - } - - /** - * Searches for Alerts in the monitor's alertIndex. 
- * - * @param monitorId The Monitor to get Alerts for - * @param size The number of search hits (Alerts) to return - */ - private suspend fun searchAlerts(monitor: Monitor, size: Int, workflowRunContext: WorkflowRunContext?): SearchResponse { - val monitorId = monitor.id - val alertIndex = monitor.dataSources.alertsIndex - - val queryBuilder = QueryBuilders.boolQuery() - .must(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitorId)) - if (workflowRunContext != null) { - queryBuilder.must(QueryBuilders.termQuery(Alert.WORKFLOW_ID_FIELD, workflowRunContext.workflowId)) - } - val searchSourceBuilder = SearchSourceBuilder() - .size(size) - .query(queryBuilder) - - val searchRequest = SearchRequest(alertIndex) - .routing(monitorId) - .source(searchSourceBuilder) - val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } - if (searchResponse.status() != RestStatus.OK) { - throw (searchResponse.firstFailureOrNull()?.cause ?: RuntimeException("Unknown error loading alerts")) - } - - return searchResponse - } - - /** - * Searches for ACTIVE/ACKNOWLEDGED chained alerts in the workflow's alertIndex. - * - * @param monitorId The Monitor to get Alerts for - * @param size The number of search hits (Alerts) to return - */ - private suspend fun searchAlerts( - workflow: Workflow, - size: Int, - dataSources: DataSources, - ): SearchResponse { - val workflowId = workflow.id - val alertIndex = dataSources.alertsIndex - - val queryBuilder = QueryBuilders.boolQuery() - .must(QueryBuilders.termQuery(Alert.WORKFLOW_ID_FIELD, workflowId)) - .must(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, "")) - val searchSourceBuilder = SearchSourceBuilder() - .size(size) - .query(queryBuilder) - - val searchRequest = SearchRequest(alertIndex) - .routing(workflowId) - .source(searchSourceBuilder) - val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } - if (searchResponse.status() != RestStatus.OK) { - throw (searchResponse.firstFailureOrNull()?.cause ?: RuntimeException("Unknown error loading alerts")) - } - return searchResponse - } - - private fun List?.update(alertError: AlertError?): List { - return when { - this == null && alertError == null -> emptyList() - this != null && alertError == null -> this - this == null && alertError != null -> listOf(alertError) - this != null && alertError != null -> (listOf(alertError) + this).take(10) - else -> throw IllegalStateException("Unreachable code reached!") - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/AlertingPlugin.kt b/alerting/bin/main/org/opensearch/alerting/AlertingPlugin.kt deleted file mode 100644 index e0d73658f..000000000 --- a/alerting/bin/main/org/opensearch/alerting/AlertingPlugin.kt +++ /dev/null @@ -1,387 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import org.opensearch.action.ActionRequest -import org.opensearch.alerting.action.ExecuteMonitorAction -import org.opensearch.alerting.action.ExecuteWorkflowAction -import org.opensearch.alerting.action.GetDestinationsAction -import org.opensearch.alerting.action.GetEmailAccountAction -import org.opensearch.alerting.action.GetEmailGroupAction -import org.opensearch.alerting.action.SearchEmailAccountAction -import org.opensearch.alerting.action.SearchEmailGroupAction -import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.core.JobSweeper -import org.opensearch.alerting.core.ScheduledJobIndices -import 
org.opensearch.alerting.core.action.node.ScheduledJobsStatsAction -import org.opensearch.alerting.core.action.node.ScheduledJobsStatsTransportAction -import org.opensearch.alerting.core.resthandler.RestScheduledJobStatsHandler -import org.opensearch.alerting.core.schedule.JobScheduler -import org.opensearch.alerting.core.settings.LegacyOpenDistroScheduledJobSettings -import org.opensearch.alerting.core.settings.ScheduledJobSettings -import org.opensearch.alerting.resthandler.RestAcknowledgeAlertAction -import org.opensearch.alerting.resthandler.RestAcknowledgeChainedAlertAction -import org.opensearch.alerting.resthandler.RestDeleteMonitorAction -import org.opensearch.alerting.resthandler.RestDeleteWorkflowAction -import org.opensearch.alerting.resthandler.RestExecuteMonitorAction -import org.opensearch.alerting.resthandler.RestExecuteWorkflowAction -import org.opensearch.alerting.resthandler.RestGetAlertsAction -import org.opensearch.alerting.resthandler.RestGetDestinationsAction -import org.opensearch.alerting.resthandler.RestGetEmailAccountAction -import org.opensearch.alerting.resthandler.RestGetEmailGroupAction -import org.opensearch.alerting.resthandler.RestGetFindingsAction -import org.opensearch.alerting.resthandler.RestGetMonitorAction -import org.opensearch.alerting.resthandler.RestGetWorkflowAction -import org.opensearch.alerting.resthandler.RestGetWorkflowAlertsAction -import org.opensearch.alerting.resthandler.RestIndexMonitorAction -import org.opensearch.alerting.resthandler.RestIndexWorkflowAction -import org.opensearch.alerting.resthandler.RestSearchEmailAccountAction -import org.opensearch.alerting.resthandler.RestSearchEmailGroupAction -import org.opensearch.alerting.resthandler.RestSearchMonitorAction -import org.opensearch.alerting.script.TriggerScript -import org.opensearch.alerting.service.DeleteMonitorService -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.settings.DestinationSettings -import org.opensearch.alerting.settings.LegacyOpenDistroAlertingSettings -import org.opensearch.alerting.settings.LegacyOpenDistroDestinationSettings -import org.opensearch.alerting.transport.TransportAcknowledgeAlertAction -import org.opensearch.alerting.transport.TransportAcknowledgeChainedAlertAction -import org.opensearch.alerting.transport.TransportDeleteMonitorAction -import org.opensearch.alerting.transport.TransportDeleteWorkflowAction -import org.opensearch.alerting.transport.TransportExecuteMonitorAction -import org.opensearch.alerting.transport.TransportExecuteWorkflowAction -import org.opensearch.alerting.transport.TransportGetAlertsAction -import org.opensearch.alerting.transport.TransportGetDestinationsAction -import org.opensearch.alerting.transport.TransportGetEmailAccountAction -import org.opensearch.alerting.transport.TransportGetEmailGroupAction -import org.opensearch.alerting.transport.TransportGetFindingsSearchAction -import org.opensearch.alerting.transport.TransportGetMonitorAction -import org.opensearch.alerting.transport.TransportGetWorkflowAction -import org.opensearch.alerting.transport.TransportGetWorkflowAlertsAction -import org.opensearch.alerting.transport.TransportIndexMonitorAction -import org.opensearch.alerting.transport.TransportIndexWorkflowAction -import org.opensearch.alerting.transport.TransportSearchEmailAccountAction -import org.opensearch.alerting.transport.TransportSearchEmailGroupAction -import org.opensearch.alerting.transport.TransportSearchMonitorAction -import 
org.opensearch.alerting.util.DocLevelMonitorQueries -import org.opensearch.alerting.util.destinationmigration.DestinationMigrationCoordinator -import org.opensearch.client.Client -import org.opensearch.cluster.metadata.IndexNameExpressionResolver -import org.opensearch.cluster.node.DiscoveryNodes -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.settings.ClusterSettings -import org.opensearch.common.settings.IndexScopedSettings -import org.opensearch.common.settings.Setting -import org.opensearch.common.settings.Settings -import org.opensearch.common.settings.SettingsFilter -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder -import org.opensearch.commons.alerting.model.BucketLevelTrigger -import org.opensearch.commons.alerting.model.ChainedAlertTrigger -import org.opensearch.commons.alerting.model.ClusterMetricsInput -import org.opensearch.commons.alerting.model.DocLevelMonitorInput -import org.opensearch.commons.alerting.model.DocumentLevelTrigger -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.QueryLevelTrigger -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.alerting.model.SearchInput -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.core.action.ActionResponse -import org.opensearch.core.common.io.stream.NamedWriteableRegistry -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.env.Environment -import org.opensearch.env.NodeEnvironment -import org.opensearch.index.IndexModule -import org.opensearch.painless.spi.PainlessExtension -import org.opensearch.painless.spi.Whitelist -import org.opensearch.painless.spi.WhitelistLoader -import org.opensearch.percolator.PercolatorPluginExt -import org.opensearch.plugins.ActionPlugin -import org.opensearch.plugins.ReloadablePlugin -import org.opensearch.plugins.ScriptPlugin -import org.opensearch.plugins.SearchPlugin -import org.opensearch.repositories.RepositoriesService -import org.opensearch.rest.RestController -import org.opensearch.rest.RestHandler -import org.opensearch.script.ScriptContext -import org.opensearch.script.ScriptService -import org.opensearch.threadpool.ThreadPool -import org.opensearch.watcher.ResourceWatcherService -import java.util.function.Supplier - -/** - * Entry point of the OpenDistro for Elasticsearch alerting plugin - * This class initializes the [RestGetMonitorAction], [RestDeleteMonitorAction], [RestIndexMonitorAction] rest handlers. - * It also adds [Monitor.XCONTENT_REGISTRY], [SearchInput.XCONTENT_REGISTRY], [QueryLevelTrigger.XCONTENT_REGISTRY], - * [BucketLevelTrigger.XCONTENT_REGISTRY], [ClusterMetricsInput.XCONTENT_REGISTRY] to the [NamedXContentRegistry] so that we are able to deserialize the custom named objects. 
- */ -internal class AlertingPlugin : PainlessExtension, ActionPlugin, ScriptPlugin, ReloadablePlugin, SearchPlugin, PercolatorPluginExt() { - - override fun getContextWhitelists(): Map, List> { - val whitelist = WhitelistLoader.loadFromResourceFiles(javaClass, "org.opensearch.alerting.txt") - return mapOf(TriggerScript.CONTEXT to listOf(whitelist)) - } - - companion object { - @JvmField val OPEN_SEARCH_DASHBOARDS_USER_AGENT = "OpenSearch-Dashboards" - - @JvmField val UI_METADATA_EXCLUDE = arrayOf("monitor.${Monitor.UI_METADATA_FIELD}") - - @JvmField val MONITOR_BASE_URI = "/_plugins/_alerting/monitors" - @JvmField val WORKFLOW_BASE_URI = "/_plugins/_alerting/workflows" - @JvmField val DESTINATION_BASE_URI = "/_plugins/_alerting/destinations" - - @JvmField val LEGACY_OPENDISTRO_MONITOR_BASE_URI = "/_opendistro/_alerting/monitors" - - @JvmField val LEGACY_OPENDISTRO_DESTINATION_BASE_URI = "/_opendistro/_alerting/destinations" - - @JvmField val EMAIL_ACCOUNT_BASE_URI = "$DESTINATION_BASE_URI/email_accounts" - - @JvmField val EMAIL_GROUP_BASE_URI = "$DESTINATION_BASE_URI/email_groups" - - @JvmField val LEGACY_OPENDISTRO_EMAIL_ACCOUNT_BASE_URI = "$LEGACY_OPENDISTRO_DESTINATION_BASE_URI/email_accounts" - - @JvmField val LEGACY_OPENDISTRO_EMAIL_GROUP_BASE_URI = "$LEGACY_OPENDISTRO_DESTINATION_BASE_URI/email_groups" - - @JvmField val FINDING_BASE_URI = "/_plugins/_alerting/findings" - - @JvmField val ALERTING_JOB_TYPES = listOf("monitor", "workflow") - } - - lateinit var runner: MonitorRunnerService - lateinit var scheduler: JobScheduler - lateinit var sweeper: JobSweeper - lateinit var scheduledJobIndices: ScheduledJobIndices - lateinit var docLevelMonitorQueries: DocLevelMonitorQueries - lateinit var threadPool: ThreadPool - lateinit var alertIndices: AlertIndices - lateinit var clusterService: ClusterService - lateinit var destinationMigrationCoordinator: DestinationMigrationCoordinator - - override fun getRestHandlers( - settings: Settings, - restController: RestController, - clusterSettings: ClusterSettings, - indexScopedSettings: IndexScopedSettings, - settingsFilter: SettingsFilter, - indexNameExpressionResolver: IndexNameExpressionResolver?, - nodesInCluster: Supplier - ): List { - return listOf( - RestGetMonitorAction(), - RestDeleteMonitorAction(), - RestIndexMonitorAction(), - RestIndexWorkflowAction(), - RestSearchMonitorAction(settings, clusterService), - RestExecuteMonitorAction(), - RestExecuteWorkflowAction(), - RestAcknowledgeAlertAction(), - RestAcknowledgeChainedAlertAction(), - RestScheduledJobStatsHandler("_alerting"), - RestSearchEmailAccountAction(), - RestGetEmailAccountAction(), - RestSearchEmailGroupAction(), - RestGetEmailGroupAction(), - RestGetDestinationsAction(), - RestGetAlertsAction(), - RestGetWorkflowAlertsAction(), - RestGetFindingsAction(), - RestGetWorkflowAction(), - RestDeleteWorkflowAction() - ) - } - - override fun getActions(): List> { - return listOf( - ActionPlugin.ActionHandler(ScheduledJobsStatsAction.INSTANCE, ScheduledJobsStatsTransportAction::class.java), - ActionPlugin.ActionHandler(AlertingActions.INDEX_MONITOR_ACTION_TYPE, TransportIndexMonitorAction::class.java), - ActionPlugin.ActionHandler(AlertingActions.GET_MONITOR_ACTION_TYPE, TransportGetMonitorAction::class.java), - ActionPlugin.ActionHandler(ExecuteMonitorAction.INSTANCE, TransportExecuteMonitorAction::class.java), - ActionPlugin.ActionHandler(AlertingActions.SEARCH_MONITORS_ACTION_TYPE, TransportSearchMonitorAction::class.java), - 
ActionPlugin.ActionHandler(AlertingActions.DELETE_MONITOR_ACTION_TYPE, TransportDeleteMonitorAction::class.java), - ActionPlugin.ActionHandler(AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE, TransportAcknowledgeAlertAction::class.java), - ActionPlugin.ActionHandler( - AlertingActions.ACKNOWLEDGE_CHAINED_ALERTS_ACTION_TYPE, TransportAcknowledgeChainedAlertAction::class.java - ), - ActionPlugin.ActionHandler(GetEmailAccountAction.INSTANCE, TransportGetEmailAccountAction::class.java), - ActionPlugin.ActionHandler(SearchEmailAccountAction.INSTANCE, TransportSearchEmailAccountAction::class.java), - ActionPlugin.ActionHandler(GetEmailGroupAction.INSTANCE, TransportGetEmailGroupAction::class.java), - ActionPlugin.ActionHandler(SearchEmailGroupAction.INSTANCE, TransportSearchEmailGroupAction::class.java), - ActionPlugin.ActionHandler(GetDestinationsAction.INSTANCE, TransportGetDestinationsAction::class.java), - ActionPlugin.ActionHandler(AlertingActions.GET_ALERTS_ACTION_TYPE, TransportGetAlertsAction::class.java), - ActionPlugin.ActionHandler(AlertingActions.GET_WORKFLOW_ALERTS_ACTION_TYPE, TransportGetWorkflowAlertsAction::class.java), - ActionPlugin.ActionHandler(AlertingActions.GET_FINDINGS_ACTION_TYPE, TransportGetFindingsSearchAction::class.java), - ActionPlugin.ActionHandler(AlertingActions.INDEX_WORKFLOW_ACTION_TYPE, TransportIndexWorkflowAction::class.java), - ActionPlugin.ActionHandler(AlertingActions.GET_WORKFLOW_ACTION_TYPE, TransportGetWorkflowAction::class.java), - ActionPlugin.ActionHandler(AlertingActions.DELETE_WORKFLOW_ACTION_TYPE, TransportDeleteWorkflowAction::class.java), - ActionPlugin.ActionHandler(ExecuteWorkflowAction.INSTANCE, TransportExecuteWorkflowAction::class.java) - ) - } - - override fun getNamedXContent(): List { - return listOf( - Monitor.XCONTENT_REGISTRY, - SearchInput.XCONTENT_REGISTRY, - DocLevelMonitorInput.XCONTENT_REGISTRY, - QueryLevelTrigger.XCONTENT_REGISTRY, - BucketLevelTrigger.XCONTENT_REGISTRY, - ClusterMetricsInput.XCONTENT_REGISTRY, - DocumentLevelTrigger.XCONTENT_REGISTRY, - ChainedAlertTrigger.XCONTENT_REGISTRY, - Workflow.XCONTENT_REGISTRY - ) - } - - override fun createComponents( - client: Client, - clusterService: ClusterService, - threadPool: ThreadPool, - resourceWatcherService: ResourceWatcherService, - scriptService: ScriptService, - xContentRegistry: NamedXContentRegistry, - environment: Environment, - nodeEnvironment: NodeEnvironment, - namedWriteableRegistry: NamedWriteableRegistry, - indexNameExpressionResolver: IndexNameExpressionResolver, - repositoriesServiceSupplier: Supplier - ): Collection { - // Need to figure out how to use the OpenSearch DI classes rather than handwiring things here. 
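[Editor's note, not part of the patch: `createComponents` below hand-wires the runner through chained `register*` calls rather than a DI container, as the inline comment concedes. A hedged sketch of that fluent-singleton pattern, with hypothetical names and `Any` in place of the real dependency types:

    object MiniRunnerService {
        private var client: Any? = null
        private var settings: Any? = null

        // Each register step stores one dependency and returns the singleton,
        // so wiring reads as a single chained expression.
        fun registerClient(client: Any): MiniRunnerService = apply { this.client = client }
        fun registerSettings(settings: Any): MiniRunnerService = apply { this.settings = settings }
    }

    fun wire(client: Any, settings: Any) =
        MiniRunnerService
            .registerClient(client)
            .registerSettings(settings)
]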
- val settings = environment.settings() - alertIndices = AlertIndices(settings, client, threadPool, clusterService) - runner = MonitorRunnerService - .registerClusterService(clusterService) - .registerClient(client) - .registerNamedXContentRegistry(xContentRegistry) - .registerindexNameExpressionResolver(indexNameExpressionResolver) - .registerScriptService(scriptService) - .registerSettings(settings) - .registerThreadPool(threadPool) - .registerAlertIndices(alertIndices) - .registerInputService(InputService(client, scriptService, namedWriteableRegistry, xContentRegistry, clusterService, settings)) - .registerTriggerService(TriggerService(scriptService)) - .registerAlertService(AlertService(client, xContentRegistry, alertIndices)) - .registerDocLevelMonitorQueries(DocLevelMonitorQueries(client, clusterService)) - .registerWorkflowService(WorkflowService(client, xContentRegistry)) - .registerConsumers() - .registerDestinationSettings() - scheduledJobIndices = ScheduledJobIndices(client.admin(), clusterService) - docLevelMonitorQueries = DocLevelMonitorQueries(client, clusterService) - scheduler = JobScheduler(threadPool, runner) - sweeper = JobSweeper(environment.settings(), client, clusterService, threadPool, xContentRegistry, scheduler, ALERTING_JOB_TYPES) - destinationMigrationCoordinator = DestinationMigrationCoordinator(client, clusterService, threadPool, scheduledJobIndices) - this.threadPool = threadPool - this.clusterService = clusterService - - MonitorMetadataService.initialize( - client, - clusterService, - xContentRegistry, - settings - ) - - WorkflowMetadataService.initialize( - client, - clusterService, - xContentRegistry, - settings - ) - - DeleteMonitorService.initialize(client) - - return listOf(sweeper, scheduler, runner, scheduledJobIndices, docLevelMonitorQueries, destinationMigrationCoordinator) - } - - override fun getSettings(): List> { - return listOf( - ScheduledJobSettings.REQUEST_TIMEOUT, - ScheduledJobSettings.SWEEP_BACKOFF_MILLIS, - ScheduledJobSettings.SWEEP_BACKOFF_RETRY_COUNT, - ScheduledJobSettings.SWEEP_PERIOD, - ScheduledJobSettings.SWEEP_PAGE_SIZE, - ScheduledJobSettings.SWEEPER_ENABLED, - LegacyOpenDistroScheduledJobSettings.REQUEST_TIMEOUT, - LegacyOpenDistroScheduledJobSettings.SWEEP_BACKOFF_MILLIS, - LegacyOpenDistroScheduledJobSettings.SWEEP_BACKOFF_RETRY_COUNT, - LegacyOpenDistroScheduledJobSettings.SWEEP_PERIOD, - LegacyOpenDistroScheduledJobSettings.SWEEP_PAGE_SIZE, - LegacyOpenDistroScheduledJobSettings.SWEEPER_ENABLED, - AlertingSettings.INPUT_TIMEOUT, - AlertingSettings.INDEX_TIMEOUT, - AlertingSettings.BULK_TIMEOUT, - AlertingSettings.ALERT_BACKOFF_MILLIS, - AlertingSettings.ALERT_BACKOFF_COUNT, - AlertingSettings.MOVE_ALERTS_BACKOFF_MILLIS, - AlertingSettings.MOVE_ALERTS_BACKOFF_COUNT, - AlertingSettings.ALERT_HISTORY_ENABLED, - AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD, - AlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE, - AlertingSettings.ALERT_HISTORY_MAX_DOCS, - AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD, - AlertingSettings.ALERTING_MAX_MONITORS, - AlertingSettings.REQUEST_TIMEOUT, - AlertingSettings.MAX_ACTION_THROTTLE_VALUE, - AlertingSettings.FILTER_BY_BACKEND_ROLES, - AlertingSettings.MAX_ACTIONABLE_ALERT_COUNT, - LegacyOpenDistroAlertingSettings.INPUT_TIMEOUT, - LegacyOpenDistroAlertingSettings.INDEX_TIMEOUT, - LegacyOpenDistroAlertingSettings.BULK_TIMEOUT, - LegacyOpenDistroAlertingSettings.ALERT_BACKOFF_MILLIS, - LegacyOpenDistroAlertingSettings.ALERT_BACKOFF_COUNT, - 
LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_MILLIS, - LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_COUNT, - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_ENABLED, - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD, - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE, - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_MAX_DOCS, - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_RETENTION_PERIOD, - LegacyOpenDistroAlertingSettings.ALERTING_MAX_MONITORS, - LegacyOpenDistroAlertingSettings.REQUEST_TIMEOUT, - LegacyOpenDistroAlertingSettings.MAX_ACTION_THROTTLE_VALUE, - LegacyOpenDistroAlertingSettings.FILTER_BY_BACKEND_ROLES, - DestinationSettings.EMAIL_USERNAME, - DestinationSettings.EMAIL_PASSWORD, - DestinationSettings.ALLOW_LIST, - DestinationSettings.HOST_DENY_LIST, - LegacyOpenDistroDestinationSettings.EMAIL_USERNAME, - LegacyOpenDistroDestinationSettings.EMAIL_PASSWORD, - LegacyOpenDistroDestinationSettings.ALLOW_LIST, - LegacyOpenDistroDestinationSettings.HOST_DENY_LIST, - AlertingSettings.FINDING_HISTORY_ENABLED, - AlertingSettings.FINDING_HISTORY_MAX_DOCS, - AlertingSettings.FINDING_HISTORY_INDEX_MAX_AGE, - AlertingSettings.FINDING_HISTORY_ROLLOVER_PERIOD, - AlertingSettings.FINDING_HISTORY_RETENTION_PERIOD - ) - } - - override fun onIndexModule(indexModule: IndexModule) { - if (indexModule.index.name == ScheduledJob.SCHEDULED_JOBS_INDEX) { - indexModule.addIndexOperationListener(sweeper) - } - } - - override fun getContexts(): List> { - return listOf(TriggerScript.CONTEXT) - } - - override fun reload(settings: Settings) { - runner.reloadDestinationSettings(settings) - } - - override fun getPipelineAggregations(): List { - return listOf( - SearchPlugin.PipelineAggregationSpec( - BucketSelectorExtAggregationBuilder.NAME, - { sin: StreamInput -> BucketSelectorExtAggregationBuilder(sin) }, - { parser: XContentParser, agg_name: String -> - BucketSelectorExtAggregationBuilder.parse(agg_name, parser) - } - ) - ) - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/BucketLevelMonitorRunner.kt b/alerting/bin/main/org/opensearch/alerting/BucketLevelMonitorRunner.kt deleted file mode 100644 index c8adc021c..000000000 --- a/alerting/bin/main/org/opensearch/alerting/BucketLevelMonitorRunner.kt +++ /dev/null @@ -1,503 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import org.apache.logging.log4j.LogManager -import org.opensearch.action.bulk.BulkRequest -import org.opensearch.action.bulk.BulkResponse -import org.opensearch.action.index.IndexRequest -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.action.support.WriteRequest -import org.opensearch.alerting.model.ActionRunResult -import org.opensearch.alerting.model.BucketLevelTriggerRunResult -import org.opensearch.alerting.model.InputRunResults -import org.opensearch.alerting.model.MonitorRunResult -import org.opensearch.alerting.opensearchapi.InjectorContextElement -import org.opensearch.alerting.opensearchapi.retry -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.opensearchapi.withClosableContext -import org.opensearch.alerting.script.BucketLevelTriggerExecutionContext -import org.opensearch.alerting.util.defaultToPerExecutionAction -import org.opensearch.alerting.util.getActionExecutionPolicy -import org.opensearch.alerting.util.getBucketKeysHash -import 
org.opensearch.alerting.util.getCombinedTriggerRunResult -import org.opensearch.alerting.workflow.WorkflowRunContext -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.BucketLevelTrigger -import org.opensearch.commons.alerting.model.Finding -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.SearchInput -import org.opensearch.commons.alerting.model.action.AlertCategory -import org.opensearch.commons.alerting.model.action.PerAlertActionScope -import org.opensearch.commons.alerting.model.action.PerExecutionActionScope -import org.opensearch.commons.alerting.util.string -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.index.query.BoolQueryBuilder -import org.opensearch.index.query.QueryBuilders -import org.opensearch.script.Script -import org.opensearch.script.ScriptType -import org.opensearch.script.TemplateScript -import org.opensearch.search.aggregations.AggregatorFactories -import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder -import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder -import org.opensearch.search.builder.SearchSourceBuilder -import java.time.Instant -import java.util.UUID - -object BucketLevelMonitorRunner : MonitorRunner() { - private val logger = LogManager.getLogger(javaClass) - - override suspend fun runMonitor( - monitor: Monitor, - monitorCtx: MonitorRunnerExecutionContext, - periodStart: Instant, - periodEnd: Instant, - dryrun: Boolean, - workflowRunContext: WorkflowRunContext?, - executionId: String - ): MonitorRunResult { - val roles = MonitorRunnerService.getRolesForMonitor(monitor) - logger.debug("Running monitor: ${monitor.name} with roles: $roles Thread: ${Thread.currentThread().name}") - - if (periodStart == periodEnd) { - logger.warn("Start and end time are the same: $periodStart. This monitor will probably only run once.") - } - - var monitorResult = MonitorRunResult(monitor.name, periodStart, periodEnd) - val currentAlerts = try { - monitorCtx.alertIndices!!.createOrUpdateAlertIndex(monitor.dataSources) - monitorCtx.alertIndices!!.createOrUpdateInitialAlertHistoryIndex(monitor.dataSources) - if (monitor.dataSources.findingsEnabled == true) { - monitorCtx.alertIndices!!.createOrUpdateInitialFindingHistoryIndex(monitor.dataSources) - } - monitorCtx.alertService!!.loadCurrentAlertsForBucketLevelMonitor(monitor, workflowRunContext) - } catch (e: Exception) { - // We can't save ERROR alerts to the index here as we don't know if there are existing ACTIVE alerts - val id = if (monitor.id.trim().isEmpty()) "_na_" else monitor.id - logger.error("Error loading alerts for monitor: $id", e) - return monitorResult.copy(error = e) - } - - /* - * Since the aggregation query can consist of multiple pages, each iteration of the do-while loop only has partial results - * from the runBucketLevelTrigger results whereas the currentAlerts has a complete view of existing Alerts. This means that - * it can be confirmed if an Alert is new or de-duped local to the do-while loop if a key appears or doesn't appear in - * the currentAlerts. 
However, it cannot be guaranteed that an existing Alert is COMPLETED until all pages have been - * iterated over (since a bucket that did not appear in one page of the aggregation results, could appear in a later page). - * - * To solve for this, the currentAlerts will be acting as a list of "potentially completed alerts" throughout the execution. - * When categorizing the Alerts in each iteration, de-duped Alerts will be removed from the currentAlerts map - * (for the Trigger being executed) and the Alerts left in currentAlerts after all pages have been iterated through can - * be marked as COMPLETED since they were never de-duped. - * - * Meanwhile, the nextAlerts map will contain Alerts that will exist at the end of this Monitor execution. It is a compilation - * across Triggers because in the case of executing actions at a PER_EXECUTION frequency, all the Alerts are needed before executing - * Actions which can only be done once all of the aggregation results (and Triggers given the pagination logic) have been evaluated. - */ - val triggerResults = mutableMapOf() - val triggerContexts = mutableMapOf() - val nextAlerts = mutableMapOf>>() - var firstIteration = true - var firstPageOfInputResults = InputRunResults(listOf(), null) - do { - // TODO: Since a composite aggregation is being used for the input query, the total bucket count cannot be determined. - // If a setting is imposed that limits buckets that can be processed for Bucket-Level Monitors, we'd need to iterate over - // the buckets until we hit that threshold. In that case, we'd want to exit the execution without creating any alerts since the - // buckets we iterate over before hitting the limit is not deterministic. Is there a better way to fail faster in this case? - withClosableContext(InjectorContextElement(monitor.id, monitorCtx.settings!!, monitorCtx.threadPool!!.threadContext, roles)) { - // Storing the first page of results in the case of pagination input results to prevent empty results - // in the final output of monitorResult which occurs when all pages have been exhausted. - // If it's favorable to return the last page, will need to check how to accomplish that with multiple aggregation paths - // with different page counts. - val inputResults = monitorCtx.inputService!!.collectInputResults( - monitor, - periodStart, - periodEnd, - monitorResult.inputResults, - workflowRunContext - ) - if (firstIteration) { - firstPageOfInputResults = inputResults - firstIteration = false - } - monitorResult = monitorResult.copy(inputResults = inputResults) - } - - for (trigger in monitor.triggers) { - // The currentAlerts map is formed by iterating over the Monitor's Triggers as keys so null should not be returned here - val currentAlertsForTrigger = currentAlerts[trigger]!! - val triggerCtx = BucketLevelTriggerExecutionContext(monitor, trigger as BucketLevelTrigger, monitorResult) - triggerContexts[trigger.id] = triggerCtx - val triggerResult = monitorCtx.triggerService!!.runBucketLevelTrigger(monitor, trigger, triggerCtx) - triggerResults[trigger.id] = triggerResult.getCombinedTriggerRunResult(triggerResults[trigger.id]) - - /* - * If an error was encountered when running the trigger, it means that something went wrong when parsing the input results - * for the filtered buckets returned from the pipeline bucket selector injected into the input query. 
- * - * In this case, the returned aggregation result buckets are empty so the categorization of the Alerts that happens below - * should be skipped/invalidated since comparing the current Alerts to an empty result will lead the execution to believe - * that all Alerts have been COMPLETED. Not doing so would mean it would not be possible to propagate the error into the - * existing Alerts in a way the user can easily view them since they will have all been moved to the history index. - */ - if (triggerResults[trigger.id]?.error != null) continue - val findings = - if (monitor.triggers.size == 1 && monitor.dataSources.findingsEnabled == true) { - logger.debug("Creating bucket level findings") - createFindings( - triggerResult, - monitor, - monitorCtx, - periodStart, - periodEnd, - !dryrun && monitor.id != Monitor.NO_ID, - executionId - ) - } else { - emptyList() - } - // TODO: Should triggerResult's aggregationResultBucket be a list? If not, getCategorizedAlertsForBucketLevelMonitor can - // be refactored to use a map instead - val categorizedAlerts = monitorCtx.alertService!!.getCategorizedAlertsForBucketLevelMonitor( - monitor, - trigger, - currentAlertsForTrigger, - triggerResult.aggregationResultBuckets.values.toList(), - findings, - executionId, - workflowRunContext - ).toMutableMap() - val dedupedAlerts = categorizedAlerts.getOrDefault(AlertCategory.DEDUPED, emptyList()) - var newAlerts = categorizedAlerts.getOrDefault(AlertCategory.NEW, emptyList()) - - /* - * Index de-duped and new Alerts here (if it's not a test Monitor) so they are available at the time the Actions are executed. - * - * The new Alerts have to be returned and saved back with their indexed doc ID to prevent duplicate documents - * when the Alerts are updated again after Action execution. - * - * Note: Index operations can fail for various reasons (such as write blocks on cluster), in such a case, the Actions - * will still execute with the Alert information in the ctx but the Alerts may not be visible. - */ - if (!dryrun && monitor.id != Monitor.NO_ID) { - monitorCtx.alertService!!.saveAlerts( - monitor.dataSources, - dedupedAlerts, - monitorCtx.retryPolicy!!, - allowUpdatingAcknowledgedAlert = true, - monitor.id - ) - newAlerts = monitorCtx.alertService!!.saveNewAlerts(monitor.dataSources, newAlerts, monitorCtx.retryPolicy!!) - } - - // Store deduped and new Alerts to accumulate across pages - if (!nextAlerts.containsKey(trigger.id)) { - nextAlerts[trigger.id] = mutableMapOf( - AlertCategory.DEDUPED to mutableListOf(), - AlertCategory.NEW to mutableListOf(), - AlertCategory.COMPLETED to mutableListOf() - ) - } - nextAlerts[trigger.id]?.get(AlertCategory.DEDUPED)?.addAll(dedupedAlerts) - nextAlerts[trigger.id]?.get(AlertCategory.NEW)?.addAll(newAlerts) - } - } while (monitorResult.inputResults.afterKeysPresent()) - - // The completed Alerts are whatever are left in the currentAlerts. - // However, this operation will only be done if there was no trigger error, since otherwise the nextAlerts were not collected - // in favor of just using the currentAlerts as-is. 
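[Editor's note, not part of the patch: the pagination bookkeeping described in the comments above reduces to one invariant: bucket keys seen in any page are de-duped and removed from the "potentially completed" map, and whatever survives the last page is COMPLETED. A minimal sketch under that assumption, with hypothetical names:

    fun categorizePage(
        potentiallyCompleted: MutableMap<String, String>, // bucketKeysHash -> existing alert id
        pageBucketKeys: Set<String>
    ): Pair<List<String>, List<String>> {
        // Keys present in both the page and the map are de-duped; removing them
        // means anything still in the map after the final page was never seen
        // again and can safely be marked COMPLETED.
        val deduped = pageBucketKeys.filter { potentiallyCompleted.remove(it) != null }
        val newKeys = (pageBucketKeys - deduped.toSet()).toList()
        return deduped to newKeys
    }
]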
- currentAlerts.forEach { (trigger, keysToAlertsMap) -> - if (triggerResults[trigger.id]?.error == null) { - nextAlerts[trigger.id]?.get(AlertCategory.COMPLETED) - ?.addAll(monitorCtx.alertService!!.convertToCompletedAlerts(keysToAlertsMap)) - } - } - - for (trigger in monitor.triggers) { - val alertsToUpdate = mutableSetOf() - val completedAlertsToUpdate = mutableSetOf() - // Filter ACKNOWLEDGED Alerts from the deduped list so they do not have Actions executed for them. - // New Alerts are ignored since they cannot be acknowledged yet. - val dedupedAlerts = nextAlerts[trigger.id]?.get(AlertCategory.DEDUPED) - ?.filterNot { it.state == Alert.State.ACKNOWLEDGED }?.toMutableList() - ?: mutableListOf() - // Update nextAlerts so the filtered DEDUPED Alerts are reflected for PER_ALERT Action execution - nextAlerts[trigger.id]?.set(AlertCategory.DEDUPED, dedupedAlerts) - val newAlerts = nextAlerts[trigger.id]?.get(AlertCategory.NEW) ?: mutableListOf() - val completedAlerts = nextAlerts[trigger.id]?.get(AlertCategory.COMPLETED) ?: mutableListOf() - - // Adding all the COMPLETED Alerts to a separate set and removing them if they get added - // to alertsToUpdate to ensure the Alert doc is updated at the end in either case - completedAlertsToUpdate.addAll(completedAlerts) - - // All trigger contexts and results should be available at this point since all triggers were evaluated in the main do-while loop - val triggerCtx = triggerContexts[trigger.id]!! - val triggerResult = triggerResults[trigger.id]!! - val monitorOrTriggerError = monitorResult.error ?: triggerResult.error - val shouldDefaultToPerExecution = defaultToPerExecutionAction( - monitorCtx.maxActionableAlertCount, - monitorId = monitor.id, - triggerId = trigger.id, - totalActionableAlertCount = dedupedAlerts.size + newAlerts.size + completedAlerts.size, - monitorOrTriggerError = monitorOrTriggerError - ) - for (action in trigger.actions) { - // ActionExecutionPolicy should not be null for Bucket-Level Monitors since it has a default config when not set explicitly - val actionExecutionScope = action.getActionExecutionPolicy(monitor)!!.actionExecutionScope - if (actionExecutionScope is PerAlertActionScope && !shouldDefaultToPerExecution) { - for (alertCategory in actionExecutionScope.actionableAlerts) { - val alertsToExecuteActionsFor = nextAlerts[trigger.id]?.get(alertCategory) ?: mutableListOf() - for (alert in alertsToExecuteActionsFor) { - val actionCtx = getActionContextForAlertCategory( - alertCategory, - alert, - triggerCtx, - monitorOrTriggerError - ) - // AggregationResultBucket should not be null here - val alertBucketKeysHash = alert.aggregationResultBucket!!.getBucketKeysHash() - if (!triggerResult.actionResultsMap.containsKey(alertBucketKeysHash)) { - triggerResult.actionResultsMap[alertBucketKeysHash] = mutableMapOf() - } - - // Keeping the throttled response separate from runAction for now since - // throttling is not supported for PER_EXECUTION - val actionResult = if (MonitorRunnerService.isActionActionable(action, alert)) { - this.runAction(action, actionCtx, monitorCtx, monitor, dryrun) - } else { - ActionRunResult(action.id, action.name, mapOf(), true, null, null) - } - - triggerResult.actionResultsMap[alertBucketKeysHash]?.set(action.id, actionResult) - alertsToUpdate.add(alert) - // Remove the alert from completedAlertsToUpdate in case it is present there since - // its update will be handled in the alertsToUpdate batch - completedAlertsToUpdate.remove(alert) - } - } - } else if (actionExecutionScope is 
PerExecutionActionScope || shouldDefaultToPerExecution) { - // If all categories of Alerts are empty, there is nothing to message on and we can skip the Action. - // If the error is not null, this is disregarded and the Action is executed anyway so the user can be notified. - if (monitorOrTriggerError == null && dedupedAlerts.isEmpty() && newAlerts.isEmpty() && completedAlerts.isEmpty()) { - continue - } - - val actionCtx = triggerCtx.copy( - dedupedAlerts = dedupedAlerts, - newAlerts = newAlerts, - completedAlerts = completedAlerts, - error = monitorResult.error ?: triggerResult.error - ) - val actionResult = this.runAction(action, actionCtx, monitorCtx, monitor, dryrun) - // If there was an error during trigger execution then the Alerts to be updated are the current Alerts since the state - // was not changed. Otherwise, the Alerts to be updated are the sum of the deduped, new and completed Alerts. - val alertsToIterate = if (monitorOrTriggerError == null) { - (dedupedAlerts + newAlerts + completedAlerts) - } else currentAlerts[trigger]?.map { it.value } ?: listOf() - // Save the Action run result for every Alert - for (alert in alertsToIterate) { - val alertBucketKeysHash = alert.aggregationResultBucket!!.getBucketKeysHash() - if (!triggerResult.actionResultsMap.containsKey(alertBucketKeysHash)) { - triggerResult.actionResultsMap[alertBucketKeysHash] = mutableMapOf() - } - triggerResult.actionResultsMap[alertBucketKeysHash]?.set(action.id, actionResult) - alertsToUpdate.add(alert) - // Remove the alert from completedAlertsToUpdate in case it is present there since - // its update will be handled in the alertsToUpdate batch - completedAlertsToUpdate.remove(alert) - } - } - } - - // Alerts are only added to alertsToUpdate after Action execution meaning the action results for it should be present - // in the actionResultsMap but returning a default value when accessing the map to be safe. - val updatedAlerts = alertsToUpdate.map { alert -> - val bucketKeysHash = alert.aggregationResultBucket!!.getBucketKeysHash() - val actionResults = triggerResult.actionResultsMap.getOrDefault(bucketKeysHash, emptyMap()) - monitorCtx.alertService!!.updateActionResultsForBucketLevelAlert( - alert.copy(lastNotificationTime = MonitorRunnerService.currentTime()), - actionResults, - // TODO: Update BucketLevelTriggerRunResult.alertError() to retrieve error based on the first failed Action - monitorResult.alertError() ?: triggerResult.alertError() - ) - } - - // Update Alerts with action execution results (if it's not a test Monitor). - // ACKNOWLEDGED Alerts should not be saved here since actions are not executed for them. 
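[Editor's note, not part of the patch: the two `saveAlerts` calls below persist the action-result batch and the leftover COMPLETED alerts separately, and the code above removes every actioned alert from `completedAlertsToUpdate` so no alert is written twice. A hedged sketch of that split, names illustrative:

    fun <A> partitionForSave(alertsWithActionResults: Set<A>, completedAlerts: Set<A>): Pair<List<A>, List<A>> {
        // Alerts already covered by the action-result batch must not be
        // saved again in the completed-alerts batch.
        val leftoverCompleted = completedAlerts - alertsWithActionResults
        return alertsWithActionResults.toList() to leftoverCompleted.toList()
    }
]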
- if (!dryrun && monitor.id != Monitor.NO_ID) { - monitorCtx.alertService!!.saveAlerts( - monitor.dataSources, updatedAlerts, monitorCtx.retryPolicy!!, allowUpdatingAcknowledgedAlert = false, - routingId = monitor.id - ) - // Save any COMPLETED Alerts that were not covered in updatedAlerts - monitorCtx.alertService!!.saveAlerts( - monitor.dataSources, - completedAlertsToUpdate.toList(), - monitorCtx.retryPolicy!!, - allowUpdatingAcknowledgedAlert = false, - monitor.id - ) - } - } - - return monitorResult.copy(inputResults = firstPageOfInputResults, triggerResults = triggerResults) - } - - private suspend fun createFindings( - triggerResult: BucketLevelTriggerRunResult, - monitor: Monitor, - monitorCtx: MonitorRunnerExecutionContext, - periodStart: Instant, - periodEnd: Instant, - shouldCreateFinding: Boolean, - executionId: String, - ): List { - monitor.inputs.forEach { input -> - if (input is SearchInput) { - val bucketValues: Set = triggerResult.aggregationResultBuckets.keys - val query = input.query - var fieldName = "" - - for (aggFactory in (query.aggregations() as AggregatorFactories.Builder).aggregatorFactories) { - when (aggFactory) { - is CompositeAggregationBuilder -> { - var groupByFields = 0 // if number of fields used to group by > 1 we won't calculate findings - val sources = aggFactory.sources() - for (source in sources) { - if (groupByFields > 0) { - logger.error("grouByFields > 0. not generating findings for bucket level monitor ${monitor.id}") - return listOf() - } - groupByFields++ - fieldName = source.field() - } - } - is TermsAggregationBuilder -> { - fieldName = aggFactory.field() - } - else -> { - logger.error( - "Bucket level monitor findings supported only for composite and term aggs. Found [{${aggFactory.type}}]" - ) - return listOf() - } - } - } - if (fieldName != "") { - val searchParams = mapOf( - "period_start" to periodStart.toEpochMilli(), - "period_end" to periodEnd.toEpochMilli() - ) - val searchSource = monitorCtx.scriptService!!.compile( - Script( - ScriptType.INLINE, - Script.DEFAULT_TEMPLATE_LANG, - query.toString(), - searchParams - ), - TemplateScript.CONTEXT - ) - .newInstance(searchParams) - .execute() - val sr = SearchRequest(*input.indices.toTypedArray()) - XContentType.JSON.xContent().createParser(monitorCtx.xContentRegistry, LoggingDeprecationHandler.INSTANCE, searchSource) - .use { - val source = SearchSourceBuilder.fromXContent(it) - val queryBuilder = if (input.query.query() == null) BoolQueryBuilder() - else QueryBuilders.boolQuery().must(source.query()) - queryBuilder.filter(QueryBuilders.termsQuery(fieldName, bucketValues)) - sr.source().query(queryBuilder) - } - val searchResponse: SearchResponse = monitorCtx.client!!.suspendUntil { monitorCtx.client!!.search(sr, it) } - return createFindingPerIndex(searchResponse, monitor, monitorCtx, shouldCreateFinding, executionId) - } else { - logger.error("Couldn't resolve groupBy field. Not generating bucket level monitor findings for monitor %${monitor.id}") - } - } - } - return listOf() - } - - private suspend fun createFindingPerIndex( - searchResponse: SearchResponse, - monitor: Monitor, - monitorCtx: MonitorRunnerExecutionContext, - shouldCreateFinding: Boolean, - workflowExecutionId: String? 
= null - ): List { - val docIdsByIndexName: MutableMap> = mutableMapOf() - for (hit in searchResponse.hits.hits) { - val ids = docIdsByIndexName.getOrDefault(hit.index, mutableListOf()) - ids.add(hit.id) - docIdsByIndexName[hit.index] = ids - } - val findings = mutableListOf() - var requestsToRetry: MutableList = mutableListOf() - docIdsByIndexName.entries.forEach { it -> - run { - val finding = Finding( - id = UUID.randomUUID().toString(), - relatedDocIds = it.value, - monitorId = monitor.id, - monitorName = monitor.name, - index = it.key, - timestamp = Instant.now(), - docLevelQueries = listOf(), - executionId = workflowExecutionId - ) - - val findingStr = finding.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS).string() - logger.debug("Bucket level monitor ${monitor.id} Findings: $findingStr") - if (shouldCreateFinding) { - logger.debug("Saving bucket level monitor findings for monitor ${monitor.id}") - val indexRequest = IndexRequest(monitor.dataSources.findingsIndex) - .source(findingStr, XContentType.JSON) - .id(finding.id) - .routing(finding.id) - requestsToRetry.add(indexRequest) - } - findings.add(finding.id) - } - } - if (requestsToRetry.isEmpty()) return listOf() - monitorCtx.retryPolicy!!.retry(logger, listOf(RestStatus.TOO_MANY_REQUESTS)) { - val bulkRequest = BulkRequest().add(requestsToRetry).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - val bulkResponse: BulkResponse = monitorCtx.client!!.suspendUntil { monitorCtx.client!!.bulk(bulkRequest, it) } - requestsToRetry = mutableListOf() - val findingsBeingRetried = mutableListOf() - bulkResponse.items.forEach { item -> - if (item.isFailed) { - if (item.status() == RestStatus.TOO_MANY_REQUESTS) { - requestsToRetry.add(bulkRequest.requests()[item.itemId] as IndexRequest) - findingsBeingRetried.add(findingsBeingRetried[item.itemId]) - } - } - } - } - return findings - } - - private fun getActionContextForAlertCategory( - alertCategory: AlertCategory, - alert: Alert, - ctx: BucketLevelTriggerExecutionContext, - error: Exception? 
- ): BucketLevelTriggerExecutionContext { - return when (alertCategory) { - AlertCategory.DEDUPED -> - ctx.copy(dedupedAlerts = listOf(alert), newAlerts = emptyList(), completedAlerts = emptyList(), error = error) - AlertCategory.NEW -> - ctx.copy(dedupedAlerts = emptyList(), newAlerts = listOf(alert), completedAlerts = emptyList(), error = error) - AlertCategory.COMPLETED -> - ctx.copy(dedupedAlerts = emptyList(), newAlerts = emptyList(), completedAlerts = listOf(alert), error = error) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/DocumentLevelMonitorRunner.kt b/alerting/bin/main/org/opensearch/alerting/DocumentLevelMonitorRunner.kt deleted file mode 100644 index 1eb826622..000000000 --- a/alerting/bin/main/org/opensearch/alerting/DocumentLevelMonitorRunner.kt +++ /dev/null @@ -1,804 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import org.apache.logging.log4j.LogManager -import org.opensearch.ExceptionsHelper -import org.opensearch.OpenSearchStatusException -import org.opensearch.action.index.IndexRequest -import org.opensearch.action.index.IndexResponse -import org.opensearch.action.search.SearchAction -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.action.support.WriteRequest -import org.opensearch.alerting.model.DocumentExecutionContext -import org.opensearch.alerting.model.DocumentLevelTriggerRunResult -import org.opensearch.alerting.model.InputRunResults -import org.opensearch.alerting.model.MonitorMetadata -import org.opensearch.alerting.model.MonitorRunResult -import org.opensearch.alerting.model.userErrorMessage -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.script.DocumentLevelTriggerExecutionContext -import org.opensearch.alerting.util.AlertingException -import org.opensearch.alerting.util.IndexUtils -import org.opensearch.alerting.util.defaultToPerExecutionAction -import org.opensearch.alerting.util.getActionExecutionPolicy -import org.opensearch.alerting.workflow.WorkflowRunContext -import org.opensearch.client.Client -import org.opensearch.client.node.NodeClient -import org.opensearch.cluster.metadata.IndexMetadata -import org.opensearch.cluster.routing.Preference -import org.opensearch.cluster.routing.ShardRouting -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.xcontent.XContentFactory -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.AlertingPluginInterface -import org.opensearch.commons.alerting.action.PublishFindingsRequest -import org.opensearch.commons.alerting.action.SubscribeFindingsResponse -import org.opensearch.commons.alerting.model.ActionExecutionResult -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.DocLevelMonitorInput -import org.opensearch.commons.alerting.model.DocLevelQuery -import org.opensearch.commons.alerting.model.DocumentLevelTrigger -import org.opensearch.commons.alerting.model.Finding -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.action.PerAlertActionScope -import org.opensearch.commons.alerting.util.string -import org.opensearch.core.action.ActionListener -import org.opensearch.core.common.bytes.BytesReference -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.ToXContent -import 
org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.index.IndexNotFoundException -import org.opensearch.index.query.BoolQueryBuilder -import org.opensearch.index.query.Operator -import org.opensearch.index.query.QueryBuilders -import org.opensearch.percolator.PercolateQueryBuilderExt -import org.opensearch.search.SearchHits -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.search.sort.SortOrder -import java.io.IOException -import java.time.Instant -import java.util.UUID -import kotlin.math.max - -object DocumentLevelMonitorRunner : MonitorRunner() { - private val logger = LogManager.getLogger(javaClass) - - override suspend fun runMonitor( - monitor: Monitor, - monitorCtx: MonitorRunnerExecutionContext, - periodStart: Instant, - periodEnd: Instant, - dryrun: Boolean, - workflowRunContext: WorkflowRunContext?, - executionId: String - ): MonitorRunResult { - logger.debug("Document-level-monitor is running ...") - val isTempMonitor = dryrun || monitor.id == Monitor.NO_ID - var monitorResult = MonitorRunResult(monitor.name, periodStart, periodEnd) - - try { - monitorCtx.alertIndices!!.createOrUpdateAlertIndex(monitor.dataSources) - monitorCtx.alertIndices!!.createOrUpdateInitialAlertHistoryIndex(monitor.dataSources) - monitorCtx.alertIndices!!.createOrUpdateInitialFindingHistoryIndex(monitor.dataSources) - } catch (e: Exception) { - val id = if (monitor.id.trim().isEmpty()) "_na_" else monitor.id - logger.error("Error setting up alerts and findings indices for monitor: $id", e) - monitorResult = monitorResult.copy(error = AlertingException.wrap(e)) - } - - try { - validate(monitor) - } catch (e: Exception) { - logger.error("Failed to start Document-level-monitor. Error: ${e.message}") - monitorResult = monitorResult.copy(error = AlertingException.wrap(e)) - } - - var (monitorMetadata, _) = MonitorMetadataService.getOrCreateMetadata( - monitor = monitor, - createWithRunContext = false, - skipIndex = isTempMonitor, - workflowRunContext?.workflowMetadataId - ) - - val docLevelMonitorInput = monitor.inputs[0] as DocLevelMonitorInput - - val queries: List = docLevelMonitorInput.queries - - val lastRunContext = if (monitorMetadata.lastRunContext.isNullOrEmpty()) mutableMapOf() - else monitorMetadata.lastRunContext.toMutableMap() as MutableMap> - - val updatedLastRunContext = lastRunContext.toMutableMap() - - val queryToDocIds = mutableMapOf>() - val inputRunResults = mutableMapOf>() - val docsToQueries = mutableMapOf>() - - try { - // Resolve all passed indices to concrete indices - val allConcreteIndices = IndexUtils.resolveAllIndices( - docLevelMonitorInput.indices, - monitorCtx.clusterService!!, - monitorCtx.indexNameExpressionResolver!! - ) - if (allConcreteIndices.isEmpty()) { - logger.error("indices not found-${docLevelMonitorInput.indices.joinToString(",")}") - throw IndexNotFoundException(docLevelMonitorInput.indices.joinToString(",")) - } - - monitorCtx.docLevelMonitorQueries!!.initDocLevelQueryIndex(monitor.dataSources) - monitorCtx.docLevelMonitorQueries!!.indexDocLevelQueries( - monitor = monitor, - monitorId = monitor.id, - monitorMetadata, - indexTimeout = monitorCtx.indexTimeout!! 
- ) - - // cleanup old indices that are not monitored anymore from the same monitor - val runContextKeys = updatedLastRunContext.keys.toMutableSet() - for (ind in runContextKeys) { - if (!allConcreteIndices.contains(ind)) { - updatedLastRunContext.remove(ind) - } - } - - // Map of document ids per index when monitor is workflow delegate and has chained findings - val matchingDocIdsPerIndex = workflowRunContext?.matchingDocIdsPerIndex - - docLevelMonitorInput.indices.forEach { indexName -> - var concreteIndices = IndexUtils.resolveAllIndices( - listOf(indexName), - monitorCtx.clusterService!!, - monitorCtx.indexNameExpressionResolver!! - ) - var lastWriteIndex: String? = null - if (IndexUtils.isAlias(indexName, monitorCtx.clusterService!!.state()) || - IndexUtils.isDataStream(indexName, monitorCtx.clusterService!!.state()) - ) { - lastWriteIndex = concreteIndices.find { lastRunContext.containsKey(it) } - if (lastWriteIndex != null) { - val lastWriteIndexCreationDate = - IndexUtils.getCreationDateForIndex(lastWriteIndex, monitorCtx.clusterService!!.state()) - concreteIndices = IndexUtils.getNewestIndicesByCreationDate( - concreteIndices, - monitorCtx.clusterService!!.state(), - lastWriteIndexCreationDate - ) - } - } - val updatedIndexName = indexName.replace("*", "_") - val conflictingFields = monitorCtx.docLevelMonitorQueries!!.getAllConflictingFields( - monitorCtx.clusterService!!.state(), - concreteIndices - ) - - concreteIndices.forEach { concreteIndexName -> - // Prepare lastRunContext for each index - val indexLastRunContext = lastRunContext.getOrPut(concreteIndexName) { - val isIndexCreatedRecently = createdRecently( - monitor, - periodStart, - periodEnd, - monitorCtx.clusterService!!.state().metadata.index(concreteIndexName) - ) - MonitorMetadataService.createRunContextForIndex(concreteIndexName, isIndexCreatedRecently) - } - - // Prepare updatedLastRunContext for each index - val indexUpdatedRunContext = updateLastRunContext( - indexLastRunContext.toMutableMap(), - monitorCtx, - concreteIndexName - ) as MutableMap - if (IndexUtils.isAlias(indexName, monitorCtx.clusterService!!.state()) || - IndexUtils.isDataStream(indexName, monitorCtx.clusterService!!.state()) - ) { - if (concreteIndexName == IndexUtils.getWriteIndex(indexName, monitorCtx.clusterService!!.state())) { - updatedLastRunContext.remove(lastWriteIndex) - updatedLastRunContext[concreteIndexName] = indexUpdatedRunContext - } - } else { - updatedLastRunContext[concreteIndexName] = indexUpdatedRunContext - } - - val count: Int = indexLastRunContext["shards_count"] as Int - for (i: Int in 0 until count) { - val shard = i.toString() - - // update lastRunContext if its a temp monitor as we only want to view the last bit of data then - // TODO: If dryrun, we should make it so we limit the search as this could still potentially give us lots of data - if (isTempMonitor) { - indexLastRunContext[shard] = max(-1, (indexUpdatedRunContext[shard] as String).toInt() - 10) - } - } - - // Prepare DocumentExecutionContext for each index - val docExecutionContext = DocumentExecutionContext(queries, indexLastRunContext, indexUpdatedRunContext) - - val matchingDocs = getMatchingDocs( - monitor, - monitorCtx, - docExecutionContext, - updatedIndexName, - concreteIndexName, - conflictingFields.toList(), - matchingDocIdsPerIndex?.get(concreteIndexName) - ) - - if (matchingDocs.isNotEmpty()) { - val matchedQueriesForDocs = getMatchedQueries( - monitorCtx, - matchingDocs.map { it.second }, - monitor, - monitorMetadata, - updatedIndexName, - 
concreteIndexName - ) - - matchedQueriesForDocs.forEach { hit -> - val id = hit.id - .replace("_${updatedIndexName}_${monitor.id}", "") - .replace("_${concreteIndexName}_${monitor.id}", "") - - val docIndices = hit.field("_percolator_document_slot").values.map { it.toString().toInt() } - docIndices.forEach { idx -> - val docIndex = "${matchingDocs[idx].first}|$concreteIndexName" - inputRunResults.getOrPut(id) { mutableSetOf() }.add(docIndex) - docsToQueries.getOrPut(docIndex) { mutableListOf() }.add(id) - } - } - } - } - } - monitorResult = monitorResult.copy(inputResults = InputRunResults(listOf(inputRunResults))) - - /* - populate the map queryToDocIds with pairs of - this fixes the issue of passing id, name, tags fields of DocLevelQuery object correctly to TriggerExpressionParser - */ - queries.forEach { - if (inputRunResults.containsKey(it.id)) { - queryToDocIds[it] = inputRunResults[it.id]!! - } - } - - val idQueryMap: Map = queries.associateBy { it.id } - - val triggerResults = mutableMapOf() - // If there are no triggers defined, we still want to generate findings - if (monitor.triggers.isEmpty()) { - if (dryrun == false && monitor.id != Monitor.NO_ID) { - docsToQueries.forEach { - val triggeredQueries = it.value.map { queryId -> idQueryMap[queryId]!! } - createFindings(monitor, monitorCtx, triggeredQueries, it.key, true) - } - } - } else { - monitor.triggers.forEach { - triggerResults[it.id] = runForEachDocTrigger( - monitorCtx, - monitorResult, - it as DocumentLevelTrigger, - monitor, - idQueryMap, - docsToQueries, - queryToDocIds, - dryrun, - executionId = executionId, - workflowRunContext = workflowRunContext - ) - } - } - // Don't update monitor if this is a test monitor - if (!isTempMonitor) { - // If any error happened during trigger execution, upsert monitor error alert - val errorMessage = constructErrorMessageFromTriggerResults(triggerResults = triggerResults) - if (errorMessage.isNotEmpty()) { - monitorCtx.alertService!!.upsertMonitorErrorAlert( - monitor = monitor, - errorMessage = errorMessage, - executionId = executionId, - workflowRunContext - ) - } else { - onSuccessfulMonitorRun(monitorCtx, monitor) - } - - MonitorMetadataService.upsertMetadata( - monitorMetadata.copy(lastRunContext = updatedLastRunContext), - true - ) - } - - // TODO: Update the Document as part of the Trigger and return back the trigger action result - return monitorResult.copy(triggerResults = triggerResults) - } catch (e: Exception) { - val errorMessage = ExceptionsHelper.detailedMessage(e) - monitorCtx.alertService!!.upsertMonitorErrorAlert(monitor, errorMessage, executionId, workflowRunContext) - logger.error("Failed running Document-level-monitor ${monitor.name}", e) - val alertingException = AlertingException( - errorMessage, - RestStatus.INTERNAL_SERVER_ERROR, - e - ) - return monitorResult.copy(error = alertingException, inputResults = InputRunResults(emptyList(), alertingException)) - } - } - - private suspend fun onSuccessfulMonitorRun(monitorCtx: MonitorRunnerExecutionContext, monitor: Monitor) { - monitorCtx.alertService!!.clearMonitorErrorAlert(monitor) - if (monitor.dataSources.alertsHistoryIndex != null) { - monitorCtx.alertService!!.moveClearedErrorAlertsToHistory( - monitor.id, - monitor.dataSources.alertsIndex, - monitor.dataSources.alertsHistoryIndex!! - ) - } - } - - private fun constructErrorMessageFromTriggerResults( - triggerResults: MutableMap? 
= null - ): String { - var errorMessage = "" - if (triggerResults != null) { - val triggersErrorBuilder = StringBuilder() - triggerResults.forEach { - if (it.value.error != null) { - triggersErrorBuilder.append("[${it.key}]: [${it.value.error!!.userErrorMessage()}]").append(" | ") - } - } - if (triggersErrorBuilder.isNotEmpty()) { - errorMessage = "Trigger errors: $triggersErrorBuilder" - } - } - return errorMessage - } - - private suspend fun runForEachDocTrigger( - monitorCtx: MonitorRunnerExecutionContext, - monitorResult: MonitorRunResult, - trigger: DocumentLevelTrigger, - monitor: Monitor, - idQueryMap: Map, - docsToQueries: Map>, - queryToDocIds: Map>, - dryrun: Boolean, - workflowRunContext: WorkflowRunContext?, - executionId: String - ): DocumentLevelTriggerRunResult { - val triggerCtx = DocumentLevelTriggerExecutionContext(monitor, trigger) - val triggerResult = monitorCtx.triggerService!!.runDocLevelTrigger(monitor, trigger, queryToDocIds) - - val findings = mutableListOf() - val findingDocPairs = mutableListOf>() - - // TODO: Implement throttling for findings - docsToQueries.forEach { - val triggeredQueries = it.value.map { queryId -> idQueryMap[queryId]!! } - val findingId = createFindings( - monitor, - monitorCtx, - triggeredQueries, - it.key, - !dryrun && monitor.id != Monitor.NO_ID, - executionId - ) - findings.add(findingId) - - if (triggerResult.triggeredDocs.contains(it.key)) { - findingDocPairs.add(Pair(findingId, it.key)) - } - } - - val actionCtx = triggerCtx.copy( - triggeredDocs = triggerResult.triggeredDocs, - relatedFindings = findings, - error = monitorResult.error ?: triggerResult.error - ) - - val alerts = mutableListOf() - findingDocPairs.forEach { - val alert = monitorCtx.alertService!!.composeDocLevelAlert( - listOf(it.first), - listOf(it.second), - triggerCtx, - monitorResult.alertError() ?: triggerResult.alertError(), - executionId = executionId, - workflorwRunContext = workflowRunContext - ) - alerts.add(alert) - } - - val shouldDefaultToPerExecution = defaultToPerExecutionAction( - monitorCtx.maxActionableAlertCount, - monitorId = monitor.id, - triggerId = trigger.id, - totalActionableAlertCount = alerts.size, - monitorOrTriggerError = actionCtx.error - ) - - for (action in trigger.actions) { - val actionExecutionScope = action.getActionExecutionPolicy(monitor)!!.actionExecutionScope - if (actionExecutionScope is PerAlertActionScope && !shouldDefaultToPerExecution) { - for (alert in alerts) { - val actionResults = this.runAction(action, actionCtx.copy(alerts = listOf(alert)), monitorCtx, monitor, dryrun) - triggerResult.actionResultsMap.getOrPut(alert.id) { mutableMapOf() } - triggerResult.actionResultsMap[alert.id]?.set(action.id, actionResults) - } - } else if (alerts.isNotEmpty()) { - val actionResults = this.runAction(action, actionCtx.copy(alerts = alerts), monitorCtx, monitor, dryrun) - for (alert in alerts) { - triggerResult.actionResultsMap.getOrPut(alert.id) { mutableMapOf() } - triggerResult.actionResultsMap[alert.id]?.set(action.id, actionResults) - } - } - } - - // Alerts are saved after the actions since if there are failures in the actions, they can be stated in the alert - if (!dryrun && monitor.id != Monitor.NO_ID) { - val updatedAlerts = alerts.map { alert -> - val actionResults = triggerResult.actionResultsMap.getOrDefault(alert.id, emptyMap()) - val actionExecutionResults = actionResults.values.map { actionRunResult -> - ActionExecutionResult(actionRunResult.actionId, actionRunResult.executionTime, if (actionRunResult.throttled) 1 else 
0) - } - alert.copy(actionExecutionResults = actionExecutionResults) - } - - monitorCtx.retryPolicy?.let { - monitorCtx.alertService!!.saveAlerts( - monitor.dataSources, - updatedAlerts, - it, - routingId = monitor.id - ) - } - } - return triggerResult - } - - private suspend fun createFindings( - monitor: Monitor, - monitorCtx: MonitorRunnerExecutionContext, - docLevelQueries: List, - matchingDocId: String, - shouldCreateFinding: Boolean, - workflowExecutionId: String? = null, - ): String { - // Before the "|" is the doc id and after the "|" is the index - val docIndex = matchingDocId.split("|") - - val finding = Finding( - id = UUID.randomUUID().toString(), - relatedDocIds = listOf(docIndex[0]), - correlatedDocIds = listOf(docIndex[0]), - monitorId = monitor.id, - monitorName = monitor.name, - index = docIndex[1], - docLevelQueries = docLevelQueries, - timestamp = Instant.now(), - executionId = workflowExecutionId - ) - - val findingStr = finding.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS).string() - logger.debug("Findings: $findingStr") - - if (shouldCreateFinding) { - val indexRequest = IndexRequest(monitor.dataSources.findingsIndex) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .source(findingStr, XContentType.JSON) - .id(finding.id) - .routing(finding.id) - - monitorCtx.client!!.suspendUntil { - monitorCtx.client!!.index(indexRequest, it) - } - } - - try { - publishFinding(monitor, monitorCtx, finding) - } catch (e: Exception) { - // suppress exception - logger.error("Optional finding callback failed", e) - } - return finding.id - } - - private fun publishFinding( - monitor: Monitor, - monitorCtx: MonitorRunnerExecutionContext, - finding: Finding - ) { - val publishFindingsRequest = PublishFindingsRequest(monitor.id, finding) - AlertingPluginInterface.publishFinding( - monitorCtx.client!! 
as NodeClient, - publishFindingsRequest, - object : ActionListener { - override fun onResponse(response: SubscribeFindingsResponse) {} - - override fun onFailure(e: Exception) {} - } - ) - } - - private suspend fun updateLastRunContext( - lastRunContext: Map, - monitorCtx: MonitorRunnerExecutionContext, - index: String - ): Map { - val count: Int = getShardsCount(monitorCtx.clusterService!!, index) - val updatedLastRunContext = lastRunContext.toMutableMap() - for (i: Int in 0 until count) { - val shard = i.toString() - val maxSeqNo: Long = getMaxSeqNo(monitorCtx.client!!, index, shard) - updatedLastRunContext[shard] = maxSeqNo.toString() - } - return updatedLastRunContext - } - - private fun validate(monitor: Monitor) { - if (monitor.inputs.size > 1) { - throw IOException("Only one input is supported with document-level-monitor.") - } - - if (monitor.inputs[0].name() != DocLevelMonitorInput.DOC_LEVEL_INPUT_FIELD) { - throw IOException("Invalid input with document-level-monitor.") - } - - if ((monitor.inputs[0] as DocLevelMonitorInput).indices.isEmpty()) { - throw IllegalArgumentException("DocLevelMonitorInput has no indices") - } - } - - // Checks if the index was created from the last execution run or when the monitor was last updated to ensure that - // new index is monitored from the beginning of that index - private fun createdRecently( - monitor: Monitor, - periodStart: Instant, - periodEnd: Instant, - indexMetadata: IndexMetadata - ): Boolean { - val lastExecutionTime = if (periodStart == periodEnd) monitor.lastUpdateTime else periodStart - val indexCreationDate = indexMetadata.settings.get("index.creation_date")?.toLong() ?: 0L - return indexCreationDate > lastExecutionTime.toEpochMilli() - } - - /** - * Get the current max seq number of the shard. We find it by searching the last document - * in the primary shard. - */ - private suspend fun getMaxSeqNo(client: Client, index: String, shard: String): Long { - val request: SearchRequest = SearchRequest() - .indices(index) - .preference("_shards:$shard") - .source( - SearchSourceBuilder() - .version(true) - .sort("_seq_no", SortOrder.DESC) - .seqNoAndPrimaryTerm(true) - .query(QueryBuilders.matchAllQuery()) - .size(1) - ) - val response: SearchResponse = client.suspendUntil { client.search(request, it) } - if (response.status() !== RestStatus.OK) { - throw IOException("Failed to get max seq no for shard: $shard") - } - if (response.hits.hits.isEmpty()) { - return -1L - } - - return response.hits.hits[0].seqNo - } - - private fun getShardsCount(clusterService: ClusterService, index: String): Int { - val allShards: List = clusterService!!.state().routingTable().allShards(index) - return allShards.filter { it.primary() }.size - } - - private suspend fun getMatchingDocs( - monitor: Monitor, - monitorCtx: MonitorRunnerExecutionContext, - docExecutionCtx: DocumentExecutionContext, - index: String, - concreteIndex: String, - conflictingFields: List, - docIds: List? 
= null - ): List> { - val count: Int = docExecutionCtx.updatedLastRunContext["shards_count"] as Int - val matchingDocs = mutableListOf>() - for (i: Int in 0 until count) { - val shard = i.toString() - try { - val maxSeqNo: Long = docExecutionCtx.updatedLastRunContext[shard].toString().toLong() - val prevSeqNo = docExecutionCtx.lastRunContext[shard].toString().toLongOrNull() - - val hits: SearchHits = searchShard( - monitorCtx, - concreteIndex, - shard, - prevSeqNo, - maxSeqNo, - null, - docIds - ) - - if (hits.hits.isNotEmpty()) { - matchingDocs.addAll(getAllDocs(hits, index, concreteIndex, monitor.id, conflictingFields)) - } - } catch (e: Exception) { - logger.warn("Failed to run for shard $shard. Error: ${e.message}") - } - } - return matchingDocs - } - - private suspend fun searchShard( - monitorCtx: MonitorRunnerExecutionContext, - index: String, - shard: String, - prevSeqNo: Long?, - maxSeqNo: Long, - query: String?, - docIds: List? = null - ): SearchHits { - if (prevSeqNo?.equals(maxSeqNo) == true && maxSeqNo != 0L) { - return SearchHits.empty() - } - val boolQueryBuilder = BoolQueryBuilder() - boolQueryBuilder.filter(QueryBuilders.rangeQuery("_seq_no").gt(prevSeqNo).lte(maxSeqNo)) - - if (query != null) { - boolQueryBuilder.must(QueryBuilders.queryStringQuery(query)) - } - - if (!docIds.isNullOrEmpty()) { - boolQueryBuilder.filter(QueryBuilders.termsQuery("_id", docIds)) - } - - val request: SearchRequest = SearchRequest() - .indices(index) - .preference("_shards:$shard") - .source( - SearchSourceBuilder() - .version(true) - .query(boolQueryBuilder) - .size(10000) // fixme: make this configurable. - ) - .preference(Preference.PRIMARY_FIRST.type()) - val response: SearchResponse = monitorCtx.client!!.suspendUntil { monitorCtx.client!!.search(request, it) } - if (response.status() !== RestStatus.OK) { - throw IOException("Failed to search shard: $shard") - } - return response.hits - } - - private suspend fun getMatchedQueries( - monitorCtx: MonitorRunnerExecutionContext, - docs: List, - monitor: Monitor, - monitorMetadata: MonitorMetadata, - index: String, - concreteIndex: String - ): SearchHits { - val boolQueryBuilder = BoolQueryBuilder().must(QueryBuilders.matchQuery("index", index).operator(Operator.AND)) - - val percolateQueryBuilder = PercolateQueryBuilderExt("query", docs, XContentType.JSON) - if (monitor.id.isNotEmpty()) { - boolQueryBuilder.must(QueryBuilders.matchQuery("monitor_id", monitor.id).operator(Operator.AND)) - } - boolQueryBuilder.filter(percolateQueryBuilder) - - val queryIndex = monitorMetadata.sourceToQueryIndexMapping[index + monitor.id] - if (queryIndex == null) { - val message = "Failed to resolve concrete queryIndex from sourceIndex during monitor execution!" 
+ - " sourceIndex:$concreteIndex queryIndex:${monitor.dataSources.queryIndex}" - logger.error(message) - throw AlertingException.wrap( - OpenSearchStatusException(message, RestStatus.INTERNAL_SERVER_ERROR) - ) - } - val searchRequest = SearchRequest(queryIndex).preference(Preference.PRIMARY_FIRST.type()) - val searchSourceBuilder = SearchSourceBuilder() - searchSourceBuilder.query(boolQueryBuilder) - searchRequest.source(searchSourceBuilder) - - var response: SearchResponse - try { - response = monitorCtx.client!!.suspendUntil { - monitorCtx.client!!.execute(SearchAction.INSTANCE, searchRequest, it) - } - } catch (e: Exception) { - throw IllegalStateException( - "Failed to run percolate search for sourceIndex [$index] and queryIndex [$queryIndex] for ${docs.size} document(s)", e - ) - } - - if (response.status() !== RestStatus.OK) { - throw IOException("Failed to search percolate index: $queryIndex") - } - return response.hits - } - - private fun getAllDocs( - hits: SearchHits, - index: String, - concreteIndex: String, - monitorId: String, - conflictingFields: List - ): List> { - return hits.map { hit -> - val sourceMap = hit.sourceAsMap - - transformDocumentFieldNames( - sourceMap, - conflictingFields, - "_${index}_$monitorId", - "_${concreteIndex}_$monitorId", - "" - ) - - var xContentBuilder = XContentFactory.jsonBuilder().map(sourceMap) - - val sourceRef = BytesReference.bytes(xContentBuilder) - - logger.debug("Document [${hit.id}] payload after transform: ", sourceRef.utf8ToString()) - - Pair(hit.id, sourceRef) - } - } - - /** - * Traverses document fields in leaves recursively and appends [fieldNameSuffixIndex] to field names with same names - * but different mappings & [fieldNameSuffixPattern] to field names which have unique names. - * - * Example for index name is my_log_index and Monitor ID is TReewWdsf2gdJFV: - * { { - * "a": { "a": { - * "b": 1234 ----> "b_my_log_index_TReewWdsf2gdJFV": 1234 - * } } - * } - * - * @param jsonAsMap Input JSON (as Map) - * @param fieldNameSuffix Field suffix which is appended to existing field name - */ - private fun transformDocumentFieldNames( - jsonAsMap: MutableMap, - conflictingFields: List, - fieldNameSuffixPattern: String, - fieldNameSuffixIndex: String, - fieldNamePrefix: String - ) { - val tempMap = mutableMapOf() - val it: MutableIterator> = jsonAsMap.entries.iterator() - while (it.hasNext()) { - val entry = it.next() - if (entry.value is Map<*, *>) { - transformDocumentFieldNames( - entry.value as MutableMap, - conflictingFields, - fieldNameSuffixPattern, - fieldNameSuffixIndex, - if (fieldNamePrefix == "") entry.key else "$fieldNamePrefix.${entry.key}" - ) - } else if (!entry.key.endsWith(fieldNameSuffixPattern) && !entry.key.endsWith(fieldNameSuffixIndex)) { - var alreadyReplaced = false - conflictingFields.forEach { conflictingField -> - if (conflictingField == "$fieldNamePrefix.${entry.key}" || (fieldNamePrefix == "" && conflictingField == entry.key)) { - tempMap["${entry.key}$fieldNameSuffixIndex"] = entry.value - it.remove() - alreadyReplaced = true - } - } - if (!alreadyReplaced) { - tempMap["${entry.key}$fieldNameSuffixPattern"] = entry.value - it.remove() - } - } - } - jsonAsMap.putAll(tempMap) - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/InputService.kt b/alerting/bin/main/org/opensearch/alerting/InputService.kt deleted file mode 100644 index b31e21d5f..000000000 --- a/alerting/bin/main/org/opensearch/alerting/InputService.kt +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * 
SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import org.apache.logging.log4j.LogManager -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.alerting.model.InputRunResults -import org.opensearch.alerting.model.TriggerAfterKey -import org.opensearch.alerting.opensearchapi.convertToMap -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.util.AggregationQueryRewriter -import org.opensearch.alerting.util.addUserBackendRolesFilter -import org.opensearch.alerting.util.clusterMetricsMonitorHelpers.executeTransportAction -import org.opensearch.alerting.util.clusterMetricsMonitorHelpers.toMap -import org.opensearch.alerting.util.getRoleFilterEnabled -import org.opensearch.alerting.workflow.WorkflowRunContext -import org.opensearch.client.Client -import org.opensearch.cluster.routing.Preference -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.ClusterMetricsInput -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.SearchInput -import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput -import org.opensearch.core.common.io.stream.NamedWriteableRegistry -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.index.query.BoolQueryBuilder -import org.opensearch.index.query.MatchQueryBuilder -import org.opensearch.index.query.QueryBuilder -import org.opensearch.index.query.QueryBuilders -import org.opensearch.index.query.TermsQueryBuilder -import org.opensearch.script.Script -import org.opensearch.script.ScriptService -import org.opensearch.script.ScriptType -import org.opensearch.script.TemplateScript -import org.opensearch.search.builder.SearchSourceBuilder -import java.time.Instant - -/** Service that handles the collection of input results for Monitor executions */ -class InputService( - val client: Client, - val scriptService: ScriptService, - val namedWriteableRegistry: NamedWriteableRegistry, - val xContentRegistry: NamedXContentRegistry, - val clusterService: ClusterService, - val settings: Settings -) { - - private val logger = LogManager.getLogger(InputService::class.java) - - suspend fun collectInputResults( - monitor: Monitor, - periodStart: Instant, - periodEnd: Instant, - prevResult: InputRunResults? = null, - workflowRunContext: WorkflowRunContext? 
= null - ): InputRunResults { - return try { - val results = mutableListOf>() - val aggTriggerAfterKey: MutableMap = mutableMapOf() - - // If monitor execution is triggered from a workflow - val matchingDocIdsPerIndex = workflowRunContext?.matchingDocIdsPerIndex - - // TODO: If/when multiple input queries are supported for Bucket-Level Monitor execution, aggTriggerAfterKeys will - // need to be updated to account for it - monitor.inputs.forEach { input -> - when (input) { - is SearchInput -> { - // TODO: Figure out a way to use SearchTemplateRequest without bringing in the entire TransportClient - val searchParams = mapOf( - "period_start" to periodStart.toEpochMilli(), - "period_end" to periodEnd.toEpochMilli() - ) - - // Deep copying query before passing it to rewriteQuery since otherwise, the monitor.input is modified directly - // which causes a strange bug where the rewritten query persists on the Monitor across executions - val rewrittenQuery = AggregationQueryRewriter.rewriteQuery(deepCopyQuery(input.query), prevResult, monitor.triggers) - - // Rewrite query to consider the doc ids per given index - if (chainedFindingExist(matchingDocIdsPerIndex) && rewrittenQuery.query() != null) { - val updatedSourceQuery = updateInputQueryWithFindingDocIds(rewrittenQuery.query(), matchingDocIdsPerIndex!!) - rewrittenQuery.query(updatedSourceQuery) - } - - val searchSource = scriptService.compile( - Script( - ScriptType.INLINE, Script.DEFAULT_TEMPLATE_LANG, - rewrittenQuery.toString(), searchParams - ), - TemplateScript.CONTEXT - ) - .newInstance(searchParams) - .execute() - - val searchRequest = SearchRequest() - .indices(*input.indices.toTypedArray()) - .preference(Preference.PRIMARY_FIRST.type()) - XContentType.JSON.xContent().createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, searchSource).use { - searchRequest.source(SearchSourceBuilder.fromXContent(it)) - } - val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } - aggTriggerAfterKey += AggregationQueryRewriter.getAfterKeysFromSearchResponse( - searchResponse, - monitor.triggers, - prevResult?.aggTriggersAfterKey - ) - results += searchResponse.convertToMap() - } - is ClusterMetricsInput -> { - logger.debug("ClusterMetricsInput clusterMetricType: ${input.clusterMetricType}") - val response = executeTransportAction(input, client) - results += response.toMap() - } - else -> { - throw IllegalArgumentException("Unsupported input type: ${input.name()}.") - } - } - } - InputRunResults(results.toList(), aggTriggersAfterKey = aggTriggerAfterKey) - } catch (e: Exception) { - logger.info("Error collecting inputs for monitor: ${monitor.id}", e) - InputRunResults(emptyList(), e) - } - } - - /** - * Extends the given query builder with query that filters the given indices with the given doc ids per index - * Used whenever we want to select the documents that were found in chained delegate execution of the current workflow run - * - * @param query Original bucket monitor query - * @param matchingDocIdsPerIndex Map of finding doc ids grouped by index - */ - private fun updateInputQueryWithFindingDocIds( - query: QueryBuilder, - matchingDocIdsPerIndex: Map>, - ): QueryBuilder { - val queryBuilder = QueryBuilders.boolQuery().must(query) - val shouldQuery = QueryBuilders.boolQuery() - - matchingDocIdsPerIndex.forEach { entry -> - shouldQuery - .should() - .add( - BoolQueryBuilder() - .must(MatchQueryBuilder("_index", entry.key)) - .must(TermsQueryBuilder("_id", entry.value)) - ) - } - return 
queryBuilder.must(shouldQuery) - } - - private fun chainedFindingExist(indexToDocIds: Map<String, List<String>>?) = - !indexToDocIds.isNullOrEmpty() - - private fun deepCopyQuery(query: SearchSourceBuilder): SearchSourceBuilder { - val out = BytesStreamOutput() - query.writeTo(out) - val sin = NamedWriteableAwareStreamInput(out.bytes().streamInput(), namedWriteableRegistry) - return SearchSourceBuilder(sin) - } - - /** - * We moved the anomaly result index to the system index list, so ordinary users can no longer query - * this index directly. This method stashes the current thread context to pass the security check so - * the monitor job can access the anomaly result index. We add a filter on the monitor user's roles to - * the search query so that it only returns documents the monitor user can access. - * - * In the Alerting Kibana UI, monitor users can only see detectors to which they have read access, so they - * can't create a monitor on another user's detector without read access. Even if they know another - * user's detector id and use it to create a monitor, this method will only return anomaly - * results they can read. - */ - suspend fun collectInputResultsForADMonitor(monitor: Monitor, periodStart: Instant, periodEnd: Instant): InputRunResults { - return try { - val results = mutableListOf<Map<String, Any>>() - val input = monitor.inputs[0] as SearchInput - - val searchParams = mapOf("period_start" to periodStart.toEpochMilli(), "period_end" to periodEnd.toEpochMilli()) - val searchSource = scriptService.compile( - Script( - ScriptType.INLINE, Script.DEFAULT_TEMPLATE_LANG, - input.query.toString(), searchParams - ), - TemplateScript.CONTEXT - ) - .newInstance(searchParams) - .execute() - - val searchRequest = SearchRequest() - .indices(*input.indices.toTypedArray()) - .preference(Preference.PRIMARY_FIRST.type()) - XContentType.JSON.xContent().createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, searchSource).use { - searchRequest.source(SearchSourceBuilder.fromXContent(it)) - } - - // Add a user role filter for AD results - client.threadPool().threadContext.stashContext().use { - // Possible long-term solutions: - // 1. Use a secure REST client to send the request to the AD search result API. If there is no permission - // exception, that means the user has read access to AD results, and we don't need to add the user role - // filter when querying AD results if the AD backend role filter is disabled. - // 2. Security provides a transport action to verify whether the user has permission to search AD results. - // The monitor runner sends a transport request to check permission first. If the security plugin responds - // yes, the user has permission to query AD results. If the AD role filter is enabled, we add the user role - // filter to protect data at the user role level; otherwise, the user can query any AD result.
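A minimal sketch of the backend-role filter applied just below, assuming the AD result documents carry a "user.backend_roles.keyword" field; the helper name here is hypothetical, while the real implementation lives in addUserBackendRolesFilter from org.opensearch.alerting.util:

import org.opensearch.commons.authuser.User
import org.opensearch.index.query.QueryBuilders
import org.opensearch.search.builder.SearchSourceBuilder

// Hypothetical sketch: wrap the original query so that only AD result documents
// tagged with at least one of the monitor user's backend roles are returned.
fun addBackendRolesFilterSketch(user: User?, searchSource: SearchSourceBuilder) {
    val roles = user?.backendRoles
    if (roles.isNullOrEmpty()) return // no user or roles recorded, leave the query untouched
    val filtered = QueryBuilders.boolQuery()
        .must(searchSource.query() ?: QueryBuilders.matchAllQuery())
        .filter(QueryBuilders.termsQuery("user.backend_roles.keyword", roles)) // field name is an assumption
    searchSource.query(filtered)
}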
- if (getRoleFilterEnabled(clusterService, settings, "plugins.anomaly_detection.filter_by_backend_roles")) { - addUserBackendRolesFilter(monitor.user, searchRequest.source()) - } - val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } - results += searchResponse.convertToMap() - } - InputRunResults(results.toList()) - } catch (e: Exception) { - logger.info("Error collecting anomaly result inputs for monitor: ${monitor.id}", e) - InputRunResults(emptyList(), e) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/MonitorMetadataService.kt b/alerting/bin/main/org/opensearch/alerting/MonitorMetadataService.kt deleted file mode 100644 index c1a5c9aea..000000000 --- a/alerting/bin/main/org/opensearch/alerting/MonitorMetadataService.kt +++ /dev/null @@ -1,274 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import kotlinx.coroutines.CoroutineName -import kotlinx.coroutines.CoroutineScope -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.SupervisorJob -import org.apache.logging.log4j.LogManager -import org.opensearch.ExceptionsHelper -import org.opensearch.OpenSearchSecurityException -import org.opensearch.action.DocWriteRequest -import org.opensearch.action.DocWriteResponse -import org.opensearch.action.admin.indices.get.GetIndexRequest -import org.opensearch.action.admin.indices.get.GetIndexResponse -import org.opensearch.action.admin.indices.stats.IndicesStatsAction -import org.opensearch.action.admin.indices.stats.IndicesStatsRequest -import org.opensearch.action.admin.indices.stats.IndicesStatsResponse -import org.opensearch.action.get.GetRequest -import org.opensearch.action.get.GetResponse -import org.opensearch.action.index.IndexRequest -import org.opensearch.action.index.IndexResponse -import org.opensearch.action.support.WriteRequest -import org.opensearch.alerting.model.MonitorMetadata -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.util.AlertingException -import org.opensearch.alerting.util.IndexUtils -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.settings.Settings -import org.opensearch.common.unit.TimeValue -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentFactory -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.DocLevelMonitorInput -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils -import org.opensearch.index.seqno.SequenceNumbers -import org.opensearch.transport.RemoteTransportException - -private val log = LogManager.getLogger(MonitorMetadataService::class.java) - -object MonitorMetadataService : - CoroutineScope by CoroutineScope(SupervisorJob() + Dispatchers.Default + CoroutineName("MonitorMetadataService")) { - - private lateinit var client: Client - private lateinit var xContentRegistry: NamedXContentRegistry - private lateinit var clusterService: ClusterService - private 
lateinit var settings: Settings - - @Volatile - private lateinit var indexTimeout: TimeValue - - fun initialize( - client: Client, - clusterService: ClusterService, - xContentRegistry: NamedXContentRegistry, - settings: Settings, - ) { - this.clusterService = clusterService - this.client = client - this.xContentRegistry = xContentRegistry - this.settings = settings - this.indexTimeout = AlertingSettings.INDEX_TIMEOUT.get(settings) - this.clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.INDEX_TIMEOUT) { indexTimeout = it } - } - - @Suppress("ComplexMethod", "ReturnCount") - suspend fun upsertMetadata(metadata: MonitorMetadata, updating: Boolean): MonitorMetadata { - try { - val indexRequest = IndexRequest(ScheduledJob.SCHEDULED_JOBS_INDEX) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .source(metadata.toXContent(XContentFactory.jsonBuilder(), ToXContent.MapParams(mapOf("with_type" to "true")))) - .id(metadata.id) - .routing(metadata.monitorId) - .setIfSeqNo(metadata.seqNo) - .setIfPrimaryTerm(metadata.primaryTerm) - .timeout(indexTimeout) - - if (updating) { - indexRequest.id(metadata.id).setIfSeqNo(metadata.seqNo).setIfPrimaryTerm(metadata.primaryTerm) - } else { - indexRequest.opType(DocWriteRequest.OpType.CREATE) - } - val response: IndexResponse = client.suspendUntil { index(indexRequest, it) } - when (response.result) { - DocWriteResponse.Result.DELETED, DocWriteResponse.Result.NOOP, DocWriteResponse.Result.NOT_FOUND, null -> { - val failureReason = "The upsert metadata call failed with a ${response.result?.lowercase} result" - log.error(failureReason) - throw AlertingException(failureReason, RestStatus.INTERNAL_SERVER_ERROR, IllegalStateException(failureReason)) - } - DocWriteResponse.Result.CREATED, DocWriteResponse.Result.UPDATED -> { - log.debug("Successfully upserted MonitorMetadata:${metadata.id} ") - } - } - return metadata.copy( - seqNo = response.seqNo, - primaryTerm = response.primaryTerm - ) - } catch (e: Exception) { - throw AlertingException.wrap(e) - } - } - - /** - * Document monitors are keeping the context of the last run. - * Since one monitor can be part of multiple workflows we need to be sure that execution of the current workflow - * doesn't interfere with the other workflows that are dependent on the given monitor - */ - suspend fun getOrCreateMetadata( - monitor: Monitor, - createWithRunContext: Boolean = true, - skipIndex: Boolean = false, - workflowMetadataId: String? = null - ): Pair { - try { - val created = true - val metadata = getMetadata(monitor, workflowMetadataId) - return if (metadata != null) { - metadata to !created - } else { - val newMetadata = createNewMetadata(monitor, createWithRunContext = createWithRunContext, workflowMetadataId) - if (skipIndex) { - newMetadata to created - } else { - upsertMetadata(newMetadata, updating = false) to created - } - } - } catch (e: Exception) { - throw AlertingException.wrap(e) - } - } - - suspend fun getMetadata(monitor: Monitor, workflowMetadataId: String? = null): MonitorMetadata? 
{ - try { - val metadataId = MonitorMetadata.getId(monitor, workflowMetadataId) - val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, metadataId).routing(monitor.id) - - val getResponse: GetResponse = client.suspendUntil { get(getRequest, it) } - return if (getResponse.isExists) { - val xcp = XContentHelper.createParser( - xContentRegistry, - LoggingDeprecationHandler.INSTANCE, - getResponse.sourceAsBytesRef, - XContentType.JSON - ) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - MonitorMetadata.parse(xcp, getResponse.id, getResponse.seqNo, getResponse.primaryTerm) - } else { - null - } - } catch (e: Exception) { - if (e.message?.contains("no such index") == true) { - return null - } else { - throw AlertingException.wrap(e) - } - } - } - - suspend fun recreateRunContext(metadata: MonitorMetadata, monitor: Monitor): MonitorMetadata { - try { - val monitorIndex = if (monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { - (monitor.inputs[0] as DocLevelMonitorInput).indices[0] - } else null - val runContext = if (monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { - createFullRunContext(monitorIndex, metadata.lastRunContext as MutableMap>) - } else null - return if (runContext != null) { - metadata.copy( - lastRunContext = runContext - ) - } else { - metadata - } - } catch (e: Exception) { - throw AlertingException.wrap(e) - } - } - - private suspend fun createNewMetadata( - monitor: Monitor, - createWithRunContext: Boolean, - workflowMetadataId: String? = null, - ): MonitorMetadata { - val monitorIndex = if (monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) - (monitor.inputs[0] as DocLevelMonitorInput).indices[0] - else null - val runContext = if (monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR && createWithRunContext) - createFullRunContext(monitorIndex) - else emptyMap() - return MonitorMetadata( - id = MonitorMetadata.getId(monitor, workflowMetadataId), - seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO, - primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM, - monitorId = monitor.id, - lastActionExecutionTimes = emptyList(), - lastRunContext = runContext, - sourceToQueryIndexMapping = mutableMapOf() - ) - } - - suspend fun createFullRunContext( - index: String?, - existingRunContext: MutableMap>? 
= null, - ): MutableMap> { - val lastRunContext = existingRunContext?.toMutableMap() ?: mutableMapOf() - try { - if (index == null) return mutableMapOf() - - val indices = mutableListOf() - if (IndexUtils.isAlias(index, clusterService.state()) || - IndexUtils.isDataStream(index, clusterService.state()) - ) { - IndexUtils.getWriteIndex(index, clusterService.state())?.let { indices.add(it) } - } else { - val getIndexRequest = GetIndexRequest().indices(index) - val getIndexResponse: GetIndexResponse = client.suspendUntil { - client.admin().indices().getIndex(getIndexRequest, it) - } - indices.addAll(getIndexResponse.indices()) - } - - indices.forEach { indexName -> - if (!lastRunContext.containsKey(indexName)) { - lastRunContext[indexName] = createRunContextForIndex(indexName) - } - } - } catch (e: RemoteTransportException) { - val unwrappedException = ExceptionsHelper.unwrapCause(e) as Exception - throw AlertingException("Failed fetching index stats", RestStatus.INTERNAL_SERVER_ERROR, unwrappedException) - } catch (e: OpenSearchSecurityException) { - throw AlertingException( - "Failed fetching index stats - missing required index permissions: ${e.localizedMessage}", - RestStatus.INTERNAL_SERVER_ERROR, - e - ) - } catch (e: Exception) { - throw AlertingException("Failed fetching index stats", RestStatus.INTERNAL_SERVER_ERROR, e) - } - return lastRunContext - } - - suspend fun createRunContextForIndex(index: String, createdRecently: Boolean = false): MutableMap { - val request = IndicesStatsRequest().indices(index).clear() - val response: IndicesStatsResponse = client.suspendUntil { execute(IndicesStatsAction.INSTANCE, request, it) } - if (response.status != RestStatus.OK) { - val errorMessage = "Failed fetching index stats for index:$index" - throw AlertingException(errorMessage, RestStatus.INTERNAL_SERVER_ERROR, IllegalStateException(errorMessage)) - } - val shards = response.shards.filter { it.shardRouting.primary() && it.shardRouting.active() } - val lastRunContext = HashMap() - lastRunContext["index"] = index - val count = shards.size - lastRunContext["shards_count"] = count - - for (shard in shards) { - lastRunContext[shard.shardRouting.id.toString()] = - if (createdRecently) -1L - else shard.seqNoStats?.globalCheckpoint ?: SequenceNumbers.UNASSIGNED_SEQ_NO - } - return lastRunContext - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/MonitorRunner.kt b/alerting/bin/main/org/opensearch/alerting/MonitorRunner.kt deleted file mode 100644 index 3340cac43..000000000 --- a/alerting/bin/main/org/opensearch/alerting/MonitorRunner.kt +++ /dev/null @@ -1,194 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import org.opensearch.OpenSearchSecurityException -import org.opensearch.alerting.action.GetDestinationsAction -import org.opensearch.alerting.action.GetDestinationsRequest -import org.opensearch.alerting.action.GetDestinationsResponse -import org.opensearch.alerting.model.ActionRunResult -import org.opensearch.alerting.model.MonitorRunResult -import org.opensearch.alerting.model.destination.Destination -import org.opensearch.alerting.opensearchapi.InjectorContextElement -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.opensearchapi.withClosableContext -import org.opensearch.alerting.script.QueryLevelTriggerExecutionContext -import org.opensearch.alerting.script.TriggerExecutionContext -import 
org.opensearch.alerting.util.destinationmigration.NotificationActionConfigs -import org.opensearch.alerting.util.destinationmigration.NotificationApiUtils.Companion.getNotificationConfigInfo -import org.opensearch.alerting.util.destinationmigration.getTitle -import org.opensearch.alerting.util.destinationmigration.publishLegacyNotification -import org.opensearch.alerting.util.destinationmigration.sendNotification -import org.opensearch.alerting.util.isAllowed -import org.opensearch.alerting.util.isTestAction -import org.opensearch.alerting.workflow.WorkflowRunContext -import org.opensearch.client.node.NodeClient -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.Table -import org.opensearch.commons.alerting.model.action.Action -import org.opensearch.commons.notifications.model.NotificationConfigInfo -import org.opensearch.core.common.Strings -import java.time.Instant - -abstract class MonitorRunner { - - abstract suspend fun runMonitor( - monitor: Monitor, - monitorCtx: MonitorRunnerExecutionContext, - periodStart: Instant, - periodEnd: Instant, - dryRun: Boolean, - workflowRunContext: WorkflowRunContext? = null, - executionId: String - ): MonitorRunResult<*> - - suspend fun runAction( - action: Action, - ctx: TriggerExecutionContext, - monitorCtx: MonitorRunnerExecutionContext, - monitor: Monitor, - dryrun: Boolean - ): ActionRunResult { - return try { - if (ctx is QueryLevelTriggerExecutionContext && !MonitorRunnerService.isActionActionable(action, ctx.alert)) { - return ActionRunResult(action.id, action.name, mapOf(), true, null, null) - } - val actionOutput = mutableMapOf() - actionOutput[Action.SUBJECT] = if (action.subjectTemplate != null) { - MonitorRunnerService.compileTemplate(action.subjectTemplate!!, ctx) - } else "" - actionOutput[Action.MESSAGE] = MonitorRunnerService.compileTemplate(action.messageTemplate, ctx) - if (Strings.isNullOrEmpty(actionOutput[Action.MESSAGE])) { - throw IllegalStateException("Message content missing in the Destination with id: ${action.destinationId}") - } - if (!dryrun) { - val client = monitorCtx.client - client!!.threadPool().threadContext.stashContext().use { - withClosableContext( - InjectorContextElement( - monitor.id, - monitorCtx.settings!!, - monitorCtx.threadPool!!.threadContext, - monitor.user?.roles, - monitor.user - ) - ) { - actionOutput[Action.MESSAGE_ID] = getConfigAndSendNotification( - action, - monitorCtx, - actionOutput[Action.SUBJECT], - actionOutput[Action.MESSAGE]!! 
- ) - } - } - } - ActionRunResult(action.id, action.name, actionOutput, false, MonitorRunnerService.currentTime(), null) - } catch (e: Exception) { - ActionRunResult(action.id, action.name, mapOf(), false, MonitorRunnerService.currentTime(), e) - } - } - - protected suspend fun getConfigAndSendNotification( - action: Action, - monitorCtx: MonitorRunnerExecutionContext, - subject: String?, - message: String - ): String { - val config = getConfigForNotificationAction(action, monitorCtx) - if (config.destination == null && config.channel == null) { - throw IllegalStateException("Unable to find a Notification Channel or Destination config with id [${action.destinationId}]") - } - - // Adding a check on TEST_ACTION Destination type here to avoid supporting it as a LegacyBaseMessage type - // just for Alerting integration tests - if (config.destination?.isTestAction() == true) { - return "test action" - } - - if (config.destination?.isAllowed(monitorCtx.allowList) == false) { - throw IllegalStateException( - "Monitor contains a Destination type that is not allowed: ${config.destination.type}" - ) - } - - var actionResponseContent = "" - actionResponseContent = config.channel - ?.sendNotification( - monitorCtx.client!!, - config.channel.getTitle(subject), - message - ) ?: actionResponseContent - - actionResponseContent = config.destination - ?.buildLegacyBaseMessage(subject, message, monitorCtx.destinationContextFactory!!.getDestinationContext(config.destination)) - ?.publishLegacyNotification(monitorCtx.client!!) - ?: actionResponseContent - - return actionResponseContent - } - - /** - * The "destination" ID referenced in a Monitor Action could either be a Notification config or a Destination config - * depending on whether the background migration process has already migrated it from a Destination to a Notification config. - * - * To cover both of these cases, the Notification config will take precedence and if it is not found, the Destination will be retrieved. - */ - private suspend fun getConfigForNotificationAction( - action: Action, - monitorCtx: MonitorRunnerExecutionContext - ): NotificationActionConfigs { - var destination: Destination? = null - var notificationPermissionException: Exception? = null - - var channel: NotificationConfigInfo? 
= null - try { - channel = getNotificationConfigInfo(monitorCtx.client as NodeClient, action.destinationId) - } catch (e: OpenSearchSecurityException) { - notificationPermissionException = e - } - - // If the channel was not found, try to retrieve the Destination - if (channel == null) { - destination = try { - val table = Table( - "asc", - "destination.name.keyword", - null, - 1, - 0, - null - ) - val getDestinationsRequest = GetDestinationsRequest( - action.destinationId, - 0L, - null, - table, - "ALL" - ) - - val getDestinationsResponse: GetDestinationsResponse = monitorCtx.client!!.suspendUntil { - monitorCtx.client!!.execute(GetDestinationsAction.INSTANCE, getDestinationsRequest, it) - } - getDestinationsResponse.destinations.firstOrNull() - } catch (e: IllegalStateException) { - // Catching the exception thrown when the Destination was not found so the NotificationActionConfigs object can be returned - null - } catch (e: OpenSearchSecurityException) { - if (notificationPermissionException != null) { - throw notificationPermissionException - } else { - throw e - } - } - - if (destination == null && notificationPermissionException != null) { - throw notificationPermissionException - } - } - - return NotificationActionConfigs(destination, channel) - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/MonitorRunnerExecutionContext.kt b/alerting/bin/main/org/opensearch/alerting/MonitorRunnerExecutionContext.kt deleted file mode 100644 index 41a26bb79..000000000 --- a/alerting/bin/main/org/opensearch/alerting/MonitorRunnerExecutionContext.kt +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import org.opensearch.action.bulk.BackoffPolicy -import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.model.destination.DestinationContextFactory -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.settings.DestinationSettings -import org.opensearch.alerting.settings.LegacyOpenDistroDestinationSettings -import org.opensearch.alerting.util.DocLevelMonitorQueries -import org.opensearch.client.Client -import org.opensearch.cluster.metadata.IndexNameExpressionResolver -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.settings.Settings -import org.opensearch.common.unit.TimeValue -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.script.ScriptService -import org.opensearch.threadpool.ThreadPool - -data class MonitorRunnerExecutionContext( - - var clusterService: ClusterService? = null, - var client: Client? = null, - var xContentRegistry: NamedXContentRegistry? = null, - var indexNameExpressionResolver: IndexNameExpressionResolver? = null, - var scriptService: ScriptService? = null, - var settings: Settings? = null, - var threadPool: ThreadPool? = null, - var alertIndices: AlertIndices? = null, - var inputService: InputService? = null, - var triggerService: TriggerService? = null, - var alertService: AlertService? = null, - var docLevelMonitorQueries: DocLevelMonitorQueries? = null, - var workflowService: WorkflowService? = null, - - @Volatile var retryPolicy: BackoffPolicy? = null, - @Volatile var moveAlertsRetryPolicy: BackoffPolicy? = null, - - @Volatile var allowList: List = DestinationSettings.ALLOW_LIST_NONE, - @Volatile var hostDenyList: List = LegacyOpenDistroDestinationSettings.HOST_DENY_LIST_NONE, - - @Volatile var destinationSettings: Map? 
= null, - @Volatile var destinationContextFactory: DestinationContextFactory? = null, - - @Volatile var maxActionableAlertCount: Long = AlertingSettings.DEFAULT_MAX_ACTIONABLE_ALERT_COUNT, - @Volatile var indexTimeout: TimeValue? = null -) diff --git a/alerting/bin/main/org/opensearch/alerting/MonitorRunnerService.kt b/alerting/bin/main/org/opensearch/alerting/MonitorRunnerService.kt deleted file mode 100644 index ca223f7a0..000000000 --- a/alerting/bin/main/org/opensearch/alerting/MonitorRunnerService.kt +++ /dev/null @@ -1,353 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import kotlinx.coroutines.CoroutineScope -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.Job -import kotlinx.coroutines.SupervisorJob -import kotlinx.coroutines.launch -import org.apache.logging.log4j.LogManager -import org.opensearch.action.bulk.BackoffPolicy -import org.opensearch.action.support.master.AcknowledgedResponse -import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.alerts.AlertMover.Companion.moveAlerts -import org.opensearch.alerting.core.JobRunner -import org.opensearch.alerting.core.ScheduledJobIndices -import org.opensearch.alerting.model.MonitorRunResult -import org.opensearch.alerting.model.WorkflowRunResult -import org.opensearch.alerting.model.destination.DestinationContextFactory -import org.opensearch.alerting.opensearchapi.retry -import org.opensearch.alerting.script.TriggerExecutionContext -import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_BACKOFF_COUNT -import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_BACKOFF_MILLIS -import org.opensearch.alerting.settings.AlertingSettings.Companion.INDEX_TIMEOUT -import org.opensearch.alerting.settings.AlertingSettings.Companion.MAX_ACTIONABLE_ALERT_COUNT -import org.opensearch.alerting.settings.AlertingSettings.Companion.MOVE_ALERTS_BACKOFF_COUNT -import org.opensearch.alerting.settings.AlertingSettings.Companion.MOVE_ALERTS_BACKOFF_MILLIS -import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST -import org.opensearch.alerting.settings.DestinationSettings.Companion.HOST_DENY_LIST -import org.opensearch.alerting.settings.DestinationSettings.Companion.loadDestinationSettings -import org.opensearch.alerting.util.DocLevelMonitorQueries -import org.opensearch.alerting.util.IndexUtils -import org.opensearch.alerting.util.isDocLevelMonitor -import org.opensearch.alerting.workflow.CompositeWorkflowRunner -import org.opensearch.client.Client -import org.opensearch.cluster.metadata.IndexNameExpressionResolver -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.lifecycle.AbstractLifecycleComponent -import org.opensearch.common.settings.Settings -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.commons.alerting.model.action.Action -import org.opensearch.commons.alerting.util.isBucketLevelMonitor -import org.opensearch.core.action.ActionListener -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.script.Script -import org.opensearch.script.ScriptService -import org.opensearch.script.TemplateScript -import org.opensearch.threadpool.ThreadPool -import java.time.Instant -import java.time.LocalDateTime -import 
java.time.ZoneOffset -import java.util.UUID -import kotlin.coroutines.CoroutineContext - -object MonitorRunnerService : JobRunner, CoroutineScope, AbstractLifecycleComponent() { - - private val logger = LogManager.getLogger(javaClass) - - var monitorCtx: MonitorRunnerExecutionContext = MonitorRunnerExecutionContext() - private lateinit var runnerSupervisor: Job - override val coroutineContext: CoroutineContext - get() = Dispatchers.Default + runnerSupervisor - - fun registerClusterService(clusterService: ClusterService): MonitorRunnerService { - this.monitorCtx.clusterService = clusterService - return this - } - - fun registerClient(client: Client): MonitorRunnerService { - this.monitorCtx.client = client - return this - } - - fun registerNamedXContentRegistry(xContentRegistry: NamedXContentRegistry): MonitorRunnerService { - this.monitorCtx.xContentRegistry = xContentRegistry - return this - } - - fun registerindexNameExpressionResolver(indexNameExpressionResolver: IndexNameExpressionResolver): MonitorRunnerService { - this.monitorCtx.indexNameExpressionResolver = indexNameExpressionResolver - return this - } - - fun registerScriptService(scriptService: ScriptService): MonitorRunnerService { - this.monitorCtx.scriptService = scriptService - return this - } - - fun registerSettings(settings: Settings): MonitorRunnerService { - this.monitorCtx.settings = settings - return this - } - - fun registerThreadPool(threadPool: ThreadPool): MonitorRunnerService { - this.monitorCtx.threadPool = threadPool - return this - } - - fun registerAlertIndices(alertIndices: AlertIndices): MonitorRunnerService { - this.monitorCtx.alertIndices = alertIndices - return this - } - - fun registerInputService(inputService: InputService): MonitorRunnerService { - this.monitorCtx.inputService = inputService - return this - } - - fun registerTriggerService(triggerService: TriggerService): MonitorRunnerService { - this.monitorCtx.triggerService = triggerService - return this - } - - fun registerAlertService(alertService: AlertService): MonitorRunnerService { - this.monitorCtx.alertService = alertService - return this - } - - fun registerDocLevelMonitorQueries(docLevelMonitorQueries: DocLevelMonitorQueries): MonitorRunnerService { - this.monitorCtx.docLevelMonitorQueries = docLevelMonitorQueries - return this - } - - fun registerWorkflowService(workflowService: WorkflowService): MonitorRunnerService { - this.monitorCtx.workflowService = workflowService - return this - } - - // Must be called after registerClusterService and registerSettings in AlertingPlugin - fun registerConsumers(): MonitorRunnerService { - monitorCtx.retryPolicy = BackoffPolicy.constantBackoff( - ALERT_BACKOFF_MILLIS.get(monitorCtx.settings), - ALERT_BACKOFF_COUNT.get(monitorCtx.settings) - ) - monitorCtx.clusterService!!.clusterSettings.addSettingsUpdateConsumer(ALERT_BACKOFF_MILLIS, ALERT_BACKOFF_COUNT) { millis, count -> - monitorCtx.retryPolicy = BackoffPolicy.constantBackoff(millis, count) - } - - monitorCtx.moveAlertsRetryPolicy = - BackoffPolicy.exponentialBackoff( - MOVE_ALERTS_BACKOFF_MILLIS.get(monitorCtx.settings), - MOVE_ALERTS_BACKOFF_COUNT.get(monitorCtx.settings) - ) - monitorCtx.clusterService!!.clusterSettings.addSettingsUpdateConsumer( - MOVE_ALERTS_BACKOFF_MILLIS, - MOVE_ALERTS_BACKOFF_COUNT - ) { millis, count -> - monitorCtx.moveAlertsRetryPolicy = BackoffPolicy.exponentialBackoff(millis, count) - } - - monitorCtx.allowList = ALLOW_LIST.get(monitorCtx.settings) - 
monitorCtx.clusterService!!.clusterSettings.addSettingsUpdateConsumer(ALLOW_LIST) { - monitorCtx.allowList = it - } - - // Host deny list is not a dynamic setting so no consumer is registered but the variable is set here - monitorCtx.hostDenyList = HOST_DENY_LIST.get(monitorCtx.settings) - - monitorCtx.maxActionableAlertCount = MAX_ACTIONABLE_ALERT_COUNT.get(monitorCtx.settings) - monitorCtx.clusterService!!.clusterSettings.addSettingsUpdateConsumer(MAX_ACTIONABLE_ALERT_COUNT) { - monitorCtx.maxActionableAlertCount = it - } - - monitorCtx.indexTimeout = INDEX_TIMEOUT.get(monitorCtx.settings) - - return this - } - - // To be safe, call this last as it depends on a number of other components being registered beforehand (client, settings, etc.) - fun registerDestinationSettings(): MonitorRunnerService { - monitorCtx.destinationSettings = loadDestinationSettings(monitorCtx.settings!!) - monitorCtx.destinationContextFactory = - DestinationContextFactory(monitorCtx.client!!, monitorCtx.xContentRegistry!!, monitorCtx.destinationSettings!!) - return this - } - - // Updates destination settings when the reload API is called so that new keystore values are visible - fun reloadDestinationSettings(settings: Settings) { - monitorCtx.destinationSettings = loadDestinationSettings(settings) - - // Update destinationContextFactory as well since destinationSettings has been updated - monitorCtx.destinationContextFactory!!.updateDestinationSettings(monitorCtx.destinationSettings!!) - } - - override fun doStart() { - runnerSupervisor = SupervisorJob() - } - - override fun doStop() { - runnerSupervisor.cancel() - } - - override fun doClose() {} - - override fun postIndex(job: ScheduledJob) { - if (job is Monitor) { - launch { - try { - monitorCtx.moveAlertsRetryPolicy!!.retry(logger) { - if (monitorCtx.alertIndices!!.isAlertInitialized(job.dataSources)) { - moveAlerts(monitorCtx.client!!, job.id, job) - } - } - } catch (e: Exception) { - logger.error("Failed to move active alerts for monitor [${job.id}].", e) - } - } - } else if (job is Workflow) { - launch { - try { - monitorCtx.moveAlertsRetryPolicy!!.retry(logger) { - moveAlerts(monitorCtx.client!!, job.id, job, monitorCtx) - } - } catch (e: Exception) { - logger.error("Failed to move active alerts for workflow [${job.id}].", e) - } - } - } else { - throw IllegalArgumentException("Invalid job type") - } - } - - override fun postDelete(jobId: String) { - launch { - try { - monitorCtx.moveAlertsRetryPolicy!!.retry(logger) { - moveAlerts(monitorCtx.client!!, jobId, null, monitorCtx) - } - } catch (e: Exception) { - logger.error("Failed to move active alerts for workflow [$jobId]; the job may be a monitor.", e) - } - try { - monitorCtx.moveAlertsRetryPolicy!!.retry(logger) { - if (monitorCtx.alertIndices!!.isAlertInitialized()) { - moveAlerts(monitorCtx.client!!, jobId, null) - } - } - } catch (e: Exception) { - logger.error("Failed to move active alerts for monitor [$jobId].", e) - } - } - }
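// Editor's note (illustrative): postIndex/postDelete above move alerts out of the active index
// whenever a job is updated or removed, wrapped in moveAlertsRetryPolicy so transient failures
// are retried with backoff. A rough standalone sketch of that shape, assuming a suspending
// retry extension like the plugin's BackoffPolicy.retry(logger) helper:
//
//   val policy = BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(250), 3)
//   policy.retry(logger) {                      // re-runs the block on retryable exceptions
//       moveAlerts(client, jobId, null, monitorCtx)
//   }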
- - override fun runJob(job: ScheduledJob, periodStart: Instant, periodEnd: Instant) { - when (job) { - is Workflow -> { - launch { - runJob(job, periodStart, periodEnd, false) - } - } - is Monitor -> { - launch { - runJob(job, periodStart, periodEnd, false) - } - } - else -> { - throw IllegalArgumentException("Invalid job type") - } - } - } - - suspend fun runJob(workflow: Workflow, periodStart: Instant, periodEnd: Instant, dryrun: Boolean): WorkflowRunResult { - return CompositeWorkflowRunner.runWorkflow(workflow, monitorCtx, periodStart, periodEnd, dryrun) - } - - suspend fun runJob(job: ScheduledJob, periodStart: Instant, periodEnd: Instant, dryrun: Boolean): MonitorRunResult<*> { - // Updating the scheduled job index at the start of monitor execution covers the case where the cluster - // was upgraded but the schema mapping has not yet been updated. - if (!IndexUtils.scheduledJobIndexUpdated && monitorCtx.clusterService != null && monitorCtx.client != null) { - IndexUtils.updateIndexMapping( - ScheduledJob.SCHEDULED_JOBS_INDEX, - ScheduledJobIndices.scheduledJobMappings(), monitorCtx.clusterService!!.state(), monitorCtx.client!!.admin().indices(), - object : ActionListener<AcknowledgedResponse> { - override fun onResponse(response: AcknowledgedResponse) { - } - - override fun onFailure(t: Exception) { - logger.error("Failed to update config index schema", t) - } - } - ) - } - - if (job is Workflow) { - logger.info("Executing scheduled workflow - id: ${job.id}, periodStart: $periodStart, periodEnd: $periodEnd, dryrun: $dryrun") - CompositeWorkflowRunner.runWorkflow(workflow = job, monitorCtx, periodStart, periodEnd, dryrun) - } - val monitor = job as Monitor - val executionId = "${monitor.id}_${LocalDateTime.now(ZoneOffset.UTC)}_${UUID.randomUUID()}" - logger.info( - "Executing scheduled monitor - id: ${monitor.id}, type: ${monitor.monitorType.name}, periodStart: $periodStart, " + - "periodEnd: $periodEnd, dryrun: $dryrun, executionId: $executionId" - ) - val runResult = if (monitor.isBucketLevelMonitor()) { - BucketLevelMonitorRunner.runMonitor(monitor, monitorCtx, periodStart, periodEnd, dryrun, executionId = executionId) - } else if (monitor.isDocLevelMonitor()) { - DocumentLevelMonitorRunner.runMonitor(monitor, monitorCtx, periodStart, periodEnd, dryrun, executionId = executionId) - } else { - QueryLevelMonitorRunner.runMonitor(monitor, monitorCtx, periodStart, periodEnd, dryrun, executionId = executionId) - } - return runResult - } - - // TODO: See if we can move the methods below (or some of them) to a common utils class - internal fun getRolesForMonitor(monitor: Monitor): List<String> { - /* - * We need to handle 3 cases: - * 1. Monitors created by older versions and never updated. These monitors won't have User details in the - * monitor object. `monitor.user` will be null. Fall back to the `all_access, AmazonES_all_access` roles. - * 2. Monitors created while the security plugin is disabled; these will have an empty User object - * (`monitor.user.name` and `monitor.user.roles` are empty). - * 3. Monitors created while the security plugin is enabled; these will have a populated User object. - */ - return if (monitor.user == null) { - // fixme: discuss and remove hardcoded to settings? - // TODO: Remove "AmazonES_all_access" role? 
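// Editor's note (illustrative): Settings.getAsList(key, defaultValue) returns defaultValue when
// the key is absent, so with the empty key below this effectively always yields the fallback
// roles, e.g.:
//
//   val roles = settings.getAsList("", listOf("all_access", "AmazonES_all_access"))
//   // roles == ["all_access", "AmazonES_all_access"] unless "" is explicitly set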
- monitorCtx.settings!!.getAsList("", listOf("all_access", "AmazonES_all_access")) - } else { - monitor.user!!.roles - } - } - - // TODO: Can this be updated to just use 'Instant.now()'? - // 'threadPool.absoluteTimeInMillis()' is referring to a cached value of System.currentTimeMillis() that by default updates every 200ms - internal fun currentTime() = Instant.ofEpochMilli(monitorCtx.threadPool!!.absoluteTimeInMillis()) - - internal fun isActionActionable(action: Action, alert: Alert?): Boolean { - if (alert != null && alert.state == Alert.State.AUDIT) - return false - if (alert == null || action.throttle == null) { - return true - } - if (action.throttleEnabled) { - val result = alert.actionExecutionResults.firstOrNull { r -> r.actionId == action.id } - val lastExecutionTime: Instant? = result?.lastExecutionTime - val throttledTimeBound = currentTime().minus(action.throttle!!.value.toLong(), action.throttle!!.unit) - return (lastExecutionTime == null || lastExecutionTime.isBefore(throttledTimeBound)) - } - return true - } - - internal fun compileTemplate(template: Script, ctx: TriggerExecutionContext): String { - return monitorCtx.scriptService!!.compile(template, TemplateScript.CONTEXT) - .newInstance(template.params + mapOf("ctx" to ctx.asTemplateArg())) - .execute() - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/QueryLevelMonitorRunner.kt b/alerting/bin/main/org/opensearch/alerting/QueryLevelMonitorRunner.kt deleted file mode 100644 index 691071517..000000000 --- a/alerting/bin/main/org/opensearch/alerting/QueryLevelMonitorRunner.kt +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import org.apache.logging.log4j.LogManager -import org.opensearch.alerting.model.MonitorRunResult -import org.opensearch.alerting.model.QueryLevelTriggerRunResult -import org.opensearch.alerting.opensearchapi.InjectorContextElement -import org.opensearch.alerting.opensearchapi.withClosableContext -import org.opensearch.alerting.script.QueryLevelTriggerExecutionContext -import org.opensearch.alerting.util.isADMonitor -import org.opensearch.alerting.workflow.WorkflowRunContext -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.QueryLevelTrigger -import java.time.Instant - -object QueryLevelMonitorRunner : MonitorRunner() { - private val logger = LogManager.getLogger(javaClass) - - override suspend fun runMonitor( - monitor: Monitor, - monitorCtx: MonitorRunnerExecutionContext, - periodStart: Instant, - periodEnd: Instant, - dryrun: Boolean, - workflowRunContext: WorkflowRunContext?, - executionId: String - ): MonitorRunResult { - val roles = MonitorRunnerService.getRolesForMonitor(monitor) - logger.debug("Running monitor: ${monitor.name} with roles: $roles Thread: ${Thread.currentThread().name}") - - if (periodStart == periodEnd) { - logger.warn("Start and end time are the same: $periodStart. 
This monitor will probably only run once.") - } - - var monitorResult = MonitorRunResult<QueryLevelTriggerRunResult>(monitor.name, periodStart, periodEnd) - val currentAlerts = try { - monitorCtx.alertIndices!!.createOrUpdateAlertIndex(monitor.dataSources) - monitorCtx.alertIndices!!.createOrUpdateInitialAlertHistoryIndex(monitor.dataSources) - monitorCtx.alertService!!.loadCurrentAlertsForQueryLevelMonitor(monitor, workflowRunContext) - } catch (e: Exception) { - // We can't save ERROR alerts to the index here as we don't know if there are existing ACTIVE alerts - val id = if (monitor.id.trim().isEmpty()) "_na_" else monitor.id - logger.error("Error loading alerts for monitor: $id", e) - return monitorResult.copy(error = e) - } - if (!isADMonitor(monitor)) { - withClosableContext(InjectorContextElement(monitor.id, monitorCtx.settings!!, monitorCtx.threadPool!!.threadContext, roles)) { - monitorResult = monitorResult.copy( - inputResults = monitorCtx.inputService!!.collectInputResults(monitor, periodStart, periodEnd, null, workflowRunContext) - ) - } - } else { - monitorResult = monitorResult.copy( - inputResults = monitorCtx.inputService!!.collectInputResultsForADMonitor(monitor, periodStart, periodEnd) - ) - } - - val updatedAlerts = mutableListOf<Alert>() - val triggerResults = mutableMapOf<String, QueryLevelTriggerRunResult>() - for (trigger in monitor.triggers) { - val currentAlert = currentAlerts[trigger] - val triggerCtx = QueryLevelTriggerExecutionContext(monitor, trigger as QueryLevelTrigger, monitorResult, currentAlert) - val triggerResult = monitorCtx.triggerService!!.runQueryLevelTrigger(monitor, trigger, triggerCtx) - triggerResults[trigger.id] = triggerResult - - if (monitorCtx.triggerService!!.isQueryLevelTriggerActionable(triggerCtx, triggerResult, workflowRunContext)) { - val actionCtx = triggerCtx.copy(error = monitorResult.error ?: triggerResult.error) - for (action in trigger.actions) { - triggerResult.actionResults[action.id] = this.runAction(action, actionCtx, monitorCtx, monitor, dryrun) - } - } - - val updatedAlert = monitorCtx.alertService!!.composeQueryLevelAlert( - triggerCtx, - triggerResult, - monitorResult.alertError() ?: triggerResult.alertError(), - executionId, - workflowRunContext - ) - if (updatedAlert != null) updatedAlerts += updatedAlert - } - - // Don't save alerts if this is a test monitor - if (!dryrun && monitor.id != Monitor.NO_ID) { - monitorCtx.retryPolicy?.let { - monitorCtx.alertService!!.saveAlerts( - monitor.dataSources, - updatedAlerts, - it, - routingId = monitor.id - ) - } - } - return monitorResult.copy(triggerResults = triggerResults) - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/TriggerService.kt b/alerting/bin/main/org/opensearch/alerting/TriggerService.kt deleted file mode 100644 index f2356eddf..000000000 --- a/alerting/bin/main/org/opensearch/alerting/TriggerService.kt +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import org.apache.logging.log4j.LogManager -import org.opensearch.alerting.chainedAlertCondition.parsers.ChainedAlertExpressionParser -import org.opensearch.alerting.model.BucketLevelTriggerRunResult -import org.opensearch.alerting.model.ChainedAlertTriggerRunResult -import org.opensearch.alerting.model.DocumentLevelTriggerRunResult -import org.opensearch.alerting.model.QueryLevelTriggerRunResult -import org.opensearch.alerting.script.BucketLevelTriggerExecutionContext -import org.opensearch.alerting.script.ChainedAlertTriggerExecutionContext -import 
org.opensearch.alerting.script.QueryLevelTriggerExecutionContext -import org.opensearch.alerting.script.TriggerScript -import org.opensearch.alerting.triggercondition.parsers.TriggerExpressionParser -import org.opensearch.alerting.util.getBucketKeysHash -import org.opensearch.alerting.workflow.WorkflowRunContext -import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorIndices.Fields.BUCKET_INDICES -import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorIndices.Fields.PARENT_BUCKET_PATH -import org.opensearch.commons.alerting.model.AggregationResultBucket -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.BucketLevelTrigger -import org.opensearch.commons.alerting.model.ChainedAlertTrigger -import org.opensearch.commons.alerting.model.DocLevelQuery -import org.opensearch.commons.alerting.model.DocumentLevelTrigger -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.QueryLevelTrigger -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.script.Script -import org.opensearch.script.ScriptService -import org.opensearch.search.aggregations.Aggregation -import org.opensearch.search.aggregations.Aggregations -import org.opensearch.search.aggregations.support.AggregationPath - -/** Service that handles executing Triggers */ -class TriggerService(val scriptService: ScriptService) { - - private val logger = LogManager.getLogger(TriggerService::class.java) - private val ALWAYS_RUN = Script("return true") - private val NEVER_RUN = Script("return false") - - fun isQueryLevelTriggerActionable( - ctx: QueryLevelTriggerExecutionContext, - result: QueryLevelTriggerRunResult, - workflowRunContext: WorkflowRunContext?, - ): Boolean { - if (workflowRunContext?.auditDelegateMonitorAlerts == true) return false - // Suppress actions if the current alert is acknowledged and there are no errors. - val suppress = ctx.alert?.state == Alert.State.ACKNOWLEDGED && result.error == null && ctx.error == null - return result.triggered && !suppress - } - - fun isChainedAlertTriggerActionable( - ctx: ChainedAlertTriggerExecutionContext, - result: ChainedAlertTriggerRunResult, - ): Boolean { - // Suppress actions if the current alert is acknowledged and there are no errors. 
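// Editor's illustration of the suppression rule below, spelled out with hypothetical values
// (the query-level variant above follows the same shape):
//   triggered=true,  state=ACKNOWLEDGED, no errors     -> not actionable (suppressed)
//   triggered=true,  state=ACTIVE,       no errors     -> actionable
//   triggered=true,  state=ACKNOWLEDGED, error != null -> actionable (errors re-notify)
//   triggered=false, any state                         -> not actionable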
- val suppress = ctx.alert?.state == Alert.State.ACKNOWLEDGED && result.error == null && ctx.error == null - return result.triggered && !suppress - } - - fun runQueryLevelTrigger( - monitor: Monitor, - trigger: QueryLevelTrigger, - ctx: QueryLevelTriggerExecutionContext - ): QueryLevelTriggerRunResult { - return try { - val triggered = scriptService.compile(trigger.condition, TriggerScript.CONTEXT) - .newInstance(trigger.condition.params) - .execute(ctx) - QueryLevelTriggerRunResult(trigger.name, triggered, null) - } catch (e: Exception) { - logger.info("Error running script for monitor ${monitor.id}, trigger: ${trigger.id}", e) - // if the script fails we need to send an alert so set triggered = true - QueryLevelTriggerRunResult(trigger.name, true, e) - } - } - - // TODO: improve performance and support match all and match any - fun runDocLevelTrigger( - monitor: Monitor, - trigger: DocumentLevelTrigger, - queryToDocIds: Map<DocLevelQuery, Set<String>> - ): DocumentLevelTriggerRunResult { - return try { - var triggeredDocs = mutableListOf<String>() - - if (trigger.condition.idOrCode.equals(ALWAYS_RUN.idOrCode)) { - for (value in queryToDocIds.values) { - triggeredDocs.addAll(value) - } - } else if (!trigger.condition.idOrCode.equals(NEVER_RUN.idOrCode)) { - triggeredDocs = TriggerExpressionParser(trigger.condition.idOrCode).parse() - .evaluate(queryToDocIds).toMutableList() - } - - DocumentLevelTriggerRunResult(trigger.name, triggeredDocs, null) - } catch (e: Exception) { - logger.info("Error running script for monitor ${monitor.id}, trigger: ${trigger.id}", e) - // if the script fails we need to send an alert so set triggered = true - DocumentLevelTriggerRunResult(trigger.name, emptyList(), e) - } - } - - fun runChainedAlertTrigger( - workflow: Workflow, - trigger: ChainedAlertTrigger, - alertGeneratingMonitors: Set<String>, - monitorIdToAlertIdsMap: Map<String, Set<String>>, - ): ChainedAlertTriggerRunResult { - val associatedAlertIds = mutableSetOf<String>() - return try { - val parsedTriggerCondition = ChainedAlertExpressionParser(trigger.condition.idOrCode).parse() - val evaluate = parsedTriggerCondition.evaluate(alertGeneratingMonitors) - if (evaluate) { - val monitorIdsInTriggerCondition = parsedTriggerCondition.getMonitorIds(parsedTriggerCondition) - monitorIdsInTriggerCondition.forEach { associatedAlertIds.addAll(monitorIdToAlertIdsMap.getOrDefault(it, emptySet())) } - } - ChainedAlertTriggerRunResult(trigger.name, triggered = evaluate, null, associatedAlertIds = associatedAlertIds) - } catch (e: Exception) { - logger.error("Error running chained alert trigger script for workflow ${workflow.id}, trigger: ${trigger.id}", e) - ChainedAlertTriggerRunResult( - triggerName = trigger.name, - triggered = false, - error = e, - associatedAlertIds = emptySet() - ) - } - } - - @Suppress("UNCHECKED_CAST") - fun runBucketLevelTrigger( - monitor: Monitor, - trigger: BucketLevelTrigger, - ctx: BucketLevelTriggerExecutionContext - ): BucketLevelTriggerRunResult { - return try { - val bucketIndices = - ((ctx.results[0][Aggregations.AGGREGATIONS_FIELD] as HashMap<*, *>)[trigger.id] as HashMap<*, *>)[BUCKET_INDICES] as List<*> - val parentBucketPath = ( - (ctx.results[0][Aggregations.AGGREGATIONS_FIELD] as HashMap<*, *>) - .get(trigger.id) as HashMap<*, *> - )[PARENT_BUCKET_PATH] as String - val aggregationPath = AggregationPath.parse(parentBucketPath) - // TODO test this part by passing sub-aggregation path - var parentAgg = (ctx.results[0][Aggregations.AGGREGATIONS_FIELD] as HashMap<*, *>) - aggregationPath.pathElementsAsStringList.forEach { sub_agg -> - parentAgg = (parentAgg[sub_agg] as HashMap<*, *>) - } - val buckets = parentAgg[Aggregation.CommonFields.BUCKETS.preferredName] as List<*> - val selectedBuckets = mutableMapOf<String, AggregationResultBucket>() - for (bucketIndex in bucketIndices) { - val bucketDict = buckets[bucketIndex as Int] as Map<String, Any> - val bucketKeyValuesList = getBucketKeyValuesList(bucketDict) - val aggResultBucket = AggregationResultBucket(parentBucketPath, bucketKeyValuesList, bucketDict) - selectedBuckets[aggResultBucket.getBucketKeysHash()] = aggResultBucket - } - BucketLevelTriggerRunResult(trigger.name, null, selectedBuckets) - } catch (e: Exception) { - logger.info("Error running trigger [${trigger.id}] for monitor [${monitor.id}]", e) - BucketLevelTriggerRunResult(trigger.name, e, emptyMap()) - } - }
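// Editor's note (illustrative): runBucketLevelTrigger above walks the parent bucket path that the
// bucket-selector style condition reported, then keeps only the buckets whose indices were
// flagged. A simplified sketch of the selection step, with hypothetical inputs:
//
//   val buckets = listOf(mapOf("key" to "a", "doc_count" to 3), mapOf("key" to "b", "doc_count" to 9))
//   val bucketIndices = listOf(1)                      // reported by the trigger condition
//   val selected = bucketIndices.map { buckets[it] }   // -> only the "b" bucket survives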
- @Suppress("UNCHECKED_CAST") - private fun getBucketKeyValuesList(bucket: Map<String, Any>): List<String> { - val keyField = Aggregation.CommonFields.KEY.preferredName - val keyValuesList = mutableListOf<String>() - when { - bucket[keyField] is List<*> && bucket.containsKey(Aggregation.CommonFields.KEY_AS_STRING.preferredName) -> - keyValuesList.add(bucket[Aggregation.CommonFields.KEY_AS_STRING.preferredName] as String) - bucket[keyField] is String -> keyValuesList.add(bucket[keyField] as String) - // In the case where the key field is an Int - bucket[keyField] is Int -> keyValuesList.add(bucket[keyField].toString()) - // In the case where the key field is an object with multiple values (such as a composite aggregation with more than one source) - // the values will be iterated through and converted into a string - bucket[keyField] is Map<*, *> -> (bucket[keyField] as Map<*, *>).values.map { keyValuesList.add(it.toString()) } - else -> throw IllegalArgumentException("Unexpected format for key in bucket [$bucket]") - } - - return keyValuesList - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/WorkflowMetadataService.kt b/alerting/bin/main/org/opensearch/alerting/WorkflowMetadataService.kt deleted file mode 100644 index 9dc4fbcdd..000000000 --- a/alerting/bin/main/org/opensearch/alerting/WorkflowMetadataService.kt +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import kotlinx.coroutines.CoroutineName -import kotlinx.coroutines.CoroutineScope -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.SupervisorJob -import org.apache.logging.log4j.LogManager -import org.opensearch.OpenSearchException -import org.opensearch.action.DocWriteRequest -import org.opensearch.action.DocWriteResponse -import org.opensearch.action.get.GetRequest -import org.opensearch.action.get.GetResponse -import org.opensearch.action.index.IndexRequest -import org.opensearch.action.index.IndexResponse -import org.opensearch.action.support.WriteRequest -import org.opensearch.alerting.model.WorkflowMetadata -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.util.AlertingException -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.settings.Settings -import org.opensearch.common.unit.TimeValue -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentFactory -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.CompositeInput -import 
org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils -import java.time.Instant -import java.time.LocalDateTime -import java.time.ZoneOffset -import java.util.UUID - -object WorkflowMetadataService : - CoroutineScope by CoroutineScope(SupervisorJob() + Dispatchers.Default + CoroutineName("WorkflowMetadataService")) { - private val log = LogManager.getLogger(this::class.java) - - private lateinit var client: Client - private lateinit var xContentRegistry: NamedXContentRegistry - private lateinit var clusterService: ClusterService - private lateinit var settings: Settings - - @Volatile private lateinit var indexTimeout: TimeValue - - fun initialize( - client: Client, - clusterService: ClusterService, - xContentRegistry: NamedXContentRegistry, - settings: Settings - ) { - this.clusterService = clusterService - this.client = client - this.xContentRegistry = xContentRegistry - this.settings = settings - this.indexTimeout = AlertingSettings.INDEX_TIMEOUT.get(settings) - this.clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.INDEX_TIMEOUT) { indexTimeout = it } - } - - @Suppress("ComplexMethod", "ReturnCount") - suspend fun upsertWorkflowMetadata(metadata: WorkflowMetadata, updating: Boolean): WorkflowMetadata { - try { - val indexRequest = IndexRequest(ScheduledJob.SCHEDULED_JOBS_INDEX) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .source(metadata.toXContent(XContentFactory.jsonBuilder(), ToXContent.MapParams(mapOf("with_type" to "true")))) - .id(metadata.id) - .routing(metadata.workflowId) - .timeout(indexTimeout) - - if (updating) { - indexRequest.id(metadata.id) - } else { - indexRequest.opType(DocWriteRequest.OpType.CREATE) - } - val response: IndexResponse = client.suspendUntil { index(indexRequest, it) } - when (response.result) { - DocWriteResponse.Result.DELETED, DocWriteResponse.Result.NOOP, DocWriteResponse.Result.NOT_FOUND, null -> { - val failureReason = "The upsert metadata call failed with a ${response.result?.lowercase} result" - log.error(failureReason) - throw AlertingException(failureReason, RestStatus.INTERNAL_SERVER_ERROR, IllegalStateException(failureReason)) - } - DocWriteResponse.Result.CREATED, DocWriteResponse.Result.UPDATED -> { - log.debug("Successfully upserted WorkflowMetadata:${metadata.id} ") - } - } - return metadata - } catch (e: Exception) { - // If the update is set to false and id is set conflict exception will be thrown - if (e is OpenSearchException && e.status() == RestStatus.CONFLICT && !updating) { - log.debug( - "Metadata with ${metadata.id} for workflow ${metadata.workflowId} already exist." 
+ - " The existing metadata will be updated instead of creating a new entry." - ) - return upsertWorkflowMetadata(metadata, true) - } - log.error("Error saving metadata", e) - throw AlertingException.wrap(e) - } - } - - suspend fun getOrCreateWorkflowMetadata( - workflow: Workflow, - skipIndex: Boolean = false, - executionId: String - ): Pair<WorkflowMetadata, Boolean> { - try { - val created = true - val metadata = getWorkflowMetadata(workflow) - return if (metadata != null) { - metadata to !created - } else { - val newMetadata = createNewWorkflowMetadata(workflow, executionId, skipIndex) - if (skipIndex) { - newMetadata to created - } else { - upsertWorkflowMetadata(newMetadata, updating = false) to created - } - } - } catch (e: Exception) { - throw AlertingException.wrap(e) - } - } - - private suspend fun getWorkflowMetadata(workflow: Workflow): WorkflowMetadata? { - try { - val metadataId = WorkflowMetadata.getId(workflow.id) - val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, metadataId).routing(workflow.id) - - val getResponse: GetResponse = client.suspendUntil { get(getRequest, it) } - return if (getResponse.isExists) { - val xcp = XContentHelper.createParser( - xContentRegistry, - LoggingDeprecationHandler.INSTANCE, - getResponse.sourceAsBytesRef, - XContentType.JSON - ) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - WorkflowMetadata.parse(xcp) - } else { - null - } - } catch (e: Exception) { - if (e.message?.contains("no such index") == true) { - return null - } else { - throw AlertingException.wrap(e) - } - } - } - - private fun createNewWorkflowMetadata(workflow: Workflow, executionId: String, isTempWorkflow: Boolean): WorkflowMetadata { - // In the case of a temp workflow (i.e. the workflow is in dry-run) use the timestampWithUUID-metadata format - // In the case of regular workflow execution, use the workflowId-metadata format - val id = if (isTempWorkflow) "${LocalDateTime.now(ZoneOffset.UTC)}${UUID.randomUUID()}" else workflow.id - return WorkflowMetadata( - id = WorkflowMetadata.getId(id), - workflowId = workflow.id, - monitorIds = (workflow.inputs[0] as CompositeInput).getMonitorIds(), - latestRunTime = Instant.now(), - latestExecutionId = executionId - ) - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/WorkflowService.kt b/alerting/bin/main/org/opensearch/alerting/WorkflowService.kt deleted file mode 100644 index 04bd64b8d..000000000 --- a/alerting/bin/main/org/opensearch/alerting/WorkflowService.kt +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import org.apache.logging.log4j.LogManager -import org.opensearch.OpenSearchException -import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequest -import org.opensearch.action.admin.indices.exists.indices.IndicesExistsResponse -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.util.AlertingException -import org.opensearch.client.Client -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.Finding -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.XContentParser 
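// Editor's note (illustrative): upsertWorkflowMetadata above relies on opType(CREATE) to fail fast
// when the metadata document already exists, then retries once in update mode on a 409 CONFLICT.
// The shape of that pattern in rough outline (hypothetical helper names, assuming the plugin's
// suspendUntil client extension):
//
//   val request = IndexRequest(index).id(docId).source(doc)
//   if (!updating) request.opType(DocWriteRequest.OpType.CREATE)   // fail if the doc exists
//   try {
//       client.suspendUntil<IndexResponse> { index(request, it) }
//   } catch (e: OpenSearchException) {
//       if (e.status() == RestStatus.CONFLICT && !updating) upsert(doc, updating = true) else throw e
//   }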
-import org.opensearch.core.xcontent.XContentParserUtils -import org.opensearch.index.query.QueryBuilders -import org.opensearch.search.builder.SearchSourceBuilder - -private val log = LogManager.getLogger(WorkflowService::class.java) - -/** - * Contains util methods used in workflow execution - */ -class WorkflowService( - val client: Client, - val xContentRegistry: NamedXContentRegistry, -) { - /** - * Returns finding doc ids per index for the given workflow execution - * Used for pre-filtering the dataset in the case of creating a workflow with chained findings - * - * @param chainedMonitors Monitors that have previously executed - * @param workflowExecutionId Execution id of the current workflow - */ - suspend fun getFindingDocIdsByExecutionId(chainedMonitors: List<Monitor>, workflowExecutionId: String): Map<String, List<String>> { - if (chainedMonitors.isEmpty()) - return emptyMap() - val dataSources = chainedMonitors[0].dataSources - try { - val existsResponse: IndicesExistsResponse = client.admin().indices().suspendUntil { - exists(IndicesExistsRequest(dataSources.findingsIndex).local(true), it) - } - if (!existsResponse.isExists) return emptyMap() - // Search findings index to match id of monitors and workflow execution id - val bqb = QueryBuilders.boolQuery() - .filter( - QueryBuilders.termsQuery( - Finding.MONITOR_ID_FIELD, - chainedMonitors.map { it.id } - ) - ) - .filter(QueryBuilders.termQuery(Finding.EXECUTION_ID_FIELD, workflowExecutionId)) - val searchRequest = SearchRequest() - .source( - SearchSourceBuilder() - .query(bqb) - .version(true) - .seqNoAndPrimaryTerm(true) - ) - .indices(dataSources.findingsIndex) - val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } - - // Get the findings docs - val findings = mutableListOf<Finding>() - for (hit in searchResponse.hits) { - val xcp = XContentType.JSON.xContent() - .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, hit.sourceAsString) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - val finding = Finding.parse(xcp) - findings.add(finding) - } - // Based on the findings get the document ids - val indexToRelatedDocIdsMap = mutableMapOf<String, MutableList<String>>() - for (finding in findings) { - indexToRelatedDocIdsMap.getOrPut(finding.index) { mutableListOf() }.addAll(finding.relatedDocIds) - } - return indexToRelatedDocIdsMap - } catch (t: Exception) { - log.error("Error getting finding doc ids: ${t.message}", t) - throw AlertingException.wrap(t) - } - } - - /** - * Returns the list of monitors for the given ids - * Used in workflow execution in order to figure out the monitor type - * - * @param monitors List of monitor ids - * @param size Expected number of monitors - */ - suspend fun getMonitorsById(monitors: List<String>, size: Int): List<Monitor> { - try { - val bqb = QueryBuilders.boolQuery().filter(QueryBuilders.termsQuery("_id", monitors)) - - val searchRequest = SearchRequest() - .source( - SearchSourceBuilder() - .query(bqb) - .version(true) - .seqNoAndPrimaryTerm(true) - .size(size) - ) - .indices(ScheduledJob.SCHEDULED_JOBS_INDEX) - - val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } - return parseMonitors(searchResponse) - } catch (e: Exception) { - log.error("Error getting monitors: ${e.message}", e) - throw AlertingException.wrap(e) - } - } - - private fun parseMonitors(response: SearchResponse): List<Monitor> { - if (response.isTimedOut) { - log.error("Request for getting monitors timed out") - throw OpenSearchException("Cannot determine that 
the ${ScheduledJob.SCHEDULED_JOBS_INDEX} index is healthy") - } - val monitors = mutableListOf<Monitor>() - try { - for (hit in response.hits) { - XContentType.JSON.xContent().createParser( - xContentRegistry, - LoggingDeprecationHandler.INSTANCE, hit.sourceAsString - ).use { hitsParser -> - val monitor = ScheduledJob.parse(hitsParser, hit.id, hit.version) as Monitor - monitors.add(monitor) - } - } - } catch (e: Exception) { - log.error("Error parsing monitors: ${e.message}", e) - throw AlertingException.wrap(e) - } - return monitors - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorAction.kt deleted file mode 100644 index 4cc869b37..000000000 --- a/alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorAction.kt +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionType - -class ExecuteMonitorAction private constructor() : ActionType<ExecuteMonitorResponse>(NAME, ::ExecuteMonitorResponse) { - companion object { - val INSTANCE = ExecuteMonitorAction() - const val NAME = "cluster:admin/opendistro/alerting/monitor/execute" - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorRequest.kt b/alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorRequest.kt deleted file mode 100644 index ecc504677..000000000 --- a/alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorRequest.kt +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionRequestValidationException -import org.opensearch.common.unit.TimeValue -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import java.io.IOException - -class ExecuteMonitorRequest : ActionRequest { - val dryrun: Boolean - val requestEnd: TimeValue - val monitorId: String? - val monitor: Monitor? - - constructor( - dryrun: Boolean, - requestEnd: TimeValue, - monitorId: String?, - monitor: Monitor? - ) : super() { - this.dryrun = dryrun - this.requestEnd = requestEnd - this.monitorId = monitorId - this.monitor = monitor - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readBoolean(), // dryrun - sin.readTimeValue(), // requestEnd - sin.readOptionalString(), // monitorId - if (sin.readBoolean()) { - Monitor.readFrom(sin) // monitor - } else null - ) - - override fun validate(): ActionRequestValidationException? 
{ - return null - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeBoolean(dryrun) - out.writeTimeValue(requestEnd) - out.writeOptionalString(monitorId) - if (monitor != null) { - out.writeBoolean(true) - monitor.writeTo(out) - } else { - out.writeBoolean(false) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorResponse.kt b/alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorResponse.kt deleted file mode 100644 index 8d7a7c25a..000000000 --- a/alerting/bin/main/org/opensearch/alerting/action/ExecuteMonitorResponse.kt +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.alerting.model.MonitorRunResult -import org.opensearch.core.action.ActionResponse -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.ToXContentObject -import org.opensearch.core.xcontent.XContentBuilder -import java.io.IOException - -class ExecuteMonitorResponse : ActionResponse, ToXContentObject { - - val monitorRunResult: MonitorRunResult<*> - - constructor(monitorRunResult: MonitorRunResult<*>) : super() { - this.monitorRunResult = monitorRunResult - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - MonitorRunResult.readFrom(sin) // monitorRunResult - ) - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - monitorRunResult.writeTo(out) - } - - @Throws(IOException::class) - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return monitorRunResult.toXContent(builder, ToXContent.EMPTY_PARAMS) - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowAction.kt b/alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowAction.kt deleted file mode 100644 index efed1087d..000000000 --- a/alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowAction.kt +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionType - -class ExecuteWorkflowAction private constructor() : ActionType(NAME, ::ExecuteWorkflowResponse) { - companion object { - val INSTANCE = ExecuteWorkflowAction() - const val NAME = "cluster:admin/opensearch/alerting/workflow/execute" - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowRequest.kt b/alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowRequest.kt deleted file mode 100644 index 3b3d48ed2..000000000 --- a/alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowRequest.kt +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionRequestValidationException -import org.opensearch.action.ValidateActions -import org.opensearch.common.unit.TimeValue -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import java.io.IOException - -/** - * A class containing workflow details. 
- */ -class ExecuteWorkflowRequest : ActionRequest { - val dryrun: Boolean - val requestEnd: TimeValue - val workflowId: String? - val workflow: Workflow? - - constructor( - dryrun: Boolean, - requestEnd: TimeValue, - workflowId: String?, - workflow: Workflow?, - ) : super() { - this.dryrun = dryrun - this.requestEnd = requestEnd - this.workflowId = workflowId - this.workflow = workflow - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readBoolean(), - sin.readTimeValue(), - sin.readOptionalString(), - if (sin.readBoolean()) { - Workflow.readFrom(sin) - } else null - ) - - override fun validate(): ActionRequestValidationException? { - var validationException: ActionRequestValidationException? = null - if (workflowId == null && workflow == null) { - validationException = ValidateActions.addValidationError( - "Both workflow and workflow id are missing", validationException - ) - } - return validationException - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeBoolean(dryrun) - out.writeTimeValue(requestEnd) - out.writeOptionalString(workflowId) - if (workflow != null) { - out.writeBoolean(true) - workflow.writeTo(out) - } else { - out.writeBoolean(false) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowResponse.kt b/alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowResponse.kt deleted file mode 100644 index 7312a9470..000000000 --- a/alerting/bin/main/org/opensearch/alerting/action/ExecuteWorkflowResponse.kt +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.alerting.model.WorkflowRunResult -import org.opensearch.core.action.ActionResponse -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.ToXContentObject -import org.opensearch.core.xcontent.XContentBuilder -import java.io.IOException - -class ExecuteWorkflowResponse : ActionResponse, ToXContentObject { - val workflowRunResult: WorkflowRunResult - constructor( - workflowRunResult: WorkflowRunResult - ) : super() { - this.workflowRunResult = workflowRunResult - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - WorkflowRunResult(sin) - ) - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - workflowRunResult.writeTo(out) - } - - @Throws(IOException::class) - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return workflowRunResult.toXContent(builder, params) - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/action/GetDestinationsAction.kt b/alerting/bin/main/org/opensearch/alerting/action/GetDestinationsAction.kt deleted file mode 100644 index 76adde8c6..000000000 --- a/alerting/bin/main/org/opensearch/alerting/action/GetDestinationsAction.kt +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionType - -class GetDestinationsAction private constructor() : ActionType(NAME, ::GetDestinationsResponse) { - companion object { - val INSTANCE = GetDestinationsAction() - const val NAME = "cluster:admin/opendistro/alerting/destination/get" - } -} diff --git 
a/alerting/bin/main/org/opensearch/alerting/action/GetDestinationsRequest.kt b/alerting/bin/main/org/opensearch/alerting/action/GetDestinationsRequest.kt deleted file mode 100644 index 92fae8247..000000000 --- a/alerting/bin/main/org/opensearch/alerting/action/GetDestinationsRequest.kt +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionRequestValidationException -import org.opensearch.commons.alerting.model.Table -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.search.fetch.subphase.FetchSourceContext -import java.io.IOException - -class GetDestinationsRequest : ActionRequest { - val destinationId: String? - val version: Long - val srcContext: FetchSourceContext? - val table: Table - val destinationType: String - - constructor( - destinationId: String?, - version: Long, - srcContext: FetchSourceContext?, - table: Table, - destinationType: String - ) : super() { - this.destinationId = destinationId - this.version = version - this.srcContext = srcContext - this.table = table - this.destinationType = destinationType - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - destinationId = sin.readOptionalString(), - version = sin.readLong(), - srcContext = if (sin.readBoolean()) { - FetchSourceContext(sin) - } else null, - table = Table.readFrom(sin), - destinationType = sin.readString() - ) - - override fun validate(): ActionRequestValidationException? { - return null - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeOptionalString(destinationId) - out.writeLong(version) - out.writeBoolean(srcContext != null) - srcContext?.writeTo(out) - table.writeTo(out) - out.writeString(destinationType) - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/action/GetDestinationsResponse.kt b/alerting/bin/main/org/opensearch/alerting/action/GetDestinationsResponse.kt deleted file mode 100644 index 01d79a50e..000000000 --- a/alerting/bin/main/org/opensearch/alerting/action/GetDestinationsResponse.kt +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.alerting.model.destination.Destination -import org.opensearch.core.action.ActionResponse -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.ToXContentObject -import org.opensearch.core.xcontent.XContentBuilder -import java.io.IOException - -class GetDestinationsResponse : ActionResponse, ToXContentObject { - var status: RestStatus - - // totalDestinations is not necessarily the size of destinations: if 30 destinations match the request but the - // request's table size only asked for 5, totalDestinations will be 30 while destinations will contain only 5 - var totalDestinations: Int? 
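// Editor's illustration: with 30 matching destinations and a request Table size of 5,
// totalDestinations == 30 while destinations.size == 5.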
- var destinations: List - - constructor( - status: RestStatus, - totalDestinations: Int?, - destinations: List - ) : super() { - this.status = status - this.totalDestinations = totalDestinations - this.destinations = destinations - } - - @Throws(IOException::class) - constructor(sin: StreamInput) { - this.status = sin.readEnum(RestStatus::class.java) - val destinations = mutableListOf() - this.totalDestinations = sin.readOptionalInt() - var currentSize = sin.readInt() - for (i in 0 until currentSize) { - destinations.add(Destination.readFrom(sin)) - } - this.destinations = destinations - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeEnum(status) - out.writeOptionalInt(totalDestinations) - out.writeInt(destinations.size) - for (destination in destinations) { - destination.writeTo(out) - } - } - - @Throws(IOException::class) - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .field("totalDestinations", totalDestinations) - .field("destinations", destinations) - - return builder.endObject() - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountAction.kt b/alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountAction.kt deleted file mode 100644 index c16a28e17..000000000 --- a/alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountAction.kt +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionType - -class GetEmailAccountAction private constructor() : ActionType(NAME, ::GetEmailAccountResponse) { - companion object { - val INSTANCE = GetEmailAccountAction() - const val NAME = "cluster:admin/opendistro/alerting/destination/email_account/get" - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountRequest.kt b/alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountRequest.kt deleted file mode 100644 index 94b79726e..000000000 --- a/alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountRequest.kt +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionRequestValidationException -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.rest.RestRequest -import org.opensearch.search.fetch.subphase.FetchSourceContext -import java.io.IOException - -class GetEmailAccountRequest : ActionRequest { - val emailAccountID: String - val version: Long - val method: RestRequest.Method - val srcContext: FetchSourceContext? - - constructor( - emailAccountID: String, - version: Long, - method: RestRequest.Method, - srcContext: FetchSourceContext? - ) : super() { - this.emailAccountID = emailAccountID - this.version = version - this.method = method - this.srcContext = srcContext - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString(), // emailAccountID - sin.readLong(), // version - sin.readEnum(RestRequest.Method::class.java), // method - if (sin.readBoolean()) { - FetchSourceContext(sin) // srcContext - } else null - ) - - override fun validate(): ActionRequestValidationException? 
{ - return null - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(emailAccountID) - out.writeLong(version) - out.writeEnum(method) - if (srcContext != null) { - out.writeBoolean(true) - srcContext.writeTo(out) - } else { - out.writeBoolean(false) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountResponse.kt b/alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountResponse.kt deleted file mode 100644 index 6f26326dd..000000000 --- a/alerting/bin/main/org/opensearch/alerting/action/GetEmailAccountResponse.kt +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.alerting.model.destination.email.EmailAccount -import org.opensearch.commons.alerting.util.IndexUtils.Companion._ID -import org.opensearch.commons.alerting.util.IndexUtils.Companion._PRIMARY_TERM -import org.opensearch.commons.alerting.util.IndexUtils.Companion._SEQ_NO -import org.opensearch.commons.alerting.util.IndexUtils.Companion._VERSION -import org.opensearch.core.action.ActionResponse -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.ToXContentObject -import org.opensearch.core.xcontent.XContentBuilder -import java.io.IOException - -class GetEmailAccountResponse : ActionResponse, ToXContentObject { - var id: String - var version: Long - var seqNo: Long - var primaryTerm: Long - var status: RestStatus - var emailAccount: EmailAccount? - - constructor( - id: String, - version: Long, - seqNo: Long, - primaryTerm: Long, - status: RestStatus, - emailAccount: EmailAccount? 
- ) : super() { - this.id = id - this.version = version - this.seqNo = seqNo - this.primaryTerm = primaryTerm - this.status = status - this.emailAccount = emailAccount - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString(), // id - sin.readLong(), // version - sin.readLong(), // seqNo - sin.readLong(), // primaryTerm - sin.readEnum(RestStatus::class.java), // RestStatus - if (sin.readBoolean()) { - EmailAccount.readFrom(sin) // emailAccount - } else null - ) - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(id) - out.writeLong(version) - out.writeLong(seqNo) - out.writeLong(primaryTerm) - out.writeEnum(status) - if (emailAccount != null) { - out.writeBoolean(true) - emailAccount?.writeTo(out) - } else { - out.writeBoolean(false) - } - } - - @Throws(IOException::class) - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .field(_ID, id) - .field(_VERSION, version) - .field(_SEQ_NO, seqNo) - .field(_PRIMARY_TERM, primaryTerm) - if (emailAccount != null) { - builder.field("email_account", emailAccount) - } - - return builder.endObject() - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupAction.kt b/alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupAction.kt deleted file mode 100644 index a9793f156..000000000 --- a/alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupAction.kt +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionType - -class GetEmailGroupAction private constructor() : ActionType(NAME, ::GetEmailGroupResponse) { - companion object { - val INSTANCE = GetEmailGroupAction() - const val NAME = "cluster:admin/opendistro/alerting/destination/email_group/get" - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupRequest.kt b/alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupRequest.kt deleted file mode 100644 index bb245b075..000000000 --- a/alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupRequest.kt +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionRequestValidationException -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.rest.RestRequest -import org.opensearch.search.fetch.subphase.FetchSourceContext -import java.io.IOException - -class GetEmailGroupRequest : ActionRequest { - val emailGroupID: String - val version: Long - val method: RestRequest.Method - val srcContext: FetchSourceContext? - - constructor( - emailGroupID: String, - version: Long, - method: RestRequest.Method, - srcContext: FetchSourceContext? - ) : super() { - this.emailGroupID = emailGroupID - this.version = version - this.method = method - this.srcContext = srcContext - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString(), // emailGroupID - sin.readLong(), // version - sin.readEnum(RestRequest.Method::class.java), // method - if (sin.readBoolean()) { - FetchSourceContext(sin) // srcContext - } else null - ) - - override fun validate(): ActionRequestValidationException? 
{ - return null - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(emailGroupID) - out.writeLong(version) - out.writeEnum(method) - if (srcContext != null) { - out.writeBoolean(true) - srcContext.writeTo(out) - } else { - out.writeBoolean(false) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupResponse.kt b/alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupResponse.kt deleted file mode 100644 index c688033e4..000000000 --- a/alerting/bin/main/org/opensearch/alerting/action/GetEmailGroupResponse.kt +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.alerting.model.destination.email.EmailGroup -import org.opensearch.commons.alerting.util.IndexUtils.Companion._ID -import org.opensearch.commons.alerting.util.IndexUtils.Companion._PRIMARY_TERM -import org.opensearch.commons.alerting.util.IndexUtils.Companion._SEQ_NO -import org.opensearch.commons.alerting.util.IndexUtils.Companion._VERSION -import org.opensearch.core.action.ActionResponse -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.ToXContentObject -import org.opensearch.core.xcontent.XContentBuilder -import java.io.IOException - -class GetEmailGroupResponse : ActionResponse, ToXContentObject { - var id: String - var version: Long - var seqNo: Long - var primaryTerm: Long - var status: RestStatus - var emailGroup: EmailGroup? - - constructor( - id: String, - version: Long, - seqNo: Long, - primaryTerm: Long, - status: RestStatus, - emailGroup: EmailGroup? 
- ) : super() { - this.id = id - this.version = version - this.seqNo = seqNo - this.primaryTerm = primaryTerm - this.status = status - this.emailGroup = emailGroup - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString(), // id - sin.readLong(), // version - sin.readLong(), // seqNo - sin.readLong(), // primaryTerm - sin.readEnum(RestStatus::class.java), // RestStatus - if (sin.readBoolean()) { - EmailGroup.readFrom(sin) // emailGroup - } else null - ) - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(id) - out.writeLong(version) - out.writeLong(seqNo) - out.writeLong(primaryTerm) - out.writeEnum(status) - if (emailGroup != null) { - out.writeBoolean(true) - emailGroup?.writeTo(out) - } else { - out.writeBoolean(false) - } - } - - @Throws(IOException::class) - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .field(_ID, id) - .field(_VERSION, version) - .field(_SEQ_NO, seqNo) - .field(_PRIMARY_TERM, primaryTerm) - if (emailGroup != null) { - builder.field("email_group", emailGroup) - } - - return builder.endObject() - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/action/SearchEmailAccountAction.kt b/alerting/bin/main/org/opensearch/alerting/action/SearchEmailAccountAction.kt deleted file mode 100644 index 12cf21299..000000000 --- a/alerting/bin/main/org/opensearch/alerting/action/SearchEmailAccountAction.kt +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionType -import org.opensearch.action.search.SearchResponse - -class SearchEmailAccountAction private constructor() : ActionType(NAME, ::SearchResponse) { - companion object { - val INSTANCE = SearchEmailAccountAction() - const val NAME = "cluster:admin/opendistro/alerting/destination/email_account/search" - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/action/SearchEmailGroupAction.kt b/alerting/bin/main/org/opensearch/alerting/action/SearchEmailGroupAction.kt deleted file mode 100644 index da113c857..000000000 --- a/alerting/bin/main/org/opensearch/alerting/action/SearchEmailGroupAction.kt +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.action.ActionType -import org.opensearch.action.search.SearchResponse - -class SearchEmailGroupAction private constructor() : ActionType(NAME, ::SearchResponse) { - companion object { - val INSTANCE = SearchEmailGroupAction() - const val NAME = "cluster:admin/opendistro/alerting/destination/email_group/search" - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/alerts/AlertIndices.kt b/alerting/bin/main/org/opensearch/alerting/alerts/AlertIndices.kt deleted file mode 100644 index bf8701c0f..000000000 --- a/alerting/bin/main/org/opensearch/alerting/alerts/AlertIndices.kt +++ /dev/null @@ -1,588 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.alerts - -import org.apache.logging.log4j.LogManager -import org.opensearch.ExceptionsHelper -import org.opensearch.ResourceAlreadyExistsException -import org.opensearch.action.admin.cluster.state.ClusterStateRequest -import org.opensearch.action.admin.cluster.state.ClusterStateResponse -import 
org.opensearch.action.admin.indices.alias.Alias -import org.opensearch.action.admin.indices.create.CreateIndexRequest -import org.opensearch.action.admin.indices.create.CreateIndexResponse -import org.opensearch.action.admin.indices.delete.DeleteIndexRequest -import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequest -import org.opensearch.action.admin.indices.exists.indices.IndicesExistsResponse -import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest -import org.opensearch.action.admin.indices.rollover.RolloverRequest -import org.opensearch.action.admin.indices.rollover.RolloverResponse -import org.opensearch.action.support.IndicesOptions -import org.opensearch.action.support.master.AcknowledgedResponse -import org.opensearch.alerting.alerts.AlertIndices.Companion.ALERT_HISTORY_WRITE_INDEX -import org.opensearch.alerting.alerts.AlertIndices.Companion.ALERT_INDEX -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_HISTORY_ENABLED -import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_HISTORY_INDEX_MAX_AGE -import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_HISTORY_MAX_DOCS -import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_HISTORY_RETENTION_PERIOD -import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERT_HISTORY_ROLLOVER_PERIOD -import org.opensearch.alerting.settings.AlertingSettings.Companion.FINDING_HISTORY_ENABLED -import org.opensearch.alerting.settings.AlertingSettings.Companion.FINDING_HISTORY_INDEX_MAX_AGE -import org.opensearch.alerting.settings.AlertingSettings.Companion.FINDING_HISTORY_MAX_DOCS -import org.opensearch.alerting.settings.AlertingSettings.Companion.FINDING_HISTORY_RETENTION_PERIOD -import org.opensearch.alerting.settings.AlertingSettings.Companion.FINDING_HISTORY_ROLLOVER_PERIOD -import org.opensearch.alerting.settings.AlertingSettings.Companion.REQUEST_TIMEOUT -import org.opensearch.alerting.util.AlertingException -import org.opensearch.alerting.util.IndexUtils -import org.opensearch.client.Client -import org.opensearch.cluster.ClusterChangedEvent -import org.opensearch.cluster.ClusterStateListener -import org.opensearch.cluster.metadata.IndexMetadata -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.settings.Settings -import org.opensearch.common.unit.TimeValue -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.DataSources -import org.opensearch.core.action.ActionListener -import org.opensearch.threadpool.Scheduler.Cancellable -import org.opensearch.threadpool.ThreadPool -import java.time.Instant - -/** - * Class to manage the creation and rollover of alert indices and alert history indices. In progress alerts are stored - * in [ALERT_INDEX]. Completed alerts are written to [ALERT_HISTORY_WRITE_INDEX] which is an alias that points at the - * current index to which completed alerts are written. [ALERT_HISTORY_WRITE_INDEX] is periodically rolled over to a new - * date based index. The frequency of rolling over indices is controlled by the `opendistro.alerting.alert_rollover_period` setting. - * - * These indexes are created when first used and are then rolled over every `alert_rollover_period`. The rollover is - * initiated on the cluster manager node to ensure only a single node tries to roll it over. 
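 *
 * For illustration, a minimal sketch of the rollover request this class builds (the constants are
 * the ones declared below; `client` and `listener` are assumed to be in scope, and the thresholds
 * are placeholders rather than the production defaults):
 *
 *     val request = RolloverRequest(ALERT_HISTORY_WRITE_INDEX, null) // null lets OpenSearch derive the next date-math index name
 *     request.createIndexRequest.index(ALERT_HISTORY_INDEX_PATTERN)
 *     request.addMaxIndexDocsCondition(1000L)                        // placeholder doc-count condition
 *     request.addMaxIndexAgeCondition(TimeValue.timeValueDays(30))   // placeholder age condition
 *     client.admin().indices().rolloverIndex(request, listener)
 *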
Once we have a curator functionality - * in Scheduled Jobs we can migrate to using that to rollover the index. - */ -// TODO: refactor to make a generic version of this class for findings and alerts -class AlertIndices( - settings: Settings, - private val client: Client, - private val threadPool: ThreadPool, - private val clusterService: ClusterService -) : ClusterStateListener { - - init { - clusterService.addListener(this) - clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_ENABLED) { alertHistoryEnabled = it } - clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_MAX_DOCS) { alertHistoryMaxDocs = it } - clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_INDEX_MAX_AGE) { alertHistoryMaxAge = it } - clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_ROLLOVER_PERIOD) { - alertHistoryRolloverPeriod = it - rescheduleAlertRollover() - } - clusterService.clusterSettings.addSettingsUpdateConsumer(ALERT_HISTORY_RETENTION_PERIOD) { - alertHistoryRetentionPeriod = it - } - clusterService.clusterSettings.addSettingsUpdateConsumer(REQUEST_TIMEOUT) { requestTimeout = it } - - clusterService.clusterSettings.addSettingsUpdateConsumer(FINDING_HISTORY_ENABLED) { findingHistoryEnabled = it } - clusterService.clusterSettings.addSettingsUpdateConsumer(FINDING_HISTORY_MAX_DOCS) { findingHistoryMaxDocs = it } - clusterService.clusterSettings.addSettingsUpdateConsumer(FINDING_HISTORY_INDEX_MAX_AGE) { findingHistoryMaxAge = it } - clusterService.clusterSettings.addSettingsUpdateConsumer(FINDING_HISTORY_ROLLOVER_PERIOD) { - findingHistoryRolloverPeriod = it - rescheduleFindingRollover() - } - clusterService.clusterSettings.addSettingsUpdateConsumer(FINDING_HISTORY_RETENTION_PERIOD) { - findingHistoryRetentionPeriod = it - } - } - - companion object { - - /** The index containing in-progress alerts. */ - const val ALERT_INDEX = ".opendistro-alerting-alerts" - - /** The alias of the index in which to write alert history */ - const val ALERT_HISTORY_WRITE_INDEX = ".opendistro-alerting-alert-history-write" - - /** The alias of the index in which to write findings */ - const val FINDING_HISTORY_WRITE_INDEX = ".opensearch-alerting-finding-history-write" - - /** The index name pattern referring to all alert history indices */ - const val ALERT_HISTORY_ALL = ".opendistro-alerting-alert-history*" - - /** The index name pattern referring to all finding history indices */ - const val FINDING_HISTORY_ALL = ".opensearch-alerting-finding-history*" - - /** The index name pattern to create alert history indices */ - const val ALERT_HISTORY_INDEX_PATTERN = "<.opendistro-alerting-alert-history-{now/d}-1>" - - /** The index name pattern to create finding history indices */ - const val FINDING_HISTORY_INDEX_PATTERN = "<.opensearch-alerting-finding-history-{now/d}-1>" - - /** The index name pattern to query all alerts, history and current alerts. */ - const val ALL_ALERT_INDEX_PATTERN = ".opendistro-alerting-alert*" - - /** The index name pattern to query all findings, history and current findings.
*/ - const val ALL_FINDING_INDEX_PATTERN = ".opensearch-alerting-finding*" - - @JvmStatic - fun alertMapping() = - AlertIndices::class.java.getResource("alert_mapping.json").readText() - - @JvmStatic - fun findingMapping() = - AlertIndices::class.java.getResource("finding_mapping.json").readText() - - private val logger = LogManager.getLogger(AlertIndices::class.java) - } - - @Volatile private var alertHistoryEnabled = AlertingSettings.ALERT_HISTORY_ENABLED.get(settings) - - @Volatile private var findingHistoryEnabled = AlertingSettings.FINDING_HISTORY_ENABLED.get(settings) - - @Volatile private var alertHistoryMaxDocs = AlertingSettings.ALERT_HISTORY_MAX_DOCS.get(settings) - - @Volatile private var findingHistoryMaxDocs = AlertingSettings.FINDING_HISTORY_MAX_DOCS.get(settings) - - @Volatile private var alertHistoryMaxAge = AlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE.get(settings) - - @Volatile private var findingHistoryMaxAge = AlertingSettings.FINDING_HISTORY_INDEX_MAX_AGE.get(settings) - - @Volatile private var alertHistoryRolloverPeriod = AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.get(settings) - - @Volatile private var findingHistoryRolloverPeriod = AlertingSettings.FINDING_HISTORY_ROLLOVER_PERIOD.get(settings) - - @Volatile private var alertHistoryRetentionPeriod = AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD.get(settings) - - @Volatile private var findingHistoryRetentionPeriod = AlertingSettings.FINDING_HISTORY_RETENTION_PERIOD.get(settings) - - @Volatile private var requestTimeout = AlertingSettings.REQUEST_TIMEOUT.get(settings) - - @Volatile private var isClusterManager = false - - // for JobsMonitor to report - var lastRolloverTime: TimeValue? = null - - private var alertHistoryIndexInitialized: Boolean = false - - private var findingHistoryIndexInitialized: Boolean = false - - private var alertIndexInitialized: Boolean = false - - private var scheduledAlertRollover: Cancellable? = null - - private var scheduledFindingRollover: Cancellable? = null - - fun onMaster() { - try { - // try to rollover immediately as we might be restarting the cluster - rolloverAlertHistoryIndex() - rolloverFindingHistoryIndex() - // schedule the next rollover for approx MAX_AGE later - scheduledAlertRollover = threadPool - .scheduleWithFixedDelay({ rolloverAndDeleteAlertHistoryIndices() }, alertHistoryRolloverPeriod, executorName()) - scheduledFindingRollover = threadPool - .scheduleWithFixedDelay({ rolloverAndDeleteFindingHistoryIndices() }, findingHistoryRolloverPeriod, executorName()) - } catch (e: Exception) { - // This should be run on cluster startup - logger.error( - "Error creating alert/finding indices. 
" + - "Alerts/Findings can't be recorded until master node is restarted.", - e - ) - } - } - - fun offMaster() { - scheduledAlertRollover?.cancel() - scheduledFindingRollover?.cancel() - } - - private fun executorName(): String { - return ThreadPool.Names.MANAGEMENT - } - - override fun clusterChanged(event: ClusterChangedEvent) { - // Instead of using a LocalNodeClusterManagerListener to track master changes, this service will - // track them here to avoid conditions where master listener events run after other - // listeners that depend on what happened in the master listener - if (this.isClusterManager != event.localNodeClusterManager()) { - this.isClusterManager = event.localNodeClusterManager() - if (this.isClusterManager) { - onMaster() - } else { - offMaster() - } - } - - // if the indexes have been deleted they need to be reinitialized - alertIndexInitialized = event.state().routingTable().hasIndex(ALERT_INDEX) - alertHistoryIndexInitialized = event.state().metadata().hasAlias(ALERT_HISTORY_WRITE_INDEX) - findingHistoryIndexInitialized = event.state().metadata().hasAlias(FINDING_HISTORY_WRITE_INDEX) - } - - private fun rescheduleAlertRollover() { - if (clusterService.state().nodes.isLocalNodeElectedMaster) { - scheduledAlertRollover?.cancel() - scheduledAlertRollover = threadPool - .scheduleWithFixedDelay({ rolloverAndDeleteAlertHistoryIndices() }, alertHistoryRolloverPeriod, executorName()) - } - } - - private fun rescheduleFindingRollover() { - if (clusterService.state().nodes.isLocalNodeElectedMaster) { - scheduledFindingRollover?.cancel() - scheduledFindingRollover = threadPool - .scheduleWithFixedDelay({ rolloverAndDeleteFindingHistoryIndices() }, findingHistoryRolloverPeriod, executorName()) - } - } - - fun isAlertInitialized(): Boolean { - return alertIndexInitialized && alertHistoryIndexInitialized - } - - fun isAlertInitialized(dataSources: DataSources): Boolean { - val alertsIndex = dataSources.alertsIndex - val alertsHistoryIndex = dataSources.alertsHistoryIndex - if (alertsIndex == ALERT_INDEX && alertsHistoryIndex == ALERT_HISTORY_WRITE_INDEX) { - return alertIndexInitialized && alertHistoryIndexInitialized - } - if ( - clusterService.state().metadata.indices.containsKey(alertsIndex) && - clusterService.state().metadata.hasAlias(alertsHistoryIndex) - ) { - return true - } - return false - } - - fun isAlertHistoryEnabled(): Boolean { - return alertHistoryEnabled - } - - fun isFindingHistoryEnabled(): Boolean = findingHistoryEnabled - - suspend fun createOrUpdateAlertIndex() { - if (!alertIndexInitialized) { - alertIndexInitialized = createIndex(ALERT_INDEX, alertMapping()) - if (alertIndexInitialized) IndexUtils.alertIndexUpdated() - } else { - if (!IndexUtils.alertIndexUpdated) updateIndexMapping(ALERT_INDEX, alertMapping()) - } - alertIndexInitialized - } - suspend fun createOrUpdateAlertIndex(dataSources: DataSources) { - if (dataSources.alertsIndex == ALERT_INDEX) { - return createOrUpdateAlertIndex() - } - val alertsIndex = dataSources.alertsIndex - if (!clusterService.state().routingTable().hasIndex(alertsIndex)) { - alertIndexInitialized = createIndex(alertsIndex!!, alertMapping()) - } else { - updateIndexMapping(alertsIndex!!, alertMapping()) - } - } - - suspend fun createOrUpdateInitialAlertHistoryIndex(dataSources: DataSources) { - if (dataSources.alertsIndex == ALERT_INDEX) { - return createOrUpdateInitialAlertHistoryIndex() - } - if (!clusterService.state().metadata.hasAlias(dataSources.alertsHistoryIndex)) { - createIndex( - 
dataSources.alertsHistoryIndexPattern ?: ALERT_HISTORY_INDEX_PATTERN, - alertMapping(), - dataSources.alertsHistoryIndex - ) - } else { - updateIndexMapping( - dataSources.alertsHistoryIndex ?: ALERT_HISTORY_WRITE_INDEX, - alertMapping(), - true - ) - } - } - suspend fun createOrUpdateInitialAlertHistoryIndex() { - if (!alertHistoryIndexInitialized) { - alertHistoryIndexInitialized = createIndex(ALERT_HISTORY_INDEX_PATTERN, alertMapping(), ALERT_HISTORY_WRITE_INDEX) - if (alertHistoryIndexInitialized) { - IndexUtils.lastUpdatedAlertHistoryIndex = IndexUtils.getIndexNameWithAlias( - clusterService.state(), - ALERT_HISTORY_WRITE_INDEX - ) - } - } else { - updateIndexMapping(ALERT_HISTORY_WRITE_INDEX, alertMapping(), true) - } - alertHistoryIndexInitialized - } - - suspend fun createOrUpdateInitialFindingHistoryIndex() { - if (!findingHistoryIndexInitialized) { - findingHistoryIndexInitialized = createIndex(FINDING_HISTORY_INDEX_PATTERN, findingMapping(), FINDING_HISTORY_WRITE_INDEX) - if (findingHistoryIndexInitialized) { - IndexUtils.lastUpdatedFindingHistoryIndex = IndexUtils.getIndexNameWithAlias( - clusterService.state(), - FINDING_HISTORY_WRITE_INDEX - ) - } - } else { - updateIndexMapping(FINDING_HISTORY_WRITE_INDEX, findingMapping(), true) - } - findingHistoryIndexInitialized - } - - suspend fun createOrUpdateInitialFindingHistoryIndex(dataSources: DataSources) { - if (dataSources.findingsIndex == FINDING_HISTORY_WRITE_INDEX) { - return createOrUpdateInitialFindingHistoryIndex() - } - val findingsIndex = dataSources.findingsIndex - val findingsIndexPattern = dataSources.findingsIndexPattern ?: FINDING_HISTORY_INDEX_PATTERN - if (!clusterService.state().metadata().hasAlias(findingsIndex)) { - createIndex( - findingsIndexPattern, - findingMapping(), - findingsIndex - ) - } else { - updateIndexMapping(findingsIndex, findingMapping(), true) - } - } - - private suspend fun createIndex(index: String, schemaMapping: String, alias: String? = null): Boolean { - // This should be a fast check of local cluster state. Should be exceedingly rare that the local cluster - // state does not contain the index and multiple nodes concurrently try to create the index. 
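        // A standalone sketch of this check-then-create idiom (the names are this function's
        // parameters; the calls mirror the ones used below):
        //
        //     val exists = client.admin().indices()
        //         .exists(IndicesExistsRequest(index).local(true)).actionGet().isExists
        //     if (!exists) {
        //         try {
        //             client.admin().indices().create(CreateIndexRequest(index).mapping(schemaMapping)).actionGet()
        //         } catch (t: Exception) {
        //             // Lost the creation race: another node created the index first
        //             if (ExceptionsHelper.unwrapCause(t) !is ResourceAlreadyExistsException) throw t
        //         }
        //     }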
- // If it does happen that error is handled we catch the ResourceAlreadyExistsException - val existsResponse: IndicesExistsResponse = client.admin().indices().suspendUntil { - exists(IndicesExistsRequest(index).local(true), it) - } - if (existsResponse.isExists) return true - - val request = CreateIndexRequest(index) - .mapping(schemaMapping) - .settings(Settings.builder().put("index.hidden", true).build()) - - if (alias != null) request.alias(Alias(alias)) - return try { - val createIndexResponse: CreateIndexResponse = client.admin().indices().suspendUntil { create(request, it) } - createIndexResponse.isAcknowledged - } catch (t: Exception) { - if (ExceptionsHelper.unwrapCause(t) is ResourceAlreadyExistsException) { - true - } else { - throw AlertingException.wrap(t) - } - } - } - - private suspend fun updateIndexMapping(index: String, mapping: String, alias: Boolean = false) { - val clusterState = clusterService.state() - var targetIndex = index - if (alias) { - targetIndex = IndexUtils.getIndexNameWithAlias(clusterState, index) - } - - // TODO call getMapping and compare actual mappings here instead of this - if (targetIndex == IndexUtils.lastUpdatedAlertHistoryIndex || targetIndex == IndexUtils.lastUpdatedFindingHistoryIndex) { - return - } - - val putMappingRequest: PutMappingRequest = PutMappingRequest(targetIndex) - .source(mapping, XContentType.JSON) - val updateResponse: AcknowledgedResponse = client.admin().indices().suspendUntil { putMapping(putMappingRequest, it) } - if (updateResponse.isAcknowledged) { - logger.info("Index mapping of $targetIndex is updated") - setIndexUpdateFlag(index, targetIndex) - } else { - logger.info("Failed to update index mapping of $targetIndex") - } - } - - private fun setIndexUpdateFlag(index: String, targetIndex: String) { - when (index) { - ALERT_INDEX -> IndexUtils.alertIndexUpdated() - ALERT_HISTORY_WRITE_INDEX -> IndexUtils.lastUpdatedAlertHistoryIndex = targetIndex - FINDING_HISTORY_WRITE_INDEX -> IndexUtils.lastUpdatedFindingHistoryIndex = targetIndex - } - } - - private fun rolloverAndDeleteAlertHistoryIndices() { - if (alertHistoryEnabled) rolloverAlertHistoryIndex() - deleteOldIndices("History", ALERT_HISTORY_ALL) - } - - private fun rolloverAndDeleteFindingHistoryIndices() { - if (findingHistoryEnabled) rolloverFindingHistoryIndex() - deleteOldIndices("Finding", FINDING_HISTORY_ALL) - } - - private fun rolloverIndex( - initialized: Boolean, - index: String, - pattern: String, - map: String, - docsCondition: Long, - ageCondition: TimeValue, - writeIndex: String - ) { - if (!initialized) { - return - } - - // We have to pass null for newIndexName in order to get Elastic to increment the index count. - val request = RolloverRequest(index, null) - request.createIndexRequest.index(pattern) - .mapping(map) - .settings(Settings.builder().put("index.hidden", true).build()) - request.addMaxIndexDocsCondition(docsCondition) - request.addMaxIndexAgeCondition(ageCondition) - client.admin().indices().rolloverIndex( - request, - object : ActionListener { - override fun onResponse(response: RolloverResponse) { - if (!response.isRolledOver) { - logger.info("$writeIndex not rolled over. 
Conditions were: ${response.conditionStatus}") - } else { - lastRolloverTime = TimeValue.timeValueMillis(threadPool.absoluteTimeInMillis()) - } - } - override fun onFailure(e: Exception) { - logger.error("$writeIndex rollover failed.", e) - } - } - ) - } - - private fun rolloverAlertHistoryIndex() { - rolloverIndex( - alertHistoryIndexInitialized, - ALERT_HISTORY_WRITE_INDEX, - ALERT_HISTORY_INDEX_PATTERN, - alertMapping(), - alertHistoryMaxDocs, - alertHistoryMaxAge, - ALERT_HISTORY_WRITE_INDEX - ) - } - - private fun rolloverFindingHistoryIndex() { - rolloverIndex( - findingHistoryIndexInitialized, - FINDING_HISTORY_WRITE_INDEX, - FINDING_HISTORY_INDEX_PATTERN, - findingMapping(), - findingHistoryMaxDocs, - findingHistoryMaxAge, - FINDING_HISTORY_WRITE_INDEX - ) - } - - private fun deleteOldIndices(tag: String, indices: String) { - logger.info("Checking for old $tag indices to delete") - val clusterStateRequest = ClusterStateRequest() - .clear() - .indices(indices) - .metadata(true) - .local(true) - .indicesOptions(IndicesOptions.strictExpand()) - client.admin().cluster().state( - clusterStateRequest, - object : ActionListener<ClusterStateResponse> { - override fun onResponse(clusterStateResponse: ClusterStateResponse) { - if (clusterStateResponse.state.metadata.indices.isNotEmpty()) { - val indicesToDelete = getIndicesToDelete(clusterStateResponse) - logger.info("Deleting old $tag indices: $indicesToDelete") - deleteAllOldHistoryIndices(indicesToDelete) - } else { - logger.info("No old $tag indices to delete") - } - } - override fun onFailure(e: Exception) { - logger.error("Error fetching cluster state", e) - } - } - ) - } - - private fun getIndicesToDelete(clusterStateResponse: ClusterStateResponse): List<String> { - val indicesToDelete = mutableListOf<String>() - for (entry in clusterStateResponse.state.metadata.indices) { - val indexMetaData = entry.value - getHistoryIndexToDelete(indexMetaData, alertHistoryRetentionPeriod.millis, ALERT_HISTORY_WRITE_INDEX, alertHistoryEnabled) - ?.let { indicesToDelete.add(it) } - getHistoryIndexToDelete(indexMetaData, findingHistoryRetentionPeriod.millis, FINDING_HISTORY_WRITE_INDEX, findingHistoryEnabled) - ?.let { indicesToDelete.add(it) } - } - return indicesToDelete - } - - private fun getHistoryIndexToDelete( - indexMetadata: IndexMetadata, - retentionPeriodMillis: Long, - writeIndex: String, - historyEnabled: Boolean - ): String?
{ - val creationTime = indexMetadata.creationDate - if ((Instant.now().toEpochMilli() - creationTime) > retentionPeriodMillis) { - val alias = indexMetadata.aliases.entries.firstOrNull { writeIndex == it.value.alias } - if (alias != null) { - if (historyEnabled) { - // If the index has the write alias and history is enabled, don't delete the index - return null - } else if (writeIndex == ALERT_HISTORY_WRITE_INDEX) { - // Otherwise reset alertHistoryIndexInitialized since the index will be deleted - alertHistoryIndexInitialized = false - } else if (writeIndex == FINDING_HISTORY_WRITE_INDEX) { - // Otherwise reset findingHistoryIndexInitialized since the index will be deleted - findingHistoryIndexInitialized = false - } - } - - return indexMetadata.index.name - } - return null - } - - private fun deleteAllOldHistoryIndices(indicesToDelete: List<String>) { - if (indicesToDelete.isNotEmpty()) { - val deleteIndexRequest = DeleteIndexRequest(*indicesToDelete.toTypedArray()) - client.admin().indices().delete( - deleteIndexRequest, - object : ActionListener<AcknowledgedResponse> { - override fun onResponse(deleteIndicesResponse: AcknowledgedResponse) { - if (!deleteIndicesResponse.isAcknowledged) { - logger.error( - "Could not delete one or more Alerting/Finding history indices: $indicesToDelete. Retrying one by one." - ) - deleteOldHistoryIndex(indicesToDelete) - } - } - override fun onFailure(e: Exception) { - logger.error("Deleting Alerting/Finding history indices $indicesToDelete failed. Retrying one by one.", e) - deleteOldHistoryIndex(indicesToDelete) - } - } - ) - } - } - - private fun deleteOldHistoryIndex(indicesToDelete: List<String>) { - for (index in indicesToDelete) { - // Delete each index individually so a single failure doesn't block the rest - val singleDeleteRequest = DeleteIndexRequest(index) - client.admin().indices().delete( - singleDeleteRequest, - object : ActionListener<AcknowledgedResponse> { - override fun onResponse(acknowledgedResponse: AcknowledgedResponse?)
{ - if (acknowledgedResponse != null) { - if (!acknowledgedResponse.isAcknowledged) { - logger.error("Could not delete one or more Alerting/Finding history indices: $index") - } - } - } - override fun onFailure(e: Exception) { - logger.debug("Exception ${e.message} while deleting the index $index") - } - } - ) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/alerts/AlertMover.kt b/alerting/bin/main/org/opensearch/alerting/alerts/AlertMover.kt deleted file mode 100644 index 07b1a3a91..000000000 --- a/alerting/bin/main/org/opensearch/alerting/alerts/AlertMover.kt +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.alerts - -import org.apache.logging.log4j.LogManager -import org.opensearch.action.bulk.BulkRequest -import org.opensearch.action.bulk.BulkResponse -import org.opensearch.action.delete.DeleteRequest -import org.opensearch.action.get.GetRequest -import org.opensearch.action.get.GetResponse -import org.opensearch.action.index.IndexRequest -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.alerting.MonitorRunnerExecutionContext -import org.opensearch.alerting.alerts.AlertIndices.Companion.ALERT_HISTORY_WRITE_INDEX -import org.opensearch.alerting.alerts.AlertIndices.Companion.ALERT_INDEX -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.util.ScheduledJobUtils -import org.opensearch.client.Client -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentFactory -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.CompositeInput -import org.opensearch.commons.alerting.model.DataSources -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.core.common.bytes.BytesReference -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils -import org.opensearch.index.VersionType -import org.opensearch.index.query.QueryBuilders -import org.opensearch.search.builder.SearchSourceBuilder - -private val log = LogManager.getLogger(AlertMover::class.java) - -class AlertMover { - companion object { - /** - * Moves defunct active alerts to the alert history index when the corresponding monitor or trigger is deleted. - * - * The logic for moving alerts consists of: - * 1. Find active alerts: - * a. matching monitorId if no monitor is provided (postDelete) - * b. matching monitorId and no triggerIds if monitor is provided (postIndex) - * 2. Move alerts over to DataSources.alertsHistoryIndex as DELETED - * 3. Delete alerts from monitor's DataSources.alertsIndex - * 4. Schedule a retry if there were any failures - */ - suspend fun moveAlerts(client: Client, monitorId: String, monitor: Monitor?) 
{ - var alertIndex = monitor?.dataSources?.alertsIndex ?: ALERT_INDEX - var alertHistoryIndex = monitor?.dataSources?.alertsHistoryIndex ?: ALERT_HISTORY_WRITE_INDEX - - val boolQuery = QueryBuilders.boolQuery() - .filter(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, monitorId)) - - if (monitor != null) { - boolQuery.mustNot(QueryBuilders.termsQuery(Alert.TRIGGER_ID_FIELD, monitor.triggers.map { it.id })) - } - - val activeAlertsQuery = SearchSourceBuilder.searchSource() - .query(boolQuery) - .version(true) - - val activeAlertsRequest = SearchRequest(alertIndex) - .routing(monitorId) - .source(activeAlertsQuery) - val response: SearchResponse = client.suspendUntil { search(activeAlertsRequest, it) } - - // If no alerts are found, simply return - if (response.hits.totalHits?.value == 0L) return - val indexRequests = response.hits.map { hit -> - IndexRequest(alertHistoryIndex) - .routing(monitorId) - .source( - Alert.parse(alertContentParser(hit.sourceRef), hit.id, hit.version) - .copy(state = Alert.State.DELETED) - .toXContentWithUser(XContentFactory.jsonBuilder()) - ) - .version(hit.version) - .versionType(VersionType.EXTERNAL_GTE) - .id(hit.id) - } - val copyRequest = BulkRequest().add(indexRequests) - val copyResponse: BulkResponse = client.suspendUntil { bulk(copyRequest, it) } - - val deleteRequests = copyResponse.items.filterNot { it.isFailed }.map { - DeleteRequest(alertIndex, it.id) - .routing(monitorId) - .version(it.version) - .versionType(VersionType.EXTERNAL_GTE) - } - val deleteResponse: BulkResponse = client.suspendUntil { bulk(BulkRequest().add(deleteRequests), it) } - - if (copyResponse.hasFailures()) { - val retryCause = copyResponse.items.filter { it.isFailed } - .firstOrNull { it.status() == RestStatus.TOO_MANY_REQUESTS } - ?.failure?.cause - throw RuntimeException( - "Failed to copy alerts for [$monitorId, ${monitor?.triggers?.map { it.id }}]: " + - copyResponse.buildFailureMessage(), - retryCause - ) - } - if (deleteResponse.hasFailures()) { - val retryCause = deleteResponse.items.filter { it.isFailed } - .firstOrNull { it.status() == RestStatus.TOO_MANY_REQUESTS } - ?.failure?.cause - throw RuntimeException( - "Failed to delete alerts for [$monitorId, ${monitor?.triggers?.map { it.id }}]: " + - deleteResponse.buildFailureMessage(), - retryCause - ) - } - } - - private fun alertContentParser(bytesReference: BytesReference): XContentParser { - val xcp = XContentHelper.createParser( - NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, - bytesReference, XContentType.JSON - ) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - return xcp - } - - /** - * Moves defunct active alerts to the alert history index when the corresponding workflow or trigger is deleted. - * - * The logic for moving alerts consists of: - * 1. Find active alerts: - * a. matching workflowId if no workflow is provided (postDelete) - * b. matching workflowid and chained alert triggerIds if monitor is provided (postIndex) - * 2. Move alerts over to DataSources.alertsHistoryIndex as DELETED - * 3. Delete alerts from monitor's DataSources.alertsIndex - * 4. 
Schedule a retry if there were any failures - */ - suspend fun moveAlerts(client: Client, workflowId: String, workflow: Workflow?, monitorCtx: MonitorRunnerExecutionContext) { - var alertIndex = ALERT_INDEX - var alertHistoryIndex = ALERT_HISTORY_WRITE_INDEX - if (workflow != null) { - if ( - workflow.inputs.isNotEmpty() && workflow.inputs[0] is CompositeInput && - (workflow.inputs[0] as CompositeInput).sequence.delegates.isNotEmpty() - ) { - var i = 0 - val delegates = (workflow.inputs[0] as CompositeInput).sequence.delegates - try { - var getResponse: GetResponse? = null - while (i < delegates.size && (getResponse == null || getResponse.isExists == false)) { - getResponse = - client.suspendUntil { - client.get( - GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, delegates[i].monitorId), - it - ) - } - if (getResponse!!.isExists) { - val monitor = - ScheduledJobUtils.parseMonitorFromScheduledJobDocSource( - monitorCtx.xContentRegistry!!, - response = getResponse - ) - - alertIndex = monitor.dataSources.alertsIndex - alertHistoryIndex = - if (monitor.dataSources.alertsHistoryIndex == null) alertHistoryIndex - else monitor.dataSources.alertsHistoryIndex!! - } - i++ - } - } catch (e: Exception) { - log.error("Failed to get delegate monitor for workflow $workflowId. Assuming default alert indices", e) - } - } - } - val dataSources = DataSources().copy(alertsHistoryIndex = alertHistoryIndex, alertsIndex = alertIndex) - /** check if alert index is initialized **/ - if (monitorCtx.alertIndices!!.isAlertInitialized(dataSources) == false) - return - val boolQuery = QueryBuilders.boolQuery() - .must(QueryBuilders.termQuery(Alert.WORKFLOW_ID_FIELD, workflowId)) - - if (workflow != null) { - boolQuery.mustNot(QueryBuilders.termsQuery(Alert.TRIGGER_ID_FIELD, workflow.triggers.map { it.id })) - } - - val activeAlertsQuery = SearchSourceBuilder.searchSource() - .query(boolQuery) - .version(true) - - val activeAlertsRequest = SearchRequest(alertIndex) - .routing(workflowId) - .source(activeAlertsQuery) - val response: SearchResponse = client.suspendUntil { search(activeAlertsRequest, it) } - - // If no alerts are found, simply return - if (response.hits.totalHits?.value == 0L) return - val indexRequests = response.hits.map { hit -> - IndexRequest(alertHistoryIndex) - .routing(workflowId) - .source( - Alert.parse(alertContentParser(hit.sourceRef), hit.id, hit.version) - .copy(state = Alert.State.DELETED) - .toXContentWithUser(XContentFactory.jsonBuilder()) - ) - .version(hit.version) - .versionType(VersionType.EXTERNAL_GTE) - .id(hit.id) - } - val copyRequest = BulkRequest().add(indexRequests) - val copyResponse: BulkResponse = client.suspendUntil { bulk(copyRequest, it) } - - val deleteRequests = copyResponse.items.filterNot { it.isFailed }.map { - DeleteRequest(alertIndex, it.id) - .routing(workflowId) - .version(it.version) - .versionType(VersionType.EXTERNAL_GTE) - } - val deleteResponse: BulkResponse = client.suspendUntil { bulk(BulkRequest().add(deleteRequests), it) } - - if (copyResponse.hasFailures()) { - val retryCause = copyResponse.items.filter { it.isFailed } - .firstOrNull { it.status() == RestStatus.TOO_MANY_REQUESTS } - ?.failure?.cause - throw RuntimeException( - "Failed to copy alerts for [$workflowId, ${workflow?.triggers?.map { it.id }}]: " + - copyResponse.buildFailureMessage(), - retryCause - ) - } - if (deleteResponse.hasFailures()) { - val retryCause = deleteResponse.items.filter { it.isFailed } - .firstOrNull { it.status() == RestStatus.TOO_MANY_REQUESTS } - ?.failure?.cause - throw 
RuntimeException( - "Failed to delete alerts for [$workflowId, ${workflow?.triggers?.map { it.id }}]: " + - deleteResponse.buildFailureMessage(), - retryCause - ) - } - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/alerts/alert_mapping.json b/alerting/bin/main/org/opensearch/alerting/alerts/alert_mapping.json deleted file mode 100644 index 53fb5b0a2..000000000 --- a/alerting/bin/main/org/opensearch/alerting/alerts/alert_mapping.json +++ /dev/null @@ -1,174 +0,0 @@ -{ - "dynamic": "strict", - "_routing": { - "required": true - }, - "_meta" : { - "schema_version": 5 - }, - "properties": { - "schema_version": { - "type": "integer" - }, - "monitor_id": { - "type": "keyword" - }, - "monitor_version": { - "type": "long" - }, - "id": { - "type": "keyword" - }, - "version": { - "type": "long" - }, - "severity": { - "type": "keyword" - }, - "monitor_name": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "monitor_user": { - "properties": { - "name": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "backend_roles": { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "roles": { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "custom_attribute_names": { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - } - } - }, - "execution_id": { - "type": "keyword" - }, - "workflow_id": { - "type": "keyword" - }, - "workflow_name": { - "type": "keyword" - }, - "trigger_id": { - "type": "keyword" - }, - "trigger_name": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "finding_ids": { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "associated_alert_ids": { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "related_doc_ids": { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "state": { - "type": "keyword" - }, - "start_time": { - "type": "date" - }, - "last_notification_time": { - "type": "date" - }, - "acknowledged_time": { - "type": "date" - }, - "end_time": { - "type": "date" - }, - "error_message": { - "type": "text" - }, - "alert_history": { - "type": "nested", - "properties": { - "timestamp": { - "type": "date" - }, - "message": { - "type": "text" - } - } - }, - "action_execution_results": { - "type": "nested", - "properties": { - "action_id": { - "type": "keyword" - }, - "last_execution_time": { - "type": "date" - }, - "throttled_count": { - "type": "integer" - } - } - }, - "agg_alert_content": { - "dynamic": true, - "properties": { - "parent_bucket_path": { - "type": "text" - }, - "bucket_key": { - "type": "text" - } - } - } - } -} \ No newline at end of file diff --git a/alerting/bin/main/org/opensearch/alerting/alerts/finding_mapping.json b/alerting/bin/main/org/opensearch/alerting/alerts/finding_mapping.json deleted file mode 100644 index d2ecc0907..000000000 --- a/alerting/bin/main/org/opensearch/alerting/alerts/finding_mapping.json +++ /dev/null @@ -1,71 +0,0 @@ -{ - "dynamic": "strict", - "_meta" : { - "schema_version": 4 - }, - "properties": { - "schema_version": { - "type": "integer" - }, - "related_doc_ids": { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "monitor_id": { - "type": "keyword" - }, - "monitor_name": { - "type": "keyword" - }, - "id": { - "type": 
"keyword" - }, - "index": { - "type": "keyword" - }, - "queries" : { - "type": "nested", - "properties": { - "id": { - "type": "keyword" - }, - "name": { - "type": "keyword" - }, - "query": { - "type": "text" - }, - "tags": { - "type": "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "fields": { - "type": "text" - } - } - }, - "timestamp": { - "type": "long" - }, - "correlated_doc_ids": { - "type" : "text", - "analyzer": "whitespace", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "execution_id": { - "type": "keyword" - } - } -} \ No newline at end of file diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionParser.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionParser.kt deleted file mode 100644 index 999b9a977..000000000 --- a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionParser.kt +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.chainedAlertCondition.parsers - -import org.opensearch.alerting.chainedAlertCondition.resolvers.ChainedAlertRPNResolver -import org.opensearch.alerting.chainedAlertCondition.tokens.CAExpressionOperator - -/** - * The postfix (Reverse Polish Notation) parser. - * Uses the Shunting-yard algorithm to parse a mathematical expression - * @param triggerExpression String containing the trigger expression for the monitor - */ -class ChainedAlertExpressionParser( - triggerExpression: String -) : ChainedAlertExpressionRPNBaseParser(triggerExpression) { - - override fun parse(): ChainedAlertRPNResolver { - val expression = expressionToParse.replace(" ", "") - - val splitters = ArrayList() - CAExpressionOperator.values().forEach { splitters.add(it.value) } - - val breaks = ArrayList().apply { add(expression) } - for (s in splitters) { - val a = ArrayList() - for (ind in 0 until breaks.size) { - breaks[ind].let { - if (it.length > 1) { - a.addAll(breakString(breaks[ind], s)) - } else a.add(it) - } - } - breaks.clear() - breaks.addAll(a) - } - - return ChainedAlertRPNResolver(convertInfixToPostfix(breaks)) - } - - private fun breakString(input: String, delimeter: String): ArrayList { - val tokens = input.split(delimeter) - val array = ArrayList() - for (t in tokens) { - array.add(t) - array.add(delimeter) - } - array.removeAt(array.size - 1) - return array - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionRPNBaseParser.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionRPNBaseParser.kt deleted file mode 100644 index ff3c29db7..000000000 --- a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ChainedAlertExpressionRPNBaseParser.kt +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.chainedAlertCondition.parsers - -import org.opensearch.alerting.chainedAlertCondition.tokens.CAExpressionOperator -import org.opensearch.alerting.chainedAlertCondition.tokens.CAExpressionToken -import org.opensearch.alerting.chainedAlertCondition.tokens.ChainedAlertExpressionConstant -import org.opensearch.alerting.chainedAlertCondition.tokens.ExpressionToken -import java.util.Stack - -/** - * This is the abstract base class which holds the trigger expression parsing 
logic; - * using the Infix to Postfix a.k.a. Reverse Polish Notation (RPN) parser. - * It also uses the Shunting-Yard algorithm to parse the given trigger expression. - * - * @param expressionToParse Complete string containing the trigger expression - */ -abstract class ChainedAlertExpressionRPNBaseParser( - protected val expressionToParse: String -) : ExpressionParser { - /** - * To perform the Infix-to-postfix conversion of the trigger expression - */ - protected fun convertInfixToPostfix(expTokens: List<String>): ArrayList<ExpressionToken> { - val expTokenStack = Stack<ExpressionToken>() - val outputExpTokens = ArrayList<ExpressionToken>() - - for (tokenString in expTokens) { - if (tokenString.isEmpty()) continue - when (val expToken = assignToken(tokenString)) { - is CAExpressionToken -> outputExpTokens.add(expToken) - is CAExpressionOperator -> { - when (expToken) { - CAExpressionOperator.PAR_LEFT -> expTokenStack.push(expToken) - CAExpressionOperator.PAR_RIGHT -> { - var topExpToken = expTokenStack.popExpTokenOrNull<CAExpressionOperator>() - while (topExpToken != null && topExpToken != CAExpressionOperator.PAR_LEFT) { - outputExpTokens.add(topExpToken) - topExpToken = expTokenStack.popExpTokenOrNull<CAExpressionOperator>() - } - if (topExpToken != CAExpressionOperator.PAR_LEFT) - throw java.lang.IllegalArgumentException("No matching left parenthesis.") - } - else -> { - var op2 = expTokenStack.peekExpTokenOrNull<CAExpressionOperator>() - while (op2 != null) { - val c = expToken.precedence.compareTo(op2.precedence) - if (c < 0 || !expToken.rightAssociative && c <= 0) { - outputExpTokens.add(expTokenStack.pop()) - } else { - break - } - op2 = expTokenStack.peekExpTokenOrNull<CAExpressionOperator>() - } - expTokenStack.push(expToken) - } - } - } - } - } - - while (!expTokenStack.isEmpty()) { - expTokenStack.peekExpTokenOrNull<CAExpressionOperator>()?.let { - if (it == CAExpressionOperator.PAR_LEFT) - throw java.lang.IllegalArgumentException("No matching right parenthesis.") - } - val top = expTokenStack.pop() - outputExpTokens.add(top) - } - - return outputExpTokens - } - - /** - * Looks up and maps the expression token that matches the string version of that expression unit - */ - private fun assignToken(tokenString: String): ExpressionToken { - - // Check "monitor" string in trigger expression such as in 'monitor[id="abc"]' - if (tokenString.startsWith(ChainedAlertExpressionConstant.ConstantType.MONITOR.ident)) - return CAExpressionToken(tokenString) - - // Check operators in trigger expression such as in [&&, ||, !] - for (op in CAExpressionOperator.values()) { - if (op.value == tokenString) return op - } - - // Check any constants in trigger expression such as in ["id", "[", "]", "="] - for (con in ChainedAlertExpressionConstant.ConstantType.values()) { - if (tokenString == con.ident) return ChainedAlertExpressionConstant(con) - } - - throw IllegalArgumentException("Error while processing the trigger expression '$tokenString'") - } - - private inline fun <reified T> Stack<ExpressionToken>.popExpTokenOrNull(): T? { - return try { - pop() as T - } catch (e: java.lang.Exception) { - null - } - } - - private inline fun <reified T> Stack<ExpressionToken>.peekExpTokenOrNull(): T?
{ - return try { - peek() as T - } catch (e: java.lang.Exception) { - null - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ExpressionParser.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ExpressionParser.kt deleted file mode 100644 index e2ece9d40..000000000 --- a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/parsers/ExpressionParser.kt +++ /dev/null @@ -1,12 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.chainedAlertCondition.parsers - -import org.opensearch.alerting.chainedAlertCondition.resolvers.ChainedAlertTriggerResolver - -interface ExpressionParser { - fun parse(): ChainedAlertTriggerResolver -} diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertRPNResolver.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertRPNResolver.kt deleted file mode 100644 index dfec9614f..000000000 --- a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertRPNResolver.kt +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.chainedAlertCondition.resolvers - -import org.opensearch.alerting.chainedAlertCondition.tokens.CAExpressionOperator -import org.opensearch.alerting.chainedAlertCondition.tokens.CAExpressionToken -import org.opensearch.alerting.chainedAlertCondition.tokens.ChainedAlertExpressionConstant -import org.opensearch.alerting.chainedAlertCondition.tokens.ExpressionToken -import java.util.Stack - -/** - * Solves the trigger expression using the Reverse Polish Notation (RPN) based solver - * @param polishNotation an array of expression tokens organized in the RPN order - */ -class ChainedAlertRPNResolver( - private val polishNotation: ArrayList<ExpressionToken>, -) : ChainedAlertTriggerResolver { - - private val eqString by lazy { - val stringBuilder = StringBuilder() - for (expToken in polishNotation) { - when (expToken) { - is CAExpressionToken -> stringBuilder.append(expToken.value) - is CAExpressionOperator -> stringBuilder.append(expToken.value) - is ChainedAlertExpressionConstant -> stringBuilder.append(expToken.type.ident) - else -> throw Exception() - } - stringBuilder.append(" ") - } - stringBuilder.toString() - } - - override fun toString(): String = eqString - - /** - * Evaluates the trigger expression provided in the form of the RPN token array.
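 *
 * For example (a sketch): the parsed condition `monitor[id=m1] && monitor[id=m2]` arrives here as
 * the RPN token array `monitor[id=m1] monitor[id=m2] &&`, so
 *
 *     resolver.evaluate(setOf("m1", "m2")) // true: both monitors generated alerts
 *     resolver.evaluate(setOf("m1"))       // false: m2 did not
 *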
- * @param alertGeneratingMonitors ids of the monitors that generated alerts in this workflow run - * @return true if the chained alert trigger condition is satisfied by the given monitors - */ - override fun evaluate(alertGeneratingMonitors: Set<String>): Boolean { - val tokenStack = Stack<Boolean>() - val res = true // NOT is treated as unary here: its left operand is the constant true - for (expToken in polishNotation) { - when (expToken) { - is CAExpressionToken -> tokenStack.push(resolveMonitorExpression(expToken.value, alertGeneratingMonitors)) - is CAExpressionOperator -> { - val right = tokenStack.pop() - val expr = when (expToken) { - CAExpressionOperator.AND -> ChainedAlertTriggerExpression.And(tokenStack.pop(), right) - CAExpressionOperator.OR -> ChainedAlertTriggerExpression.Or(tokenStack.pop(), right) - CAExpressionOperator.NOT -> ChainedAlertTriggerExpression.Not(res, right) - else -> throw IllegalArgumentException("No matching operator.") - } - tokenStack.push(expr.resolve()) - } - } - } - return tokenStack.pop() - } - - override fun getMonitorIds(parsedTriggerCondition: ChainedAlertRPNResolver): Set<String> { - val monitorIds = mutableSetOf<String>() - for (expToken in polishNotation) { - when (expToken) { - is CAExpressionToken -> { - val monitorExpString = expToken.value - if (!monitorExpString.startsWith(ChainedAlertExpressionConstant.ConstantType.MONITOR.ident)) - continue - val token = monitorExpString.substringAfter(ChainedAlertExpressionConstant.ConstantType.BRACKET_LEFT.ident) - .substringBefore(ChainedAlertExpressionConstant.ConstantType.BRACKET_RIGHT.ident) - if (token.isEmpty()) continue - val tokens = token.split(ChainedAlertExpressionConstant.ConstantType.EQUALS.ident) - if (tokens.isEmpty() || tokens.size != 2) continue - val identifier = tokens[0] - val value = tokens[1] - when (identifier) { - ChainedAlertExpressionConstant.ConstantType.ID.ident -> { - monitorIds.add(value) - } - } - } - is CAExpressionOperator -> { - continue - } - } - } - return monitorIds - } - - private fun resolveMonitorExpression(monitorExpString: String, alertGeneratingMonitors: Set<String>): Boolean { - if (!monitorExpString.startsWith(ChainedAlertExpressionConstant.ConstantType.MONITOR.ident)) return false - val token = monitorExpString.substringAfter(ChainedAlertExpressionConstant.ConstantType.BRACKET_LEFT.ident) - .substringBefore(ChainedAlertExpressionConstant.ConstantType.BRACKET_RIGHT.ident) - if (token.isEmpty()) return false - - val tokens = token.split(ChainedAlertExpressionConstant.ConstantType.EQUALS.ident) - if (tokens.isEmpty() || tokens.size != 2) return false - - val identifier = tokens[0] - val value = tokens[1] - - return when (identifier) { - ChainedAlertExpressionConstant.ConstantType.ID.ident -> alertGeneratingMonitors.contains(value) - else -> false - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerExpression.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerExpression.kt deleted file mode 100644 index 4b373d853..000000000 --- a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerExpression.kt +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.chainedAlertCondition.resolvers - -sealed class ChainedAlertTriggerExpression { - - fun resolve(): Boolean = when (this) { - is And -> resolveAnd(boolean1, boolean2) - is Or -> resolveOr(boolean1, boolean2) - is Not -> resolveNot(result, boolean2) - } - - private fun resolveAnd(boolean1: Boolean, boolean2: Boolean): Boolean { - return
boolean1 && boolean2 - } - - private fun resolveOr(boolean1: Boolean, boolean2: Boolean): Boolean { - return boolean1 || boolean2 - } - - private fun resolveNot(result: Boolean, boolean2: Boolean): Boolean { - return result && !boolean2 - } - - // Operators represented as classes holding their boolean operands - class And(val boolean1: Boolean, val boolean2: Boolean) : ChainedAlertTriggerExpression() - class Or(val boolean1: Boolean, val boolean2: Boolean) : ChainedAlertTriggerExpression() - class Not(val result: Boolean, val boolean2: Boolean) : ChainedAlertTriggerExpression() -} diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerResolver.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerResolver.kt deleted file mode 100644 index 6f2ff2de0..000000000 --- a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/resolvers/ChainedAlertTriggerResolver.kt +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.chainedAlertCondition.resolvers - -interface ChainedAlertTriggerResolver { - fun getMonitorIds(parsedTriggerCondition: ChainedAlertRPNResolver): Set<String> - fun evaluate(alertGeneratingMonitors: Set<String>): Boolean -} diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionOperator.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionOperator.kt deleted file mode 100644 index 084b6aa70..000000000 --- a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionOperator.kt +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.chainedAlertCondition.tokens - -/** - * To define all the operators used in the trigger expression - */ -enum class CAExpressionOperator(val value: String, val precedence: Int, val rightAssociative: Boolean) : ExpressionToken { - - AND("&&", 2, false), - OR("||", 2, false), - - NOT("!", 3, true), - - PAR_LEFT("(", 1, false), - PAR_RIGHT(")", 1, false) -} diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionToken.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionToken.kt deleted file mode 100644 index ddf439d3f..000000000 --- a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/CAExpressionToken.kt +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.chainedAlertCondition.tokens - -/** - * To define the tokens in the trigger expression such as monitor[id="id1"] or monitor[id="id2"] and monitor[id="id3"] - */ -internal data class CAExpressionToken(val value: String) : ExpressionToken diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/ChainedAlertExpressionConstant.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/ChainedAlertExpressionConstant.kt deleted file mode 100644 index 4b35bc4a8..000000000 --- a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/ChainedAlertExpressionConstant.kt +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.chainedAlertCondition.tokens - -/** - * To define all the tokens which could be part of
expression constant such as query[id=new_id], query[name=new_name], - * query[tag=new_tag] - */ -class ChainedAlertExpressionConstant(val type: ConstantType) : ExpressionToken { - - enum class ConstantType(val ident: String) { - MONITOR("monitor"), - - ID("id"), - - BRACKET_LEFT("["), - BRACKET_RIGHT("]"), - - EQUALS("=") - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/ExpressionToken.kt b/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/ExpressionToken.kt deleted file mode 100644 index 38efed313..000000000 --- a/alerting/bin/main/org/opensearch/alerting/chainedAlertCondition/tokens/ExpressionToken.kt +++ /dev/null @@ -1,8 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.chainedAlertCondition.tokens - -interface ExpressionToken diff --git a/alerting/bin/main/org/opensearch/alerting/model/AlertingConfigAccessor.kt b/alerting/bin/main/org/opensearch/alerting/model/AlertingConfigAccessor.kt deleted file mode 100644 index 2e2b24b19..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/AlertingConfigAccessor.kt +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.withContext -import org.opensearch.action.get.GetRequest -import org.opensearch.action.get.GetResponse -import org.opensearch.alerting.model.destination.email.EmailAccount -import org.opensearch.alerting.model.destination.email.EmailGroup -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.client.Client -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.core.common.bytes.BytesReference -import org.opensearch.core.xcontent.NamedXContentRegistry - -/** - * This is an accessor class to retrieve documents/information from the Alerting config index. 
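 *
 * A usage sketch (the ids are made-up examples; `client` and `xContentRegistry` are assumed to be
 * available, and both getters must be called from a coroutine since they are suspend functions):
 *
 *     val emailAccount = AlertingConfigAccessor.getEmailAccountInfo(client, xContentRegistry, "account-1")
 *     val emailGroup = AlertingConfigAccessor.getEmailGroupInfo(client, xContentRegistry, "group-1")
 *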
- */ -class AlertingConfigAccessor { - companion object { - - suspend fun getEmailAccountInfo(client: Client, xContentRegistry: NamedXContentRegistry, emailAccountId: String): EmailAccount { - val source = getAlertingConfigDocumentSource(client, "Email account", emailAccountId) - return withContext(Dispatchers.IO) { - val xcp = XContentHelper.createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, source, XContentType.JSON) - val emailAccount = EmailAccount.parseWithType(xcp) - emailAccount - } - } - - suspend fun getEmailGroupInfo(client: Client, xContentRegistry: NamedXContentRegistry, emailGroupId: String): EmailGroup { - val source = getAlertingConfigDocumentSource(client, "Email group", emailGroupId) - return withContext(Dispatchers.IO) { - val xcp = XContentHelper.createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, source, XContentType.JSON) - val emailGroup = EmailGroup.parseWithType(xcp) - emailGroup - } - } - - private suspend fun getAlertingConfigDocumentSource( - client: Client, - type: String, - docId: String - ): BytesReference { - val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, docId).routing(docId) - val getResponse: GetResponse = client.suspendUntil { client.get(getRequest, it) } - if (!getResponse.isExists || getResponse.isSourceEmpty) { - throw IllegalStateException("$type document with id $docId not found or source is empty") - } - - // The document was already fetched above; return its source directly instead of issuing a second get - return getResponse.sourceAsBytesRef - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/model/BucketLevelTriggerRunResult.kt b/alerting/bin/main/org/opensearch/alerting/model/BucketLevelTriggerRunResult.kt deleted file mode 100644 index ffc302d98..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/BucketLevelTriggerRunResult.kt +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.commons.alerting.model.AggregationResultBucket -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import java.io.IOException - -data class BucketLevelTriggerRunResult( - override var triggerName: String, - override var error: Exception?
= null, - var aggregationResultBuckets: Map<String, AggregationResultBucket>, - var actionResultsMap: MutableMap<String, MutableMap<String, ActionRunResult>> = mutableMapOf() -) : TriggerRunResult(triggerName, error) { - - @Throws(IOException::class) - @Suppress("UNCHECKED_CAST") - constructor(sin: StreamInput) : this( - sin.readString(), - sin.readException() as Exception?, // error - sin.readMap(StreamInput::readString, ::AggregationResultBucket), - sin.readMap() as MutableMap<String, MutableMap<String, ActionRunResult>> - ) - - override fun internalXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return builder - .field(AGG_RESULT_BUCKETS, aggregationResultBuckets) - .field(ACTIONS_RESULTS, actionResultsMap as Map<String, Any>) - } - - @Throws(IOException::class) - @Suppress("UNCHECKED_CAST") - override fun writeTo(out: StreamOutput) { - super.writeTo(out) - out.writeMap(aggregationResultBuckets, StreamOutput::writeString) { - valueOut: StreamOutput, aggResultBucket: AggregationResultBucket -> - aggResultBucket.writeTo(valueOut) - } - out.writeMap(actionResultsMap as Map<String, Any>) - } - - companion object { - const val AGG_RESULT_BUCKETS = "agg_result_buckets" - const val ACTIONS_RESULTS = "action_results" - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): TriggerRunResult { - return BucketLevelTriggerRunResult(sin) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/model/ChainedAlertTriggerRunResult.kt b/alerting/bin/main/org/opensearch/alerting/model/ChainedAlertTriggerRunResult.kt deleted file mode 100644 index b95e533e9..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/ChainedAlertTriggerRunResult.kt +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.commons.alerting.alerts.AlertError -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.script.ScriptException -import java.io.IOException -import java.time.Instant - -data class ChainedAlertTriggerRunResult( - override var triggerName: String, - var triggered: Boolean, - override var error: Exception?, - var actionResults: MutableMap<String, ActionRunResult> = mutableMapOf(), - val associatedAlertIds: Set<String>, -) : TriggerRunResult(triggerName, error) { - - @Throws(IOException::class) - @Suppress("UNCHECKED_CAST") - constructor(sin: StreamInput) : this( - triggerName = sin.readString(), - error = sin.readException(), - triggered = sin.readBoolean(), - actionResults = sin.readMap() as MutableMap<String, ActionRunResult>, - associatedAlertIds = sin.readStringList().toSet() - ) - - override fun alertError(): AlertError?
{ - if (error != null) { - return AlertError(Instant.now(), "Failed evaluating trigger:\n${error!!.userErrorMessage()}") - } - for (actionResult in actionResults.values) { - if (actionResult.error != null) { - return AlertError(Instant.now(), "Failed running action:\n${actionResult.error.userErrorMessage()}") - } - } - return null - } - - override fun internalXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - if (error is ScriptException) error = Exception((error as ScriptException).toJsonString(), error) - return builder - .field("triggered", triggered) - .field("action_results", actionResults as Map<String, ActionRunResult>) - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - super.writeTo(out) - out.writeBoolean(triggered) - out.writeMap(actionResults as Map<String, Any>) - out.writeStringCollection(associatedAlertIds) - } - - companion object { - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): TriggerRunResult { - return ChainedAlertTriggerRunResult(sin) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/model/DocumentExecutionContext.kt b/alerting/bin/main/org/opensearch/alerting/model/DocumentExecutionContext.kt deleted file mode 100644 index 0caad1f4a..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/DocumentExecutionContext.kt +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.commons.alerting.model.DocLevelQuery - -data class DocumentExecutionContext( - val queries: List<DocLevelQuery>, - val lastRunContext: Map<String, Any>, - val updatedLastRunContext: Map<String, Any> -) diff --git a/alerting/bin/main/org/opensearch/alerting/model/DocumentLevelTriggerRunResult.kt b/alerting/bin/main/org/opensearch/alerting/model/DocumentLevelTriggerRunResult.kt deleted file mode 100644 index 9d98aab42..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/DocumentLevelTriggerRunResult.kt +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.script.ScriptException -import java.io.IOException - -data class DocumentLevelTriggerRunResult( - override var triggerName: String, - var triggeredDocs: List<String>, - override var error: Exception?, - var actionResultsMap: MutableMap<String, MutableMap<String, ActionRunResult>> = mutableMapOf() -) : TriggerRunResult(triggerName, error) { - - @Throws(IOException::class) - @Suppress("UNCHECKED_CAST") - constructor(sin: StreamInput) : this( - triggerName = sin.readString(), - error = sin.readException(), - triggeredDocs = sin.readStringList(), - actionResultsMap = sin.readMap() as MutableMap<String, MutableMap<String, ActionRunResult>> - ) - - override fun internalXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - if (error is ScriptException) error = Exception((error as ScriptException).toJsonString(), error) - return builder - .field("triggeredDocs", triggeredDocs as List<String>) - .field("action_results", actionResultsMap as Map<String, Any>) - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - super.writeTo(out) - out.writeStringCollection(triggeredDocs) - out.writeMap(actionResultsMap as Map<String, Any>) - } - - companion object { - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin:
StreamInput): TriggerRunResult { - return DocumentLevelTriggerRunResult(sin) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/model/MonitorMetadata.kt b/alerting/bin/main/org/opensearch/alerting/model/MonitorMetadata.kt deleted file mode 100644 index d1c5c240e..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/MonitorMetadata.kt +++ /dev/null @@ -1,198 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.alerting.model.destination.Destination.Companion.NO_ID -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.util.instant -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.common.io.stream.Writeable -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils -import org.opensearch.index.seqno.SequenceNumbers -import java.io.IOException -import java.time.Instant - -data class MonitorMetadata( - val id: String, - val seqNo: Long = SequenceNumbers.UNASSIGNED_SEQ_NO, - val primaryTerm: Long = SequenceNumbers.UNASSIGNED_PRIMARY_TERM, - val monitorId: String, - val lastActionExecutionTimes: List<ActionExecutionTime>, - val lastRunContext: Map<String, Any>, - // Maps (sourceIndex + monitorId) --> concreteQueryIndex - val sourceToQueryIndexMapping: MutableMap<String, String> = mutableMapOf() -) : Writeable, ToXContent { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - id = sin.readString(), - seqNo = sin.readLong(), - primaryTerm = sin.readLong(), - monitorId = sin.readString(), - lastActionExecutionTimes = sin.readList(ActionExecutionTime::readFrom), - lastRunContext = Monitor.suppressWarning(sin.readMap()), - sourceToQueryIndexMapping = sin.readMap() as MutableMap<String, String> - ) - - override fun writeTo(out: StreamOutput) { - out.writeString(id) - out.writeLong(seqNo) - out.writeLong(primaryTerm) - out.writeString(monitorId) - out.writeCollection(lastActionExecutionTimes) - out.writeMap(lastRunContext) - out.writeMap(sourceToQueryIndexMapping as MutableMap<String, Any>) - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - if (params.paramAsBoolean("with_type", false)) builder.startObject(METADATA) - builder.field(MONITOR_ID_FIELD, monitorId) - .field(LAST_ACTION_EXECUTION_FIELD, lastActionExecutionTimes.toTypedArray()) - if (lastRunContext.isNotEmpty()) builder.field(LAST_RUN_CONTEXT_FIELD, lastRunContext) - if (sourceToQueryIndexMapping.isNotEmpty()) { - builder.field(SOURCE_TO_QUERY_INDEX_MAP_FIELD, sourceToQueryIndexMapping as MutableMap<String, Any>) - } - if (params.paramAsBoolean("with_type", false)) builder.endObject() - return builder.endObject() - } - - companion object { - const val METADATA = "metadata" - const val MONITOR_ID_FIELD = "monitor_id" - const val LAST_ACTION_EXECUTION_FIELD = "last_action_execution_times" - const val LAST_RUN_CONTEXT_FIELD = "last_run_context" - const val SOURCE_TO_QUERY_INDEX_MAP_FIELD = "source_to_query_index_mapping" - - @JvmStatic - @JvmOverloads - @Throws(IOException::class) - fun parse( - xcp: XContentParser, - id: String = NO_ID, - seqNo: Long = SequenceNumbers.UNASSIGNED_SEQ_NO, - primaryTerm: Long = SequenceNumbers.UNASSIGNED_PRIMARY_TERM - ): MonitorMetadata { - lateinit var monitorId: String - val
lastActionExecutionTimes = mutableListOf<ActionExecutionTime>() - var lastRunContext: Map<String, Any> = mapOf() - var sourceToQueryIndexMapping: MutableMap<String, String> = mutableMapOf() - - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - - when (fieldName) { - MONITOR_ID_FIELD -> monitorId = xcp.text() - LAST_ACTION_EXECUTION_FIELD -> { - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { - lastActionExecutionTimes.add(ActionExecutionTime.parse(xcp)) - } - } - LAST_RUN_CONTEXT_FIELD -> lastRunContext = xcp.map() - SOURCE_TO_QUERY_INDEX_MAP_FIELD -> sourceToQueryIndexMapping = xcp.map() as MutableMap<String, String> - } - } - - return MonitorMetadata( - if (id != NO_ID) id else "$monitorId-metadata", - seqNo = seqNo, - primaryTerm = primaryTerm, - monitorId = monitorId, - lastActionExecutionTimes = lastActionExecutionTimes, - lastRunContext = lastRunContext, - sourceToQueryIndexMapping = sourceToQueryIndexMapping - ) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): MonitorMetadata { - return MonitorMetadata(sin) - } - - /** workflowMetadataId is used as key for monitor metadata in the case when the workflow execution happens - so the monitor lastRunContext (in the case of doc level monitor) is not interfering with the monitor execution - WorkflowMetadataId will be either workflowId-metadata (when executing the workflow as it is scheduled) - or timestampWithUUID-metadata (when a workflow is executed in a dry-run mode) - In the case of temp workflow, doc level monitors must have lastRunContext created from scratch - That's why we are using workflowMetadataId - in order to ensure that the doc level monitor metadata is created from scratch - **/ - fun getId(monitor: Monitor, workflowMetadataId: String? = null): String { - return if (workflowMetadataId.isNullOrEmpty()) "${monitor.id}-metadata" - // WorkflowMetadataId already contains -metadata suffix - else "$workflowMetadataId-${monitor.id}-metadata" - } - } -} - -/** - * A value object containing action execution time. - */ -data class ActionExecutionTime( - val actionId: String, - val executionTime: Instant -) : Writeable, ToXContent { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString(), // actionId - sin.readInstant() // executionTime - ) - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return builder.startObject() - .field(ACTION_ID_FIELD, actionId) - .field(EXECUTION_TIME_FIELD, executionTime) - .endObject() - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(actionId) - out.writeInstant(executionTime) - } - - companion object { - const val ACTION_ID_FIELD = "action_id" - const val EXECUTION_TIME_FIELD = "execution_time" - - @JvmStatic - @Throws(IOException::class) - fun parse(xcp: XContentParser): ActionExecutionTime { - lateinit var actionId: String - lateinit var executionTime: Instant - - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - - when (fieldName) { - ACTION_ID_FIELD -> actionId = xcp.text() - EXECUTION_TIME_FIELD -> executionTime = xcp.instant()!!
- } - } - - return ActionExecutionTime( - actionId, - executionTime - ) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): ActionExecutionTime { - return ActionExecutionTime(sin) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/model/MonitorRunResult.kt b/alerting/bin/main/org/opensearch/alerting/model/MonitorRunResult.kt deleted file mode 100644 index 07d839291..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/MonitorRunResult.kt +++ /dev/null @@ -1,216 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.apache.logging.log4j.LogManager -import org.opensearch.OpenSearchException -import org.opensearch.commons.alerting.alerts.AlertError -import org.opensearch.commons.alerting.model.Trigger -import org.opensearch.commons.alerting.util.optionalTimeField -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.common.io.stream.Writeable -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.script.ScriptException -import java.io.IOException -import java.time.Instant - -data class MonitorRunResult<TriggerResult : TriggerRunResult>( - val monitorName: String, - val periodStart: Instant, - val periodEnd: Instant, - val error: Exception? = null, - val inputResults: InputRunResults = InputRunResults(), - val triggerResults: Map<String, TriggerResult> = mapOf() -) : Writeable, ToXContent { - - @Throws(IOException::class) - @Suppress("UNCHECKED_CAST") - constructor(sin: StreamInput) : this( - sin.readString(), // monitorName - sin.readInstant(), // periodStart - sin.readInstant(), // periodEnd - sin.readException(), // error - InputRunResults.readFrom(sin), // inputResults - suppressWarning(sin.readMap()) as Map<String, TriggerResult> // triggerResults - ) - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return builder.startObject() - .field("monitor_name", monitorName) - .optionalTimeField("period_start", periodStart) - .optionalTimeField("period_end", periodEnd) - .field("error", error?.message) - .field("input_results", inputResults) - .field("trigger_results", triggerResults) - .endObject() - } - - /** Returns error information to store in the Alert. Currently it's just the stack trace but it can be more */ - fun alertError(): AlertError? { - if (error != null) { - return AlertError(Instant.now(), "Failed running monitor:\n${error.userErrorMessage()}") - } - - if (inputResults.error != null) { - return AlertError(Instant.now(), "Failed fetching inputs:\n${inputResults.error.userErrorMessage()}") - } - return null - } - - fun scriptContextError(trigger: Trigger): Exception? { - return error ?: inputResults.error ?: triggerResults[trigger.id]?.error - } - - companion object { - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): MonitorRunResult<TriggerRunResult> { - return MonitorRunResult(sin) - } - - @Suppress("UNCHECKED_CAST") - fun suppressWarning(map: MutableMap<String?, Any?>?): Map<String, TriggerRunResult> { - return map as Map<String, TriggerRunResult> - } - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(monitorName) - out.writeInstant(periodStart) - out.writeInstant(periodEnd) - out.writeException(error) - inputResults.writeTo(out) - out.writeMap(triggerResults) - } -} - -data class InputRunResults( - val results: List<Map<String, Any>> = listOf(), - val error: Exception? = null, - val aggTriggersAfterKey: MutableMap<String, TriggerAfterKey>?
= null -) : Writeable, ToXContent { - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return builder.startObject() - .field("results", results) - .field("error", error?.message) - .endObject() - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeVInt(results.size) - for (map in results) { - out.writeMap(map) - } - out.writeException(error) - } - - companion object { - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): InputRunResults { - val count = sin.readVInt() // count - val list = mutableListOf<Map<String, Any>>() - for (i in 0 until count) { - list.add(suppressWarning(sin.readMap())) // result(map) - } - val error = sin.readException() // error - return InputRunResults(list, error) - } - - @Suppress("UNCHECKED_CAST") - fun suppressWarning(map: MutableMap<String?, Any?>?): Map<String, Any> { - return map as Map<String, Any> - } - } - - fun afterKeysPresent(): Boolean { - aggTriggersAfterKey?.forEach { - if (it.value.afterKey != null && !it.value.lastPage) { - return true - } - } - return false - } -} - -data class TriggerAfterKey(val afterKey: Map<String, Any>?, val lastPage: Boolean) - -data class ActionRunResult( - val actionId: String, - val actionName: String, - val output: Map<String, Any>, - val throttled: Boolean = false, - val executionTime: Instant? = null, - val error: Exception? = null -) : Writeable, ToXContent { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString(), // actionId - sin.readString(), // actionName - suppressWarning(sin.readMap()), // output - sin.readBoolean(), // throttled - sin.readOptionalInstant(), // executionTime - sin.readException() // error - ) - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return builder.startObject() - .field("id", actionId) - .field("name", actionName) - .field("output", output) - .field("throttled", throttled) - .optionalTimeField("executionTime", executionTime) - .field("error", error?.message) - .endObject() - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(actionId) - out.writeString(actionName) - out.writeMap(output) - out.writeBoolean(throttled) - out.writeOptionalInstant(executionTime) - out.writeException(error) - } - - companion object { - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): ActionRunResult { - return ActionRunResult(sin) - } - - @Suppress("UNCHECKED_CAST") - fun suppressWarning(map: MutableMap<String?, Any?>?): MutableMap<String, Any> { - return map as MutableMap<String, Any> - } - } -} - -private val logger = LogManager.getLogger(MonitorRunResult::class.java) - -/** Constructs an error message from an exception suitable for human consumption. */ -fun Throwable.userErrorMessage(): String { - return when { - this is ScriptException -> this.scriptStack.joinToString(separator = "\n", limit = 100) - this is OpenSearchException -> this.detailedMessage - this.message != null -> { - logger.info("Internal error: ${this.message}. See the opensearch.log for details", this) - this.message!! - } - else -> { - logger.info("Unknown Internal error. See the OpenSearch log for details.", this) - "Unknown Internal error. See the OpenSearch log for details."
- } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/model/QueryLevelTriggerRunResult.kt b/alerting/bin/main/org/opensearch/alerting/model/QueryLevelTriggerRunResult.kt deleted file mode 100644 index d123dbae4..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/QueryLevelTriggerRunResult.kt +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.commons.alerting.alerts.AlertError -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.script.ScriptException -import java.io.IOException -import java.time.Instant - -data class QueryLevelTriggerRunResult( - override var triggerName: String, - var triggered: Boolean, - override var error: Exception?, - var actionResults: MutableMap<String, ActionRunResult> = mutableMapOf() -) : TriggerRunResult(triggerName, error) { - - @Throws(IOException::class) - @Suppress("UNCHECKED_CAST") - constructor(sin: StreamInput) : this( - triggerName = sin.readString(), - error = sin.readException(), - triggered = sin.readBoolean(), - actionResults = sin.readMap() as MutableMap<String, ActionRunResult> - ) - - override fun alertError(): AlertError? { - if (error != null) { - return AlertError(Instant.now(), "Failed evaluating trigger:\n${error!!.userErrorMessage()}") - } - for (actionResult in actionResults.values) { - if (actionResult.error != null) { - return AlertError(Instant.now(), "Failed running action:\n${actionResult.error.userErrorMessage()}") - } - } - return null - } - - override fun internalXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - if (error is ScriptException) error = Exception((error as ScriptException).toJsonString(), error) - return builder - .field("triggered", triggered) - .field("action_results", actionResults as Map<String, ActionRunResult>) - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - super.writeTo(out) - out.writeBoolean(triggered) - out.writeMap(actionResults as Map<String, Any>) - } - - companion object { - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): TriggerRunResult { - return QueryLevelTriggerRunResult(sin) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/model/TriggerRunResult.kt b/alerting/bin/main/org/opensearch/alerting/model/TriggerRunResult.kt deleted file mode 100644 index c3aec89f2..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/TriggerRunResult.kt +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.commons.alerting.alerts.AlertError -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.common.io.stream.Writeable -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import java.io.IOException -import java.time.Instant - -abstract class TriggerRunResult( - open var triggerName: String, - open var error: Exception?
= null -) : Writeable, ToXContent { - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .field("name", triggerName) - - internalXContent(builder, params) - val msg = error?.message - - builder.field("error", msg) - .endObject() - return builder - } - - abstract fun internalXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder - - /** Returns error information to store in the Alert. Currently it's just the stack trace but it can be more */ - open fun alertError(): AlertError? { - if (error != null) { - return AlertError(Instant.now(), "Failed evaluating trigger:\n${error!!.userErrorMessage()}") - } - return null - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(triggerName) - out.writeException(error) - } - - companion object { - @Suppress("UNCHECKED_CAST") - fun suppressWarning(map: MutableMap<String?, Any?>?): MutableMap<String, ActionRunResult> { - return map as MutableMap<String, ActionRunResult> - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/model/WorkflowMetadata.kt b/alerting/bin/main/org/opensearch/alerting/model/WorkflowMetadata.kt deleted file mode 100644 index 9ab7b43f8..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/WorkflowMetadata.kt +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.commons.alerting.util.instant -import org.opensearch.commons.alerting.util.optionalTimeField -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.common.io.stream.Writeable -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils -import java.io.IOException -import java.time.Instant - -data class WorkflowMetadata( - val id: String, - val workflowId: String, - val monitorIds: List<String>, - val latestRunTime: Instant, - val latestExecutionId: String -) : Writeable, ToXContent { - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - id = sin.readString(), - workflowId = sin.readString(), - monitorIds = sin.readStringList(), - latestRunTime = sin.readInstant(), - latestExecutionId = sin.readString() - ) - - override fun writeTo(out: StreamOutput) { - out.writeString(id) - out.writeString(workflowId) - out.writeStringCollection(monitorIds) - out.writeInstant(latestRunTime) - out.writeString(latestExecutionId) - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - if (params.paramAsBoolean("with_type", false)) builder.startObject(METADATA) - builder.field(WORKFLOW_ID_FIELD, workflowId) - .field(MONITOR_IDS_FIELD, monitorIds) - .optionalTimeField(LATEST_RUN_TIME, latestRunTime) - .field(LATEST_EXECUTION_ID, latestExecutionId) - if (params.paramAsBoolean("with_type", false)) builder.endObject() - return builder.endObject() - } - - companion object { - const val METADATA = "workflow_metadata" - const val WORKFLOW_ID_FIELD = "workflow_id" - const val MONITOR_IDS_FIELD = "monitor_ids" - const val LATEST_RUN_TIME = "latest_run_time" - const val LATEST_EXECUTION_ID = "latest_execution_id" - - @JvmStatic @JvmOverloads - @Throws(IOException::class) - fun parse(xcp: XContentParser): WorkflowMetadata { - lateinit var workflowId: String - var
monitorIds = mutableListOf<String>() - lateinit var latestRunTime: Instant - lateinit var latestExecutionId: String - - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - - when (fieldName) { - WORKFLOW_ID_FIELD -> workflowId = xcp.text() - MONITOR_IDS_FIELD -> { - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { - monitorIds.add(xcp.text()) - } - } - LATEST_RUN_TIME -> latestRunTime = xcp.instant()!! - LATEST_EXECUTION_ID -> latestExecutionId = xcp.text() - } - } - return WorkflowMetadata( - id = "$workflowId-metadata", - workflowId = workflowId, - monitorIds = monitorIds, - latestRunTime = latestRunTime, - latestExecutionId = latestExecutionId - ) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): WorkflowMetadata { - return WorkflowMetadata(sin) - } - - fun getId(workflowId: String? = null) = "$workflowId-metadata" - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/model/WorkflowRunResult.kt b/alerting/bin/main/org/opensearch/alerting/model/WorkflowRunResult.kt deleted file mode 100644 index cabdc6330..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/WorkflowRunResult.kt +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.common.io.stream.Writeable -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import java.io.IOException -import java.lang.Exception -import java.time.Instant - -data class WorkflowRunResult( - val workflowId: String, - val workflowName: String, - val monitorRunResults: List<MonitorRunResult<*>> = mutableListOf(), - val executionStartTime: Instant, - var executionEndTime: Instant? = null, - val executionId: String, - val error: Exception?
= null, - val triggerResults: Map<String, ChainedAlertTriggerRunResult> = mapOf(), -) : Writeable, ToXContent { - - @Throws(IOException::class) - @Suppress("UNCHECKED_CAST") - constructor(sin: StreamInput) : this( - workflowId = sin.readString(), - workflowName = sin.readString(), - monitorRunResults = sin.readList<MonitorRunResult<*>> { s: StreamInput -> MonitorRunResult.readFrom(s) }, - executionStartTime = sin.readInstant(), - executionEndTime = sin.readOptionalInstant(), - executionId = sin.readString(), - error = sin.readException(), - triggerResults = suppressWarning(sin.readMap()) as Map<String, ChainedAlertTriggerRunResult> - ) - - override fun writeTo(out: StreamOutput) { - out.writeString(workflowId) - out.writeString(workflowName) - out.writeList(monitorRunResults) - out.writeInstant(executionStartTime) - out.writeOptionalInstant(executionEndTime) - out.writeString(executionId) - out.writeException(error) - out.writeMap(triggerResults) - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - builder.field("execution_id", executionId) - builder.field("workflow_name", workflowName) - builder.field("workflow_id", workflowId) - builder.field("trigger_results", triggerResults) - builder.startArray("monitor_run_results") - for (monitorResult in monitorRunResults) { - monitorResult.toXContent(builder, ToXContent.EMPTY_PARAMS) - } - builder.endArray() - .field("execution_start_time", executionStartTime) - .field("execution_end_time", executionEndTime) - .field("error", error?.message) - .endObject() - return builder - } - - companion object { - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): WorkflowRunResult { - return WorkflowRunResult(sin) - } - - @Suppress("UNCHECKED_CAST") - fun suppressWarning(map: MutableMap<String?, Any?>?): Map<String, ChainedAlertTriggerRunResult> { - return map as Map<String, ChainedAlertTriggerRunResult> - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/Chime.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/Chime.kt deleted file mode 100644 index 06d066ded..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/destination/Chime.kt +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model.destination - -import org.opensearch.core.common.Strings -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken -import java.io.IOException - -/** - * A value object that represents a Chime message.
Chime message will be - * submitted to the Chime destination - */ -data class Chime(val url: String) : ToXContent { - - init { - require(!Strings.isNullOrEmpty(url)) { "URL is null or empty" } - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return builder.startObject(TYPE) - .field(URL, url) - .endObject() - } - - @Throws(IOException::class) - fun writeTo(out: StreamOutput) { - out.writeString(url) - } - - companion object { - const val URL = "url" - const val TYPE = "chime" - - @JvmStatic - @Throws(IOException::class) - fun parse(xcp: XContentParser): Chime { - lateinit var url: String - - ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - when (fieldName) { - URL -> url = xcp.text() - else -> { - throw IllegalStateException("Unexpected field: $fieldName, while parsing Chime destination") - } - } - } - return Chime(url) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): Chime? { - return if (sin.readBoolean()) { - Chime(sin.readString()) - } else null - } - } - - // Complete JSON structure is now constructed in the notification plugin - fun constructMessageContent(subject: String?, message: String): String { - return if (Strings.isNullOrEmpty(subject)) message else "$subject \n\n $message" - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/CustomWebhook.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/CustomWebhook.kt deleted file mode 100644 index 5758576d8..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/destination/CustomWebhook.kt +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model.destination - -import org.opensearch.core.common.Strings -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken -import java.io.IOException - -/** - * A value object that represents a Custom webhook message. Webhook message will be - * submitted to the Custom webhook destination - */ -data class CustomWebhook( - val url: String?, - val scheme: String?, - val host: String?, - val port: Int, - val path: String?, - val method: String?, - val queryParams: Map<String, String>, - val headerParams: Map<String, String>, - val username: String?, - val password: String? -) : ToXContent { - - init { - require(!(Strings.isNullOrEmpty(url) && Strings.isNullOrEmpty(host))) { - "Url or Host name must be provided."
- } - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return builder.startObject(TYPE) - .field(URL, url) - .field(SCHEME_FIELD, scheme) - .field(HOST_FIELD, host) - .field(PORT_FIELD, port) - .field(PATH_FIELD, path) - .field(METHOD_FIELD, method) - .field(QUERY_PARAMS_FIELD, queryParams) - .field(HEADER_PARAMS_FIELD, headerParams) - .field(USERNAME_FIELD, username) - .field(PASSWORD_FIELD, password) - .endObject() - } - - @Throws(IOException::class) - fun writeTo(out: StreamOutput) { - out.writeString(url) - out.writeOptionalString(scheme) - out.writeString(host) - out.writeOptionalInt(port) - out.writeOptionalString(path) - out.writeOptionalString(method) - out.writeMap(queryParams) - out.writeMap(headerParams) - out.writeOptionalString(username) - out.writeOptionalString(password) - } - - companion object { - const val URL = "url" - const val TYPE = "custom_webhook" - const val SCHEME_FIELD = "scheme" - const val HOST_FIELD = "host" - const val PORT_FIELD = "port" - const val PATH_FIELD = "path" - const val METHOD_FIELD = "method" - const val QUERY_PARAMS_FIELD = "query_params" - const val HEADER_PARAMS_FIELD = "header_params" - const val USERNAME_FIELD = "username" - const val PASSWORD_FIELD = "password" - - @JvmStatic - @Throws(IOException::class) - fun parse(xcp: XContentParser): CustomWebhook { - var url: String? = null - var scheme: String? = null - var host: String? = null - var port: Int = -1 - var path: String? = null - var method: String? = null - var queryParams: Map<String, String> = mutableMapOf() - var headerParams: Map<String, String> = mutableMapOf() - var username: String? = null - var password: String? = null - - ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - when (fieldName) { - URL -> url = xcp.textOrNull() - SCHEME_FIELD -> scheme = xcp.textOrNull() - HOST_FIELD -> host = xcp.textOrNull() - PORT_FIELD -> port = xcp.intValue() - PATH_FIELD -> path = xcp.textOrNull() - METHOD_FIELD -> method = xcp.textOrNull() - QUERY_PARAMS_FIELD -> queryParams = xcp.mapStrings() - HEADER_PARAMS_FIELD -> headerParams = xcp.mapStrings() - USERNAME_FIELD -> username = xcp.textOrNull() - PASSWORD_FIELD -> password = xcp.textOrNull() - else -> { - throw IllegalStateException("Unexpected field: $fieldName, while parsing custom webhook destination") - } - } - } - return CustomWebhook(url, scheme, host, port, path, method, queryParams, headerParams, username, password) - } - - @Suppress("UNCHECKED_CAST") - fun suppressWarning(map: MutableMap<String?, Any?>?): Map<String, String> { - return map as Map<String, String> - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): CustomWebhook?
{ - return if (sin.readBoolean()) { - CustomWebhook( - sin.readString(), // url - sin.readOptionalString(), // scheme - sin.readString(), // host - sin.readOptionalInt(), // port - sin.readOptionalString(), // path - sin.readOptionalString(), // method - suppressWarning(sin.readMap()), // queryParams) - suppressWarning(sin.readMap()), // headerParams) - sin.readOptionalString(), // username - sin.readOptionalString() // password - ) - } else null - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/Destination.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/Destination.kt deleted file mode 100644 index c5c5fc4bd..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/destination/Destination.kt +++ /dev/null @@ -1,308 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model.destination - -import org.apache.logging.log4j.LogManager -import org.opensearch.alerting.model.destination.email.Email -import org.opensearch.alerting.opensearchapi.convertToMap -import org.opensearch.alerting.util.DestinationType -import org.opensearch.alerting.util.destinationmigration.DestinationConversionUtils.Companion.convertAlertingToNotificationMethodType -import org.opensearch.commons.alerting.util.IndexUtils.Companion.NO_SCHEMA_VERSION -import org.opensearch.commons.alerting.util.instant -import org.opensearch.commons.alerting.util.optionalTimeField -import org.opensearch.commons.alerting.util.optionalUserField -import org.opensearch.commons.authuser.User -import org.opensearch.commons.destination.message.LegacyBaseMessage -import org.opensearch.commons.destination.message.LegacyChimeMessage -import org.opensearch.commons.destination.message.LegacyCustomWebhookMessage -import org.opensearch.commons.destination.message.LegacyEmailMessage -import org.opensearch.commons.destination.message.LegacySlackMessage -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken -import java.io.IOException -import java.time.Instant -import java.util.Locale - -/** - * A value object that represents a Destination message. - */ -data class Destination( - val id: String = NO_ID, - val version: Long = NO_VERSION, - val schemaVersion: Int = NO_SCHEMA_VERSION, - val seqNo: Int = NO_SEQ_NO, - val primaryTerm: Int = NO_PRIMARY_TERM, - val type: DestinationType, - val name: String, - val user: User?, - val lastUpdateTime: Instant, - val chime: Chime?, - val slack: Slack?, - val customWebhook: CustomWebhook?, - val email: Email? 
-) : ToXContent { - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return createXContentBuilder(builder, params, true) - } - - fun toXContentWithUser(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return createXContentBuilder(builder, params, false) - } - private fun createXContentBuilder(builder: XContentBuilder, params: ToXContent.Params, secure: Boolean): XContentBuilder { - builder.startObject() - if (params.paramAsBoolean("with_type", false)) builder.startObject(DESTINATION) - builder.field(ID_FIELD, id) - .field(TYPE_FIELD, type.value) - .field(NAME_FIELD, name) - - if (!secure) { - builder.optionalUserField(USER_FIELD, user) - } - - builder.field(SCHEMA_VERSION, schemaVersion) - .field(SEQ_NO_FIELD, seqNo) - .field(PRIMARY_TERM_FIELD, primaryTerm) - .optionalTimeField(LAST_UPDATE_TIME_FIELD, lastUpdateTime) - .field(type.value, constructResponseForDestinationType(type)) - if (params.paramAsBoolean("with_type", false)) builder.endObject() - return builder.endObject() - } - fun toXContent(builder: XContentBuilder): XContentBuilder { - return toXContent(builder, ToXContent.EMPTY_PARAMS) - } - - @Throws(IOException::class) - fun writeTo(out: StreamOutput) { - out.writeString(id) - out.writeLong(version) - out.writeInt(schemaVersion) - out.writeInt(seqNo) - out.writeInt(primaryTerm) - out.writeEnum(type) - out.writeString(name) - out.writeBoolean(user != null) - user?.writeTo(out) - out.writeInstant(lastUpdateTime) - out.writeBoolean(chime != null) - chime?.writeTo(out) - out.writeBoolean(slack != null) - slack?.writeTo(out) - out.writeBoolean(customWebhook != null) - customWebhook?.writeTo(out) - out.writeBoolean(email != null) - email?.writeTo(out) - } - - companion object { - const val DESTINATION = "destination" - const val ID_FIELD = "id" - const val TYPE_FIELD = "type" - const val NAME_FIELD = "name" - const val USER_FIELD = "user" - const val NO_ID = "" - const val NO_VERSION = 1L - const val NO_SEQ_NO = 0 - const val NO_PRIMARY_TERM = 0 - const val SCHEMA_VERSION = "schema_version" - const val SEQ_NO_FIELD = "seq_no" - const val PRIMARY_TERM_FIELD = "primary_term" - const val LAST_UPDATE_TIME_FIELD = "last_update_time" - const val CHIME = "chime" - const val SLACK = "slack" - const val CUSTOMWEBHOOK = "custom_webhook" - const val EMAIL = "email" - - // This constant is used for test actions created part of integ tests - const val TEST_ACTION = "test" - - private val logger = LogManager.getLogger(Destination::class.java) - - @JvmStatic - @JvmOverloads - @Throws(IOException::class) - fun parse( - xcp: XContentParser, - id: String = NO_ID, - version: Long = NO_VERSION, - seqNo: Int = NO_SEQ_NO, - primaryTerm: Int = NO_PRIMARY_TERM - ): Destination { - lateinit var name: String - var user: User? = null - lateinit var type: String - var slack: Slack? = null - var chime: Chime? = null - var customWebhook: CustomWebhook? = null - var email: Email? = null - var lastUpdateTime: Instant? 
= null - var schemaVersion = NO_SCHEMA_VERSION - - ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - - when (fieldName) { - NAME_FIELD -> name = xcp.text() - USER_FIELD -> user = if (xcp.currentToken() == XContentParser.Token.VALUE_NULL) null else User.parse(xcp) - TYPE_FIELD -> { - type = xcp.text() - val allowedTypes = DestinationType.values().map { it.value } - if (!allowedTypes.contains(type)) { - throw IllegalStateException("Type should be one of the $allowedTypes") - } - } - LAST_UPDATE_TIME_FIELD -> lastUpdateTime = xcp.instant() - CHIME -> { - chime = Chime.parse(xcp) - } - SLACK -> { - slack = Slack.parse(xcp) - } - CUSTOMWEBHOOK -> { - customWebhook = CustomWebhook.parse(xcp) - } - EMAIL -> { - email = Email.parse(xcp) - } - TEST_ACTION -> { - // This condition is for integ tests to avoid parsing - } - SCHEMA_VERSION -> { - schemaVersion = xcp.intValue() - } - else -> { - xcp.skipChildren() - } - } - } - return Destination( - id, - version, - schemaVersion, - seqNo, - primaryTerm, - DestinationType.valueOf(type.uppercase(Locale.ROOT)), - requireNotNull(name) { "Destination name is null" }, - user, - lastUpdateTime ?: Instant.now(), - chime, - slack, - customWebhook, - email - ) - } - - @JvmStatic - @Throws(IOException::class) - fun parseWithType(xcp: XContentParser, id: String = NO_ID, version: Long = NO_VERSION): Destination { - ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - ensureExpectedToken(XContentParser.Token.FIELD_NAME, xcp.nextToken(), xcp) - ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - val destination = parse(xcp, id, version) - ensureExpectedToken(XContentParser.Token.END_OBJECT, xcp.nextToken(), xcp) - return destination - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): Destination { - return Destination( - id = sin.readString(), - version = sin.readLong(), - schemaVersion = sin.readInt(), - seqNo = sin.readInt(), - primaryTerm = sin.readInt(), - type = sin.readEnum(DestinationType::class.java), - name = sin.readString(), - user = if (sin.readBoolean()) { - User(sin) - } else null, - lastUpdateTime = sin.readInstant(), - chime = Chime.readFrom(sin), - slack = Slack.readFrom(sin), - customWebhook = CustomWebhook.readFrom(sin), - email = Email.readFrom(sin) - ) - } - } - - fun buildLegacyBaseMessage( - compiledSubject: String?, - compiledMessage: String, - destinationCtx: DestinationContext - ): LegacyBaseMessage { - val destinationMessage: LegacyBaseMessage - when (type) { - DestinationType.CHIME -> { - val messageContent = chime?.constructMessageContent(compiledSubject, compiledMessage) - destinationMessage = LegacyChimeMessage.Builder(name) - .withUrl(chime?.url) - .withMessage(messageContent) - .build() - } - DestinationType.SLACK -> { - val messageContent = slack?.constructMessageContent(compiledSubject, compiledMessage) - destinationMessage = LegacySlackMessage.Builder(name) - .withUrl(slack?.url) - .withMessage(messageContent) - .build() - } - DestinationType.CUSTOM_WEBHOOK -> { - destinationMessage = LegacyCustomWebhookMessage.Builder(name) - .withUrl(getLegacyCustomWebhookMessageURL(customWebhook, compiledMessage)) - .withHeaderParams(customWebhook?.headerParams) - .withMessage(compiledMessage).build() - } - DestinationType.EMAIL -> { - val emailAccount = destinationCtx.emailAccount - destinationMessage = 
LegacyEmailMessage.Builder(name) - .withAccountName(emailAccount?.name) - .withHost(emailAccount?.host) - .withPort(emailAccount?.port) - .withMethod(emailAccount?.method?.let { convertAlertingToNotificationMethodType(it).toString() }) - .withFrom(emailAccount?.email) - .withRecipients(destinationCtx.recipients) - .withSubject(compiledSubject) - .withMessage(compiledMessage).build() - } - else -> throw IllegalArgumentException("Unsupported Destination type [$type] for building legacy message") - } - return destinationMessage - } - - private fun constructResponseForDestinationType(type: DestinationType): Any { - var content: Any? = null - when (type) { - DestinationType.CHIME -> content = chime?.convertToMap()?.get(type.value) - DestinationType.SLACK -> content = slack?.convertToMap()?.get(type.value) - DestinationType.CUSTOM_WEBHOOK -> content = customWebhook?.convertToMap()?.get(type.value) - DestinationType.EMAIL -> content = email?.convertToMap()?.get(type.value) - DestinationType.TEST_ACTION -> content = "dummy" - } - if (content == null) { - throw IllegalArgumentException("Content is NULL for destination type ${type.value}") - } - return content - } - - private fun getLegacyCustomWebhookMessageURL(customWebhook: CustomWebhook?, message: String): String { - return LegacyCustomWebhookMessage.Builder(name) - .withUrl(customWebhook?.url) - .withScheme(customWebhook?.scheme) - .withHost(customWebhook?.host) - .withPort(customWebhook?.port) - .withPath(customWebhook?.path) - .withQueryParams(customWebhook?.queryParams) - .withMessage(message) - .build().uri.toString() - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/DestinationContext.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/DestinationContext.kt deleted file mode 100644 index 5b3febc87..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/destination/DestinationContext.kt +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model.destination - -import org.opensearch.alerting.model.destination.email.EmailAccount - -/** - * DestinationContext is a value object that contains additional context information needed at runtime to publish to a destination. - * For now it only contains the information retrieved from documents by ID for Email (such as email account and email group recipients). - */ -data class DestinationContext( - val emailAccount: EmailAccount? 
= null, - val recipients: List<String> = emptyList() -) diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/DestinationContextFactory.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/DestinationContextFactory.kt deleted file mode 100644 index 263962ac7..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/destination/DestinationContextFactory.kt +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model.destination - -import org.opensearch.alerting.model.AlertingConfigAccessor -import org.opensearch.alerting.model.destination.email.Email -import org.opensearch.alerting.model.destination.email.EmailAccount -import org.opensearch.alerting.model.destination.email.Recipient -import org.opensearch.alerting.settings.DestinationSettings.Companion.SecureDestinationSettings -import org.opensearch.alerting.util.DestinationType -import org.opensearch.client.Client -import org.opensearch.core.common.settings.SecureString -import org.opensearch.core.xcontent.NamedXContentRegistry - -/** - * This class is responsible for generating [DestinationContext]. - */ -class DestinationContextFactory( - val client: Client, - val xContentRegistry: NamedXContentRegistry, - private var destinationSettings: Map<String, SecureDestinationSettings> -) { - - fun updateDestinationSettings(destinationSettings: Map<String, SecureDestinationSettings>) { - this.destinationSettings = destinationSettings - } - - suspend fun getDestinationContext(destination: Destination): DestinationContext { - var destinationContext = DestinationContext() - // Populate DestinationContext based on Destination type - if (destination.type == DestinationType.EMAIL) { - val email = destination.email - requireNotNull(email) { "Email in Destination: $destination was null" } - - var emailAccount = AlertingConfigAccessor.getEmailAccountInfo(client, xContentRegistry, email.emailAccountID) - - emailAccount = addEmailCredentials(emailAccount) - - // Get the email recipients as a unique list of email strings since - // recipients can be a combination of EmailGroups and single emails - val uniqueListOfRecipients = getUniqueListOfEmailRecipients(email) - - destinationContext = destinationContext.copy(emailAccount = emailAccount, recipients = uniqueListOfRecipients) - } - - return destinationContext - } - - private fun addEmailCredentials(emailAccount: EmailAccount): EmailAccount { - // Retrieve and populate the EmailAccount object with credentials if authentication is enabled - if (emailAccount.method != EmailAccount.MethodType.NONE) { - val emailUsername: SecureString? = destinationSettings[emailAccount.name]?.emailUsername - val emailPassword: SecureString? = destinationSettings[emailAccount.name]?.emailPassword - - return emailAccount.copy(username = emailUsername, password = emailPassword) - } - - return emailAccount - } - - private suspend fun getUniqueListOfEmailRecipients(email: Email): List<String> { - val uniqueRecipients: MutableSet<String> = mutableSetOf() - email.recipients.forEach { recipient -> - when (recipient.type) { - // Recipient attributes are checked for being non-null based on type during initialization - // so non-null assertion calls are made here - Recipient.RecipientType.EMAIL -> uniqueRecipients.add(recipient.email!!) - Recipient.RecipientType.EMAIL_GROUP -> { - val emailGroup = AlertingConfigAccessor.getEmailGroupInfo(client, xContentRegistry, recipient.emailGroupID!!)
- emailGroup.getEmailsAsListOfString().map { uniqueRecipients.add(it) } - } - } - } - - return uniqueRecipients.toList() - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/SNS.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/SNS.kt deleted file mode 100644 index f9c6ec59f..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/destination/SNS.kt +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model.destination - -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils -import java.io.IOException -import java.lang.IllegalStateException -import java.util.regex.Pattern - -data class SNS(val topicARN: String, val roleARN: String) : ToXContent { - - init { - require(SNS_ARN_REGEX.matcher(topicARN).find()) { "Invalid AWS SNS topic ARN: $topicARN" } - require(IAM_ARN_REGEX.matcher(roleARN).find()) { "Invalid AWS role ARN: $roleARN " } - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return builder.startObject(SNS_TYPE) - .field(TOPIC_ARN_FIELD, topicARN) - .field(ROLE_ARN_FIELD, roleARN) - .endObject() - } - - companion object { - - private val SNS_ARN_REGEX = Pattern.compile("^arn:aws(-[^:]+)?:sns:([a-zA-Z0-9-]+):([0-9]{12}):([a-zA-Z0-9-_]+)$") - private val IAM_ARN_REGEX = Pattern.compile("^arn:aws(-[^:]+)?:iam::([0-9]{12}):([a-zA-Z0-9-/_]+)$") - - const val TOPIC_ARN_FIELD = "topic_arn" - const val ROLE_ARN_FIELD = "role_arn" - const val SNS_TYPE = "sns" - - @JvmStatic - @Throws(IOException::class) - fun parse(xcp: XContentParser): SNS { - lateinit var topicARN: String - lateinit var roleARN: String - - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - when (fieldName) { - TOPIC_ARN_FIELD -> topicARN = xcp.textOrNull() - ROLE_ARN_FIELD -> roleARN = xcp.textOrNull() - else -> { - throw IllegalStateException("Unexpected field: $fieldName, while parsing SNS destination") - } - } - } - return SNS( - requireNotNull(topicARN) { "SNS Action topic_arn is null" }, - requireNotNull(roleARN) { "SNS Action role_arn is null" } - ) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/Slack.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/Slack.kt deleted file mode 100644 index 14f623616..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/destination/Slack.kt +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model.destination - -import org.opensearch.core.common.Strings -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken -import java.io.IOException - -/** - * A value object that represents a Slack message. 
Slack message will be - * submitted to the Slack destination - */ -data class Slack(val url: String) : ToXContent { - - init { - require(!Strings.isNullOrEmpty(url)) { "URL is null or empty" } - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return builder.startObject(TYPE) - .field(URL, url) - .endObject() - } - - @Throws(IOException::class) - fun writeTo(out: StreamOutput) { - out.writeString(url) - } - - companion object { - const val URL = "url" - const val TYPE = "slack" - - @JvmStatic - @Throws(IOException::class) - fun parse(xcp: XContentParser): Slack { - lateinit var url: String - - ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - when (fieldName) { - URL -> url = xcp.text() - else -> { - throw IllegalStateException("Unexpected field: $fieldName, while parsing Slack destination") - } - } - } - return Slack(url) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): Slack? { - return if (sin.readBoolean()) { - Slack(sin.readString()) - } else null - } - } - - // Complete JSON structure is now constructed in the notification plugin - fun constructMessageContent(subject: String?, message: String): String { - return if (Strings.isNullOrEmpty(subject)) message else "$subject \n\n $message" - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/email/Email.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/email/Email.kt deleted file mode 100644 index 75635ab38..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/destination/email/Email.kt +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model.destination.email - -import org.opensearch.alerting.util.isValidEmail -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.common.io.stream.Writeable -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParser.Token -import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken -import java.io.IOException -import java.lang.IllegalStateException -import java.util.Locale - -/** - * A value object that represents an Email message. Email messages will be - * submitted to the Email destination. 
- */ -data class Email( - val emailAccountID: String, - val recipients: List<Recipient> -) : Writeable, ToXContent { - - init { - require(recipients.isNotEmpty()) { "At least one recipient must be provided" } - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return builder.startObject(TYPE) - .field(EMAIL_ACCOUNT_ID_FIELD, emailAccountID) - .field(RECIPIENTS_FIELD, recipients.toTypedArray()) - .endObject() - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(emailAccountID) - out.writeCollection(recipients) - } - - companion object { - const val TYPE = "email" - const val EMAIL_ACCOUNT_ID_FIELD = "email_account_id" - const val RECIPIENTS_FIELD = "recipients" - - @JvmStatic - @Throws(IOException::class) - fun parse(xcp: XContentParser): Email { - lateinit var emailAccountID: String - val recipients: MutableList<Recipient> = mutableListOf() - - ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - - when (fieldName) { - EMAIL_ACCOUNT_ID_FIELD -> emailAccountID = xcp.text() - RECIPIENTS_FIELD -> { - ensureExpectedToken(Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != Token.END_ARRAY) { - recipients.add(Recipient.parse(xcp)) - } - } - else -> { - throw IllegalStateException("Unexpected field: $fieldName, while parsing email destination") - } - } - } - - return Email( - requireNotNull(emailAccountID) { "Email account ID is null" }, - recipients - ) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): Email? { - return if (sin.readBoolean()) { - Email( - sin.readString(), // emailAccountID - sin.readList(::Recipient) // recipients - ) - } else null - } - } -} - -/** - * A value object containing a recipient for an Email. - */ -data class Recipient( - val type: RecipientType, - val emailGroupID: String?, - val email: String? -) : Writeable, ToXContent { - - init { - when (type) { - RecipientType.EMAIL_GROUP -> requireNotNull(emailGroupID) { "Email group ID is null" } - RecipientType.EMAIL -> { - requireNotNull(email) { "Email is null" } - require(isValidEmail(email)) { "Invalid email" } - } - } - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readEnum(Recipient.RecipientType::class.java), // type - sin.readOptionalString(), // emailGroupId - sin.readOptionalString() // email - ) - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject().field(TYPE_FIELD, type.value) - - when (type) { - RecipientType.EMAIL_GROUP -> builder.field(EMAIL_GROUP_ID_FIELD, emailGroupID) - RecipientType.EMAIL -> builder.field(EMAIL_FIELD, email) - } - - return builder.endObject() - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeEnum(type) - out.writeOptionalString(emailGroupID) - out.writeOptionalString(email) - } - - enum class RecipientType(val value: String) { - EMAIL("email"), - EMAIL_GROUP("email_group") - } - - companion object { - const val TYPE_FIELD = "type" - const val EMAIL_GROUP_ID_FIELD = "email_group_id" - const val EMAIL_FIELD = "email" - - @JvmStatic - @Throws(IOException::class) - fun parse(xcp: XContentParser): Recipient { - lateinit var type: String - var emailGroupID: String? = null - var email: String?
= null - - ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - - when (fieldName) { - TYPE_FIELD -> { - type = xcp.text() - val allowedTypes = RecipientType.values().map { it.value } - if (!allowedTypes.contains(type)) { - throw IllegalStateException("Type should be one of $allowedTypes") - } - } - EMAIL_GROUP_ID_FIELD -> emailGroupID = xcp.text() - EMAIL_FIELD -> email = xcp.text() - } - } - - return Recipient( - RecipientType.valueOf(type.uppercase(Locale.ROOT)), - emailGroupID, - email - ) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): Recipient { - return Recipient(sin) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/email/EmailAccount.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/email/EmailAccount.kt deleted file mode 100644 index ea18e76a0..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/destination/email/EmailAccount.kt +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model.destination.email - -import org.opensearch.alerting.util.isValidEmail -import org.opensearch.commons.alerting.util.IndexUtils.Companion.NO_SCHEMA_VERSION -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.common.io.stream.Writeable -import org.opensearch.core.common.settings.SecureString -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParser.Token -import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken -import java.io.IOException - -/** - * A value object that represents an Email Account. Email Accounts contain the configuration - * information for sender emails when sending email messages through the Email destination. - */ -data class EmailAccount( - val id: String = NO_ID, - val version: Long = NO_VERSION, - val schemaVersion: Int = NO_SCHEMA_VERSION, - val name: String, - val email: String, - val host: String, - val port: Int, - val method: MethodType, - val username: SecureString? = null, - val password: SecureString? = null -) : Writeable, ToXContent { - - init { - // Excluding dashes (-) from valid names for EmailAccount since the name is used - // to namespace the associated OpenSearch keystore settings and dashes do not work for those settings. - val validNamePattern = Regex("[A-Z0-9_]+", RegexOption.IGNORE_CASE) - require(validNamePattern.matches(name)) { - "Invalid email account name. Valid characters are upper and lowercase a-z, 0-9, and _ (underscore)." 
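-                // Illustrative only: a name like "ops_alerts_account" matches this pattern, while
-                // "ops-alerts" is rejected because dashes are deliberately excluded (see the comment above).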
- } - - require(isValidEmail(email)) { "Invalid email" } - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - if (params.paramAsBoolean("with_type", false)) builder.startObject(EMAIL_ACCOUNT_TYPE) - builder.field(SCHEMA_VERSION, schemaVersion) - .field(NAME_FIELD, name) - .field(EMAIL_FIELD, email) - .field(HOST_FIELD, host) - .field(PORT_FIELD, port) - .field(METHOD_FIELD, method.value) - if (params.paramAsBoolean("with_type", false)) builder.endObject() - return builder.endObject() - } - - fun toXContent(builder: XContentBuilder): XContentBuilder { - return toXContent(builder, ToXContent.EMPTY_PARAMS) - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(id) - out.writeLong(version) - out.writeInt(schemaVersion) - out.writeString(name) - out.writeString(email) - out.writeString(host) - out.writeInt(port) - out.writeEnum(method) - out.writeOptionalSecureString(username) - out.writeOptionalSecureString(password) - } - - enum class MethodType(val value: String) { - NONE("none"), - SSL("ssl"), - TLS("starttls"); - - companion object { - private val values = values() - - // Created this method since MethodType value does not necessarily match enum name - fun getByValue(value: String) = values.firstOrNull { it.value == value } - } - } - - companion object { - const val EMAIL_ACCOUNT_TYPE = "email_account" - const val NO_ID = "" - const val NO_VERSION = 1L - const val SCHEMA_VERSION = "schema_version" - const val NAME_FIELD = "name" - const val EMAIL_FIELD = "email" - const val HOST_FIELD = "host" - const val PORT_FIELD = "port" - const val METHOD_FIELD = "method" - - @JvmStatic - @Throws(IOException::class) - fun parse(xcp: XContentParser, id: String = NO_ID, version: Long = NO_VERSION): EmailAccount { - var schemaVersion = NO_SCHEMA_VERSION - lateinit var name: String - lateinit var email: String - lateinit var host: String - var port: Int = -1 - lateinit var method: String - - ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - - when (fieldName) { - SCHEMA_VERSION -> schemaVersion = xcp.intValue() - NAME_FIELD -> name = xcp.text() - EMAIL_FIELD -> email = xcp.text() - HOST_FIELD -> host = xcp.text() - PORT_FIELD -> port = xcp.intValue() - METHOD_FIELD -> { - method = xcp.text() - val allowedMethods = MethodType.values().map { it.value } - if (!allowedMethods.contains(method)) { - throw IllegalStateException("Method should be one of $allowedMethods") - } - } - } - } - - return EmailAccount( - id, - version, - schemaVersion, - name, - email, - host, - port, - requireNotNull(MethodType.getByValue(method)) { "Method type was null" } - ) - } - - @JvmStatic - @Throws(IOException::class) - fun parseWithType(xcp: XContentParser, id: String = NO_ID, version: Long = NO_VERSION): EmailAccount { - ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp) - ensureExpectedToken(Token.FIELD_NAME, xcp.nextToken(), xcp) - ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp) - val emailAccount = parse(xcp, id, version) - ensureExpectedToken(Token.END_OBJECT, xcp.nextToken(), xcp) - return emailAccount - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): EmailAccount { - return EmailAccount( - sin.readString(), // id - sin.readLong(), // version - sin.readInt(), // schemaVersion - sin.readString(), // name - sin.readString(), // 
email - sin.readString(), // host - sin.readInt(), // port - sin.readEnum(MethodType::class.java), // method - sin.readOptionalSecureString(), // username - sin.readOptionalSecureString() // password - ) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/model/destination/email/EmailGroup.kt b/alerting/bin/main/org/opensearch/alerting/model/destination/email/EmailGroup.kt deleted file mode 100644 index a960da5f5..000000000 --- a/alerting/bin/main/org/opensearch/alerting/model/destination/email/EmailGroup.kt +++ /dev/null @@ -1,190 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model.destination.email - -import org.opensearch.alerting.util.isValidEmail -import org.opensearch.commons.alerting.util.IndexUtils.Companion.NO_SCHEMA_VERSION -import org.opensearch.core.common.Strings -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.common.io.stream.Writeable -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParser.Token -import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken -import java.io.IOException - -/** - * A value object that represents a group of recipient emails to send emails to. - */ -data class EmailGroup( - val id: String = NO_ID, - val version: Long = NO_VERSION, - val schemaVersion: Int = NO_SCHEMA_VERSION, - val name: String, - val emails: List -) : Writeable, ToXContent { - - init { - val validNamePattern = Regex("[A-Z0-9_-]+", RegexOption.IGNORE_CASE) - require(validNamePattern.matches(name)) { - "Invalid email group name. Valid characters are upper and lowercase a-z, 0-9, _ (underscore) and - (hyphen)." 
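-            // Illustrative only: unlike EmailAccount, this pattern allows dashes, so "ops-alerts_group"
-            // is a valid group name while names containing spaces or '@' are still rejected.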
- } - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - if (params.paramAsBoolean("with_type", false)) builder.startObject(EMAIL_GROUP_TYPE) - builder.field(SCHEMA_VERSION, schemaVersion) - .field(NAME_FIELD, name) - .field(EMAILS_FIELD, emails.toTypedArray()) - if (params.paramAsBoolean("with_type", false)) builder.endObject() - return builder.endObject() - } - - fun toXContent(builder: XContentBuilder): XContentBuilder { - return toXContent(builder, ToXContent.EMPTY_PARAMS) - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(id) - out.writeLong(version) - out.writeInt(schemaVersion) - out.writeString(name) - out.writeCollection(emails) - } - - fun getEmailsAsListOfString(): List { - val emailsAsListOfString: MutableList = mutableListOf() - emails.map { emailsAsListOfString.add(it.email) } - return emailsAsListOfString - } - - companion object { - const val EMAIL_GROUP_TYPE = "email_group" - const val NO_ID = "" - const val NO_VERSION = 1L - const val SCHEMA_VERSION = "schema_version" - const val NAME_FIELD = "name" - const val EMAILS_FIELD = "emails" - - @JvmStatic - @Throws(IOException::class) - fun parse(xcp: XContentParser, id: String = NO_ID, version: Long = NO_VERSION): EmailGroup { - var schemaVersion = NO_SCHEMA_VERSION - lateinit var name: String - val emails: MutableList = mutableListOf() - - ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - - when (fieldName) { - SCHEMA_VERSION -> schemaVersion = xcp.intValue() - NAME_FIELD -> name = xcp.text() - EMAILS_FIELD -> { - ensureExpectedToken(Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != Token.END_ARRAY) { - emails.add(EmailEntry.parse(xcp)) - } - } - else -> { - throw IllegalStateException("Unexpected field: $fieldName, while parsing email group") - } - } - } - - return EmailGroup( - id, - version, - schemaVersion, - requireNotNull(name) { "Email group name is null" }, - emails - ) - } - - @JvmStatic - @Throws(IOException::class) - fun parseWithType(xcp: XContentParser, id: String = NO_ID, version: Long = NO_VERSION): EmailGroup { - ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp) - ensureExpectedToken(Token.FIELD_NAME, xcp.nextToken(), xcp) - ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp) - val emailGroup = parse(xcp, id, version) - ensureExpectedToken(Token.END_OBJECT, xcp.nextToken(), xcp) - return emailGroup - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): EmailGroup { - return EmailGroup( - sin.readString(), // id - sin.readLong(), // version - sin.readInt(), // schemaVersion - sin.readString(), // name - sin.readList(::EmailEntry) // emails - ) - } - } -} - -data class EmailEntry(val email: String) : Writeable, ToXContent { - - init { - require(!Strings.isEmpty(email)) { "Email entry must have a non-empty email" } - require(isValidEmail(email)) { "Invalid email" } - } - - @Throws(IOException::class) - constructor(sin: StreamInput) : this( - sin.readString() // email - ) - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - return builder.startObject() - .field(EMAIL_FIELD, email) - .endObject() - } - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - out.writeString(email) - } - - companion object { - const val EMAIL_FIELD = 
"email" - - @JvmStatic - @Throws(IOException::class) - fun parse(xcp: XContentParser): EmailEntry { - lateinit var email: String - - ensureExpectedToken(Token.START_OBJECT, xcp.currentToken(), xcp) - while (xcp.nextToken() != Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - - when (fieldName) { - EMAIL_FIELD -> email = xcp.text() - else -> { - throw IllegalStateException("Unexpected field: $fieldName, while parsing email entry") - } - } - } - - return EmailEntry(email) - } - - @JvmStatic - @Throws(IOException::class) - fun readFrom(sin: StreamInput): EmailEntry { - return EmailEntry(sin) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/org.opensearch.alerting.txt b/alerting/bin/main/org/opensearch/alerting/org.opensearch.alerting.txt deleted file mode 100644 index bd1f94482..000000000 --- a/alerting/bin/main/org/opensearch/alerting/org.opensearch.alerting.txt +++ /dev/null @@ -1,52 +0,0 @@ - # Copyright OpenSearch Contributors -# SPDX-License-Identifier: Apache-2.0 - -# Painless definition of classes used by alerting plugin - -class org.opensearch.alerting.script.TriggerScript { - Map getParams() - boolean execute(QueryLevelTriggerExecutionContext) - String[] PARAMETERS -} - -class org.opensearch.alerting.script.TriggerScript$Factory { - TriggerScript newInstance(Map) -} - -class org.opensearch.alerting.script.TriggerExecutionContext { - Monitor getMonitor() - List getResults() - java.time.Instant getPeriodStart() - java.time.Instant getPeriodEnd() - Exception getError() -} - -class org.opensearch.alerting.script.QueryLevelTriggerExecutionContext { - Monitor getMonitor() - QueryLevelTrigger getTrigger() - List getResults() - java.time.Instant getPeriodStart() - java.time.Instant getPeriodEnd() - Alert getAlert() - Exception getError() -} - -class org.opensearch.commons.alerting.model.Monitor { - String getId() - long getVersion() - String getName() - boolean getEnabled() -} - -class org.opensearch.commons.alerting.model.QueryLevelTrigger { - String getId() - String getName() - String getSeverity() - List getActions() -} - -class org.opensearch.commons.alerting.model.Alert { - String getId() - long getVersion() - boolean isAcknowledged() -} \ No newline at end of file diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/AsyncActionHandler.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/AsyncActionHandler.kt deleted file mode 100644 index ae837d8d9..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/AsyncActionHandler.kt +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.opensearch.client.node.NodeClient -import org.opensearch.rest.BytesRestResponse -import org.opensearch.rest.RestChannel - -abstract class AsyncActionHandler(protected val client: NodeClient, protected val channel: RestChannel) { - - protected fun onFailure(e: Exception) { - channel.sendResponse(BytesRestResponse(channel, e)) - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestAcknowledgeAlertAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestAcknowledgeAlertAction.kt deleted file mode 100644 index f953876a4..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/RestAcknowledgeAlertAction.kt +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler 
- -import org.apache.logging.log4j.LogManager -import org.apache.logging.log4j.Logger -import org.opensearch.action.support.WriteRequest.RefreshPolicy -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.util.REFRESH -import org.opensearch.client.node.NodeClient -import org.opensearch.commons.alerting.action.AcknowledgeAlertRequest -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken -import org.opensearch.rest.BaseRestHandler -import org.opensearch.rest.BaseRestHandler.RestChannelConsumer -import org.opensearch.rest.RestHandler.ReplacedRoute -import org.opensearch.rest.RestHandler.Route -import org.opensearch.rest.RestRequest -import org.opensearch.rest.RestRequest.Method.POST -import org.opensearch.rest.action.RestToXContentListener -import java.io.IOException - -private val log: Logger = LogManager.getLogger(RestAcknowledgeAlertAction::class.java) - -/** - * This class consists of the REST handler to acknowledge alerts. - * The user provides the monitorID to which these alerts pertain and in the content of the request provides - * the ids to the alerts he would like to acknowledge. - */ -class RestAcknowledgeAlertAction : BaseRestHandler() { - - override fun getName(): String { - return "acknowledge_alert_action" - } - - override fun routes(): List { - return listOf() - } - - override fun replacedRoutes(): MutableList { - // Acknowledge alerts - return mutableListOf( - ReplacedRoute( - POST, - "${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}/_acknowledge/alerts", - POST, - "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/{monitorID}/_acknowledge/alerts" - ) - ) - } - - @Throws(IOException::class) - override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { - log.debug("${request.method()} ${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}/_acknowledge/alerts") - - val monitorId = request.param("monitorID") - require(!monitorId.isNullOrEmpty()) { "Missing monitor id." } - val alertIds = getAlertIds(request.contentParser()) - require(alertIds.isNotEmpty()) { "You must provide at least one alert id." 
} - val refreshPolicy = RefreshPolicy.parse(request.param(REFRESH, RefreshPolicy.IMMEDIATE.value)) - - val acknowledgeAlertRequest = AcknowledgeAlertRequest(monitorId, alertIds, refreshPolicy) - return RestChannelConsumer { channel -> - client.execute(AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE, acknowledgeAlertRequest, RestToXContentListener(channel)) - } - } - - /** - * Parse the request content and return a list of the alert ids to acknowledge - */ - private fun getAlertIds(xcp: XContentParser): List { - val ids = mutableListOf() - ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - when (fieldName) { - "alerts" -> { - ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { - ids.add(xcp.text()) - } - } - } - } - return ids - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestAcknowledgeChainedAlertsAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestAcknowledgeChainedAlertsAction.kt deleted file mode 100644 index 968856a48..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/RestAcknowledgeChainedAlertsAction.kt +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.apache.logging.log4j.LogManager -import org.apache.logging.log4j.Logger -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.client.node.NodeClient -import org.opensearch.commons.alerting.action.AcknowledgeChainedAlertRequest -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken -import org.opensearch.rest.BaseRestHandler -import org.opensearch.rest.BaseRestHandler.RestChannelConsumer -import org.opensearch.rest.RestHandler.Route -import org.opensearch.rest.RestRequest -import org.opensearch.rest.RestRequest.Method.POST -import org.opensearch.rest.action.RestToXContentListener -import java.io.IOException - -private val log: Logger = LogManager.getLogger(RestAcknowledgeAlertAction::class.java) - -/** - * This class consists of the REST handler to acknowledge chained alerts. - * The user provides the workflowID to which these alerts pertain and in the content of the request provides - * the ids to the chained alerts user would like to acknowledge. - */ -class RestAcknowledgeChainedAlertAction : BaseRestHandler() { - - override fun getName(): String { - return "acknowledge_chained_alert_action" - } - - override fun routes(): List { - // Acknowledge alerts - return mutableListOf( - Route( - POST, - "${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}/_acknowledge/alerts" - ) - ) - } - - @Throws(IOException::class) - override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { - log.debug("${request.method()} ${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}/_acknowledge/alerts") - - val workflowId = request.param("workflowID") - require(!workflowId.isNullOrEmpty()) { "Missing workflow id." } - val alertIds = getAlertIds(request.contentParser()) - require(alertIds.isNotEmpty()) { "You must provide at least one alert id." 
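-        // Same body contract as monitor-level acknowledgement (ids illustrative):
-        // {"alerts": ["<chained alertID1>", "<chained alertID2>"]}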
} - - val acknowledgeAlertRequest = AcknowledgeChainedAlertRequest(workflowId, alertIds) - return RestChannelConsumer { channel -> - client.execute(AlertingActions.ACKNOWLEDGE_CHAINED_ALERTS_ACTION_TYPE, acknowledgeAlertRequest, RestToXContentListener(channel)) - } - } - - /** - * Parse the request content and return a list of the alert ids to acknowledge - */ - private fun getAlertIds(xcp: XContentParser): List { - val ids = mutableListOf() - ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - val fieldName = xcp.currentName() - xcp.nextToken() - when (fieldName) { - "alerts" -> { - ensureExpectedToken(XContentParser.Token.START_ARRAY, xcp.currentToken(), xcp) - while (xcp.nextToken() != XContentParser.Token.END_ARRAY) { - ids.add(xcp.text()) - } - } - } - } - return ids - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestDeleteMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestDeleteMonitorAction.kt deleted file mode 100644 index a2276349e..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/RestDeleteMonitorAction.kt +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ -package org.opensearch.alerting.resthandler - -import org.apache.logging.log4j.LogManager -import org.apache.logging.log4j.Logger -import org.opensearch.action.support.WriteRequest.RefreshPolicy -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.util.REFRESH -import org.opensearch.client.node.NodeClient -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.DeleteMonitorRequest -import org.opensearch.rest.BaseRestHandler -import org.opensearch.rest.BaseRestHandler.RestChannelConsumer -import org.opensearch.rest.RestHandler.ReplacedRoute -import org.opensearch.rest.RestHandler.Route -import org.opensearch.rest.RestRequest -import org.opensearch.rest.RestRequest.Method.DELETE -import org.opensearch.rest.action.RestToXContentListener -import java.io.IOException - -private val log: Logger = LogManager.getLogger(RestDeleteMonitorAction::class.java) - -/** - * This class consists of the REST handler to delete monitors. - * When a monitor is deleted, all alerts are moved to the [Alert.State.DELETED] state and moved to the alert history index. - * If this process fails the monitor is not deleted. 
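- *
- * A hypothetical invocation, assuming the plugin's standard base URI and an existing monitor:
- *
- *   DELETE /_plugins/_alerting/monitors/<monitorID>?refresh=wait_for
- *
- * The refresh parameter is optional; prepareRequest defaults it to RefreshPolicy.IMMEDIATE.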
- */ -class RestDeleteMonitorAction : BaseRestHandler() { - - override fun getName(): String { - return "delete_monitor_action" - } - - override fun routes(): List { - return listOf() - } - - override fun replacedRoutes(): MutableList { - return mutableListOf( - ReplacedRoute( - DELETE, - "${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}", - DELETE, - "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/{monitorID}" - ) - ) - } - - @Throws(IOException::class) - override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { - log.debug("${request.method()} ${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}") - - val monitorId = request.param("monitorID") - log.debug("${request.method()} ${AlertingPlugin.MONITOR_BASE_URI}/$monitorId") - - val refreshPolicy = RefreshPolicy.parse(request.param(REFRESH, RefreshPolicy.IMMEDIATE.value)) - val deleteMonitorRequest = DeleteMonitorRequest(monitorId, refreshPolicy) - - return RestChannelConsumer { channel -> - client.execute(AlertingActions.DELETE_MONITOR_ACTION_TYPE, deleteMonitorRequest, RestToXContentListener(channel)) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestDeleteWorkflowAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestDeleteWorkflowAction.kt deleted file mode 100644 index a61a9b51c..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/RestDeleteWorkflowAction.kt +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.apache.logging.log4j.LogManager -import org.opensearch.action.support.WriteRequest -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.util.REFRESH -import org.opensearch.client.node.NodeClient -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.DeleteWorkflowRequest -import org.opensearch.rest.BaseRestHandler -import org.opensearch.rest.RestHandler -import org.opensearch.rest.RestRequest -import org.opensearch.rest.action.RestToXContentListener -import java.io.IOException - -/** - * This class consists of the REST handler to delete workflows. 
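- *
- * A hypothetical invocation, assuming the workflow base URI; deleteDelegateMonitors is optional
- * and defaults to false in prepareRequest:
- *
- *   DELETE /_plugins/_alerting/workflows/<workflowID>?deleteDelegateMonitors=true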
- */ -class RestDeleteWorkflowAction : BaseRestHandler() { - - private val log = LogManager.getLogger(javaClass) - - override fun getName(): String { - return "delete_workflow_action" - } - - override fun routes(): List { - return listOf( - RestHandler.Route( - RestRequest.Method.DELETE, - "${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}" - ) - ) - } - - @Throws(IOException::class) - override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { - log.debug("${request.method()} ${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}") - - val workflowId = request.param("workflowID") - val deleteDelegateMonitors = request.paramAsBoolean("deleteDelegateMonitors", false) - log.debug("${request.method()} ${request.uri()}") - - val refreshPolicy = - WriteRequest.RefreshPolicy.parse(request.param(REFRESH, WriteRequest.RefreshPolicy.IMMEDIATE.value)) - val deleteWorkflowRequest = DeleteWorkflowRequest(workflowId, deleteDelegateMonitors) - - return RestChannelConsumer { channel -> - client.execute( - AlertingActions.DELETE_WORKFLOW_ACTION_TYPE, deleteWorkflowRequest, - RestToXContentListener(channel) - ) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestExecuteMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestExecuteMonitorAction.kt deleted file mode 100644 index 740dcb2d6..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/RestExecuteMonitorAction.kt +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.apache.logging.log4j.LogManager -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.action.ExecuteMonitorAction -import org.opensearch.alerting.action.ExecuteMonitorRequest -import org.opensearch.client.node.NodeClient -import org.opensearch.common.unit.TimeValue -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.core.xcontent.XContentParser.Token.START_OBJECT -import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken -import org.opensearch.rest.BaseRestHandler -import org.opensearch.rest.BaseRestHandler.RestChannelConsumer -import org.opensearch.rest.RestHandler.ReplacedRoute -import org.opensearch.rest.RestHandler.Route -import org.opensearch.rest.RestRequest -import org.opensearch.rest.RestRequest.Method.POST -import org.opensearch.rest.action.RestToXContentListener -import java.time.Instant - -private val log = LogManager.getLogger(RestExecuteMonitorAction::class.java) - -class RestExecuteMonitorAction : BaseRestHandler() { - - override fun getName(): String = "execute_monitor_action" - - override fun routes(): List { - return listOf() - } - - override fun replacedRoutes(): MutableList { - return mutableListOf( - ReplacedRoute( - POST, - "${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}/_execute", - POST, - "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/{monitorID}/_execute" - ), - ReplacedRoute( - POST, - "${AlertingPlugin.MONITOR_BASE_URI}/_execute", - POST, - "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/_execute" - ) - ) - } - - override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { - log.debug("${request.method()} ${AlertingPlugin.MONITOR_BASE_URI}/_execute") - - return RestChannelConsumer { channel -> - val dryrun = request.paramAsBoolean("dryrun", false) - val requestEnd = request.paramAsTime("period_end", 
TimeValue(Instant.now().toEpochMilli())) - - if (request.hasParam("monitorID")) { - val monitorId = request.param("monitorID") - val execMonitorRequest = ExecuteMonitorRequest(dryrun, requestEnd, monitorId, null) - client.execute(ExecuteMonitorAction.INSTANCE, execMonitorRequest, RestToXContentListener(channel)) - } else { - val xcp = request.contentParser() - ensureExpectedToken(START_OBJECT, xcp.nextToken(), xcp) - val monitor = Monitor.parse(xcp, Monitor.NO_ID, Monitor.NO_VERSION) - val execMonitorRequest = ExecuteMonitorRequest(dryrun, requestEnd, null, monitor) - client.execute(ExecuteMonitorAction.INSTANCE, execMonitorRequest, RestToXContentListener(channel)) - } - } - } - - override fun responseParams(): Set { - return setOf("dryrun", "period_end", "monitorID") - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestExecuteWorkflowAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestExecuteWorkflowAction.kt deleted file mode 100644 index de8da1bac..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/RestExecuteWorkflowAction.kt +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.apache.logging.log4j.LogManager -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.action.ExecuteWorkflowAction -import org.opensearch.alerting.action.ExecuteWorkflowRequest -import org.opensearch.client.node.NodeClient -import org.opensearch.common.unit.TimeValue -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils -import org.opensearch.rest.BaseRestHandler -import org.opensearch.rest.RestHandler -import org.opensearch.rest.RestRequest -import org.opensearch.rest.action.RestToXContentListener -import java.time.Instant - -private val log = LogManager.getLogger(RestExecuteWorkflowAction::class.java) - -class RestExecuteWorkflowAction : BaseRestHandler() { - - override fun getName(): String = "execute_workflow_action" - - override fun routes(): List { - return listOf( - RestHandler.Route(RestRequest.Method.POST, "${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}/_execute") - ) - } - - override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { - log.debug("${request.method()} ${AlertingPlugin.WORKFLOW_BASE_URI}/_execute") - - return RestChannelConsumer { channel -> - val dryrun = request.paramAsBoolean("dryrun", false) - val requestEnd = request.paramAsTime("period_end", TimeValue(Instant.now().toEpochMilli())) - - if (request.hasParam("workflowID")) { - val workflowId = request.param("workflowID") - val execWorkflowRequest = ExecuteWorkflowRequest(dryrun, requestEnd, workflowId, null) - client.execute(ExecuteWorkflowAction.INSTANCE, execWorkflowRequest, RestToXContentListener(channel)) - } else { - val xcp = request.contentParser() - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - val workflow = Workflow.parse(xcp, Workflow.NO_ID, Workflow.NO_VERSION) - val execWorkflowRequest = ExecuteWorkflowRequest(dryrun, requestEnd, null, workflow) - client.execute(ExecuteWorkflowAction.INSTANCE, execWorkflowRequest, RestToXContentListener(channel)) - } - } - } - - override fun responseParams(): Set { - return setOf("dryrun", "period_end", "workflowID") - } -} diff --git 
a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetAlertsAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetAlertsAction.kt deleted file mode 100644 index aabcf8d6c..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetAlertsAction.kt +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.apache.logging.log4j.LogManager -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.client.node.NodeClient -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.GetAlertsRequest -import org.opensearch.commons.alerting.model.Table -import org.opensearch.rest.BaseRestHandler -import org.opensearch.rest.BaseRestHandler.RestChannelConsumer -import org.opensearch.rest.RestHandler.ReplacedRoute -import org.opensearch.rest.RestHandler.Route -import org.opensearch.rest.RestRequest -import org.opensearch.rest.RestRequest.Method.GET -import org.opensearch.rest.action.RestToXContentListener - -/** - * This class consists of the REST handler to retrieve alerts . - */ -class RestGetAlertsAction : BaseRestHandler() { - - private val log = LogManager.getLogger(RestGetAlertsAction::class.java) - - override fun getName(): String { - return "get_alerts_action" - } - - override fun routes(): List { - return listOf() - } - - override fun replacedRoutes(): MutableList { - return mutableListOf( - ReplacedRoute( - GET, - "${AlertingPlugin.MONITOR_BASE_URI}/alerts", - GET, - "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/alerts" - ) - ) - } - - override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { - log.debug("${request.method()} ${AlertingPlugin.MONITOR_BASE_URI}/alerts") - - val sortString = request.param("sortString", "monitor_name.keyword") - val sortOrder = request.param("sortOrder", "asc") - val missing: String? = request.param("missing") - val size = request.paramAsInt("size", 20) - val startIndex = request.paramAsInt("startIndex", 0) - val searchString = request.param("searchString", "") - val severityLevel = request.param("severityLevel", "ALL") - val alertState = request.param("alertState", "ALL") - val monitorId: String? = request.param("monitorId") - val workflowId: String? 
= request.param("workflowIds") - val workflowIds = mutableListOf() - if (workflowId.isNullOrEmpty() == false) { - workflowIds.add(workflowId) - } else { - workflowIds.add("") - } - val table = Table( - sortOrder, - sortString, - missing, - size, - startIndex, - searchString - ) - - val getAlertsRequest = GetAlertsRequest(table, severityLevel, alertState, monitorId, null, workflowIds = workflowIds) - return RestChannelConsumer { - channel -> - client.execute(AlertingActions.GET_ALERTS_ACTION_TYPE, getAlertsRequest, RestToXContentListener(channel)) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetDestinationsAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetDestinationsAction.kt deleted file mode 100644 index 7e5e1530f..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetDestinationsAction.kt +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.apache.logging.log4j.LogManager -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.action.GetDestinationsAction -import org.opensearch.alerting.action.GetDestinationsRequest -import org.opensearch.alerting.util.context -import org.opensearch.client.node.NodeClient -import org.opensearch.commons.alerting.model.Table -import org.opensearch.rest.BaseRestHandler -import org.opensearch.rest.BaseRestHandler.RestChannelConsumer -import org.opensearch.rest.RestHandler.ReplacedRoute -import org.opensearch.rest.RestHandler.Route -import org.opensearch.rest.RestRequest -import org.opensearch.rest.action.RestActions -import org.opensearch.rest.action.RestToXContentListener -import org.opensearch.search.fetch.subphase.FetchSourceContext - -/** - * This class consists of the REST handler to retrieve destinations . - */ -class RestGetDestinationsAction : BaseRestHandler() { - - private val log = LogManager.getLogger(RestGetDestinationsAction::class.java) - - override fun getName(): String { - return "get_destinations_action" - } - - override fun routes(): List { - return listOf() - } - - override fun replacedRoutes(): MutableList { - return mutableListOf( - // Get a specific destination - ReplacedRoute( - RestRequest.Method.GET, - "${AlertingPlugin.DESTINATION_BASE_URI}/{destinationID}", - RestRequest.Method.GET, - "${AlertingPlugin.LEGACY_OPENDISTRO_DESTINATION_BASE_URI}/{destinationID}" - ), - ReplacedRoute( - RestRequest.Method.GET, - AlertingPlugin.DESTINATION_BASE_URI, - RestRequest.Method.GET, - AlertingPlugin.LEGACY_OPENDISTRO_DESTINATION_BASE_URI - ) - ) - } - - override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { - log.debug("${request.method()} ${request.path()}") - - val destinationId: String? = request.param("destinationID") - - var srcContext = context(request) - if (request.method() == RestRequest.Method.HEAD) { - srcContext = FetchSourceContext.DO_NOT_FETCH_SOURCE - } - - val sortString = request.param("sortString", "destination.name.keyword") - val sortOrder = request.param("sortOrder", "asc") - val missing: String? 
= request.param("missing") - val size = request.paramAsInt("size", 20) - val startIndex = request.paramAsInt("startIndex", 0) - val searchString = request.param("searchString", "") - val destinationType = request.param("destinationType", "ALL") - - val table = Table( - sortOrder, - sortString, - missing, - size, - startIndex, - searchString - ) - - val getDestinationsRequest = GetDestinationsRequest( - destinationId, - RestActions.parseVersion(request), - srcContext, - table, - destinationType - ) - return RestChannelConsumer { - channel -> - client.execute(GetDestinationsAction.INSTANCE, getDestinationsRequest, RestToXContentListener(channel)) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetEmailAccountAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetEmailAccountAction.kt deleted file mode 100644 index ba5dbdb8a..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetEmailAccountAction.kt +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.action.GetEmailAccountAction -import org.opensearch.alerting.action.GetEmailAccountRequest -import org.opensearch.alerting.util.context -import org.opensearch.client.node.NodeClient -import org.opensearch.rest.BaseRestHandler -import org.opensearch.rest.RestHandler.ReplacedRoute -import org.opensearch.rest.RestHandler.Route -import org.opensearch.rest.RestRequest -import org.opensearch.rest.action.RestActions -import org.opensearch.rest.action.RestToXContentListener -import org.opensearch.search.fetch.subphase.FetchSourceContext -import java.lang.IllegalArgumentException - -/** - * Rest handler to retrieve an EmailAccount. 
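- *
- * Hypothetical calls, assuming the email account base URI; for HEAD the handler swaps in
- * FetchSourceContext.DO_NOT_FETCH_SOURCE so no document source is returned:
- *
- *   GET  /_plugins/_alerting/destinations/email_accounts/<emailAccountID>
- *   HEAD /_plugins/_alerting/destinations/email_accounts/<emailAccountID>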
- */ -class RestGetEmailAccountAction : BaseRestHandler() { - - override fun getName(): String { - return "get_email_account_action" - } - - override fun routes(): List { - return listOf() - } - - override fun replacedRoutes(): MutableList { - return mutableListOf( - ReplacedRoute( - RestRequest.Method.GET, - "${AlertingPlugin.EMAIL_ACCOUNT_BASE_URI}/{emailAccountID}", - RestRequest.Method.GET, - "${AlertingPlugin.LEGACY_OPENDISTRO_EMAIL_ACCOUNT_BASE_URI}/{emailAccountID}" - ), - ReplacedRoute( - RestRequest.Method.HEAD, - "${AlertingPlugin.EMAIL_ACCOUNT_BASE_URI}/{emailAccountID}", - RestRequest.Method.HEAD, - "${AlertingPlugin.LEGACY_OPENDISTRO_EMAIL_ACCOUNT_BASE_URI}/{emailAccountID}" - ) - ) - } - - override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { - val emailAccountID = request.param("emailAccountID") - if (emailAccountID == null || emailAccountID.isEmpty()) { - throw IllegalArgumentException("Missing email account ID") - } - - var srcContext = context(request) - if (request.method() == RestRequest.Method.HEAD) { - srcContext = FetchSourceContext.DO_NOT_FETCH_SOURCE - } - - val getEmailAccountRequest = GetEmailAccountRequest(emailAccountID, RestActions.parseVersion(request), request.method(), srcContext) - return RestChannelConsumer { channel -> - client.execute(GetEmailAccountAction.INSTANCE, getEmailAccountRequest, RestToXContentListener(channel)) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetEmailGroupAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetEmailGroupAction.kt deleted file mode 100644 index 7fe37c17c..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetEmailGroupAction.kt +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.action.GetEmailGroupAction -import org.opensearch.alerting.action.GetEmailGroupRequest -import org.opensearch.alerting.util.context -import org.opensearch.client.node.NodeClient -import org.opensearch.rest.BaseRestHandler -import org.opensearch.rest.RestHandler.ReplacedRoute -import org.opensearch.rest.RestHandler.Route -import org.opensearch.rest.RestRequest -import org.opensearch.rest.action.RestActions -import org.opensearch.rest.action.RestToXContentListener -import org.opensearch.search.fetch.subphase.FetchSourceContext -import java.lang.IllegalArgumentException - -/** - * Rest handlers to retrieve an EmailGroup - */ -class RestGetEmailGroupAction : BaseRestHandler() { - - override fun getName(): String { - return "get_email_group_action" - } - - override fun routes(): List { - return listOf() - } - - override fun replacedRoutes(): MutableList { - return mutableListOf( - ReplacedRoute( - RestRequest.Method.GET, - "${AlertingPlugin.EMAIL_GROUP_BASE_URI}/{emailGroupID}", - RestRequest.Method.GET, - "${AlertingPlugin.LEGACY_OPENDISTRO_EMAIL_GROUP_BASE_URI}/{emailGroupID}" - ), - ReplacedRoute( - RestRequest.Method.HEAD, - "${AlertingPlugin.EMAIL_GROUP_BASE_URI}/{emailGroupID}", - RestRequest.Method.HEAD, - "${AlertingPlugin.LEGACY_OPENDISTRO_EMAIL_GROUP_BASE_URI}/{emailGroupID}" - ) - ) - } - - override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { - val emailGroupID = request.param("emailGroupID") - if (emailGroupID == null || emailGroupID.isEmpty()) { - throw IllegalArgumentException("Missing 
email group ID") - } - - var srcContext = context(request) - if (request.method() == RestRequest.Method.HEAD) { - srcContext = FetchSourceContext.DO_NOT_FETCH_SOURCE - } - - val getEmailGroupRequest = GetEmailGroupRequest(emailGroupID, RestActions.parseVersion(request), request.method(), srcContext) - return RestChannelConsumer { channel -> - client.execute(GetEmailGroupAction.INSTANCE, getEmailGroupRequest, RestToXContentListener(channel)) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetFindingsAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetFindingsAction.kt deleted file mode 100644 index 75607a701..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetFindingsAction.kt +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.apache.logging.log4j.LogManager -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.client.node.NodeClient -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.GetFindingsRequest -import org.opensearch.commons.alerting.model.Table -import org.opensearch.rest.BaseRestHandler -import org.opensearch.rest.BaseRestHandler.RestChannelConsumer -import org.opensearch.rest.RestHandler.Route -import org.opensearch.rest.RestRequest -import org.opensearch.rest.RestRequest.Method.GET -import org.opensearch.rest.action.RestToXContentListener - -/** - * This class consists of the REST handler to search findings . - */ -class RestGetFindingsAction : BaseRestHandler() { - - private val log = LogManager.getLogger(RestGetFindingsAction::class.java) - - override fun getName(): String { - return "get_findings_action" - } - - override fun routes(): List { - return listOf( - Route(GET, "${AlertingPlugin.FINDING_BASE_URI}/_search") - ) - } - - override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { - log.info("${request.method()} ${request.path()}") - - val findingID: String? = request.param("findingId") - val sortString = request.param("sortString", "id") - val sortOrder = request.param("sortOrder", "asc") - val missing: String? 
= request.param("missing") - val size = request.paramAsInt("size", 20) - val startIndex = request.paramAsInt("startIndex", 0) - val searchString = request.param("searchString", "") - - val table = Table( - sortOrder, - sortString, - missing, - size, - startIndex, - searchString - ) - - val getFindingsSearchRequest = GetFindingsRequest( - findingID, - table - ) - return RestChannelConsumer { - channel -> - client.execute(AlertingActions.GET_FINDINGS_ACTION_TYPE, getFindingsSearchRequest, RestToXContentListener(channel)) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetMonitorAction.kt deleted file mode 100644 index 54270b717..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetMonitorAction.kt +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ -package org.opensearch.alerting.resthandler - -import org.apache.logging.log4j.LogManager -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.util.context -import org.opensearch.client.node.NodeClient -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.GetMonitorRequest -import org.opensearch.rest.BaseRestHandler -import org.opensearch.rest.BaseRestHandler.RestChannelConsumer -import org.opensearch.rest.RestHandler.ReplacedRoute -import org.opensearch.rest.RestHandler.Route -import org.opensearch.rest.RestRequest -import org.opensearch.rest.RestRequest.Method.GET -import org.opensearch.rest.RestRequest.Method.HEAD -import org.opensearch.rest.action.RestActions -import org.opensearch.rest.action.RestToXContentListener -import org.opensearch.search.fetch.subphase.FetchSourceContext - -private val log = LogManager.getLogger(RestGetMonitorAction::class.java) - -/** - * This class consists of the REST handler to retrieve a monitor . 
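- *
- * A hypothetical call, assuming the monitor base URI; a specific document version can be pinned
- * via the version parameter that RestActions.parseVersion reads:
- *
- *   GET /_plugins/_alerting/monitors/<monitorID>?version=<docVersion>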
- */ -class RestGetMonitorAction : BaseRestHandler() { - - override fun getName(): String { - return "get_monitor_action" - } - - override fun routes(): List { - return listOf() - } - - override fun replacedRoutes(): MutableList { - return mutableListOf( - // Get a specific monitor - ReplacedRoute( - GET, - "${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}", - GET, - "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/{monitorID}" - ), - ReplacedRoute( - HEAD, - "${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}", - HEAD, - "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/{monitorID}" - ) - ) - } - - override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { - log.debug("${request.method()} ${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}") - - val monitorId = request.param("monitorID") - if (monitorId == null || monitorId.isEmpty()) { - throw IllegalArgumentException("missing id") - } - - var srcContext = context(request) - if (request.method() == HEAD) { - srcContext = FetchSourceContext.DO_NOT_FETCH_SOURCE - } - val getMonitorRequest = GetMonitorRequest(monitorId, RestActions.parseVersion(request), request.method(), srcContext) - return RestChannelConsumer { - channel -> - client.execute(AlertingActions.GET_MONITOR_ACTION_TYPE, getMonitorRequest, RestToXContentListener(channel)) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetWorkflowAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetWorkflowAction.kt deleted file mode 100644 index 1a2ca4426..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetWorkflowAction.kt +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.apache.logging.log4j.LogManager -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.util.context -import org.opensearch.client.node.NodeClient -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.GetWorkflowRequest -import org.opensearch.rest.BaseRestHandler -import org.opensearch.rest.RestHandler -import org.opensearch.rest.RestRequest -import org.opensearch.rest.action.RestToXContentListener -import org.opensearch.search.fetch.subphase.FetchSourceContext - -/** - * This class consists of the REST handler to retrieve a workflow . 
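- *
- * A hypothetical call, assuming the workflow base URI (only the GET route is registered here,
- * although prepareRequest also guards the HEAD case by dropping the fetched source):
- *
- *   GET /_plugins/_alerting/workflows/<workflowID>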
- */ -class RestGetWorkflowAction : BaseRestHandler() { - - private val log = LogManager.getLogger(javaClass) - - override fun getName(): String { - return "get_workflow_action" - } - - override fun routes(): List { - return listOf( - RestHandler.Route( - RestRequest.Method.GET, - "${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}" - ) - ) - } - - override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { - log.debug("${request.method()} ${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}") - - val workflowId = request.param("workflowID") - if (workflowId == null || workflowId.isEmpty()) { - throw IllegalArgumentException("missing id") - } - - var srcContext = context(request) - if (request.method() == RestRequest.Method.HEAD) { - srcContext = FetchSourceContext.DO_NOT_FETCH_SOURCE - } - val getWorkflowRequest = - GetWorkflowRequest(workflowId, request.method()) - return RestChannelConsumer { - channel -> - client.execute(AlertingActions.GET_WORKFLOW_ACTION_TYPE, getWorkflowRequest, RestToXContentListener(channel)) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetWorkflowAlertsAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetWorkflowAlertsAction.kt deleted file mode 100644 index 474c32d4a..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/RestGetWorkflowAlertsAction.kt +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.apache.logging.log4j.LogManager -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.client.node.NodeClient -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.GetWorkflowAlertsRequest -import org.opensearch.commons.alerting.model.Table -import org.opensearch.rest.BaseRestHandler -import org.opensearch.rest.BaseRestHandler.RestChannelConsumer -import org.opensearch.rest.RestHandler.ReplacedRoute -import org.opensearch.rest.RestHandler.Route -import org.opensearch.rest.RestRequest -import org.opensearch.rest.RestRequest.Method.GET -import org.opensearch.rest.action.RestToXContentListener - -/** - * This class consists of the REST handler to retrieve chained alerts by workflow id. - */ -class RestGetWorkflowAlertsAction : BaseRestHandler() { - - private val log = LogManager.getLogger(RestGetWorkflowAlertsAction::class.java) - - override fun getName(): String { - return "get_workflow_alerts_action" - } - - override fun routes(): List { - return mutableListOf( - Route( - GET, - "${AlertingPlugin.WORKFLOW_BASE_URI}/alerts" - ) - ) - } - - override fun replacedRoutes(): MutableList { - return mutableListOf() - } - - override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { - log.debug("${request.method()} ${AlertingPlugin.WORKFLOW_BASE_URI}/alerts") - - val sortString = request.param("sortString", "monitor_name.keyword") - val sortOrder = request.param("sortOrder", "asc") - val missing: String? = request.param("missing") - val size = request.paramAsInt("size", 20) - val startIndex = request.paramAsInt("startIndex", 0) - val searchString = request.param("searchString", "") - val severityLevel = request.param("severityLevel", "ALL") - val alertState = request.param("alertState", "ALL") - val workflowId: String? = request.param("workflowIds") - val alertId: String? 
= request.param("alertIds") - val getAssociatedAlerts: Boolean = request.param("getAssociatedAlerts", "false").toBoolean() - val workflowIds = mutableListOf() - if (workflowId.isNullOrEmpty() == false) { - workflowIds.add(workflowId) - } - val alertIds = mutableListOf() - if (alertId.isNullOrEmpty() == false) { - alertIds.add(alertId) - } - val table = Table( - sortOrder, - sortString, - missing, - size, - startIndex, - searchString - ) - - val getWorkflowAlertsRequest = GetWorkflowAlertsRequest( - table, - severityLevel, - alertState, - alertIndex = null, - associatedAlertsIndex = null, - workflowIds = workflowIds, - monitorIds = emptyList(), - getAssociatedAlerts = getAssociatedAlerts, - alertIds = alertIds - ) - return RestChannelConsumer { channel -> - client.execute(AlertingActions.GET_WORKFLOW_ALERTS_ACTION_TYPE, getWorkflowAlertsRequest, RestToXContentListener(channel)) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt deleted file mode 100644 index 8b1a1f78c..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/RestIndexMonitorAction.kt +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ -package org.opensearch.alerting.resthandler - -import org.apache.logging.log4j.LogManager -import org.opensearch.action.support.WriteRequest -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.util.IF_PRIMARY_TERM -import org.opensearch.alerting.util.IF_SEQ_NO -import org.opensearch.alerting.util.REFRESH -import org.opensearch.client.node.NodeClient -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.IndexMonitorRequest -import org.opensearch.commons.alerting.action.IndexMonitorResponse -import org.opensearch.commons.alerting.model.BucketLevelTrigger -import org.opensearch.commons.alerting.model.DocumentLevelTrigger -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.QueryLevelTrigger -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentParser.Token -import org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken -import org.opensearch.index.seqno.SequenceNumbers -import org.opensearch.rest.BaseRestHandler -import org.opensearch.rest.BaseRestHandler.RestChannelConsumer -import org.opensearch.rest.BytesRestResponse -import org.opensearch.rest.RestChannel -import org.opensearch.rest.RestHandler.ReplacedRoute -import org.opensearch.rest.RestHandler.Route -import org.opensearch.rest.RestRequest -import org.opensearch.rest.RestRequest.Method.POST -import org.opensearch.rest.RestRequest.Method.PUT -import org.opensearch.rest.RestResponse -import org.opensearch.rest.action.RestResponseListener -import java.io.IOException -import java.time.Instant - -private val log = LogManager.getLogger(RestIndexMonitorAction::class.java) - -/** - * Rest handlers to create and update monitors. 
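- *
- * Hypothetical calls, assuming the monitor base URI (ids and parameters are illustrative):
- *
- *   POST /_plugins/_alerting/monitors                // create: responds 201 with a Location header
- *   PUT  /_plugins/_alerting/monitors/<monitorID>    // update: requires an id, responds 200
- *
- * Optimistic concurrency can be requested through the if_seq_no and if_primary_term parameters
- * read in prepareRequest.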
- */ -class RestIndexMonitorAction : BaseRestHandler() { - - override fun getName(): String { - return "index_monitor_action" - } - - override fun routes(): List { - return listOf() - } - - override fun replacedRoutes(): MutableList { - return mutableListOf( - ReplacedRoute( - POST, - AlertingPlugin.MONITOR_BASE_URI, - POST, - AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI - ), - ReplacedRoute( - PUT, - "${AlertingPlugin.MONITOR_BASE_URI}/{monitorID}", - PUT, - "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/{monitorID}" - ) - ) - } - - @Throws(IOException::class) - override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { - log.debug("${request.method()} ${AlertingPlugin.MONITOR_BASE_URI}") - - val id = request.param("monitorID", Monitor.NO_ID) - if (request.method() == PUT && Monitor.NO_ID == id) { - throw IllegalArgumentException("Missing monitor ID") - } - - // Validate request by parsing JSON to Monitor - val xcp = request.contentParser() - ensureExpectedToken(Token.START_OBJECT, xcp.nextToken(), xcp) - val monitor = Monitor.parse(xcp, id).copy(lastUpdateTime = Instant.now()) - val rbacRoles = request.contentParser().map()["rbac_roles"] as List? - - validateDataSources(monitor) - validateOwner(monitor.owner) - val monitorType = monitor.monitorType - val triggers = monitor.triggers - when (monitorType) { - Monitor.MonitorType.QUERY_LEVEL_MONITOR -> { - triggers.forEach { - if (it !is QueryLevelTrigger) { - throw IllegalArgumentException("Illegal trigger type, ${it.javaClass.name}, for query level monitor") - } - } - } - Monitor.MonitorType.BUCKET_LEVEL_MONITOR -> { - triggers.forEach { - if (it !is BucketLevelTrigger) { - throw IllegalArgumentException("Illegal trigger type, ${it.javaClass.name}, for bucket level monitor") - } - } - } - Monitor.MonitorType.CLUSTER_METRICS_MONITOR -> { - triggers.forEach { - if (it !is QueryLevelTrigger) { - throw IllegalArgumentException("Illegal trigger type, ${it.javaClass.name}, for cluster metrics monitor") - } - } - } - Monitor.MonitorType.DOC_LEVEL_MONITOR -> { - triggers.forEach { - if (it !is DocumentLevelTrigger) { - throw IllegalArgumentException("Illegal trigger type, ${it.javaClass.name}, for document level monitor") - } - } - } - } - val seqNo = request.paramAsLong(IF_SEQ_NO, SequenceNumbers.UNASSIGNED_SEQ_NO) - val primaryTerm = request.paramAsLong(IF_PRIMARY_TERM, SequenceNumbers.UNASSIGNED_PRIMARY_TERM) - val refreshPolicy = if (request.hasParam(REFRESH)) { - WriteRequest.RefreshPolicy.parse(request.param(REFRESH)) - } else { - WriteRequest.RefreshPolicy.IMMEDIATE - } - val indexMonitorRequest = IndexMonitorRequest(id, seqNo, primaryTerm, refreshPolicy, request.method(), monitor, rbacRoles) - - return RestChannelConsumer { channel -> - client.execute(AlertingActions.INDEX_MONITOR_ACTION_TYPE, indexMonitorRequest, indexMonitorResponse(channel, request.method())) - } - } - - private fun validateDataSources(monitor: Monitor) { // Data Sources will currently be supported only at transport layer. - if (monitor.dataSources != null) { - if ( - monitor.dataSources.queryIndex != ScheduledJob.DOC_LEVEL_QUERIES_INDEX || - monitor.dataSources.findingsIndex != AlertIndices.FINDING_HISTORY_WRITE_INDEX || - monitor.dataSources.alertsIndex != AlertIndices.ALERT_INDEX - ) { - throw IllegalArgumentException("Custom Data Sources are not allowed.") - } - } - } - - private fun validateOwner(owner: String?) 
{ - if (owner != "alerting") { - throw IllegalArgumentException("Invalid owner field") - } - } - - private fun indexMonitorResponse(channel: RestChannel, restMethod: RestRequest.Method): - RestResponseListener { - return object : RestResponseListener(channel) { - @Throws(Exception::class) - override fun buildResponse(response: IndexMonitorResponse): RestResponse { - var returnStatus = RestStatus.CREATED - if (restMethod == RestRequest.Method.PUT) { - returnStatus = RestStatus.OK - } - - val restResponse = BytesRestResponse(returnStatus, response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS)) - if (returnStatus == RestStatus.CREATED) { - val location = "${AlertingPlugin.MONITOR_BASE_URI}/${response.id}" - restResponse.addHeader("Location", location) - } - return restResponse - } - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestIndexWorkflowAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestIndexWorkflowAction.kt deleted file mode 100644 index d631ed710..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/RestIndexWorkflowAction.kt +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ -package org.opensearch.alerting.resthandler - -import org.opensearch.action.support.WriteRequest -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.util.AlertingException -import org.opensearch.alerting.util.IF_PRIMARY_TERM -import org.opensearch.alerting.util.IF_SEQ_NO -import org.opensearch.alerting.util.REFRESH -import org.opensearch.client.node.NodeClient -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.IndexWorkflowRequest -import org.opensearch.commons.alerting.action.IndexWorkflowResponse -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils -import org.opensearch.index.seqno.SequenceNumbers -import org.opensearch.rest.BaseRestHandler -import org.opensearch.rest.BaseRestHandler.RestChannelConsumer -import org.opensearch.rest.BytesRestResponse -import org.opensearch.rest.RestChannel -import org.opensearch.rest.RestHandler -import org.opensearch.rest.RestRequest -import org.opensearch.rest.RestResponse -import org.opensearch.rest.action.RestResponseListener -import java.io.IOException -import java.time.Instant - -/** - * Rest handlers to create and update workflows. 
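For readability, the per-type trigger validation above can be condensed into a single mapping; a sketch under the same commons-alerting models (requireTriggerTypes is hypothetical, not plugin code):

    import org.opensearch.commons.alerting.model.BucketLevelTrigger
    import org.opensearch.commons.alerting.model.DocumentLevelTrigger
    import org.opensearch.commons.alerting.model.Monitor
    import org.opensearch.commons.alerting.model.QueryLevelTrigger

    // Hypothetical condensation of the when-block above: each monitor type
    // accepts exactly one trigger class (cluster metrics reuse query-level).
    fun requireTriggerTypes(monitor: Monitor) {
        val expected = when (monitor.monitorType) {
            Monitor.MonitorType.QUERY_LEVEL_MONITOR,
            Monitor.MonitorType.CLUSTER_METRICS_MONITOR -> QueryLevelTrigger::class
            Monitor.MonitorType.BUCKET_LEVEL_MONITOR -> BucketLevelTrigger::class
            Monitor.MonitorType.DOC_LEVEL_MONITOR -> DocumentLevelTrigger::class
        }
        monitor.triggers.forEach { trigger ->
            require(expected.isInstance(trigger)) {
                "Illegal trigger type, ${trigger.javaClass.name}, for ${monitor.monitorType}"
            }
        }
    }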
- */ -class RestIndexWorkflowAction : BaseRestHandler() { - - override fun getName(): String { - return "index_workflow_action" - } - - override fun routes(): List { - return listOf( - RestHandler.Route(RestRequest.Method.POST, AlertingPlugin.WORKFLOW_BASE_URI), - RestHandler.Route( - RestRequest.Method.PUT, - "${AlertingPlugin.WORKFLOW_BASE_URI}/{workflowID}" - ) - ) - } - - @Throws(IOException::class) - override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { - val id = request.param("workflowID", Workflow.NO_ID) - if (request.method() == RestRequest.Method.PUT && Workflow.NO_ID == id) { - throw AlertingException.wrap(IllegalArgumentException("Missing workflow ID")) - } - - // Validate request by parsing JSON to Monitor - val xcp = request.contentParser() - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - val workflow = Workflow.parse(xcp, id).copy(lastUpdateTime = Instant.now()) - val rbacRoles = request.contentParser().map()["rbac_roles"] as List? - - val seqNo = request.paramAsLong(IF_SEQ_NO, SequenceNumbers.UNASSIGNED_SEQ_NO) - val primaryTerm = request.paramAsLong(IF_PRIMARY_TERM, SequenceNumbers.UNASSIGNED_PRIMARY_TERM) - val refreshPolicy = if (request.hasParam(REFRESH)) { - WriteRequest.RefreshPolicy.parse(request.param(REFRESH)) - } else { - WriteRequest.RefreshPolicy.IMMEDIATE - } - val workflowRequest = - IndexWorkflowRequest(id, seqNo, primaryTerm, refreshPolicy, request.method(), workflow, rbacRoles) - - return RestChannelConsumer { channel -> - client.execute(AlertingActions.INDEX_WORKFLOW_ACTION_TYPE, workflowRequest, indexMonitorResponse(channel, request.method())) - } - } - - private fun indexMonitorResponse(channel: RestChannel, restMethod: RestRequest.Method): RestResponseListener { - return object : RestResponseListener(channel) { - @Throws(Exception::class) - override fun buildResponse(response: IndexWorkflowResponse): RestResponse { - var returnStatus = RestStatus.CREATED - if (restMethod == RestRequest.Method.PUT) - returnStatus = RestStatus.OK - - val restResponse = - BytesRestResponse(returnStatus, response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS)) - if (returnStatus == RestStatus.CREATED) { - val location = "${AlertingPlugin.WORKFLOW_BASE_URI}/${response.id}" - restResponse.addHeader("Location", location) - } - return restResponse - } - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchEmailAccountAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchEmailAccountAction.kt deleted file mode 100644 index 1d2224e3b..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchEmailAccountAction.kt +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.action.SearchEmailAccountAction -import org.opensearch.alerting.model.destination.email.EmailAccount -import org.opensearch.alerting.util.context -import org.opensearch.client.node.NodeClient -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentFactory.jsonBuilder -import org.opensearch.common.xcontent.XContentType -import 
org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX -import org.opensearch.core.common.bytes.BytesReference -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS -import org.opensearch.index.query.QueryBuilders -import org.opensearch.rest.BaseRestHandler -import org.opensearch.rest.BytesRestResponse -import org.opensearch.rest.RestChannel -import org.opensearch.rest.RestHandler.ReplacedRoute -import org.opensearch.rest.RestHandler.Route -import org.opensearch.rest.RestRequest -import org.opensearch.rest.RestResponse -import org.opensearch.rest.action.RestResponseListener -import org.opensearch.search.builder.SearchSourceBuilder -import java.io.IOException - -/** - * Rest handlers to search for EmailAccount - */ -class RestSearchEmailAccountAction : BaseRestHandler() { - - override fun getName(): String { - return "search_email_account_action" - } - - override fun routes(): List { - return listOf() - } - - override fun replacedRoutes(): MutableList { - return mutableListOf( - ReplacedRoute( - RestRequest.Method.POST, - "${AlertingPlugin.EMAIL_ACCOUNT_BASE_URI}/_search", - RestRequest.Method.POST, - "${AlertingPlugin.LEGACY_OPENDISTRO_EMAIL_ACCOUNT_BASE_URI}/_search" - ), - ReplacedRoute( - RestRequest.Method.GET, - "${AlertingPlugin.EMAIL_ACCOUNT_BASE_URI}/_search", - RestRequest.Method.GET, - "${AlertingPlugin.LEGACY_OPENDISTRO_EMAIL_ACCOUNT_BASE_URI}/_search" - ) - ) - } - - @Throws(IOException::class) - override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { - val searchSourceBuilder = SearchSourceBuilder() - searchSourceBuilder.parseXContent(request.contentOrSourceParamParser()) - searchSourceBuilder.fetchSource(context(request)) - - // An exists query is added on top of the user's query to ensure that only documents of email_account type - // are searched - searchSourceBuilder.query( - QueryBuilders.boolQuery().must(searchSourceBuilder.query()) - .filter(QueryBuilders.existsQuery(EmailAccount.EMAIL_ACCOUNT_TYPE)) - ) - .seqNoAndPrimaryTerm(true) - val searchRequest = SearchRequest() - .source(searchSourceBuilder) - .indices(SCHEDULED_JOBS_INDEX) - return RestChannelConsumer { channel -> - client.execute(SearchEmailAccountAction.INSTANCE, searchRequest, searchEmailAccountResponse(channel)) - } - } - - private fun searchEmailAccountResponse(channel: RestChannel): RestResponseListener { - return object : RestResponseListener(channel) { - @Throws(Exception::class) - override fun buildResponse(response: SearchResponse): RestResponse { - if (response.isTimedOut) { - return BytesRestResponse(RestStatus.REQUEST_TIMEOUT, response.toString()) - } - - for (hit in response.hits) { - XContentType.JSON.xContent().createParser( - channel.request().xContentRegistry, - LoggingDeprecationHandler.INSTANCE, - hit.sourceAsString - ).use { hitsParser -> - val emailAccount = EmailAccount.parseWithType(hitsParser, hit.id, hit.version) - val xcb = emailAccount.toXContent(jsonBuilder(), EMPTY_PARAMS) - hit.sourceRef(BytesReference.bytes(xcb)) - } - } - return BytesRestResponse(RestStatus.OK, response.toXContent(channel.newBuilder(), EMPTY_PARAMS)) - } - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchEmailGroupAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchEmailGroupAction.kt deleted file mode 100644 index e29dd8872..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchEmailGroupAction.kt +++ /dev/null 
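The query-scoping step above is the part worth remembering: the caller's query is kept intact and a filter narrows it to a single document type. A minimal sketch, assuming only the standard query builders:

    import org.opensearch.index.query.BoolQueryBuilder
    import org.opensearch.index.query.QueryBuilder
    import org.opensearch.index.query.QueryBuilders

    // Sketch: constrain an arbitrary user query to documents carrying the
    // given type field (e.g. the email_account marker used above).
    fun scopeToType(userQuery: QueryBuilder, typeField: String): BoolQueryBuilder =
        QueryBuilders.boolQuery()
            .must(userQuery)
            .filter(QueryBuilders.existsQuery(typeField))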
@@ -1,109 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.action.SearchEmailGroupAction -import org.opensearch.alerting.model.destination.email.EmailGroup -import org.opensearch.alerting.util.context -import org.opensearch.client.node.NodeClient -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentFactory -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX -import org.opensearch.core.common.bytes.BytesReference -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.index.query.QueryBuilders -import org.opensearch.rest.BaseRestHandler -import org.opensearch.rest.BaseRestHandler.RestChannelConsumer -import org.opensearch.rest.BytesRestResponse -import org.opensearch.rest.RestChannel -import org.opensearch.rest.RestHandler.ReplacedRoute -import org.opensearch.rest.RestHandler.Route -import org.opensearch.rest.RestRequest -import org.opensearch.rest.RestResponse -import org.opensearch.rest.action.RestResponseListener -import org.opensearch.search.builder.SearchSourceBuilder -import java.io.IOException - -/** - * Rest handlers to search for EmailGroup - */ -class RestSearchEmailGroupAction : BaseRestHandler() { - - override fun getName(): String { - return "search_email_group_action" - } - - override fun routes(): List { - return listOf() - } - - override fun replacedRoutes(): MutableList { - return mutableListOf( - ReplacedRoute( - RestRequest.Method.POST, - "${AlertingPlugin.EMAIL_GROUP_BASE_URI}/_search", - RestRequest.Method.POST, - "${AlertingPlugin.LEGACY_OPENDISTRO_EMAIL_GROUP_BASE_URI}/_search" - ), - ReplacedRoute( - RestRequest.Method.GET, - "${AlertingPlugin.EMAIL_GROUP_BASE_URI}/_search", - RestRequest.Method.GET, - "${AlertingPlugin.LEGACY_OPENDISTRO_EMAIL_GROUP_BASE_URI}/_search" - ) - ) - } - - @Throws(IOException::class) - override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { - val searchSourceBuilder = SearchSourceBuilder() - searchSourceBuilder.parseXContent(request.contentOrSourceParamParser()) - searchSourceBuilder.fetchSource(context(request)) - - // An exists query is added on top of the user's query to ensure that only documents of email_group type - // are searched - searchSourceBuilder.query( - QueryBuilders.boolQuery().must(searchSourceBuilder.query()) - .filter(QueryBuilders.existsQuery(EmailGroup.EMAIL_GROUP_TYPE)) - ) - .seqNoAndPrimaryTerm(true) - val searchRequest = SearchRequest() - .source(searchSourceBuilder) - .indices(SCHEDULED_JOBS_INDEX) - return RestChannelConsumer { channel -> - client.execute(SearchEmailGroupAction.INSTANCE, searchRequest, searchEmailGroupResponse(channel)) - } - } - - private fun searchEmailGroupResponse(channel: RestChannel): RestResponseListener { - return object : RestResponseListener(channel) { - @Throws(Exception::class) - override fun buildResponse(response: SearchResponse): RestResponse { - if (response.isTimedOut) { - return BytesRestResponse(RestStatus.REQUEST_TIMEOUT, response.toString()) - } - - for (hit in response.hits) { - XContentType.JSON.xContent().createParser( - channel.request().xContentRegistry, - 
LoggingDeprecationHandler.INSTANCE, - hit.sourceAsString - ).use { hitsParser -> - val emailGroup = EmailGroup.parseWithType(hitsParser, hit.id, hit.version) - val xcb = emailGroup.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS) - hit.sourceRef(BytesReference.bytes(xcb)) - } - } - return BytesRestResponse(RestStatus.OK, response.toXContent(channel.newBuilder(), ToXContent.EMPTY_PARAMS)) - } - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt deleted file mode 100644 index 1bf51678e..000000000 --- a/alerting/bin/main/org/opensearch/alerting/resthandler/RestSearchMonitorAction.kt +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.apache.logging.log4j.LogManager -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.alerts.AlertIndices.Companion.ALL_ALERT_INDEX_PATTERN -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.util.context -import org.opensearch.client.node.NodeClient -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentFactory.jsonBuilder -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.SearchMonitorRequest -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX -import org.opensearch.core.common.bytes.BytesReference -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS -import org.opensearch.rest.BaseRestHandler -import org.opensearch.rest.BaseRestHandler.RestChannelConsumer -import org.opensearch.rest.BytesRestResponse -import org.opensearch.rest.RestChannel -import org.opensearch.rest.RestHandler.ReplacedRoute -import org.opensearch.rest.RestHandler.Route -import org.opensearch.rest.RestRequest -import org.opensearch.rest.RestRequest.Method.GET -import org.opensearch.rest.RestRequest.Method.POST -import org.opensearch.rest.RestResponse -import org.opensearch.rest.action.RestResponseListener -import org.opensearch.search.builder.SearchSourceBuilder -import java.io.IOException - -private val log = LogManager.getLogger(RestSearchMonitorAction::class.java) - -/** - * Rest handlers to search for monitors. 
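One convention repeats across these handlers: routes() is left empty and every path is declared through replacedRoutes(), which keeps the legacy opendistro URI serving alongside the current one (requests to the old path are still answered but flagged as deprecated). A representative sketch of the pair, using the monitor search paths shown below:

    // Sketch of the legacy-route migration pattern used by these handlers.
    override fun routes(): List<Route> = listOf()

    override fun replacedRoutes(): MutableList<ReplacedRoute> = mutableListOf(
        ReplacedRoute(
            RestRequest.Method.GET,
            "${AlertingPlugin.MONITOR_BASE_URI}/_search",                  // current path
            RestRequest.Method.GET,
            "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/_search" // deprecated path
        )
    )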
- * TODO: Deprecate API for a set of new APIs that will support this APIs use cases - */ -class RestSearchMonitorAction( - val settings: Settings, - clusterService: ClusterService -) : BaseRestHandler() { - - @Volatile private var filterBy = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) - - init { - clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.FILTER_BY_BACKEND_ROLES) { filterBy = it } - } - - override fun getName(): String { - return "search_monitor_action" - } - - override fun routes(): List { - return listOf() - } - - override fun replacedRoutes(): MutableList { - return mutableListOf( - // Search for monitors - ReplacedRoute( - POST, - "${AlertingPlugin.MONITOR_BASE_URI}/_search", - POST, - "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/_search" - ), - ReplacedRoute( - GET, - "${AlertingPlugin.MONITOR_BASE_URI}/_search", - GET, - "${AlertingPlugin.LEGACY_OPENDISTRO_MONITOR_BASE_URI}/_search" - ) - ) - } - - @Throws(IOException::class) - override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer { - log.debug("${request.method()} ${AlertingPlugin.MONITOR_BASE_URI}/_search") - - val index = request.param("index", SCHEDULED_JOBS_INDEX) - if (index != SCHEDULED_JOBS_INDEX && index != ALL_ALERT_INDEX_PATTERN) { - throw IllegalArgumentException("Invalid index name.") - } - - val searchSourceBuilder = SearchSourceBuilder() - searchSourceBuilder.parseXContent(request.contentOrSourceParamParser()) - searchSourceBuilder.fetchSource(context(request)) - - val searchRequest = SearchRequest() - .source(searchSourceBuilder) - .indices(index) - - val searchMonitorRequest = SearchMonitorRequest(searchRequest) - return RestChannelConsumer { channel -> - client.execute(AlertingActions.SEARCH_MONITORS_ACTION_TYPE, searchMonitorRequest, searchMonitorResponse(channel)) - } - } - - private fun searchMonitorResponse(channel: RestChannel): RestResponseListener { - return object : RestResponseListener(channel) { - @Throws(Exception::class) - override fun buildResponse(response: SearchResponse): RestResponse { - if (response.isTimedOut) { - return BytesRestResponse(RestStatus.REQUEST_TIMEOUT, response.toString()) - } - - // Swallow exception and return response as is - try { - for (hit in response.hits) { - XContentType.JSON.xContent().createParser( - channel.request().xContentRegistry, - LoggingDeprecationHandler.INSTANCE, - hit.sourceAsString - ).use { hitsParser -> - val monitor = ScheduledJob.parse(hitsParser, hit.id, hit.version) - val xcb = monitor.toXContent(jsonBuilder(), EMPTY_PARAMS) - hit.sourceRef(BytesReference.bytes(xcb)) - } - } - } catch (e: Exception) { - log.info("The monitor parsing failed. 
Will return response as is.") - } - return BytesRestResponse(RestStatus.OK, response.toXContent(channel.newBuilder(), EMPTY_PARAMS)) - } - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/script/BucketLevelTriggerExecutionContext.kt b/alerting/bin/main/org/opensearch/alerting/script/BucketLevelTriggerExecutionContext.kt deleted file mode 100644 index 72518ed48..000000000 --- a/alerting/bin/main/org/opensearch/alerting/script/BucketLevelTriggerExecutionContext.kt +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.script - -import org.opensearch.alerting.model.BucketLevelTriggerRunResult -import org.opensearch.alerting.model.MonitorRunResult -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.BucketLevelTrigger -import org.opensearch.commons.alerting.model.Monitor -import java.time.Instant - -data class BucketLevelTriggerExecutionContext( - override val monitor: Monitor, - val trigger: BucketLevelTrigger, - override val results: List>, - override val periodStart: Instant, - override val periodEnd: Instant, - val dedupedAlerts: List = listOf(), - val newAlerts: List = listOf(), - val completedAlerts: List = listOf(), - override val error: Exception? = null -) : TriggerExecutionContext(monitor, results, periodStart, periodEnd, error) { - - constructor( - monitor: Monitor, - trigger: BucketLevelTrigger, - monitorRunResult: MonitorRunResult, - dedupedAlerts: List = listOf(), - newAlerts: List = listOf(), - completedAlerts: List = listOf() - ) : this( - monitor, trigger, monitorRunResult.inputResults.results, monitorRunResult.periodStart, monitorRunResult.periodEnd, - dedupedAlerts, newAlerts, completedAlerts, monitorRunResult.scriptContextError(trigger) - ) - - /** - * Mustache templates need special permissions to reflectively introspect field names. To avoid doing this we - * translate the context to a Map of Strings to primitive types, which can be accessed without reflection. - */ - override fun asTemplateArg(): Map { - val tempArg = super.asTemplateArg().toMutableMap() - tempArg["trigger"] = trigger.asTemplateArg() - tempArg["dedupedAlerts"] = dedupedAlerts.map { it.asTemplateArg() } - tempArg["newAlerts"] = newAlerts.map { it.asTemplateArg() } - tempArg["completedAlerts"] = completedAlerts.map { it.asTemplateArg() } - return tempArg - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/script/ChainedAlertTriggerExecutionContext.kt b/alerting/bin/main/org/opensearch/alerting/script/ChainedAlertTriggerExecutionContext.kt deleted file mode 100644 index d4bf4cb59..000000000 --- a/alerting/bin/main/org/opensearch/alerting/script/ChainedAlertTriggerExecutionContext.kt +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.script - -import org.opensearch.alerting.model.WorkflowRunResult -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.ChainedAlertTrigger -import org.opensearch.commons.alerting.model.Workflow -import java.time.Instant - -data class ChainedAlertTriggerExecutionContext( - val workflow: Workflow, - val workflowRunResult: WorkflowRunResult, - val periodStart: Instant, - val periodEnd: Instant?, - val error: Exception? = null, - val trigger: ChainedAlertTrigger, - val alertGeneratingMonitors: Set, - val monitorIdToAlertIdsMap: Map>, - val alert: Alert? 
= null -) { - - /** - * Mustache templates need special permissions to reflectively introspect field names. To avoid doing this we - * translate the context to a Map of Strings to primitive types, which can be accessed without reflection. - */ - open fun asTemplateArg(): Map { - return mapOf( - "monitor" to workflow.asTemplateArg(), - "results" to workflowRunResult, - "periodStart" to periodStart, - "error" to error, - "alertGeneratingMonitors" to alertGeneratingMonitors, - "monitorIdToAlertIdsMap" to monitorIdToAlertIdsMap - ) - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/script/DocumentLevelTriggerExecutionContext.kt b/alerting/bin/main/org/opensearch/alerting/script/DocumentLevelTriggerExecutionContext.kt deleted file mode 100644 index 66de731f6..000000000 --- a/alerting/bin/main/org/opensearch/alerting/script/DocumentLevelTriggerExecutionContext.kt +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.script - -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.DocumentLevelTrigger -import org.opensearch.commons.alerting.model.Monitor -import java.time.Instant - -data class DocumentLevelTriggerExecutionContext( - override val monitor: Monitor, - val trigger: DocumentLevelTrigger, - override val results: List>, - override val periodStart: Instant, - override val periodEnd: Instant, - val alerts: List = listOf(), - val triggeredDocs: List, - val relatedFindings: List, - override val error: Exception? = null -) : TriggerExecutionContext(monitor, results, periodStart, periodEnd, error) { - - constructor( - monitor: Monitor, - trigger: DocumentLevelTrigger, - alerts: List = listOf() - ) : this( - monitor, trigger, emptyList(), Instant.now(), Instant.now(), - alerts, emptyList(), emptyList(), null - ) - - /** - * Mustache templates need special permissions to reflectively introspect field names. To avoid doing this we - * translate the context to a Map of Strings to primitive types, which can be accessed without reflection. - */ - override fun asTemplateArg(): Map { - val tempArg = super.asTemplateArg().toMutableMap() - tempArg["trigger"] = trigger.asTemplateArg() - tempArg["alerts"] = alerts.map { it.asTemplateArg() } - return tempArg - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/script/QueryLevelTriggerExecutionContext.kt b/alerting/bin/main/org/opensearch/alerting/script/QueryLevelTriggerExecutionContext.kt deleted file mode 100644 index 729aa18d0..000000000 --- a/alerting/bin/main/org/opensearch/alerting/script/QueryLevelTriggerExecutionContext.kt +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.script - -import org.opensearch.alerting.model.MonitorRunResult -import org.opensearch.alerting.model.QueryLevelTriggerRunResult -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.QueryLevelTrigger -import java.time.Instant - -data class QueryLevelTriggerExecutionContext( - override val monitor: Monitor, - val trigger: QueryLevelTrigger, - override val results: List>, - override val periodStart: Instant, - override val periodEnd: Instant, - val alert: Alert? = null, - override val error: Exception? 
= null -) : TriggerExecutionContext(monitor, results, periodStart, periodEnd, error) { - - constructor( - monitor: Monitor, - trigger: QueryLevelTrigger, - monitorRunResult: MonitorRunResult, - alert: Alert? = null - ) : this( - monitor, - trigger, - monitorRunResult.inputResults.results, - monitorRunResult.periodStart, - monitorRunResult.periodEnd, - alert, - monitorRunResult.scriptContextError(trigger) - ) - - /** - * Mustache templates need special permissions to reflectively introspect field names. To avoid doing this we - * translate the context to a Map of Strings to primitive types, which can be accessed without reflection. - */ - override fun asTemplateArg(): Map { - val tempArg = super.asTemplateArg().toMutableMap() - tempArg["trigger"] = trigger.asTemplateArg() - tempArg["alert"] = alert?.asTemplateArg() - return tempArg - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/script/TriggerExecutionContext.kt b/alerting/bin/main/org/opensearch/alerting/script/TriggerExecutionContext.kt deleted file mode 100644 index 1f5bd8be5..000000000 --- a/alerting/bin/main/org/opensearch/alerting/script/TriggerExecutionContext.kt +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.script - -import org.opensearch.alerting.model.MonitorRunResult -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.Trigger -import java.time.Instant - -abstract class TriggerExecutionContext( - open val monitor: Monitor, - open val results: List>, - open val periodStart: Instant, - open val periodEnd: Instant, - open val error: Exception? = null -) { - - constructor(monitor: Monitor, trigger: Trigger, monitorRunResult: MonitorRunResult<*>) : - this( - monitor, - monitorRunResult.inputResults.results, - monitorRunResult.periodStart, - monitorRunResult.periodEnd, - monitorRunResult.scriptContextError(trigger) - ) - - /** - * Mustache templates need special permissions to reflectively introspect field names. To avoid doing this we - * translate the context to a Map of Strings to primitive types, which can be accessed without reflection. - */ - open fun asTemplateArg(): Map { - return mapOf( - "monitor" to monitor.asTemplateArg(), - "results" to results, - "periodStart" to periodStart, - "periodEnd" to periodEnd, - "error" to error - ) - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/script/TriggerScript.kt b/alerting/bin/main/org/opensearch/alerting/script/TriggerScript.kt deleted file mode 100644 index f6ed1ec6a..000000000 --- a/alerting/bin/main/org/opensearch/alerting/script/TriggerScript.kt +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.script - -import org.opensearch.script.Script -import org.opensearch.script.ScriptContext - -abstract class TriggerScript(_scriptParams: Map) { - - /** - * [scriptParams] are the [user-defined parameters][Script.getParams] specified in the script definition. - * The [scriptParams] are defined when the script is compiled and DON'T change every time the script executes. This field - * is named **script**Params to avoid confusion with the [PARAMETERS] field. However to remain consistent with every other - * painless script context we surface it to the painless script as just `params` using a custom getter name. 
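To make that contract concrete: the compiled condition sees only the flattened map produced by asTemplateArg(), surfaced as the single ctx argument. A hypothetical Kotlin-side reader of the same map, for illustration only (the "name" key is assumed to be present in Monitor.asTemplateArg()):

    // Hypothetical consumer: everything a condition can reference is plain
    // maps, lists, and primitives produced by asTemplateArg(), never live
    // model objects (hence no reflection permissions are needed).
    fun summarize(ctx: TriggerExecutionContext): String {
        val arg = ctx.asTemplateArg()
        val monitor = arg["monitor"] as Map<*, *>
        return "monitor=${monitor["name"]} window=${arg["periodStart"]}..${arg["periodEnd"]}"
    }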
- */ - val scriptParams: Map = _scriptParams - @JvmName("getParams") - get - - companion object { - /** - * [PARAMETERS] contains the names of the formal arguments to the [execute] method which define the - * script's execution context. These argument names (`_results` etc.) are available as named parameters - * in the painless script. These arguments passed to the [execute] method change every time the trigger is executed. - * In a sane world this would have been named `ARGUMENTS` to avoid confusing the hell out of everyone who has to - * work with this code. - */ - @JvmField val PARAMETERS = arrayOf("ctx") - - val CONTEXT = ScriptContext("trigger", Factory::class.java) - } - - /** - * Run a trigger script with the given context. - * - * @param ctx - the trigger execution context - */ - abstract fun execute(ctx: QueryLevelTriggerExecutionContext): Boolean - - interface Factory { - fun newInstance(scriptParams: Map): TriggerScript - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/service/DeleteMonitorService.kt b/alerting/bin/main/org/opensearch/alerting/service/DeleteMonitorService.kt deleted file mode 100644 index b26ae2473..000000000 --- a/alerting/bin/main/org/opensearch/alerting/service/DeleteMonitorService.kt +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.service - -import kotlinx.coroutines.CoroutineName -import kotlinx.coroutines.CoroutineScope -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.SupervisorJob -import org.apache.logging.log4j.LogManager -import org.apache.lucene.search.join.ScoreMode -import org.opensearch.action.admin.indices.delete.DeleteIndexRequest -import org.opensearch.action.admin.indices.exists.indices.IndicesExistsRequest -import org.opensearch.action.admin.indices.exists.indices.IndicesExistsResponse -import org.opensearch.action.delete.DeleteRequest -import org.opensearch.action.delete.DeleteResponse -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.action.support.IndicesOptions -import org.opensearch.action.support.WriteRequest.RefreshPolicy -import org.opensearch.action.support.master.AcknowledgedResponse -import org.opensearch.alerting.MonitorMetadataService -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.util.AlertingException -import org.opensearch.alerting.util.ScheduledJobUtils.Companion.WORKFLOW_DELEGATE_PATH -import org.opensearch.alerting.util.ScheduledJobUtils.Companion.WORKFLOW_MONITOR_PATH -import org.opensearch.client.Client -import org.opensearch.commons.alerting.action.DeleteMonitorResponse -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.core.action.ActionListener -import org.opensearch.index.query.QueryBuilders -import org.opensearch.index.reindex.BulkByScrollResponse -import org.opensearch.index.reindex.DeleteByQueryAction -import org.opensearch.index.reindex.DeleteByQueryRequestBuilder -import org.opensearch.search.builder.SearchSourceBuilder -import kotlin.coroutines.resume -import kotlin.coroutines.resumeWithException -import kotlin.coroutines.suspendCoroutine - -/** - * Component used when deleting the monitors - */ -object DeleteMonitorService : - CoroutineScope by CoroutineScope(SupervisorJob() + Dispatchers.Default + CoroutineName("WorkflowMetadataService")) { - private val log = 
LogManager.getLogger(this.javaClass) - - private lateinit var client: Client - - fun initialize( - client: Client, - ) { - DeleteMonitorService.client = client - } - - /** - * Deletes the monitor, docLevelQueries and monitor metadata - * @param monitor monitor to be deleted - * @param refreshPolicy - */ - suspend fun deleteMonitor(monitor: Monitor, refreshPolicy: RefreshPolicy): DeleteMonitorResponse { - val deleteResponse = deleteMonitor(monitor.id, refreshPolicy) - deleteDocLevelMonitorQueriesAndIndices(monitor) - deleteMetadata(monitor) - return DeleteMonitorResponse(deleteResponse.id, deleteResponse.version) - } - - private suspend fun deleteMonitor(monitorId: String, refreshPolicy: RefreshPolicy): DeleteResponse { - val deleteMonitorRequest = DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, monitorId) - .setRefreshPolicy(refreshPolicy) - return client.suspendUntil { delete(deleteMonitorRequest, it) } - } - - private suspend fun deleteMetadata(monitor: Monitor) { - val deleteRequest = DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, "${monitor.id}-metadata") - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) - try { - val deleteResponse: DeleteResponse = client.suspendUntil { delete(deleteRequest, it) } - log.debug("Monitor metadata: ${deleteResponse.id} deletion result: ${deleteResponse.result}") - } catch (e: Exception) { - // we only log the error and don't fail the request because if monitor document has been deleted, - // we cannot retry based on this failure - log.error("Failed to delete monitor metadata ${deleteRequest.id()}.", e) - } - } - - private suspend fun deleteDocLevelMonitorQueriesAndIndices(monitor: Monitor) { - try { - val metadata = MonitorMetadataService.getMetadata(monitor) - metadata?.sourceToQueryIndexMapping?.forEach { (_, queryIndex) -> - - val indicesExistsResponse: IndicesExistsResponse = - client.suspendUntil { - client.admin().indices().exists(IndicesExistsRequest(queryIndex), it) - } - if (indicesExistsResponse.isExists == false) { - return - } - // Check if there's any queries from other monitors in this queryIndex, - // to avoid unnecessary doc deletion, if we could just delete index completely - val searchResponse: SearchResponse = client.suspendUntil { - search( - SearchRequest(queryIndex).source( - SearchSourceBuilder() - .size(0) - .query( - QueryBuilders.boolQuery().mustNot( - QueryBuilders.matchQuery("monitor_id", monitor.id) - ) - ) - ).indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_HIDDEN), - it - ) - } - if (searchResponse.hits.totalHits.value == 0L) { - val ack: AcknowledgedResponse = client.suspendUntil { - client.admin().indices().delete( - DeleteIndexRequest(queryIndex).indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_HIDDEN), - it - ) - } - if (ack.isAcknowledged == false) { - log.error("Deletion of concrete queryIndex:$queryIndex is not ack'd!") - } - } else { - // Delete all queries added by this monitor - val response: BulkByScrollResponse = suspendCoroutine { cont -> - DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE) - .source(queryIndex) - .filter(QueryBuilders.matchQuery("monitor_id", monitor.id)) - .refresh(true) - .execute( - object : ActionListener { - override fun onResponse(response: BulkByScrollResponse) = cont.resume(response) - override fun onFailure(t: Exception) = cont.resumeWithException(t) - } - ) - } - } - } - } catch (e: Exception) { - // we only log the error and don't fail the request because if monitor document has been deleted successfully, - // we cannot retry based on this failure - log.error("Failed 
to delete doc level queries from query index.", e) - } - } - - /** - * Checks if the monitor is part of the workflow - * - * @param monitorId id of monitor that is checked if it is a workflow delegate - */ - suspend fun monitorIsWorkflowDelegate(monitorId: String): Boolean { - val queryBuilder = QueryBuilders.nestedQuery( - WORKFLOW_DELEGATE_PATH, - QueryBuilders.boolQuery().must( - QueryBuilders.matchQuery( - WORKFLOW_MONITOR_PATH, - monitorId - ) - ), - ScoreMode.None - ) - try { - val searchRequest = SearchRequest() - .indices(ScheduledJob.SCHEDULED_JOBS_INDEX) - .source(SearchSourceBuilder().query(queryBuilder)) - - client.threadPool().threadContext.stashContext().use { - val searchResponse: SearchResponse = client.suspendUntil { search(searchRequest, it) } - if (searchResponse.hits.totalHits?.value == 0L) { - return false - } - - val workflowIds = searchResponse.hits.hits.map { it.id }.joinToString() - log.info("Monitor $monitorId can't be deleted since it belongs to $workflowIds") - return true - } - } catch (ex: Exception) { - log.error("Error getting the monitor workflows", ex) - throw AlertingException.wrap(ex) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/settings/AlertingSettings.kt b/alerting/bin/main/org/opensearch/alerting/settings/AlertingSettings.kt deleted file mode 100644 index 7dd90b106..000000000 --- a/alerting/bin/main/org/opensearch/alerting/settings/AlertingSettings.kt +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.settings - -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.common.settings.Setting -import org.opensearch.common.unit.TimeValue -import java.util.concurrent.TimeUnit - -/** - * settings specific to [AlertingPlugin]. These settings include things like history index max age, request timeout, etc... 
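The service above repeats one idiom that deserves a callout: every listener-based client call is adapted to a coroutine by resuming a continuation from the ActionListener, exactly as in the DeleteByQuery block. A minimal sketch of that bridge (the plugin's own suspendUntil extension works along these lines):

    import org.opensearch.core.action.ActionListener
    import kotlin.coroutines.resume
    import kotlin.coroutines.resumeWithException
    import kotlin.coroutines.suspendCoroutine

    // Sketch: run a listener-based call and suspend until it completes.
    suspend fun <T> awaitListener(block: (ActionListener<T>) -> Unit): T =
        suspendCoroutine { cont ->
            block(object : ActionListener<T> {
                override fun onResponse(response: T) = cont.resume(response)
                override fun onFailure(e: Exception) = cont.resumeWithException(e)
            })
        }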
- */ -class AlertingSettings { - - companion object { - const val DEFAULT_MAX_ACTIONABLE_ALERT_COUNT = 50L - - val ALERTING_MAX_MONITORS = Setting.intSetting( - "plugins.alerting.monitor.max_monitors", - LegacyOpenDistroAlertingSettings.ALERTING_MAX_MONITORS, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val INPUT_TIMEOUT = Setting.positiveTimeSetting( - "plugins.alerting.input_timeout", - LegacyOpenDistroAlertingSettings.INPUT_TIMEOUT, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val INDEX_TIMEOUT = Setting.positiveTimeSetting( - "plugins.alerting.index_timeout", - LegacyOpenDistroAlertingSettings.INDEX_TIMEOUT, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val BULK_TIMEOUT = Setting.positiveTimeSetting( - "plugins.alerting.bulk_timeout", - LegacyOpenDistroAlertingSettings.BULK_TIMEOUT, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val ALERT_BACKOFF_MILLIS = Setting.positiveTimeSetting( - "plugins.alerting.alert_backoff_millis", - LegacyOpenDistroAlertingSettings.ALERT_BACKOFF_MILLIS, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val ALERT_BACKOFF_COUNT = Setting.intSetting( - "plugins.alerting.alert_backoff_count", - LegacyOpenDistroAlertingSettings.ALERT_BACKOFF_COUNT, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val MOVE_ALERTS_BACKOFF_MILLIS = Setting.positiveTimeSetting( - "plugins.alerting.move_alerts_backoff_millis", - LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_MILLIS, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val MOVE_ALERTS_BACKOFF_COUNT = Setting.intSetting( - "plugins.alerting.move_alerts_backoff_count", - LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_COUNT, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val ALERT_HISTORY_ENABLED = Setting.boolSetting( - "plugins.alerting.alert_history_enabled", - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_ENABLED, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - // TODO: Do we want to let users to disable this? 
If so, we need to fix the rollover logic - // such that the main index is findings and rolls over to the finding history index - val FINDING_HISTORY_ENABLED = Setting.boolSetting( - "plugins.alerting.alert_finding_enabled", - true, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val ALERT_HISTORY_ROLLOVER_PERIOD = Setting.positiveTimeSetting( - "plugins.alerting.alert_history_rollover_period", - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val FINDING_HISTORY_ROLLOVER_PERIOD = Setting.positiveTimeSetting( - "plugins.alerting.alert_finding_rollover_period", - TimeValue.timeValueHours(12), - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val ALERT_HISTORY_INDEX_MAX_AGE = Setting.positiveTimeSetting( - "plugins.alerting.alert_history_max_age", - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val FINDING_HISTORY_INDEX_MAX_AGE = Setting.positiveTimeSetting( - "plugins.alerting.finding_history_max_age", - TimeValue(30, TimeUnit.DAYS), - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val ALERT_HISTORY_MAX_DOCS = Setting.longSetting( - "plugins.alerting.alert_history_max_docs", - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_MAX_DOCS, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val FINDING_HISTORY_MAX_DOCS = Setting.longSetting( - "plugins.alerting.alert_finding_max_docs", - 1000L, - 0L, - Setting.Property.NodeScope, - Setting.Property.Dynamic, - Setting.Property.Deprecated - ) - - val ALERT_HISTORY_RETENTION_PERIOD = Setting.positiveTimeSetting( - "plugins.alerting.alert_history_retention_period", - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_RETENTION_PERIOD, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val FINDING_HISTORY_RETENTION_PERIOD = Setting.positiveTimeSetting( - "plugins.alerting.finding_history_retention_period", - TimeValue(60, TimeUnit.DAYS), - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val REQUEST_TIMEOUT = Setting.positiveTimeSetting( - "plugins.alerting.request_timeout", - LegacyOpenDistroAlertingSettings.REQUEST_TIMEOUT, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val MAX_ACTION_THROTTLE_VALUE = Setting.positiveTimeSetting( - "plugins.alerting.action_throttle_max_value", - LegacyOpenDistroAlertingSettings.MAX_ACTION_THROTTLE_VALUE, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val FILTER_BY_BACKEND_ROLES = Setting.boolSetting( - "plugins.alerting.filter_by_backend_roles", - LegacyOpenDistroAlertingSettings.FILTER_BY_BACKEND_ROLES, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val MAX_ACTIONABLE_ALERT_COUNT = Setting.longSetting( - "plugins.alerting.max_actionable_alert_count", - DEFAULT_MAX_ACTIONABLE_ALERT_COUNT, - -1L, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/settings/DestinationSettings.kt b/alerting/bin/main/org/opensearch/alerting/settings/DestinationSettings.kt deleted file mode 100644 index 14086ce68..000000000 --- a/alerting/bin/main/org/opensearch/alerting/settings/DestinationSettings.kt +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.settings - -import org.opensearch.common.settings.SecureSetting -import org.opensearch.common.settings.Setting -import 
org.opensearch.common.settings.Setting.AffixSetting -import org.opensearch.common.settings.Settings -import org.opensearch.core.common.settings.SecureString -import java.util.function.Function - -/** - * Settings specific to Destinations. This class is separated from the general AlertingSettings since some Destination - * types require SecureSettings and need additional logic for retrieving and loading them. - */ -class DestinationSettings { - companion object { - - const val DESTINATION_SETTING_PREFIX = "plugins.alerting.destination." - const val EMAIL_DESTINATION_SETTING_PREFIX = DESTINATION_SETTING_PREFIX + "email." - val ALLOW_LIST_NONE = emptyList() - - val ALLOW_LIST: Setting> = Setting.listSetting( - DESTINATION_SETTING_PREFIX + "allow_list", - LegacyOpenDistroDestinationSettings.ALLOW_LIST, - Function.identity(), - Setting.Property.NodeScope, - Setting.Property.Dynamic - ) - - val EMAIL_USERNAME: Setting.AffixSetting = Setting.affixKeySetting( - EMAIL_DESTINATION_SETTING_PREFIX, - "username", - // Needed to coerce lambda to Function type for some reason to avoid argument mismatch compile error - Function { key: String -> - SecureSetting.secureString( - key, - fallback(key, LegacyOpenDistroDestinationSettings.EMAIL_USERNAME, "plugins", "opendistro") - ) - } - ) - - val EMAIL_PASSWORD: Setting.AffixSetting = Setting.affixKeySetting( - EMAIL_DESTINATION_SETTING_PREFIX, - "password", - // Needed to coerce lambda to Function type for some reason to avoid argument mismatch compile error - Function { key: String -> - SecureSetting.secureString( - key, - fallback(key, LegacyOpenDistroDestinationSettings.EMAIL_PASSWORD, "plugins", "opendistro") - ) - } - ) - - val HOST_DENY_LIST: Setting> = Setting.listSetting( - "plugins.destination.host.deny_list", - LegacyOpenDistroDestinationSettings.HOST_DENY_LIST, - Function.identity(), - Setting.Property.NodeScope, - Setting.Property.Final - ) - - fun loadDestinationSettings(settings: Settings): Map { - // Only loading Email Destination settings for now since those are the only secure settings needed. - // If this logic needs to be expanded to support other Destinations, different groups can be retrieved similar - // to emailAccountNames based on the setting namespace and SecureDestinationSettings should be expanded to support - // these new settings. - val emailAccountNames: Set = settings.getGroups(EMAIL_DESTINATION_SETTING_PREFIX).keys - val emailAccounts: MutableMap = mutableMapOf() - for (emailAccountName in emailAccountNames) { - // Only adding the settings if they exist - getSecureDestinationSettings(settings, emailAccountName)?.let { - emailAccounts[emailAccountName] = it - } - } - - return emailAccounts - } - - private fun getSecureDestinationSettings(settings: Settings, emailAccountName: String): SecureDestinationSettings? { - // Using 'use' to emulate Java's try-with-resources on multiple closeable resources. - // Values are cloned so that we maintain a SecureString, the original SecureStrings will be closed after - // they have left the scope of this function. - return getEmailSettingValue(settings, emailAccountName, EMAIL_USERNAME)?.use { emailUsername -> - getEmailSettingValue(settings, emailAccountName, EMAIL_PASSWORD)?.use { emailPassword -> - SecureDestinationSettings(emailUsername = emailUsername.clone(), emailPassword = emailPassword.clone()) - } - } - } - - private fun getEmailSettingValue(settings: Settings, emailAccountName: String, emailSetting: Setting.AffixSetting): T? 
{ - val concreteSetting = emailSetting.getConcreteSettingForNamespace(emailAccountName) - return concreteSetting.get(settings) - } - - private fun fallback(key: String, affixSetting: AffixSetting, regex: String, replacement: String): Setting? { - return if ("_na_" == key) { - affixSetting.getConcreteSettingForNamespace(key) - } else { - affixSetting.getConcreteSetting(key.replace(regex.toRegex(), replacement)) - } - } - - data class SecureDestinationSettings(val emailUsername: SecureString, val emailPassword: SecureString) - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/settings/LegacyOpenDistroAlertingSettings.kt b/alerting/bin/main/org/opensearch/alerting/settings/LegacyOpenDistroAlertingSettings.kt deleted file mode 100644 index e3603aab2..000000000 --- a/alerting/bin/main/org/opensearch/alerting/settings/LegacyOpenDistroAlertingSettings.kt +++ /dev/null @@ -1,157 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.settings - -import org.opensearch.common.settings.Setting -import org.opensearch.common.unit.TimeValue -import java.util.concurrent.TimeUnit - -/** - * Legacy Opendistro settings specific to [AlertingPlugin]. These settings include things like history index max age, request timeout, etc... - */ - -class LegacyOpenDistroAlertingSettings { - - companion object { - - val ALERTING_MAX_MONITORS = Setting.intSetting( - "opendistro.alerting.monitor.max_monitors", - 1000, - Setting.Property.NodeScope, - Setting.Property.Dynamic, - Setting.Property.Deprecated - ) - - val INPUT_TIMEOUT = Setting.positiveTimeSetting( - "opendistro.alerting.input_timeout", - TimeValue.timeValueSeconds(30), - Setting.Property.NodeScope, - Setting.Property.Dynamic, - Setting.Property.Deprecated - ) - - val INDEX_TIMEOUT = Setting.positiveTimeSetting( - "opendistro.alerting.index_timeout", - TimeValue.timeValueSeconds(60), - Setting.Property.NodeScope, - Setting.Property.Dynamic, - Setting.Property.Deprecated - ) - - val BULK_TIMEOUT = Setting.positiveTimeSetting( - "opendistro.alerting.bulk_timeout", - TimeValue.timeValueSeconds(120), - Setting.Property.NodeScope, - Setting.Property.Dynamic, - Setting.Property.Deprecated - ) - - val ALERT_BACKOFF_MILLIS = Setting.positiveTimeSetting( - "opendistro.alerting.alert_backoff_millis", - TimeValue.timeValueMillis(50), - Setting.Property.NodeScope, - Setting.Property.Dynamic, - Setting.Property.Deprecated - ) - - val ALERT_BACKOFF_COUNT = Setting.intSetting( - "opendistro.alerting.alert_backoff_count", - 2, - Setting.Property.NodeScope, - Setting.Property.Dynamic, - Setting.Property.Deprecated - ) - - val MOVE_ALERTS_BACKOFF_MILLIS = Setting.positiveTimeSetting( - "opendistro.alerting.move_alerts_backoff_millis", - TimeValue.timeValueMillis(250), - Setting.Property.NodeScope, - Setting.Property.Dynamic, - Setting.Property.Deprecated - ) - - val MOVE_ALERTS_BACKOFF_COUNT = Setting.intSetting( - "opendistro.alerting.move_alerts_backoff_count", - 3, - Setting.Property.NodeScope, - Setting.Property.Dynamic, - Setting.Property.Deprecated - ) - - val ALERT_HISTORY_ENABLED = Setting.boolSetting( - "opendistro.alerting.alert_history_enabled", - true, - Setting.Property.NodeScope, - Setting.Property.Dynamic, - Setting.Property.Deprecated - ) - - val ALERT_HISTORY_ROLLOVER_PERIOD = Setting.positiveTimeSetting( - "opendistro.alerting.alert_history_rollover_period", - TimeValue.timeValueHours(12), - Setting.Property.NodeScope, - Setting.Property.Dynamic, - Setting.Property.Deprecated - 
) - - val ALERT_HISTORY_INDEX_MAX_AGE = Setting.positiveTimeSetting( - "opendistro.alerting.alert_history_max_age", - TimeValue(30, TimeUnit.DAYS), - Setting.Property.NodeScope, - Setting.Property.Dynamic, - Setting.Property.Deprecated - ) - - val ALERT_HISTORY_MAX_DOCS = Setting.longSetting( - "opendistro.alerting.alert_history_max_docs", - 1000L, - 0L, - Setting.Property.NodeScope, - Setting.Property.Dynamic, - Setting.Property.Deprecated - ) - - val ALERT_HISTORY_RETENTION_PERIOD = Setting.positiveTimeSetting( - "opendistro.alerting.alert_history_retention_period", - TimeValue(60, TimeUnit.DAYS), - Setting.Property.NodeScope, - Setting.Property.Dynamic, - Setting.Property.Deprecated - ) - - val ALERT_FINDING_RETENTION_PERIOD = Setting.positiveTimeSetting( - "opendistro.alerting.alert_finding_retention_period", - TimeValue(60, TimeUnit.DAYS), - Setting.Property.NodeScope, - Setting.Property.Dynamic, - Setting.Property.Deprecated - ) - - val REQUEST_TIMEOUT = Setting.positiveTimeSetting( - "opendistro.alerting.request_timeout", - TimeValue.timeValueSeconds(10), - Setting.Property.NodeScope, - Setting.Property.Dynamic, - Setting.Property.Deprecated - ) - - val MAX_ACTION_THROTTLE_VALUE = Setting.positiveTimeSetting( - "opendistro.alerting.action_throttle_max_value", - TimeValue.timeValueHours(24), - Setting.Property.NodeScope, - Setting.Property.Dynamic, - Setting.Property.Deprecated - ) - - val FILTER_BY_BACKEND_ROLES = Setting.boolSetting( - "opendistro.alerting.filter_by_backend_roles", - false, - Setting.Property.NodeScope, - Setting.Property.Dynamic, - Setting.Property.Deprecated - ) - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/settings/LegacyOpenDistroDestinationSettings.kt b/alerting/bin/main/org/opensearch/alerting/settings/LegacyOpenDistroDestinationSettings.kt deleted file mode 100644 index 73bae6463..000000000 --- a/alerting/bin/main/org/opensearch/alerting/settings/LegacyOpenDistroDestinationSettings.kt +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.settings - -import org.opensearch.alerting.util.DestinationType -import org.opensearch.common.settings.SecureSetting -import org.opensearch.common.settings.Setting -import org.opensearch.common.settings.Settings -import org.opensearch.core.common.settings.SecureString -import java.util.function.Function - -/** - * Settings specific to Destinations. This class is separated from the general AlertingSettings since some Destination - * types require SecureSettings and need additional logic for retrieving and loading them. - */ -class LegacyOpenDistroDestinationSettings { - - companion object { - - const val DESTINATION_SETTING_PREFIX = "opendistro.alerting.destination." - const val EMAIL_DESTINATION_SETTING_PREFIX = DESTINATION_SETTING_PREFIX + "email." 
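Both destination settings classes resolve per-account secure values the same way; a minimal usage sketch (the account namespace my_smtp is hypothetical):

    import org.opensearch.common.settings.Settings
    import org.opensearch.core.common.settings.SecureString

    // Sketch: the affix setting expands the namespace into the concrete key
    // "plugins.alerting.destination.email.my_smtp.username" before reading it.
    fun emailUsername(settings: Settings): SecureString =
        DestinationSettings.EMAIL_USERNAME
            .getConcreteSettingForNamespace("my_smtp")
            .get(settings)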
- val ALLOW_LIST_ALL = DestinationType.values().toList().map { it.value } - val HOST_DENY_LIST_NONE = emptyList() - - val ALLOW_LIST: Setting> = Setting.listSetting( - DESTINATION_SETTING_PREFIX + "allow_list", - ALLOW_LIST_ALL, - Function.identity(), - Setting.Property.NodeScope, - Setting.Property.Dynamic, - Setting.Property.Deprecated - ) - - val EMAIL_USERNAME: Setting.AffixSetting = Setting.affixKeySetting( - EMAIL_DESTINATION_SETTING_PREFIX, - "username", - // Needed to coerce lambda to Function type for some reason to avoid argument mismatch compile error - Function { key: String -> SecureSetting.secureString(key, null) } - ) - - val EMAIL_PASSWORD: Setting.AffixSetting = Setting.affixKeySetting( - EMAIL_DESTINATION_SETTING_PREFIX, - "password", - // Needed to coerce lambda to Function type for some reason to avoid argument mismatch compile error - Function { key: String -> SecureSetting.secureString(key, null) } - ) - - val HOST_DENY_LIST: Setting> = Setting.listSetting( - "opendistro.destination.host.deny_list", - HOST_DENY_LIST_NONE, - Function.identity(), - Setting.Property.NodeScope, - Setting.Property.Final, - Setting.Property.Deprecated - ) - - fun loadLegacyDestinationSettings(settings: Settings): Map { - // Only loading Email Destination settings for now since those are the only secure settings needed. - // If this logic needs to be expanded to support other Destinations, different groups can be retrieved similar - // to emailAccountNames based on the setting namespace and SecureDestinationSettings should be expanded to support - // these new settings. - val emailAccountNames: Set = settings.getGroups(EMAIL_DESTINATION_SETTING_PREFIX).keys - val emailAccounts: MutableMap = mutableMapOf() - for (emailAccountName in emailAccountNames) { - // Only adding the settings if they exist - getLegacySecureDestinationSettings(settings, emailAccountName)?.let { - emailAccounts[emailAccountName] = it - } - } - - return emailAccounts - } - - private fun getLegacySecureDestinationSettings(settings: Settings, emailAccountName: String): SecureDestinationSettings? { - // Using 'use' to emulate Java's try-with-resources on multiple closeable resources. - // Values are cloned so that we maintain a SecureString, the original SecureStrings will be closed after - // they have left the scope of this function. - return getLegacyEmailSettingValue(settings, emailAccountName, EMAIL_USERNAME)?.use { emailUsername -> - getLegacyEmailSettingValue(settings, emailAccountName, EMAIL_PASSWORD)?.use { emailPassword -> - SecureDestinationSettings(emailUsername = emailUsername.clone(), emailPassword = emailPassword.clone()) - } - } - } - - private fun getLegacyEmailSettingValue( - settings: Settings, - emailAccountName: String, - emailSetting: Setting.AffixSetting - ): T? 
{ - val concreteSetting = emailSetting.getConcreteSettingForNamespace(emailAccountName) - return concreteSetting.get(settings) - } - - data class SecureDestinationSettings(val emailUsername: SecureString, val emailPassword: SecureString) - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/settings/SupportedClusterMetricsSettings.kt b/alerting/bin/main/org/opensearch/alerting/settings/SupportedClusterMetricsSettings.kt deleted file mode 100644 index a3e2a98c1..000000000 --- a/alerting/bin/main/org/opensearch/alerting/settings/SupportedClusterMetricsSettings.kt +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.settings - -import org.opensearch.action.ActionRequest -import org.opensearch.action.admin.cluster.health.ClusterHealthRequest -import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest -import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest -import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest -import org.opensearch.action.admin.cluster.state.ClusterStateRequest -import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest -import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksRequest -import org.opensearch.action.admin.indices.recovery.RecoveryRequest -import org.opensearch.alerting.util.clusterMetricsMonitorHelpers.CatIndicesRequestWrapper -import org.opensearch.alerting.util.clusterMetricsMonitorHelpers.CatShardsRequestWrapper -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.json.JsonXContent -import org.opensearch.commons.alerting.model.ClusterMetricsInput -import org.opensearch.commons.alerting.model.ClusterMetricsInput.ClusterMetricType -import org.opensearch.commons.alerting.util.IndexUtils.Companion.supportedClusterMetricsSettings - -/** - * A class that supports storing a unique set of API paths that can be accessed by general users. - */ -class SupportedClusterMetricsSettings : org.opensearch.commons.alerting.settings.SupportedClusterMetricsSettings { - companion object { - const val RESOURCE_FILE = "supported_json_payloads.json" - - /** - * The key in this map represents the path to call an API. - * - * NOTE: Paths should conform to the following pattern: - * "/_cluster/stats" - * - * The value in these maps represents a path root mapped to a list of paths to field values. - * If the value mapped to an API is an empty map, no fields will be redacted from the API response. 
- * - * NOTE: Keys in this map should consist of root components of the response body; e.g.,: - * "indices" - * - * Values in these maps should consist of the remaining fields in the path - * to the supported value separated by periods; e.g.,: - * "shards.total", - * "shards.index.shards.min" - * - * In this example for ClusterStats, the response will only include - * the values at the end of these two paths: - * "/_cluster/stats": { - * "indices": [ - * "shards.total", - * "shards.index.shards.min" - * ] - * } - */ - private var supportedApiList = HashMap>>() - - init { - val supportedJsonPayloads = SupportedClusterMetricsSettings::class.java.getResource(RESOURCE_FILE) - - @Suppress("UNCHECKED_CAST") - if (supportedJsonPayloads != null) { - supportedApiList = XContentHelper.convertToMap(JsonXContent.jsonXContent, supportedJsonPayloads.readText(), false) - as HashMap>> - } - } - - /** - * Returns the map of all supported json payload associated with the provided path from supportedApiList. - * @param path The path for the requested API. - * @return The map of the supported json payload for the requested API. - * @throws IllegalArgumentException When supportedApiList does not contain a value for the provided key. - */ - fun getSupportedJsonPayload(path: String): Map> { - return supportedApiList[path] ?: throw IllegalArgumentException("API path not in supportedApiList.") - } - - /** - * Will return an [ActionRequest] for the API associated with that path. - * Will otherwise throw an exception. - * @param clusterMetricsInput The [ClusterMetricsInput] to resolve. - * @throws IllegalArgumentException when the requested API is not supported. - * @return The [ActionRequest] for the API associated with the provided [ClusterMetricsInput]. - */ - fun resolveToActionRequest(clusterMetricsInput: ClusterMetricsInput): ActionRequest { - val pathParams = clusterMetricsInput.parsePathParams() - return when (clusterMetricsInput.clusterMetricType) { - ClusterMetricType.CAT_INDICES -> CatIndicesRequestWrapper(pathParams) - ClusterMetricType.CAT_PENDING_TASKS -> PendingClusterTasksRequest() - ClusterMetricType.CAT_RECOVERY -> { - if (pathParams.isEmpty()) return RecoveryRequest() - val pathParamsArray = pathParams.split(",").toTypedArray() - return RecoveryRequest(*pathParamsArray) - } - ClusterMetricType.CAT_SHARDS -> CatShardsRequestWrapper(pathParams) - ClusterMetricType.CAT_SNAPSHOTS -> { - return GetSnapshotsRequest(pathParams, arrayOf(GetSnapshotsRequest.ALL_SNAPSHOTS)) - } - ClusterMetricType.CAT_TASKS -> ListTasksRequest() - ClusterMetricType.CLUSTER_HEALTH -> { - if (pathParams.isEmpty()) return ClusterHealthRequest() - val pathParamsArray = pathParams.split(",").toTypedArray() - return ClusterHealthRequest(*pathParamsArray) - } - ClusterMetricType.CLUSTER_SETTINGS -> ClusterStateRequest().routingTable(false).nodes(false) - ClusterMetricType.CLUSTER_STATS -> { - if (pathParams.isEmpty()) return ClusterStatsRequest() - val pathParamsArray = pathParams.split(",").toTypedArray() - return ClusterStatsRequest(*pathParamsArray) - } - ClusterMetricType.NODES_STATS -> NodesStatsRequest().addMetrics( - "os", - "process", - "jvm", - "thread_pool", - "fs", - "transport", - "http", - "breaker", - "script", - "discovery", - "ingest", - "adaptive_selection", - "script_cache", - "indexing_pressure", - "shard_indexing_pressure" - ) - else -> throw IllegalArgumentException("Unsupported API.") - } - } - - /** - * Confirms whether the provided path is in [supportedApiList]. 
- * Throws an exception if the provided path is not on the list; otherwise performs no action. - * @param clusterMetricsInput The [ClusterMetricsInput] to validate. - * @throws IllegalArgumentException when supportedApiList does not contain the provided path. - */ - fun validateApiTyped(clusterMetricsInput: ClusterMetricsInput) { - if (!supportedApiList.keys.contains(clusterMetricsInput.clusterMetricType.defaultPath)) { - throw IllegalArgumentException("API path not in supportedApiList.") - } - } - } - - constructor() { - supportedClusterMetricsSettings = this - } - - override fun validateApiType(clusterMetricsInput: ClusterMetricsInput) { - validateApiTyped(clusterMetricsInput) - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/settings/supported_json_payloads.json b/alerting/bin/main/org/opensearch/alerting/settings/supported_json_payloads.json deleted file mode 100644 index a153a67b2..000000000 --- a/alerting/bin/main/org/opensearch/alerting/settings/supported_json_payloads.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "/_cat/indices": {}, - "/_cat/pending_tasks": {}, - "/_cat/recovery": {}, - "/_cat/shards": {}, - "/_cat/snapshots": {}, - "/_cat/tasks": {}, - "/_cluster/health": {}, - "/_cluster/settings": {}, - "/_cluster/stats": {}, - "/_nodes/stats": {} -} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/SecureTransportAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/SecureTransportAction.kt deleted file mode 100644 index 402780212..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/SecureTransportAction.kt +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import org.apache.logging.log4j.LogManager -import org.opensearch.OpenSearchStatusException -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.util.AlertingException -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.commons.ConfigConstants -import org.opensearch.commons.authuser.User -import org.opensearch.core.action.ActionListener -import org.opensearch.core.rest.RestStatus - -private val log = LogManager.getLogger(SecureTransportAction::class.java) - -/** - * TransportAction classes extend this interface to add filter-by-backend-roles functionality. - * - * 1. If filterBy is enabled: - * a) Monitor/destination creation is not allowed (an error is thrown) if the logged-on user has no backend roles configured. - * - * 2. If filterBy is enabled & monitors were created while filterBy was disabled: - * a) If backend_roles are saved with the config, results are filtered and data is shown. - * b) If backend_roles are not saved with the monitor config, results are filtered and no monitors - * are displayed. - * c) Users can edit and save the monitors to associate their backend_roles. - * - * 3. If filterBy is enabled & monitors were created by an older version: - * a) No User details are present on the monitor. - * b) No monitors are displayed. - * c) Users can edit and save the monitors to associate their backend_roles. - */ -interface SecureTransportAction { - - var filterByEnabled: Boolean - - fun listenFilterBySettingChange(clusterService: ClusterService) { - clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.FILTER_BY_BACKEND_ROLES) { filterByEnabled = it } - } - - fun readUserFromThreadContext(client: Client): User?
{ - val userStr = client.threadPool().threadContext.getTransient(ConfigConstants.OPENSEARCH_SECURITY_USER_INFO_THREAD_CONTEXT) - log.debug("User and roles string from thread context: $userStr") - return User.parse(userStr) - } - - fun doFilterForUser(user: User?): Boolean { - log.debug("Is filterByEnabled: $filterByEnabled ; Is admin user: ${isAdmin(user)}") - return if (isAdmin(user)) { - false - } else { - filterByEnabled - } - } - - /** - * 'all_access' role users are treated as admins. - */ - fun isAdmin(user: User?): Boolean { - return when { - user == null -> { - false - } - user.roles?.isNullOrEmpty() == true -> { - false - } - else -> { - user.roles?.contains("all_access") == true - } - } - } - - fun validateUserBackendRoles(user: User?, actionListener: ActionListener): Boolean { - if (filterByEnabled) { - if (user == null) { - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "Filter by user backend roles is enabled with security disabled.", - RestStatus.FORBIDDEN - ) - ) - ) - return false - } else if (isAdmin(user)) { - return true - } else if (user.backendRoles.isNullOrEmpty()) { - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException("User doesn't have backend roles configured. Contact administrator", RestStatus.FORBIDDEN) - ) - ) - return false - } - } - return true - } - - /** - * If FilterBy is enabled, this function verifies that the requester user has FilterBy permissions to access - * the resource. If FilterBy is disabled, we will assume the user has permissions and return true. - * - * This check will later to moved to the security plugin. - */ - fun checkUserPermissionsWithResource( - requesterUser: User?, - resourceUser: User?, - actionListener: ActionListener, - resourceType: String, - resourceId: String - ): Boolean { - if (!doFilterForUser(requesterUser)) return true - - val resourceBackendRoles = resourceUser?.backendRoles - val requesterBackendRoles = requesterUser?.backendRoles - - if ( - resourceBackendRoles == null || - requesterBackendRoles == null || - resourceBackendRoles.intersect(requesterBackendRoles).isEmpty() - ) { - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "Do not have permissions to resource, $resourceType, with id, $resourceId", - RestStatus.FORBIDDEN - ) - ) - ) - return false - } - return true - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt deleted file mode 100644 index a94a682d3..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/TransportAcknowledgeAlertAction.kt +++ /dev/null @@ -1,268 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import kotlinx.coroutines.CoroutineScope -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.launch -import org.apache.logging.log4j.LogManager -import org.opensearch.ResourceNotFoundException -import org.opensearch.action.ActionRequest -import org.opensearch.action.bulk.BulkRequest -import org.opensearch.action.bulk.BulkResponse -import org.opensearch.action.delete.DeleteRequest -import org.opensearch.action.index.IndexRequest -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.HandledTransportAction -import 
org.opensearch.action.update.UpdateRequest -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.util.AlertingException -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.inject.Inject -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentFactory -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.action.AcknowledgeAlertRequest -import org.opensearch.commons.alerting.action.AcknowledgeAlertResponse -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.GetMonitorRequest -import org.opensearch.commons.alerting.action.GetMonitorResponse -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.util.optionalTimeField -import org.opensearch.commons.utils.recreateObject -import org.opensearch.core.action.ActionListener -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils -import org.opensearch.index.query.QueryBuilders -import org.opensearch.rest.RestRequest -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.search.fetch.subphase.FetchSourceContext -import org.opensearch.tasks.Task -import org.opensearch.transport.TransportService -import java.time.Instant -import java.util.Locale - -private val log = LogManager.getLogger(TransportAcknowledgeAlertAction::class.java) -private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) - -class TransportAcknowledgeAlertAction @Inject constructor( - transportService: TransportService, - val client: Client, - clusterService: ClusterService, - actionFilters: ActionFilters, - val settings: Settings, - val xContentRegistry: NamedXContentRegistry, - val transportGetMonitorAction: TransportGetMonitorAction -) : HandledTransportAction( - AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_NAME, - transportService, - actionFilters, - ::AcknowledgeAlertRequest -) { - - @Volatile - private var isAlertHistoryEnabled = AlertingSettings.ALERT_HISTORY_ENABLED.get(settings) - - init { - clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.ALERT_HISTORY_ENABLED) { isAlertHistoryEnabled = it } - } - - override fun doExecute( - task: Task, - acknowledgeAlertRequest: ActionRequest, - actionListener: ActionListener - ) { - val request = acknowledgeAlertRequest as? 
AcknowledgeAlertRequest - ?: recreateObject(acknowledgeAlertRequest) { AcknowledgeAlertRequest(it) } - client.threadPool().threadContext.stashContext().use { - scope.launch { - val getMonitorResponse: GetMonitorResponse = - transportGetMonitorAction.client.suspendUntil { - val getMonitorRequest = GetMonitorRequest( - monitorId = request.monitorId, - -3L, - RestRequest.Method.GET, - FetchSourceContext.FETCH_SOURCE - ) - execute(AlertingActions.GET_MONITOR_ACTION_TYPE, getMonitorRequest, it) - } - if (getMonitorResponse.monitor == null) { - actionListener.onFailure( - AlertingException.wrap( - ResourceNotFoundException( - String.format( - Locale.ROOT, - "No monitor found with id [%s]", - request.monitorId - ) - ) - ) - ) - } else { - AcknowledgeHandler(client, actionListener, request).start(getMonitorResponse.monitor!!) - } - } - } - } - - inner class AcknowledgeHandler( - private val client: Client, - private val actionListener: ActionListener, - private val request: AcknowledgeAlertRequest - ) { - val alerts = mutableMapOf() - - suspend fun start(monitor: Monitor) = findActiveAlerts(monitor) - - private suspend fun findActiveAlerts(monitor: Monitor) { - val queryBuilder = QueryBuilders.boolQuery() - .filter(QueryBuilders.termQuery(Alert.MONITOR_ID_FIELD, request.monitorId)) - .filter(QueryBuilders.termsQuery("_id", request.alertIds)) - val searchRequest = SearchRequest() - .indices(monitor.dataSources.alertsIndex) - .routing(request.monitorId) - .source( - SearchSourceBuilder() - .query(queryBuilder) - .version(true) - .seqNoAndPrimaryTerm(true) - .size(request.alertIds.size) - ) - try { - val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } - onSearchResponse(searchResponse, monitor) - } catch (t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - - private suspend fun onSearchResponse(response: SearchResponse, monitor: Monitor) { - val alertsHistoryIndex = monitor.dataSources.alertsHistoryIndex - val updateRequests = mutableListOf() - val copyRequests = mutableListOf() - response.hits.forEach { hit -> - val xcp = XContentHelper.createParser( - xContentRegistry, - LoggingDeprecationHandler.INSTANCE, - hit.sourceRef, - XContentType.JSON - ) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - val alert = Alert.parse(xcp, hit.id, hit.version) - alerts[alert.id] = alert - - if (alert.state == Alert.State.ACTIVE) { - if ( - alert.findingIds.isEmpty() || - !isAlertHistoryEnabled - ) { - val updateRequest = UpdateRequest(monitor.dataSources.alertsIndex, alert.id) - .routing(request.monitorId) - .setIfSeqNo(hit.seqNo) - .setIfPrimaryTerm(hit.primaryTerm) - .doc( - XContentFactory.jsonBuilder().startObject() - .field(Alert.STATE_FIELD, Alert.State.ACKNOWLEDGED.toString()) - .optionalTimeField(Alert.ACKNOWLEDGED_TIME_FIELD, Instant.now()) - .endObject() - ) - updateRequests.add(updateRequest) - } else { - val copyRequest = IndexRequest(alertsHistoryIndex) - .routing(request.monitorId) - .id(alert.id) - .source( - alert.copy(state = Alert.State.ACKNOWLEDGED, acknowledgedTime = Instant.now()) - .toXContentWithUser(XContentFactory.jsonBuilder()) - ) - copyRequests.add(copyRequest) - } - } - } - - try { - val updateResponse: BulkResponse? = if (updateRequests.isNotEmpty()) { - client.suspendUntil { - client.bulk(BulkRequest().add(updateRequests).setRefreshPolicy(request.refreshPolicy), it) - } - } else null - val copyResponse: BulkResponse? 
= if (copyRequests.isNotEmpty()) { - client.suspendUntil { - client.bulk(BulkRequest().add(copyRequests).setRefreshPolicy(request.refreshPolicy), it) - } - } else null - onBulkResponse(updateResponse, copyResponse, monitor) - } catch (t: Exception) { - log.error("ack error: ${t.message}") - actionListener.onFailure(AlertingException.wrap(t)) - } - } - - private suspend fun onBulkResponse(updateResponse: BulkResponse?, copyResponse: BulkResponse?, monitor: Monitor) { - val deleteRequests = mutableListOf() - val missing = request.alertIds.toMutableSet() - val acknowledged = mutableListOf() - val failed = mutableListOf() - - alerts.values.forEach { - if (it.state != Alert.State.ACTIVE) { - missing.remove(it.id) - failed.add(it) - } - } - - updateResponse?.items?.forEach { item -> - missing.remove(item.id) - if (item.isFailed) { - failed.add(alerts[item.id]!!) - } else { - acknowledged.add(alerts[item.id]!!) - } - } - - copyResponse?.items?.forEach { item -> - log.info("got a copyResponse: $item") - missing.remove(item.id) - if (item.isFailed) { - log.info("got a failureResponse: ${item.failureMessage}") - failed.add(alerts[item.id]!!) - } else { - val deleteRequest = DeleteRequest(monitor.dataSources.alertsIndex, item.id) - .routing(request.monitorId) - deleteRequests.add(deleteRequest) - } - } - - if (deleteRequests.isNotEmpty()) { - try { - val deleteResponse: BulkResponse = client.suspendUntil { - client.bulk(BulkRequest().add(deleteRequests).setRefreshPolicy(request.refreshPolicy), it) - } - deleteResponse.items.forEach { item -> - missing.remove(item.id) - if (item.isFailed) { - failed.add(alerts[item.id]!!) - } else { - acknowledged.add(alerts[item.id]!!) - } - } - } catch (t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - return - } - } - actionListener.onResponse(AcknowledgeAlertResponse(acknowledged.toList(), failed.toList(), missing.toList())) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportAcknowledgeChainedAlertAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportAcknowledgeChainedAlertAction.kt deleted file mode 100644 index 26da7f644..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/TransportAcknowledgeChainedAlertAction.kt +++ /dev/null @@ -1,296 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import kotlinx.coroutines.CoroutineScope -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.launch -import org.apache.logging.log4j.LogManager -import org.opensearch.OpenSearchStatusException -import org.opensearch.ResourceNotFoundException -import org.opensearch.action.ActionRequest -import org.opensearch.action.bulk.BulkRequest -import org.opensearch.action.bulk.BulkResponse -import org.opensearch.action.delete.DeleteRequest -import org.opensearch.action.get.GetRequest -import org.opensearch.action.get.GetResponse -import org.opensearch.action.index.IndexRequest -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.HandledTransportAction -import org.opensearch.action.support.WriteRequest -import org.opensearch.action.update.UpdateRequest -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.util.AlertingException -import 
org.opensearch.alerting.util.ScheduledJobUtils -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.inject.Inject -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentFactory -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.action.AcknowledgeAlertResponse -import org.opensearch.commons.alerting.action.AcknowledgeChainedAlertRequest -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.CompositeInput -import org.opensearch.commons.alerting.model.DataSources -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.commons.alerting.util.optionalTimeField -import org.opensearch.commons.utils.recreateObject -import org.opensearch.core.action.ActionListener -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils -import org.opensearch.index.query.QueryBuilders -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.tasks.Task -import org.opensearch.transport.TransportService -import java.time.Instant -import java.util.Locale - -private val log = LogManager.getLogger(TransportAcknowledgeChainedAlertAction::class.java) -private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) - -class TransportAcknowledgeChainedAlertAction @Inject constructor( - transportService: TransportService, - val client: Client, - clusterService: ClusterService, - actionFilters: ActionFilters, - val settings: Settings, - val xContentRegistry: NamedXContentRegistry, -) : HandledTransportAction( - AlertingActions.ACKNOWLEDGE_CHAINED_ALERTS_ACTION_NAME, - transportService, - actionFilters, - ::AcknowledgeChainedAlertRequest -) { - @Volatile - private var isAlertHistoryEnabled = AlertingSettings.ALERT_HISTORY_ENABLED.get(settings) - - init { - clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.ALERT_HISTORY_ENABLED) { isAlertHistoryEnabled = it } - } - - override fun doExecute( - task: Task, - AcknowledgeChainedAlertRequest: ActionRequest, - actionListener: ActionListener, - ) { - val request = AcknowledgeChainedAlertRequest as? 
AcknowledgeChainedAlertRequest - ?: recreateObject(AcknowledgeChainedAlertRequest) { AcknowledgeChainedAlertRequest(it) } - client.threadPool().threadContext.stashContext().use { - scope.launch { - try { - val getResponse = getWorkflow(request.workflowId) - if (getResponse.isExists == false) { - actionListener.onFailure( - AlertingException.wrap( - ResourceNotFoundException( - String.format( - Locale.ROOT, - "No workflow found with id [%s]", - request.workflowId - ) - ) - ) - ) - } else { - val workflow = ScheduledJobUtils.parseWorkflowFromScheduledJobDocSource(xContentRegistry, getResponse) - AcknowledgeHandler(client, actionListener, request).start(workflow = workflow) - } - } catch (e: Exception) { - log.error("Failed to acknowledge chained alerts from request $request", e) - actionListener.onFailure(AlertingException.wrap(e)) - } - } - } - } - - private suspend fun getWorkflow(workflowId: String): GetResponse { - return client.suspendUntil { client.get(GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, workflowId), it) } - } - - inner class AcknowledgeHandler( - private val client: Client, - private val actionListener: ActionListener, - private val request: AcknowledgeChainedAlertRequest, - ) { - val alerts = mutableMapOf() - - suspend fun start(workflow: Workflow) = findActiveAlerts(workflow) - - private suspend fun findActiveAlerts(workflow: Workflow) { - try { - val queryBuilder = QueryBuilders.boolQuery() - .must( - QueryBuilders.wildcardQuery("workflow_id", request.workflowId) - ) - .must(QueryBuilders.termsQuery("_id", request.alertIds)) - if (workflow.inputs.isEmpty() || (workflow.inputs[0] is CompositeInput) == false) { - actionListener.onFailure( - OpenSearchStatusException("Workflow ${workflow.id} is invalid", RestStatus.INTERNAL_SERVER_ERROR) - ) - return - } - val compositeInput = workflow.inputs[0] as CompositeInput - val workflowId = compositeInput.sequence.delegates[0].monitorId - val dataSources: DataSources = getDataSources(workflowId) - val searchRequest = SearchRequest() - .indices(dataSources.alertsIndex) - .routing(request.workflowId) - .source( - SearchSourceBuilder() - .query(queryBuilder) - .version(true) - .seqNoAndPrimaryTerm(true) - .size(request.alertIds.size) - ) - - val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } - onSearchResponse(searchResponse, workflow, dataSources) - } catch (t: Exception) { - log.error("Failed to acknowledge chained alert ${request.alertIds} for workflow ${request.workflowId}", t) - actionListener.onFailure(AlertingException.wrap(t)) - } - } - - private suspend fun getDataSources(monitorId: String): DataSources { - val getResponse: GetResponse = client.suspendUntil { client.get(GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, monitorId), it) } - return ScheduledJobUtils.parseMonitorFromScheduledJobDocSource(xContentRegistry, getResponse).dataSources - } - - private suspend fun onSearchResponse(response: SearchResponse, workflow: Workflow, dataSources: DataSources) { - val alertsHistoryIndex = dataSources.alertsHistoryIndex - val updateRequests = mutableListOf() - val copyRequests = mutableListOf() - response.hits.forEach { hit -> - val xcp = XContentHelper.createParser( - xContentRegistry, - LoggingDeprecationHandler.INSTANCE, - hit.sourceRef, - XContentType.JSON - ) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - val alert = Alert.parse(xcp, hit.id, hit.version) - alerts[alert.id] = alert - - if (alert.state == Alert.State.ACTIVE) { - if ( - 
alert.findingIds.isEmpty() || - !isAlertHistoryEnabled - ) { - val updateRequest = UpdateRequest(dataSources.alertsIndex, alert.id) - .routing(request.workflowId) - .setIfSeqNo(hit.seqNo) - .setIfPrimaryTerm(hit.primaryTerm) - .doc( - XContentFactory.jsonBuilder().startObject() - .field(Alert.STATE_FIELD, Alert.State.ACKNOWLEDGED.toString()) - .optionalTimeField(Alert.ACKNOWLEDGED_TIME_FIELD, Instant.now()) - .endObject() - ) - updateRequests.add(updateRequest) - } else { - val copyRequest = IndexRequest(alertsHistoryIndex) - .routing(request.workflowId) - .id(alert.id) - .source( - alert.copy(state = Alert.State.ACKNOWLEDGED, acknowledgedTime = Instant.now()) - .toXContentWithUser(XContentFactory.jsonBuilder()) - ) - copyRequests.add(copyRequest) - } - } - } - - try { - val updateResponse: BulkResponse? = if (updateRequests.isNotEmpty()) { - client.suspendUntil { - client.bulk(BulkRequest().add(updateRequests).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), it) - } - } else null - val copyResponse: BulkResponse? = if (copyRequests.isNotEmpty()) { - client.suspendUntil { - client.bulk(BulkRequest().add(copyRequests).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), it) - } - } else null - onBulkResponse(updateResponse, copyResponse, dataSources) - } catch (t: Exception) { - log.error("Failed to acknowledge chained alert ${request.alertIds} for workflow ${request.workflowId}", t) - actionListener.onFailure(AlertingException.wrap(t)) - } - } - - private suspend fun onBulkResponse(updateResponse: BulkResponse?, copyResponse: BulkResponse?, dataSources: DataSources) { - val deleteRequests = mutableListOf() - val acknowledged = mutableListOf() - val missing = request.alertIds.toMutableSet() - val failed = mutableListOf() - - alerts.values.forEach { - if (it.state != Alert.State.ACTIVE) { - missing.remove(it.id) - failed.add(it) - } - } - - updateResponse?.items?.forEach { item -> - missing.remove(item.id) - if (item.isFailed) { - failed.add(alerts[item.id]!!) - } else { - acknowledged.add(alerts[item.id]!!) - } - } - - copyResponse?.items?.forEach { item -> - log.info("got a copyResponse: $item") - missing.remove(item.id) - if (item.isFailed) { - log.info("got a failureResponse: ${item.failureMessage}") - failed.add(alerts[item.id]!!) - } else { - val deleteRequest = DeleteRequest(dataSources.alertsIndex, item.id) - .routing(request.workflowId) - deleteRequests.add(deleteRequest) - } - } - - if (deleteRequests.isNotEmpty()) { - try { - val deleteResponse: BulkResponse = client.suspendUntil { - client.bulk(BulkRequest().add(deleteRequests).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), it) - } - deleteResponse.items.forEach { item -> - missing.remove(item.id) - if (item.isFailed) { - failed.add(alerts[item.id]!!) - } else { - acknowledged.add(alerts[item.id]!!) 
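// Illustrative summary (added note, not in the original file) of the bookkeeping
// in this handler: every alert id in request.alertIds ends up in, at most, one of
// three buckets of the AcknowledgeAlertResponse:
//   acknowledged - ACTIVE alerts whose in-place update, or history copy followed
//                  by this delete, succeeded
//   failed       - alerts found in a non-ACTIVE state, or whose bulk write failed
//   missing      - requested ids that were never found in the alerts index
// In effect, acknowledged + failed + missing together cover request.alertIds.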
- } - } - } catch (t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - return - } - } - actionListener.onResponse( - AcknowledgeAlertResponse( - acknowledged.toList(), - failed.toList(), - missing.toList() - ) - ) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt deleted file mode 100644 index 321c2e162..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/TransportDeleteMonitorAction.kt +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import kotlinx.coroutines.CoroutineScope -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.launch -import org.apache.logging.log4j.LogManager -import org.opensearch.OpenSearchStatusException -import org.opensearch.action.ActionRequest -import org.opensearch.action.get.GetRequest -import org.opensearch.action.get.GetResponse -import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.HandledTransportAction -import org.opensearch.action.support.WriteRequest.RefreshPolicy -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.service.DeleteMonitorService -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.util.AlertingException -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.inject.Inject -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.DeleteMonitorRequest -import org.opensearch.commons.alerting.action.DeleteMonitorResponse -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.authuser.User -import org.opensearch.commons.utils.recreateObject -import org.opensearch.core.action.ActionListener -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.tasks.Task -import org.opensearch.transport.TransportService - -private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) -private val log = LogManager.getLogger(TransportDeleteMonitorAction::class.java) - -class TransportDeleteMonitorAction @Inject constructor( - transportService: TransportService, - val client: Client, - actionFilters: ActionFilters, - val clusterService: ClusterService, - settings: Settings, - val xContentRegistry: NamedXContentRegistry -) : HandledTransportAction( - AlertingActions.DELETE_MONITOR_ACTION_NAME, - transportService, - actionFilters, - ::DeleteMonitorRequest -), - SecureTransportAction { - - @Volatile override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) - - init { - listenFilterBySettingChange(clusterService) - } - - override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener) { - val transformedRequest = request as? 
DeleteMonitorRequest - ?: recreateObject(request) { DeleteMonitorRequest(it) } - val user = readUserFromThreadContext(client) - - if (!validateUserBackendRoles(user, actionListener)) { - return - } - scope.launch { - DeleteMonitorHandler( - client, - actionListener, - user, - transformedRequest.monitorId - ).resolveUserAndStart(transformedRequest.refreshPolicy) - } - } - - inner class DeleteMonitorHandler( - private val client: Client, - private val actionListener: ActionListener, - private val user: User?, - private val monitorId: String - ) { - suspend fun resolveUserAndStart(refreshPolicy: RefreshPolicy) { - try { - val monitor = getMonitor() - - val canDelete = user == null || !doFilterForUser(user) || - checkUserPermissionsWithResource(user, monitor.user, actionListener, "monitor", monitorId) - - if (DeleteMonitorService.monitorIsWorkflowDelegate(monitor.id)) { - actionListener.onFailure( - AlertingException( - "Monitor can't be deleted because it is a part of workflow(s)", - RestStatus.FORBIDDEN, - IllegalStateException() - ) - ) - } else if (canDelete) { - actionListener.onResponse( - DeleteMonitorService.deleteMonitor(monitor, refreshPolicy) - ) - } else { - actionListener.onFailure( - AlertingException("Not allowed to delete this monitor!", RestStatus.FORBIDDEN, IllegalStateException()) - ) - } - } catch (t: Exception) { - log.error("Failed to delete monitor $monitorId", t) - actionListener.onFailure(AlertingException.wrap(t)) - } - } - - private suspend fun getMonitor(): Monitor { - val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, monitorId) - - val getResponse: GetResponse = client.suspendUntil { get(getRequest, it) } - if (getResponse.isExists == false) { - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException("Monitor with $monitorId is not found", RestStatus.NOT_FOUND) - ) - ) - } - val xcp = XContentHelper.createParser( - xContentRegistry, - LoggingDeprecationHandler.INSTANCE, - getResponse.sourceAsBytesRef, - XContentType.JSON - ) - return ScheduledJob.parse(xcp, getResponse.id, getResponse.version) as Monitor - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportDeleteWorkflowAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportDeleteWorkflowAction.kt deleted file mode 100644 index 9b076a600..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/TransportDeleteWorkflowAction.kt +++ /dev/null @@ -1,327 +0,0 @@ - -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import kotlinx.coroutines.CoroutineScope -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.launch -import org.apache.logging.log4j.LogManager -import org.apache.lucene.search.join.ScoreMode -import org.opensearch.OpenSearchException -import org.opensearch.OpenSearchStatusException -import org.opensearch.action.ActionRequest -import org.opensearch.action.delete.DeleteRequest -import org.opensearch.action.delete.DeleteResponse -import org.opensearch.action.get.GetRequest -import org.opensearch.action.get.GetResponse -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.HandledTransportAction -import org.opensearch.action.support.WriteRequest.RefreshPolicy -import org.opensearch.alerting.model.MonitorMetadata -import org.opensearch.alerting.model.WorkflowMetadata -import 
org.opensearch.alerting.opensearchapi.addFilter -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.service.DeleteMonitorService -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.util.AlertingException -import org.opensearch.alerting.util.ScheduledJobUtils.Companion.WORKFLOW_DELEGATE_PATH -import org.opensearch.alerting.util.ScheduledJobUtils.Companion.WORKFLOW_MONITOR_PATH -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.inject.Inject -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.DeleteWorkflowRequest -import org.opensearch.commons.alerting.action.DeleteWorkflowResponse -import org.opensearch.commons.alerting.model.CompositeInput -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.commons.authuser.User -import org.opensearch.commons.utils.recreateObject -import org.opensearch.core.action.ActionListener -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.index.IndexNotFoundException -import org.opensearch.index.query.QueryBuilders -import org.opensearch.index.reindex.BulkByScrollResponse -import org.opensearch.index.reindex.DeleteByQueryAction -import org.opensearch.index.reindex.DeleteByQueryRequestBuilder -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.tasks.Task -import org.opensearch.transport.TransportService - -private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) -/** - * Transport class that deletes the workflow. - * If the deleteDelegateMonitor flag is set to true, deletes the workflow delegates that are not part of another workflow - */ -class TransportDeleteWorkflowAction @Inject constructor( - transportService: TransportService, - val client: Client, - actionFilters: ActionFilters, - val clusterService: ClusterService, - val settings: Settings, - val xContentRegistry: NamedXContentRegistry, -) : HandledTransportAction( - AlertingActions.DELETE_WORKFLOW_ACTION_NAME, transportService, actionFilters, ::DeleteWorkflowRequest -), - SecureTransportAction { - private val log = LogManager.getLogger(javaClass) - - @Volatile override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) - - init { - listenFilterBySettingChange(clusterService) - } - - override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener) { - val transformedRequest = request as? 
DeleteWorkflowRequest - ?: recreateObject(request) { DeleteWorkflowRequest(it) } - - val user = readUserFromThreadContext(client) - val deleteRequest = DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, transformedRequest.workflowId) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE) - - if (!validateUserBackendRoles(user, actionListener)) { - return - } - - scope.launch { - DeleteWorkflowHandler( - client, - actionListener, - deleteRequest, - transformedRequest.deleteDelegateMonitors, - user, - transformedRequest.workflowId - ).resolveUserAndStart() - } - } - - inner class DeleteWorkflowHandler( - private val client: Client, - private val actionListener: ActionListener, - private val deleteRequest: DeleteRequest, - private val deleteDelegateMonitors: Boolean?, - private val user: User?, - private val workflowId: String, - ) { - suspend fun resolveUserAndStart() { - try { - val workflow = getWorkflow() - - val canDelete = user == null || - !doFilterForUser(user) || - checkUserPermissionsWithResource( - user, - workflow.user, - actionListener, - "workflow", - workflowId - ) - - if (canDelete) { - val delegateMonitorIds = (workflow.inputs[0] as CompositeInput).getMonitorIds() - var deletableMonitors = listOf() - // User can only delete the delegate monitors only in the case if all monitors can be deleted - // if there are monitors in this workflow that are referenced in other workflows, we cannot delete the monitors. - // We will not partially delete monitors. we delete them all or fail the request. - if (deleteDelegateMonitors == true) { - deletableMonitors = getDeletableDelegates(workflowId, delegateMonitorIds, user) - val monitorsDiff = delegateMonitorIds.toMutableList() - monitorsDiff.removeAll(deletableMonitors.map { it.id }) - - if (monitorsDiff.isNotEmpty()) { - actionListener.onFailure( - AlertingException( - "Not allowed to delete ${monitorsDiff.joinToString()} monitors", - RestStatus.FORBIDDEN, - IllegalStateException() - ) - ) - return - } - } - - val deleteResponse = deleteWorkflow(deleteRequest) - var deleteWorkflowResponse = DeleteWorkflowResponse(deleteResponse.id, deleteResponse.version) - - val workflowMetadataId = WorkflowMetadata.getId(workflow.id) - - val metadataIdsToDelete = mutableListOf(workflowMetadataId) - - if (deleteDelegateMonitors == true) { - val failedMonitorIds = tryDeletingMonitors(deletableMonitors, RefreshPolicy.IMMEDIATE) - // Update delete workflow response - deleteWorkflowResponse.nonDeletedMonitors = failedMonitorIds - // Delete monitors workflow metadata - // Monitor metadata will be in workflowId-monitorId-metadata format - metadataIdsToDelete.addAll(deletableMonitors.map { MonitorMetadata.getId(it, workflowMetadataId) }) - } - try { - // Delete the monitors workflow metadata - val deleteMonitorWorkflowMetadataResponse: BulkByScrollResponse = client.suspendUntil { - DeleteByQueryRequestBuilder(this, DeleteByQueryAction.INSTANCE) - .source(ScheduledJob.SCHEDULED_JOBS_INDEX) - .filter(QueryBuilders.idsQuery().addIds(*metadataIdsToDelete.toTypedArray())) - .execute(it) - } - } catch (t: Exception) { - log.error("Failed to delete delegate monitor metadata. 
But proceeding with workflow deletion $workflowId", t) - } - actionListener.onResponse(deleteWorkflowResponse) - } else { - actionListener.onFailure( - AlertingException( - "Not allowed to delete this workflow!", - RestStatus.FORBIDDEN, - IllegalStateException() - ) - ) - } - } catch (t: Exception) { - if (t is IndexNotFoundException) { - actionListener.onFailure( - OpenSearchStatusException( - "Workflow not found.", - RestStatus.NOT_FOUND - ) - ) - } else { - log.error("Failed to delete workflow $workflowId", t) - actionListener.onFailure(AlertingException.wrap(t)) - } - } - } - - /** - * Tries to delete the given list of monitors. The return value contains the ids of all monitors for which deletion failed. - * @param monitors list of monitors to be deleted - * @param refreshPolicy - * @return ids of the monitors that were not deleted - */ - private suspend fun tryDeletingMonitors(monitors: List, refreshPolicy: RefreshPolicy): List { - val nonDeletedMonitorIds = mutableListOf() - for (monitor in monitors) { - try { - DeleteMonitorService.deleteMonitor(monitor, refreshPolicy) - } catch (ex: Exception) { - log.error("failed to delete delegate monitor ${monitor.id} for $workflowId") - nonDeletedMonitorIds.add(monitor.id) - } - } - return nonDeletedMonitorIds - } - - /** - * Returns the list of monitor ids belonging only to the given workflow. - * If filterBy is enabled, it filters further and returns only those monitors which the user has permission to delete. - * @param workflowIdToBeDeleted Id of the workflow that should be deleted - * @param monitorIds List of delegate monitor ids (underlying monitor ids) - */ - private suspend fun getDeletableDelegates(workflowIdToBeDeleted: String, monitorIds: List, user: User?): List { - // Retrieve monitors belonging to other workflows - val queryBuilder = QueryBuilders.boolQuery().mustNot(QueryBuilders.termQuery("_id", workflowIdToBeDeleted)).filter( - QueryBuilders.nestedQuery( - WORKFLOW_DELEGATE_PATH, - QueryBuilders.boolQuery().must( - QueryBuilders.termsQuery( - WORKFLOW_MONITOR_PATH, - monitorIds - ) - ), - ScoreMode.None - ) - ) - - val searchRequest = SearchRequest() - .indices(ScheduledJob.SCHEDULED_JOBS_INDEX) - .source(SearchSourceBuilder().query(queryBuilder)) - - val searchResponse: SearchResponse = client.suspendUntil { search(searchRequest, it) } - - val workflows = searchResponse.hits.hits.map { hit -> - val xcp = XContentHelper.createParser( - xContentRegistry, LoggingDeprecationHandler.INSTANCE, - hit.sourceRef, XContentType.JSON - ).also { it.nextToken() } - lateinit var workflow: Workflow - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - xcp.nextToken() - when (xcp.currentName()) { - "workflow" -> workflow = Workflow.parse(xcp) - } - } - workflow.copy(id = hit.id, version = hit.version) - } - val workflowMonitors = workflows.flatMap { (it.inputs[0] as CompositeInput).getMonitorIds() }.distinct() - // Monitors that can be deleted -> all workflow delegates - monitors belonging to different workflows - val deletableMonitorIds = monitorIds.minus(workflowMonitors.toSet()) - - // Filter further to get the list of monitors that the user has permission to delete, if filterBy is enabled and the user is not null - val query = QueryBuilders.boolQuery().filter(QueryBuilders.termsQuery("_id", deletableMonitorIds)) - val searchSource = SearchSourceBuilder().query(query) - val monitorSearchRequest = SearchRequest(ScheduledJob.SCHEDULED_JOBS_INDEX).source(searchSource) - - if (user != null && filterByEnabled) { - addFilter(user,
monitorSearchRequest.source(), "monitor.user.backend_roles.keyword") - } - - val searchMonitorResponse: SearchResponse = client.suspendUntil { search(monitorSearchRequest, it) } - if (searchMonitorResponse.isTimedOut) { - throw OpenSearchException("Cannot determine that the ${ScheduledJob.SCHEDULED_JOBS_INDEX} index is healthy") - } - val deletableMonitors = mutableListOf() - for (hit in searchMonitorResponse.hits) { - XContentType.JSON.xContent().createParser( - xContentRegistry, - LoggingDeprecationHandler.INSTANCE, hit.sourceAsString - ).use { hitsParser -> - val monitor = ScheduledJob.parse(hitsParser, hit.id, hit.version) as Monitor - deletableMonitors.add(monitor) - } - } - - return deletableMonitors - } - - private suspend fun getWorkflow(): Workflow { - val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, workflowId) - - val getResponse: GetResponse = client.suspendUntil { get(getRequest, it) } - if (getResponse.isExists == false) { - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException("Workflow not found.", RestStatus.NOT_FOUND) - ) - ) - } - val xcp = XContentHelper.createParser( - xContentRegistry, LoggingDeprecationHandler.INSTANCE, - getResponse.sourceAsBytesRef, XContentType.JSON - ) - return ScheduledJob.parse(xcp, getResponse.id, getResponse.version) as Workflow - } - - private suspend fun deleteWorkflow(deleteRequest: DeleteRequest): DeleteResponse { - log.debug("Deleting the workflow with id ${deleteRequest.id()}") - return client.suspendUntil { delete(deleteRequest, it) } - } - - private suspend fun deleteWorkflowMetadata(workflow: Workflow) { - val deleteRequest = DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, WorkflowMetadata.getId(workflow.id)) - val deleteResponse: DeleteResponse = client.suspendUntil { delete(deleteRequest, it) } - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportExecuteMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportExecuteMonitorAction.kt deleted file mode 100644 index 9a814bb90..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/TransportExecuteMonitorAction.kt +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import kotlinx.coroutines.CoroutineScope -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.launch -import kotlinx.coroutines.withContext -import org.apache.logging.log4j.LogManager -import org.opensearch.OpenSearchStatusException -import org.opensearch.action.get.GetRequest -import org.opensearch.action.get.GetResponse -import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.HandledTransportAction -import org.opensearch.action.support.WriteRequest -import org.opensearch.alerting.MonitorMetadataService -import org.opensearch.alerting.MonitorRunnerService -import org.opensearch.alerting.action.ExecuteMonitorAction -import org.opensearch.alerting.action.ExecuteMonitorRequest -import org.opensearch.alerting.action.ExecuteMonitorResponse -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.util.AlertingException -import org.opensearch.alerting.util.DocLevelMonitorQueries -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.inject.Inject -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import 
org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.ConfigConstants -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.authuser.User -import org.opensearch.core.action.ActionListener -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.tasks.Task -import org.opensearch.transport.TransportService -import java.time.Instant - -private val log = LogManager.getLogger(TransportExecuteMonitorAction::class.java) -private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) - -class TransportExecuteMonitorAction @Inject constructor( - transportService: TransportService, - private val client: Client, - private val clusterService: ClusterService, - private val runner: MonitorRunnerService, - actionFilters: ActionFilters, - val xContentRegistry: NamedXContentRegistry, - private val docLevelMonitorQueries: DocLevelMonitorQueries, - private val settings: Settings -) : HandledTransportAction ( - ExecuteMonitorAction.NAME, - transportService, - actionFilters, - ::ExecuteMonitorRequest -) { - @Volatile private var indexTimeout = AlertingSettings.INDEX_TIMEOUT.get(settings) - - override fun doExecute(task: Task, execMonitorRequest: ExecuteMonitorRequest, actionListener: ActionListener) { - val userStr = client.threadPool().threadContext.getTransient(ConfigConstants.OPENSEARCH_SECURITY_USER_INFO_THREAD_CONTEXT) - log.debug("User and roles string from thread context: $userStr") - val user: User? = User.parse(userStr) - - client.threadPool().threadContext.stashContext().use { - val executeMonitor = fun(monitor: Monitor) { - // Launch the coroutine with the clients threadContext. This is needed to preserve authentication information - // stored on the threadContext set by the security plugin when using the Alerting plugin with the Security plugin. 
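// Added note (not in the original comment): stashContext() returns a
// ThreadContext.StoredContext whose close() reinstates the caller's context, so
// the enclosing stashContext().use { ... } block guarantees restoration even if
// execution throws. While stashed, client calls run with plugin-level rather than
// end-user credentials; the end user's identity, read into `userStr` above before
// stashing, is re-attached to the monitor via monitor.copy(user = user) below.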
- // runner.launch(ElasticThreadContextElement(client.threadPool().threadContext)) { - runner.launch { - val (periodStart, periodEnd) = - monitor.schedule.getPeriodEndingAt(Instant.ofEpochMilli(execMonitorRequest.requestEnd.millis)) - try { - log.info( - "Executing monitor from API - id: ${monitor.id}, type: ${monitor.monitorType.name}, " + - "periodStart: $periodStart, periodEnd: $periodEnd, dryrun: ${execMonitorRequest.dryrun}" - ) - val monitorRunResult = runner.runJob(monitor, periodStart, periodEnd, execMonitorRequest.dryrun) - withContext(Dispatchers.IO) { - actionListener.onResponse(ExecuteMonitorResponse(monitorRunResult)) - } - } catch (e: Exception) { - log.error("Unexpected error running monitor", e) - withContext(Dispatchers.IO) { - actionListener.onFailure(AlertingException.wrap(e)) - } - } - } - } - - if (execMonitorRequest.monitorId != null) { - val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX).id(execMonitorRequest.monitorId) - client.get( - getRequest, - object : ActionListener { - override fun onResponse(response: GetResponse) { - if (!response.isExists) { - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException("Can't find monitor with id: ${response.id}", RestStatus.NOT_FOUND) - ) - ) - return - } - if (!response.isSourceEmpty) { - XContentHelper.createParser( - xContentRegistry, - LoggingDeprecationHandler.INSTANCE, - response.sourceAsBytesRef, - XContentType.JSON - ).use { xcp -> - val monitor = ScheduledJob.parse(xcp, response.id, response.version) as Monitor - executeMonitor(monitor) - } - } - } - - override fun onFailure(t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - ) - } else { - val monitor = when (user?.name.isNullOrEmpty()) { - true -> execMonitorRequest.monitor as Monitor - false -> (execMonitorRequest.monitor as Monitor).copy(user = user) - } - - if (monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { - try { - scope.launch { - if (!docLevelMonitorQueries.docLevelQueryIndexExists(monitor.dataSources)) { - docLevelMonitorQueries.initDocLevelQueryIndex(monitor.dataSources) - log.info("Central Percolation index ${ScheduledJob.DOC_LEVEL_QUERIES_INDEX} created") - } - val (metadata, _) = MonitorMetadataService.getOrCreateMetadata(monitor, skipIndex = true) - docLevelMonitorQueries.indexDocLevelQueries( - monitor, - monitor.id, - metadata, - WriteRequest.RefreshPolicy.IMMEDIATE, - indexTimeout - ) - log.info("Queries inserted into Percolate index ${ScheduledJob.DOC_LEVEL_QUERIES_INDEX}") - executeMonitor(monitor) - } - } catch (t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } - } else { - executeMonitor(monitor) - } - } - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportExecuteWorkflowAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportExecuteWorkflowAction.kt deleted file mode 100644 index b29171f65..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/TransportExecuteWorkflowAction.kt +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.launch -import kotlinx.coroutines.withContext -import org.apache.logging.log4j.LogManager -import org.opensearch.OpenSearchStatusException -import org.opensearch.action.get.GetRequest -import org.opensearch.action.get.GetResponse -import 
org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.HandledTransportAction -import org.opensearch.alerting.MonitorRunnerService -import org.opensearch.alerting.action.ExecuteWorkflowAction -import org.opensearch.alerting.action.ExecuteWorkflowRequest -import org.opensearch.alerting.action.ExecuteWorkflowResponse -import org.opensearch.alerting.util.AlertingException -import org.opensearch.client.Client -import org.opensearch.common.inject.Inject -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.ConfigConstants -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.commons.authuser.User -import org.opensearch.core.action.ActionListener -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.tasks.Task -import org.opensearch.transport.TransportService -import java.time.Instant - -private val log = LogManager.getLogger(TransportExecuteWorkflowAction::class.java) - -class TransportExecuteWorkflowAction @Inject constructor( - transportService: TransportService, - private val client: Client, - private val runner: MonitorRunnerService, - actionFilters: ActionFilters, - val xContentRegistry: NamedXContentRegistry -) : HandledTransportAction( - ExecuteWorkflowAction.NAME, transportService, actionFilters, ::ExecuteWorkflowRequest -) { - override fun doExecute( - task: Task, - execWorkflowRequest: ExecuteWorkflowRequest, - actionListener: ActionListener, - ) { - val userStr = client.threadPool().threadContext.getTransient(ConfigConstants.OPENSEARCH_SECURITY_USER_INFO_THREAD_CONTEXT) - log.debug("User and roles string from thread context: $userStr") - val user: User? 
= User.parse(userStr) - - client.threadPool().threadContext.stashContext().use { - val executeWorkflow = fun(workflow: Workflow) { - runner.launch { - val (periodStart, periodEnd) = - workflow.schedule.getPeriodEndingAt(Instant.ofEpochMilli(execWorkflowRequest.requestEnd.millis)) - try { - log.info( - "Executing workflow from API - id: ${workflow.id}, periodStart: $periodStart, periodEnd: $periodEnd, " + - "dryrun: ${execWorkflowRequest.dryrun}" - ) - val workflowRunResult = - MonitorRunnerService.runJob(workflow, periodStart, periodEnd, execWorkflowRequest.dryrun) - withContext(Dispatchers.IO, { - actionListener.onResponse( - ExecuteWorkflowResponse( - workflowRunResult - ) - ) - }) - } catch (e: Exception) { - log.error("Unexpected error running workflow", e) - withContext(Dispatchers.IO) { - actionListener.onFailure(AlertingException.wrap(e)) - } - } - } - } - - if (execWorkflowRequest.workflowId != null) { - val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX).id(execWorkflowRequest.workflowId) - client.get( - getRequest, - object : ActionListener { - override fun onResponse(response: GetResponse) { - if (!response.isExists) { - log.error("Can't find workflow with id: ${response.id}") - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "Can't find workflow with id: ${response.id}", - RestStatus.NOT_FOUND - ) - ) - ) - return - } - if (!response.isSourceEmpty) { - XContentHelper.createParser( - xContentRegistry, LoggingDeprecationHandler.INSTANCE, - response.sourceAsBytesRef, XContentType.JSON - ).use { xcp -> - val workflow = ScheduledJob.parse(xcp, response.id, response.version) as Workflow - executeWorkflow(workflow) - } - } - } - - override fun onFailure(t: Exception) { - log.error("Error getting workflow ${execWorkflowRequest.workflowId}", t) - actionListener.onFailure(AlertingException.wrap(t)) - } - } - ) - } else { - val workflow = when (user?.name.isNullOrEmpty()) { - true -> execWorkflowRequest.workflow as Workflow - false -> (execWorkflowRequest.workflow as Workflow).copy(user = user) - } - - executeWorkflow(workflow) - } - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetAlertsAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetAlertsAction.kt deleted file mode 100644 index 604c130fc..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetAlertsAction.kt +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import kotlinx.coroutines.CoroutineScope -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.launch -import org.apache.logging.log4j.LogManager -import org.opensearch.action.ActionRequest -import org.opensearch.action.get.GetRequest -import org.opensearch.action.get.GetResponse -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.HandledTransportAction -import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.opensearchapi.addFilter -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.util.AlertingException -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.inject.Inject -import 
org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.GetAlertsRequest -import org.opensearch.commons.alerting.action.GetAlertsResponse -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.authuser.User -import org.opensearch.commons.utils.recreateObject -import org.opensearch.core.action.ActionListener -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils -import org.opensearch.index.query.BoolQueryBuilder -import org.opensearch.index.query.Operator -import org.opensearch.index.query.QueryBuilders -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.search.sort.SortBuilders -import org.opensearch.search.sort.SortOrder -import org.opensearch.tasks.Task -import org.opensearch.transport.TransportService -import java.io.IOException - -private val log = LogManager.getLogger(TransportGetAlertsAction::class.java) -private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) - -class TransportGetAlertsAction @Inject constructor( - transportService: TransportService, - val client: Client, - clusterService: ClusterService, - actionFilters: ActionFilters, - val settings: Settings, - val xContentRegistry: NamedXContentRegistry, -) : HandledTransportAction( - AlertingActions.GET_ALERTS_ACTION_NAME, - transportService, - actionFilters, - ::GetAlertsRequest -), - SecureTransportAction { - - @Volatile - override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) - - init { - listenFilterBySettingChange(clusterService) - } - - override fun doExecute( - task: Task, - request: ActionRequest, - actionListener: ActionListener, - ) { - val getAlertsRequest = request as? GetAlertsRequest - ?: recreateObject(request) { GetAlertsRequest(it) } - val user = readUserFromThreadContext(client) - - val tableProp = getAlertsRequest.table - val sortBuilder = SortBuilders - .fieldSort(tableProp.sortString) - .order(SortOrder.fromString(tableProp.sortOrder)) - if (!tableProp.missing.isNullOrBlank()) { - sortBuilder.missing(tableProp.missing) - } - - val queryBuilder = QueryBuilders.boolQuery() - - if (getAlertsRequest.severityLevel != "ALL") { - queryBuilder.filter(QueryBuilders.termQuery("severity", getAlertsRequest.severityLevel)) - } - - if (getAlertsRequest.alertState == "ALL") { - // alerting dashboards expects chained alerts and individually executed monitors' alerts to be returned from this api - // when invoked with state=ALL. They require that audit alerts are NOT returned in this page - // and only be shown in "associated alerts" field under get workflow_alerts API. 
- // But if the API is called with query_params: state=AUDIT,monitor_id=<123>,workflow_id=, this api - // will return audit alerts generated by delegate monitor <123> in workflow - queryBuilder.filter(QueryBuilders.boolQuery().mustNot(QueryBuilders.termsQuery(Alert.STATE_FIELD, Alert.State.AUDIT.name))) - } else { - queryBuilder.filter(QueryBuilders.termQuery("state", getAlertsRequest.alertState)) - } - - if (getAlertsRequest.alertIds.isNullOrEmpty() == false) { - queryBuilder.filter(QueryBuilders.termsQuery("_id", getAlertsRequest.alertIds)) - } - - if (getAlertsRequest.monitorId != null) { - queryBuilder.filter(QueryBuilders.termQuery("monitor_id", getAlertsRequest.monitorId)) - addWorkflowIdNullOrEmptyCheck(getAlertsRequest, queryBuilder) - } else if (getAlertsRequest.monitorIds.isNullOrEmpty() == false) { - queryBuilder.filter(QueryBuilders.termsQuery("monitor_id", getAlertsRequest.monitorIds)) - addWorkflowIdNullOrEmptyCheck(getAlertsRequest, queryBuilder) - } - if ( - getAlertsRequest.workflowIds.isNullOrEmpty() == false && - !(getAlertsRequest.workflowIds!!.size == 1 && getAlertsRequest.workflowIds!![0] == "") - ) { - queryBuilder.must(QueryBuilders.termsQuery("workflow_id", getAlertsRequest.workflowIds)) - } - if (!tableProp.searchString.isNullOrBlank()) { - queryBuilder - .must( - QueryBuilders - .queryStringQuery(tableProp.searchString) - .defaultOperator(Operator.AND) - .field("monitor_name") - .field("trigger_name") - ) - } - val searchSourceBuilder = SearchSourceBuilder() - .version(true) - .seqNoAndPrimaryTerm(true) - .query(queryBuilder) - .sort(sortBuilder) - .size(tableProp.size) - .from(tableProp.startIndex) - - client.threadPool().threadContext.stashContext().use { - scope.launch { - try { - val alertIndex = resolveAlertsIndexName(getAlertsRequest) - getAlerts(alertIndex, searchSourceBuilder, actionListener, user) - } catch (t: Exception) { - log.error("Failed to get alerts", t) - if (t is AlertingException) { - actionListener.onFailure(t) - } else { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - } - } - } - - // we add this check when we want to fetch alerts for monitors not generated as part of a workflow i.e. non-delegate monitor alerts - private fun addWorkflowIdNullOrEmptyCheck( - getAlertsRequest: GetAlertsRequest, - queryBuilder: BoolQueryBuilder, - ) { - if ( - getAlertsRequest.workflowIds != null && getAlertsRequest.workflowIds!!.size == 1 && getAlertsRequest.workflowIds!![0] == "" - ) { - val noWorkflowIdQuery = QueryBuilders.boolQuery() - .should(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery(Alert.WORKFLOW_ID_FIELD))) - .should(QueryBuilders.termsQuery(Alert.WORKFLOW_ID_FIELD, "")) - queryBuilder.must(noWorkflowIdQuery) - } - } - - /** Precedence order for resolving alert index to be queried: - 1. alertIndex param. - 2. alert index mentioned in monitor data sources. - 3. Default alert indices pattern - */ - suspend fun resolveAlertsIndexName(getAlertsRequest: GetAlertsRequest): String { - var alertIndex = AlertIndices.ALL_ALERT_INDEX_PATTERN - if (getAlertsRequest.alertIndex.isNullOrEmpty() == false) { - alertIndex = getAlertsRequest.alertIndex!! 
- } else if (getAlertsRequest.monitorId.isNullOrEmpty() == false) { - val retrievedMonitor = getMonitor(getAlertsRequest) - if (retrievedMonitor != null) { - alertIndex = retrievedMonitor.dataSources.alertsIndex - } - } - return if (alertIndex == AlertIndices.ALERT_INDEX) - AlertIndices.ALL_ALERT_INDEX_PATTERN - else - alertIndex - } - - private suspend fun getMonitor(getAlertsRequest: GetAlertsRequest): Monitor? { - val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, getAlertsRequest.monitorId!!) - try { - val getResponse: GetResponse = client.suspendUntil { client.get(getRequest, it) } - if (!getResponse.isExists) { - return null - } - val xcp = XContentHelper.createParser( - xContentRegistry, LoggingDeprecationHandler.INSTANCE, - getResponse.sourceAsBytesRef, XContentType.JSON - ) - return ScheduledJob.parse(xcp, getResponse.id, getResponse.version) as Monitor - } catch (t: Exception) { - log.error("Failure in fetching monitor ${getAlertsRequest.monitorId} to resolve alert index in get alerts action", t) - return null - } - } - - fun getAlerts( - alertIndex: String, - searchSourceBuilder: SearchSourceBuilder, - actionListener: ActionListener, - user: User?, - ) { - // user is null when: 1/ security is disabled. 2/when user is super-admin. - if (user == null) { - // user is null when: 1/ security is disabled. 2/when user is super-admin. - search(alertIndex, searchSourceBuilder, actionListener) - } else if (!doFilterForUser(user)) { - // security is enabled and filterby is disabled. - search(alertIndex, searchSourceBuilder, actionListener) - } else { - // security is enabled and filterby is enabled. - try { - log.info("Filtering result by: ${user.backendRoles}") - addFilter(user, searchSourceBuilder, "monitor_user.backend_roles.keyword") - search(alertIndex, searchSourceBuilder, actionListener) - } catch (ex: IOException) { - actionListener.onFailure(AlertingException.wrap(ex)) - } - } - } - - fun search(alertIndex: String, searchSourceBuilder: SearchSourceBuilder, actionListener: ActionListener) { - val searchRequest = SearchRequest() - .indices(alertIndex) - .source(searchSourceBuilder) - - client.search( - searchRequest, - object : ActionListener { - override fun onResponse(response: SearchResponse) { - val totalAlertCount = response.hits.totalHits?.value?.toInt() - val alerts = response.hits.map { hit -> - val xcp = XContentHelper.createParser( - xContentRegistry, - LoggingDeprecationHandler.INSTANCE, - hit.sourceRef, - XContentType.JSON - ) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - val alert = Alert.parse(xcp, hit.id, hit.version) - alert - } - actionListener.onResponse(GetAlertsResponse(alerts, totalAlertCount)) - } - - override fun onFailure(t: Exception) { - actionListener.onFailure(t) - } - } - ) - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetDestinationsAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetDestinationsAction.kt deleted file mode 100644 index 4036769ad..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetDestinationsAction.kt +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import org.apache.logging.log4j.LogManager -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.action.support.ActionFilters -import 
org.opensearch.action.support.HandledTransportAction -import org.opensearch.alerting.action.GetDestinationsAction -import org.opensearch.alerting.action.GetDestinationsRequest -import org.opensearch.alerting.action.GetDestinationsResponse -import org.opensearch.alerting.model.destination.Destination -import org.opensearch.alerting.opensearchapi.addFilter -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.util.AlertingException -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.inject.Inject -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.authuser.User -import org.opensearch.core.action.ActionListener -import org.opensearch.core.common.Strings -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils -import org.opensearch.index.query.Operator -import org.opensearch.index.query.QueryBuilders -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.search.fetch.subphase.FetchSourceContext -import org.opensearch.search.sort.SortBuilders -import org.opensearch.search.sort.SortOrder -import org.opensearch.tasks.Task -import org.opensearch.transport.TransportService -import java.io.IOException - -private val log = LogManager.getLogger(TransportGetDestinationsAction::class.java) - -class TransportGetDestinationsAction @Inject constructor( - transportService: TransportService, - val client: Client, - clusterService: ClusterService, - actionFilters: ActionFilters, - val settings: Settings, - val xContentRegistry: NamedXContentRegistry -) : HandledTransportAction ( - GetDestinationsAction.NAME, - transportService, - actionFilters, - ::GetDestinationsRequest -), - SecureTransportAction { - - @Volatile override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) - - init { - listenFilterBySettingChange(clusterService) - } - - override fun doExecute( - task: Task, - getDestinationsRequest: GetDestinationsRequest, - actionListener: ActionListener - ) { - val user = readUserFromThreadContext(client) - val tableProp = getDestinationsRequest.table - - val sortBuilder = SortBuilders - .fieldSort(tableProp.sortString) - .order(SortOrder.fromString(tableProp.sortOrder)) - if (!tableProp.missing.isNullOrBlank()) { - sortBuilder.missing(tableProp.missing) - } - - val searchSourceBuilder = SearchSourceBuilder() - .sort(sortBuilder) - .size(tableProp.size) - .from(tableProp.startIndex) - .fetchSource(FetchSourceContext(true, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY)) - .seqNoAndPrimaryTerm(true) - .version(true) - val queryBuilder = QueryBuilders.boolQuery() - .must(QueryBuilders.existsQuery("destination")) - - if (!getDestinationsRequest.destinationId.isNullOrBlank()) { - queryBuilder.filter(QueryBuilders.termQuery("_id", getDestinationsRequest.destinationId)) - } - - if (getDestinationsRequest.destinationType != "ALL") { - queryBuilder.filter(QueryBuilders.termQuery("destination.type", getDestinationsRequest.destinationType)) - } - - if (!tableProp.searchString.isNullOrBlank()) { - queryBuilder - .must( - QueryBuilders - .queryStringQuery(tableProp.searchString) - .defaultOperator(Operator.AND) - 
.field("destination.type") - .field("destination.name") - ) - } - searchSourceBuilder.query(queryBuilder) - - client.threadPool().threadContext.stashContext().use { - resolve(searchSourceBuilder, actionListener, user) - } - } - - fun resolve( - searchSourceBuilder: SearchSourceBuilder, - actionListener: ActionListener, - user: User? - ) { - if (user == null) { - // user is null when: 1/ security is disabled. 2/when user is super-admin. - search(searchSourceBuilder, actionListener) - } else if (!doFilterForUser(user)) { - // security is enabled and filterby is disabled. - search(searchSourceBuilder, actionListener) - } else { - // security is enabled and filterby is enabled. - try { - log.info("Filtering result by: ${user.backendRoles}") - addFilter(user, searchSourceBuilder, "destination.user.backend_roles.keyword") - search(searchSourceBuilder, actionListener) - } catch (ex: IOException) { - actionListener.onFailure(AlertingException.wrap(ex)) - } - } - } - - fun search(searchSourceBuilder: SearchSourceBuilder, actionListener: ActionListener) { - val searchRequest = SearchRequest() - .source(searchSourceBuilder) - .indices(ScheduledJob.SCHEDULED_JOBS_INDEX) - client.search( - searchRequest, - object : ActionListener { - override fun onResponse(response: SearchResponse) { - val totalDestinationCount = response.hits.totalHits?.value?.toInt() - val destinations = mutableListOf() - for (hit in response.hits) { - val id = hit.id - val version = hit.version - val seqNo = hit.seqNo.toInt() - val primaryTerm = hit.primaryTerm.toInt() - val xcp = XContentType.JSON.xContent() - .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, hit.sourceAsString) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, xcp.nextToken(), xcp) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - destinations.add(Destination.parse(xcp, id, version, seqNo, primaryTerm)) - } - actionListener.onResponse(GetDestinationsResponse(RestStatus.OK, totalDestinationCount, destinations)) - } - - override fun onFailure(t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - ) - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetEmailAccountAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetEmailAccountAction.kt deleted file mode 100644 index cae1f2298..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetEmailAccountAction.kt +++ /dev/null @@ -1,122 +0,0 @@ -/* -* Copyright OpenSearch Contributors -* SPDX-License-Identifier: Apache-2.0 -*/ - -package org.opensearch.alerting.transport - -import org.apache.logging.log4j.LogManager -import org.opensearch.OpenSearchStatusException -import org.opensearch.action.get.GetRequest -import org.opensearch.action.get.GetResponse -import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.HandledTransportAction -import org.opensearch.alerting.action.GetEmailAccountAction -import org.opensearch.alerting.action.GetEmailAccountRequest -import org.opensearch.alerting.action.GetEmailAccountResponse -import org.opensearch.alerting.model.destination.email.EmailAccount -import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST -import org.opensearch.alerting.util.AlertingException -import org.opensearch.alerting.util.DestinationType -import 
org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.inject.Inject -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX -import org.opensearch.core.action.ActionListener -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.tasks.Task -import org.opensearch.transport.TransportService - -private val log = LogManager.getLogger(TransportGetEmailAccountAction::class.java) - -class TransportGetEmailAccountAction @Inject constructor( - transportService: TransportService, - val client: Client, - actionFilters: ActionFilters, - val clusterService: ClusterService, - settings: Settings, - val xContentRegistry: NamedXContentRegistry -) : HandledTransportAction( - GetEmailAccountAction.NAME, - transportService, - actionFilters, - ::GetEmailAccountRequest -) { - - @Volatile private var allowList = ALLOW_LIST.get(settings) - - init { - clusterService.clusterSettings.addSettingsUpdateConsumer(ALLOW_LIST) { allowList = it } - } - - override fun doExecute( - task: Task, - getEmailAccountRequest: GetEmailAccountRequest, - actionListener: ActionListener - ) { - if (!allowList.contains(DestinationType.EMAIL.value)) { - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "This API is blocked since Destination type [${DestinationType.EMAIL}] is not allowed", - RestStatus.FORBIDDEN - ) - ) - ) - return - } - - val getRequest = GetRequest(SCHEDULED_JOBS_INDEX, getEmailAccountRequest.emailAccountID) - .version(getEmailAccountRequest.version) - .fetchSourceContext(getEmailAccountRequest.srcContext) - client.threadPool().threadContext.stashContext().use { - client.get( - getRequest, - object : ActionListener { - override fun onResponse(response: GetResponse) { - if (!response.isExists) { - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException("Email Account not found.", RestStatus.NOT_FOUND) - ) - ) - return - } - - var emailAccount: EmailAccount? 
= null - if (!response.isSourceEmpty) { - XContentHelper.createParser( - xContentRegistry, - LoggingDeprecationHandler.INSTANCE, - response.sourceAsBytesRef, - XContentType.JSON - ).use { xcp -> - emailAccount = EmailAccount.parseWithType(xcp, response.id, response.version) - } - } - - actionListener.onResponse( - GetEmailAccountResponse( - response.id, - response.version, - response.seqNo, - response.primaryTerm, - RestStatus.OK, - emailAccount - ) - ) - } - - override fun onFailure(e: Exception) { - actionListener.onFailure(e) - } - } - ) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetEmailGroupAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetEmailGroupAction.kt deleted file mode 100644 index 4bc51440c..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetEmailGroupAction.kt +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import org.apache.logging.log4j.LogManager -import org.opensearch.OpenSearchStatusException -import org.opensearch.action.get.GetRequest -import org.opensearch.action.get.GetResponse -import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.HandledTransportAction -import org.opensearch.alerting.action.GetEmailGroupAction -import org.opensearch.alerting.action.GetEmailGroupRequest -import org.opensearch.alerting.action.GetEmailGroupResponse -import org.opensearch.alerting.model.destination.email.EmailGroup -import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST -import org.opensearch.alerting.util.AlertingException -import org.opensearch.alerting.util.DestinationType -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.inject.Inject -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX -import org.opensearch.core.action.ActionListener -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.tasks.Task -import org.opensearch.transport.TransportService - -private val log = LogManager.getLogger(TransportGetEmailGroupAction::class.java) - -class TransportGetEmailGroupAction @Inject constructor( - transportService: TransportService, - val client: Client, - actionFilters: ActionFilters, - val clusterService: ClusterService, - settings: Settings, - val xContentRegistry: NamedXContentRegistry -) : HandledTransportAction( - GetEmailGroupAction.NAME, - transportService, - actionFilters, - ::GetEmailGroupRequest -) { - - @Volatile private var allowList = ALLOW_LIST.get(settings) - - init { - clusterService.clusterSettings.addSettingsUpdateConsumer(ALLOW_LIST) { allowList = it } - } - - override fun doExecute( - task: Task, - getEmailGroupRequest: GetEmailGroupRequest, - actionListener: ActionListener - ) { - if (!allowList.contains(DestinationType.EMAIL.value)) { - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "This API is blocked since Destination type [${DestinationType.EMAIL}] is not allowed", - RestStatus.FORBIDDEN - ) - ) - ) - return - } - - val getRequest = 
GetRequest(SCHEDULED_JOBS_INDEX, getEmailGroupRequest.emailGroupID) - .version(getEmailGroupRequest.version) - .fetchSourceContext(getEmailGroupRequest.srcContext) - client.threadPool().threadContext.stashContext().use { - client.get( - getRequest, - object : ActionListener { - override fun onResponse(response: GetResponse) { - if (!response.isExists) { - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException("Email Group not found.", RestStatus.NOT_FOUND) - ) - ) - return - } - - var emailGroup: EmailGroup? = null - if (!response.isSourceEmpty) { - XContentHelper.createParser( - xContentRegistry, - LoggingDeprecationHandler.INSTANCE, - response.sourceAsBytesRef, - XContentType.JSON - ).use { xcp -> - emailGroup = EmailGroup.parseWithType(xcp, response.id, response.version) - } - } - - actionListener.onResponse( - GetEmailGroupResponse( - response.id, - response.version, - response.seqNo, - response.primaryTerm, - RestStatus.OK, - emailGroup - ) - ) - } - - override fun onFailure(e: Exception) { - actionListener.onFailure(e) - } - } - ) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetFindingsAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetFindingsAction.kt deleted file mode 100644 index 84f3ab24f..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetFindingsAction.kt +++ /dev/null @@ -1,230 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import kotlinx.coroutines.CoroutineScope -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.launch -import kotlinx.coroutines.withContext -import org.apache.logging.log4j.LogManager -import org.apache.lucene.search.join.ScoreMode -import org.opensearch.action.ActionRequest -import org.opensearch.action.get.MultiGetRequest -import org.opensearch.action.get.MultiGetResponse -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.HandledTransportAction -import org.opensearch.alerting.alerts.AlertIndices.Companion.ALL_FINDING_INDEX_PATTERN -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.util.AlertingException -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.inject.Inject -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.GetFindingsRequest -import org.opensearch.commons.alerting.action.GetFindingsResponse -import org.opensearch.commons.alerting.action.GetMonitorRequest -import org.opensearch.commons.alerting.action.GetMonitorResponse -import org.opensearch.commons.alerting.model.Finding -import org.opensearch.commons.alerting.model.FindingDocument -import org.opensearch.commons.alerting.model.FindingWithDocs -import org.opensearch.commons.utils.recreateObject -import org.opensearch.core.action.ActionListener -import org.opensearch.core.common.Strings -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.XContentParser -import 
org.opensearch.core.xcontent.XContentParserUtils -import org.opensearch.index.query.Operator -import org.opensearch.index.query.QueryBuilders -import org.opensearch.rest.RestRequest -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.search.fetch.subphase.FetchSourceContext -import org.opensearch.search.sort.SortBuilders -import org.opensearch.search.sort.SortOrder -import org.opensearch.tasks.Task -import org.opensearch.transport.TransportService - -private val log = LogManager.getLogger(TransportGetFindingsSearchAction::class.java) -private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) - -class TransportGetFindingsSearchAction @Inject constructor( - transportService: TransportService, - val client: Client, - clusterService: ClusterService, - actionFilters: ActionFilters, - val settings: Settings, - val xContentRegistry: NamedXContentRegistry -) : HandledTransportAction ( - AlertingActions.GET_FINDINGS_ACTION_NAME, - transportService, - actionFilters, - ::GetFindingsRequest -), - SecureTransportAction { - - @Volatile override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) - - init { - listenFilterBySettingChange(clusterService) - } - - override fun doExecute( - task: Task, - request: ActionRequest, - actionListener: ActionListener - ) { - val getFindingsRequest = request as? GetFindingsRequest - ?: recreateObject(request) { GetFindingsRequest(it) } - val tableProp = getFindingsRequest.table - - val sortBuilder = SortBuilders - .fieldSort(tableProp.sortString) - .order(SortOrder.fromString(tableProp.sortOrder)) - if (!tableProp.missing.isNullOrBlank()) { - sortBuilder.missing(tableProp.missing) - } - - val searchSourceBuilder = SearchSourceBuilder() - .sort(sortBuilder) - .size(tableProp.size) - .from(tableProp.startIndex) - .fetchSource(FetchSourceContext(true, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY)) - .seqNoAndPrimaryTerm(true) - .version(true) - - val queryBuilder = QueryBuilders.boolQuery() - - if (!getFindingsRequest.findingId.isNullOrBlank()) { - queryBuilder.filter(QueryBuilders.termQuery("_id", getFindingsRequest.findingId)) - } - - if (getFindingsRequest.monitorId != null) { - queryBuilder.filter(QueryBuilders.termQuery("monitor_id", getFindingsRequest.monitorId)) - } else if (getFindingsRequest.monitorIds.isNullOrEmpty() == false) { - queryBuilder.filter(QueryBuilders.termsQuery("monitor_id", getFindingsRequest.monitorIds)) - } - - if (!tableProp.searchString.isNullOrBlank()) { - queryBuilder - .should( - QueryBuilders - .queryStringQuery(tableProp.searchString) - ) - .should( - QueryBuilders.nestedQuery( - "queries", - QueryBuilders.boolQuery() - .must( - QueryBuilders - .queryStringQuery(tableProp.searchString) - .defaultOperator(Operator.AND) - .field("queries.tags") - .field("queries.name") - ), - ScoreMode.Avg - ) - ) - } - - searchSourceBuilder.query(queryBuilder) - - client.threadPool().threadContext.stashContext().use { - scope.launch { - try { - val indexName = resolveFindingsIndexName(getFindingsRequest) - val getFindingsResponse = search(searchSourceBuilder, indexName) - actionListener.onResponse(getFindingsResponse) - } catch (t: AlertingException) { - actionListener.onFailure(t) - } catch (t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - } - } - - suspend fun resolveFindingsIndexName(findingsRequest: GetFindingsRequest): String { - var indexName = ALL_FINDING_INDEX_PATTERN - - if (findingsRequest.findingIndex.isNullOrEmpty() == false) { - // findingIndex has 
highest priority, so use that if available - indexName = findingsRequest.findingIndex!! - } else if (findingsRequest.monitorId.isNullOrEmpty() == false) { - // second best is monitorId. - // We will use it to fetch monitor and then read indexName from dataSources field of monitor - withContext(Dispatchers.IO) { - val getMonitorRequest = GetMonitorRequest( - findingsRequest.monitorId!!, - -3L, - RestRequest.Method.GET, - FetchSourceContext.FETCH_SOURCE - ) - val getMonitorResponse: GetMonitorResponse = - this@TransportGetFindingsSearchAction.client.suspendUntil { - execute(AlertingActions.GET_MONITOR_ACTION_TYPE, getMonitorRequest, it) - } - indexName = getMonitorResponse.monitor?.dataSources?.findingsIndex ?: ALL_FINDING_INDEX_PATTERN - } - } - return indexName - } - - suspend fun search(searchSourceBuilder: SearchSourceBuilder, indexName: String): GetFindingsResponse { - val searchRequest = SearchRequest() - .source(searchSourceBuilder) - .indices(indexName) - val searchResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } - val totalFindingCount = searchResponse.hits.totalHits?.value?.toInt() - val mgetRequest = MultiGetRequest() - val findingsWithDocs = mutableListOf() - val findings = mutableListOf() - for (hit in searchResponse.hits) { - val xcp = XContentType.JSON.xContent() - .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, hit.sourceAsString) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - val finding = Finding.parse(xcp) - findings.add(finding) - val documentIds = finding.relatedDocIds - // Add getRequests to mget request - documentIds.forEach { docId -> - mgetRequest.add(MultiGetRequest.Item(finding.index, docId)) - } - } - val documents = if (mgetRequest.items.isEmpty()) mutableMapOf() else searchDocument(mgetRequest) - findings.forEach { - val documentIds = it.relatedDocIds - val relatedDocs = mutableListOf() - for (docId in documentIds) { - val key = "${it.index}|$docId" - documents[key]?.let { document -> relatedDocs.add(document) } - } - findingsWithDocs.add(FindingWithDocs(it, relatedDocs)) - } - - return GetFindingsResponse(searchResponse.status(), totalFindingCount, findingsWithDocs) - } - - // TODO: Verify what happens if indices are closed/deleted - suspend fun searchDocument( - mgetRequest: MultiGetRequest - ): Map { - val response: MultiGetResponse = client.suspendUntil { client.multiGet(mgetRequest, it) } - val documents: MutableMap = mutableMapOf() - response.responses.forEach { - val key = "${it.index}|${it.id}" - val docData = if (it.isFailed) "" else it.response.sourceAsString - val findingDocument = FindingDocument(it.index, it.id, !it.isFailed, docData) - documents[key] = findingDocument - } - - return documents - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetMonitorAction.kt deleted file mode 100644 index 3a6f090ec..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetMonitorAction.kt +++ /dev/null @@ -1,194 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import kotlinx.coroutines.CoroutineScope -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.launch -import org.apache.logging.log4j.LogManager -import org.apache.lucene.search.join.ScoreMode -import org.opensearch.OpenSearchStatusException -import 
org.opensearch.action.ActionRequest -import org.opensearch.action.get.GetRequest -import org.opensearch.action.get.GetResponse -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.HandledTransportAction -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.util.AlertingException -import org.opensearch.alerting.util.ScheduledJobUtils.Companion.WORKFLOW_DELEGATE_PATH -import org.opensearch.alerting.util.ScheduledJobUtils.Companion.WORKFLOW_MONITOR_PATH -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.inject.Inject -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.GetMonitorRequest -import org.opensearch.commons.alerting.action.GetMonitorResponse -import org.opensearch.commons.alerting.action.GetMonitorResponse.AssociatedWorkflow -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.commons.utils.recreateObject -import org.opensearch.core.action.ActionListener -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.index.query.QueryBuilders -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.tasks.Task -import org.opensearch.transport.TransportService - -private val log = LogManager.getLogger(TransportGetMonitorAction::class.java) -private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) - -class TransportGetMonitorAction @Inject constructor( - transportService: TransportService, - val client: Client, - actionFilters: ActionFilters, - val xContentRegistry: NamedXContentRegistry, - val clusterService: ClusterService, - settings: Settings, -) : HandledTransportAction( - AlertingActions.GET_MONITOR_ACTION_NAME, - transportService, - actionFilters, - ::GetMonitorRequest -), - SecureTransportAction { - - @Volatile - override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) - - init { - listenFilterBySettingChange(clusterService) - } - - override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener) { - val transformedRequest = request as? GetMonitorRequest - ?: recreateObject(request) { - GetMonitorRequest(it) - } - - val user = readUserFromThreadContext(client) - - val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, transformedRequest.monitorId) - .version(transformedRequest.version) - .fetchSourceContext(transformedRequest.srcContext) - - if (!validateUserBackendRoles(user, actionListener)) { - return - } - - /* - * Remove security context before you call elasticsearch api's. By this time, permissions required - * to call this api are validated. - * Once system-indices [https://github.com/opendistro-for-elasticsearch/security/issues/666] is done, we - * might further improve this logic. Also change try to kotlin-use for auto-closable. 
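The comment above asks for Kotlin `use` on the auto-closable returned by stashContext(), which is what the code below already does. A condensed sketch of that pattern, for reference (the request and listener names are placeholders):

    // stashContext() empties the thread context (dropping user transients) and
    // returns a StoredContext; `use` closes it afterwards, restoring the original
    // caller context even if the call throws.
    client.threadPool().threadContext.stashContext().use {
        client.get(getRequest, listener)
    }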
- */ - client.threadPool().threadContext.stashContext().use { - client.get( - getRequest, - object : ActionListener { - override fun onResponse(response: GetResponse) { - if (!response.isExists) { - actionListener.onFailure( - AlertingException.wrap(OpenSearchStatusException("Monitor not found.", RestStatus.NOT_FOUND)) - ) - return - } - - var monitor: Monitor? = null - if (!response.isSourceEmpty) { - XContentHelper.createParser( - xContentRegistry, - LoggingDeprecationHandler.INSTANCE, - response.sourceAsBytesRef, - XContentType.JSON - ).use { xcp -> - monitor = ScheduledJob.parse(xcp, response.id, response.version) as Monitor - - // security is enabled and filterby is enabled - if (!checkUserPermissionsWithResource( - user, - monitor?.user, - actionListener, - "monitor", - transformedRequest.monitorId - ) - ) { - return - } - } - } - try { - scope.launch { - val associatedCompositeMonitors = getAssociatedWorkflows(response.id) - actionListener.onResponse( - GetMonitorResponse( - response.id, - response.version, - response.seqNo, - response.primaryTerm, - monitor, - associatedCompositeMonitors - ) - ) - } - } catch (e: Exception) { - log.error("Failed to get associate workflows in get monitor action", e) - } - } - - override fun onFailure(t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - ) - } - } - - private suspend fun getAssociatedWorkflows(id: String): List { - try { - val associatedWorkflows = mutableListOf() - val queryBuilder = QueryBuilders.nestedQuery( - WORKFLOW_DELEGATE_PATH, - QueryBuilders.boolQuery().must( - QueryBuilders.matchQuery( - WORKFLOW_MONITOR_PATH, - id - ) - ), - ScoreMode.None - ) - val searchRequest = SearchRequest() - .indices(ScheduledJob.SCHEDULED_JOBS_INDEX) - .source(SearchSourceBuilder().query(queryBuilder).fetchField("_id")) - val response: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } - - for (hit in response.hits) { - XContentType.JSON.xContent().createParser( - xContentRegistry, - LoggingDeprecationHandler.INSTANCE, - hit.sourceAsString - ).use { hitsParser -> - val workflow = ScheduledJob.parse(hitsParser, hit.id, hit.version) - if (workflow is Workflow) { - associatedWorkflows.add(AssociatedWorkflow(hit.id, workflow.name)) - } - } - } - return associatedWorkflows - } catch (e: java.lang.Exception) { - log.error("failed to fetch associated workflows for monitor $id", e) - return emptyList() - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetWorkflowAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetWorkflowAction.kt deleted file mode 100644 index c7bd42904..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetWorkflowAction.kt +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import org.apache.logging.log4j.LogManager -import org.opensearch.OpenSearchStatusException -import org.opensearch.action.get.GetRequest -import org.opensearch.action.get.GetResponse -import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.HandledTransportAction -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.util.AlertingException -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.inject.Inject -import org.opensearch.common.settings.Settings -import 
org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.GetWorkflowRequest -import org.opensearch.commons.alerting.action.GetWorkflowResponse -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.core.action.ActionListener -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.index.IndexNotFoundException -import org.opensearch.tasks.Task -import org.opensearch.transport.TransportService - -class TransportGetWorkflowAction @Inject constructor( - transportService: TransportService, - val client: Client, - actionFilters: ActionFilters, - val xContentRegistry: NamedXContentRegistry, - val clusterService: ClusterService, - settings: Settings -) : HandledTransportAction( - AlertingActions.GET_WORKFLOW_ACTION_NAME, transportService, actionFilters, ::GetWorkflowRequest -), - SecureTransportAction { - - private val log = LogManager.getLogger(javaClass) - - @Volatile override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) - - init { - listenFilterBySettingChange(clusterService) - } - - override fun doExecute(task: Task, getWorkflowRequest: GetWorkflowRequest, actionListener: ActionListener) { - val user = readUserFromThreadContext(client) - - val getRequest = GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, getWorkflowRequest.workflowId) - - if (!validateUserBackendRoles(user, actionListener)) { - return - } - - client.threadPool().threadContext.stashContext().use { - client.get( - getRequest, - object : ActionListener { - override fun onResponse(response: GetResponse) { - if (!response.isExists) { - log.error("Workflow with ${getWorkflowRequest.workflowId} not found") - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "Workflow not found.", - RestStatus.NOT_FOUND - ) - ) - ) - return - } - - var workflow: Workflow? 
= null - if (!response.isSourceEmpty) { - XContentHelper.createParser( - xContentRegistry, LoggingDeprecationHandler.INSTANCE, - response.sourceAsBytesRef, XContentType.JSON - ).use { xcp -> - val compositeMonitor = ScheduledJob.parse(xcp, response.id, response.version) - if (compositeMonitor is Workflow) { - workflow = compositeMonitor - } else { - log.error("Wrong monitor type returned") - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "Workflow not found.", - RestStatus.NOT_FOUND - ) - ) - ) - return - } - - // security is enabled and filterby is enabled - if (!checkUserPermissionsWithResource( - user, - workflow?.user, - actionListener, - "workflow", - getWorkflowRequest.workflowId - ) - ) { - return - } - } - } - - actionListener.onResponse( - GetWorkflowResponse( - response.id, - response.version, - response.seqNo, - response.primaryTerm, - RestStatus.OK, - workflow - ) - ) - } - - override fun onFailure(t: Exception) { - log.error("Getting the workflow failed", t) - - if (t is IndexNotFoundException) { - actionListener.onFailure( - OpenSearchStatusException( - "Workflow not found", - RestStatus.NOT_FOUND - ) - ) - } else { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - } - ) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetWorkflowAlertsAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportGetWorkflowAlertsAction.kt deleted file mode 100644 index 7a9561ccb..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/TransportGetWorkflowAlertsAction.kt +++ /dev/null @@ -1,274 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import kotlinx.coroutines.CoroutineScope -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.launch -import org.apache.logging.log4j.LogManager -import org.opensearch.action.ActionRequest -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.HandledTransportAction -import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.opensearchapi.addFilter -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.util.AlertingException -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.inject.Inject -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.GetAlertsRequest -import org.opensearch.commons.alerting.action.GetWorkflowAlertsRequest -import org.opensearch.commons.alerting.action.GetWorkflowAlertsResponse -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.authuser.User -import org.opensearch.commons.utils.recreateObject -import org.opensearch.core.action.ActionListener -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils -import org.opensearch.index.query.Operator 
-import org.opensearch.index.query.QueryBuilders -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.search.sort.SortBuilders -import org.opensearch.search.sort.SortOrder -import org.opensearch.tasks.Task -import org.opensearch.transport.TransportService -import java.io.IOException - -private val log = LogManager.getLogger(TransportGetAlertsAction::class.java) -private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) - -class TransportGetWorkflowAlertsAction @Inject constructor( - transportService: TransportService, - val client: Client, - clusterService: ClusterService, - actionFilters: ActionFilters, - val settings: Settings, - val xContentRegistry: NamedXContentRegistry, -) : HandledTransportAction( - AlertingActions.GET_WORKFLOW_ALERTS_ACTION_NAME, - transportService, - actionFilters, - ::GetAlertsRequest -), - SecureTransportAction { - - @Volatile - override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) - - @Volatile - private var isAlertHistoryEnabled = AlertingSettings.ALERT_HISTORY_ENABLED.get(settings) - - init { - clusterService.clusterSettings.addSettingsUpdateConsumer(AlertingSettings.ALERT_HISTORY_ENABLED) { isAlertHistoryEnabled = it } - listenFilterBySettingChange(clusterService) - } - - override fun doExecute( - task: Task, - request: ActionRequest, - actionListener: ActionListener, - ) { - val getWorkflowAlertsRequest = request as? GetWorkflowAlertsRequest - ?: recreateObject(request) { GetWorkflowAlertsRequest(it) } - val user = readUserFromThreadContext(client) - - val tableProp = getWorkflowAlertsRequest.table - val sortBuilder = SortBuilders.fieldSort(tableProp.sortString) - .order(SortOrder.fromString(tableProp.sortOrder)) - if (!tableProp.missing.isNullOrBlank()) { - sortBuilder.missing(tableProp.missing) - } - - val queryBuilder = QueryBuilders.boolQuery() - - if (getWorkflowAlertsRequest.severityLevel != "ALL") { - queryBuilder.filter(QueryBuilders.termQuery("severity", getWorkflowAlertsRequest.severityLevel)) - } - - if (getWorkflowAlertsRequest.alertState == "ALL") { - QueryBuilders.boolQuery() - .filter(QueryBuilders.boolQuery().mustNot(QueryBuilders.termsQuery(Alert.STATE_FIELD, Alert.State.AUDIT.name))) - } else { - queryBuilder.filter(QueryBuilders.termQuery(Alert.STATE_FIELD, getWorkflowAlertsRequest.alertState)) - } - - if (getWorkflowAlertsRequest.alertIds.isNullOrEmpty() == false) { - queryBuilder.filter(QueryBuilders.termsQuery("_id", getWorkflowAlertsRequest.alertIds)) - } - - if (getWorkflowAlertsRequest.monitorIds.isNullOrEmpty() == false) { - queryBuilder.filter(QueryBuilders.termsQuery("monitor_id", getWorkflowAlertsRequest.monitorIds)) - } - if (getWorkflowAlertsRequest.workflowIds.isNullOrEmpty() == false) { - queryBuilder.must(QueryBuilders.termsQuery("workflow_id", getWorkflowAlertsRequest.workflowIds)) - queryBuilder.must(QueryBuilders.termQuery("monitor_id", "")) - } - if (!tableProp.searchString.isNullOrBlank()) { - queryBuilder - .must( - QueryBuilders.queryStringQuery(tableProp.searchString) - .defaultOperator(Operator.AND) - .field("monitor_name") - .field("trigger_name") - ) - } - // if alert id is mentioned we cannot set "from" field as it may not return id. 
we would be using it to paginate associated alerts - val from = if (getWorkflowAlertsRequest.alertIds.isNullOrEmpty()) - tableProp.startIndex - else 0 - - val searchSourceBuilder = SearchSourceBuilder() - .version(true) - .seqNoAndPrimaryTerm(true) - .query(queryBuilder) - .sort(sortBuilder) - .size(tableProp.size) - .from(from) - - client.threadPool().threadContext.stashContext().use { - scope.launch { - try { - val alertIndex = resolveAlertsIndexName(getWorkflowAlertsRequest) - getAlerts(getWorkflowAlertsRequest, alertIndex, searchSourceBuilder, actionListener, user) - } catch (t: Exception) { - log.error("Failed to get alerts", t) - if (t is AlertingException) { - actionListener.onFailure(t) - } else { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - } - } - } - - fun resolveAlertsIndexName(getAlertsRequest: GetWorkflowAlertsRequest): String { - var alertIndex = AlertIndices.ALL_ALERT_INDEX_PATTERN - if (getAlertsRequest.alertIndex.isNullOrEmpty() == false) { - alertIndex = getAlertsRequest.alertIndex!! - } - return if (alertIndex == AlertIndices.ALERT_INDEX) - AlertIndices.ALL_ALERT_INDEX_PATTERN - else - alertIndex - } - - fun resolveAssociatedAlertsIndexName(getAlertsRequest: GetWorkflowAlertsRequest): String { - return if (getAlertsRequest.alertIndex.isNullOrEmpty()) AlertIndices.ALL_ALERT_INDEX_PATTERN - else getAlertsRequest.associatedAlertsIndex!! - } - - suspend fun getAlerts( - getWorkflowAlertsRequest: GetWorkflowAlertsRequest, - alertIndex: String, - searchSourceBuilder: SearchSourceBuilder, - actionListener: ActionListener, - user: User?, - ) { - // user is null when: 1/ security is disabled. 2/when user is super-admin. - if (user == null) { - // user is null when: 1/ security is disabled. 2/when user is super-admin. - search(getWorkflowAlertsRequest, alertIndex, searchSourceBuilder, actionListener) - } else if (!doFilterForUser(user)) { - // security is enabled and filterby is disabled. - search(getWorkflowAlertsRequest, alertIndex, searchSourceBuilder, actionListener) - } else { - // security is enabled and filterby is enabled. 
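The filterby-enabled branch described above relies on addFilter to scope results to the requesting user's backend roles. A hypothetical helper showing the effective query shape — the plugin's real addFilter lives in the opensearchapi package; only the field name is taken from the call below:

    import org.opensearch.commons.authuser.User
    import org.opensearch.index.query.QueryBuilders
    import org.opensearch.search.builder.SearchSourceBuilder

    // Illustrative only: restrict hits to alerts whose monitor owner shares at
    // least one backend role with the requesting user.
    fun addBackendRoleFilter(user: User, searchSourceBuilder: SearchSourceBuilder) {
        val filtered = QueryBuilders.boolQuery()
            .must(searchSourceBuilder.query() ?: QueryBuilders.matchAllQuery())
            .filter(QueryBuilders.termsQuery("monitor_user.backend_roles.keyword", user.backendRoles))
        searchSourceBuilder.query(filtered)
    }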
- try { - log.info("Filtering result by: ${user.backendRoles}") - addFilter(user, searchSourceBuilder, "monitor_user.backend_roles.keyword") - search(getWorkflowAlertsRequest, alertIndex, searchSourceBuilder, actionListener) - } catch (ex: IOException) { - actionListener.onFailure(AlertingException.wrap(ex)) - } - } - } - - suspend fun search( - getWorkflowAlertsRequest: GetWorkflowAlertsRequest, - alertIndex: String, - searchSourceBuilder: SearchSourceBuilder, - actionListener: ActionListener, - ) { - try { - val searchRequest = SearchRequest() - .indices(alertIndex) - .source(searchSourceBuilder) - val alerts = mutableListOf() - val associatedAlerts = mutableListOf() - - val response: SearchResponse = client.suspendUntil { search(searchRequest, it) } - val totalAlertCount = response.hits.totalHits?.value?.toInt() - alerts.addAll( - parseAlertsFromSearchResponse(response) - ) - if (alerts.isNotEmpty() && getWorkflowAlertsRequest.getAssociatedAlerts == true) - getAssociatedAlerts( - associatedAlerts, - alerts, - resolveAssociatedAlertsIndexName(getWorkflowAlertsRequest), - getWorkflowAlertsRequest - ) - actionListener.onResponse(GetWorkflowAlertsResponse(alerts, associatedAlerts, totalAlertCount)) - } catch (e: Exception) { - actionListener.onFailure(AlertingException("Failed to get alerts", RestStatus.INTERNAL_SERVER_ERROR, e)) - } - } - - private suspend fun getAssociatedAlerts( - associatedAlerts: MutableList, - alerts: MutableList, - alertIndex: String, - getWorkflowAlertsRequest: GetWorkflowAlertsRequest, - ) { - try { - val associatedAlertIds = mutableSetOf() - alerts.forEach { associatedAlertIds.addAll(it.associatedAlertIds) } - if (associatedAlertIds.isEmpty()) return - val queryBuilder = QueryBuilders.boolQuery() - val searchRequest = SearchRequest(alertIndex) - // if chained alert id param is non-null, paginate the associated alerts. 
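The surrounding code repeatedly bridges listener-style client calls into coroutines via client.suspendUntil { ... }, as in the search call below. An illustrative reimplementation of that adapter (the plugin's actual helper lives in the opensearchapi package and may differ):

    import kotlin.coroutines.resume
    import kotlin.coroutines.resumeWithException
    import kotlinx.coroutines.suspendCancellableCoroutine
    import org.opensearch.client.Client
    import org.opensearch.core.action.ActionListener

    // Suspends until the listener fires, resuming with the response or failure.
    suspend fun <C : Client, T> C.suspendUntil(block: C.(ActionListener<T>) -> Unit): T =
        suspendCancellableCoroutine { cont ->
            block(object : ActionListener<T> {
                override fun onResponse(response: T) = cont.resume(response)
                override fun onFailure(e: Exception) = cont.resumeWithException(e)
            })
        }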
- if (getWorkflowAlertsRequest.alertIds.isNullOrEmpty() == false) { - val tableProp = getWorkflowAlertsRequest.table - val sortBuilder = SortBuilders.fieldSort(tableProp.sortString) - .order(SortOrder.fromString(tableProp.sortOrder)) - if (!tableProp.missing.isNullOrBlank()) { - sortBuilder.missing(tableProp.missing) - } - searchRequest.source().sort(sortBuilder).size(tableProp.size).from(tableProp.startIndex) - } - queryBuilder.must(QueryBuilders.termsQuery("_id", associatedAlertIds)) - queryBuilder.must(QueryBuilders.termQuery(Alert.STATE_FIELD, Alert.State.AUDIT)) - searchRequest.source().query(queryBuilder) - val response: SearchResponse = client.suspendUntil { search(searchRequest, it) } - associatedAlerts.addAll(parseAlertsFromSearchResponse(response)) - } catch (e: Exception) { - log.error("Failed to get associated alerts in get workflow alerts action", e) - } - } - - private fun parseAlertsFromSearchResponse(response: SearchResponse) = response.hits.map { hit -> - val xcp = XContentHelper.createParser( - xContentRegistry, - LoggingDeprecationHandler.INSTANCE, - hit.sourceRef, - XContentType.JSON - ) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - val alert = Alert.parse(xcp, hit.id, hit.version) - alert - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt deleted file mode 100644 index 2100c0593..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/TransportIndexMonitorAction.kt +++ /dev/null @@ -1,732 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import kotlinx.coroutines.CoroutineScope -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.launch -import org.apache.logging.log4j.LogManager -import org.opensearch.ExceptionsHelper -import org.opensearch.OpenSearchException -import org.opensearch.OpenSearchSecurityException -import org.opensearch.OpenSearchStatusException -import org.opensearch.ResourceAlreadyExistsException -import org.opensearch.action.ActionRequest -import org.opensearch.action.admin.cluster.health.ClusterHealthAction -import org.opensearch.action.admin.cluster.health.ClusterHealthRequest -import org.opensearch.action.admin.cluster.health.ClusterHealthResponse -import org.opensearch.action.admin.indices.create.CreateIndexResponse -import org.opensearch.action.get.GetRequest -import org.opensearch.action.get.GetResponse -import org.opensearch.action.index.IndexRequest -import org.opensearch.action.index.IndexResponse -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.HandledTransportAction -import org.opensearch.action.support.WriteRequest.RefreshPolicy -import org.opensearch.action.support.master.AcknowledgedResponse -import org.opensearch.alerting.MonitorMetadataService -import org.opensearch.alerting.core.ScheduledJobIndices -import org.opensearch.alerting.model.MonitorMetadata -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.service.DeleteMonitorService -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERTING_MAX_MONITORS -import 
org.opensearch.alerting.settings.AlertingSettings.Companion.INDEX_TIMEOUT -import org.opensearch.alerting.settings.AlertingSettings.Companion.MAX_ACTION_THROTTLE_VALUE -import org.opensearch.alerting.settings.AlertingSettings.Companion.REQUEST_TIMEOUT -import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST -import org.opensearch.alerting.util.AlertingException -import org.opensearch.alerting.util.DocLevelMonitorQueries -import org.opensearch.alerting.util.IndexUtils -import org.opensearch.alerting.util.addUserBackendRolesFilter -import org.opensearch.alerting.util.getRoleFilterEnabled -import org.opensearch.alerting.util.isADMonitor -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.inject.Inject -import org.opensearch.common.settings.Settings -import org.opensearch.common.unit.TimeValue -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentFactory.jsonBuilder -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.IndexMonitorRequest -import org.opensearch.commons.alerting.action.IndexMonitorResponse -import org.opensearch.commons.alerting.model.DocLevelMonitorInput -import org.opensearch.commons.alerting.model.DocLevelMonitorInput.Companion.DOC_LEVEL_INPUT_FIELD -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX -import org.opensearch.commons.alerting.model.SearchInput -import org.opensearch.commons.authuser.User -import org.opensearch.commons.utils.recreateObject -import org.opensearch.core.action.ActionListener -import org.opensearch.core.common.io.stream.NamedWriteableRegistry -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.index.query.QueryBuilders -import org.opensearch.index.reindex.BulkByScrollResponse -import org.opensearch.index.reindex.DeleteByQueryAction -import org.opensearch.index.reindex.DeleteByQueryRequestBuilder -import org.opensearch.rest.RestRequest -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.tasks.Task -import org.opensearch.transport.TransportService -import java.io.IOException -import java.time.Duration - -private val log = LogManager.getLogger(TransportIndexMonitorAction::class.java) -private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) - -class TransportIndexMonitorAction @Inject constructor( - transportService: TransportService, - val client: Client, - actionFilters: ActionFilters, - val scheduledJobIndices: ScheduledJobIndices, - val docLevelMonitorQueries: DocLevelMonitorQueries, - val clusterService: ClusterService, - val settings: Settings, - val xContentRegistry: NamedXContentRegistry, - val namedWriteableRegistry: NamedWriteableRegistry -) : HandledTransportAction( - AlertingActions.INDEX_MONITOR_ACTION_NAME, - transportService, - actionFilters, - ::IndexMonitorRequest -), - SecureTransportAction { - - @Volatile private var maxMonitors = ALERTING_MAX_MONITORS.get(settings) - - @Volatile private var requestTimeout = REQUEST_TIMEOUT.get(settings) - - @Volatile private var indexTimeout = INDEX_TIMEOUT.get(settings) - - @Volatile 
private var maxActionThrottle = MAX_ACTION_THROTTLE_VALUE.get(settings) - - @Volatile private var allowList = ALLOW_LIST.get(settings) - - @Volatile override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) - - init { - clusterService.clusterSettings.addSettingsUpdateConsumer(ALERTING_MAX_MONITORS) { maxMonitors = it } - clusterService.clusterSettings.addSettingsUpdateConsumer(REQUEST_TIMEOUT) { requestTimeout = it } - clusterService.clusterSettings.addSettingsUpdateConsumer(INDEX_TIMEOUT) { indexTimeout = it } - clusterService.clusterSettings.addSettingsUpdateConsumer(MAX_ACTION_THROTTLE_VALUE) { maxActionThrottle = it } - clusterService.clusterSettings.addSettingsUpdateConsumer(ALLOW_LIST) { allowList = it } - listenFilterBySettingChange(clusterService) - } - - override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener) { - val transformedRequest = request as? IndexMonitorRequest - ?: recreateObject(request, namedWriteableRegistry) { - IndexMonitorRequest(it) - } - - val user = readUserFromThreadContext(client) - - if (!validateUserBackendRoles(user, actionListener)) { - return - } - - if ( - user != null && - !isAdmin(user) && - transformedRequest.rbacRoles != null - ) { - if (transformedRequest.rbacRoles?.stream()?.anyMatch { !user.backendRoles.contains(it) } == true) { - log.debug( - "User specified backend roles, ${transformedRequest.rbacRoles}, " + - "that they don't have access to. User backend roles: ${user.backendRoles}" - ) - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "User specified backend roles that they don't have access to. Contact administrator", - RestStatus.FORBIDDEN - ) - ) - ) - return - } else if (transformedRequest.rbacRoles?.isEmpty() == true) { - log.debug( - "Non-admin users are not allowed to specify an empty set of backend roles. " + - "Please don't pass in the parameter or pass in at least one backend role." - ) - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "Non-admin users are not allowed to specify an empty set of backend roles.", - RestStatus.FORBIDDEN - ) - ) - ) - return - } - } - - if (!isADMonitor(transformedRequest.monitor)) { - checkIndicesAndExecute(client, actionListener, transformedRequest, user) - } else { - // check if user has access to any anomaly detector for AD monitor - checkAnomalyDetectorAndExecute(client, actionListener, transformedRequest, user) - } - } - - /** - * Check if user has permissions to read the configured indices on the monitor and - * then create monitor. - */ - fun checkIndicesAndExecute( - client: Client, - actionListener: ActionListener, - request: IndexMonitorRequest, - user: User?, - ) { - val indices = mutableListOf() - // todo: for doc level alerting: check if index is present before monitor is created.
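[Editor's aside] The recreateObject(request, namedWriteableRegistry) fallback above is the heart of this change: a request relayed from another node arrives as a generic ActionRequest, and any named writeables inside it can only be re-read against a NamedWriteableRegistry. A minimal sketch of that round trip, assuming the stream classes from org.opensearch.core.common.io.stream (package locations vary across OpenSearch versions; recreateObjectSketch is a hypothetical stand-in for the commons-utils helper, not its implementation):

    import org.opensearch.common.io.stream.BytesStreamOutput
    import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput
    import org.opensearch.core.common.io.stream.NamedWriteableRegistry
    import org.opensearch.core.common.io.stream.StreamInput
    import org.opensearch.core.common.io.stream.Writeable

    fun <T> recreateObjectSketch(
        writeable: Writeable,
        registry: NamedWriteableRegistry,
        reader: (StreamInput) -> T,
    ): T =
        BytesStreamOutput().use { out ->
            writeable.writeTo(out) // serialize the incoming request as-is
            // wrap the raw bytes so nested named writeables resolve through the registry
            NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry).use { input ->
                reader(input) // e.g. { IndexMonitorRequest(it) }
            }
        }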
- val searchInputs = request.monitor.inputs.filter { it.name() == SearchInput.SEARCH_FIELD || it.name() == DOC_LEVEL_INPUT_FIELD } - searchInputs.forEach { - val inputIndices = if (it.name() == SearchInput.SEARCH_FIELD) (it as SearchInput).indices - else (it as DocLevelMonitorInput).indices - indices.addAll(inputIndices) - } - val searchRequest = SearchRequest().indices(*indices.toTypedArray()) - .source(SearchSourceBuilder.searchSource().size(1).query(QueryBuilders.matchAllQuery())) - client.search( - searchRequest, - object : ActionListener { - override fun onResponse(searchResponse: SearchResponse) { - // User has read access to configured indices in the monitor, now create monitor without user context. - client.threadPool().threadContext.stashContext().use { - IndexMonitorHandler(client, actionListener, request, user).resolveUserAndStart() - } - } - - // Due to below issue with security plugin, we get security_exception when invalid index name is mentioned. - // https://github.com/opendistro-for-elasticsearch/security/issues/718 - override fun onFailure(t: Exception) { - actionListener.onFailure( - AlertingException.wrap( - when (t is OpenSearchSecurityException) { - true -> OpenSearchStatusException( - "User doesn't have read permissions for one or more configured indices " + - "$indices", - RestStatus.FORBIDDEN - ) - false -> t - } - ) - ) - } - } - ) - } - - /** - * It's not reasonable to create an AD monitor if the user has no access to any detector; otherwise - * the monitor will not get any anomaly results. So we check that the user has access to at least one - * anomaly detector before creating an AD monitor. - * As the anomaly detector index is a system index, common users have no permission to query it. So we need - * to send a REST API call to the AD REST API. - */ - fun checkAnomalyDetectorAndExecute( - client: Client, - actionListener: ActionListener, - request: IndexMonitorRequest, - user: User?, - ) { - client.threadPool().threadContext.stashContext().use { - IndexMonitorHandler(client, actionListener, request, user).resolveUserAndStartForAD() - } - } - - inner class IndexMonitorHandler( - private val client: Client, - private val actionListener: ActionListener, - private val request: IndexMonitorRequest, - private val user: User?, - ) { - - fun resolveUserAndStart() { - if (user == null) { - // Security is disabled, add empty user to Monitor. user is null for older versions. - request.monitor = request.monitor - .copy(user = User("", listOf(), listOf(), listOf())) - start() - } else { - request.monitor = request.monitor - .copy(user = User(user.name, user.backendRoles, user.roles, user.customAttNames)) - start() - } - } - - fun resolveUserAndStartForAD() { - if (user == null) { - // Security is disabled, add empty user to Monitor. user is null for older versions. - request.monitor = request.monitor - .copy(user = User("", listOf(), listOf(), listOf())) - start() - } else { - try { - request.monitor = request.monitor - .copy(user = User(user.name, user.backendRoles, user.roles, user.customAttNames)) - val searchSourceBuilder = SearchSourceBuilder().size(0) - if (getRoleFilterEnabled(clusterService, settings, "plugins.anomaly_detection.filter_by_backend_roles")) { - addUserBackendRolesFilter(user, searchSourceBuilder) - } - val searchRequest = SearchRequest().indices(".opendistro-anomaly-detectors").source(searchSourceBuilder) - client.search( - searchRequest, - object : ActionListener { - override fun onResponse(response: SearchResponse?)
{ - val totalHits = response?.hits?.totalHits?.value - if (totalHits != null && totalHits > 0L) { - start() - } else { - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException("User has no available detectors", RestStatus.NOT_FOUND) - ) - ) - } - } - - override fun onFailure(t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - ) - } catch (ex: IOException) { - actionListener.onFailure(AlertingException.wrap(ex)) - } - } - } - - fun start() { - if (!scheduledJobIndices.scheduledJobIndexExists()) { - scheduledJobIndices.initScheduledJobIndex(object : ActionListener { - override fun onResponse(response: CreateIndexResponse) { - onCreateMappingsResponse(response.isAcknowledged) - } - override fun onFailure(t: Exception) { - // https://github.com/opensearch-project/alerting/issues/646 - if (ExceptionsHelper.unwrapCause(t) is ResourceAlreadyExistsException) { - scope.launch { - // Wait for the yellow status - val request = ClusterHealthRequest() - .indices(SCHEDULED_JOBS_INDEX) - .waitForYellowStatus() - val response: ClusterHealthResponse = client.suspendUntil { - execute(ClusterHealthAction.INSTANCE, request, it) - } - if (response.isTimedOut) { - actionListener.onFailure( - OpenSearchException("Cannot determine that the $SCHEDULED_JOBS_INDEX index is healthy") - ) - } - // Retry mapping of monitor - onCreateMappingsResponse(true) - } - } else { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - }) - } else if (!IndexUtils.scheduledJobIndexUpdated) { - IndexUtils.updateIndexMapping( - SCHEDULED_JOBS_INDEX, - ScheduledJobIndices.scheduledJobMappings(), - clusterService.state(), - client.admin().indices(), - object : ActionListener { - override fun onResponse(response: AcknowledgedResponse) { - onUpdateMappingsResponse(response) - } - override fun onFailure(t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - ) - } else { - prepareMonitorIndexing() - } - } - - /** - * This function prepares for indexing a new monitor. - * If this is an update request we can simply update the monitor. Otherwise we first check to see how many monitors already exist, - * and compare this to the [maxMonitorCount]. Requests that breach this threshold will be rejected. 
- */ - private fun prepareMonitorIndexing() { - // Below check needs to be async operations and needs to be refactored issue#269 - // checkForDisallowedDestinations(allowList) - - try { - validateActionThrottle(request.monitor, maxActionThrottle, TimeValue.timeValueMinutes(1)) - } catch (e: RuntimeException) { - actionListener.onFailure(AlertingException.wrap(e)) - return - } - - if (request.method == RestRequest.Method.PUT) { - scope.launch { - updateMonitor() - } - } else { - val query = QueryBuilders.boolQuery().filter(QueryBuilders.termQuery("${Monitor.MONITOR_TYPE}.type", Monitor.MONITOR_TYPE)) - val searchSource = SearchSourceBuilder().query(query).timeout(requestTimeout) - val searchRequest = SearchRequest(SCHEDULED_JOBS_INDEX).source(searchSource) - - client.search( - searchRequest, - object : ActionListener { - override fun onResponse(searchResponse: SearchResponse) { - onSearchResponse(searchResponse) - } - - override fun onFailure(t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - ) - } - } - - private fun validateActionThrottle(monitor: Monitor, maxValue: TimeValue, minValue: TimeValue) { - monitor.triggers.forEach { trigger -> - trigger.actions.forEach { action -> - if (action.throttle != null) { - require( - TimeValue(Duration.of(action.throttle!!.value.toLong(), action.throttle!!.unit).toMillis()) - .compareTo(maxValue) <= 0, - { "Can only set throttle period less than or equal to $maxValue" } - ) - require( - TimeValue(Duration.of(action.throttle!!.value.toLong(), action.throttle!!.unit).toMillis()) - .compareTo(minValue) >= 0, - { "Can only set throttle period greater than or equal to $minValue" } - ) - } - } - } - } - - /** - * After searching for all existing monitors we validate the system can support another monitor to be created. - */ - private fun onSearchResponse(response: SearchResponse) { - val totalHits = response.hits.totalHits?.value - if (totalHits != null && totalHits >= maxMonitors) { - log.info("This request would create more than the allowed monitors [$maxMonitors].") - actionListener.onFailure( - AlertingException.wrap( - IllegalArgumentException( - "This request would create more than the allowed monitors [$maxMonitors]." - ) - ) - ) - } else { - scope.launch { - indexMonitor() - } - } - } - - private fun onCreateMappingsResponse(isAcknowledged: Boolean) { - if (isAcknowledged) { - log.info("Created $SCHEDULED_JOBS_INDEX with mappings.") - prepareMonitorIndexing() - IndexUtils.scheduledJobIndexUpdated() - } else { - log.info("Create $SCHEDULED_JOBS_INDEX mappings call not acknowledged.") - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "Create $SCHEDULED_JOBS_INDEX mappings call not acknowledged", - RestStatus.INTERNAL_SERVER_ERROR - ) - ) - ) - } - } - - private fun onUpdateMappingsResponse(response: AcknowledgedResponse) { - if (response.isAcknowledged) { - log.info("Updated ${ScheduledJob.SCHEDULED_JOBS_INDEX} with mappings.") - IndexUtils.scheduledJobIndexUpdated() - prepareMonitorIndexing() - } else { - log.info("Update ${ScheduledJob.SCHEDULED_JOBS_INDEX} mappings call not acknowledged.") - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "Updated ${ScheduledJob.SCHEDULED_JOBS_INDEX} mappings call not acknowledged.", - RestStatus.INTERNAL_SERVER_ERROR - ) - ) - ) - } - } - - private suspend fun indexMonitor() { - if (user != null) { - // Use the backend roles which is an intersection of the requested backend roles and the user's backend roles. 
- // Admins can pass in any backend role. Also if no backend role is passed in, all the user's backend roles are used. - val rbacRoles = if (request.rbacRoles == null) user.backendRoles.toSet() - else if (!isAdmin(user)) request.rbacRoles?.intersect(user.backendRoles)?.toSet() - else request.rbacRoles - - request.monitor = request.monitor.copy( - user = User(user.name, rbacRoles.orEmpty().toList(), user.roles, user.customAttNames) - ) - log.debug("Created monitor's backend roles: $rbacRoles") - } - - val indexRequest = IndexRequest(SCHEDULED_JOBS_INDEX) - .setRefreshPolicy(request.refreshPolicy) - .source(request.monitor.toXContentWithUser(jsonBuilder(), ToXContent.MapParams(mapOf("with_type" to "true")))) - .setIfSeqNo(request.seqNo) - .setIfPrimaryTerm(request.primaryTerm) - .timeout(indexTimeout) - - log.info( - "Creating new monitor: ${request.monitor.toXContentWithUser( - jsonBuilder(), - ToXContent.MapParams(mapOf("with_type" to "true")) - )}" - ) - - try { - val indexResponse: IndexResponse = client.suspendUntil { client.index(indexRequest, it) } - val failureReasons = checkShardsFailure(indexResponse) - if (failureReasons != null) { - log.info(failureReasons.toString()) - actionListener.onFailure( - AlertingException.wrap(OpenSearchStatusException(failureReasons.toString(), indexResponse.status())) - ) - return - } - var metadata: MonitorMetadata? - try { // delete monitor if metadata creation fails, log the right error and re-throw the error to fail listener - request.monitor = request.monitor.copy(id = indexResponse.id) - var (monitorMetadata: MonitorMetadata, created: Boolean) = MonitorMetadataService.getOrCreateMetadata(request.monitor) - if (created == false) { - log.warn("Metadata doc id:${monitorMetadata.id} exists, but it shouldn't!") - } - metadata = monitorMetadata - } catch (t: Exception) { - log.error("failed to create metadata for monitor ${indexResponse.id}. deleting monitor") - cleanupMonitorAfterPartialFailure(request.monitor, indexResponse) - throw t - } - try { - if (request.monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { - indexDocLevelMonitorQueries(request.monitor, indexResponse.id, metadata, request.refreshPolicy) - } - // When inserting queries in queryIndex we could update sourceToQueryIndexMapping - MonitorMetadataService.upsertMetadata(metadata, updating = true) - } catch (t: Exception) { - log.error("failed to index doc level queries monitor ${indexResponse.id}. deleting monitor", t) - cleanupMonitorAfterPartialFailure(request.monitor, indexResponse) - throw t - } - - actionListener.onResponse( - IndexMonitorResponse( - indexResponse.id, - indexResponse.version, - indexResponse.seqNo, - indexResponse.primaryTerm, - request.monitor - ) - ) - } catch (t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - - private suspend fun cleanupMonitorAfterPartialFailure(monitor: Monitor, indexMonitorResponse: IndexResponse) { - // we simply log the success (debug log) or failure (error log) when we try clean up partially failed monitor creation request - try { - DeleteMonitorService.deleteMonitor( - monitor = monitor, - RefreshPolicy.IMMEDIATE - ) - log.debug( - "Cleaned up monitor related resources after monitor creation request partial failure. 
" + - "Monitor id : ${indexMonitorResponse.id}" - ) - } catch (e: Exception) { - log.error("Failed to clean up monitor after monitor creation request partial failure", e) - } - } - - @Suppress("UNCHECKED_CAST") - private suspend fun indexDocLevelMonitorQueries( - monitor: Monitor, - monitorId: String, - monitorMetadata: MonitorMetadata, - refreshPolicy: RefreshPolicy - ) { - val queryIndex = monitor.dataSources.queryIndex - if (!docLevelMonitorQueries.docLevelQueryIndexExists(monitor.dataSources)) { - docLevelMonitorQueries.initDocLevelQueryIndex(monitor.dataSources) - log.info("Central Percolation index $queryIndex created") - } - docLevelMonitorQueries.indexDocLevelQueries( - monitor, - monitorId, - monitorMetadata, - refreshPolicy, - indexTimeout - ) - log.debug("Queries inserted into Percolate index $queryIndex") - } - - private suspend fun updateMonitor() { - val getRequest = GetRequest(SCHEDULED_JOBS_INDEX, request.monitorId) - try { - val getResponse: GetResponse = client.suspendUntil { client.get(getRequest, it) } - if (!getResponse.isExists) { - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException("Monitor with ${request.monitorId} is not found", RestStatus.NOT_FOUND) - ) - ) - return - } - val xcp = XContentHelper.createParser( - xContentRegistry, - LoggingDeprecationHandler.INSTANCE, - getResponse.sourceAsBytesRef, - XContentType.JSON - ) - val monitor = ScheduledJob.parse(xcp, getResponse.id, getResponse.version) as Monitor - onGetResponse(monitor) - } catch (t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - - private suspend fun onGetResponse(currentMonitor: Monitor) { - if (!checkUserPermissionsWithResource(user, currentMonitor.user, actionListener, "monitor", request.monitorId)) { - return - } - - // If both are enabled, use the current existing monitor enabled time, otherwise the next execution will be - // incorrect. - if (request.monitor.enabled && currentMonitor.enabled) { - request.monitor = request.monitor.copy(enabledTime = currentMonitor.enabledTime) - } - - /** - * On update monitor check which backend roles to associate to the monitor. - * Below are 2 examples of how the logic works - * - * Example 1, say we have a Monitor with backend roles [a, b, c, d] associated with it. - * If I'm User A (non-admin user) and I have backend roles [a, b, c] associated with me and I make a request to update - * the Monitor's backend roles to [a, b]. This would mean that the roles to remove are [c] and the roles to add are [a, b]. - * The Monitor's backend roles would then be [a, b, d]. - * - * Example 2, say we have a Monitor with backend roles [a, b, c, d] associated with it. - * If I'm User A (admin user) and I have backend roles [a, b, c] associated with me and I make a request to update - * the Monitor's backend roles to [a, b]. This would mean that the roles to remove are [c, d] and the roles to add are [a, b]. - * The Monitor's backend roles would then be [a, b]. 
- */ - if (user != null) { - if (request.rbacRoles != null) { - if (isAdmin(user)) { - request.monitor = request.monitor.copy( - user = User(user.name, request.rbacRoles, user.roles, user.customAttNames) - ) - } else { - // rolesToRemove: these are the backend roles to remove from the monitor - val rolesToRemove = user.backendRoles - request.rbacRoles.orEmpty() - // remove the monitor's roles with rolesToRemove and add any roles passed into the request.rbacRoles - val updatedRbac = currentMonitor.user?.backendRoles.orEmpty() - rolesToRemove + request.rbacRoles.orEmpty() - request.monitor = request.monitor.copy( - user = User(user.name, updatedRbac, user.roles, user.customAttNames) - ) - } - } else { - request.monitor = request.monitor - .copy(user = User(user.name, currentMonitor.user!!.backendRoles, user.roles, user.customAttNames)) - } - log.debug("Update monitor backend roles to: ${request.monitor.user?.backendRoles}") - } - - request.monitor = request.monitor.copy(schemaVersion = IndexUtils.scheduledJobIndexSchemaVersion) - val indexRequest = IndexRequest(SCHEDULED_JOBS_INDEX) - .setRefreshPolicy(request.refreshPolicy) - .source(request.monitor.toXContentWithUser(jsonBuilder(), ToXContent.MapParams(mapOf("with_type" to "true")))) - .id(request.monitorId) - .setIfSeqNo(request.seqNo) - .setIfPrimaryTerm(request.primaryTerm) - .timeout(indexTimeout) - - log.info( - "Updating monitor, ${currentMonitor.id}, from: ${currentMonitor.toXContentWithUser( - jsonBuilder(), - ToXContent.MapParams(mapOf("with_type" to "true")) - )} \n to: ${request.monitor.toXContentWithUser(jsonBuilder(), ToXContent.MapParams(mapOf("with_type" to "true")))}" - ) - - try { - val indexResponse: IndexResponse = client.suspendUntil { client.index(indexRequest, it) } - val failureReasons = checkShardsFailure(indexResponse) - if (failureReasons != null) { - actionListener.onFailure( - AlertingException.wrap(OpenSearchStatusException(failureReasons.toString(), indexResponse.status())) - ) - return - } - var updatedMetadata: MonitorMetadata - val (metadata, created) = MonitorMetadataService.getOrCreateMetadata(request.monitor) - // Recreate runContext if metadata exists - // Delete and insert all queries from/to queryIndex - if (created == false && currentMonitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { - updatedMetadata = MonitorMetadataService.recreateRunContext(metadata, currentMonitor) - client.suspendUntil { - DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE) - .source(currentMonitor.dataSources.queryIndex) - .filter(QueryBuilders.matchQuery("monitor_id", currentMonitor.id)) - .execute(it) - } - indexDocLevelMonitorQueries(request.monitor, currentMonitor.id, updatedMetadata, request.refreshPolicy) - MonitorMetadataService.upsertMetadata(updatedMetadata, updating = true) - } - actionListener.onResponse( - IndexMonitorResponse( - indexResponse.id, - indexResponse.version, - indexResponse.seqNo, - indexResponse.primaryTerm, - request.monitor - ) - ) - } catch (t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - - private fun checkShardsFailure(response: IndexResponse): String? 
{ - val failureReasons = StringBuilder() - if (response.shardInfo.failed > 0) { - response.shardInfo.failures.forEach { - entry -> - failureReasons.append(entry.reason()) - } - return failureReasons.toString() - } - return null - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportIndexWorkflowAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportIndexWorkflowAction.kt deleted file mode 100644 index 26d834fe6..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/TransportIndexWorkflowAction.kt +++ /dev/null @@ -1,796 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import kotlinx.coroutines.CoroutineScope -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.launch -import org.apache.logging.log4j.LogManager -import org.opensearch.ExceptionsHelper -import org.opensearch.OpenSearchException -import org.opensearch.OpenSearchStatusException -import org.opensearch.ResourceAlreadyExistsException -import org.opensearch.action.ActionRequest -import org.opensearch.action.admin.cluster.health.ClusterHealthAction -import org.opensearch.action.admin.cluster.health.ClusterHealthRequest -import org.opensearch.action.admin.cluster.health.ClusterHealthResponse -import org.opensearch.action.admin.indices.create.CreateIndexResponse -import org.opensearch.action.get.GetRequest -import org.opensearch.action.get.GetResponse -import org.opensearch.action.index.IndexRequest -import org.opensearch.action.index.IndexResponse -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.HandledTransportAction -import org.opensearch.action.support.master.AcknowledgedResponse -import org.opensearch.alerting.MonitorMetadataService -import org.opensearch.alerting.MonitorRunnerService.monitorCtx -import org.opensearch.alerting.WorkflowMetadataService -import org.opensearch.alerting.core.ScheduledJobIndices -import org.opensearch.alerting.opensearchapi.InjectorContextElement -import org.opensearch.alerting.opensearchapi.addFilter -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.opensearchapi.withClosableContext -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.settings.AlertingSettings.Companion.ALERTING_MAX_MONITORS -import org.opensearch.alerting.settings.AlertingSettings.Companion.INDEX_TIMEOUT -import org.opensearch.alerting.settings.AlertingSettings.Companion.MAX_ACTION_THROTTLE_VALUE -import org.opensearch.alerting.settings.AlertingSettings.Companion.REQUEST_TIMEOUT -import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST -import org.opensearch.alerting.util.AlertingException -import org.opensearch.alerting.util.IndexUtils -import org.opensearch.alerting.util.isADMonitor -import org.opensearch.alerting.util.isQueryLevelMonitor -import org.opensearch.alerting.workflow.CompositeWorkflowRunner -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.inject.Inject -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentFactory.jsonBuilder -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import 
org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.IndexWorkflowRequest -import org.opensearch.commons.alerting.action.IndexWorkflowResponse -import org.opensearch.commons.alerting.model.CompositeInput -import org.opensearch.commons.alerting.model.Delegate -import org.opensearch.commons.alerting.model.DocLevelMonitorInput -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX -import org.opensearch.commons.alerting.model.SearchInput -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.commons.authuser.User -import org.opensearch.commons.utils.recreateObject -import org.opensearch.core.action.ActionListener -import org.opensearch.core.common.io.stream.NamedWriteableRegistry -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.index.IndexNotFoundException -import org.opensearch.index.query.QueryBuilders -import org.opensearch.rest.RestRequest -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.tasks.Task -import org.opensearch.transport.TransportService -import java.util.UUID -import java.util.stream.Collectors - -private val log = LogManager.getLogger(TransportIndexWorkflowAction::class.java) -private val scope: CoroutineScope = CoroutineScope(Dispatchers.IO) - -class TransportIndexWorkflowAction @Inject constructor( - transportService: TransportService, - val client: Client, - actionFilters: ActionFilters, - val scheduledJobIndices: ScheduledJobIndices, - val clusterService: ClusterService, - val settings: Settings, - val xContentRegistry: NamedXContentRegistry, - val namedWriteableRegistry: NamedWriteableRegistry, -) : HandledTransportAction( - AlertingActions.INDEX_WORKFLOW_ACTION_NAME, transportService, actionFilters, ::IndexWorkflowRequest -), - SecureTransportAction { - - @Volatile - private var maxMonitors = ALERTING_MAX_MONITORS.get(settings) - - @Volatile - private var requestTimeout = REQUEST_TIMEOUT.get(settings) - - @Volatile - private var indexTimeout = INDEX_TIMEOUT.get(settings) - - @Volatile - private var maxActionThrottle = MAX_ACTION_THROTTLE_VALUE.get(settings) - - @Volatile - private var allowList = ALLOW_LIST.get(settings) - - @Volatile - override var filterByEnabled = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) - - init { - clusterService.clusterSettings.addSettingsUpdateConsumer(ALERTING_MAX_MONITORS) { maxMonitors = it } - clusterService.clusterSettings.addSettingsUpdateConsumer(REQUEST_TIMEOUT) { requestTimeout = it } - clusterService.clusterSettings.addSettingsUpdateConsumer(INDEX_TIMEOUT) { indexTimeout = it } - clusterService.clusterSettings.addSettingsUpdateConsumer(MAX_ACTION_THROTTLE_VALUE) { maxActionThrottle = it } - clusterService.clusterSettings.addSettingsUpdateConsumer(ALLOW_LIST) { allowList = it } - listenFilterBySettingChange(clusterService) - } - - override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener) { - val transformedRequest = request as? 
IndexWorkflowRequest - ?: recreateObject(request, namedWriteableRegistry) { - IndexWorkflowRequest(it) - } - - val user = readUserFromThreadContext(client) - - if (!validateUserBackendRoles(user, actionListener)) { - return - } - - if ( - user != null && - !isAdmin(user) && - transformedRequest.rbacRoles != null - ) { - if (transformedRequest.rbacRoles?.stream()?.anyMatch { !user.backendRoles.contains(it) } == true) { - log.error( - "User specified backend roles, ${transformedRequest.rbacRoles}, " + - "that they don't have access to. User backend roles: ${user.backendRoles}" - ) - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "User specified backend roles that they don't have access to. Contact administrator", - RestStatus.FORBIDDEN - ) - ) - ) - return - } else if (transformedRequest.rbacRoles?.isEmpty() == true) { - log.error( - "Non-admin users are not allowed to specify an empty set of backend roles. " + - "Please don't pass in the parameter or pass in at least one backend role." - ) - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "Non-admin users are not allowed to specify an empty set of backend roles.", - RestStatus.FORBIDDEN - ) - ) - ) - return - } - } - - scope.launch { - try { - validateMonitorAccess( - transformedRequest, - user, - client, - object : ActionListener { - override fun onResponse(response: AcknowledgedResponse) { - // Stash the context and start the workflow creation - client.threadPool().threadContext.stashContext().use { - IndexWorkflowHandler(client, actionListener, transformedRequest, user).resolveUserAndStart() - } - } - - override fun onFailure(e: Exception) { - log.error("Error indexing workflow", e) - actionListener.onFailure(e) - } - } - ) - } catch (e: Exception) { - log.error("Failed to create workflow", e) - if (e is IndexNotFoundException) { - actionListener.onFailure( - OpenSearchStatusException( - "Monitors not found", - RestStatus.NOT_FOUND - ) - ) - } else { - actionListener.onFailure(e) - } - } - } - } - - inner class IndexWorkflowHandler( - private val client: Client, - private val actionListener: ActionListener, - private val request: IndexWorkflowRequest, - private val user: User?, - ) { - fun resolveUserAndStart() { - scope.launch { - if (user == null) { - // Security is disabled, add empty user to Workflow. user is null for older versions.
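[Editor's aside] The rbacRoles guard above enforces two rules for non-admins: the requested backend roles must be a subset of the user's own, and an empty set is rejected outright. A self-contained model in plain Kotlin (validateRequestedRoles is a hypothetical name; it returns an error message or null):

    fun validateRequestedRoles(isAdmin: Boolean, userRoles: Set<String>, requested: Set<String>?): String? = when {
        isAdmin || requested == null -> null                                     // admins and "no preference" pass through
        requested.isEmpty() -> "an empty set of backend roles is not allowed"
        !userRoles.containsAll(requested) -> "requested roles outside the user's backend roles"
        else -> null
    }

    fun main() {
        check(validateRequestedRoles(false, setOf("a", "b"), setOf("a")) == null)
        check(validateRequestedRoles(false, setOf("a"), setOf("a", "c")) != null) // role "c" not held
        check(validateRequestedRoles(false, setOf("a"), emptySet()) != null)      // empty set rejected
        check(validateRequestedRoles(true, emptySet(), setOf("x")) == null)       // admins are unrestricted
    }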
- request.workflow = request.workflow - .copy(user = User("", listOf(), listOf(), listOf())) - start() - } else { - request.workflow = request.workflow - .copy(user = User(user.name, user.backendRoles, user.roles, user.customAttNames)) - start() - } - } - } - - fun start() { - if (!scheduledJobIndices.scheduledJobIndexExists()) { - scheduledJobIndices.initScheduledJobIndex(object : ActionListener { - override fun onResponse(response: CreateIndexResponse) { - onCreateMappingsResponse(response.isAcknowledged) - } - - override fun onFailure(t: Exception) { - // https://github.com/opensearch-project/alerting/issues/646 - if (ExceptionsHelper.unwrapCause(t) is ResourceAlreadyExistsException) { - scope.launch { - // Wait for the yellow status - val request = ClusterHealthRequest() - .indices(SCHEDULED_JOBS_INDEX) - .waitForYellowStatus() - val response: ClusterHealthResponse = client.suspendUntil { - execute(ClusterHealthAction.INSTANCE, request, it) - } - if (response.isTimedOut) { - log.error("Workflow creation timeout", t) - actionListener.onFailure( - OpenSearchException("Cannot determine that the $SCHEDULED_JOBS_INDEX index is healthy") - ) - } - // Retry mapping of workflow - onCreateMappingsResponse(true) - } - } else { - log.error("Failed to create workflow", t) - actionListener.onFailure(AlertingException.wrap(t)) - } - } - }) - } else if (!IndexUtils.scheduledJobIndexUpdated) { - IndexUtils.updateIndexMapping( - SCHEDULED_JOBS_INDEX, - ScheduledJobIndices.scheduledJobMappings(), clusterService.state(), client.admin().indices(), - object : ActionListener { - override fun onResponse(response: AcknowledgedResponse) { - onUpdateMappingsResponse(response) - } - - override fun onFailure(t: Exception) { - log.error("Failed to create workflow", t) - actionListener.onFailure(AlertingException.wrap(t)) - } - } - ) - } else { - prepareWorkflowIndexing() - } - } - - /** - * This function prepares for indexing a new workflow. - * If this is an update request we can simply update the workflow. Otherwise we index the new workflow.
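[Editor's aside] The client.suspendUntil { ... } calls in start() above bridge OpenSearch's listener-style async API into coroutines. A sketch of that adapter under stated assumptions (the real helper lives in org.opensearch.alerting.opensearchapi and is receiver-scoped on the client; suspendUntilSketch is a simplified stand-in, not that implementation):

    import kotlinx.coroutines.suspendCancellableCoroutine
    import org.opensearch.core.action.ActionListener
    import kotlin.coroutines.resume
    import kotlin.coroutines.resumeWithException

    suspend fun <T> suspendUntilSketch(block: (ActionListener<T>) -> Unit): T =
        suspendCancellableCoroutine { cont ->
            block(object : ActionListener<T> {
                override fun onResponse(response: T) = cont.resume(response)       // hand the result back to the coroutine
                override fun onFailure(e: Exception) = cont.resumeWithException(e) // surface the failure as an exception
            })
        }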
- */ - private fun prepareWorkflowIndexing() { - if (request.method == RestRequest.Method.PUT) { - scope.launch { - updateWorkflow() - } - } else { - scope.launch { - indexWorkflow() - } - } - } - - private fun onCreateMappingsResponse(isAcknowledged: Boolean) { - if (isAcknowledged) { - log.info("Created $SCHEDULED_JOBS_INDEX with mappings.") - prepareWorkflowIndexing() - IndexUtils.scheduledJobIndexUpdated() - } else { - log.error("Create $SCHEDULED_JOBS_INDEX mappings call not acknowledged.") - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "Create $SCHEDULED_JOBS_INDEX mappings call not acknowledged", - RestStatus.INTERNAL_SERVER_ERROR - ) - ) - ) - } - } - - private fun onUpdateMappingsResponse(response: AcknowledgedResponse) { - if (response.isAcknowledged) { - log.info("Updated $SCHEDULED_JOBS_INDEX with mappings.") - IndexUtils.scheduledJobIndexUpdated() - prepareWorkflowIndexing() - } else { - log.error("Update $SCHEDULED_JOBS_INDEX mappings call not acknowledged.") - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "Updated $SCHEDULED_JOBS_INDEX mappings call not acknowledged.", - RestStatus.INTERNAL_SERVER_ERROR - ) - ) - ) - } - } - - private suspend fun indexWorkflow() { - if (user != null) { - val rbacRoles = if (request.rbacRoles == null) user.backendRoles.toSet() - else if (!isAdmin(user)) request.rbacRoles?.intersect(user.backendRoles)?.toSet() - else request.rbacRoles - - request.workflow = request.workflow.copy( - user = User(user.name, rbacRoles.orEmpty().toList(), user.roles, user.customAttNames) - ) - log.debug("Created workflow's backend roles: $rbacRoles") - } - - val indexRequest = IndexRequest(SCHEDULED_JOBS_INDEX) - .setRefreshPolicy(request.refreshPolicy) - .source( - request.workflow.toXContentWithUser( - jsonBuilder(), - ToXContent.MapParams(mapOf("with_type" to "true")) - ) - ) - .setIfSeqNo(request.seqNo) - .setIfPrimaryTerm(request.primaryTerm) - .timeout(indexTimeout) - - try { - val indexResponse: IndexResponse = client.suspendUntil { client.index(indexRequest, it) } - val failureReasons = checkShardsFailure(indexResponse) - if (failureReasons != null) { - log.error("Failed to create workflow: $failureReasons") - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - failureReasons.toString(), - indexResponse.status() - ) - ) - ) - return - } - - val createdWorkflow = request.workflow.copy(id = indexResponse.id) - val executionId = CompositeWorkflowRunner.generateExecutionId(false, createdWorkflow) - - val (workflowMetadata, _) = WorkflowMetadataService.getOrCreateWorkflowMetadata( - workflow = createdWorkflow, - skipIndex = false, - executionId = executionId - ) - - val delegates = (createdWorkflow.inputs[0] as CompositeInput).sequence.delegates.sortedBy { it.order } - val monitors = monitorCtx.workflowService!!.getMonitorsById(delegates.map { it.monitorId }, delegates.size) - - for (monitor in monitors) { - var (monitorMetadata, created) = MonitorMetadataService.getOrCreateMetadata( - monitor = monitor, - createWithRunContext = true, - workflowMetadataId = workflowMetadata.id - ) - - if (created == false) { - log.warn("Metadata doc id:${monitorMetadata.id} exists, but it shouldn't!") - } - - if (monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { - val oldMonitorMetadata = MonitorMetadataService.getMetadata(monitor) - monitorMetadata = monitorMetadata.copy(sourceToQueryIndexMapping = oldMonitorMetadata!!.sourceToQueryIndexMapping) - } - // When 
inserting queries in queryIndex we could update sourceToQueryIndexMapping - MonitorMetadataService.upsertMetadata(monitorMetadata, updating = true) - } - actionListener.onResponse( - IndexWorkflowResponse( - indexResponse.id, indexResponse.version, indexResponse.seqNo, - indexResponse.primaryTerm, request.workflow.copy(id = indexResponse.id) - ) - ) - } catch (t: Exception) { - log.error("Failed to index workflow", t) - actionListener.onFailure(AlertingException.wrap(t)) - } - } - - private suspend fun updateWorkflow() { - val getRequest = GetRequest(SCHEDULED_JOBS_INDEX, request.workflowId) - try { - val getResponse: GetResponse = client.suspendUntil { client.get(getRequest, it) } - if (!getResponse.isExists) { - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "Workflow with ${request.workflowId} is not found", - RestStatus.NOT_FOUND - ) - ) - ) - return - } - val xcp = XContentHelper.createParser( - xContentRegistry, LoggingDeprecationHandler.INSTANCE, - getResponse.sourceAsBytesRef, XContentType.JSON - ) - val workflow = ScheduledJob.parse(xcp, getResponse.id, getResponse.version) as Workflow - onGetResponse(workflow) - } catch (t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - - private suspend fun onGetResponse(currentWorkflow: Workflow) { - if (!checkUserPermissionsWithResource( - user, - currentWorkflow.user, - actionListener, - "workflow", - request.workflowId - ) - ) { - return - } - - // If both are enabled, use the current existing workflow enabled time, otherwise the next execution will be - // incorrect. - if (request.workflow.enabled && currentWorkflow.enabled) - request.workflow = request.workflow.copy(enabledTime = currentWorkflow.enabledTime) - - /** - * On update workflow check which backend roles to associate to the workflow. - * Below are 2 examples of how the logic works - * - * Example 1, say we have a Workflow with backend roles [a, b, c, d] associated with it. - * If I'm User A (non-admin user) and I have backend roles [a, b, c] associated with me and I make a request to update - * the Workflow's backend roles to [a, b]. This would mean that the roles to remove are [c] and the roles to add are [a, b]. - * The Workflow's backend roles would then be [a, b, d]. - * - * Example 2, say we have a Workflow with backend roles [a, b, c, d] associated with it. - * If I'm User A (admin user) and I have backend roles [a, b, c] associated with me and I make a request to update - * the Workflow's backend roles to [a, b]. This would mean that the roles to remove are [c, d] and the roles to add are [a, b]. - * The Workflow's backend roles would then be [a, b].
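[Editor's aside] The two kdoc examples above reduce to simple set arithmetic. A minimal model in plain Kotlin (updatedBackendRoles is a hypothetical name) whose checks reproduce both examples:

    fun updatedBackendRoles(current: Set<String>, mine: Set<String>, requested: Set<String>, isAdmin: Boolean): Set<String> {
        if (isAdmin) return requested                 // admins replace the role set outright
        val rolesToRemove = mine - requested          // my roles that the update drops
        return current - rolesToRemove + requested    // roles I don't hold (like "d") are preserved
    }

    fun main() {
        val current = setOf("a", "b", "c", "d")
        val mine = setOf("a", "b", "c")
        // Example 1: non-admin update to [a, b] keeps "d", giving [a, b, d]
        check(updatedBackendRoles(current, mine, setOf("a", "b"), isAdmin = false) == setOf("a", "b", "d"))
        // Example 2: admin update to [a, b] gives exactly [a, b]
        check(updatedBackendRoles(current, mine, setOf("a", "b"), isAdmin = true) == setOf("a", "b"))
    }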
- */ - if (user != null) { - if (request.rbacRoles != null) { - if (isAdmin(user)) { - request.workflow = request.workflow.copy( - user = User(user.name, request.rbacRoles, user.roles, user.customAttNames) - ) - } else { - // rolesToRemove: these are the backend roles to remove from the monitor - val rolesToRemove = user.backendRoles - request.rbacRoles.orEmpty() - // remove the monitor's roles with rolesToRemove and add any roles passed into the request.rbacRoles - val updatedRbac = - currentWorkflow.user?.backendRoles.orEmpty() - rolesToRemove + request.rbacRoles.orEmpty() - request.workflow = request.workflow.copy( - user = User(user.name, updatedRbac, user.roles, user.customAttNames) - ) - } - } else { - request.workflow = request.workflow - .copy( - user = User( - user.name, - currentWorkflow.user!!.backendRoles, - user.roles, - user.customAttNames - ) - ) - } - log.debug("Update workflow backend roles to: ${request.workflow.user?.backendRoles}") - } - - request.workflow = request.workflow.copy(schemaVersion = IndexUtils.scheduledJobIndexSchemaVersion) - val indexRequest = IndexRequest(SCHEDULED_JOBS_INDEX) - .setRefreshPolicy(request.refreshPolicy) - .source( - request.workflow.toXContentWithUser( - jsonBuilder(), - ToXContent.MapParams(mapOf("with_type" to "true")) - ) - ) - .id(request.workflowId) - .setIfSeqNo(request.seqNo) - .setIfPrimaryTerm(request.primaryTerm) - .timeout(indexTimeout) - - try { - val indexResponse: IndexResponse = client.suspendUntil { client.index(indexRequest, it) } - val failureReasons = checkShardsFailure(indexResponse) - if (failureReasons != null) { - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - failureReasons.toString(), - indexResponse.status() - ) - ) - ) - return - } - - val updatedWorkflow = request.workflow.copy(id = indexResponse.id) - val executionId = CompositeWorkflowRunner.generateExecutionId(false, updatedWorkflow) - - val (workflowMetadata, _) = WorkflowMetadataService.getOrCreateWorkflowMetadata( - workflow = updatedWorkflow, - skipIndex = false, - executionId = executionId - ) - - val delegates = (updatedWorkflow.inputs[0] as CompositeInput).sequence.delegates.sortedBy { it.order } - val monitors = monitorCtx.workflowService!!.getMonitorsById(delegates.map { it.monitorId }, delegates.size) - - for (monitor in monitors) { - val (monitorMetadata, created) = MonitorMetadataService.getOrCreateMetadata( - monitor = monitor, - createWithRunContext = true, - workflowMetadataId = workflowMetadata.id - ) - - if (created == false && monitor.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR) { - var updatedMetadata = MonitorMetadataService.recreateRunContext(monitorMetadata, monitor) - val oldMonitorMetadata = MonitorMetadataService.getMetadata(monitor) - updatedMetadata = updatedMetadata.copy(sourceToQueryIndexMapping = oldMonitorMetadata!!.sourceToQueryIndexMapping) - MonitorMetadataService.upsertMetadata(updatedMetadata, updating = true) - } - } - actionListener.onResponse( - IndexWorkflowResponse( - indexResponse.id, indexResponse.version, indexResponse.seqNo, - indexResponse.primaryTerm, request.workflow.copy(id = currentWorkflow.id) - ) - ) - } catch (t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - - private fun checkShardsFailure(response: IndexResponse): String? 
{ - val failureReasons = StringBuilder() - if (response.shardInfo.failed > 0) { - response.shardInfo.failures.forEach { entry -> - failureReasons.append(entry.reason()) - } - return failureReasons.toString() - } - return null - } - } - - private fun validateChainedMonitorFindingsMonitors(delegates: List, monitorDelegates: List) { - infix fun List.equalsIgnoreOrder(other: List) = - this.size == other.size && this.toSet() == other.toSet() - - val monitorsById = monitorDelegates.associateBy { it.id } - delegates.forEach { - - val delegateMonitor = monitorsById[it.monitorId] ?: throw AlertingException.wrap( - IllegalArgumentException("Delegate monitor ${it.monitorId} doesn't exist") - ) - if (it.chainedMonitorFindings != null) { - val chainedMonitorIds: MutableList = mutableListOf() - if (it.chainedMonitorFindings!!.monitorId.isNullOrBlank()) { - chainedMonitorIds.addAll(it.chainedMonitorFindings!!.monitorIds) - } else { - chainedMonitorIds.add(it.chainedMonitorFindings!!.monitorId!!) - } - chainedMonitorIds.forEach { chainedMonitorId -> - val chainedFindingMonitor = - monitorsById[chainedMonitorId] ?: throw AlertingException.wrap( - IllegalArgumentException("Chained finding monitor $chainedMonitorId doesn't exist") - ) - - if (chainedFindingMonitor.isQueryLevelMonitor()) { - throw AlertingException.wrap(IllegalArgumentException("Query level monitor can't be part of chained findings")) - } - - val delegateMonitorIndices = getMonitorIndices(delegateMonitor) - - val chainedMonitorIndices = getMonitorIndices(chainedFindingMonitor) - - if (!delegateMonitorIndices.containsAll(chainedMonitorIndices)) { - throw AlertingException.wrap( - IllegalArgumentException( - "Delegate monitor indices ${delegateMonitorIndices.joinToString()} " + - "don't query all of chained findings monitor's indices ${chainedMonitorIndices.joinToString()}" - ) - ) - } - } - } - } - } - - /** - * Returns list of indices for the given monitor depending on its type - */ - private fun getMonitorIndices(monitor: Monitor): List { - return when (monitor.monitorType) { - Monitor.MonitorType.DOC_LEVEL_MONITOR -> (monitor.inputs[0] as DocLevelMonitorInput).indices - Monitor.MonitorType.BUCKET_LEVEL_MONITOR -> monitor.inputs.flatMap { s -> (s as SearchInput).indices } - Monitor.MonitorType.QUERY_LEVEL_MONITOR -> { - if (isADMonitor(monitor)) monitor.inputs.flatMap { s -> (s as SearchInput).indices } - else { - val indices = mutableListOf() - for (input in monitor.inputs) { - when (input) { - is SearchInput -> indices.addAll(input.indices) - else -> indices - } - } - indices - } - } - - else -> emptyList() - } - } - - private fun validateDelegateMonitorsExist( - monitorIds: List, - delegateMonitors: List, - ) { - val reqMonitorIds: MutableList = monitorIds as MutableList - delegateMonitors.forEach { - reqMonitorIds.remove(it.id) - } - if (reqMonitorIds.isNotEmpty()) { - throw AlertingException.wrap(IllegalArgumentException(("${reqMonitorIds.joinToString()} are not valid monitor ids"))) - } - } - - /** - * Validates monitor and indices access - * 1. Validates the monitor access (if the filterByEnabled is set to true - adds backend role filter) as admin - * 2.
Unstashes the context and checks if the user can access the monitor indices - */ - private suspend fun validateMonitorAccess( - request: IndexWorkflowRequest, - user: User?, - client: Client, - actionListener: ActionListener, - ) { - val compositeInput = request.workflow.inputs[0] as CompositeInput - val monitorIds = compositeInput.sequence.delegates.stream().map { it.monitorId }.collect(Collectors.toList()) - val query = QueryBuilders.boolQuery().filter(QueryBuilders.termsQuery("_id", monitorIds)) - val searchSource = SearchSourceBuilder().query(query) - val searchRequest = SearchRequest(SCHEDULED_JOBS_INDEX).source(searchSource) - - if (user != null && !isAdmin(user) && filterByEnabled) { - addFilter(user, searchRequest.source(), "monitor.user.backend_roles.keyword") - } - - val searchMonitorResponse: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } - - if (searchMonitorResponse.isTimedOut) { - throw OpenSearchException("Cannot determine that the $SCHEDULED_JOBS_INDEX index is healthy") - } - val monitors = mutableListOf() - for (hit in searchMonitorResponse.hits) { - XContentType.JSON.xContent().createParser( - xContentRegistry, - LoggingDeprecationHandler.INSTANCE, hit.sourceAsString - ).use { hitsParser -> - val monitor = ScheduledJob.parse(hitsParser, hit.id, hit.version) as Monitor - monitors.add(monitor) - } - } - if (monitors.isEmpty()) { - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "User doesn't have read permissions for one or more configured monitors ${monitorIds.joinToString()}", - RestStatus.FORBIDDEN - ) - ) - ) - return - } - // Validate delegates and it's chained findings - try { - validateDelegateMonitorsExist(monitorIds, monitors) - validateChainedMonitorFindingsMonitors(compositeInput.sequence.delegates, monitors) - } catch (e: Exception) { - actionListener.onFailure(e) - return - } - val indices = getMonitorIndices(monitors) - - val indicesSearchRequest = SearchRequest().indices(*indices.toTypedArray()) - .source(SearchSourceBuilder.searchSource().size(1).query(QueryBuilders.matchAllQuery())) - - if (user != null && filterByEnabled) { - // Unstash the context and check if user with specified roles has indices access - withClosableContext( - InjectorContextElement( - user.name.plus(UUID.randomUUID().toString()), - settings, - client.threadPool().threadContext, - user.roles, - user - ) - ) { - checkIndicesAccess(client, indicesSearchRequest, indices, actionListener) - } - } else { - checkIndicesAccess(client, indicesSearchRequest, indices, actionListener) - } - } - - /** - * Checks if the client can access the given indices - */ - private fun checkIndicesAccess( - client: Client, - indicesSearchRequest: SearchRequest?, - indices: MutableList, - actionListener: ActionListener, - ) { - client.search( - indicesSearchRequest, - object : ActionListener { - override fun onResponse(response: SearchResponse?) 
{ - actionListener.onResponse(AcknowledgedResponse(true)) - } - - override fun onFailure(e: Exception) { - log.error("Error accessing the monitor indices", e) - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "User doesn't have read permissions for one or more configured indices ${indices.joinToString()}", - RestStatus.FORBIDDEN - ) - ) - ) - } - } - ) - } - - /** - * Extract indices from monitors - */ - private fun getMonitorIndices(monitors: List): MutableList { - val indices = mutableListOf() - - val searchInputs = - monitors.flatMap { monitor -> - monitor.inputs.filter { - it.name() == SearchInput.SEARCH_FIELD || it.name() == DocLevelMonitorInput.DOC_LEVEL_INPUT_FIELD - } - } - searchInputs.forEach { - val inputIndices = if (it.name() == SearchInput.SEARCH_FIELD) (it as SearchInput).indices - else (it as DocLevelMonitorInput).indices - indices.addAll(inputIndices) - } - return indices - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportSearchEmailAccountAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportSearchEmailAccountAction.kt deleted file mode 100644 index 29ebbb90f..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/TransportSearchEmailAccountAction.kt +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import org.opensearch.OpenSearchStatusException -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.HandledTransportAction -import org.opensearch.alerting.action.SearchEmailAccountAction -import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST -import org.opensearch.alerting.util.AlertingException -import org.opensearch.alerting.util.DestinationType -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.inject.Inject -import org.opensearch.common.settings.Settings -import org.opensearch.core.action.ActionListener -import org.opensearch.core.rest.RestStatus -import org.opensearch.tasks.Task -import org.opensearch.transport.TransportService - -class TransportSearchEmailAccountAction @Inject constructor( - transportService: TransportService, - val client: Client, - actionFilters: ActionFilters, - val clusterService: ClusterService, - settings: Settings -) : HandledTransportAction( - SearchEmailAccountAction.NAME, - transportService, - actionFilters, - ::SearchRequest -) { - - @Volatile private var allowList = ALLOW_LIST.get(settings) - - init { - clusterService.clusterSettings.addSettingsUpdateConsumer(ALLOW_LIST) { allowList = it } - } - - override fun doExecute(task: Task, searchRequest: SearchRequest, actionListener: ActionListener) { - if (!allowList.contains(DestinationType.EMAIL.value)) { - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "This API is blocked since Destination type [${DestinationType.EMAIL}] is not allowed", - RestStatus.FORBIDDEN - ) - ) - ) - return - } - - client.threadPool().threadContext.stashContext().use { - client.search( - searchRequest, - object : ActionListener { - override fun onResponse(response: SearchResponse) { - actionListener.onResponse(response) - } - - override fun onFailure(e: Exception) { - actionListener.onFailure(e) - } - } - ) - } - } -} diff --git
a/alerting/bin/main/org/opensearch/alerting/transport/TransportSearchEmailGroupAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportSearchEmailGroupAction.kt deleted file mode 100644 index c6fc84640..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/TransportSearchEmailGroupAction.kt +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import org.opensearch.OpenSearchStatusException -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.HandledTransportAction -import org.opensearch.alerting.action.SearchEmailGroupAction -import org.opensearch.alerting.settings.DestinationSettings.Companion.ALLOW_LIST -import org.opensearch.alerting.util.AlertingException -import org.opensearch.alerting.util.DestinationType -import org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.inject.Inject -import org.opensearch.common.settings.Settings -import org.opensearch.core.action.ActionListener -import org.opensearch.core.rest.RestStatus -import org.opensearch.tasks.Task -import org.opensearch.transport.TransportService - -class TransportSearchEmailGroupAction @Inject constructor( - transportService: TransportService, - val client: Client, - actionFilters: ActionFilters, - val clusterService: ClusterService, - settings: Settings -) : HandledTransportAction<SearchRequest, SearchResponse>( - SearchEmailGroupAction.NAME, - transportService, - actionFilters, - ::SearchRequest -) { - - @Volatile private var allowList = ALLOW_LIST.get(settings) - - init { - clusterService.clusterSettings.addSettingsUpdateConsumer(ALLOW_LIST) { allowList = it } - } - - override fun doExecute(task: Task, searchRequest: SearchRequest, actionListener: ActionListener<SearchResponse>) { - if (!allowList.contains(DestinationType.EMAIL.value)) { - actionListener.onFailure( - AlertingException.wrap( - OpenSearchStatusException( - "This API is blocked since Destination type [${DestinationType.EMAIL}] is not allowed", - RestStatus.FORBIDDEN - ) - ) - ) - return - } - - client.threadPool().threadContext.stashContext().use { - client.search( - searchRequest, - object : ActionListener<SearchResponse> { - override fun onResponse(response: SearchResponse) { - actionListener.onResponse(response) - } - - override fun onFailure(e: Exception) { - actionListener.onFailure(e) - } - } - ) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt b/alerting/bin/main/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt deleted file mode 100644 index 7359d60ea..000000000 --- a/alerting/bin/main/org/opensearch/alerting/transport/TransportSearchMonitorAction.kt +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import org.apache.logging.log4j.LogManager -import org.opensearch.action.ActionRequest -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.HandledTransportAction -import org.opensearch.alerting.opensearchapi.addFilter -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.util.AlertingException -import 
org.opensearch.client.Client -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.inject.Inject -import org.opensearch.common.settings.Settings -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.SearchMonitorRequest -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.commons.authuser.User -import org.opensearch.commons.utils.recreateObject -import org.opensearch.core.action.ActionListener -import org.opensearch.core.common.io.stream.NamedWriteableRegistry -import org.opensearch.index.query.BoolQueryBuilder -import org.opensearch.index.query.ExistsQueryBuilder -import org.opensearch.index.query.MatchQueryBuilder -import org.opensearch.index.query.QueryBuilders -import org.opensearch.tasks.Task -import org.opensearch.transport.TransportService - -private val log = LogManager.getLogger(TransportSearchMonitorAction::class.java) - -class TransportSearchMonitorAction @Inject constructor( - transportService: TransportService, - val settings: Settings, - val client: Client, - clusterService: ClusterService, - actionFilters: ActionFilters, - val namedWriteableRegistry: NamedWriteableRegistry -) : HandledTransportAction<ActionRequest, SearchResponse>( - AlertingActions.SEARCH_MONITORS_ACTION_NAME, transportService, actionFilters, ::SearchMonitorRequest -), - SecureTransportAction { - @Volatile - override var filterByEnabled: Boolean = AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings) - init { - listenFilterBySettingChange(clusterService) - } - - override fun doExecute(task: Task, request: ActionRequest, actionListener: ActionListener<SearchResponse>) { - val transformedRequest = request as? SearchMonitorRequest - ?: recreateObject(request, namedWriteableRegistry) { - SearchMonitorRequest(it) - } - - val searchSourceBuilder = transformedRequest.searchRequest.source() - .seqNoAndPrimaryTerm(true) - .version(true) - val queryBuilder = if (searchSourceBuilder.query() == null) BoolQueryBuilder() - else QueryBuilders.boolQuery().must(searchSourceBuilder.query()) - - // The SearchMonitor API supports one 'index' parameter of either the SCHEDULED_JOBS_INDEX or ALL_ALERT_INDEX_PATTERN. - // When querying the ALL_ALERT_INDEX_PATTERN, we don't want to check whether the MONITOR_TYPE field exists - // because we're querying alert indices. - if (transformedRequest.searchRequest.indices().contains(ScheduledJob.SCHEDULED_JOBS_INDEX)) { - val monitorWorkflowType = QueryBuilders.boolQuery().should(QueryBuilders.existsQuery(Monitor.MONITOR_TYPE)) - .should(QueryBuilders.existsQuery(Workflow.WORKFLOW_TYPE)) - queryBuilder.must(monitorWorkflowType) - } - - searchSourceBuilder.query(queryBuilder) - .seqNoAndPrimaryTerm(true) - .version(true) - addOwnerFieldIfNotExists(transformedRequest.searchRequest) - val user = readUserFromThreadContext(client) - client.threadPool().threadContext.stashContext().use { - resolve(transformedRequest, actionListener, user) - } - } - - fun resolve(searchMonitorRequest: SearchMonitorRequest, actionListener: ActionListener<SearchResponse>, user: User?) { - if (user == null) { - // The user header is null when security is disabled or when the user is a super-admin. - search(searchMonitorRequest.searchRequest, actionListener) - } else if (!doFilterForUser(user)) { - // Security is enabled and filter-by is disabled. 
- search(searchMonitorRequest.searchRequest, actionListener) - } else { - // Security is enabled and filter-by is enabled. - log.info("Filtering result by: ${user.backendRoles}") - addFilter(user, searchMonitorRequest.searchRequest.source(), "monitor.user.backend_roles.keyword") - search(searchMonitorRequest.searchRequest, actionListener) - } - } - - fun search(searchRequest: SearchRequest, actionListener: ActionListener<SearchResponse>) { - client.search( - searchRequest, - object : ActionListener<SearchResponse> { - override fun onResponse(response: SearchResponse) { - actionListener.onResponse(response) - } - - override fun onFailure(t: Exception) { - actionListener.onFailure(AlertingException.wrap(t)) - } - } - ) - } - - private fun addOwnerFieldIfNotExists(searchRequest: SearchRequest) { - if (searchRequest.source().query() == null || !searchRequest.source().query().toString().contains("monitor.owner")) { - val boolQueryBuilder: BoolQueryBuilder = if (searchRequest.source().query() == null) BoolQueryBuilder() - else QueryBuilders.boolQuery().must(searchRequest.source().query()) - val bqb = BoolQueryBuilder() - bqb.should().add(BoolQueryBuilder().mustNot(ExistsQueryBuilder("monitor.owner"))) - bqb.should().add(BoolQueryBuilder().must(MatchQueryBuilder("monitor.owner", "alerting"))) - boolQueryBuilder.filter(bqb) - searchRequest.source().query(boolQueryBuilder) - } - } -}
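(Editorial aside, not part of the patch: the namedWriteableRegistry injected above is what allows recreateObject to rebuild a SearchMonitorRequest whose embedded SearchRequest carries named writeables such as query builders. A minimal sketch of the round-trip under those assumptions; recreateObjectSketch is a hypothetical helper, not the opensearch-commons implementation.)

import org.opensearch.common.io.stream.BytesStreamOutput
import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput
import org.opensearch.core.common.io.stream.NamedWriteableRegistry
import org.opensearch.core.common.io.stream.StreamInput
import org.opensearch.core.common.io.stream.Writeable

// Serializes the request, then re-reads it through a registry-aware stream so that
// named writeables (e.g. QueryBuilders inside a SearchRequest) can be looked up;
// a plain StreamInput has no registry and would fail on them.
fun <T> recreateObjectSketch(
    source: Writeable,
    registry: NamedWriteableRegistry,
    reader: (StreamInput) -> T
): T {
    BytesStreamOutput().use { out ->
        source.writeTo(out)
        NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry).use { input ->
            return reader(input)
        }
    }
}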
diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/ExpressionParser.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/ExpressionParser.kt deleted file mode 100644 index c0e215000..000000000 --- a/alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/ExpressionParser.kt +++ /dev/null @@ -1,12 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.triggercondition.parsers - -import org.opensearch.alerting.triggercondition.resolvers.TriggerExpressionResolver - -interface ExpressionParser { - fun parse(): TriggerExpressionResolver -} diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionParser.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionParser.kt deleted file mode 100644 index 835e9b383..000000000 --- a/alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionParser.kt +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.triggercondition.parsers - -import org.opensearch.alerting.triggercondition.resolvers.TriggerExpressionRPNResolver -import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionOperator - -/** - * The postfix (Reverse Polish Notation) parser. - * Uses the Shunting-yard algorithm to parse the trigger expression into postfix form. - * @param triggerExpression String containing the trigger expression for the monitor - */ -class TriggerExpressionParser( - triggerExpression: String -) : TriggerExpressionRPNBaseParser(triggerExpression) { - - override fun parse(): TriggerExpressionRPNResolver { - val expression = expressionToParse.replace(" ", "") - - val splitters = ArrayList<String>() - TriggerExpressionOperator.values().forEach { splitters.add(it.value) } - - val breaks = ArrayList<String>().apply { add(expression) } - for (s in splitters) { - val a = ArrayList<String>() - for (ind in 0 until breaks.size) { - breaks[ind].let { - if (it.length > 1) { - a.addAll(breakString(breaks[ind], s)) - } else a.add(it) - } - } - breaks.clear() - breaks.addAll(a) - } - - return TriggerExpressionRPNResolver(convertInfixToPostfix(breaks)) - } - - private fun breakString(input: String, delimiter: String): ArrayList<String> { - val tokens = input.split(delimiter) - val array = ArrayList<String>() - for (t in tokens) { - array.add(t) - array.add(delimiter) - } - array.removeAt(array.size - 1) - return array - } -}
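(Editorial aside, not part of the patch: a worked example of the tokenization and conversion that parse() above performs; the query names are made up.)

// Infix input:    query[name=sev1] && (query[tag=sev2] || !query[id=sev3])
// Postfix output: query[name=sev1] query[tag=sev2] query[id=sev3] ! || &&
// '!' (precedence 3, right-associative) pops before '||' (precedence 2) when the
// right parenthesis is reached, and '&&' is flushed from the stack at the end.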
diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionRPNBaseParser.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionRPNBaseParser.kt deleted file mode 100644 index 53cd5f0ac..000000000 --- a/alerting/bin/main/org/opensearch/alerting/triggercondition/parsers/TriggerExpressionRPNBaseParser.kt +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.triggercondition.parsers - -import org.opensearch.alerting.triggercondition.tokens.ExpressionToken -import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionConstant -import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionOperator -import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionToken -import java.util.Stack - -/** - * The abstract base class that holds the trigger expression parsing logic, converting the - * expression from infix to postfix (a.k.a. Reverse Polish Notation, RPN) form - * using the Shunting-yard algorithm. - * - * @param expressionToParse Complete string containing the trigger expression - */ -abstract class TriggerExpressionRPNBaseParser( - protected val expressionToParse: String -) : ExpressionParser { - /** - * Performs the infix-to-postfix conversion of the trigger expression - */ - protected fun convertInfixToPostfix(expTokens: List<String>): ArrayList<ExpressionToken> { - val expTokenStack = Stack<ExpressionToken>() - val outputExpTokens = ArrayList<ExpressionToken>() - - for (tokenString in expTokens) { - if (tokenString.isEmpty()) continue - when (val expToken = assignToken(tokenString)) { - is TriggerExpressionToken -> outputExpTokens.add(expToken) - is TriggerExpressionOperator -> { - when (expToken) { - TriggerExpressionOperator.PAR_LEFT -> expTokenStack.push(expToken) - TriggerExpressionOperator.PAR_RIGHT -> { - var topExpToken = expTokenStack.popExpTokenOrNull<TriggerExpressionOperator>() - while (topExpToken != null && topExpToken != TriggerExpressionOperator.PAR_LEFT) { - outputExpTokens.add(topExpToken) - topExpToken = expTokenStack.popExpTokenOrNull<TriggerExpressionOperator>() - } - if (topExpToken != TriggerExpressionOperator.PAR_LEFT) { - throw java.lang.IllegalArgumentException("No matching left parenthesis.") - } - } - else -> { - var op2 = expTokenStack.peekExpTokenOrNull<TriggerExpressionOperator>() - while (op2 != null) { - val c = expToken.precedence.compareTo(op2.precedence) - if (c < 0 || !expToken.rightAssociative && c <= 0) { - outputExpTokens.add(expTokenStack.pop()) - } else { - break - } - op2 = expTokenStack.peekExpTokenOrNull<TriggerExpressionOperator>() - } - expTokenStack.push(expToken) - } - } - } - } - } - - while (!expTokenStack.isEmpty()) { - expTokenStack.peekExpTokenOrNull<TriggerExpressionOperator>()?.let { - if (it == TriggerExpressionOperator.PAR_LEFT) { - throw java.lang.IllegalArgumentException("No matching right parenthesis.") - } - } - val top = expTokenStack.pop() - outputExpTokens.add(top) - } - - return outputExpTokens - } - - /** - * Looks up and maps the expression token that matches the string version of that expression unit - */ - private fun assignToken(tokenString: String): ExpressionToken { - // Check for the "query" keyword in the trigger expression, e.g. 'query[name="abc"]' - if (tokenString.startsWith(TriggerExpressionConstant.ConstantType.QUERY.ident)) { - return TriggerExpressionToken(tokenString) - } - - // Check for operators in the trigger expression, i.e. &&, ||, ! - for (op in TriggerExpressionOperator.values()) { - if (op.value == tokenString) return op - } - - // Check for constants in the trigger expression, i.e. "name", "id", "tag", "[", "]", "=" - for (con in TriggerExpressionConstant.ConstantType.values()) { - if (tokenString == con.ident) return TriggerExpressionConstant(con) - } - - throw IllegalArgumentException("Error while processing the trigger expression '$tokenString'") - } - - private inline fun <reified T> Stack<ExpressionToken>.popExpTokenOrNull(): T? { - return try { - pop() as T - } catch (e: java.lang.Exception) { - null - } - } - - private inline fun <reified T> Stack<ExpressionToken>.peekExpTokenOrNull(): T? { - return try { - peek() as T - } catch (e: java.lang.Exception) { - null - } - } -}
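(Editorial aside, not part of the patch: a sketch of how the parser and resolver classes in this package fit together. The query names, tags, and doc ids are made up, and the DocLevelQuery constructor is simplified; its exact parameter list varies across opensearch-commons versions.)

// Parse the infix trigger expression once, then evaluate the resulting RPN resolver
// against the per-query document-id sets produced by a document-level monitor run.
val resolver = TriggerExpressionParser("query[name=errors] && !query[tag=test]").parse()
val errorsQuery = DocLevelQuery(id = "q1", name = "errors", fields = listOf(), query = "level:ERROR", tags = listOf())
val noiseQuery = DocLevelQuery(id = "q2", name = "noise", fields = listOf(), query = "*", tags = listOf("test"))
val triggered = resolver.evaluate(mapOf(errorsQuery to setOf("doc-1", "doc-2"), noiseQuery to setOf("doc-2")))
// triggered == setOf("doc-1"): docs matching "errors", minus docs from any query tagged "test"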
diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpression.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpression.kt deleted file mode 100644 index 2a3e6c1ff..000000000 --- a/alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpression.kt +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.triggercondition.resolvers - -sealed class TriggerExpression { - - fun resolve(): Set<String> = when (this) { - is And -> resolveAnd(docSet1, docSet2) - is Or -> resolveOr(docSet1, docSet2) - is Not -> resolveNot(allDocs, docSet2) - } - - private fun resolveAnd(documentSet1: Set<String>, documentSet2: Set<String>): Set<String> { - return documentSet1.intersect(documentSet2) - } - - private fun resolveOr(documentSet1: Set<String>, documentSet2: Set<String>): Set<String> { - return documentSet1.union(documentSet2) - } - - private fun resolveNot(allDocs: Set<String>, documentSet2: Set<String>): Set<String> { - return allDocs.subtract(documentSet2) - } - - // Operators implemented as concrete expression classes - class And(val docSet1: Set<String>, val docSet2: Set<String>) : TriggerExpression() - class Or(val docSet1: Set<String>, val docSet2: Set<String>) : TriggerExpression() - class Not(val allDocs: Set<String>, val docSet2: Set<String>) : TriggerExpression() -} diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionRPNResolver.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionRPNResolver.kt deleted file mode 100644 index 45937c8ab..000000000 --- a/alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionRPNResolver.kt +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.triggercondition.resolvers - -import org.opensearch.alerting.triggercondition.tokens.ExpressionToken -import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionConstant -import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionOperator -import org.opensearch.alerting.triggercondition.tokens.TriggerExpressionToken -import org.opensearch.commons.alerting.model.DocLevelQuery -import java.util.Optional -import java.util.Stack - -/** - * Solves the trigger expression using a Reverse Polish Notation (RPN) based solver - * @param polishNotation an array of expression tokens organized in RPN order - */ -class TriggerExpressionRPNResolver( - private val polishNotation: ArrayList<ExpressionToken> -) : TriggerExpressionResolver { - - private val eqString by lazy { - val stringBuilder = StringBuilder() - for (expToken in polishNotation) { - when (expToken) { - is TriggerExpressionToken -> stringBuilder.append(expToken.value) - is TriggerExpressionOperator -> stringBuilder.append(expToken.value) - is TriggerExpressionConstant -> stringBuilder.append(expToken.type.ident) - else -> throw Exception() - } - stringBuilder.append(" ") - } - stringBuilder.toString() - } - - override fun toString(): String = eqString - - /** - * Evaluates the trigger expression provided in the form of the RPN token array. 
- * @param queryToDocIds map of the document ids that matched each query - * @return the final set of document ids after evaluating the expression - */ - override fun evaluate(queryToDocIds: Map<DocLevelQuery, Set<String>>): Set<String> { - val tokenStack = Stack<Set<String>>() - - val allDocIds = mutableSetOf<String>() - for (value in queryToDocIds.values) { - allDocIds.addAll(value) - } - - for (expToken in polishNotation) { - when (expToken) { - is TriggerExpressionToken -> tokenStack.push(resolveQueryExpression(expToken.value, queryToDocIds)) - is TriggerExpressionOperator -> { - val right = tokenStack.pop() - val expr = when (expToken) { - TriggerExpressionOperator.AND -> TriggerExpression.And(tokenStack.pop(), right) - TriggerExpressionOperator.OR -> TriggerExpression.Or(tokenStack.pop(), right) - TriggerExpressionOperator.NOT -> TriggerExpression.Not(allDocIds, right) - else -> throw IllegalArgumentException("No matching operator.") - } - tokenStack.push(expr.resolve()) - } - } - } - return tokenStack.pop() - } - - private fun resolveQueryExpression(queryExpString: String, queryToDocIds: Map<DocLevelQuery, Set<String>>): Set<String> { - if (!queryExpString.startsWith(TriggerExpressionConstant.ConstantType.QUERY.ident)) return emptySet() - val token = queryExpString.substringAfter(TriggerExpressionConstant.ConstantType.BRACKET_LEFT.ident) - .substringBefore(TriggerExpressionConstant.ConstantType.BRACKET_RIGHT.ident) - if (token.isEmpty()) return emptySet() - - val tokens = token.split(TriggerExpressionConstant.ConstantType.EQUALS.ident) - if (tokens.size != 2) return emptySet() - - val identifier = tokens[0] - val value = tokens[1] - val documents = mutableSetOf<String>() - when (identifier) { - TriggerExpressionConstant.ConstantType.NAME.ident -> { - val key: Optional<DocLevelQuery> = queryToDocIds.keys.stream().filter { it.name == value }.findFirst() - if (key.isPresent) queryToDocIds[key.get()]?.let { doc -> documents.addAll(doc) } - } - - TriggerExpressionConstant.ConstantType.ID.ident -> { - val key: Optional<DocLevelQuery> = queryToDocIds.keys.stream().filter { it.id == value }.findFirst() - if (key.isPresent) queryToDocIds[key.get()]?.let { doc -> documents.addAll(doc) } - } - - // Iterate through all the queries that share the same tag - TriggerExpressionConstant.ConstantType.TAG.ident -> { - queryToDocIds.keys.stream().forEach { - if (it.tags.contains(value)) queryToDocIds[it]?.let { it1 -> documents.addAll(it1) } - } - } - } - return documents - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionResolver.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionResolver.kt deleted file mode 100644 index fea22c356..000000000 --- a/alerting/bin/main/org/opensearch/alerting/triggercondition/resolvers/TriggerExpressionResolver.kt +++ /dev/null @@ -1,12 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.triggercondition.resolvers - -import org.opensearch.commons.alerting.model.DocLevelQuery - -interface TriggerExpressionResolver { - fun evaluate(queryToDocIds: Map<DocLevelQuery, Set<String>>): Set<String> -} diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/ExpressionToken.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/ExpressionToken.kt deleted file mode 100644 index 2085bf2d3..000000000 --- a/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/ExpressionToken.kt +++ /dev/null @@ -1,8 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package 
org.opensearch.alerting.triggercondition.tokens - -interface ExpressionToken diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionConstant.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionConstant.kt deleted file mode 100644 index 80e662a21..000000000 --- a/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionConstant.kt +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.triggercondition.tokens - -/** - * To define all the tokens which could be part of expression constant such as query[id=new_id], query[name=new_name], - * query[tag=new_tag] - */ -class TriggerExpressionConstant(val type: ConstantType) : ExpressionToken { - - enum class ConstantType(val ident: String) { - QUERY("query"), - - TAG("tag"), - NAME("name"), - ID("id"), - - BRACKET_LEFT("["), - BRACKET_RIGHT("]"), - - EQUALS("=") - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionOperator.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionOperator.kt deleted file mode 100644 index de3c4a0df..000000000 --- a/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionOperator.kt +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.triggercondition.tokens - -/** - * To define all the operators used in the trigger expression - */ -enum class TriggerExpressionOperator(val value: String, val precedence: Int, val rightAssociative: Boolean) : ExpressionToken { - - AND("&&", 2, false), - OR("||", 2, false), - - NOT("!", 3, true), - - PAR_LEFT("(", 1, false), - PAR_RIGHT(")", 1, false) -} diff --git a/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionToken.kt b/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionToken.kt deleted file mode 100644 index 808f7737d..000000000 --- a/alerting/bin/main/org/opensearch/alerting/triggercondition/tokens/TriggerExpressionToken.kt +++ /dev/null @@ -1,11 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.triggercondition.tokens - -/** - * To define the tokens in Trigger expression such as query[tag=“sev1"] or query[name=“sev1"] or query[id=“sev1"] - */ -internal data class TriggerExpressionToken(val value: String) : ExpressionToken diff --git a/alerting/bin/main/org/opensearch/alerting/util/AggregationQueryRewriter.kt b/alerting/bin/main/org/opensearch/alerting/util/AggregationQueryRewriter.kt deleted file mode 100644 index e1b6675b2..000000000 --- a/alerting/bin/main/org/opensearch/alerting/util/AggregationQueryRewriter.kt +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util - -import org.opensearch.action.search.SearchResponse -import org.opensearch.alerting.model.InputRunResults -import org.opensearch.alerting.model.TriggerAfterKey -import org.opensearch.commons.alerting.model.BucketLevelTrigger -import org.opensearch.commons.alerting.model.Trigger -import org.opensearch.search.aggregations.AggregationBuilder -import org.opensearch.search.aggregations.AggregatorFactories -import 
org.opensearch.search.aggregations.bucket.SingleBucketAggregation -import org.opensearch.search.aggregations.bucket.composite.CompositeAggregation -import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder -import org.opensearch.search.aggregations.support.AggregationPath -import org.opensearch.search.builder.SearchSourceBuilder - -class AggregationQueryRewriter { - - companion object { - /** - * Add the bucket selector conditions for each trigger in input query. It also adds afterKeys from previous result - * for each trigger. - */ - fun rewriteQuery(query: SearchSourceBuilder, prevResult: InputRunResults?, triggers: List): SearchSourceBuilder { - triggers.forEach { trigger -> - if (trigger is BucketLevelTrigger) { - // add bucket selector pipeline aggregation for each trigger in query - query.aggregation(trigger.bucketSelector) - // if this request is processing the subsequent pages of input query result, then add after key - if (prevResult?.aggTriggersAfterKey?.get(trigger.id) != null) { - val parentBucketPath = AggregationPath.parse(trigger.bucketSelector.parentBucketPath) - var aggBuilders = (query.aggregations() as AggregatorFactories.Builder).aggregatorFactories - var factory: AggregationBuilder? = null - for (i in 0 until parentBucketPath.pathElements.size) { - factory = null - for (aggFactory in aggBuilders) { - if (aggFactory.name.equals(parentBucketPath.pathElements[i].name)) { - aggBuilders = aggFactory.subAggregations - factory = aggFactory - break - } - } - if (factory == null) { - throw IllegalArgumentException("ParentBucketPath: $parentBucketPath not found in input query results") - } - } - if (factory is CompositeAggregationBuilder) { - // if the afterKey from previous result is null, what does it signify? - // A) result set exhausted OR B) first page ? - val afterKey = prevResult.aggTriggersAfterKey[trigger.id]!!.afterKey - factory.aggregateAfter(afterKey) - } else { - throw IllegalStateException("AfterKeys are not expected to be present in non CompositeAggregationBuilder") - } - } - } - } - - return query - } - - /** - * For each trigger, returns the after keys if present in query result. - */ - fun getAfterKeysFromSearchResponse( - searchResponse: SearchResponse, - triggers: List, - prevBucketLevelTriggerAfterKeys: Map? - ): Map { - val bucketLevelTriggerAfterKeys = mutableMapOf() - triggers.forEach { trigger -> - if (trigger is BucketLevelTrigger) { - val parentBucketPath = AggregationPath.parse(trigger.bucketSelector.parentBucketPath) - var aggs = searchResponse.aggregations - // assuming all intermediate aggregations as SingleBucketAggregation - for (i in 0 until parentBucketPath.pathElements.size - 1) { - aggs = (aggs.asMap()[parentBucketPath.pathElements[i].name] as SingleBucketAggregation).aggregations - } - val lastAgg = aggs.asMap[parentBucketPath.pathElements.last().name] - // if leaf is CompositeAggregation, then fetch afterKey if present - if (lastAgg is CompositeAggregation) { - /* - * Bucket-Level Triggers can have different parent bucket paths that they are tracking for condition evaluation. - * These different bucket paths could have different page sizes, meaning one could be exhausted while another - * bucket path still has pages to iterate in the query responses. 
- * - * To ensure that these can be exhausted and tracked independently, the after key that led to the last page (which - * should be an empty result for the bucket path) is saved when the last page is hit and continues - * to be passed on for that bucket path while other bucket paths are still being paginated. - */ - val afterKey = lastAgg.afterKey() - val prevTriggerAfterKey = prevBucketLevelTriggerAfterKeys?.get(trigger.id) - bucketLevelTriggerAfterKeys[trigger.id] = when { - // If the previous TriggerAfterKey was null, this should be the first page - prevTriggerAfterKey == null -> TriggerAfterKey(afterKey, afterKey == null) - // If the previous TriggerAfterKey already hit the last page, pass along the after key it used to get there - prevTriggerAfterKey.lastPage -> prevTriggerAfterKey - // If the previous TriggerAfterKey had not reached the last page and the after key for the current result - // is null, then the last page has been reached, so the after key that was used to get there is stored - afterKey == null -> TriggerAfterKey(prevTriggerAfterKey.afterKey, true) - // Otherwise, update the after key to the current one - else -> TriggerAfterKey(afterKey, false) - } - } - } - } - return bucketLevelTriggerAfterKeys - } - } -}
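(Editorial aside, not part of the patch: the TriggerAfterKey bookkeeping above is ordinary composite-aggregation paging. A minimal sketch of that loop against a plain client; the index, field, and aggregation names are made up.)

import org.opensearch.action.search.SearchRequest
import org.opensearch.client.Client
import org.opensearch.search.aggregations.AggregationBuilders
import org.opensearch.search.aggregations.bucket.composite.CompositeAggregation
import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder
import org.opensearch.search.builder.SearchSourceBuilder

// Pages through a composite aggregation: each response's afterKey() seeds the next
// request via aggregateAfter(), and a null afterKey() marks the last page.
fun pageCompositeAgg(client: Client) {
    var afterKey: Map<String, Any>? = null
    do {
        val composite = AggregationBuilders.composite(
            "by_service", listOf(TermsValuesSourceBuilder("service").field("service.keyword"))
        ).size(100)
        if (afterKey != null) composite.aggregateAfter(afterKey)
        val response = client.search(
            SearchRequest("logs-*").source(SearchSourceBuilder().size(0).aggregation(composite))
        ).actionGet()
        val agg = response.aggregations.get<CompositeAggregation>("by_service")
        agg.buckets.forEach { bucket -> println("${bucket.key} -> ${bucket.docCount}") }
        afterKey = agg.afterKey()
    } while (afterKey != null)
}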
diff --git a/alerting/bin/main/org/opensearch/alerting/util/AlertingException.kt b/alerting/bin/main/org/opensearch/alerting/util/AlertingException.kt deleted file mode 100644 index 4127afaa2..000000000 --- a/alerting/bin/main/org/opensearch/alerting/util/AlertingException.kt +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util - -import org.apache.logging.log4j.LogManager -import org.opensearch.OpenSearchException -import org.opensearch.OpenSearchSecurityException -import org.opensearch.OpenSearchStatusException -import org.opensearch.core.common.Strings -import org.opensearch.core.rest.RestStatus -import org.opensearch.index.IndexNotFoundException -import org.opensearch.index.engine.VersionConflictEngineException -import org.opensearch.indices.InvalidIndexNameException - -private val log = LogManager.getLogger(AlertingException::class.java) - -/** - * Converts an exception into a user-friendly message. - */ -class AlertingException(message: String, val status: RestStatus, ex: Exception) : OpenSearchException(message, ex) { - - override fun status(): RestStatus { - return status - } - - companion object { - @JvmStatic - fun wrap(ex: Exception): OpenSearchException { - log.error("Alerting error: $ex") - - var friendlyMsg = "Unknown error" - var status = RestStatus.INTERNAL_SERVER_ERROR - when (ex) { - is IndexNotFoundException -> { - status = ex.status() - friendlyMsg = "Configured indices are not found: ${ex.index}" - } - is OpenSearchSecurityException -> { - status = ex.status() - friendlyMsg = "User doesn't have permissions to execute this action. Contact administrator." - } - is OpenSearchStatusException -> { - status = ex.status() - friendlyMsg = ex.message as String - } - is IllegalArgumentException -> { - status = RestStatus.BAD_REQUEST - friendlyMsg = ex.message as String - } - is VersionConflictEngineException -> { - status = ex.status() - friendlyMsg = ex.message as String - } - is InvalidIndexNameException -> { - status = RestStatus.BAD_REQUEST - friendlyMsg = ex.message as String - } - else -> { - if (!Strings.isNullOrEmpty(ex.message)) { - friendlyMsg = ex.message as String - } - } - } - // Wrap the original exception as a plain Exception cause to avoid it being formatted. - // Currently, alerting-kibana is using `error.root_cause.reason` as the text in the toast message. - // The logic below sets the friendly message as error.root_cause.reason. - return AlertingException(friendlyMsg, status, Exception("${ex.javaClass.name}: ${ex.message}")) - } - } -}
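(Editorial aside, not part of the patch: what wrap() buys callers, roughly; the exact reason text depends on IndexNotFoundException's formatting.)

import org.opensearch.index.IndexNotFoundException

// An internal IndexNotFoundException surfaces to API callers as a 404 whose
// root-cause reason is the friendly message, while the original exception
// class and message are preserved in the cause.
val wrapped = AlertingException.wrap(IndexNotFoundException("my-index"))
// wrapped.status() == RestStatus.NOT_FOUND
// wrapped.cause?.message contains "IndexNotFoundException" and "my-index"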
+ - "|\\[(?:(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\\.){3}" + - "(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:" + - "(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21-\\x5a\\x53-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])+)\\])", - RegexOption.IGNORE_CASE - ) - - return validEmailPattern.matches(email) -} - -fun getRoleFilterEnabled(clusterService: ClusterService, settings: Settings, settingPath: String): Boolean { - var adBackendRoleFilterEnabled: Boolean - val metaData = clusterService.state().metadata() - - // get default value for setting - if (clusterService.clusterSettings.get(settingPath) != null) { - adBackendRoleFilterEnabled = clusterService.clusterSettings.get(settingPath).getDefault(settings) as Boolean - } else { - // default setting doesn't exist, so returning false as it means AD plugins isn't in cluster anyway - return false - } - - // Transient settings are prioritized so those are checked first. - return if (metaData.transientSettings().get(settingPath) != null) { - metaData.transientSettings().getAsBoolean(settingPath, adBackendRoleFilterEnabled) - } else if (metaData.persistentSettings().get(settingPath) != null) { - metaData.persistentSettings().getAsBoolean(settingPath, adBackendRoleFilterEnabled) - } else { - adBackendRoleFilterEnabled - } -} - -/** Allowed Destinations are ones that are specified in the [DestinationSettings.ALLOW_LIST] setting. */ -fun Destination.isAllowed(allowList: List): Boolean = allowList.contains(this.type.value) - -fun Destination.isTestAction(): Boolean = this.type == DestinationType.TEST_ACTION - -fun Monitor.isDocLevelMonitor(): Boolean = this.monitorType == Monitor.MonitorType.DOC_LEVEL_MONITOR - -fun Monitor.isQueryLevelMonitor(): Boolean = this.monitorType == Monitor.MonitorType.QUERY_LEVEL_MONITOR - -/** - * Since buckets can have multi-value keys, this converts the bucket key values to a string that can be used - * as the key for a HashMap to easily retrieve [AggregationResultBucket] based on the bucket key values. - */ -fun AggregationResultBucket.getBucketKeysHash(): String = this.bucketKeys.joinToString(separator = "#") - -fun Action.getActionExecutionPolicy(monitor: Monitor): ActionExecutionPolicy? { - // When the ActionExecutionPolicy is null for an Action, the default is resolved at runtime - // so it can be chosen based on the Monitor type at that time. - // The Action config is not aware of the Monitor type which is why the default was not stored during - // the parse. - return this.actionExecutionPolicy ?: if (monitor.isBucketLevelMonitor()) { - ActionExecutionPolicy.getDefaultConfigurationForBucketLevelMonitor() - } else if (monitor.isDocLevelMonitor()) { - ActionExecutionPolicy.getDefaultConfigurationForDocumentLevelMonitor() - } else { - null - } -} - -fun BucketLevelTriggerRunResult.getCombinedTriggerRunResult( - prevTriggerRunResult: BucketLevelTriggerRunResult? 
diff --git a/alerting/bin/main/org/opensearch/alerting/util/AnomalyDetectionUtils.kt b/alerting/bin/main/org/opensearch/alerting/util/AnomalyDetectionUtils.kt deleted file mode 100644 index e83f45a15..000000000 --- a/alerting/bin/main/org/opensearch/alerting/util/AnomalyDetectionUtils.kt +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util - -import org.apache.lucene.search.join.ScoreMode -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.SearchInput -import org.opensearch.commons.authuser.User -import org.opensearch.core.common.Strings -import org.opensearch.index.query.BoolQueryBuilder -import org.opensearch.index.query.NestedQueryBuilder -import org.opensearch.index.query.QueryBuilders -import org.opensearch.search.builder.SearchSourceBuilder - -/** - * An AD monitor is a search-input monitor on top of the anomaly result index. This method returns - * true if the monitor input only contains the anomaly result index. - */ -fun isADMonitor(monitor: Monitor): Boolean { - // If the monitor has any input other than the AD result index, it's not an AD monitor - if (monitor.inputs.size != 1) { - return false - } - val input = monitor.inputs[0] - // An AD monitor can only have one anomaly result index. 
- if (input is SearchInput && input.indices.size == 1 && input.indices[0] == ".opendistro-anomaly-results*") { - return true - } - return false -} - -fun addUserBackendRolesFilter(user: User?, searchSourceBuilder: SearchSourceBuilder): SearchSourceBuilder { - var boolQueryBuilder = BoolQueryBuilder() - val userFieldName = "user" - val userBackendRoleFieldName = "user.backend_roles.keyword" - if (user == null || Strings.isEmpty(user.name)) { - // For 1) old monitor and detector 2) security disabled or superadmin access, they have no/empty user field - val userRolesFilterQuery = QueryBuilders.existsQuery(userFieldName) - val nestedQueryBuilder = NestedQueryBuilder(userFieldName, userRolesFilterQuery, ScoreMode.None) - boolQueryBuilder.mustNot(nestedQueryBuilder) - } else if (user.backendRoles.isNullOrEmpty()) { - // For simple FGAC user, they may have no backend roles, these users should be able to see detectors - // of other users whose backend role is empty. - val userRolesFilterQuery = QueryBuilders.existsQuery(userBackendRoleFieldName) - val nestedQueryBuilder = NestedQueryBuilder(userFieldName, userRolesFilterQuery, ScoreMode.None) - - val userExistsQuery = QueryBuilders.existsQuery(userFieldName) - val userExistsNestedQueryBuilder = NestedQueryBuilder(userFieldName, userExistsQuery, ScoreMode.None) - - boolQueryBuilder.mustNot(nestedQueryBuilder) - boolQueryBuilder.must(userExistsNestedQueryBuilder) - } else { - // For normal case, user should have backend roles. - val userRolesFilterQuery = QueryBuilders.termsQuery(userBackendRoleFieldName, user.backendRoles) - val nestedQueryBuilder = NestedQueryBuilder(userFieldName, userRolesFilterQuery, ScoreMode.None) - boolQueryBuilder.must(nestedQueryBuilder) - } - val query = searchSourceBuilder.query() - if (query == null) { - searchSourceBuilder.query(boolQueryBuilder) - } else { - (query as BoolQueryBuilder).filter(boolQueryBuilder) - } - return searchSourceBuilder -} diff --git a/alerting/bin/main/org/opensearch/alerting/util/DestinationType.kt b/alerting/bin/main/org/opensearch/alerting/util/DestinationType.kt deleted file mode 100644 index d714288f8..000000000 --- a/alerting/bin/main/org/opensearch/alerting/util/DestinationType.kt +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util - -enum class DestinationType(val value: String) { - CHIME("chime"), - SLACK("slack"), - CUSTOM_WEBHOOK("custom_webhook"), - EMAIL("email"), - TEST_ACTION("test_action"); - - override fun toString(): String { - return value - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/util/DocLevelMonitorQueries.kt b/alerting/bin/main/org/opensearch/alerting/util/DocLevelMonitorQueries.kt deleted file mode 100644 index 0a98a139d..000000000 --- a/alerting/bin/main/org/opensearch/alerting/util/DocLevelMonitorQueries.kt +++ /dev/null @@ -1,608 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util - -import org.apache.logging.log4j.LogManager -import org.opensearch.ExceptionsHelper -import org.opensearch.OpenSearchStatusException -import org.opensearch.ResourceAlreadyExistsException -import org.opensearch.action.admin.indices.alias.Alias -import org.opensearch.action.admin.indices.create.CreateIndexRequest -import org.opensearch.action.admin.indices.create.CreateIndexResponse -import org.opensearch.action.admin.indices.delete.DeleteIndexRequest -import 
org.opensearch.action.admin.indices.mapping.put.PutMappingRequest -import org.opensearch.action.admin.indices.rollover.RolloverRequest -import org.opensearch.action.admin.indices.rollover.RolloverResponse -import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest -import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse -import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest -import org.opensearch.action.bulk.BulkRequest -import org.opensearch.action.bulk.BulkResponse -import org.opensearch.action.index.IndexRequest -import org.opensearch.action.support.WriteRequest.RefreshPolicy -import org.opensearch.action.support.master.AcknowledgedResponse -import org.opensearch.alerting.MonitorRunnerService.monitorCtx -import org.opensearch.alerting.model.MonitorMetadata -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.client.Client -import org.opensearch.cluster.ClusterState -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.settings.Settings -import org.opensearch.common.unit.TimeValue -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.DataSources -import org.opensearch.commons.alerting.model.DocLevelMonitorInput -import org.opensearch.commons.alerting.model.DocLevelQuery -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.core.rest.RestStatus -import org.opensearch.index.mapper.MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING - -private val log = LogManager.getLogger(DocLevelMonitorQueries::class.java) - -class DocLevelMonitorQueries(private val client: Client, private val clusterService: ClusterService) { - companion object { - - const val PROPERTIES = "properties" - const val NESTED = "nested" - const val TYPE = "type" - const val INDEX_PATTERN_SUFFIX = "-000001" - const val QUERY_INDEX_BASE_FIELDS_COUNT = 8 // 3 fields we defined and 5 builtin additional metadata fields - @JvmStatic - fun docLevelQueriesMappings(): String { - return DocLevelMonitorQueries::class.java.classLoader.getResource("mappings/doc-level-queries.json").readText() - } - fun docLevelQueriesSettings(): Settings { - return Settings.builder().loadFromSource( - DocLevelMonitorQueries::class.java.classLoader.getResource("settings/doc-level-queries.json").readText(), - XContentType.JSON - ).build() - } - } - - suspend fun initDocLevelQueryIndex(): Boolean { - if (!docLevelQueryIndexExists()) { - // Since we changed queryIndex to be alias now, for backwards compatibility, we have to delete index with same name - // as our alias, to avoid name clash. - if (clusterService.state().metadata.hasIndex(ScheduledJob.DOC_LEVEL_QUERIES_INDEX)) { - val acknowledgedResponse: AcknowledgedResponse = client.suspendUntil { - admin().indices().delete(DeleteIndexRequest(ScheduledJob.DOC_LEVEL_QUERIES_INDEX), it) - } - if (!acknowledgedResponse.isAcknowledged) { - val errorMessage = "Deletion of old queryIndex [${ScheduledJob.DOC_LEVEL_QUERIES_INDEX}] index is not acknowledged!" 
- log.error(errorMessage) - throw AlertingException.wrap(OpenSearchStatusException(errorMessage, RestStatus.INTERNAL_SERVER_ERROR)) - } - } - val alias = ScheduledJob.DOC_LEVEL_QUERIES_INDEX - val indexPattern = ScheduledJob.DOC_LEVEL_QUERIES_INDEX + INDEX_PATTERN_SUFFIX - val indexRequest = CreateIndexRequest(indexPattern) - .mapping(docLevelQueriesMappings()) - .alias(Alias(alias)) - .settings(docLevelQueriesSettings()) - return try { - val createIndexResponse: CreateIndexResponse = client.suspendUntil { client.admin().indices().create(indexRequest, it) } - createIndexResponse.isAcknowledged - } catch (t: Exception) { - if (ExceptionsHelper.unwrapCause(t) is ResourceAlreadyExistsException) { - true - } else { - throw t - } - } - } - return true - } - suspend fun initDocLevelQueryIndex(dataSources: DataSources): Boolean { - if (dataSources.queryIndex == ScheduledJob.DOC_LEVEL_QUERIES_INDEX) { - return initDocLevelQueryIndex() - } - // Since we changed queryIndex to be alias now, for backwards compatibility, we have to delete index with same name - // as our alias, to avoid name clash. - if (clusterService.state().metadata.hasIndex(dataSources.queryIndex)) { - val acknowledgedResponse: AcknowledgedResponse = client.suspendUntil { - admin().indices().delete(DeleteIndexRequest(dataSources.queryIndex), it) - } - if (!acknowledgedResponse.isAcknowledged) { - log.warn("Deletion of old queryIndex [${dataSources.queryIndex}] index is not acknowledged!") - } - } - val alias = dataSources.queryIndex - val indexPattern = dataSources.queryIndex + INDEX_PATTERN_SUFFIX - if (!clusterService.state().metadata.hasAlias(alias)) { - val indexRequest = CreateIndexRequest(indexPattern) - .mapping(docLevelQueriesMappings()) - .alias(Alias(alias)) - .settings( - Settings.builder().put("index.hidden", true) - .build() - ) - return try { - val createIndexResponse: CreateIndexResponse = client.suspendUntil { client.admin().indices().create(indexRequest, it) } - createIndexResponse.isAcknowledged - } catch (t: Exception) { - if (ExceptionsHelper.unwrapCause(t) is ResourceAlreadyExistsException) { - true - } else { - throw t - } - } - } - return true - } - - fun docLevelQueryIndexExists(dataSources: DataSources): Boolean { - val clusterState = clusterService.state() - return clusterState.metadata.hasAlias(dataSources.queryIndex) - } - - fun docLevelQueryIndexExists(): Boolean { - val clusterState = clusterService.state() - return clusterState.metadata.hasAlias(ScheduledJob.DOC_LEVEL_QUERIES_INDEX) - } - - /** - * Does a DFS traversal of index mappings tree. - * Calls processLeafFn on every leaf node. 
- * Populates flattenPaths list with full paths of leaf nodes - * @param node current node which we're visiting - * @param currentPath current node path from root node - * @param processLeafFn leaf processor function which is called on every leaf discovered - * @param flattenPaths list of full paths of all leaf nodes relative to root - */ - fun traverseMappingsAndUpdate( - node: MutableMap, - currentPath: String, - processLeafFn: (String, String, MutableMap) -> Triple>, - flattenPaths: MutableMap> - ) { - // If node contains "properties" property then it is internal(non-leaf) node - log.debug("Node in traverse: $node") - // newNodes will hold list of updated leaf properties - var newNodes = ArrayList>(node.size) - node.entries.forEach { - // Compute full path relative to root - val fullPath = if (currentPath.isEmpty()) it.key - else "$currentPath.${it.key}" - val nodeProps = it.value as MutableMap - // If it has type property and type is not "nested" then this is a leaf - if (nodeProps.containsKey(TYPE) && nodeProps[TYPE] != NESTED) { - // At this point we know full path of node, so we add it to output array - flattenPaths.put(fullPath, nodeProps) - // Calls processLeafFn and gets old node name, new node name and new properties of node. - // This is all information we need to update this node - val (oldName, newName, props) = processLeafFn(it.key, fullPath, it.value as MutableMap) - newNodes.add(Triple(oldName, newName, props)) - } else { - // Internal(non-leaf) node - visit children - traverseMappingsAndUpdate(nodeProps[PROPERTIES] as MutableMap, fullPath, processLeafFn, flattenPaths) - } - } - // Here we can update all processed leaves in tree - newNodes.forEach { - // If we renamed leaf, we have to remove it first - if (it.first != it.second) { - node.remove(it.first) - } - // Put new properties of leaf - node.put(it.second, it.third) - } - } - - suspend fun indexDocLevelQueries( - monitor: Monitor, - monitorId: String, - monitorMetadata: MonitorMetadata, - refreshPolicy: RefreshPolicy = RefreshPolicy.IMMEDIATE, - indexTimeout: TimeValue - ) { - val docLevelMonitorInput = monitor.inputs[0] as DocLevelMonitorInput - val queries: List = docLevelMonitorInput.queries - - val indices = docLevelMonitorInput.indices - val clusterState = clusterService.state() - - // Run through each backing index and apply appropriate mappings to query index - indices.forEach { indexName -> - var concreteIndices = IndexUtils.resolveAllIndices( - listOf(indexName), - monitorCtx.clusterService!!, - monitorCtx.indexNameExpressionResolver!! 
- ) - if (IndexUtils.isAlias(indexName, monitorCtx.clusterService!!.state()) || - IndexUtils.isDataStream(indexName, monitorCtx.clusterService!!.state()) - ) { - val lastWriteIndex = concreteIndices.find { monitorMetadata.lastRunContext.containsKey(it) } - if (lastWriteIndex != null) { - val lastWriteIndexCreationDate = - IndexUtils.getCreationDateForIndex(lastWriteIndex, monitorCtx.clusterService!!.state()) - concreteIndices = IndexUtils.getNewestIndicesByCreationDate( - concreteIndices, - monitorCtx.clusterService!!.state(), - lastWriteIndexCreationDate - ) - } - } - val updatedIndexName = indexName.replace("*", "_") - val updatedProperties = mutableMapOf() - val allFlattenPaths = mutableSetOf>() - var sourceIndexFieldLimit = 0L - val conflictingFields = getAllConflictingFields(clusterState, concreteIndices) - - concreteIndices.forEach { concreteIndexName -> - if (clusterState.routingTable.hasIndex(concreteIndexName)) { - val indexMetadata = clusterState.metadata.index(concreteIndexName) - if (indexMetadata.mapping()?.sourceAsMap?.get("properties") != null) { - val properties = ( - (indexMetadata.mapping()?.sourceAsMap?.get("properties")) - as MutableMap - ) - // Node processor function is used to process leaves of index mappings tree - // - val leafNodeProcessor = - fun(fieldName: String, fullPath: String, props: MutableMap): - Triple> { - val newProps = props.toMutableMap() - if (monitor.dataSources.queryIndexMappingsByType.isNotEmpty()) { - val mappingsByType = monitor.dataSources.queryIndexMappingsByType - if (props.containsKey("type") && mappingsByType.containsKey(props["type"]!!)) { - mappingsByType[props["type"]]?.entries?.forEach { iter: Map.Entry -> - newProps[iter.key] = iter.value - } - } - } - - return if (conflictingFields.contains(fullPath)) { - if (props.containsKey("path")) { - newProps["path"] = "${props["path"]}_${concreteIndexName}_$monitorId" - } - Triple(fieldName, "${fieldName}_${concreteIndexName}_$monitorId", newProps) - } else { - if (props.containsKey("path")) { - newProps["path"] = "${props["path"]}_${updatedIndexName}_$monitorId" - } - Triple(fieldName, "${fieldName}_${updatedIndexName}_$monitorId", newProps) - } - } - // Traverse and update index mappings here while extracting flatten field paths - val flattenPaths = mutableMapOf>() - traverseMappingsAndUpdate(properties, "", leafNodeProcessor, flattenPaths) - flattenPaths.keys.forEach { allFlattenPaths.add(Pair(it, concreteIndexName)) } - // Updated mappings ready to be applied on queryIndex - properties.forEach { - if ( - it.value is Map<*, *> && - (it.value as Map).containsKey("type") && - (it.value as Map)["type"] == NESTED - ) { - } else { - if (updatedProperties.containsKey(it.key) && updatedProperties[it.key] != it.value) { - val mergedField = mergeConflictingFields( - updatedProperties[it.key] as Map, - it.value as Map - ) - updatedProperties[it.key] = mergedField - } else { - updatedProperties[it.key] = it.value - } - } - } - sourceIndexFieldLimit += checkMaxFieldLimit(concreteIndexName) - } - } - } - // Updates mappings of concrete queryIndex. This can rollover queryIndex if field mapping limit is reached. 
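// Editorial note (not in the original source): updateQueryIndexMappings() below first
// attempts a plain PUT mapping on the alias's current write index; if that fails with
// "Limit of total fields", it rolls the query index alias over to a fresh backing index,
// re-applies the mappings there, and records the resulting write index in
// monitorMetadata.sourceToQueryIndexMapping so later runs target the right concrete index.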
- val (updateMappingResponse, concreteQueryIndex) = updateQueryIndexMappings( - monitor, - monitorMetadata, - updatedIndexName, - sourceIndexFieldLimit, - updatedProperties - ) - - if (updateMappingResponse.isAcknowledged) { - doIndexAllQueries( - concreteQueryIndex, - updatedIndexName, - monitorId, - queries, - allFlattenPaths, - conflictingFields, - refreshPolicy, - indexTimeout - ) - } - } - } - - private suspend fun doIndexAllQueries( - concreteQueryIndex: String, - sourceIndex: String, - monitorId: String, - queries: List<DocLevelQuery>, - flattenPaths: MutableSet<Pair<String, String>>, - conflictingPaths: Set<String>, - refreshPolicy: RefreshPolicy, - indexTimeout: TimeValue - ) { - val indexRequests = mutableListOf<IndexRequest>() - val conflictingPathToConcreteIndices = mutableMapOf<String, MutableSet<String>>() - flattenPaths.forEach { fieldPath -> - if (conflictingPaths.contains(fieldPath.first)) { - if (conflictingPathToConcreteIndices.containsKey(fieldPath.first)) { - val concreteIndexSet = conflictingPathToConcreteIndices[fieldPath.first] - concreteIndexSet!!.add(fieldPath.second) - conflictingPathToConcreteIndices[fieldPath.first] = concreteIndexSet - } else { - val concreteIndexSet = mutableSetOf<String>() - concreteIndexSet.add(fieldPath.second) - conflictingPathToConcreteIndices[fieldPath.first] = concreteIndexSet - } - } - } - - val newQueries = mutableListOf<DocLevelQuery>() - queries.forEach { - val filteredConcreteIndices = mutableSetOf<String>() - var query = it.query - conflictingPaths.forEach { conflictingPath -> - if (query.contains(conflictingPath)) { - query = query.replace("$conflictingPath:", "${conflictingPath}__$monitorId:") - filteredConcreteIndices.addAll(conflictingPathToConcreteIndices[conflictingPath]!!) - } - } - - if (filteredConcreteIndices.isNotEmpty()) { - filteredConcreteIndices.forEach { filteredConcreteIndex -> - val newQuery = it.copy( - id = "${it.id}_$filteredConcreteIndex", - query = query.replace("", filteredConcreteIndex) - ) - newQueries.add(newQuery) - } - } else { - newQueries.add(it.copy(id = "${it.id}_$sourceIndex")) - } - } - - newQueries.forEach { - var query = it.query - flattenPaths.forEach { fieldPath -> - if (!conflictingPaths.contains(fieldPath.first)) { - query = query.replace("${fieldPath.first}:", "${fieldPath.first}_${sourceIndex}_$monitorId:") - } - } - val indexRequest = IndexRequest(concreteQueryIndex) - .id(it.id + "_$monitorId") - .source( - mapOf( - "query" to mapOf("query_string" to mapOf("query" to query, "fields" to it.fields)), - "monitor_id" to monitorId, - "index" to sourceIndex - ) - ) - indexRequests.add(indexRequest) - } - log.debug("bulk inserting percolate [${queries.size}] queries") - if (indexRequests.isNotEmpty()) { - val bulkResponse: BulkResponse = client.suspendUntil { - client.bulk( - BulkRequest().setRefreshPolicy(refreshPolicy).timeout(indexTimeout).add(indexRequests), - it - ) - } - bulkResponse.forEach { bulkItemResponse -> - if (bulkItemResponse.isFailed) { - log.debug(bulkItemResponse.failureMessage) - } - } - } - } - - private suspend fun updateQueryIndexMappings( - monitor: Monitor, - monitorMetadata: MonitorMetadata, - sourceIndex: String, - sourceIndexFieldLimit: Long, - updatedProperties: MutableMap<String, Any> - ): Pair<AcknowledgedResponse, String> { - var targetQueryIndex = monitorMetadata.sourceToQueryIndexMapping[sourceIndex + monitor.id] - if (targetQueryIndex == null) { - // queryIndex is an alias that will always have only one backing index, which is the write index. - // This is due to the fact that the _rollover API maintains only a single index under the alias - // unless the is_write_index setting is added when creating the index initially -
-            targetQueryIndex = getWriteIndexNameForAlias(monitor.dataSources.queryIndex)
-            if (targetQueryIndex == null) {
-                val message = "Failed to get write index for queryIndex alias:${monitor.dataSources.queryIndex}"
-                log.error(message)
-                throw AlertingException.wrap(
-                    OpenSearchStatusException(message, RestStatus.INTERNAL_SERVER_ERROR)
-                )
-            }
-            monitorMetadata.sourceToQueryIndexMapping[sourceIndex + monitor.id] = targetQueryIndex
-        }
-        val updateMappingRequest = PutMappingRequest(targetQueryIndex)
-        updateMappingRequest.source(mapOf<String, Any>("properties" to updatedProperties))
-        var updateMappingResponse = AcknowledgedResponse(false)
-        try {
-            // Adjust max field limit in mappings for query index, if needed.
-            adjustMaxFieldLimitForQueryIndex(sourceIndexFieldLimit, targetQueryIndex)
-            updateMappingResponse = client.suspendUntil {
-                client.admin().indices().putMapping(updateMappingRequest, it)
-            }
-            return Pair(updateMappingResponse, targetQueryIndex)
-        } catch (e: Exception) {
-            val unwrappedException = ExceptionsHelper.unwrapCause(e) as Exception
-            log.debug("exception after rollover queryIndex index: $targetQueryIndex exception: ${unwrappedException.message}")
-            // If we reached the limit for the total number of fields in mappings, do a rollover here
-            if (unwrappedException.message?.contains("Limit of total fields") == true) {
-                try {
-                    // Do queryIndex rollover
-                    targetQueryIndex = rolloverQueryIndex(monitor)
-                    // Adjust max field limit in mappings for the new index.
-                    adjustMaxFieldLimitForQueryIndex(sourceIndexFieldLimit, targetQueryIndex)
-                    // PUT mappings to the newly created index
-                    val updateMappingRequest = PutMappingRequest(targetQueryIndex)
-                    updateMappingRequest.source(mapOf<String, Any>("properties" to updatedProperties))
-                    updateMappingResponse = client.suspendUntil {
-                        client.admin().indices().putMapping(updateMappingRequest, it)
-                    }
-                } catch (e: Exception) {
-                    // If we reached the limit for the total number of fields in mappings even after rollover,
-                    // it means that the source index has more than (FIELD_LIMIT - 3) fields (every query index has 3 fields defined)
-                    // TODO maybe split queries/mappings between multiple query indices?
-                    val unwrappedException = ExceptionsHelper.unwrapCause(e) as Exception
-                    log.debug("exception after rollover queryIndex index: $targetQueryIndex exception: ${unwrappedException.message}")
-                    if (unwrappedException.message?.contains("Limit of total fields") == true) {
-                        val errorMessage =
-                            "Monitor [${monitorMetadata.monitorId}] can't process index [$sourceIndex] due to field mapping limit"
-                        log.error(errorMessage)
-                        throw AlertingException(errorMessage, RestStatus.INTERNAL_SERVER_ERROR, e)
-                    } else {
-                        throw AlertingException.wrap(e)
-                    }
-                }
-            } else {
-                log.debug("unknown exception during PUT mapping on queryIndex: $targetQueryIndex")
-                val unwrappedException = ExceptionsHelper.unwrapCause(e) as Exception
-                throw AlertingException.wrap(unwrappedException)
-            }
-        }
-        // We did a rollover, so try to apply mappings again on the new targetQueryIndex
-        if (targetQueryIndex.isNotEmpty()) {
-            // Add the newly created index to the monitor's metadata object so that we can fetch it later on, when either applying mappings or running queries
-            monitorMetadata.sourceToQueryIndexMapping[sourceIndex + monitor.id] = targetQueryIndex
-        } else {
-            val failureMessage = "Failed to resolve targetQueryIndex!"
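The retry branch above keys off the exception message to detect the mapper's total-fields limit. That check can be captured in a small helper; note this is a sketch that relies on message matching exactly as the code above does, not on a stable exception type:

    fun isTotalFieldsLimitException(e: Exception): Boolean {
        // unwrapCause strips the remote/transport wrappers, same as in the hunk above
        val cause = ExceptionsHelper.unwrapCause(e)
        return cause.message?.contains("Limit of total fields") == true
    }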
-            log.error(failureMessage)
-            throw AlertingException(failureMessage, RestStatus.INTERNAL_SERVER_ERROR, IllegalStateException(failureMessage))
-        }
-        return Pair(updateMappingResponse, targetQueryIndex)
-    }
-
-    /**
-     * Merges conflicting leaf fields in the mapping tree.
-     */
-    private fun mergeConflictingFields(oldField: Map<String, Any>, newField: Map<String, Any>): Map<String, Any> {
-        val mergedField = mutableMapOf<String, Any>()
-        oldField.entries.forEach {
-            if (newField.containsKey(it.key)) {
-                if (it.value is Map<*, *> && newField[it.key] is Map<*, *>) {
-                    mergedField[it.key] =
-                        mergeConflictingFields(it.value as Map<String, Any>, newField[it.key] as Map<String, Any>)
-                } else {
-                    mergedField[it.key] = it.value
-                }
-            } else {
-                mergedField[it.key] = it.value
-            }
-        }
-
-        newField.entries.forEach {
-            if (!oldField.containsKey(it.key)) {
-                mergedField[it.key] = it.value
-            }
-        }
-        return mergedField
-    }
-
-    /**
-     * Gets all fields which have the same name but different mappings across the concrete indices of an index pattern.
-     */
-    fun getAllConflictingFields(clusterState: ClusterState, concreteIndices: List<String>): Set<String> {
-        val conflictingFields = mutableSetOf<String>()
-        val allFlattenPaths = mutableMapOf<String, MutableMap<String, Any>>()
-        concreteIndices.forEach { concreteIndexName ->
-            if (clusterState.routingTable.hasIndex(concreteIndexName)) {
-                val indexMetadata = clusterState.metadata.index(concreteIndexName)
-                if (indexMetadata.mapping()?.sourceAsMap?.get("properties") != null) {
-                    val properties = (
-                        (indexMetadata.mapping()?.sourceAsMap?.get("properties"))
-                            as MutableMap<String, Any>
-                        )
-                    // Node processor function is used to process leaves of index mappings tree
-                    //
-                    val leafNodeProcessor =
-                        fun(fieldName: String, _: String, props: MutableMap<String, Any>): Triple<String, String, MutableMap<String, Any>> {
-                            return Triple(fieldName, fieldName, props)
-                        }
-                    // Traverse and update index mappings here while extracting flatten field paths
-                    val flattenPaths = mutableMapOf<String, MutableMap<String, Any>>()
-                    traverseMappingsAndUpdate(properties, "", leafNodeProcessor, flattenPaths)
-
-                    flattenPaths.forEach {
-                        if (allFlattenPaths.containsKey(it.key) && allFlattenPaths[it.key]!! != it.value) {
-                            conflictingFields.add(it.key)
-                        }
-                        allFlattenPaths.putIfAbsent(it.key, it.value)
-                    }
-                }
-            }
-        }
-        return conflictingFields
-    }
-
-    /**
-     * Checks the max field limit for a concrete index.
-     */
-    private suspend fun checkMaxFieldLimit(sourceIndex: String): Long {
-        val getSettingsResponse: GetSettingsResponse = client.suspendUntil {
-            admin().indices().getSettings(GetSettingsRequest().indices(sourceIndex), it)
-        }
-        return getSettingsResponse.getSetting(sourceIndex, INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.key)?.toLong() ?: 1000L
-    }
-
-    /**
-     * Adjusts the max field limit index setting for the query index if the source index has a higher limit.
-     * This prevents a max field limit exception when the source index has more fields than the query index limit allows.
-     */
-    private suspend fun adjustMaxFieldLimitForQueryIndex(sourceIndexFieldLimit: Long, concreteQueryIndex: String) {
-        val getSettingsResponse: GetSettingsResponse = client.suspendUntil {
-            admin().indices().getSettings(GetSettingsRequest().indices(concreteQueryIndex), it)
-        }
-        val queryIndexLimit =
-            getSettingsResponse.getSetting(concreteQueryIndex, INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.key)?.toLong() ?: 1000L
-        // Our query index initially has 3 fields we defined and 5 more builtin metadata fields in its mappings, so we have to account for that
-        if (sourceIndexFieldLimit > (queryIndexLimit - QUERY_INDEX_BASE_FIELDS_COUNT)) {
-            val updateSettingsResponse: AcknowledgedResponse = client.suspendUntil {
-                admin().indices().updateSettings(
-                    UpdateSettingsRequest(concreteQueryIndex).settings(
-                        Settings.builder().put(
-                            INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.key,
-                            sourceIndexFieldLimit + QUERY_INDEX_BASE_FIELDS_COUNT
-                        )
-                    ),
-                    it
-                )
-            }
-        }
-    }
-
-    private suspend fun rolloverQueryIndex(monitor: Monitor): String {
-        val queryIndex = monitor.dataSources.queryIndex
-        val queryIndexPattern = monitor.dataSources.queryIndex + INDEX_PATTERN_SUFFIX
-
-        val request = RolloverRequest(queryIndex, null)
-        request.createIndexRequest.index(queryIndexPattern)
-            .mapping(docLevelQueriesMappings())
-            .settings(docLevelQueriesSettings())
-        val response: RolloverResponse = client.suspendUntil {
-            client.admin().indices().rolloverIndex(request, it)
-        }
-        if (response.isRolledOver == false) {
-            val message = "failed to rollover queryIndex:$queryIndex queryIndexPattern:$queryIndexPattern"
-            log.error(message)
-            throw AlertingException.wrap(
-                OpenSearchStatusException(message, RestStatus.INTERNAL_SERVER_ERROR)
-            )
-        }
-        return response.newIndex
-    }
-
-    private fun getWriteIndexNameForAlias(alias: String): String?
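A worked check of the headroom arithmetic used by adjustMaxFieldLimitForQueryIndex above, assuming QUERY_INDEX_BASE_FIELDS_COUNT accounts for the 3 defined plus 5 builtin metadata fields mentioned in the comment (the value 8 is an assumption for illustration):

    val baseFieldsCount = 8L      // assumption: 3 defined + 5 builtin metadata fields
    val sourceIndexFieldLimit = 2000L
    val queryIndexLimit = 1000L   // default index.mapping.total_fields.limit
    val needsRaise = sourceIndexFieldLimit > queryIndexLimit - baseFieldsCount // true
    val newLimit = sourceIndexFieldLimit + baseFieldsCount                     // 2008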
{ - return this.clusterService.state().metadata().indicesLookup?.get(alias)?.writeIndex?.index?.name - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/util/IndexUtils.kt b/alerting/bin/main/org/opensearch/alerting/util/IndexUtils.kt deleted file mode 100644 index 387f5cb22..000000000 --- a/alerting/bin/main/org/opensearch/alerting/util/IndexUtils.kt +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util - -import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest -import org.opensearch.action.support.IndicesOptions -import org.opensearch.action.support.master.AcknowledgedResponse -import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.core.ScheduledJobIndices -import org.opensearch.client.IndicesAdminClient -import org.opensearch.cluster.ClusterState -import org.opensearch.cluster.metadata.IndexAbstraction -import org.opensearch.cluster.metadata.IndexMetadata -import org.opensearch.cluster.metadata.IndexNameExpressionResolver -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.util.IndexUtils -import org.opensearch.core.action.ActionListener -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.XContentParser - -class IndexUtils { - - companion object { - val VALID_INDEX_NAME_REGEX = Regex("""^(?![_\-\+])(?!.*\.\.)[^\s,\\\/\*\?"<>|#:\.]{1,255}$""") - - const val _META = "_meta" - const val SCHEMA_VERSION = "schema_version" - - var scheduledJobIndexSchemaVersion: Int - private set - var alertIndexSchemaVersion: Int - private set - var findingIndexSchemaVersion: Int - private set - - var scheduledJobIndexUpdated: Boolean = false - private set - var alertIndexUpdated: Boolean = false - private set - var findingIndexUpdated: Boolean = false - private set - var lastUpdatedAlertHistoryIndex: String? = null - var lastUpdatedFindingHistoryIndex: String? 
= null - - init { - scheduledJobIndexSchemaVersion = getSchemaVersion(ScheduledJobIndices.scheduledJobMappings()) - alertIndexSchemaVersion = getSchemaVersion(AlertIndices.alertMapping()) - findingIndexSchemaVersion = getSchemaVersion(AlertIndices.findingMapping()) - } - - @JvmStatic - fun scheduledJobIndexUpdated() { - scheduledJobIndexUpdated = true - } - - @JvmStatic - fun alertIndexUpdated() { - alertIndexUpdated = true - } - - @JvmStatic - fun findingIndexUpdated() { - findingIndexUpdated = true - } - - @JvmStatic - fun getSchemaVersion(mapping: String): Int { - val xcp = XContentType.JSON.xContent().createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - mapping - ) - - while (!xcp.isClosed) { - val token = xcp.currentToken() - if (token != null && token != XContentParser.Token.END_OBJECT && token != XContentParser.Token.START_OBJECT) { - if (xcp.currentName() != _META) { - xcp.nextToken() - xcp.skipChildren() - } else { - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - when (xcp.currentName()) { - SCHEMA_VERSION -> { - val version = xcp.intValue() - require(version > -1) - return version - } - else -> xcp.nextToken() - } - } - } - } - xcp.nextToken() - } - return IndexUtils.NO_SCHEMA_VERSION - } - - @JvmStatic - fun getIndexNameWithAlias(clusterState: ClusterState, alias: String): String { - return clusterState.metadata.indices.entries.first { it.value.aliases.containsKey(alias) }.key - } - - @JvmStatic - fun shouldUpdateIndex(index: IndexMetadata, mapping: String): Boolean { - var oldVersion = IndexUtils.NO_SCHEMA_VERSION - val newVersion = getSchemaVersion(mapping) - - val indexMapping = index.mapping()?.sourceAsMap() - if (indexMapping != null && indexMapping.containsKey(_META) && indexMapping[_META] is HashMap<*, *>) { - val metaData = indexMapping[_META] as HashMap<*, *> - if (metaData.containsKey(SCHEMA_VERSION)) { - oldVersion = metaData[SCHEMA_VERSION] as Int - } - } - return newVersion > oldVersion - } - - @JvmStatic - fun updateIndexMapping( - index: String, - mapping: String, - clusterState: ClusterState, - client: IndicesAdminClient, - actionListener: ActionListener - ) { - if (clusterState.metadata.indices.containsKey(index)) { - if (shouldUpdateIndex(clusterState.metadata.indices[index]!!, mapping)) { - val putMappingRequest: PutMappingRequest = PutMappingRequest(index).source(mapping, XContentType.JSON) - client.putMapping(putMappingRequest, actionListener) - } else { - actionListener.onResponse(AcknowledgedResponse(true)) - } - } - } - - @JvmStatic - fun resolveAllIndices(indices: List, clusterService: ClusterService, resolver: IndexNameExpressionResolver): List { - val result = mutableListOf() - - indices.forEach { index -> - val concreteIndices = resolver.concreteIndexNames( - clusterService.state(), - IndicesOptions.lenientExpand(), - true, - index - ) - result.addAll(concreteIndices) - } - - return result - } - - @JvmStatic - fun isDataStream(name: String, clusterState: ClusterState): Boolean { - return clusterState.metadata().dataStreams().containsKey(name) - } - - @JvmStatic - fun isAlias(name: String, clusterState: ClusterState): Boolean { - return clusterState.metadata().hasAlias(name) - } - - @JvmStatic - fun getWriteIndex(index: String, clusterState: ClusterState): String? 
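The shouldUpdateIndex/updateIndexMapping pair above implements schema-version gating: a PutMapping is only issued when the bundled mapping's _meta.schema_version is newer than the live index's. A hedged usage sketch; the target index name and listener bodies are illustrative, and clusterService/client are assumed to come from the caller:

    IndexUtils.updateIndexMapping(
        index = ".opendistro-alerting-alerts",   // illustrative target
        mapping = AlertIndices.alertMapping(),
        clusterState = clusterService.state(),
        client = client.admin().indices(),
        actionListener = object : ActionListener<AcknowledgedResponse> {
            override fun onResponse(response: AcknowledgedResponse) { /* mapping is current */ }
            override fun onFailure(e: Exception) { /* log or surface the failure */ }
        }
    )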
{ - if (isAlias(index, clusterState) || isDataStream(index, clusterState)) { - val metadata = clusterState.metadata.indicesLookup[index]?.writeIndex - if (metadata != null) { - return metadata.index.name - } - } - return null - } - - @JvmStatic - fun getNewestIndicesByCreationDate(concreteIndices: List, clusterState: ClusterState, thresholdDate: Long): List { - val filteredIndices = mutableListOf() - val lookup = clusterState.metadata().indicesLookup - concreteIndices.forEach { indexName -> - val index = lookup[indexName] - val indexMetadata = clusterState.metadata.index(indexName) - if (index != null && index.type == IndexAbstraction.Type.CONCRETE_INDEX) { - if (indexMetadata.creationDate >= thresholdDate) { - filteredIndices.add(indexName) - } - } - } - return filteredIndices - } - - @JvmStatic - fun getCreationDateForIndex(index: String, clusterState: ClusterState): Long { - return clusterState.metadata.index(index).creationDate - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/util/RestHandlerUtils.kt b/alerting/bin/main/org/opensearch/alerting/util/RestHandlerUtils.kt deleted file mode 100644 index b5aeaa542..000000000 --- a/alerting/bin/main/org/opensearch/alerting/util/RestHandlerUtils.kt +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util - -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.core.common.Strings -import org.opensearch.rest.RestRequest -import org.opensearch.search.fetch.subphase.FetchSourceContext - -/** - * Checks to see if the request came from Kibana, if so we want to return the UI Metadata from the document. - * If the request came from the client then we exclude the UI Metadata from the search result. - * - * @param request - * @return FetchSourceContext - */ -fun context(request: RestRequest): FetchSourceContext? 
{ - val userAgent = if (request.header("User-Agent") == null) "" else request.header("User-Agent") - return if (!userAgent.contains(AlertingPlugin.OPEN_SEARCH_DASHBOARDS_USER_AGENT)) { - FetchSourceContext(true, Strings.EMPTY_ARRAY, AlertingPlugin.UI_METADATA_EXCLUDE) - } else null -} - -const val IF_SEQ_NO = "if_seq_no" -const val IF_PRIMARY_TERM = "if_primary_term" -const val REFRESH = "refresh" diff --git a/alerting/bin/main/org/opensearch/alerting/util/ScheduledJobUtils.kt b/alerting/bin/main/org/opensearch/alerting/util/ScheduledJobUtils.kt deleted file mode 100644 index 70fe42a38..000000000 --- a/alerting/bin/main/org/opensearch/alerting/util/ScheduledJobUtils.kt +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util - -import org.apache.logging.log4j.LogManager -import org.opensearch.OpenSearchStatusException -import org.opensearch.action.get.GetResponse -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry - -private val log = LogManager.getLogger(ScheduledJobUtils::class.java) - -class ScheduledJobUtils { - companion object { - const val WORKFLOW_DELEGATE_PATH = "workflow.inputs.composite_input.sequence.delegates" - const val WORKFLOW_MONITOR_PATH = "workflow.inputs.composite_input.sequence.delegates.monitor_id" - fun parseWorkflowFromScheduledJobDocSource(xContentRegistry: NamedXContentRegistry, response: GetResponse): Workflow { - XContentHelper.createParser( - xContentRegistry, LoggingDeprecationHandler.INSTANCE, - response.sourceAsBytesRef, XContentType.JSON - ).use { xcp -> - try { - val workflow = ScheduledJob.parse(xcp, response.id, response.version) - if (workflow is Workflow) { - return workflow - } else { - log.error("Unable to parse workflow from ${response.source}") - throw OpenSearchStatusException( - "Unable to parse workflow from ${response.source}", - RestStatus.INTERNAL_SERVER_ERROR - ) - } - } catch (e: java.lang.Exception) { - throw AlertingException("Unable to parse workflow from ${response.source}", RestStatus.INTERNAL_SERVER_ERROR, e) - } - } - } - - fun parseMonitorFromScheduledJobDocSource(xContentRegistry: NamedXContentRegistry, response: GetResponse): Monitor { - XContentHelper.createParser( - xContentRegistry, LoggingDeprecationHandler.INSTANCE, - response.sourceAsBytesRef, XContentType.JSON - ).use { xcp -> - try { - val monitor = ScheduledJob.parse(xcp, response.id, response.version) - if (monitor is Monitor) { - return monitor - } else { - log.error("Unable to parse monitor from ${response.source}") - throw OpenSearchStatusException( - "Unable to parse monitor from ${response.source}", - RestStatus.INTERNAL_SERVER_ERROR - ) - } - } catch (e: java.lang.Exception) { - throw AlertingException("Unable to parse monitor from ${response.source}", RestStatus.INTERNAL_SERVER_ERROR, e) - } - } - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesHelpers.kt b/alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesHelpers.kt deleted file mode 100644 index 8e92b597f..000000000 --- 
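A hedged usage sketch for the parsers above: fetch the scheduled-job document by id and narrow it to the expected type. The GetRequest wiring is illustrative; only the parse call itself comes from the code above:

    val getResponse: GetResponse = client.suspendUntil {
        get(GetRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, monitorId), it)
    }
    val monitor: Monitor =
        ScheduledJobUtils.parseMonitorFromScheduledJobDocSource(xContentRegistry, getResponse)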
a/alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesHelpers.kt +++ /dev/null @@ -1,859 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util.clusterMetricsMonitorHelpers - -import org.apache.logging.log4j.LogManager -import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionRequestValidationException -import org.opensearch.action.ValidateActions -import org.opensearch.action.admin.cluster.health.ClusterHealthRequest -import org.opensearch.action.admin.cluster.health.ClusterHealthResponse -import org.opensearch.action.admin.cluster.state.ClusterStateRequest -import org.opensearch.action.admin.cluster.state.ClusterStateResponse -import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest -import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse -import org.opensearch.action.admin.indices.stats.CommonStats -import org.opensearch.action.admin.indices.stats.IndicesStatsRequest -import org.opensearch.action.admin.indices.stats.IndicesStatsResponse -import org.opensearch.action.support.IndicesOptions -import org.opensearch.alerting.util.IndexUtils.Companion.VALID_INDEX_NAME_REGEX -import org.opensearch.cluster.metadata.IndexMetadata -import org.opensearch.common.time.DateFormatter -import org.opensearch.core.action.ActionResponse -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.common.io.stream.Writeable -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.ToXContentObject -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.index.IndexSettings -import java.time.Instant -import java.time.ZoneOffset -import java.time.ZonedDateTime -import java.util.Locale - -class CatIndicesRequestWrapper(val pathParams: String = "") : ActionRequest() { - val log = LogManager.getLogger(CatIndicesRequestWrapper::class.java) - - var clusterHealthRequest: ClusterHealthRequest = - ClusterHealthRequest().indicesOptions(IndicesOptions.lenientExpandHidden()) - var clusterStateRequest: ClusterStateRequest = - ClusterStateRequest().indicesOptions(IndicesOptions.lenientExpandHidden()) - var indexSettingsRequest: GetSettingsRequest = - GetSettingsRequest() - .indicesOptions(IndicesOptions.lenientExpandHidden()) - .names(IndexSettings.INDEX_SEARCH_THROTTLED.key) - var indicesStatsRequest: IndicesStatsRequest = - IndicesStatsRequest().all().indicesOptions(IndicesOptions.lenientExpandHidden()) - var indicesList = arrayOf() - - init { - if (pathParams.isNotBlank()) { - indicesList = pathParams.split(",").toTypedArray() - - require(validate() == null) { - "The path parameters do not form a valid, comma-separated list of data streams, indices, or index aliases." - } - - clusterHealthRequest = clusterHealthRequest.indices(*indicesList) - clusterStateRequest = clusterStateRequest.indices(*indicesList) - indexSettingsRequest = indexSettingsRequest.indices(*indicesList) - indicesStatsRequest = indicesStatsRequest.indices(*indicesList) - } - } - - override fun validate(): ActionRequestValidationException? { - var exception: ActionRequestValidationException? 
= null - if (pathParams.isNotBlank() && indicesList.any { !VALID_INDEX_NAME_REGEX.containsMatchIn(it) }) - exception = ValidateActions.addValidationError( - "The path parameters do not form a valid, comma-separated list of data streams, indices, or index aliases.", - exception - ) - return exception - } -} - -class CatIndicesResponseWrapper( - clusterHealthResponse: ClusterHealthResponse, - clusterStateResponse: ClusterStateResponse, - indexSettingsResponse: GetSettingsResponse, - indicesStatsResponse: IndicesStatsResponse -) : ActionResponse(), ToXContentObject { - var indexInfoList: List = listOf() - - init { - indexInfoList = compileIndexInfo( - clusterHealthResponse, - clusterStateResponse, - indexSettingsResponse, - indicesStatsResponse - ) - } - - companion object { - const val WRAPPER_FIELD = "indices" - } - - override fun writeTo(out: StreamOutput) { - out.writeList(indexInfoList) - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - builder.startArray(WRAPPER_FIELD) - indexInfoList.forEach { it.toXContent(builder, params) } - builder.endArray() - return builder.endObject() - } - - private fun compileIndexInfo( - clusterHealthResponse: ClusterHealthResponse, - clusterStateResponse: ClusterStateResponse, - indexSettingsResponse: GetSettingsResponse, - indicesStatsResponse: IndicesStatsResponse - ): List { - val list = mutableListOf() - - val indicesSettings = indexSettingsResponse.indexToSettings - val indicesHealths = clusterHealthResponse.indices - val indicesStats = indicesStatsResponse.indices - val indicesMetadatas = hashMapOf() - clusterStateResponse.state.metadata.forEach { indicesMetadatas[it.index.name] = it } - - indicesSettings.forEach { (indexName, settings) -> - if (!indicesMetadatas.containsKey(indexName)) return@forEach - - val indexMetadata = indicesMetadatas[indexName] - val indexState = indexMetadata?.state - val indexStats = indicesStats[indexName] - val searchThrottled = IndexSettings.INDEX_SEARCH_THROTTLED.get(settings) - val indexHealth = indicesHealths[indexName] - - var health = "" - if (indexHealth != null) { - health = indexHealth.status.toString().lowercase(Locale.ROOT) - } else if (indexStats != null) { - health = "red*" - } - - val primaryStats: CommonStats? - val totalStats: CommonStats? 
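The health fallback above mirrors _cat/indices behavior: an index that has stats but no entry in the cluster-health response is rendered as "red*". The same logic compressed into a single expression, as a sketch rather than the shipped code:

    val health = indexHealth?.status?.toString()?.lowercase(Locale.ROOT)
        ?: if (indexStats != null) "red*" else ""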
- if (indexStats == null || indexState == IndexMetadata.State.CLOSE) { - primaryStats = CommonStats() - totalStats = CommonStats() - } else { - primaryStats = indexStats.primaries - totalStats = indexStats.total - } - - list.add( - IndexInfo( - health = health, - status = indexState.toString().lowercase(Locale.ROOT), - index = indexName, - uuid = indexMetadata?.indexUUID, - pri = "${indexHealth?.numberOfShards}", - rep = "${indexHealth?.numberOfReplicas}", - docsCount = "${primaryStats?.getDocs()?.count}", - docsDeleted = "${primaryStats?.getDocs()?.deleted}", - creationDate = "${indexMetadata?.creationDate}", - creationDateString = DateFormatter.forPattern("strict_date_time") - .format(ZonedDateTime.ofInstant(Instant.ofEpochMilli(indexMetadata!!.creationDate), ZoneOffset.UTC)), - storeSize = "${totalStats?.store?.size}", - priStoreSize = "${primaryStats?.store?.size}", - completionSize = "${totalStats?.completion?.size}", - priCompletionSize = "${primaryStats?.completion?.size}", - fieldDataMemorySize = "${totalStats?.fieldData?.memorySize}", - priFieldDataMemorySize = "${primaryStats?.fieldData?.memorySize}", - fieldDataEvictions = "${totalStats?.fieldData?.evictions}", - priFieldDataEvictions = "${primaryStats?.fieldData?.evictions}", - queryCacheMemorySize = "${totalStats?.queryCache?.memorySize}", - priQueryCacheMemorySize = "${primaryStats?.queryCache?.memorySize}", - queryCacheEvictions = "${totalStats?.queryCache?.evictions}", - priQueryCacheEvictions = "${primaryStats?.queryCache?.evictions}", - requestCacheMemorySize = "${totalStats?.requestCache?.memorySize}", - priRequestCacheMemorySize = "${primaryStats?.requestCache?.memorySize}", - requestCacheEvictions = "${totalStats?.requestCache?.evictions}", - priRequestCacheEvictions = "${primaryStats?.requestCache?.evictions}", - requestCacheHitCount = "${totalStats?.requestCache?.hitCount}", - priRequestCacheHitCount = "${primaryStats?.requestCache?.hitCount}", - requestCacheMissCount = "${totalStats?.requestCache?.missCount}", - priRequestCacheMissCount = "${primaryStats?.requestCache?.missCount}", - flushTotal = "${totalStats?.flush?.total}", - priFlushTotal = "${primaryStats?.flush?.total}", - flushTotalTime = "${totalStats?.flush?.totalTime}", - priFlushTotalTime = "${primaryStats?.flush?.totalTime}", - getCurrent = "${totalStats?.get?.current()}", - priGetCurrent = "${primaryStats?.get?.current()}", - getTime = "${totalStats?.get?.time}", - priGetTime = "${primaryStats?.get?.time}", - getTotal = "${totalStats?.get?.count}", - priGetTotal = "${primaryStats?.get?.count}", - getExistsTime = "${totalStats?.get?.existsTime}", - priGetExistsTime = "${primaryStats?.get?.existsTime}", - getExistsTotal = "${totalStats?.get?.existsCount}", - priGetExistsTotal = "${primaryStats?.get?.existsCount}", - getMissingTime = "${totalStats?.get?.missingTime}", - priGetMissingTime = "${primaryStats?.get?.missingTime}", - getMissingTotal = "${totalStats?.get?.missingCount}", - priGetMissingTotal = "${primaryStats?.get?.missingCount}", - indexingDeleteCurrent = "${totalStats?.indexing?.total?.deleteCurrent}", - priIndexingDeleteCurrent = "${primaryStats?.indexing?.total?.deleteCurrent}", - indexingDeleteTime = "${totalStats?.indexing?.total?.deleteTime}", - priIndexingDeleteTime = "${primaryStats?.indexing?.total?.deleteTime}", - indexingDeleteTotal = "${totalStats?.indexing?.total?.deleteCount}", - priIndexingDeleteTotal = "${primaryStats?.indexing?.total?.deleteCount}", - indexingIndexCurrent = "${totalStats?.indexing?.total?.indexCurrent}", - 
priIndexingIndexCurrent = "${primaryStats?.indexing?.total?.indexCurrent}", - indexingIndexTime = "${totalStats?.indexing?.total?.indexTime}", - priIndexingIndexTime = "${primaryStats?.indexing?.total?.indexTime}", - indexingIndexTotal = "${totalStats?.indexing?.total?.indexCount}", - priIndexingIndexTotal = "${primaryStats?.indexing?.total?.indexCount}", - indexingIndexFailed = "${totalStats?.indexing?.total?.indexFailedCount}", - priIndexingIndexFailed = "${primaryStats?.indexing?.total?.indexFailedCount}", - mergesCurrent = "${totalStats?.merge?.current}", - priMergesCurrent = "${primaryStats?.merge?.current}", - mergesCurrentDocs = "${totalStats?.merge?.currentNumDocs}", - priMergesCurrentDocs = "${primaryStats?.merge?.currentNumDocs}", - mergesCurrentSize = "${totalStats?.merge?.currentSize}", - priMergesCurrentSize = "${primaryStats?.merge?.currentSize}", - mergesTotal = "${totalStats?.merge?.total}", - priMergesTotal = "${primaryStats?.merge?.total}", - mergesTotalDocs = "${totalStats?.merge?.totalNumDocs}", - priMergesTotalDocs = "${primaryStats?.merge?.totalNumDocs}", - mergesTotalSize = "${totalStats?.merge?.totalSize}", - priMergesTotalSize = "${primaryStats?.merge?.totalSize}", - mergesTotalTime = "${totalStats?.merge?.totalTime}", - priMergesTotalTime = "${primaryStats?.merge?.totalTime}", - refreshTotal = "${totalStats?.refresh?.total}", - priRefreshTotal = "${primaryStats?.refresh?.total}", - refreshTime = "${totalStats?.refresh?.totalTime}", - priRefreshTime = "${primaryStats?.refresh?.totalTime}", - refreshExternalTotal = "${totalStats?.refresh?.externalTotal}", - priRefreshExternalTotal = "${primaryStats?.refresh?.externalTotal}", - refreshExternalTime = "${totalStats?.refresh?.externalTotalTime}", - priRefreshExternalTime = "${primaryStats?.refresh?.externalTotalTime}", - refreshListeners = "${totalStats?.refresh?.listeners}", - priRefreshListeners = "${primaryStats?.refresh?.listeners}", - searchFetchCurrent = "${totalStats?.search?.total?.fetchCurrent}", - priSearchFetchCurrent = "${primaryStats?.search?.total?.fetchCurrent}", - searchFetchTime = "${totalStats?.search?.total?.fetchTime}", - priSearchFetchTime = "${primaryStats?.search?.total?.fetchTime}", - searchFetchTotal = "${totalStats?.search?.total?.fetchCount}", - priSearchFetchTotal = "${primaryStats?.search?.total?.fetchCount}", - searchOpenContexts = "${totalStats?.search?.openContexts}", - priSearchOpenContexts = "${primaryStats?.search?.openContexts}", - searchQueryCurrent = "${totalStats?.search?.total?.queryCurrent}", - priSearchQueryCurrent = "${primaryStats?.search?.total?.queryCurrent}", - searchQueryTime = "${totalStats?.search?.total?.queryTime}", - priSearchQueryTime = "${primaryStats?.search?.total?.queryTime}", - searchQueryTotal = "${totalStats?.search?.total?.queryCount}", - priSearchQueryTotal = "${primaryStats?.search?.total?.queryCount}", - searchScrollCurrent = "${totalStats?.search?.total?.scrollCurrent}", - priSearchScrollCurrent = "${primaryStats?.search?.total?.scrollCurrent}", - searchScrollTime = "${totalStats?.search?.total?.scrollTime}", - priSearchScrollTime = "${primaryStats?.search?.total?.scrollTime}", - searchScrollTotal = "${totalStats?.search?.total?.scrollCount}", - priSearchScrollTotal = "${primaryStats?.search?.total?.scrollCount}", - searchPointInTimeCurrent = "${totalStats?.search?.total?.pitCurrent}", - priSearchPointInTimeCurrent = "${primaryStats?.search?.total?.pitCurrent}", - searchPointInTimeTime = "${totalStats?.search?.total?.pitTime}", - priSearchPointInTimeTime 
= "${primaryStats?.search?.total?.pitTime}", - searchPointInTimeTotal = "${totalStats?.search?.total?.pitCount}", - priSearchPointInTimeTotal = "${primaryStats?.search?.total?.pitCount}", - segmentsCount = "${totalStats?.segments?.count}", - priSegmentsCount = "${primaryStats?.segments?.count}", - segmentsMemory = "${totalStats?.segments?.zeroMemory}", - priSegmentsMemory = "${primaryStats?.segments?.zeroMemory}", - segmentsIndexWriterMemory = "${totalStats?.segments?.indexWriterMemory}", - priSegmentsIndexWriterMemory = "${primaryStats?.segments?.indexWriterMemory}", - segmentsVersionMapMemory = "${totalStats?.segments?.versionMapMemory}", - priSegmentsVersionMapMemory = "${primaryStats?.segments?.versionMapMemory}", - segmentsFixedBitsetMemory = "${totalStats?.segments?.bitsetMemory}", - priSegmentsFixedBitsetMemory = "${primaryStats?.segments?.bitsetMemory}", - warmerCurrent = "${totalStats?.warmer?.current()}", - priWarmerCurrent = "${primaryStats?.warmer?.current()}", - warmerTotal = "${totalStats?.warmer?.total()}", - priWarmerTotal = "${primaryStats?.warmer?.total()}", - warmerTotalTime = "${totalStats?.warmer?.totalTime()}", - priWarmerTotalTime = "${primaryStats?.warmer?.totalTime()}", - suggestCurrent = "${totalStats?.search?.total?.suggestCurrent}", - priSuggestCurrent = "${primaryStats?.search?.total?.suggestCurrent}", - suggestTime = "${totalStats?.search?.total?.suggestTime}", - priSuggestTime = "${primaryStats?.search?.total?.suggestTime}", - suggestTotal = "${totalStats?.search?.total?.suggestCount}", - priSuggestTotal = "${primaryStats?.search?.total?.suggestCount}", - memoryTotal = "${totalStats?.totalMemory}", - priMemoryTotal = "${primaryStats?.totalMemory}", - searchThrottled = "$searchThrottled", - ) - ) - } - - return list - } - - data class IndexInfo( - val health: String?, - val status: String?, - val index: String?, - val uuid: String?, - val pri: String?, - val rep: String?, - val docsCount: String?, - val docsDeleted: String?, - val creationDate: String?, - val creationDateString: String?, - val storeSize: String?, - val priStoreSize: String?, - val completionSize: String?, - val priCompletionSize: String?, - val fieldDataMemorySize: String?, - val priFieldDataMemorySize: String?, - val fieldDataEvictions: String?, - val priFieldDataEvictions: String?, - val queryCacheMemorySize: String?, - val priQueryCacheMemorySize: String?, - val queryCacheEvictions: String?, - val priQueryCacheEvictions: String?, - val requestCacheMemorySize: String?, - val priRequestCacheMemorySize: String?, - val requestCacheEvictions: String?, - val priRequestCacheEvictions: String?, - val requestCacheHitCount: String?, - val priRequestCacheHitCount: String?, - val requestCacheMissCount: String?, - val priRequestCacheMissCount: String?, - val flushTotal: String?, - val priFlushTotal: String?, - val flushTotalTime: String?, - val priFlushTotalTime: String?, - val getCurrent: String?, - val priGetCurrent: String?, - val getTime: String?, - val priGetTime: String?, - val getTotal: String?, - val priGetTotal: String?, - val getExistsTime: String?, - val priGetExistsTime: String?, - val getExistsTotal: String?, - val priGetExistsTotal: String?, - val getMissingTime: String?, - val priGetMissingTime: String?, - val getMissingTotal: String?, - val priGetMissingTotal: String?, - val indexingDeleteCurrent: String?, - val priIndexingDeleteCurrent: String?, - val indexingDeleteTime: String?, - val priIndexingDeleteTime: String?, - val indexingDeleteTotal: String?, - val priIndexingDeleteTotal: 
String?, - val indexingIndexCurrent: String?, - val priIndexingIndexCurrent: String?, - val indexingIndexTime: String?, - val priIndexingIndexTime: String?, - val indexingIndexTotal: String?, - val priIndexingIndexTotal: String?, - val indexingIndexFailed: String?, - val priIndexingIndexFailed: String?, - val mergesCurrent: String?, - val priMergesCurrent: String?, - val mergesCurrentDocs: String?, - val priMergesCurrentDocs: String?, - val mergesCurrentSize: String?, - val priMergesCurrentSize: String?, - val mergesTotal: String?, - val priMergesTotal: String?, - val mergesTotalDocs: String?, - val priMergesTotalDocs: String?, - val mergesTotalSize: String?, - val priMergesTotalSize: String?, - val mergesTotalTime: String?, - val priMergesTotalTime: String?, - val refreshTotal: String?, - val priRefreshTotal: String?, - val refreshTime: String?, - val priRefreshTime: String?, - val refreshExternalTotal: String?, - val priRefreshExternalTotal: String?, - val refreshExternalTime: String?, - val priRefreshExternalTime: String?, - val refreshListeners: String?, - val priRefreshListeners: String?, - val searchFetchCurrent: String?, - val priSearchFetchCurrent: String?, - val searchFetchTime: String?, - val priSearchFetchTime: String?, - val searchFetchTotal: String?, - val priSearchFetchTotal: String?, - val searchOpenContexts: String?, - val priSearchOpenContexts: String?, - val searchQueryCurrent: String?, - val priSearchQueryCurrent: String?, - val searchQueryTime: String?, - val priSearchQueryTime: String?, - val searchQueryTotal: String?, - val priSearchQueryTotal: String?, - val searchScrollCurrent: String?, - val priSearchScrollCurrent: String?, - val searchScrollTime: String?, - val priSearchScrollTime: String?, - val searchScrollTotal: String?, - val priSearchScrollTotal: String?, - val searchPointInTimeCurrent: String?, - val priSearchPointInTimeCurrent: String?, - val searchPointInTimeTime: String?, - val priSearchPointInTimeTime: String?, - val searchPointInTimeTotal: String?, - val priSearchPointInTimeTotal: String?, - val segmentsCount: String?, - val priSegmentsCount: String?, - val segmentsMemory: String?, - val priSegmentsMemory: String?, - val segmentsIndexWriterMemory: String?, - val priSegmentsIndexWriterMemory: String?, - val segmentsVersionMapMemory: String?, - val priSegmentsVersionMapMemory: String?, - val segmentsFixedBitsetMemory: String?, - val priSegmentsFixedBitsetMemory: String?, - val warmerCurrent: String?, - val priWarmerCurrent: String?, - val warmerTotal: String?, - val priWarmerTotal: String?, - val warmerTotalTime: String?, - val priWarmerTotalTime: String?, - val suggestCurrent: String?, - val priSuggestCurrent: String?, - val suggestTime: String?, - val priSuggestTime: String?, - val suggestTotal: String?, - val priSuggestTotal: String?, - val memoryTotal: String?, - val priMemoryTotal: String?, - val searchThrottled: String? 
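// Design note (inferred, not stated in the source): every stat is modeled as String? rather
// than a numeric type so IndexInfo can mirror _cat/indices text output directly; each value
// is pre-rendered via string interpolation in compileIndexInfo above, including absent stats.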
- ) : ToXContentObject, Writeable { - companion object { - const val HEALTH_FIELD = "health" - const val STATUS_FIELD = "status" - const val INDEX_FIELD = "index" - const val UUID_FIELD = "uuid" - const val PRI_FIELD = "pri" - const val REP_FIELD = "rep" - const val DOCS_COUNT_FIELD = "docs.count" - const val DOCS_DELETED_FIELD = "docs.deleted" - const val CREATION_DATE_FIELD = "creation.date" - const val CREATION_DATE_STRING_FIELD = "creation.date.string" - const val STORE_SIZE_FIELD = "store.size" - const val PRI_STORE_SIZE_FIELD = "pri.store.size" - const val COMPLETION_SIZE_FIELD = "completion.size" - const val PRI_COMPLETION_SIZE_FIELD = "pri.completion.size" - const val FIELD_DATA_MEMORY_SIZE_FIELD = "fielddata.memory_size" - const val PRI_FIELD_DATA_MEMORY_SIZE_FIELD = "pri.fielddata.memory_size" - const val FIELD_DATA_EVICTIONS_FIELD = "fielddata.evictions" - const val PRI_FIELD_DATA_EVICTIONS_FIELD = "pri.fielddata.evictions" - const val QUERY_CACHE_MEMORY_SIZE_FIELD = "query_cache.memory_size" - const val PRI_QUERY_CACHE_MEMORY_SIZE_FIELD = "pri.query_cache.memory_size" - const val QUERY_CACHE_EVICTIONS_FIELD = "query_cache.evictions" - const val PRI_QUERY_CACHE_EVICTIONS_FIELD = "pri.query_cache.evictions" - const val REQUEST_CACHE_MEMORY_SIZE_FIELD = "request_cache.memory_size" - const val PRI_REQUEST_CACHE_MEMORY_SIZE_FIELD = "pri.request_cache.memory_size" - const val REQUEST_CACHE_EVICTIONS_FIELD = "request_cache.evictions" - const val PRI_REQUEST_CACHE_EVICTIONS_FIELD = "pri.request_cache.evictions" - const val REQUEST_CACHE_HIT_COUNT_FIELD = "request_cache.hit_count" - const val PRI_REQUEST_CACHE_HIT_COUNT_FIELD = "pri.request_cache.hit_count" - const val REQUEST_CACHE_MISS_COUNT_FIELD = "request_cache.miss_count" - const val PRI_REQUEST_CACHE_MISS_COUNT_FIELD = "pri.request_cache.miss_count" - const val FLUSH_TOTAL_FIELD = "flush.total" - const val PRI_FLUSH_TOTAL_FIELD = "pri.flush.total" - const val FLUSH_TOTAL_TIME_FIELD = "flush.total_time" - const val PRI_FLUSH_TOTAL_TIME_FIELD = "pri.flush.total_time" - const val GET_CURRENT_FIELD = "get.current" - const val PRI_GET_CURRENT_FIELD = "pri.get.current" - const val GET_TIME_FIELD = "get.time" - const val PRI_GET_TIME_FIELD = "pri.get.time" - const val GET_TOTAL_FIELD = "get.total" - const val PRI_GET_TOTAL_FIELD = "pri.get.total" - const val GET_EXISTS_TIME_FIELD = "get.exists_time" - const val PRI_GET_EXISTS_TIME_FIELD = "pri.get.exists_time" - const val GET_EXISTS_TOTAL_FIELD = "get.exists_total" - const val PRI_GET_EXISTS_TOTAL_FIELD = "pri.get.exists_total" - const val GET_MISSING_TIME_FIELD = "get.missing_time" - const val PRI_GET_MISSING_TIME_FIELD = "pri.get.missing_time" - const val GET_MISSING_TOTAL_FIELD = "get.missing_total" - const val PRI_GET_MISSING_TOTAL_FIELD = "pri.get.missing_total" - const val INDEXING_DELETE_CURRENT_FIELD = "indexing.delete_current" - const val PRI_INDEXING_DELETE_CURRENT_FIELD = "pri.indexing.delete_current" - const val INDEXING_DELETE_TIME_FIELD = "indexing.delete_time" - const val PRI_INDEXING_DELETE_TIME_FIELD = "pri.indexing.delete_time" - const val INDEXING_DELETE_TOTAL_FIELD = "indexing.delete_total" - const val PRI_INDEXING_DELETE_TOTAL_FIELD = "pri.indexing.delete_total" - const val INDEXING_INDEX_CURRENT_FIELD = "indexing.index_current" - const val PRI_INDEXING_INDEX_CURRENT_FIELD = "pri.indexing.index_current" - const val INDEXING_INDEX_TIME_FIELD = "indexing.index_time" - const val PRI_INDEXING_INDEX_TIME_FIELD = "pri.indexing.index_time" - const val 
INDEXING_INDEX_TOTAL_FIELD = "indexing.index_total" - const val PRI_INDEXING_INDEX_TOTAL_FIELD = "pri.indexing.index_total" - const val INDEXING_INDEX_FAILED_FIELD = "indexing.index_failed" - const val PRI_INDEXING_INDEX_FAILED_FIELD = "pri.indexing.index_failed" - const val MERGES_CURRENT_FIELD = "merges.current" - const val PRI_MERGES_CURRENT_FIELD = "pri.merges.current" - const val MERGES_CURRENT_DOCS_FIELD = "merges.current_docs" - const val PRI_MERGES_CURRENT_DOCS_FIELD = "pri.merges.current_docs" - const val MERGES_CURRENT_SIZE_FIELD = "merges.current_size" - const val PRI_MERGES_CURRENT_SIZE_FIELD = "pri.merges.current_size" - const val MERGES_TOTAL_FIELD = "merges.total" - const val PRI_MERGES_TOTAL_FIELD = "pri.merges.total" - const val MERGES_TOTAL_DOCS_FIELD = "merges.total_docs" - const val PRI_MERGES_TOTAL_DOCS_FIELD = "pri.merges.total_docs" - const val MERGES_TOTAL_SIZE_FIELD = "merges.total_size" - const val PRI_MERGES_TOTAL_SIZE_FIELD = "pri.merges.total_size" - const val MERGES_TOTAL_TIME_FIELD = "merges.total_time" - const val PRI_MERGES_TOTAL_TIME_FIELD = "pri.merges.total_time" - const val REFRESH_TOTAL_FIELD = "refresh.total" - const val PRI_REFRESH_TOTAL_FIELD = "pri.refresh.total" - const val REFRESH_TIME_FIELD = "refresh.time" - const val PRI_REFRESH_TIME_FIELD = "pri.refresh.time" - const val REFRESH_EXTERNAL_TOTAL_FIELD = "refresh.external_total" - const val PRI_REFRESH_EXTERNAL_TOTAL_FIELD = "pri.refresh.external_total" - const val REFRESH_EXTERNAL_TIME_FIELD = "refresh.external_time" - const val PRI_REFRESH_EXTERNAL_TIME_FIELD = "pri.refresh.external_time" - const val REFRESH_LISTENERS_FIELD = "refresh.listeners" - const val PRI_REFRESH_LISTENERS_FIELD = "pri.refresh.listeners" - const val SEARCH_FETCH_CURRENT_FIELD = "search.fetch_current" - const val PRI_SEARCH_FETCH_CURRENT_FIELD = "pri.search.fetch_current" - const val SEARCH_FETCH_TIME_FIELD = "search.fetch_time" - const val PRI_SEARCH_FETCH_TIME_FIELD = "pri.search.fetch_time" - const val SEARCH_FETCH_TOTAL_FIELD = "search.fetch_total" - const val PRI_SEARCH_FETCH_TOTAL_FIELD = "pri.search.fetch_total" - const val SEARCH_OPEN_CONTEXTS_FIELD = "search.open_contexts" - const val PRI_SEARCH_OPEN_CONTEXTS_FIELD = "pri.search.open_contexts" - const val SEARCH_QUERY_CURRENT_FIELD = "search.query_current" - const val PRI_SEARCH_QUERY_CURRENT_FIELD = "pri.search.query_current" - const val SEARCH_QUERY_TIME_FIELD = "search.query_time" - const val PRI_SEARCH_QUERY_TIME_FIELD = "pri.search.query_time" - const val SEARCH_QUERY_TOTAL_FIELD = "search.query_total" - const val PRI_SEARCH_QUERY_TOTAL_FIELD = "pri.search.query_total" - const val SEARCH_SCROLL_CURRENT_FIELD = "search.scroll_current" - const val PRI_SEARCH_SCROLL_CURRENT_FIELD = "pri.search.scroll_current" - const val SEARCH_SCROLL_TIME_FIELD = "search.scroll_time" - const val PRI_SEARCH_SCROLL_TIME_FIELD = "pri.search.scroll_time" - const val SEARCH_SCROLL_TOTAL_FIELD = "search.scroll_total" - const val PRI_SEARCH_SCROLL_TOTAL_FIELD = "pri.search.scroll_total" - const val SEARCH_POINT_IN_TIME_CURRENT_FIELD = "search.point_in_time_current" - const val PRI_SEARCH_POINT_IN_TIME_CURRENT_FIELD = "pri.search.point_in_time_current" - const val SEARCH_POINT_IN_TIME_TIME_FIELD = "search.point_in_time_time" - const val PRI_SEARCH_POINT_IN_TIME_TIME_FIELD = "pri.search.point_in_time_time" - const val SEARCH_POINT_IN_TIME_TOTAL_FIELD = "search.point_in_time_total" - const val PRI_SEARCH_POINT_IN_TIME_TOTAL_FIELD = "pri.search.point_in_time_total" - const val 
SEGMENTS_COUNT_FIELD = "segments.count" - const val PRI_SEGMENTS_COUNT_FIELD = "pri.segments.count" - const val SEGMENTS_MEMORY_FIELD = "segments.memory" - const val PRI_SEGMENTS_MEMORY_FIELD = "pri.segments.memory" - const val SEGMENTS_INDEX_WRITER_MEMORY_FIELD = "segments.index_writer_memory" - const val PRI_SEGMENTS_INDEX_WRITER_MEMORY_FIELD = "pri.segments.index_writer_memory" - const val SEGMENTS_VERSION_MAP_MEMORY_FIELD = "segments.version_map_memory" - const val PRI_SEGMENTS_VERSION_MAP_MEMORY_FIELD = "pri.segments.version_map_memory" - const val SEGMENTS_FIXED_BITSET_MEMORY_FIELD = "segments.fixed_bitset_memory" - const val PRI_SEGMENTS_FIXED_BITSET_MEMORY_FIELD = "pri.segments.fixed_bitset_memory" - const val WARMER_CURRENT_FIELD = "warmer.current" - const val PRI_WARMER_CURRENT_FIELD = "pri.warmer.current" - const val WARMER_TOTAL_FIELD = "warmer.total" - const val PRI_WARMER_TOTAL_FIELD = "pri.warmer.total" - const val WARMER_TOTAL_TIME_FIELD = "warmer.total_time" - const val PRI_WARMER_TOTAL_TIME_FIELD = "pri.warmer.total_time" - const val SUGGEST_CURRENT_FIELD = "suggest.current" - const val PRI_SUGGEST_CURRENT_FIELD = "pri.suggest.current" - const val SUGGEST_TIME_FIELD = "suggest.time" - const val PRI_SUGGEST_TIME_FIELD = "pri.suggest.time" - const val SUGGEST_TOTAL_FIELD = "suggest.total" - const val PRI_SUGGEST_TOTAL_FIELD = "pri.suggest.total" - const val MEMORY_TOTAL_FIELD = "memory.total" - const val PRI_MEMORY_TOTAL_FIELD = "pri.memory.total" - const val SEARCH_THROTTLED_FIELD = "search.throttled" - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .field(HEALTH_FIELD, health) - .field(STATUS_FIELD, status) - .field(INDEX_FIELD, index) - .field(UUID_FIELD, uuid) - .field(PRI_FIELD, pri) - .field(REP_FIELD, rep) - .field(DOCS_COUNT_FIELD, docsCount) - .field(DOCS_DELETED_FIELD, docsDeleted) - .field(CREATION_DATE_FIELD, creationDate) - .field(CREATION_DATE_STRING_FIELD, creationDateString) - .field(STORE_SIZE_FIELD, storeSize) - .field(PRI_STORE_SIZE_FIELD, priStoreSize) - .field(COMPLETION_SIZE_FIELD, completionSize) - .field(PRI_COMPLETION_SIZE_FIELD, priCompletionSize) - .field(FIELD_DATA_MEMORY_SIZE_FIELD, fieldDataMemorySize) - .field(PRI_FIELD_DATA_MEMORY_SIZE_FIELD, priFieldDataMemorySize) - .field(FIELD_DATA_EVICTIONS_FIELD, fieldDataEvictions) - .field(PRI_FIELD_DATA_EVICTIONS_FIELD, priFieldDataEvictions) - .field(QUERY_CACHE_MEMORY_SIZE_FIELD, queryCacheMemorySize) - .field(PRI_QUERY_CACHE_MEMORY_SIZE_FIELD, priQueryCacheMemorySize) - .field(QUERY_CACHE_EVICTIONS_FIELD, queryCacheEvictions) - .field(PRI_QUERY_CACHE_EVICTIONS_FIELD, priQueryCacheEvictions) - .field(REQUEST_CACHE_MEMORY_SIZE_FIELD, requestCacheMemorySize) - .field(PRI_REQUEST_CACHE_MEMORY_SIZE_FIELD, priRequestCacheMemorySize) - .field(REQUEST_CACHE_EVICTIONS_FIELD, requestCacheEvictions) - .field(PRI_REQUEST_CACHE_EVICTIONS_FIELD, priRequestCacheEvictions) - .field(REQUEST_CACHE_HIT_COUNT_FIELD, requestCacheHitCount) - .field(PRI_REQUEST_CACHE_HIT_COUNT_FIELD, priRequestCacheHitCount) - .field(REQUEST_CACHE_MISS_COUNT_FIELD, requestCacheMissCount) - .field(PRI_REQUEST_CACHE_MISS_COUNT_FIELD, priRequestCacheMissCount) - .field(FLUSH_TOTAL_FIELD, flushTotal) - .field(PRI_FLUSH_TOTAL_FIELD, priFlushTotal) - .field(FLUSH_TOTAL_TIME_FIELD, flushTotalTime) - .field(PRI_FLUSH_TOTAL_TIME_FIELD, priFlushTotalTime) - .field(GET_CURRENT_FIELD, getCurrent) - .field(PRI_GET_CURRENT_FIELD, priGetCurrent) - .field(GET_TIME_FIELD, 
getTime) - .field(PRI_GET_TIME_FIELD, priGetTime) - .field(GET_TOTAL_FIELD, getTotal) - .field(PRI_GET_TOTAL_FIELD, priGetTotal) - .field(GET_EXISTS_TIME_FIELD, getExistsTime) - .field(PRI_GET_EXISTS_TIME_FIELD, priGetExistsTime) - .field(GET_EXISTS_TOTAL_FIELD, getExistsTotal) - .field(PRI_GET_EXISTS_TOTAL_FIELD, priGetExistsTotal) - .field(GET_MISSING_TIME_FIELD, getMissingTime) - .field(PRI_GET_MISSING_TIME_FIELD, priGetMissingTime) - .field(GET_MISSING_TOTAL_FIELD, getMissingTotal) - .field(PRI_GET_MISSING_TOTAL_FIELD, priGetMissingTotal) - .field(INDEXING_DELETE_CURRENT_FIELD, indexingDeleteCurrent) - .field(PRI_INDEXING_DELETE_CURRENT_FIELD, priIndexingDeleteCurrent) - .field(INDEXING_DELETE_TIME_FIELD, indexingDeleteTime) - .field(PRI_INDEXING_DELETE_TIME_FIELD, priIndexingDeleteTime) - .field(INDEXING_DELETE_TOTAL_FIELD, indexingDeleteTotal) - .field(PRI_INDEXING_DELETE_TOTAL_FIELD, priIndexingDeleteTotal) - .field(INDEXING_INDEX_CURRENT_FIELD, indexingIndexCurrent) - .field(PRI_INDEXING_INDEX_CURRENT_FIELD, priIndexingIndexCurrent) - .field(INDEXING_INDEX_TIME_FIELD, indexingIndexTime) - .field(PRI_INDEXING_INDEX_TIME_FIELD, priIndexingIndexTime) - .field(INDEXING_INDEX_TOTAL_FIELD, indexingIndexTotal) - .field(PRI_INDEXING_INDEX_TOTAL_FIELD, priIndexingIndexTotal) - .field(INDEXING_INDEX_FAILED_FIELD, indexingIndexFailed) - .field(PRI_INDEXING_INDEX_FAILED_FIELD, priIndexingIndexFailed) - .field(MERGES_CURRENT_FIELD, mergesCurrent) - .field(PRI_MERGES_CURRENT_FIELD, priMergesCurrent) - .field(MERGES_CURRENT_DOCS_FIELD, mergesCurrentDocs) - .field(PRI_MERGES_CURRENT_DOCS_FIELD, priMergesCurrentDocs) - .field(MERGES_CURRENT_SIZE_FIELD, mergesCurrentSize) - .field(PRI_MERGES_CURRENT_SIZE_FIELD, priMergesCurrentSize) - .field(MERGES_TOTAL_FIELD, mergesTotal) - .field(PRI_MERGES_TOTAL_FIELD, priMergesTotal) - .field(MERGES_TOTAL_DOCS_FIELD, mergesTotalDocs) - .field(PRI_MERGES_TOTAL_DOCS_FIELD, priMergesTotalDocs) - .field(MERGES_TOTAL_SIZE_FIELD, mergesTotalSize) - .field(PRI_MERGES_TOTAL_SIZE_FIELD, priMergesTotalSize) - .field(MERGES_TOTAL_TIME_FIELD, mergesTotalTime) - .field(PRI_MERGES_TOTAL_TIME_FIELD, priMergesTotalTime) - .field(REFRESH_TOTAL_FIELD, refreshTotal) - .field(PRI_REFRESH_TOTAL_FIELD, priRefreshTotal) - .field(REFRESH_TIME_FIELD, refreshTime) - .field(PRI_REFRESH_TIME_FIELD, priRefreshTime) - .field(REFRESH_EXTERNAL_TOTAL_FIELD, refreshExternalTotal) - .field(PRI_REFRESH_EXTERNAL_TOTAL_FIELD, priRefreshExternalTotal) - .field(REFRESH_EXTERNAL_TIME_FIELD, refreshExternalTime) - .field(PRI_REFRESH_EXTERNAL_TIME_FIELD, priRefreshExternalTime) - .field(REFRESH_LISTENERS_FIELD, refreshListeners) - .field(PRI_REFRESH_LISTENERS_FIELD, priRefreshListeners) - .field(SEARCH_FETCH_CURRENT_FIELD, searchFetchCurrent) - .field(PRI_SEARCH_FETCH_CURRENT_FIELD, priSearchFetchCurrent) - .field(SEARCH_FETCH_TIME_FIELD, searchFetchTime) - .field(PRI_SEARCH_FETCH_TIME_FIELD, priSearchFetchTime) - .field(SEARCH_FETCH_TOTAL_FIELD, searchFetchTotal) - .field(PRI_SEARCH_FETCH_TOTAL_FIELD, priSearchFetchTotal) - .field(SEARCH_OPEN_CONTEXTS_FIELD, searchOpenContexts) - .field(PRI_SEARCH_OPEN_CONTEXTS_FIELD, priSearchOpenContexts) - .field(SEARCH_QUERY_CURRENT_FIELD, searchQueryCurrent) - .field(PRI_SEARCH_QUERY_CURRENT_FIELD, priSearchQueryCurrent) - .field(SEARCH_QUERY_TIME_FIELD, searchQueryTime) - .field(PRI_SEARCH_QUERY_TIME_FIELD, priSearchQueryTime) - .field(SEARCH_QUERY_TOTAL_FIELD, searchQueryTotal) - .field(PRI_SEARCH_QUERY_TOTAL_FIELD, priSearchQueryTotal) - 
.field(SEARCH_SCROLL_CURRENT_FIELD, searchScrollCurrent) - .field(PRI_SEARCH_SCROLL_CURRENT_FIELD, priSearchScrollCurrent) - .field(SEARCH_SCROLL_TIME_FIELD, searchScrollTime) - .field(PRI_SEARCH_SCROLL_TIME_FIELD, priSearchScrollTime) - .field(SEARCH_SCROLL_TOTAL_FIELD, searchScrollTotal) - .field(PRI_SEARCH_SCROLL_TOTAL_FIELD, priSearchScrollTotal) - .field(SEARCH_POINT_IN_TIME_CURRENT_FIELD, searchPointInTimeCurrent) - .field(PRI_SEARCH_POINT_IN_TIME_CURRENT_FIELD, priSearchPointInTimeCurrent) - .field(SEARCH_POINT_IN_TIME_TIME_FIELD, searchPointInTimeTime) - .field(PRI_SEARCH_POINT_IN_TIME_TIME_FIELD, priSearchPointInTimeTime) - .field(SEARCH_POINT_IN_TIME_TOTAL_FIELD, searchPointInTimeTotal) - .field(PRI_SEARCH_POINT_IN_TIME_TOTAL_FIELD, priSearchPointInTimeTotal) - .field(SEGMENTS_COUNT_FIELD, segmentsCount) - .field(PRI_SEGMENTS_COUNT_FIELD, priSegmentsCount) - .field(SEGMENTS_MEMORY_FIELD, segmentsMemory) - .field(PRI_SEGMENTS_MEMORY_FIELD, priSegmentsMemory) - .field(SEGMENTS_INDEX_WRITER_MEMORY_FIELD, segmentsIndexWriterMemory) - .field(PRI_SEGMENTS_INDEX_WRITER_MEMORY_FIELD, priSegmentsIndexWriterMemory) - .field(SEGMENTS_VERSION_MAP_MEMORY_FIELD, segmentsVersionMapMemory) - .field(PRI_SEGMENTS_VERSION_MAP_MEMORY_FIELD, priSegmentsVersionMapMemory) - .field(SEGMENTS_FIXED_BITSET_MEMORY_FIELD, segmentsFixedBitsetMemory) - .field(PRI_SEGMENTS_FIXED_BITSET_MEMORY_FIELD, priSegmentsFixedBitsetMemory) - .field(WARMER_CURRENT_FIELD, warmerCurrent) - .field(PRI_WARMER_CURRENT_FIELD, priWarmerCurrent) - .field(WARMER_TOTAL_FIELD, warmerTotal) - .field(PRI_WARMER_TOTAL_FIELD, priWarmerTotal) - .field(WARMER_TOTAL_TIME_FIELD, warmerTotalTime) - .field(PRI_WARMER_TOTAL_TIME_FIELD, priWarmerTotalTime) - .field(SUGGEST_CURRENT_FIELD, suggestCurrent) - .field(PRI_SUGGEST_CURRENT_FIELD, priSuggestCurrent) - .field(SUGGEST_TIME_FIELD, suggestTime) - .field(PRI_SUGGEST_TIME_FIELD, priSuggestTime) - .field(SUGGEST_TOTAL_FIELD, suggestTotal) - .field(PRI_SUGGEST_TOTAL_FIELD, priSuggestTotal) - .field(MEMORY_TOTAL_FIELD, memoryTotal) - .field(PRI_MEMORY_TOTAL_FIELD, priMemoryTotal) - .field(SEARCH_THROTTLED_FIELD, searchThrottled) - return builder.endObject() - } - - override fun writeTo(out: StreamOutput) { - out.writeString(health) - out.writeString(status) - out.writeString(index) - out.writeString(uuid) - out.writeString(pri) - out.writeString(rep) - out.writeString(docsCount) - out.writeString(docsDeleted) - out.writeString(creationDate) - out.writeString(creationDateString) - out.writeString(storeSize) - out.writeString(priStoreSize) - out.writeString(completionSize) - out.writeString(priCompletionSize) - out.writeString(fieldDataMemorySize) - out.writeString(priFieldDataMemorySize) - out.writeString(fieldDataEvictions) - out.writeString(priFieldDataEvictions) - out.writeString(queryCacheMemorySize) - out.writeString(priQueryCacheMemorySize) - out.writeString(queryCacheEvictions) - out.writeString(priQueryCacheEvictions) - out.writeString(requestCacheMemorySize) - out.writeString(priRequestCacheMemorySize) - out.writeString(requestCacheEvictions) - out.writeString(priRequestCacheEvictions) - out.writeString(requestCacheHitCount) - out.writeString(priRequestCacheHitCount) - out.writeString(requestCacheMissCount) - out.writeString(priRequestCacheMissCount) - out.writeString(flushTotal) - out.writeString(priFlushTotal) - out.writeString(flushTotalTime) - out.writeString(priFlushTotalTime) - out.writeString(getCurrent) - out.writeString(priGetCurrent) - out.writeString(getTime) - 
out.writeString(priGetTime) - out.writeString(getTotal) - out.writeString(priGetTotal) - out.writeString(getExistsTime) - out.writeString(priGetExistsTime) - out.writeString(getExistsTotal) - out.writeString(priGetExistsTotal) - out.writeString(getMissingTime) - out.writeString(priGetMissingTime) - out.writeString(getMissingTotal) - out.writeString(priGetMissingTotal) - out.writeString(indexingDeleteCurrent) - out.writeString(priIndexingDeleteCurrent) - out.writeString(indexingDeleteTime) - out.writeString(priIndexingDeleteTime) - out.writeString(indexingDeleteTotal) - out.writeString(priIndexingDeleteTotal) - out.writeString(indexingIndexCurrent) - out.writeString(priIndexingIndexCurrent) - out.writeString(indexingIndexTime) - out.writeString(priIndexingIndexTime) - out.writeString(indexingIndexTotal) - out.writeString(priIndexingIndexTotal) - out.writeString(indexingIndexFailed) - out.writeString(priIndexingIndexFailed) - out.writeString(mergesCurrent) - out.writeString(priMergesCurrent) - out.writeString(mergesCurrentDocs) - out.writeString(priMergesCurrentDocs) - out.writeString(mergesCurrentSize) - out.writeString(priMergesCurrentSize) - out.writeString(mergesTotal) - out.writeString(priMergesTotal) - out.writeString(mergesTotalDocs) - out.writeString(priMergesTotalDocs) - out.writeString(mergesTotalSize) - out.writeString(priMergesTotalSize) - out.writeString(mergesTotalTime) - out.writeString(priMergesTotalTime) - out.writeString(refreshTotal) - out.writeString(priRefreshTotal) - out.writeString(refreshTime) - out.writeString(priRefreshTime) - out.writeString(refreshExternalTotal) - out.writeString(priRefreshExternalTotal) - out.writeString(refreshExternalTime) - out.writeString(priRefreshExternalTime) - out.writeString(refreshListeners) - out.writeString(priRefreshListeners) - out.writeString(searchFetchCurrent) - out.writeString(priSearchFetchCurrent) - out.writeString(searchFetchTime) - out.writeString(priSearchFetchTime) - out.writeString(searchFetchTotal) - out.writeString(priSearchFetchTotal) - out.writeString(searchOpenContexts) - out.writeString(priSearchOpenContexts) - out.writeString(searchQueryCurrent) - out.writeString(priSearchQueryCurrent) - out.writeString(searchQueryTime) - out.writeString(priSearchQueryTime) - out.writeString(searchQueryTotal) - out.writeString(priSearchQueryTotal) - out.writeString(searchScrollCurrent) - out.writeString(priSearchScrollCurrent) - out.writeString(searchScrollTime) - out.writeString(priSearchScrollTime) - out.writeString(searchScrollTotal) - out.writeString(priSearchScrollTotal) - out.writeString(searchPointInTimeCurrent) - out.writeString(priSearchPointInTimeCurrent) - out.writeString(searchPointInTimeTime) - out.writeString(priSearchPointInTimeTime) - out.writeString(searchPointInTimeTotal) - out.writeString(priSearchPointInTimeTotal) - out.writeString(segmentsCount) - out.writeString(priSegmentsCount) - out.writeString(segmentsMemory) - out.writeString(priSegmentsMemory) - out.writeString(segmentsIndexWriterMemory) - out.writeString(priSegmentsIndexWriterMemory) - out.writeString(segmentsVersionMapMemory) - out.writeString(priSegmentsVersionMapMemory) - out.writeString(segmentsFixedBitsetMemory) - out.writeString(priSegmentsFixedBitsetMemory) - out.writeString(warmerCurrent) - out.writeString(priWarmerCurrent) - out.writeString(warmerTotal) - out.writeString(priWarmerTotal) - out.writeString(warmerTotalTime) - out.writeString(priWarmerTotalTime) - out.writeString(suggestCurrent) - out.writeString(priSuggestCurrent) - 
out.writeString(suggestTime) - out.writeString(priSuggestTime) - out.writeString(suggestTotal) - out.writeString(priSuggestTotal) - out.writeString(memoryTotal) - out.writeString(priMemoryTotal) - out.writeString(searchThrottled) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsHelpers.kt b/alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsHelpers.kt deleted file mode 100644 index 12152e69d..000000000 --- a/alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsHelpers.kt +++ /dev/null @@ -1,495 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util.clusterMetricsMonitorHelpers - -import org.opensearch.action.ActionRequest -import org.opensearch.action.ActionRequestValidationException -import org.opensearch.action.ValidateActions -import org.opensearch.action.admin.cluster.state.ClusterStateRequest -import org.opensearch.action.admin.cluster.state.ClusterStateResponse -import org.opensearch.action.admin.indices.stats.CommonStats -import org.opensearch.action.admin.indices.stats.IndicesStatsRequest -import org.opensearch.action.admin.indices.stats.IndicesStatsResponse -import org.opensearch.action.admin.indices.stats.ShardStats -import org.opensearch.alerting.util.IndexUtils.Companion.VALID_INDEX_NAME_REGEX -import org.opensearch.cluster.routing.UnassignedInfo -import org.opensearch.common.unit.TimeValue -import org.opensearch.core.action.ActionResponse -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.common.io.stream.Writeable -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.ToXContentObject -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.index.cache.query.QueryCacheStats -import org.opensearch.index.engine.CommitStats -import org.opensearch.index.engine.Engine -import org.opensearch.index.engine.SegmentsStats -import org.opensearch.index.fielddata.FieldDataStats -import org.opensearch.index.flush.FlushStats -import org.opensearch.index.get.GetStats -import org.opensearch.index.merge.MergeStats -import org.opensearch.index.refresh.RefreshStats -import org.opensearch.index.search.stats.SearchStats -import org.opensearch.index.seqno.SeqNoStats -import org.opensearch.index.shard.DocsStats -import org.opensearch.index.store.StoreStats -import org.opensearch.search.suggest.completion.CompletionStats -import java.time.Instant -import java.util.Locale -import java.util.function.Function - -class CatShardsRequestWrapper(val pathParams: String = "") : ActionRequest() { - var clusterStateRequest: ClusterStateRequest = - ClusterStateRequest().clear().nodes(true).routingTable(true) - var indicesStatsRequest: IndicesStatsRequest = - IndicesStatsRequest().all() - var indicesList = arrayOf<String>() - - init { - if (pathParams.isNotBlank()) { - indicesList = pathParams.split(",").toTypedArray() - - require(validate() == null) { - "The path parameters do not form a valid, comma-separated list of data streams, indices, or index aliases." - } - - clusterStateRequest = clusterStateRequest.indices(*indicesList) - indicesStatsRequest = indicesStatsRequest.indices(*indicesList) - } - } - - override fun validate(): ActionRequestValidationException? { - var exception: ActionRequestValidationException? 
= null - if (pathParams.isNotBlank() && indicesList.any { !VALID_INDEX_NAME_REGEX.containsMatchIn(it) }) - exception = ValidateActions.addValidationError( - "The path parameters do not form a valid, comma-separated list of data streams, indices, or index aliases.", - exception - ) - return exception - } -} - -class CatShardsResponseWrapper( - stateResp: ClusterStateResponse, - indicesResp: IndicesStatsResponse -) : ActionResponse(), ToXContentObject { - var shardInfoList: List<ShardInfo> = listOf() - - init { - shardInfoList = compileShardInfo(stateResp, indicesResp) - } - - companion object { - const val WRAPPER_FIELD = "shards" - } - - override fun writeTo(out: StreamOutput) { - out.writeList(shardInfoList) - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - builder.startArray(WRAPPER_FIELD) - shardInfoList.forEach { it.toXContent(builder, params) } - builder.endArray() - return builder.endObject() - } - - private fun <S, T> getOrNull(stats: S?, accessor: Function<S, T>, func: Function<T, Any>): Any? { - if (stats != null) { - val t: T? = accessor.apply(stats) - if (t != null) { - return func.apply(t) - } - } - return null - } - - private fun compileShardInfo( - stateResp: ClusterStateResponse, - indicesResp: IndicesStatsResponse - ): List<ShardInfo> { - val list = mutableListOf<ShardInfo>() - - for (shard in stateResp.state.routingTable.allShards()) { - val shardStats = indicesResp.asMap()[shard] - var commonStats: CommonStats? = null - var commitStats: CommitStats? = null - if (shardStats != null) { - commonStats = shardStats.stats - commitStats = shardStats.commitStats - } - - var shardInfo = ShardInfo( - index = shard.indexName, - shard = "${shard.id}", - primaryOrReplica = if (shard.primary()) "p" else "r", - state = shard.state().name, - docs = getOrNull(commonStats, CommonStats::getDocs, DocsStats::getCount)?.toString(), - store = getOrNull(commonStats, CommonStats::getStore, StoreStats::getSize)?.toString(), - id = null, // Added below - node = null, // Added below - completionSize = getOrNull(commonStats, CommonStats::getCompletion, CompletionStats::getSize)?.toString(), - fieldDataMemory = getOrNull(commonStats, CommonStats::getFieldData, FieldDataStats::getMemorySize)?.toString(), - fieldDataEvictions = getOrNull(commonStats, CommonStats::getFieldData, FieldDataStats::getEvictions)?.toString(), - flushTotal = getOrNull(commonStats, CommonStats::getFlush, FlushStats::getTotal)?.toString(), - flushTotalTime = getOrNull(commonStats, CommonStats::getFlush, FlushStats::getTotalTime)?.toString(), - getCurrent = getOrNull(commonStats, CommonStats::getGet, GetStats::current)?.toString(), - getTime = getOrNull(commonStats, CommonStats::getGet, GetStats::getTime)?.toString(), - getTotal = getOrNull(commonStats, CommonStats::getGet, GetStats::getCount)?.toString(), - getExistsTime = getOrNull(commonStats, CommonStats::getGet, GetStats::getExistsTime)?.toString(), - getExistsTotal = getOrNull(commonStats, CommonStats::getGet, GetStats::getExistsCount)?.toString(), - getMissingTime = getOrNull(commonStats, CommonStats::getGet, GetStats::getMissingTime)?.toString(), - getMissingTotal = getOrNull(commonStats, CommonStats::getGet, GetStats::getMissingCount)?.toString(), - indexingDeleteCurrent = getOrNull(commonStats, CommonStats::getIndexing, { it.total.deleteCurrent })?.toString(), - indexingDeleteTime = getOrNull(commonStats, CommonStats::getIndexing, { it.total.deleteTime })?.toString(), - indexingDeleteTotal = getOrNull(commonStats, CommonStats::getIndexing, { 
it.total.deleteCount })?.toString(), - indexingIndexCurrent = getOrNull(commonStats, CommonStats::getIndexing, { it.total.indexCurrent })?.toString(), - indexingIndexTime = getOrNull(commonStats, CommonStats::getIndexing, { it.total.indexTime })?.toString(), - indexingIndexTotal = getOrNull(commonStats, CommonStats::getIndexing, { it.total.indexCount })?.toString(), - indexingIndexFailed = getOrNull(commonStats, CommonStats::getIndexing, { it.total.indexFailedCount })?.toString(), - mergesCurrent = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getCurrent)?.toString(), - mergesCurrentDocs = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getCurrentNumDocs)?.toString(), - mergesCurrentSize = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getCurrentSize)?.toString(), - mergesTotal = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getTotal)?.toString(), - mergesTotalDocs = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getTotalNumDocs)?.toString(), - mergesTotalSize = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getTotalSize)?.toString(), - mergesTotalTime = getOrNull(commonStats, CommonStats::getMerge, MergeStats::getTotalTime)?.toString(), - queryCacheMemory = getOrNull(commonStats, CommonStats::getQueryCache, QueryCacheStats::getMemorySize)?.toString(), - queryCacheEvictions = getOrNull(commonStats, CommonStats::getQueryCache, QueryCacheStats::getEvictions)?.toString(), - recoverySourceType = null, // Added below - refreshTotal = getOrNull(commonStats, CommonStats::getRefresh, RefreshStats::getTotal)?.toString(), - refreshTime = getOrNull(commonStats, CommonStats::getRefresh, RefreshStats::getTotalTime)?.toString(), - searchFetchCurrent = getOrNull(commonStats, CommonStats::getSearch, { it.total.fetchCurrent })?.toString(), - searchFetchTime = getOrNull(commonStats, CommonStats::getSearch, { it.total.fetchTime })?.toString(), - searchFetchTotal = getOrNull(commonStats, CommonStats::getSearch, { it.total.fetchCount })?.toString(), - searchOpenContexts = getOrNull(commonStats, CommonStats::getSearch, SearchStats::getOpenContexts)?.toString(), - searchQueryCurrent = getOrNull(commonStats, CommonStats::getSearch, { it.total.queryCurrent })?.toString(), - searchQueryTime = getOrNull(commonStats, CommonStats::getSearch, { it.total.queryTime })?.toString(), - searchQueryTotal = getOrNull(commonStats, CommonStats::getSearch, { it.total.queryCount })?.toString(), - searchScrollCurrent = getOrNull(commonStats, CommonStats::getSearch, { it.total.scrollCurrent })?.toString(), - searchScrollTime = getOrNull(commonStats, CommonStats::getSearch, { it.total.scrollTime })?.toString(), - searchScrollTotal = getOrNull(commonStats, CommonStats::getSearch, { it.total.scrollCount })?.toString(), - segmentsCount = getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getCount)?.toString(), - segmentsMemory = getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getZeroMemory)?.toString(), - segmentsIndexWriterMemory = - getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getIndexWriterMemory)?.toString(), - segmentsVersionMapMemory = getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getVersionMapMemory)?.toString(), - fixedBitsetMemory = getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getBitsetMemory)?.toString(), - globalCheckpoint = getOrNull(shardStats, ShardStats::getSeqNoStats, SeqNoStats::getGlobalCheckpoint)?.toString(), - localCheckpoint = getOrNull(shardStats, 
ShardStats::getSeqNoStats, SeqNoStats::getLocalCheckpoint)?.toString(), - maxSeqNo = getOrNull(shardStats, ShardStats::getSeqNoStats, SeqNoStats::getMaxSeqNo)?.toString(), - syncId = commitStats?.userData?.get(Engine.SYNC_COMMIT_ID), - unassignedAt = null, // Added below - unassignedDetails = null, // Added below - unassignedFor = null, // Added below - unassignedReason = null // Added below - ) - - if (shard.assignedToNode()) { - val id = shard.currentNodeId() - val node = StringBuilder() - node.append(stateResp.state.nodes().get(id).name) - - if (shard.relocating()) { - val reloNodeId = shard.relocatingNodeId() - val reloName = stateResp.state.nodes().get(reloNodeId).name - node.append(" -> ") - node.append(reloNodeId) - node.append(" ") - node.append(reloName) - } - - shardInfo = shardInfo.copy( - id = id, - node = node.toString() - ) - } - - if (shard.unassignedInfo() != null) { - val unassignedTime = Instant.ofEpochMilli(shard.unassignedInfo().unassignedTimeInMillis) - shardInfo = shardInfo.copy( - unassignedReason = shard.unassignedInfo().reason.name, - unassignedAt = UnassignedInfo.DATE_TIME_FORMATTER.format(unassignedTime), - unassignedFor = - TimeValue.timeValueMillis(System.currentTimeMillis() - shard.unassignedInfo().unassignedTimeInMillis).stringRep, - unassignedDetails = shard.unassignedInfo().details - ) - } - - if (shard.recoverySource() != null) { - shardInfo = shardInfo.copy( - recoverySourceType = shard.recoverySource().type.toString().lowercase(Locale.ROOT) - ) - } - - list.add(shardInfo) - } - return list - } - - data class ShardInfo( - val index: String?, - val shard: String?, - val primaryOrReplica: String?, - val state: String?, - val docs: String?, - val store: String?, - val id: String?, - val node: String?, - val completionSize: String?, - val fieldDataMemory: String?, - val fieldDataEvictions: String?, - val flushTotal: String?, - val flushTotalTime: String?, - val getCurrent: String?, - val getTime: String?, - val getTotal: String?, - val getExistsTime: String?, - val getExistsTotal: String?, - val getMissingTime: String?, - val getMissingTotal: String?, - val indexingDeleteCurrent: String?, - val indexingDeleteTime: String?, - val indexingDeleteTotal: String?, - val indexingIndexCurrent: String?, - val indexingIndexTime: String?, - val indexingIndexTotal: String?, - val indexingIndexFailed: String?, - val mergesCurrent: String?, - val mergesCurrentDocs: String?, - val mergesCurrentSize: String?, - val mergesTotal: String?, - val mergesTotalDocs: String?, - val mergesTotalSize: String?, - val mergesTotalTime: String?, - val queryCacheMemory: String?, - val queryCacheEvictions: String?, - val recoverySourceType: String?, - val refreshTotal: String?, - val refreshTime: String?, - val searchFetchCurrent: String?, - val searchFetchTime: String?, - val searchFetchTotal: String?, - val searchOpenContexts: String?, - val searchQueryCurrent: String?, - val searchQueryTime: String?, - val searchQueryTotal: String?, - val searchScrollCurrent: String?, - val searchScrollTime: String?, - val searchScrollTotal: String?, - val segmentsCount: String?, - val segmentsMemory: String?, - val segmentsIndexWriterMemory: String?, - val segmentsVersionMapMemory: String?, - val fixedBitsetMemory: String?, - val globalCheckpoint: String?, - val localCheckpoint: String?, - val maxSeqNo: String?, - val syncId: String?, - val unassignedAt: String?, - val unassignedDetails: String?, - val unassignedFor: String?, - val unassignedReason: String? 
- ) : ToXContentObject, Writeable { - companion object { - const val INDEX_FIELD = "index" - const val SHARD_FIELD = "shard" - const val PRIMARY_OR_REPLICA_FIELD = "primaryOrReplica" - const val STATE_FIELD = "state" - const val DOCS_FIELD = "docs" - const val STORE_FIELD = "store" - const val ID_FIELD = "id" - const val NODE_FIELD = "node" - const val COMPLETION_SIZE_FIELD = "completionSize" - const val FIELD_DATA_MEMORY_FIELD = "fielddataMemory" - const val FIELD_DATA_EVICTIONS_FIELD = "fielddataEvictions" - const val FLUSH_TOTAL_FIELD = "flushTotal" - const val FLUSH_TOTAL_TIME_FIELD = "flushTotalTime" - const val GET_CURRENT_FIELD = "getCurrent" - const val GET_TIME_FIELD = "getTime" - const val GET_TOTAL_FIELD = "getTotal" - const val GET_EXISTS_TIME_FIELD = "getExistsTime" - const val GET_EXISTS_TOTAL_FIELD = "getExistsTotal" - const val GET_MISSING_TIME_FIELD = "getMissingTime" - const val GET_MISSING_TOTAL_FIELD = "getMissingTotal" - const val INDEXING_DELETE_CURRENT_FIELD = "indexingDeleteCurrent" - const val INDEXING_DELETE_TIME_FIELD = "indexingDeleteTime" - const val INDEXING_DELETE_TOTAL_FIELD = "indexingDeleteTotal" - const val INDEXING_INDEX_CURRENT_FIELD = "indexingIndexCurrent" - const val INDEXING_INDEX_TIME_FIELD = "indexingIndexTime" - const val INDEXING_INDEX_TOTAL_FIELD = "indexingIndexTotal" - const val INDEXING_INDEX_FAILED_FIELD = "indexingIndexFailed" - const val MERGES_CURRENT_FIELD = "mergesCurrent" - const val MERGES_CURRENT_DOCS_FIELD = "mergesCurrentDocs" - const val MERGES_CURRENT_SIZE_FIELD = "mergesCurrentSize" - const val MERGES_TOTAL_FIELD = "mergesTotal" - const val MERGES_TOTAL_DOCS_FIELD = "mergesTotalDocs" - const val MERGES_TOTAL_SIZE_FIELD = "mergesTotalSize" - const val MERGES_TOTAL_TIME_FIELD = "mergesTotalTime" - const val QUERY_CACHE_MEMORY_FIELD = "queryCacheMemory" - const val QUERY_CACHE_EVICTIONS_FIELD = "queryCacheEvictions" - const val RECOVERY_SOURCE_TYPE_FIELD = "recoverysource.type" - const val REFRESH_TOTAL_FIELD = "refreshTotal" - const val REFRESH_TIME_FIELD = "refreshTime" - const val SEARCH_FETCH_CURRENT_FIELD = "searchFetchCurrent" - const val SEARCH_FETCH_TIME_FIELD = "searchFetchTime" - const val SEARCH_FETCH_TOTAL_FIELD = "searchFetchTotal" - const val SEARCH_OPEN_CONTEXTS_FIELD = "searchOpenContexts" - const val SEARCH_QUERY_CURRENT_FIELD = "searchQueryCurrent" - const val SEARCH_QUERY_TIME_FIELD = "searchQueryTime" - const val SEARCH_QUERY_TOTAL_FIELD = "searchQueryTotal" - const val SEARCH_SCROLL_CURRENT_FIELD = "searchScrollCurrent" - const val SEARCH_SCROLL_TIME_FIELD = "searchScrollTime" - const val SEARCH_SCROLL_TOTAL_FIELD = "searchScrollTotal" - const val SEGMENTS_COUNT_FIELD = "segmentsCount" - const val SEGMENTS_MEMORY_FIELD = "segmentsMemory" - const val SEGMENTS_INDEX_WRITER_MEMORY_FIELD = "segmentsIndexWriterMemory" - const val SEGMENTS_VERSION_MAP_MEMORY_FIELD = "segmentsVersionMapMemory" - const val FIXED_BITSET_MEMORY_FIELD = "fixedBitsetMemory" - const val GLOBAL_CHECKPOINT_FIELD = "globalCheckpoint" - const val LOCAL_CHECKPOINT_FIELD = "localCheckpoint" - const val MAX_SEQ_NO_FIELD = "maxSeqNo" - const val SYNC_ID_FIELD = "sync_id" - const val UNASSIGNED_AT_FIELD = "unassigned.at" - const val UNASSIGNED_DETAILS_FIELD = "unassigned.details" - const val UNASSIGNED_FOR_FIELD = "unassigned.for" - const val UNASSIGNED_REASON_FIELD = "unassigned.reason" - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.startObject() - .field(INDEX_FIELD, index) - 
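// These JSON keys reuse the _cat/shards column names verbatim (see the consts above), so dotted names such as "unassigned.at" and "recoverysource.type" are literal keys in the output, not nested objects.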
.field(SHARD_FIELD, shard) - .field(PRIMARY_OR_REPLICA_FIELD, primaryOrReplica) - .field(STATE_FIELD, state) - .field(DOCS_FIELD, docs) - .field(STORE_FIELD, store) - .field(ID_FIELD, id) - .field(NODE_FIELD, node) - .field(COMPLETION_SIZE_FIELD, completionSize) - .field(FIELD_DATA_MEMORY_FIELD, fieldDataMemory) - .field(FIELD_DATA_EVICTIONS_FIELD, fieldDataEvictions) - .field(FLUSH_TOTAL_FIELD, flushTotal) - .field(FLUSH_TOTAL_TIME_FIELD, flushTotalTime) - .field(GET_CURRENT_FIELD, getCurrent) - .field(GET_TIME_FIELD, getTime) - .field(GET_TOTAL_FIELD, getTotal) - .field(GET_EXISTS_TIME_FIELD, getExistsTime) - .field(GET_EXISTS_TOTAL_FIELD, getExistsTotal) - .field(GET_MISSING_TIME_FIELD, getMissingTime) - .field(GET_MISSING_TOTAL_FIELD, getMissingTotal) - .field(INDEXING_DELETE_CURRENT_FIELD, indexingDeleteCurrent) - .field(INDEXING_DELETE_TIME_FIELD, indexingDeleteTime) - .field(INDEXING_DELETE_TOTAL_FIELD, indexingDeleteTotal) - .field(INDEXING_INDEX_CURRENT_FIELD, indexingIndexCurrent) - .field(INDEXING_INDEX_TIME_FIELD, indexingIndexTime) - .field(INDEXING_INDEX_TOTAL_FIELD, indexingIndexTotal) - .field(INDEXING_INDEX_FAILED_FIELD, indexingIndexFailed) - .field(MERGES_CURRENT_FIELD, mergesCurrent) - .field(MERGES_CURRENT_DOCS_FIELD, mergesCurrentDocs) - .field(MERGES_CURRENT_SIZE_FIELD, mergesCurrentSize) - .field(MERGES_TOTAL_FIELD, mergesTotal) - .field(MERGES_TOTAL_DOCS_FIELD, mergesTotalDocs) - .field(MERGES_TOTAL_SIZE_FIELD, mergesTotalSize) - .field(MERGES_TOTAL_TIME_FIELD, mergesTotalTime) - .field(QUERY_CACHE_MEMORY_FIELD, queryCacheMemory) - .field(QUERY_CACHE_EVICTIONS_FIELD, queryCacheEvictions) - .field(RECOVERY_SOURCE_TYPE_FIELD, recoverySourceType) - .field(REFRESH_TOTAL_FIELD, refreshTotal) - .field(REFRESH_TIME_FIELD, refreshTime) - .field(SEARCH_FETCH_CURRENT_FIELD, searchFetchCurrent) - .field(SEARCH_FETCH_TIME_FIELD, searchFetchTime) - .field(SEARCH_FETCH_TOTAL_FIELD, searchFetchTotal) - .field(SEARCH_OPEN_CONTEXTS_FIELD, searchOpenContexts) - .field(SEARCH_QUERY_CURRENT_FIELD, searchQueryCurrent) - .field(SEARCH_QUERY_TIME_FIELD, searchQueryTime) - .field(SEARCH_QUERY_TOTAL_FIELD, searchQueryTotal) - .field(SEARCH_SCROLL_CURRENT_FIELD, searchScrollCurrent) - .field(SEARCH_SCROLL_TIME_FIELD, searchScrollTime) - .field(SEARCH_SCROLL_TOTAL_FIELD, searchScrollTotal) - .field(SEGMENTS_COUNT_FIELD, segmentsCount) - .field(SEGMENTS_MEMORY_FIELD, segmentsMemory) - .field(SEGMENTS_INDEX_WRITER_MEMORY_FIELD, segmentsIndexWriterMemory) - .field(SEGMENTS_VERSION_MAP_MEMORY_FIELD, segmentsVersionMapMemory) - .field(FIXED_BITSET_MEMORY_FIELD, fixedBitsetMemory) - .field(GLOBAL_CHECKPOINT_FIELD, globalCheckpoint) - .field(LOCAL_CHECKPOINT_FIELD, localCheckpoint) - .field(MAX_SEQ_NO_FIELD, maxSeqNo) - .field(SYNC_ID_FIELD, syncId) - .field(UNASSIGNED_AT_FIELD, unassignedAt) - .field(UNASSIGNED_DETAILS_FIELD, unassignedDetails) - .field(UNASSIGNED_FOR_FIELD, unassignedFor) - .field(UNASSIGNED_REASON_FIELD, unassignedReason) - return builder.endObject() - } - - override fun writeTo(out: StreamOutput) { - out.writeString(index) - out.writeString(shard) - out.writeString(primaryOrReplica) - out.writeString(state) - out.writeString(docs) - out.writeString(store) - out.writeString(id) - out.writeString(node) - out.writeString(completionSize) - out.writeString(fieldDataMemory) - out.writeString(fieldDataEvictions) - out.writeString(flushTotal) - out.writeString(flushTotalTime) - out.writeString(getCurrent) - out.writeString(getTime) - out.writeString(getTotal) - 
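// Note: every ShardInfo field is declared String?, but StreamOutput.writeString rejects null; a shard with missing stats would presumably need writeOptionalString (and a matching readOptionalString on the reader side) to round-trip safely.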
out.writeString(getExistsTime) - out.writeString(getExistsTotal) - out.writeString(getMissingTime) - out.writeString(getMissingTotal) - out.writeString(indexingDeleteCurrent) - out.writeString(indexingDeleteTime) - out.writeString(indexingDeleteTotal) - out.writeString(indexingIndexCurrent) - out.writeString(indexingIndexTime) - out.writeString(indexingIndexTotal) - out.writeString(indexingIndexFailed) - out.writeString(mergesCurrent) - out.writeString(mergesCurrentDocs) - out.writeString(mergesCurrentSize) - out.writeString(mergesTotal) - out.writeString(mergesTotalDocs) - out.writeString(mergesTotalSize) - out.writeString(mergesTotalTime) - out.writeString(queryCacheMemory) - out.writeString(queryCacheEvictions) - out.writeString(recoverySourceType) - out.writeString(refreshTotal) - out.writeString(refreshTime) - out.writeString(searchFetchCurrent) - out.writeString(searchFetchTime) - out.writeString(searchFetchTotal) - out.writeString(searchOpenContexts) - out.writeString(searchQueryCurrent) - out.writeString(searchQueryTime) - out.writeString(searchQueryTotal) - out.writeString(searchScrollCurrent) - out.writeString(searchScrollTime) - out.writeString(searchScrollTotal) - out.writeString(segmentsCount) - out.writeString(segmentsMemory) - out.writeString(segmentsIndexWriterMemory) - out.writeString(segmentsVersionMapMemory) - out.writeString(fixedBitsetMemory) - out.writeString(globalCheckpoint) - out.writeString(localCheckpoint) - out.writeString(maxSeqNo) - out.writeString(syncId) - out.writeString(unassignedAt) - out.writeString(unassignedDetails) - out.writeString(unassignedFor) - out.writeString(unassignedReason) - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensions.kt b/alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensions.kt deleted file mode 100644 index 92605c652..000000000 --- a/alerting/bin/main/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensions.kt +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util.clusterMetricsMonitorHelpers - -import org.opensearch.action.admin.cluster.health.ClusterHealthRequest -import org.opensearch.action.admin.cluster.health.ClusterHealthResponse -import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest -import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse -import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest -import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse -import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsResponse -import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest -import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse -import org.opensearch.action.admin.cluster.state.ClusterStateRequest -import org.opensearch.action.admin.cluster.state.ClusterStateResponse -import org.opensearch.action.admin.cluster.stats.ClusterStatsRequest -import org.opensearch.action.admin.cluster.stats.ClusterStatsResponse -import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksRequest -import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksResponse -import org.opensearch.action.admin.indices.recovery.RecoveryRequest -import org.opensearch.action.admin.indices.recovery.RecoveryResponse -import 
org.opensearch.action.admin.indices.settings.get.GetSettingsResponse -import org.opensearch.action.admin.indices.stats.IndicesStatsResponse -import org.opensearch.alerting.opensearchapi.convertToMap -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.settings.SupportedClusterMetricsSettings -import org.opensearch.alerting.settings.SupportedClusterMetricsSettings.Companion.resolveToActionRequest -import org.opensearch.client.Client -import org.opensearch.cluster.metadata.Metadata -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.support.XContentMapValues -import org.opensearch.commons.alerting.model.ClusterMetricsInput -import org.opensearch.core.action.ActionResponse - -/** - * Calls the appropriate transport action for the API requested in the [clusterMetricsInput]. - * @param clusterMetricsInput The [ClusterMetricsInput] to resolve. - * @param client The [Client] used to call the respective transport action. - * @throws IllegalArgumentException When the requested API is not supported by this feature. - */ -suspend fun executeTransportAction(clusterMetricsInput: ClusterMetricsInput, client: Client): ActionResponse { - val request = resolveToActionRequest(clusterMetricsInput) - return when (clusterMetricsInput.clusterMetricType) { - ClusterMetricsInput.ClusterMetricType.CAT_INDICES -> { - request as CatIndicesRequestWrapper - val healthResponse: ClusterHealthResponse = client.suspendUntil { admin().cluster().health(request.clusterHealthRequest, it) } - val indexSettingsResponse: GetSettingsResponse = - client.suspendUntil { admin().indices().getSettings(request.indexSettingsRequest, it) } - val indicesResponse: IndicesStatsResponse = - client.suspendUntil { admin().indices().stats(request.indicesStatsRequest, it) } - val stateResponse: ClusterStateResponse = - client.suspendUntil { admin().cluster().state(request.clusterStateRequest, it) } - return CatIndicesResponseWrapper(healthResponse, stateResponse, indexSettingsResponse, indicesResponse) - } - ClusterMetricsInput.ClusterMetricType.CAT_PENDING_TASKS -> - client.suspendUntil { - admin().cluster().pendingClusterTasks(request as PendingClusterTasksRequest, it) - } - ClusterMetricsInput.ClusterMetricType.CAT_RECOVERY -> - client.suspendUntil { admin().indices().recoveries(request as RecoveryRequest, it) } - ClusterMetricsInput.ClusterMetricType.CAT_SHARDS -> { - request as CatShardsRequestWrapper - val stateResponse: ClusterStateResponse = - client.suspendUntil { admin().cluster().state(request.clusterStateRequest, it) } - val indicesResponse: IndicesStatsResponse = - client.suspendUntil { admin().indices().stats(request.indicesStatsRequest, it) } - return CatShardsResponseWrapper(stateResponse, indicesResponse) - } - ClusterMetricsInput.ClusterMetricType.CAT_SNAPSHOTS -> - client.suspendUntil { admin().cluster().getSnapshots(request as GetSnapshotsRequest, it) } - ClusterMetricsInput.ClusterMetricType.CAT_TASKS -> - client.suspendUntil { admin().cluster().listTasks(request as ListTasksRequest, it) } - ClusterMetricsInput.ClusterMetricType.CLUSTER_HEALTH -> - client.suspendUntil { admin().cluster().health(request as ClusterHealthRequest, it) } - ClusterMetricsInput.ClusterMetricType.CLUSTER_SETTINGS -> { - val stateResponse: ClusterStateResponse = - client.suspendUntil { admin().cluster().state(request as ClusterStateRequest, it) } - val metadata: Metadata = stateResponse.state.metadata - return ClusterGetSettingsResponse(metadata.persistentSettings(), 
metadata.transientSettings(), Settings.EMPTY) - } - ClusterMetricsInput.ClusterMetricType.CLUSTER_STATS -> - client.suspendUntil { admin().cluster().clusterStats(request as ClusterStatsRequest, it) } - ClusterMetricsInput.ClusterMetricType.NODES_STATS -> - client.suspendUntil { admin().cluster().nodesStats(request as NodesStatsRequest, it) } - else -> throw IllegalArgumentException("Unsupported API request type: ${request.javaClass.name}") - } -} - -/** - * Populates a [HashMap] with the values in the [ActionResponse]. - * @return The [ActionResponse] values formatted in a [HashMap]. - * @throws IllegalArgumentException when the [ActionResponse] is not supported by this feature. - */ -fun ActionResponse.toMap(): Map<String, Any> { - return when (this) { - is ClusterHealthResponse -> redactFieldsFromResponse( - this.convertToMap(), - SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CLUSTER_HEALTH.defaultPath) - ) - is ClusterStatsResponse -> redactFieldsFromResponse( - this.convertToMap(), - SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CLUSTER_STATS.defaultPath) - ) - is ClusterGetSettingsResponse -> redactFieldsFromResponse( - this.convertToMap(), - SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CLUSTER_SETTINGS.defaultPath) - ) - is CatIndicesResponseWrapper -> redactFieldsFromResponse( - this.convertToMap(), - SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CAT_INDICES.defaultPath) - ) - is CatShardsResponseWrapper -> redactFieldsFromResponse( - this.convertToMap(), - SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CAT_SHARDS.defaultPath) - ) - is NodesStatsResponse -> redactFieldsFromResponse( - this.convertToMap(), - SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.NODES_STATS.defaultPath) - ) - is PendingClusterTasksResponse -> redactFieldsFromResponse( - this.convertToMap(), - SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CAT_PENDING_TASKS.defaultPath) - ) - is RecoveryResponse -> redactFieldsFromResponse( - this.convertToMap(), - SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CAT_RECOVERY.defaultPath) - ) - is GetSnapshotsResponse -> redactFieldsFromResponse( - this.convertToMap(), - SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CAT_SNAPSHOTS.defaultPath) - ) - is ListTasksResponse -> redactFieldsFromResponse( - this.convertToMap(), - SupportedClusterMetricsSettings.getSupportedJsonPayload(ClusterMetricsInput.ClusterMetricType.CAT_TASKS.defaultPath) - ) - else -> throw IllegalArgumentException("Unsupported ActionResponse type: ${this.javaClass.name}") - } -} - -/** - * Populates a [HashMap] with only the values that support being exposed to users. - * @param mappedActionResponse The response from the [ClusterMetricsInput] API call. - * @param supportedJsonPayload The JSON payload as configured in [SupportedClusterMetricsSettings.RESOURCE_FILE]. - * @return The response values [HashMap] without the redacted fields. 
- */ -@Suppress("UNCHECKED_CAST") -fun redactFieldsFromResponse( - mappedActionResponse: Map<String, Any>, - supportedJsonPayload: Map<String, ArrayList<String>> -): Map<String, Any> { - return when { - supportedJsonPayload.isEmpty() -> mappedActionResponse - else -> { - val output = hashMapOf<String, Any>() - for ((key, value) in supportedJsonPayload) { - when (val mappedValue = mappedActionResponse[key]) { - is Map<*, *> -> output[key] = XContentMapValues.filter( - mappedActionResponse[key] as MutableMap<String, *>?, - value.toTypedArray(), - arrayOf() - ) - else -> output[key] = mappedValue ?: hashMapOf<String, Any>() - } - } - output - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationConversionUtils.kt b/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationConversionUtils.kt deleted file mode 100644 index 667548c60..000000000 --- a/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationConversionUtils.kt +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util.destinationmigration - -import org.apache.http.client.utils.URIBuilder -import org.opensearch.alerting.model.destination.Destination -import org.opensearch.alerting.model.destination.email.EmailAccount -import org.opensearch.alerting.model.destination.email.Recipient -import org.opensearch.alerting.util.DestinationType -import org.opensearch.commons.notifications.model.Chime -import org.opensearch.commons.notifications.model.ConfigType -import org.opensearch.commons.notifications.model.Email -import org.opensearch.commons.notifications.model.EmailGroup -import org.opensearch.commons.notifications.model.EmailRecipient -import org.opensearch.commons.notifications.model.HttpMethodType -import org.opensearch.commons.notifications.model.MethodType -import org.opensearch.commons.notifications.model.NotificationConfig -import org.opensearch.commons.notifications.model.Slack -import org.opensearch.commons.notifications.model.SmtpAccount -import org.opensearch.commons.notifications.model.Webhook -import org.opensearch.core.common.Strings -import java.net.URI -import java.net.URISyntaxException -import java.util.Locale - -class DestinationConversionUtils { - - companion object { - - fun convertDestinationToNotificationConfig(destination: Destination): NotificationConfig? 
{ - when (destination.type) { - DestinationType.CHIME -> { - val alertChime = destination.chime ?: return null - val chime = Chime(alertChime.url) - val description = "Chime destination created from the Alerting plugin" - return NotificationConfig( - destination.name, - description, - ConfigType.CHIME, - chime - ) - } - DestinationType.SLACK -> { - val alertSlack = destination.slack ?: return null - val slack = Slack(alertSlack.url) - val description = "Slack destination created from the Alerting plugin" - return NotificationConfig( - destination.name, - description, - ConfigType.SLACK, - slack - ) - } - // TODO: Add this back after adding SNS to Destination data models -// DestinationType.SNS -> { -// val alertSNS = destination.sns ?: return null -// val sns = Sns(alertSNS.topicARN, alertSNS.roleARN) -// val description = "SNS destination created from the Alerting plugin" -// return NotificationConfig( -// destination.name, -// description, -// ConfigType.SNS, -// sns -// ) -// } - DestinationType.CUSTOM_WEBHOOK -> { - val alertWebhook = destination.customWebhook ?: return null - val uri = buildUri( - alertWebhook.url, - alertWebhook.scheme, - alertWebhook.host, - alertWebhook.port, - alertWebhook.path, - alertWebhook.queryParams - ).toString() - val methodType = when (alertWebhook.method?.uppercase(Locale.ENGLISH)) { - "POST" -> HttpMethodType.POST - "PUT" -> HttpMethodType.PUT - "PATCH" -> HttpMethodType.PATCH - else -> HttpMethodType.POST - } - val webhook = Webhook(uri, alertWebhook.headerParams, methodType) - val description = "Webhook destination created from the Alerting plugin" - return NotificationConfig( - destination.name, - description, - ConfigType.WEBHOOK, - webhook - ) - } - DestinationType.EMAIL -> { - val alertEmail = destination.email ?: return null - val recipients = mutableListOf<EmailRecipient>() - val emailGroupIds = mutableListOf<String>() - alertEmail.recipients.forEach { - if (it.type == Recipient.RecipientType.EMAIL_GROUP) { - it.emailGroupID?.let { emailGroup -> emailGroupIds.add(emailGroup) } - } else it.email?.let { emailRecipient -> recipients.add(EmailRecipient(emailRecipient)) } - } - - val email = Email(alertEmail.emailAccountID, recipients, emailGroupIds) - val description = "Email destination created from the Alerting plugin" - return NotificationConfig( - destination.name, - description, - ConfigType.EMAIL, - email - ) - } - else -> return null - } - } - - fun convertEmailAccountToNotificationConfig(emailAccount: EmailAccount): NotificationConfig { - val methodType = convertAlertingToNotificationMethodType(emailAccount.method) - val smtpAccount = SmtpAccount(emailAccount.host, emailAccount.port, methodType, emailAccount.email) - val description = "Email account created from the Alerting plugin" - return NotificationConfig( - emailAccount.name, - description, - ConfigType.SMTP_ACCOUNT, - smtpAccount - ) - } - - fun convertEmailGroupToNotificationConfig( - emailGroup: org.opensearch.alerting.model.destination.email.EmailGroup - ): NotificationConfig { - val recipients = mutableListOf<EmailRecipient>() - emailGroup.emails.forEach { - recipients.add(EmailRecipient(it.email)) - } - val notificationEmailGroup = EmailGroup(recipients) - - val description = "Email group created from the Alerting plugin" - return NotificationConfig( - emailGroup.name, - description, - ConfigType.EMAIL_GROUP, - notificationEmailGroup - ) - } - - private fun buildUri( - endpoint: String?, - scheme: String?, - host: String?, - port: Int, - path: String?, - queryParams: Map<String, String> - ): URI? 
{ - return try { - if (Strings.isNullOrEmpty(endpoint)) { - if (host == null) { - throw IllegalStateException("No host was provided when endpoint was null") - } - var uriScheme = scheme - if (Strings.isNullOrEmpty(scheme)) { - uriScheme = "https" - } - val uriBuilder = URIBuilder() - if (queryParams.isNotEmpty()) { - for ((key, value) in queryParams) uriBuilder.addParameter(key, value) - } - return uriBuilder.setScheme(uriScheme).setHost(host).setPort(port).setPath(path).build() - } - URIBuilder(endpoint).build() - } catch (e: URISyntaxException) { - throw IllegalStateException("Error creating URI", e) - } - } - - fun convertAlertingToNotificationMethodType(alertMethodType: EmailAccount.MethodType): MethodType { - return when (alertMethodType) { - EmailAccount.MethodType.NONE -> MethodType.NONE - EmailAccount.MethodType.SSL -> MethodType.SSL - EmailAccount.MethodType.TLS -> MethodType.START_TLS - } - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationMigrationCoordinator.kt b/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationMigrationCoordinator.kt deleted file mode 100644 index 82891396e..000000000 --- a/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationMigrationCoordinator.kt +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util.destinationmigration - -import kotlinx.coroutines.CoroutineName -import kotlinx.coroutines.CoroutineScope -import kotlinx.coroutines.Dispatchers -import kotlinx.coroutines.launch -import org.apache.logging.log4j.LogManager -import org.opensearch.alerting.core.ScheduledJobIndices -import org.opensearch.client.Client -import org.opensearch.client.node.NodeClient -import org.opensearch.cluster.ClusterChangedEvent -import org.opensearch.cluster.ClusterStateListener -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.lifecycle.LifecycleListener -import org.opensearch.common.unit.TimeValue -import org.opensearch.threadpool.Scheduler -import org.opensearch.threadpool.ThreadPool -import kotlin.coroutines.CoroutineContext - -class DestinationMigrationCoordinator( - private val client: Client, - private val clusterService: ClusterService, - private val threadPool: ThreadPool, - private val scheduledJobIndices: ScheduledJobIndices -) : ClusterStateListener, CoroutineScope, LifecycleListener() { - - private val logger = LogManager.getLogger(javaClass) - - override val coroutineContext: CoroutineContext - get() = Dispatchers.Default + CoroutineName("DestinationMigrationCoordinator") - - private var scheduledMigration: Scheduler.Cancellable? 
= null - - @Volatile - private var runningLock = false - - init { - clusterService.addListener(this) - clusterService.addLifecycleListener(this) - } - - override fun clusterChanged(event: ClusterChangedEvent) { - logger.info("Detected cluster change event for destination migration") - if (DestinationMigrationUtilService.finishFlag) { - logger.info("Reset destination migration process.") - scheduledMigration?.cancel() - DestinationMigrationUtilService.finishFlag = false - } - if ( - event.localNodeClusterManager() && - !runningLock && - (scheduledMigration == null || scheduledMigration!!.isCancelled) - ) { - try { - runningLock = true - initMigrateDestinations() - } finally { - runningLock = false - } - } else if (!event.localNodeClusterManager()) { - scheduledMigration?.cancel() - } - } - - private fun initMigrateDestinations() { - if (!scheduledJobIndices.scheduledJobIndexExists()) { - logger.debug("Alerting config index is not initialized") - scheduledMigration?.cancel() - return - } - - if (!clusterService.state().nodes().isLocalNodeElectedMaster) { - scheduledMigration?.cancel() - return - } - - if (DestinationMigrationUtilService.finishFlag) { - logger.info("Destination migration is already complete, cancelling migration process.") - scheduledMigration?.cancel() - return - } - - val scheduledJob = Runnable { - launch { - try { - if (DestinationMigrationUtilService.finishFlag) { - logger.info("Cancel background destination migration process.") - scheduledMigration?.cancel() - } - - logger.info("Performing migration of destination data.") - DestinationMigrationUtilService.migrateDestinations(client as NodeClient) - } catch (e: Exception) { - logger.error("Failed to migrate destination data", e) - } - } - } - - scheduledMigration = threadPool.scheduleWithFixedDelay(scheduledJob, TimeValue.timeValueMinutes(1), ThreadPool.Names.MANAGEMENT) - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilService.kt b/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilService.kt deleted file mode 100644 index f4c650a9f..000000000 --- a/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilService.kt +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util.destinationmigration - -import org.apache.logging.log4j.LogManager -import org.opensearch.action.bulk.BulkRequest -import org.opensearch.action.bulk.BulkResponse -import org.opensearch.action.delete.DeleteRequest -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.action.support.WriteRequest -import org.opensearch.alerting.model.destination.Destination -import org.opensearch.alerting.model.destination.email.EmailAccount -import org.opensearch.alerting.model.destination.email.EmailGroup -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.util.destinationmigration.DestinationConversionUtils.Companion.convertDestinationToNotificationConfig -import org.opensearch.alerting.util.destinationmigration.DestinationConversionUtils.Companion.convertEmailAccountToNotificationConfig -import org.opensearch.alerting.util.destinationmigration.DestinationConversionUtils.Companion.convertEmailGroupToNotificationConfig -import 
org.opensearch.alerting.util.destinationmigration.NotificationApiUtils.Companion.createNotificationConfig -import org.opensearch.client.node.NodeClient -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.ConfigConstants -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.notifications.action.CreateNotificationConfigRequest -import org.opensearch.commons.notifications.model.NotificationConfig -import org.opensearch.commons.notifications.model.NotificationConfigInfo -import org.opensearch.core.common.Strings -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils -import org.opensearch.index.query.QueryBuilders -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.search.fetch.subphase.FetchSourceContext -import java.time.Instant - -class DestinationMigrationUtilService { - - companion object { - - private val logger = LogManager.getLogger(DestinationMigrationUtilService::class) - - @Volatile - private var runningLock = false // In case 2 migrateDestinations() processes are running - - // Used in DestinationMigrationCoordinator to cancel scheduled process - @Volatile - var finishFlag = false - internal set - - suspend fun migrateDestinations(client: NodeClient) { - if (runningLock) { - logger.info("There is already a destination migration process running...") - return - } else if (finishFlag) { - logger.info("Destination migration has finished.") - return - } - try { - runningLock = true - - val emailAccountsToMigrate = retrieveConfigsToMigrate(client, "email_account") - val emailGroupsToMigrate = retrieveConfigsToMigrate(client, "email_group") - val destinationsToMigrate = retrieveConfigsToMigrate(client, "destination") - val configsToMigrate = emailAccountsToMigrate + emailGroupsToMigrate + destinationsToMigrate - logger.info( - "Need to migrate ${emailAccountsToMigrate.size} email accounts, " + - "${emailGroupsToMigrate.size} email groups and " + - "${destinationsToMigrate.size} destinations " + - "(${configsToMigrate.size} configs total)" - ) - if (configsToMigrate.isEmpty()) { - finishFlag = true - runningLock = false - return - } - val migratedConfigs = createNotificationChannelIfNotExists(client, configsToMigrate) - logger.info("Migrated ${migratedConfigs.size} configs") - val failedDeleteConfigs = deleteOldDestinations(client, migratedConfigs) - logger.info("Failed to delete ${failedDeleteConfigs.size} configs from migration process cleanup") - } finally { - runningLock = false - } - } - - private suspend fun deleteOldDestinations(client: NodeClient, destinationIds: List<String>): List<String> { - val bulkDeleteRequest = BulkRequest().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - destinationIds.forEach { - val deleteRequest = DeleteRequest(ScheduledJob.SCHEDULED_JOBS_INDEX, it) - bulkDeleteRequest.add(deleteRequest) - } - - val failedToDeleteDestinations = mutableListOf<String>() - try { - val bulkResponse: BulkResponse = client.suspendUntil { client.bulk(bulkDeleteRequest, it) } - failedToDeleteDestinations.addAll(bulkResponse.items.filter { it.isFailed }.map { it.id }) - } catch (e: Exception) { - logger.error("Failed to delete all destinations", e) - failedToDeleteDestinations.addAll(destinationIds) - } - return failedToDeleteDestinations - } - - private suspend fun 
createNotificationChannelIfNotExists( - client: NodeClient, - notificationConfigInfoList: List<Pair<NotificationConfigInfo, String>> - ): List<String> { - val migratedNotificationConfigs = mutableListOf<String>() - notificationConfigInfoList.forEach { - val notificationConfigInfo = it.first - val userStr = it.second - val createNotificationConfigRequest = CreateNotificationConfigRequest( - notificationConfigInfo.notificationConfig, - notificationConfigInfo.configId - ) - try { - // TODO: recreate user object to pass along the same permissions. Make sure this works when user based security is removed - client.threadPool().threadContext.stashContext().use { - if (userStr.isNotBlank()) { - client.threadPool().threadContext - .putTransient(ConfigConstants.OPENSEARCH_SECURITY_USER_INFO_THREAD_CONTEXT, userStr) - } - val createResponse = createNotificationConfig(client, createNotificationConfigRequest) - migratedNotificationConfigs.add(createResponse.configId) - logger.debug("Migrated destination: ${createResponse.configId}") - } - } catch (e: Exception) { - if (e.message?.contains("version conflict, document already exists") == true) { - migratedNotificationConfigs.add(notificationConfigInfo.configId) - } else { - logger.warn( - "Failed to migrate Destination ${notificationConfigInfo.configId}: could not " + - "create the channel in the Notification plugin.", - e - ) - } - } - } - return migratedNotificationConfigs - } - - private suspend fun retrieveConfigsToMigrate(client: NodeClient, configName: String): List<Pair<NotificationConfigInfo, String>> { - var start = 0 - val size = 100 - val notificationConfigInfoList = mutableListOf<Pair<NotificationConfigInfo, String>>() - var hasMoreResults = true - - while (hasMoreResults) { - val searchSourceBuilder = SearchSourceBuilder() - .size(size) - .from(start) - .fetchSource(FetchSourceContext(true, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY)) - .seqNoAndPrimaryTerm(true) - .version(true) - val queryBuilder = QueryBuilders.boolQuery() - .should(QueryBuilders.existsQuery(configName)) - searchSourceBuilder.query(queryBuilder) - - val searchRequest = SearchRequest() - .source(searchSourceBuilder) - .indices(ScheduledJob.SCHEDULED_JOBS_INDEX) - val response: SearchResponse = client.suspendUntil { client.search(searchRequest, it) } - - if (response.status() != RestStatus.OK) { - logger.error("Failed to retrieve ${configName}s to migrate") - hasMoreResults = false - } else { - if (response.hits.hits.isEmpty()) { - hasMoreResults = false - } - for (hit in response.hits) { - val xcp = XContentType.JSON.xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, hit.sourceAsString) - var notificationConfig: NotificationConfig? 
= null - var userStr = "" - when (configName) { - "email_group" -> { - val emailGroup = EmailGroup.parseWithType(xcp, hit.id, hit.version) - notificationConfig = convertEmailGroupToNotificationConfig(emailGroup) - } - "email_account" -> { - val emailAccount = EmailAccount.parseWithType(xcp, hit.id, hit.version) - notificationConfig = convertEmailAccountToNotificationConfig(emailAccount) - } - "destination" -> { - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, xcp.nextToken(), xcp) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - val destination = Destination.parse( - xcp, - hit.id, - hit.version, - hit.seqNo.toInt(), - hit.primaryTerm.toInt() - ) - userStr = destination.user.toString() - notificationConfig = convertDestinationToNotificationConfig(destination) - } - else -> logger.info("Unrecognized config name [$configName] to migrate") - } - - if (notificationConfig != null) { - notificationConfigInfoList.add( - Pair( - NotificationConfigInfo( - hit.id, - Instant.now(), - Instant.now(), - notificationConfig - ), - userStr - ) - ) - } - } - } - - start += size - } - - return notificationConfigInfoList - } - } -} diff --git a/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/NotificationApiUtils.kt b/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/NotificationApiUtils.kt deleted file mode 100644 index 9d77e5b17..000000000 --- a/alerting/bin/main/org/opensearch/alerting/util/destinationmigration/NotificationApiUtils.kt +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util.destinationmigration - -import org.apache.logging.log4j.LogManager -import org.opensearch.OpenSearchSecurityException -import org.opensearch.OpenSearchStatusException -import org.opensearch.action.bulk.BackoffPolicy -import org.opensearch.alerting.model.destination.Destination -import org.opensearch.alerting.opensearchapi.retryForNotification -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.client.Client -import org.opensearch.client.node.NodeClient -import org.opensearch.common.unit.TimeValue -import org.opensearch.commons.ConfigConstants -import org.opensearch.commons.destination.message.LegacyBaseMessage -import org.opensearch.commons.notifications.NotificationsPluginInterface -import org.opensearch.commons.notifications.action.CreateNotificationConfigRequest -import org.opensearch.commons.notifications.action.CreateNotificationConfigResponse -import org.opensearch.commons.notifications.action.GetNotificationConfigRequest -import org.opensearch.commons.notifications.action.GetNotificationConfigResponse -import org.opensearch.commons.notifications.action.LegacyPublishNotificationRequest -import org.opensearch.commons.notifications.action.LegacyPublishNotificationResponse -import org.opensearch.commons.notifications.action.SendNotificationResponse -import org.opensearch.commons.notifications.model.ChannelMessage -import org.opensearch.commons.notifications.model.EventSource -import org.opensearch.commons.notifications.model.NotificationConfigInfo -import org.opensearch.commons.notifications.model.SeverityType -import org.opensearch.core.rest.RestStatus - -class NotificationApiUtils { - - companion object { - - private val logger = 
LogManager.getLogger(NotificationApiUtils::class) - - private val defaultRetryPolicy = - BackoffPolicy.constantBackoff(TimeValue.timeValueMillis(100), 2) - - /** - * Gets a NotificationConfigInfo object by ID if it exists. - */ - suspend fun getNotificationConfigInfo(client: NodeClient, id: String): NotificationConfigInfo? { - return try { - val res: GetNotificationConfigResponse = getNotificationConfig(client, GetNotificationConfigRequest(setOf(id))) - res.searchResult.objectList.firstOrNull() - } catch (e: OpenSearchSecurityException) { - throw e - } catch (e: OpenSearchStatusException) { - if (e.status() == RestStatus.NOT_FOUND) { - logger.debug("Notification config [$id] was not found") - } - null - } - } - - private suspend fun getNotificationConfig( - client: NodeClient, - getNotificationConfigRequest: GetNotificationConfigRequest - ): GetNotificationConfigResponse { - val getNotificationConfigResponse: GetNotificationConfigResponse = NotificationsPluginInterface.suspendUntil { - this.getNotificationConfig( - client, - getNotificationConfigRequest, - it - ) - } - return getNotificationConfigResponse - } - - suspend fun createNotificationConfig( - client: NodeClient, - createNotificationConfigRequest: CreateNotificationConfigRequest, - retryPolicy: BackoffPolicy = defaultRetryPolicy - ): CreateNotificationConfigResponse { - lateinit var createNotificationConfigResponse: CreateNotificationConfigResponse - val userStr = client.threadPool().threadContext - .getTransient(ConfigConstants.OPENSEARCH_SECURITY_USER_INFO_THREAD_CONTEXT) - client.threadPool().threadContext.stashContext().use { - client.threadPool().threadContext.putTransient(ConfigConstants.OPENSEARCH_SECURITY_USER_INFO_THREAD_CONTEXT, userStr) - retryPolicy.retryForNotification(logger) { - createNotificationConfigResponse = NotificationsPluginInterface.suspendUntil { - this.createNotificationConfig( - client, - createNotificationConfigRequest, - it - ) - } - } - } - return createNotificationConfigResponse - } - } -} - -/** - * Extension function for publishing a notification to a legacy destination. - * - * We now support the new channels from the Notification plugin. However, we still need to support - * the old legacy destinations that have not been migrated to Notification configs. To accommodate this even after removing the - * notification logic in Alerting, we have a separate API in the NotificationsPluginInterface that allows - * us to publish these old legacy ones directly. - */ -suspend fun LegacyBaseMessage.publishLegacyNotification(client: Client): String { - val baseMessage = this - val res: LegacyPublishNotificationResponse = NotificationsPluginInterface.suspendUntil { - this.publishLegacyNotification( - (client as NodeClient), - LegacyPublishNotificationRequest(baseMessage), - it - ) - } - validateResponseStatus(RestStatus.fromCode(res.destinationResponse.statusCode), res.destinationResponse.responseContent) - return res.destinationResponse.responseContent -} - -/** - * Extension function for publishing a notification to a channel in the Notification plugin. 
- */ -suspend fun NotificationConfigInfo.sendNotification(client: Client, title: String, compiledMessage: String): String { - val config = this - val res: SendNotificationResponse = NotificationsPluginInterface.suspendUntil { - this.sendNotification( - (client as NodeClient), - EventSource(title, config.configId, SeverityType.INFO), - ChannelMessage(compiledMessage, null, null), - listOf(config.configId), - it - ) - } - validateResponseStatus(res.getStatus(), res.notificationEvent.toString()) - return res.notificationEvent.toString() -} - -/** - * A placeholder Alerting title will be used if no subject is passed in. - */ -fun NotificationConfigInfo.getTitle(subject: String?): String { - val defaultTitle = "Alerting-Notification Action" - return if (subject.isNullOrEmpty()) defaultTitle else subject -} - -/** - * All valid response statuses. - */ -private val VALID_RESPONSE_STATUS = setOf( - RestStatus.OK.status, - RestStatus.CREATED.status, - RestStatus.ACCEPTED.status, - RestStatus.NON_AUTHORITATIVE_INFORMATION.status, - RestStatus.NO_CONTENT.status, - RestStatus.RESET_CONTENT.status, - RestStatus.PARTIAL_CONTENT.status, - RestStatus.MULTI_STATUS.status -) - -@Throws(OpenSearchStatusException::class) -fun validateResponseStatus(restStatus: RestStatus, responseContent: String) { - if (!VALID_RESPONSE_STATUS.contains(restStatus.status)) { - throw OpenSearchStatusException("Failed: $responseContent", restStatus) - } -} - -/** - * Small data class used to hold either a Destination or a Notification channel config. - * This is used since an ID being referenced in a Monitor action could be either config depending on if - * it's prior to or after migration. - */ -data class NotificationActionConfigs(val destination: Destination?, val channel: NotificationConfigInfo?) 
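The extension functions above all funnel OpenSearch's callback-style transport APIs through a coroutine bridge. A minimal sketch of such an adapter, assuming only kotlinx-coroutines and the core ActionListener type (the name suspendUntilSketch is hypothetical; the plugin's real helper is suspendUntil in org.opensearch.alerting.opensearchapi):

    import kotlinx.coroutines.suspendCancellableCoroutine
    import org.opensearch.core.action.ActionListener
    import kotlin.coroutines.resume
    import kotlin.coroutines.resumeWithException

    // Suspends the coroutine, hands `block` a listener, and resumes when the listener
    // fires: onResponse resumes normally, onFailure resumes with the exception.
    suspend fun <T> suspendUntilSketch(block: (ActionListener<T>) -> Unit): T =
        suspendCancellableCoroutine { cont ->
            block(object : ActionListener<T> {
                override fun onResponse(response: T) = cont.resume(response)
                override fun onFailure(e: Exception) = cont.resumeWithException(e)
            })
        }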
diff --git a/alerting/bin/main/org/opensearch/alerting/workflow/CompositeWorkflowRunner.kt b/alerting/bin/main/org/opensearch/alerting/workflow/CompositeWorkflowRunner.kt deleted file mode 100644 index 94e8b9bc3..000000000 --- a/alerting/bin/main/org/opensearch/alerting/workflow/CompositeWorkflowRunner.kt +++ /dev/null @@ -1,395 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.workflow - -import org.apache.logging.log4j.LogManager -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.search.SearchResponse -import org.opensearch.alerting.BucketLevelMonitorRunner -import org.opensearch.alerting.DocumentLevelMonitorRunner -import org.opensearch.alerting.MonitorRunnerExecutionContext -import org.opensearch.alerting.QueryLevelMonitorRunner -import org.opensearch.alerting.WorkflowMetadataService -import org.opensearch.alerting.model.ChainedAlertTriggerRunResult -import org.opensearch.alerting.model.MonitorRunResult -import org.opensearch.alerting.model.WorkflowRunResult -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.script.ChainedAlertTriggerExecutionContext -import org.opensearch.alerting.util.AlertingException -import org.opensearch.alerting.util.isDocLevelMonitor -import org.opensearch.alerting.util.isQueryLevelMonitor -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.ChainedAlertTrigger -import org.opensearch.commons.alerting.model.CompositeInput -import org.opensearch.commons.alerting.model.DataSources -import org.opensearch.commons.alerting.model.Delegate -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.commons.alerting.util.isBucketLevelMonitor -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils -import org.opensearch.index.query.QueryBuilders -import org.opensearch.index.query.QueryBuilders.boolQuery -import org.opensearch.index.query.QueryBuilders.existsQuery -import org.opensearch.index.query.QueryBuilders.termsQuery -import java.time.Instant -import java.time.LocalDateTime -import java.time.ZoneOffset -import java.util.UUID - -object CompositeWorkflowRunner : WorkflowRunner() { - - private val logger = LogManager.getLogger(javaClass) - - override suspend fun runWorkflow( - workflow: Workflow, - monitorCtx: MonitorRunnerExecutionContext, - periodStart: Instant, - periodEnd: Instant, - dryRun: Boolean, - ): WorkflowRunResult { - val workflowExecutionStartTime = Instant.now() - - val isTempWorkflow = dryRun || workflow.id == Workflow.NO_ID - - val executionId = generateExecutionId(isTempWorkflow, workflow) - - val (workflowMetadata, _) = WorkflowMetadataService.getOrCreateWorkflowMetadata( - workflow = workflow, - skipIndex = isTempWorkflow, - executionId = executionId - ) - var dataSources: DataSources? 
= null - logger.debug("Workflow ${workflow.id} in $executionId execution is running") - val delegates = (workflow.inputs[0] as CompositeInput).sequence.delegates.sortedBy { it.order } - var monitors: List<Monitor> - - try { - monitors = monitorCtx.workflowService!!.getMonitorsById(delegates.map { it.monitorId }, delegates.size) - } catch (e: Exception) { - logger.error("Failed getting workflow delegates. Error: ${e.message}", e) - return WorkflowRunResult( - workflow.id, - workflow.name, - emptyList(), - workflowExecutionStartTime, - Instant.now(), - executionId, - AlertingException.wrap(e) - ) - } - // Validate the monitors size - validateMonitorSize(delegates, monitors, workflow) - val monitorsById = monitors.associateBy { it.id } - val resultList = mutableListOf<MonitorRunResult<*>>() - var lastErrorDelegateRun: Exception? = null - - for (delegate in delegates) { - var indexToDocIds = mapOf<String, List<String>>() - var delegateMonitor: Monitor - delegateMonitor = monitorsById[delegate.monitorId] - ?: throw AlertingException.wrap( - IllegalStateException("Delegate monitor not found ${delegate.monitorId} for the workflow ${workflow.id}") - ) - if (delegate.chainedMonitorFindings != null) { - val chainedMonitorIds: MutableList<String> = mutableListOf() - if (delegate.chainedMonitorFindings!!.monitorId.isNullOrBlank()) { - chainedMonitorIds.addAll(delegate.chainedMonitorFindings!!.monitorIds) - } else { - chainedMonitorIds.add(delegate.chainedMonitorFindings!!.monitorId!!) - } - val chainedMonitors = mutableListOf<Monitor>() - chainedMonitorIds.forEach { - val chainedMonitor = monitorsById[it] - ?: throw AlertingException.wrap( - IllegalStateException("Chained finding monitor not found ${delegate.monitorId} for the workflow ${workflow.id}") - ) - chainedMonitors.add(chainedMonitor) - } - - try { - indexToDocIds = monitorCtx.workflowService!!.getFindingDocIdsByExecutionId(chainedMonitors, executionId) - } catch (e: Exception) { - logger.error("Failed to execute workflow due to failure in chained findings. Error: ${e.message}", e) - return WorkflowRunResult( - workflow.id, workflow.name, emptyList(), workflowExecutionStartTime, Instant.now(), executionId, - AlertingException.wrap(e) - ) - } - } - val workflowRunContext = WorkflowRunContext( - workflowId = workflowMetadata.workflowId, - workflowMetadataId = workflowMetadata.id, - chainedMonitorId = delegate.chainedMonitorFindings?.monitorId, - matchingDocIdsPerIndex = indexToDocIds, - auditDelegateMonitorAlerts = if (workflow.auditDelegateMonitorAlerts == null) true - else workflow.auditDelegateMonitorAlerts!! - ) - try { - dataSources = delegateMonitor.dataSources - val delegateRunResult = - runDelegateMonitor(delegateMonitor, monitorCtx, periodStart, periodEnd, dryRun, workflowRunContext, executionId) - resultList.add(delegateRunResult!!)
- } catch (ex: Exception) { - logger.error("Error executing workflow delegate monitor ${delegate.monitorId}", ex) - lastErrorDelegateRun = AlertingException.wrap(ex) - break - } - } - logger.debug("Workflow ${workflow.id} delegate monitors in execution $executionId completed") - // Update metadata only if the workflow is not temp - if (!isTempWorkflow) { - WorkflowMetadataService.upsertWorkflowMetadata( - workflowMetadata.copy(latestRunTime = workflowExecutionStartTime, latestExecutionId = executionId), - true - ) - } - val triggerResults = mutableMapOf<String, ChainedAlertTriggerRunResult>() - val workflowRunResult = WorkflowRunResult( - workflowId = workflow.id, - workflowName = workflow.name, - monitorRunResults = resultList, - executionStartTime = workflowExecutionStartTime, - executionEndTime = null, - executionId = executionId, - error = lastErrorDelegateRun, - triggerResults = triggerResults - ) - val currentAlerts = try { - monitorCtx.alertIndices!!.createOrUpdateAlertIndex(dataSources!!) - monitorCtx.alertIndices!!.createOrUpdateInitialAlertHistoryIndex(dataSources) - monitorCtx.alertService!!.loadCurrentAlertsForWorkflow(workflow, dataSources) - } catch (e: Exception) { - logger.error("Failed to fetch current alerts for workflow", e) - // We can't save ERROR alerts to the index here as we don't know if there are existing ACTIVE alerts - val id = if (workflow.id.trim().isEmpty()) "_na_" else workflow.id - logger.error("Error loading alerts for workflow: $id", e) - return workflowRunResult.copy(error = e) - } - try { - monitorCtx.alertIndices!!.createOrUpdateAlertIndex(dataSources) - val updatedAlerts = mutableListOf<Alert>() - val monitorIdToAlertIdsMap = fetchAlertsGeneratedInCurrentExecution(dataSources, executionId, monitorCtx, workflow) - for (trigger in workflow.triggers) { - val currentAlert = currentAlerts[trigger] - val caTrigger = trigger as ChainedAlertTrigger - val triggerCtx = ChainedAlertTriggerExecutionContext( - workflow = workflow, - workflowRunResult = workflowRunResult, - periodStart = workflowRunResult.executionStartTime, - periodEnd = workflowRunResult.executionEndTime, - trigger = caTrigger, - alertGeneratingMonitors = monitorIdToAlertIdsMap.keys, - monitorIdToAlertIdsMap = monitorIdToAlertIdsMap, - alert = currentAlert - ) - runChainedAlertTrigger( - monitorCtx, - workflow, - trigger, - executionId, - triggerCtx, - dryRun, - triggerResults, - updatedAlerts - ) - } - if (!dryRun && workflow.id != Workflow.NO_ID && updatedAlerts.isNotEmpty()) { - monitorCtx.retryPolicy?.let { - monitorCtx.alertService!!.saveAlerts( - dataSources, - updatedAlerts, - it, - routingId = workflow.id - ) - } - } - } catch (e: Exception) { - // We can't save ERROR alerts to the index here as we don't know if there are existing ACTIVE alerts - val id = if (workflow.id.trim().isEmpty()) "_na_" else workflow.id - logger.error("Error loading current chained alerts for workflow: $id", e) - return WorkflowRunResult( - workflowId = workflow.id, - workflowName = workflow.name, - monitorRunResults = emptyList(), - executionStartTime = workflowExecutionStartTime, - executionEndTime = Instant.now(), - executionId = executionId, - error = AlertingException.wrap(e), - triggerResults = emptyMap() - ) - } - workflowRunResult.executionEndTime = Instant.now() - - val sr = SearchRequest(dataSources!!.alertsIndex) - sr.source().query(QueryBuilders.matchAllQuery()).size(10) - val searchResponse: SearchResponse = monitorCtx.client!!.suspendUntil { monitorCtx.client!!.search(sr, it) } - searchResponse.hits - return workflowRunResult - } - - private
suspend fun runDelegateMonitor( - delegateMonitor: Monitor, - monitorCtx: MonitorRunnerExecutionContext, - periodStart: Instant, - periodEnd: Instant, - dryRun: Boolean, - workflowRunContext: WorkflowRunContext, - executionId: String, - ): MonitorRunResult<*>? { - - if (delegateMonitor.isBucketLevelMonitor()) { - return BucketLevelMonitorRunner.runMonitor( - delegateMonitor, - monitorCtx, - periodStart, - periodEnd, - dryRun, - workflowRunContext, - executionId - ) - } else if (delegateMonitor.isDocLevelMonitor()) { - return DocumentLevelMonitorRunner.runMonitor( - delegateMonitor, - monitorCtx, - periodStart, - periodEnd, - dryRun, - workflowRunContext, - executionId - ) - } else if (delegateMonitor.isQueryLevelMonitor()) { - return QueryLevelMonitorRunner.runMonitor( - delegateMonitor, - monitorCtx, - periodStart, - periodEnd, - dryRun, - workflowRunContext, - executionId - ) - } else { - throw AlertingException.wrap( - IllegalStateException("Unsupported monitor type ${delegateMonitor.monitorType}") - ) - } - } - - fun generateExecutionId( - isTempWorkflow: Boolean, - workflow: Workflow, - ): String { - val randomPart = "_${LocalDateTime.now(ZoneOffset.UTC)}_${UUID.randomUUID()}" - return if (isTempWorkflow) randomPart else workflow.id.plus(randomPart) - } - - private fun validateMonitorSize( - delegates: List<Delegate>, - monitors: List<Monitor>, - workflow: Workflow, - ) { - if (delegates.size != monitors.size) { - val diffMonitorIds = delegates.map { it.monitorId }.minus(monitors.map { it.id }.toSet()).joinToString() - logger.error("Delegate monitors don't exist $diffMonitorIds for the workflow ${workflow.id}") - throw AlertingException.wrap( - IllegalStateException("Delegate monitors don't exist $diffMonitorIds for the workflow ${workflow.id}") - ) - } - } - - private suspend fun runChainedAlertTrigger( - monitorCtx: MonitorRunnerExecutionContext, - workflow: Workflow, - trigger: ChainedAlertTrigger, - executionId: String, - triggerCtx: ChainedAlertTriggerExecutionContext, - dryRun: Boolean, - triggerResults: MutableMap<String, ChainedAlertTriggerRunResult>, - updatedAlerts: MutableList<Alert>, - ) { - val triggerRunResult = monitorCtx.triggerService!!.runChainedAlertTrigger( - workflow, trigger, triggerCtx.alertGeneratingMonitors, triggerCtx.monitorIdToAlertIdsMap - ) - triggerResults[trigger.id] = triggerRunResult - if (monitorCtx.triggerService!!.isChainedAlertTriggerActionable(triggerCtx, triggerRunResult)) { - val actionCtx = triggerCtx - for (action in trigger.actions) { - triggerRunResult.actionResults[action.id] = this.runAction(action, actionCtx, monitorCtx, workflow, dryRun) - } - } - val alert = monitorCtx.alertService!!.composeChainedAlert( - triggerCtx, executionId, workflow, triggerRunResult.associatedAlertIds.toList(), triggerRunResult - ) - if (alert != null) { - updatedAlerts.add(alert) - } - } - - private suspend fun fetchAlertsGeneratedInCurrentExecution( - dataSources: DataSources, - executionId: String, - monitorCtx: MonitorRunnerExecutionContext, - workflow: Workflow, - ): MutableMap<String, MutableSet<String>> { - try { - val searchRequest = - SearchRequest(getDelegateMonitorAlertIndex(dataSources, workflow, monitorCtx.alertIndices!!.isAlertHistoryEnabled())) - val queryBuilder = boolQuery() - queryBuilder.must(QueryBuilders.termQuery("execution_id", executionId)) - queryBuilder.must(QueryBuilders.termQuery("state", getDelegateMonitorAlertState(workflow))) - val noErrorQuery = boolQuery() - .should(boolQuery().mustNot(existsQuery(Alert.ERROR_MESSAGE_FIELD))) - .should(termsQuery(Alert.ERROR_MESSAGE_FIELD, "")) - queryBuilder.must(noErrorQuery) - searchRequest.source().query(queryBuilder).size(9999) - val searchResponse: SearchResponse = monitorCtx.client!!.suspendUntil { monitorCtx.client!!.search(searchRequest, it) } - val alerts = searchResponse.hits.map { hit -> - val xcp = XContentHelper.createParser( - monitorCtx.xContentRegistry, LoggingDeprecationHandler.INSTANCE, - hit.sourceRef, XContentType.JSON - ) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - val alert = Alert.parse(xcp, hit.id, hit.version) - alert - } - val map = mutableMapOf<String, MutableSet<String>>() - for (alert in alerts) { - if (map.containsKey(alert.monitorId)) { - map[alert.monitorId]!!.add(alert.id) - } else { - map[alert.monitorId] = mutableSetOf(alert.id) - } - } - return map - } catch (e: Exception) { - logger.error("failed to get alerts generated by delegate monitors in current execution $executionId", e) - return mutableMapOf() - } - } - - fun getDelegateMonitorAlertIndex( - dataSources: DataSources, - workflow: Workflow, - isAlertHistoryEnabled: Boolean, - ): String { - return if (workflow.triggers.isNotEmpty()) { - if (isAlertHistoryEnabled) { - dataSources.alertsHistoryIndex!! - } else dataSources.alertsIndex - } else dataSources.alertsIndex - } - - fun getDelegateMonitorAlertState( - workflow: Workflow, - ): Alert.State { - return if (workflow.triggers.isNotEmpty()) { - Alert.State.AUDIT - } else Alert.State.ACTIVE - } -}
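To make the id scheme in `generateExecutionId` above concrete, here is a minimal sketch; `sketchExecutionIds` is a made-up name and the timestamp/UUID values in the comments are invented examples:

    // Sketch only: shapes of the ids produced by CompositeWorkflowRunner.generateExecutionId.
    fun sketchExecutionIds(workflow: Workflow): Pair<String, String> {
        // Persisted workflow: "<workflowId>_<UTC LocalDateTime>_<random UUID>",
        // e.g. "wf-1_2024-01-18T17:09:38.123_4f8c0d2e-..." (illustrative values).
        val persisted = CompositeWorkflowRunner.generateExecutionId(false, workflow)
        // Temp/dry-run workflow: only the "_<timestamp>_<uuid>" suffix, since there is no stable id.
        val dryRun = CompositeWorkflowRunner.generateExecutionId(true, workflow)
        return persisted to dryRun
    }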
diff --git a/alerting/bin/main/org/opensearch/alerting/workflow/WorkflowRunContext.kt b/alerting/bin/main/org/opensearch/alerting/workflow/WorkflowRunContext.kt deleted file mode 100644 index 14488a16a..000000000 --- a/alerting/bin/main/org/opensearch/alerting/workflow/WorkflowRunContext.kt +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.workflow - -data class WorkflowRunContext( - // For a dry run this is a randomly generated id; otherwise it is the workflowId - val workflowId: String, - val workflowMetadataId: String, - val chainedMonitorId: String?, - val matchingDocIdsPerIndex: Map<String, List<String>>, - val auditDelegateMonitorAlerts: Boolean -)
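A hedged sketch of how the runner above populates this context for a delegate whose findings are chained from a previous monitor; every value here is invented for illustration:

    // Illustrative only; field semantics follow the comments in the data class above.
    val exampleContext = WorkflowRunContext(
        workflowId = "wf-1",                      // workflowMetadata.workflowId
        workflowMetadataId = "wf-1-metadata",     // workflowMetadata.id
        chainedMonitorId = "monitor-a",           // delegate.chainedMonitorFindings?.monitorId
        matchingDocIdsPerIndex = mapOf("logs-000001" to listOf("docId-1", "docId-2")),
        auditDelegateMonitorAlerts = true         // defaulted to true when unset on the workflow
    )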
diff --git a/alerting/bin/main/org/opensearch/alerting/workflow/WorkflowRunner.kt b/alerting/bin/main/org/opensearch/alerting/workflow/WorkflowRunner.kt deleted file mode 100644 index ce6ac23f9..000000000 --- a/alerting/bin/main/org/opensearch/alerting/workflow/WorkflowRunner.kt +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.workflow - -import org.opensearch.OpenSearchSecurityException -import org.opensearch.alerting.MonitorRunnerExecutionContext -import org.opensearch.alerting.MonitorRunnerService -import org.opensearch.alerting.action.GetDestinationsAction -import org.opensearch.alerting.action.GetDestinationsRequest -import org.opensearch.alerting.action.GetDestinationsResponse -import org.opensearch.alerting.model.ActionRunResult -import org.opensearch.alerting.model.WorkflowRunResult -import org.opensearch.alerting.model.destination.Destination -import org.opensearch.alerting.opensearchapi.InjectorContextElement -import org.opensearch.alerting.opensearchapi.suspendUntil -import org.opensearch.alerting.opensearchapi.withClosableContext -import org.opensearch.alerting.script.ChainedAlertTriggerExecutionContext -import org.opensearch.alerting.util.destinationmigration.NotificationActionConfigs -import org.opensearch.alerting.util.destinationmigration.NotificationApiUtils -import org.opensearch.alerting.util.destinationmigration.getTitle -import org.opensearch.alerting.util.destinationmigration.publishLegacyNotification -import org.opensearch.alerting.util.destinationmigration.sendNotification -import org.opensearch.alerting.util.isAllowed -import org.opensearch.alerting.util.isTestAction -import org.opensearch.client.node.NodeClient -import org.opensearch.commons.alerting.model.Table -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.commons.alerting.model.action.Action -import org.opensearch.commons.notifications.model.NotificationConfigInfo -import org.opensearch.core.common.Strings -import org.opensearch.script.Script -import org.opensearch.script.TemplateScript -import java.time.Instant - -abstract class WorkflowRunner { - abstract suspend fun runWorkflow( - workflow: Workflow, - monitorCtx: MonitorRunnerExecutionContext, - periodStart: Instant, - periodEnd: Instant, - dryRun: Boolean - ): WorkflowRunResult - - suspend fun runAction( - action: Action, - ctx: ChainedAlertTriggerExecutionContext, - monitorCtx: MonitorRunnerExecutionContext, - workflow: Workflow, - dryrun: Boolean - ): ActionRunResult { - return try { - if (!MonitorRunnerService.isActionActionable(action, ctx.alert)) { - return ActionRunResult(action.id, action.name, mapOf(), true, null, null) - } - val actionOutput = mutableMapOf<String, String>() - actionOutput[Action.SUBJECT] = if (action.subjectTemplate != null) { - compileTemplate(action.subjectTemplate!!, ctx) - } else "" - actionOutput[Action.MESSAGE] = compileTemplate(action.messageTemplate, ctx) - if (Strings.isNullOrEmpty(actionOutput[Action.MESSAGE])) { - throw IllegalStateException("Message content missing in the Destination with id: ${action.destinationId}") - } - if (!dryrun) { - val client = monitorCtx.client - client!!.threadPool().threadContext.stashContext().use { - withClosableContext( - InjectorContextElement( - workflow.id, - monitorCtx.settings!!, - monitorCtx.threadPool!!.threadContext, - workflow.user?.roles, - workflow.user - ) - ) { - actionOutput[Action.MESSAGE_ID] = getConfigAndSendNotification( - action, - monitorCtx, - actionOutput[Action.SUBJECT], - actionOutput[Action.MESSAGE]!!
- ) - } - } - } - ActionRunResult(action.id, action.name, actionOutput, false, MonitorRunnerService.currentTime(), null) - } catch (e: Exception) { - ActionRunResult(action.id, action.name, mapOf(), false, MonitorRunnerService.currentTime(), e) - } - } - - protected suspend fun getConfigAndSendNotification( - action: Action, - monitorCtx: MonitorRunnerExecutionContext, - subject: String?, - message: String - ): String { - val config = getConfigForNotificationAction(action, monitorCtx) - if (config.destination == null && config.channel == null) { - throw IllegalStateException("Unable to find a Notification Channel or Destination config with id [${action.destinationId}]") - } - - // Adding a check on TEST_ACTION Destination type here to avoid supporting it as a LegacyBaseMessage type - // just for Alerting integration tests - if (config.destination?.isTestAction() == true) { - return "test action" - } - - if (config.destination?.isAllowed(monitorCtx.allowList) == false) { - throw IllegalStateException( - "Monitor contains a Destination type that is not allowed: ${config.destination.type}" - ) - } - - var actionResponseContent = "" - actionResponseContent = config.channel - ?.sendNotification( - monitorCtx.client!!, - config.channel.getTitle(subject), - message - ) ?: actionResponseContent - - actionResponseContent = config.destination - ?.buildLegacyBaseMessage(subject, message, monitorCtx.destinationContextFactory!!.getDestinationContext(config.destination)) - ?.publishLegacyNotification(monitorCtx.client!!) - ?: actionResponseContent - - return actionResponseContent - } - - /** - * The "destination" ID referenced in a Monitor Action could either be a Notification config or a Destination config - * depending on whether the background migration process has already migrated it from a Destination to a Notification config. - * - * To cover both of these cases, the Notification config will take precedence and if it is not found, the Destination will be retrieved. - */ - private suspend fun getConfigForNotificationAction( - action: Action, - monitorCtx: MonitorRunnerExecutionContext - ): NotificationActionConfigs { - var destination: Destination? = null - var notificationPermissionException: Exception? = null - - var channel: NotificationConfigInfo? 
= null - try { - channel = NotificationApiUtils.getNotificationConfigInfo(monitorCtx.client as NodeClient, action.destinationId) - } catch (e: OpenSearchSecurityException) { - notificationPermissionException = e - } - - // If the channel was not found, try to retrieve the Destination - if (channel == null) { - destination = try { - val table = Table( - "asc", - "destination.name.keyword", - null, - 1, - 0, - null - ) - val getDestinationsRequest = GetDestinationsRequest( - action.destinationId, - 0L, - null, - table, - "ALL" - ) - - val getDestinationsResponse: GetDestinationsResponse = monitorCtx.client!!.suspendUntil { - monitorCtx.client!!.execute(GetDestinationsAction.INSTANCE, getDestinationsRequest, it) - } - getDestinationsResponse.destinations.firstOrNull() - } catch (e: IllegalStateException) { - // Catching the exception thrown when the Destination was not found so the NotificationActionConfigs object can be returned - null - } catch (e: OpenSearchSecurityException) { - if (notificationPermissionException != null) { - throw notificationPermissionException - } else { - throw e - } - } - - if (destination == null && notificationPermissionException != null) { - throw notificationPermissionException - } - } - - return NotificationActionConfigs(destination, channel) - } - - internal fun compileTemplate(template: Script, ctx: ChainedAlertTriggerExecutionContext): String { - return MonitorRunnerService.monitorCtx.scriptService!!.compile(template, TemplateScript.CONTEXT) - .newInstance(template.params + mapOf("ctx" to ctx.asTemplateArg())) - .execute() - } -}
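The channel-versus-destination precedence implemented above reduces to roughly the following shape. This is a sketch, not the actual control flow; `resolveActionConfig` is a hypothetical name, and the legacy lookup is passed in as a lambda standing in for the GetDestinationsAction round trip:

    // Sketch of the fallback: Notification channel config first, legacy Destination second.
    suspend fun resolveActionConfig(
        client: NodeClient,
        destinationId: String,
        lookupLegacyDestination: suspend (String) -> Destination? // stands in for the GetDestinationsAction call
    ): NotificationActionConfigs {
        // Returns null when no channel config exists for the id (a 404 is swallowed upstream).
        val channel = NotificationApiUtils.getNotificationConfigInfo(client, destinationId)
        // Only consult the legacy Destination index when no channel was found.
        val destination = if (channel == null) lookupLegacyDestination(destinationId) else null
        return NotificationActionConfigs(destination, channel)
    }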
diff --git a/alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt$1.class b/alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt$1.class deleted file mode 100644 index 915343f8d988ec245c45d1773483190cddb7fa00..0000000000000000000000000000000000000000 Binary files a/alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt$1.class and /dev/null differ
diff --git a/alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt$2.class b/alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt$2.class deleted file mode 100644 index e1530dd85788b9009763a25c0d0120c0d9ade397..0000000000000000000000000000000000000000 Binary files a/alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt$2.class and /dev/null differ
diff --git a/alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt.class b/alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt.class deleted file mode 100644 index ee24cb1101d5087d6966c904da1f2040b181be60..0000000000000000000000000000000000000000 Binary files a/alerting/bin/main/org/opensearch/percolator/PercolateQueryBuilderExt.class and /dev/null differ
diff --git a/alerting/bin/main/org/opensearch/percolator/PercolatorFieldMapperExt$Builder.class b/alerting/bin/main/org/opensearch/percolator/PercolatorFieldMapperExt$Builder.class deleted file mode 100644 index c44daa1b273d2c6493f89fd87d79874d66db05b4..0000000000000000000000000000000000000000 Binary files a/alerting/bin/main/org/opensearch/percolator/PercolatorFieldMapperExt$Builder.class and /dev/null differ
diff --git a/alerting/bin/main/org/opensearch/percolator/PercolatorFieldMapperExt$TypeParser.class b/alerting/bin/main/org/opensearch/percolator/PercolatorFieldMapperExt$TypeParser.class deleted file mode 100644 index 545dd4e791524dc84769cbb98ff67c81beec7a68..0000000000000000000000000000000000000000 Binary files a/alerting/bin/main/org/opensearch/percolator/PercolatorFieldMapperExt$TypeParser.class and /dev/null differ
diff --git a/alerting/bin/test/esnode-key.pem b/alerting/bin/test/esnode-key.pem deleted file mode 100644 index 4ac2cb57a..000000000 --- a/alerting/bin/test/esnode-key.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCWvn+O+rywfgMC -ud24mAclMDfuNA/IzCKLxl5usIE/PvUm7PPfXQ14LfQhNQXqOuaD9fiVM+HO1BzK -wmN3j4g7eHInR1cxENoNGKFa0Fr9EXnUv8sfwyobPD8NTu9eaH7T+d6f9oow+Q4n -xb9Xin5IRR/pcJ8v7zEjcXpZaZejcSU4iVZ0PR2Di4H9rfe9SEyR5wLrsVBePB3L -jaL1uK4bZF3n/JGgDe3BNy1PgPU+O+FCzQipBBTyJWQCjd4iTRXVbMa01PglAR85 -O9w6NXApBLyWdGRY6dGd8vMC2P4KlhnxlcgPZdglKniGTX+eTzT7Rszq77zjYrou -PLwSh9S7AgMBAAECggEABwiohxFoEIwws8XcdKqTWsbfNTw0qFfuHLuK2Htf7IWR -htlzn66F3F+4jnwc5IsPCoVFriCXnsEC/usHHSMTZkL+gJqxlNaGdin6DXS/aiOQ -nb69SaQfqNmsz4ApZyxVDqsQGkK0vAhDAtQVU45gyhp/nLLmmqP8lPzMirOEodmp -
-U9bA8t/ttrzng7SVAER42f6IVpW0iTKTLyFii0WZbq+ObViyqib9hVFrI6NJuQS+ -IelcZB0KsSi6rqIjXg1XXyMiIUcSlhq+GfEa18AYgmsbPwMbExate7/8Ci7ZtCbh -lx9bves2+eeqq5EMm3sMHyhdcg61yzd5UYXeZhwJkQKBgQDS9YqrAtztvLY2gMgv -d+wOjb9awWxYbQTBjx33kf66W+pJ+2j8bI/XX2CpZ98w/oq8VhMqbr9j5b8MfsrF -EoQvedA4joUo8sXd4j1mR2qKF4/KLmkgy6YYusNP2UrVSw7sh77bzce+YaVVoO/e -0wIVTHuD/QZ6fG6MasOqcbl6hwKBgQC27cQruaHFEXR/16LrMVAX+HyEEv44KOCZ -ij5OE4P7F0twb+okngG26+OJV3BtqXf0ULlXJ+YGwXCRf6zUZkld3NMy3bbKPgH6 -H/nf3BxqS2tudj7+DV52jKtisBghdvtlKs56oc9AAuwOs37DvhptBKUPdzDDqfys -Qchv5JQdLQKBgERev+pcqy2Bk6xmYHrB6wdseS/4sByYeIoi0BuEfYH4eB4yFPx6 -UsQCbVl6CKPgWyZe3ydJbU37D8gE78KfFagtWoZ56j4zMF2RDUUwsB7BNCDamce/ -OL2bCeG/Erm98cBG3lxufOX+z47I8fTNfkdY2k8UmhzoZwurLm73HJ3RAoGBAKsp -6yamuXF2FbYRhUXgjHsBbTD/vJO72/yO2CGiLRpi/5mjfkjo99269trp0C8sJSub -5PBiSuADXFsoRgUv+HI1UAEGaCTwxFTQWrRWdtgW3d0sE2EQDVWL5kmfT9TwSeat -mSoyAYR5t3tCBNkPJhbgA7pm4mASzHQ50VyxWs25AoGBAKPFx9X2oKhYQa+mW541 -bbqRuGFMoXIIcr/aeM3LayfLETi48o5NDr2NDP11j4yYuz26YLH0Dj8aKpWuehuH -uB27n6j6qu0SVhQi6mMJBe1JrKbzhqMKQjYOoy8VsC2gdj5pCUP/kLQPW7zm9diX -CiKTtKgPIeYdigor7V3AHcVT ------END PRIVATE KEY----- diff --git a/alerting/bin/test/esnode.pem b/alerting/bin/test/esnode.pem deleted file mode 100644 index 7ba92534e..000000000 --- a/alerting/bin/test/esnode.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEyTCCA7GgAwIBAgIGAWLrc1O2MA0GCSqGSIb3DQEBCwUAMIGPMRMwEQYKCZIm -iZPyLGQBGRYDY29tMRcwFQYKCZImiZPyLGQBGRYHZXhhbXBsZTEZMBcGA1UECgwQ -RXhhbXBsZSBDb20gSW5jLjEhMB8GA1UECwwYRXhhbXBsZSBDb20gSW5jLiBSb290 -IENBMSEwHwYDVQQDDBhFeGFtcGxlIENvbSBJbmMuIFJvb3QgQ0EwHhcNMTgwNDIy -MDM0MzQ3WhcNMjgwNDE5MDM0MzQ3WjBeMRIwEAYKCZImiZPyLGQBGRYCZGUxDTAL -BgNVBAcMBHRlc3QxDTALBgNVBAoMBG5vZGUxDTALBgNVBAsMBG5vZGUxGzAZBgNV -BAMMEm5vZGUtMC5leGFtcGxlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAJa+f476vLB+AwK53biYByUwN+40D8jMIovGXm6wgT8+9Sbs899dDXgt -9CE1Beo65oP1+JUz4c7UHMrCY3ePiDt4cidHVzEQ2g0YoVrQWv0RedS/yx/DKhs8 -Pw1O715oftP53p/2ijD5DifFv1eKfkhFH+lwny/vMSNxellpl6NxJTiJVnQ9HYOL -gf2t971ITJHnAuuxUF48HcuNovW4rhtkXef8kaAN7cE3LU+A9T474ULNCKkEFPIl -ZAKN3iJNFdVsxrTU+CUBHzk73Do1cCkEvJZ0ZFjp0Z3y8wLY/gqWGfGVyA9l2CUq -eIZNf55PNPtGzOrvvONiui48vBKH1LsCAwEAAaOCAVkwggFVMIG8BgNVHSMEgbQw -gbGAFJI1DOAPHitF9k0583tfouYSl0BzoYGVpIGSMIGPMRMwEQYKCZImiZPyLGQB -GRYDY29tMRcwFQYKCZImiZPyLGQBGRYHZXhhbXBsZTEZMBcGA1UECgwQRXhhbXBs -ZSBDb20gSW5jLjEhMB8GA1UECwwYRXhhbXBsZSBDb20gSW5jLiBSb290IENBMSEw -HwYDVQQDDBhFeGFtcGxlIENvbSBJbmMuIFJvb3QgQ0GCAQEwHQYDVR0OBBYEFKyv -78ZmFjVKM9g7pMConYH7FVBHMAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/BAQDAgXg -MCAGA1UdJQEB/wQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjA1BgNVHREELjAsiAUq -AwQFBYISbm9kZS0wLmV4YW1wbGUuY29tgglsb2NhbGhvc3SHBH8AAAEwDQYJKoZI -hvcNAQELBQADggEBAIOKuyXsFfGv1hI/Lkpd/73QNqjqJdxQclX57GOMWNbOM5H0 -5/9AOIZ5JQsWULNKN77aHjLRr4owq2jGbpc/Z6kAd+eiatkcpnbtbGrhKpOtoEZy -8KuslwkeixpzLDNISSbkeLpXz4xJI1ETMN/VG8ZZP1bjzlHziHHDu0JNZ6TnNzKr -XzCGMCohFfem8vnKNnKUneMQMvXd3rzUaAgvtf7Hc2LTBlf4fZzZF1EkwdSXhaMA -1lkfHiqOBxtgeDLxCHESZ2fqgVqsWX+t3qHQfivcPW6txtDyrFPRdJOGhiMGzT/t -e/9kkAtQRgpTb3skYdIOOUOV0WGQ60kJlFhAzIs= ------END CERTIFICATE----- diff --git a/alerting/bin/test/kirk-key.pem b/alerting/bin/test/kirk-key.pem deleted file mode 100644 index bacb22c21..000000000 --- a/alerting/bin/test/kirk-key.pem +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDCwgBOoO88uMM8 -dREJsk58Yt4Jn0zwQ2wUThbvy3ICDiEWhiAhUbg6dTggpS5vWWJto9bvaaqgMVoh -ElfYHdTDncX3UQNBEP8tqzHON6BFEFSGgJRGLd6f5dri6rK32nCotYS61CFXBFxf -WumXjSukjyrcTsdkR3C5QDo2oN7F883MOQqRENPzAtZi9s3jNX48u+/e3yvJzXsB 
-GS9Qmsye6C71enbIujM4CVwDT/7a5jHuaUp6OuNCFbdRPnu/wLYwOS2/yOtzAqk7 -/PFnPCe7YOa10ShnV/jx2sAHhp7ZQBJgFkkgnIERz9Ws74Au+EbptWnsWuB+LqRL -x5G02IzpAgMBAAECggEAEzwnMkeBbqqDgyRqFbO/PgMNvD7i0b/28V0dCtCPEVY6 -klzrg3RCERP5V9AN8VVkppYjPkCzZ2A4b0JpMUu7ncOmr7HCnoSCj2IfEyePSVg+ -4OHbbcBOAoDTHiI2myM/M9++8izNS34qGV4t6pfjaDyeQQ/5cBVWNBWnKjS34S5H -rJWpAcDgxYk5/ah2Xs2aULZlXDMxbSikjrv+n4JIYTKFQo8ydzL8HQDBRmXAFLjC -gNOSHf+5u1JdpY3uPIxK1ugVf8zPZ4/OEB23j56uu7c8+sZ+kZwfRWAQmMhFVG/y -OXxoT5mOruBsAw29m2Ijtxg252/YzSTxiDqFziB/eQKBgQDjeVAdi55GW/bvhuqn -xME/An8E3hI/FyaaITrMQJUBjiCUaStTEqUgQ6A7ZfY/VX6qafOX7sli1svihrXC -uelmKrdve/CFEEqzX9JWWRiPiQ0VZD+EQRsJvX85Tw2UGvVUh6dO3UGPS0BhplMD -jeVpyXgZ7Gy5we+DWjfwhYrCmwKBgQDbLmQhRy+IdVljObZmv3QtJ0cyxxZETWzU -MKmgBFvcRw+KvNwO+Iy0CHEbDu06Uj63kzI2bK3QdINaSrjgr8iftXIQpBmcgMF+ -a1l5HtHlCp6RWd55nWQOEvn36IGN3cAaQkXuh4UYM7QfEJaAbzJhyJ+wXA3jWqUd -8bDTIAZ0ywKBgFuZ44gyTAc7S2JDa0Up90O/ZpT4NFLRqMrSbNIJg7d/m2EIRNkM -HhCzCthAg/wXGo3XYq+hCdnSc4ICCzmiEfoBY6LyPvXmjJ5VDOeWs0xBvVIK74T7 -jr7KX2wdiHNGs9pZUidw89CXVhK8nptEzcheyA1wZowbK68yamph7HHXAoGBAK3x -7D9Iyl1mnDEWPT7f1Gh9UpDm1TIRrDvd/tBihTCVKK13YsFy2d+LD5Bk0TpGyUVR -STlOGMdloFUJFh4jA3pUOpkgUr8Uo/sbYN+x6Ov3+I3sH5aupRhSURVA7YhUIz/z -tqIt5R+m8Nzygi6dkQNvf+Qruk3jw0S3ahizwsvvAoGAL7do6dTLp832wFVxkEf4 -gg1M6DswfkgML5V/7GQ3MkIX/Hrmiu+qSuHhDGrp9inZdCDDYg5+uy1+2+RBMRZ3 -vDUUacvc4Fep05zp7NcjgU5y+/HWpuKVvLIlZAO1MBY4Xinqqii6RdxukIhxw7eT -C6TPL5KAcV1R/XAihDhI18Y= ------END PRIVATE KEY----- diff --git a/alerting/bin/test/kirk.pem b/alerting/bin/test/kirk.pem deleted file mode 100644 index c32b21cd8..000000000 --- a/alerting/bin/test/kirk.pem +++ /dev/null @@ -1,26 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEdzCCA1+gAwIBAgIGAWLrc1O4MA0GCSqGSIb3DQEBCwUAMIGPMRMwEQYKCZIm -iZPyLGQBGRYDY29tMRcwFQYKCZImiZPyLGQBGRYHZXhhbXBsZTEZMBcGA1UECgwQ -RXhhbXBsZSBDb20gSW5jLjEhMB8GA1UECwwYRXhhbXBsZSBDb20gSW5jLiBSb290 -IENBMSEwHwYDVQQDDBhFeGFtcGxlIENvbSBJbmMuIFJvb3QgQ0EwHhcNMTgwNDIy -MDM0MzQ3WhcNMjgwNDE5MDM0MzQ3WjBNMQswCQYDVQQGEwJkZTENMAsGA1UEBwwE -dGVzdDEPMA0GA1UECgwGY2xpZW50MQ8wDQYDVQQLDAZjbGllbnQxDTALBgNVBAMM -BGtpcmswggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDCwgBOoO88uMM8 -dREJsk58Yt4Jn0zwQ2wUThbvy3ICDiEWhiAhUbg6dTggpS5vWWJto9bvaaqgMVoh -ElfYHdTDncX3UQNBEP8tqzHON6BFEFSGgJRGLd6f5dri6rK32nCotYS61CFXBFxf -WumXjSukjyrcTsdkR3C5QDo2oN7F883MOQqRENPzAtZi9s3jNX48u+/e3yvJzXsB -GS9Qmsye6C71enbIujM4CVwDT/7a5jHuaUp6OuNCFbdRPnu/wLYwOS2/yOtzAqk7 -/PFnPCe7YOa10ShnV/jx2sAHhp7ZQBJgFkkgnIERz9Ws74Au+EbptWnsWuB+LqRL -x5G02IzpAgMBAAGjggEYMIIBFDCBvAYDVR0jBIG0MIGxgBSSNQzgDx4rRfZNOfN7 -X6LmEpdAc6GBlaSBkjCBjzETMBEGCgmSJomT8ixkARkWA2NvbTEXMBUGCgmSJomT -8ixkARkWB2V4YW1wbGUxGTAXBgNVBAoMEEV4YW1wbGUgQ29tIEluYy4xITAfBgNV -BAsMGEV4YW1wbGUgQ29tIEluYy4gUm9vdCBDQTEhMB8GA1UEAwwYRXhhbXBsZSBD -b20gSW5jLiBSb290IENBggEBMB0GA1UdDgQWBBRsdhuHn3MGDvZxOe22+1wliCJB -mDAMBgNVHRMBAf8EAjAAMA4GA1UdDwEB/wQEAwIF4DAWBgNVHSUBAf8EDDAKBggr -BgEFBQcDAjANBgkqhkiG9w0BAQsFAAOCAQEAkPrUTKKn+/6g0CjhTPBFeX8mKXhG -zw5z9Oq+xnwefZwxV82E/tgFsPcwXcJIBg0f43BaVSygPiV7bXqWhxASwn73i24z -lveIR4+z56bKIhP6c3twb8WWR9yDcLu2Iroin7dYEm3dfVUrhz/A90WHr6ddwmLL -3gcFF2kBu3S3xqM5OmN/tqRXFmo+EvwrdJRiTh4Fsf0tX1ZT07rrGvBFYktK7Kma -lqDl4UDCF1UWkiiFubc0Xw+DR6vNAa99E0oaphzvCmITU1wITNnYZTKzVzQ7vUCq -kLmXOFLTcxTQpptxSo5xDD3aTpzWGCvjExCKpXQtsITUOYtZc02AGjjPOQ== ------END CERTIFICATE----- diff --git a/alerting/bin/test/org/opensearch/alerting/ADTestHelpers.kt b/alerting/bin/test/org/opensearch/alerting/ADTestHelpers.kt deleted file mode 100644 index 6eda9ec30..000000000 --- a/alerting/bin/test/org/opensearch/alerting/ADTestHelpers.kt +++ /dev/null @@ -1,508 +0,0 @@ -/* - * 
Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ -package org.opensearch.alerting - -import org.opensearch.commons.alerting.model.Input -import org.opensearch.commons.alerting.model.IntervalSchedule -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.QueryLevelTrigger -import org.opensearch.commons.alerting.model.Schedule -import org.opensearch.commons.alerting.model.SearchInput -import org.opensearch.commons.alerting.model.Trigger -import org.opensearch.commons.authuser.User -import org.opensearch.index.query.BoolQueryBuilder -import org.opensearch.index.query.QueryBuilders -import org.opensearch.script.Script -import org.opensearch.search.aggregations.AggregationBuilders -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.test.OpenSearchTestCase -import org.opensearch.test.rest.OpenSearchRestTestCase -import java.time.Instant -import java.time.ZonedDateTime -import java.time.temporal.ChronoUnit - -const val ANOMALY_DETECTOR_INDEX = ".opendistro-anomaly-detectors" -const val ANOMALY_RESULT_INDEX = ".opendistro-anomaly-results*" - -fun anomalyDetectorIndexMapping(): String { - return """ - "properties": { - "schema_version": { - "type": "integer" - }, - "name": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "description": { - "type": "text" - }, - "time_field": { - "type": "keyword" - }, - "indices": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "filter_query": { - "type": "object", - "enabled": false - }, - "feature_attributes": { - "type": "nested", - "properties": { - "feature_id": { - "type": "keyword", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "feature_name": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "feature_enabled": { - "type": "boolean" - }, - "aggregation_query": { - "type": "object", - "enabled": false - } - } - }, - "detection_interval": { - "properties": { - "period": { - "properties": { - "interval": { - "type": "integer" - }, - "unit": { - "type": "keyword" - } - } - } - } - }, - "window_delay": { - "properties": { - "period": { - "properties": { - "interval": { - "type": "integer" - }, - "unit": { - "type": "keyword" - } - } - } - } - }, - "shingle_size": { - "type": "integer" - }, - "last_update_time": { - "type": "date", - "format": "strict_date_time||epoch_millis" - }, - "ui_metadata": { - "type": "object", - "enabled": false - }, - "user": { - "type": "nested", - "properties": { - "name": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "backend_roles": { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "roles": { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "custom_attribute_names": { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - } - } - }, - "category_field": { - "type": "keyword" - } - } - """ -} - -fun anomalyResultIndexMapping(): String { - return """ - "properties": { - "detector_id": { - "type": "keyword" - }, - "is_anomaly": { - "type": "boolean" - }, - "anomaly_score": { - "type": "double" - }, - "anomaly_grade": { - "type": "double" - }, - "confidence": { - "type": "double" - }, - "feature_data": { - "type": "nested", - "properties": { - "feature_id": { - 
"type": "keyword" - }, - "data": { - "type": "double" - } - } - }, - "data_start_time": { - "type": "date", - "format": "strict_date_time||epoch_millis" - }, - "data_end_time": { - "type": "date", - "format": "strict_date_time||epoch_millis" - }, - "execution_start_time": { - "type": "date", - "format": "strict_date_time||epoch_millis" - }, - "execution_end_time": { - "type": "date", - "format": "strict_date_time||epoch_millis" - }, - "error": { - "type": "text" - }, - "user": { - "type": "nested", - "properties": { - "name": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "backend_roles": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword" - } - } - }, - "roles": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword" - } - } - }, - "custom_attribute_names": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword" - } - } - } - } - }, - "entity": { - "type": "nested", - "properties": { - "name": { - "type": "keyword" - }, - "value": { - "type": "keyword" - } - } - }, - "schema_version": { - "type": "integer" - } - } - """ -} - -fun randomAnomalyDetector(): String { - return """{ - "name" : "${OpenSearchTestCase.randomAlphaOfLength(10)}", - "description" : "${OpenSearchTestCase.randomAlphaOfLength(10)}", - "time_field" : "timestamp", - "indices" : [ - "${OpenSearchTestCase.randomAlphaOfLength(5)}" - ], - "filter_query" : { - "match_all" : { - "boost" : 1.0 - } - }, - "detection_interval" : { - "period" : { - "interval" : 1, - "unit" : "Minutes" - } - }, - "window_delay" : { - "period" : { - "interval" : 1, - "unit" : "Minutes" - } - }, - "shingle_size" : 8, - "feature_attributes" : [ - { - "feature_name" : "F1", - "feature_enabled" : true, - "aggregation_query" : { - "f_1" : { - "sum" : { - "field" : "value" - } - } - } - } - ] - } - """.trimIndent() -} - -fun randomAnomalyDetectorWithUser(backendRole: String): String { - return """{ - "name" : "${OpenSearchTestCase.randomAlphaOfLength(5)}", - "description" : "${OpenSearchTestCase.randomAlphaOfLength(10)}", - "time_field" : "timestamp", - "indices" : [ - "${OpenSearchTestCase.randomAlphaOfLength(5)}" - ], - "filter_query" : { - "match_all" : { - "boost" : 1.0 - } - }, - "detection_interval" : { - "period" : { - "interval" : 1, - "unit" : "Minutes" - } - }, - "window_delay" : { - "period" : { - "interval" : 1, - "unit" : "Minutes" - } - }, - "shingle_size" : 8, - "feature_attributes" : [ - { - "feature_name" : "F1", - "feature_enabled" : true, - "aggregation_query" : { - "f_1" : { - "sum" : { - "field" : "value" - } - } - } - } - ], - "user" : { - "name" : "${OpenSearchTestCase.randomAlphaOfLength(5)}", - "backend_roles" : [ "$backendRole" ], - "roles" : [ - "${OpenSearchTestCase.randomAlphaOfLength(5)}" - ], - "custom_attribute_names" : [ ] - } - } - """.trimIndent() -} - -fun randomAnomalyResult( - detectorId: String = OpenSearchTestCase.randomAlphaOfLength(10), - dataStartTime: Long = ZonedDateTime.now().minus(2, ChronoUnit.MINUTES).toInstant().toEpochMilli(), - dataEndTime: Long = ZonedDateTime.now().toInstant().toEpochMilli(), - featureId: String = OpenSearchTestCase.randomAlphaOfLength(5), - featureName: String = OpenSearchTestCase.randomAlphaOfLength(5), - featureData: Double = OpenSearchTestCase.randomDouble(), - executionStartTime: Long = ZonedDateTime.now().minus(10, ChronoUnit.SECONDS).toInstant().toEpochMilli(), - executionEndTime: Long = ZonedDateTime.now().toInstant().toEpochMilli(), - anomalyScore: Double = 
OpenSearchTestCase.randomDouble(), - anomalyGrade: Double = OpenSearchTestCase.randomDouble(), - confidence: Double = OpenSearchTestCase.randomDouble(), - user: User = randomUser() -): String { - return """{ - "detector_id" : "$detectorId", - "data_start_time" : $dataStartTime, - "data_end_time" : $dataEndTime, - "feature_data" : [ - { - "feature_id" : "$featureId", - "feature_name" : "$featureName", - "data" : $featureData - } - ], - "execution_start_time" : $executionStartTime, - "execution_end_time" : $executionEndTime, - "anomaly_score" : $anomalyScore, - "anomaly_grade" : $anomalyGrade, - "confidence" : $confidence, - "user" : { - "name" : "${user.name}", - "backend_roles" : [ - ${user.backendRoles.joinToString { "\"${it}\"" }} - ], - "roles" : [ - ${user.roles.joinToString { "\"${it}\"" }} - ], - "custom_attribute_names" : [ - ${user.customAttNames.joinToString { "\"${it}\"" }} - ] - } - } - """.trimIndent() -} - -fun randomAnomalyResultWithoutUser( - detectorId: String = OpenSearchTestCase.randomAlphaOfLength(10), - dataStartTime: Long = ZonedDateTime.now().minus(2, ChronoUnit.MINUTES).toInstant().toEpochMilli(), - dataEndTime: Long = ZonedDateTime.now().toInstant().toEpochMilli(), - featureId: String = OpenSearchTestCase.randomAlphaOfLength(5), - featureName: String = OpenSearchTestCase.randomAlphaOfLength(5), - featureData: Double = OpenSearchTestCase.randomDouble(), - executionStartTime: Long = ZonedDateTime.now().minus(10, ChronoUnit.SECONDS).toInstant().toEpochMilli(), - executionEndTime: Long = ZonedDateTime.now().toInstant().toEpochMilli(), - anomalyScore: Double = OpenSearchTestCase.randomDouble(), - anomalyGrade: Double = OpenSearchTestCase.randomDouble(), - confidence: Double = OpenSearchTestCase.randomDouble() -): String { - return """{ - "detector_id" : "$detectorId", - "data_start_time" : $dataStartTime, - "data_end_time" : $dataEndTime, - "feature_data" : [ - { - "feature_id" : "$featureId", - "feature_name" : "$featureName", - "data" : $featureData - } - ], - "execution_start_time" : $executionStartTime, - "execution_end_time" : $executionEndTime, - "anomaly_score" : $anomalyScore, - "anomaly_grade" : $anomalyGrade, - "confidence" : $confidence - } - """.trimIndent() -} - -fun maxAnomalyGradeSearchInput( - adResultIndex: String = ".opendistro-anomaly-results-history", - detectorId: String = OpenSearchTestCase.randomAlphaOfLength(10), - size: Int = 1 -): SearchInput { - val rangeQuery = QueryBuilders.rangeQuery("execution_end_time") - .gt("{{period_end}}||-10d") - .lte("{{period_end}}") - .format("epoch_millis") - val termQuery = QueryBuilders.termQuery("detector_id", detectorId) - - var boolQueryBuilder = BoolQueryBuilder() - boolQueryBuilder.filter(rangeQuery).filter(termQuery) - - val aggregationBuilder = AggregationBuilders.max("max_anomaly_grade").field("anomaly_grade") - val searchSourceBuilder = SearchSourceBuilder().query(boolQueryBuilder).aggregation(aggregationBuilder).size(size) - return SearchInput(indices = listOf(adResultIndex), query = searchSourceBuilder) -} - -fun adMonitorTrigger(): QueryLevelTrigger { - val triggerScript = """ - return ctx.results[0].aggregations.max_anomaly_grade.value != null && - ctx.results[0].aggregations.max_anomaly_grade.value > 0.7 - """.trimIndent() - return randomQueryLevelTrigger(condition = Script(triggerScript)) -} - -fun adSearchInput(detectorId: String): SearchInput { - return maxAnomalyGradeSearchInput(adResultIndex = ANOMALY_RESULT_INDEX, detectorId = detectorId, size = 10) -} - -fun randomADMonitor( - name: String = 
OpenSearchRestTestCase.randomAlphaOfLength(10), - user: User? = randomUser(), - inputs: List = listOf(adSearchInput("test_detector_id")), - schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), - enabled: Boolean = OpenSearchTestCase.randomBoolean(), - triggers: List = (1..OpenSearchTestCase.randomInt(10)).map { randomQueryLevelTrigger() }, - enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, - lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), - withMetadata: Boolean = false -): Monitor { - return Monitor( - name = name, monitorType = Monitor.MonitorType.QUERY_LEVEL_MONITOR, enabled = enabled, inputs = inputs, - schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, - user = user, uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() - ) -} - -fun randomADUser(backendRole: String = OpenSearchRestTestCase.randomAlphaOfLength(10)): User { - return User( - OpenSearchRestTestCase.randomAlphaOfLength(10), listOf(backendRole), - listOf(OpenSearchRestTestCase.randomAlphaOfLength(10), ALL_ACCESS_ROLE), listOf("test_attr=test") - ) -} diff --git a/alerting/bin/test/org/opensearch/alerting/AccessRoles.kt b/alerting/bin/test/org/opensearch/alerting/AccessRoles.kt deleted file mode 100644 index 7f415a8ac..000000000 --- a/alerting/bin/test/org/opensearch/alerting/AccessRoles.kt +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import org.opensearch.alerting.action.ExecuteWorkflowAction -import org.opensearch.commons.alerting.action.AlertingActions - -val ALL_ACCESS_ROLE = "all_access" -val READALL_AND_MONITOR_ROLE = "readall_and_monitor" -val ALERTING_FULL_ACCESS_ROLE = "alerting_full_access" -val ALERTING_READ_ONLY_ACCESS = "alerting_read_access" -val ALERTING_NO_ACCESS_ROLE = "no_access" -val ALERTING_GET_EMAIL_ACCOUNT_ACCESS = "alerting_get_email_account_access" -val ALERTING_SEARCH_EMAIL_ACCOUNT_ACCESS = "alerting_search_email_account_access" -val ALERTING_GET_EMAIL_GROUP_ACCESS = "alerting_get_email_group_access" -val ALERTING_SEARCH_EMAIL_GROUP_ACCESS = "alerting_search_email_group_access" -val ALERTING_INDEX_MONITOR_ACCESS = "alerting_index_monitor_access" -val ALERTING_GET_MONITOR_ACCESS = "alerting_get_monitor_access" -val ALERTING_GET_WORKFLOW_ACCESS = "alerting_get_workflow_access" -val ALERTING_DELETE_WORKFLOW_ACCESS = "alerting_delete_workflow_access" -val ALERTING_SEARCH_MONITOR_ONLY_ACCESS = "alerting_search_monitor_access" -val ALERTING_EXECUTE_MONITOR_ACCESS = "alerting_execute_monitor_access" -val ALERTING_EXECUTE_WORKFLOW_ACCESS = "alerting_execute_workflow_access" -val ALERTING_DELETE_MONITOR_ACCESS = "alerting_delete_monitor_access" -val ALERTING_GET_DESTINATION_ACCESS = "alerting_get_destination_access" -val ALERTING_GET_ALERTS_ACCESS = "alerting_get_alerts_access" -val ALERTING_INDEX_WORKFLOW_ACCESS = "alerting_index_workflow_access" - -val ROLE_TO_PERMISSION_MAPPING = mapOf( - ALERTING_NO_ACCESS_ROLE to "", - ALERTING_GET_EMAIL_ACCOUNT_ACCESS to "cluster:admin/opendistro/alerting/destination/email_account/get", - ALERTING_SEARCH_EMAIL_ACCOUNT_ACCESS to "cluster:admin/opendistro/alerting/destination/email_account/search", - ALERTING_GET_EMAIL_GROUP_ACCESS to "cluster:admin/opendistro/alerting/destination/email_group/get", - ALERTING_SEARCH_EMAIL_GROUP_ACCESS to "cluster:admin/opendistro/alerting/destination/email_group/search", - 
ALERTING_INDEX_MONITOR_ACCESS to "cluster:admin/opendistro/alerting/monitor/write", - ALERTING_GET_MONITOR_ACCESS to "cluster:admin/opendistro/alerting/monitor/get", - ALERTING_GET_WORKFLOW_ACCESS to AlertingActions.GET_WORKFLOW_ACTION_NAME, - ALERTING_SEARCH_MONITOR_ONLY_ACCESS to "cluster:admin/opendistro/alerting/monitor/search", - ALERTING_EXECUTE_MONITOR_ACCESS to "cluster:admin/opendistro/alerting/monitor/execute", - ALERTING_EXECUTE_WORKFLOW_ACCESS to ExecuteWorkflowAction.NAME, - ALERTING_DELETE_MONITOR_ACCESS to "cluster:admin/opendistro/alerting/monitor/delete", - ALERTING_GET_DESTINATION_ACCESS to "cluster:admin/opendistro/alerting/destination/get", - ALERTING_GET_ALERTS_ACCESS to "cluster:admin/opendistro/alerting/alerts/get", - ALERTING_INDEX_WORKFLOW_ACCESS to AlertingActions.INDEX_WORKFLOW_ACTION_NAME, - ALERTING_DELETE_WORKFLOW_ACCESS to AlertingActions.DELETE_WORKFLOW_ACTION_NAME -) diff --git a/alerting/bin/test/org/opensearch/alerting/AlertServiceTests.kt b/alerting/bin/test/org/opensearch/alerting/AlertServiceTests.kt deleted file mode 100644 index 8583ae0db..000000000 --- a/alerting/bin/test/org/opensearch/alerting/AlertServiceTests.kt +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import org.junit.Before -import org.mockito.Mockito -import org.opensearch.Version -import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.util.getBucketKeysHash -import org.opensearch.client.Client -import org.opensearch.cluster.node.DiscoveryNode -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.settings.ClusterSettings -import org.opensearch.common.settings.Setting -import org.opensearch.common.settings.Settings -import org.opensearch.commons.alerting.model.AggregationResultBucket -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.BucketLevelTrigger -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.action.AlertCategory -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.test.ClusterServiceUtils -import org.opensearch.test.OpenSearchTestCase -import org.opensearch.threadpool.ThreadPool -import java.time.Instant -import java.time.temporal.ChronoUnit - -class AlertServiceTests : OpenSearchTestCase() { - - private lateinit var client: Client - private lateinit var xContentRegistry: NamedXContentRegistry - private lateinit var settings: Settings - private lateinit var threadPool: ThreadPool - private lateinit var clusterService: ClusterService - - private lateinit var alertIndices: AlertIndices - private lateinit var alertService: AlertService - - @Before - fun setup() { - // TODO: If more *Service unit tests are added, this configuration can be moved to some base class for each service test class to use - client = Mockito.mock(Client::class.java) - xContentRegistry = Mockito.mock(NamedXContentRegistry::class.java) - threadPool = Mockito.mock(ThreadPool::class.java) - clusterService = Mockito.mock(ClusterService::class.java) - - settings = Settings.builder().build() - val settingSet = hashSetOf>() - settingSet.addAll(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) - settingSet.add(AlertingSettings.ALERT_HISTORY_ENABLED) - settingSet.add(AlertingSettings.ALERT_HISTORY_MAX_DOCS) - settingSet.add(AlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE) - 
settingSet.add(AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD) - settingSet.add(AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD) - settingSet.add(AlertingSettings.REQUEST_TIMEOUT) - settingSet.add(AlertingSettings.FINDING_HISTORY_ENABLED) - settingSet.add(AlertingSettings.FINDING_HISTORY_MAX_DOCS) - settingSet.add(AlertingSettings.FINDING_HISTORY_INDEX_MAX_AGE) - settingSet.add(AlertingSettings.FINDING_HISTORY_ROLLOVER_PERIOD) - settingSet.add(AlertingSettings.FINDING_HISTORY_RETENTION_PERIOD) - val discoveryNode = DiscoveryNode("node", buildNewFakeTransportAddress(), Version.CURRENT) - val clusterSettings = ClusterSettings(settings, settingSet) - val testClusterService = ClusterServiceUtils.createClusterService(threadPool, discoveryNode, clusterSettings) - clusterService = Mockito.spy(testClusterService) - - alertIndices = AlertIndices(settings, client, threadPool, clusterService) - alertService = AlertService(client, xContentRegistry, alertIndices) - } - - fun `test getting categorized alerts for bucket-level monitor with no current alerts`() { - val trigger = randomBucketLevelTrigger() - val monitor = randomBucketLevelMonitor(triggers = listOf(trigger)) - - val currentAlerts = mutableMapOf() - val aggResultBuckets = createAggregationResultBucketsFromBucketKeys( - listOf( - listOf("a"), - listOf("b") - ) - ) - - val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor( - monitor, trigger, currentAlerts, aggResultBuckets, emptyList(), "", null - ) - // Completed Alerts are what remains in currentAlerts after categorization - val completedAlerts = currentAlerts.values.toList() - assertEquals(listOf(), categorizedAlerts[AlertCategory.DEDUPED]) - assertAlertsExistForBucketKeys( - listOf( - listOf("a"), - listOf("b") - ), - categorizedAlerts[AlertCategory.NEW] ?: error("New alerts not found") - ) - assertEquals(listOf(), completedAlerts) - } - - fun `test getting categorized alerts for bucket-level monitor with de-duped alerts`() { - val trigger = randomBucketLevelTrigger() - val monitor = randomBucketLevelMonitor(triggers = listOf(trigger)) - - val currentAlerts = createCurrentAlertsFromBucketKeys( - monitor, - trigger, - listOf( - listOf("a"), - listOf("b") - ) - ) - val aggResultBuckets = createAggregationResultBucketsFromBucketKeys( - listOf( - listOf("a"), - listOf("b") - ) - ) - - val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor( - monitor, trigger, currentAlerts, aggResultBuckets, emptyList(), "", null - ) - // Completed Alerts are what remains in currentAlerts after categorization - val completedAlerts = currentAlerts.values.toList() - assertAlertsExistForBucketKeys( - listOf( - listOf("a"), - listOf("b") - ), - categorizedAlerts[AlertCategory.DEDUPED] ?: error("Deduped alerts not found") - ) - assertEquals(listOf(), categorizedAlerts[AlertCategory.NEW]) - assertEquals(listOf(), completedAlerts) - } - - fun `test getting categorized alerts for bucket-level monitor with completed alerts`() { - val trigger = randomBucketLevelTrigger() - val monitor = randomBucketLevelMonitor(triggers = listOf(trigger)) - - val currentAlerts = createCurrentAlertsFromBucketKeys( - monitor, - trigger, - listOf( - listOf("a"), - listOf("b") - ) - ) - val aggResultBuckets = listOf() - - val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor( - monitor, trigger, currentAlerts, aggResultBuckets, emptyList(), "", null - ) - // Completed Alerts are what remains in currentAlerts after categorization - val completedAlerts = 
currentAlerts.values.toList() - assertEquals(listOf(), categorizedAlerts[AlertCategory.DEDUPED]) - assertEquals(listOf(), categorizedAlerts[AlertCategory.NEW]) - assertAlertsExistForBucketKeys( - listOf( - listOf("a"), - listOf("b") - ), - completedAlerts - ) - } - - fun `test getting categorized alerts for bucket-level monitor with de-duped and completed alerts`() { - val trigger = randomBucketLevelTrigger() - val monitor = randomBucketLevelMonitor(triggers = listOf(trigger)) - - val currentAlerts = createCurrentAlertsFromBucketKeys( - monitor, - trigger, - listOf( - listOf("a"), - listOf("b") - ) - ) - val aggResultBuckets = createAggregationResultBucketsFromBucketKeys( - listOf( - listOf("b"), - listOf("c") - ) - ) - - val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor( - monitor, trigger, currentAlerts, aggResultBuckets, emptyList(), "", null - ) - // Completed Alerts are what remains in currentAlerts after categorization - val completedAlerts = currentAlerts.values.toList() - assertAlertsExistForBucketKeys(listOf(listOf("b")), categorizedAlerts[AlertCategory.DEDUPED] ?: error("Deduped alerts not found")) - assertAlertsExistForBucketKeys(listOf(listOf("c")), categorizedAlerts[AlertCategory.NEW] ?: error("New alerts not found")) - assertAlertsExistForBucketKeys(listOf(listOf("a")), completedAlerts) - } - - fun `test getting categorized alerts for bucket-level monitor with de-duped alerts size 1`() { - val trigger = randomBucketLevelTrigger() - val monitor = randomBucketLevelMonitor(triggers = listOf(trigger)) - - val currentAlerts = createCurrentAlertsFromBucketKeys( - monitor, - trigger, - listOf( - listOf("a") - ) - ) - val aggResultBuckets = createAggregationResultBucketsFromBucketKeys( - listOf( - listOf("a") - ) - ) - - val categorizedAlerts = alertService.getCategorizedAlertsForBucketLevelMonitor( - monitor, trigger, currentAlerts, aggResultBuckets, emptyList(), "", null - ) - // Completed Alerts are what remains in currentAlerts after categorization - val completedAlerts = currentAlerts.values.toList() - assertAlertsExistForBucketKeys(listOf(listOf("a")), categorizedAlerts[AlertCategory.DEDUPED] ?: error("Deduped alerts not found")) - assertAlertsExistForBucketKeys(emptyList(), categorizedAlerts[AlertCategory.NEW] ?: error("New alerts found")) - assertAlertsExistForBucketKeys(emptyList(), completedAlerts) - } - - private fun createCurrentAlertsFromBucketKeys( - monitor: Monitor, - trigger: BucketLevelTrigger, - bucketKeysList: List> - ): MutableMap { - return bucketKeysList.map { bucketKeys -> - val aggResultBucket = AggregationResultBucket("parent_bucket_path", bucketKeys, mapOf()) - val alert = Alert( - monitor, - trigger, - Instant.now().truncatedTo(ChronoUnit.MILLIS), - null, - actionExecutionResults = listOf(randomActionExecutionResult()), - aggregationResultBucket = aggResultBucket - ) - aggResultBucket.getBucketKeysHash() to alert - }.toMap().toMutableMap() - } - - private fun createAggregationResultBucketsFromBucketKeys(bucketKeysList: List>): List { - return bucketKeysList.map { AggregationResultBucket("parent_bucket_path", it, mapOf()) } - } - - private fun assertAlertsExistForBucketKeys(bucketKeysList: List>, alerts: List) { - // Check if size is equals first for sanity and since bucketKeysList should have unique entries, - // this ensures there shouldn't be duplicates in the alerts - assertEquals(bucketKeysList.size, alerts.size) - val expectedBucketKeyHashes = bucketKeysList.map { it.joinToString(separator = "#") }.toSet() - alerts.forEach 
{ alert -> - assertNotNull(alert.aggregationResultBucket) - assertTrue(expectedBucketKeyHashes.contains(alert.aggregationResultBucket!!.getBucketKeysHash())) - } - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/AlertingRestTestCase.kt b/alerting/bin/test/org/opensearch/alerting/AlertingRestTestCase.kt deleted file mode 100644 index 50cae9d8c..000000000 --- a/alerting/bin/test/org/opensearch/alerting/AlertingRestTestCase.kt +++ /dev/null @@ -1,1821 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import org.apache.http.HttpEntity -import org.apache.http.HttpHeaders -import org.apache.http.entity.ContentType -import org.apache.http.entity.ContentType.APPLICATION_JSON -import org.apache.http.entity.StringEntity -import org.apache.http.message.BasicHeader -import org.junit.AfterClass -import org.junit.rules.DisableOnDebug -import org.opensearch.action.search.SearchResponse -import org.opensearch.alerting.AlertingPlugin.Companion.EMAIL_ACCOUNT_BASE_URI -import org.opensearch.alerting.AlertingPlugin.Companion.EMAIL_GROUP_BASE_URI -import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.alerts.AlertIndices.Companion.FINDING_HISTORY_WRITE_INDEX -import org.opensearch.alerting.core.settings.ScheduledJobSettings -import org.opensearch.alerting.model.destination.Chime -import org.opensearch.alerting.model.destination.CustomWebhook -import org.opensearch.alerting.model.destination.Destination -import org.opensearch.alerting.model.destination.Slack -import org.opensearch.alerting.model.destination.email.EmailAccount -import org.opensearch.alerting.model.destination.email.EmailGroup -import org.opensearch.alerting.settings.AlertingSettings -import org.opensearch.alerting.settings.DestinationSettings -import org.opensearch.alerting.util.DestinationType -import org.opensearch.client.Request -import org.opensearch.client.Response -import org.opensearch.client.RestClient -import org.opensearch.client.WarningFailureException -import org.opensearch.common.UUIDs -import org.opensearch.common.io.PathUtils -import org.opensearch.common.settings.Settings -import org.opensearch.common.unit.TimeValue -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentFactory -import org.opensearch.common.xcontent.XContentFactory.jsonBuilder -import org.opensearch.common.xcontent.XContentType -import org.opensearch.common.xcontent.json.JsonXContent.jsonXContent -import org.opensearch.commons.alerting.action.GetFindingsResponse -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.BucketLevelTrigger -import org.opensearch.commons.alerting.model.ChainedAlertTrigger -import org.opensearch.commons.alerting.model.DocLevelMonitorInput -import org.opensearch.commons.alerting.model.DocLevelQuery -import org.opensearch.commons.alerting.model.DocumentLevelTrigger -import org.opensearch.commons.alerting.model.Finding -import org.opensearch.commons.alerting.model.FindingWithDocs -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.QueryLevelTrigger -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.alerting.model.SearchInput -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.commons.alerting.util.string -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry 
-import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils -import org.opensearch.search.SearchModule -import java.net.URLEncoder -import java.nio.file.Files -import java.time.Instant -import java.time.ZonedDateTime -import java.time.format.DateTimeFormatter -import java.time.temporal.ChronoUnit -import java.util.Locale -import java.util.UUID -import java.util.stream.Collectors -import javax.management.MBeanServerInvocationHandler -import javax.management.ObjectName -import javax.management.remote.JMXConnectorFactory -import javax.management.remote.JMXServiceURL -import kotlin.collections.ArrayList -import kotlin.collections.HashMap - -/** - * Superclass for tests that interact with an external test cluster using OpenSearch's RestClient - */ -abstract class AlertingRestTestCase : ODFERestTestCase() { - - protected val password = "D%LMX3bo#@U3XqVQ" - - protected val isDebuggingTest = DisableOnDebug(null).isDebugging - protected val isDebuggingRemoteCluster = System.getProperty("cluster.debug", "false")!!.toBoolean() - protected val numberOfNodes = System.getProperty("cluster.number_of_nodes", "1")!!.toInt() - protected val isMultiNode = numberOfNodes > 1 - - protected val statsResponseOpendistroSweeperEnabledField = "opendistro.scheduled_jobs.enabled" - protected val statsResponseOpenSearchSweeperEnabledField = "plugins.scheduled_jobs.enabled" - - override fun xContentRegistry(): NamedXContentRegistry { - return NamedXContentRegistry( - mutableListOf( - Monitor.XCONTENT_REGISTRY, - SearchInput.XCONTENT_REGISTRY, - DocLevelMonitorInput.XCONTENT_REGISTRY, - QueryLevelTrigger.XCONTENT_REGISTRY, - BucketLevelTrigger.XCONTENT_REGISTRY, - DocumentLevelTrigger.XCONTENT_REGISTRY, - Workflow.XCONTENT_REGISTRY, - ChainedAlertTrigger.XCONTENT_REGISTRY - ) + SearchModule(Settings.EMPTY, emptyList()).namedXContents - ) - } - - fun Response.asMap(): Map { - return entityAsMap(this) - } - - private fun createMonitorEntityWithBackendRoles(monitor: Monitor, rbacRoles: List?): HttpEntity { - if (rbacRoles == null) { - return monitor.toHttpEntity() - } - val temp = monitor.toJsonString() - val toReplace = temp.lastIndexOf("}") - val rbacString = rbacRoles.joinToString { "\"$it\"" } - val jsonString = temp.substring(0, toReplace) + ", \"rbac_roles\": [$rbacString] }" - return StringEntity(jsonString, APPLICATION_JSON) - } - - protected fun createMonitorWithClient( - client: RestClient, - monitor: Monitor, - rbacRoles: List? 
= null, - refresh: Boolean = true, - ): Monitor { - val response = client.makeRequest( - "POST", - "$ALERTING_BASE_URI?refresh=$refresh", - emptyMap(), - createMonitorEntityWithBackendRoles(monitor, rbacRoles) - ) - assertEquals("Unable to create a new monitor", RestStatus.CREATED, response.restStatus()) - - val monitorJson = jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - response.entity.content - ).map() - assertUserNull(monitorJson as HashMap) - - return getMonitor(monitorId = monitorJson["_id"] as String) - } - - protected fun createMonitor(monitor: Monitor, refresh: Boolean = true): Monitor { - return createMonitorWithClient(client(), monitor, emptyList(), refresh) - } - - protected fun deleteMonitor(monitor: Monitor, refresh: Boolean = true): Response { - val response = client().makeRequest( - "DELETE", - "$ALERTING_BASE_URI/${monitor.id}?refresh=$refresh", - emptyMap(), - monitor.toHttpEntity() - ) - assertEquals("Unable to delete a monitor", RestStatus.OK, response.restStatus()) - - return response - } - - protected fun deleteWorkflow(workflow: Workflow, deleteDelegates: Boolean = false, refresh: Boolean = true): Response { - val response = client().makeRequest( - "DELETE", - "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}?refresh=$refresh&deleteDelegateMonitors=$deleteDelegates", - emptyMap(), - workflow.toHttpEntity() - ) - assertEquals("Unable to delete a workflow", RestStatus.OK, response.restStatus()) - return response - } - - protected fun deleteWorkflowWithClient( - client: RestClient, - workflow: Workflow, - deleteDelegates: Boolean = false, - refresh: Boolean = true, - ): Response { - val response = client.makeRequest( - "DELETE", - "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}?refresh=$refresh&deleteDelegateMonitors=$deleteDelegates", - emptyMap(), - workflow.toHttpEntity() - ) - assertEquals("Unable to delete a workflow", RestStatus.OK, response.restStatus()) - - return response - } - - /** - * Destinations are now deprecated in favor of the Notification plugin's configs. - * This method should only be used for checking legacy behavior/Notification migration scenarios. 
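- * Rather than going through the deprecated destination REST API, this helper indexes the
- * destination document directly into the scheduled jobs index via the admin client.
- * Example usage from this class: createDestination(getSlackDestination()).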
- */ - protected fun createDestination(destination: Destination = getTestDestination(), refresh: Boolean = true): Destination { - // Create Alerting config index if it doesn't exist to avoid mapping issues with legacy destination indexing - createAlertingConfigIndex() - - val response = indexDocWithAdminClient( - ScheduledJob.SCHEDULED_JOBS_INDEX, - UUIDs.base64UUID(), - destination.toJsonStringWithType(), - refresh - ) - val destinationJson = jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - response.entity.content - ).map() - - return destination.copy( - id = destinationJson["_id"] as String, - version = (destinationJson["_version"] as Int).toLong(), - primaryTerm = destinationJson["_primary_term"] as Int - ) - } - - protected fun deleteDestination(destination: Destination = getTestDestination(), refresh: Boolean = true): Response { - val response = client().makeRequest( - "DELETE", - "$DESTINATION_BASE_URI/${destination.id}?refresh=$refresh", - emptyMap(), - destination.toHttpEntity() - ) - assertEquals("Unable to delete destination", RestStatus.OK, response.restStatus()) - - return response - } - - protected fun updateDestination(destination: Destination, refresh: Boolean = true): Destination { - val response = client().makeRequest( - "PUT", - "$DESTINATION_BASE_URI/${destination.id}?refresh=$refresh", - emptyMap(), - destination.toHttpEntity() - ) - assertEquals("Unable to update a destination", RestStatus.OK, response.restStatus()) - val destinationJson = jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - response.entity.content - ).map() - assertUserNull(destinationJson as HashMap) - - return destination.copy(id = destinationJson["_id"] as String, version = (destinationJson["_version"] as Int).toLong()) - } - - protected fun getEmailAccount( - emailAccountID: String, - header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"), - ): EmailAccount { - val response = client().makeRequest("GET", "$EMAIL_ACCOUNT_BASE_URI/$emailAccountID", null, header) - assertEquals("Unable to get email account $emailAccountID", RestStatus.OK, response.restStatus()) - - val parser = createParser(XContentType.JSON.xContent(), response.entity.content) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser) - - lateinit var id: String - var version: Long = 0 - lateinit var emailAccount: EmailAccount - - while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - parser.nextToken() - - when (parser.currentName()) { - "_id" -> id = parser.text() - "_version" -> version = parser.longValue() - "email_account" -> emailAccount = EmailAccount.parse(parser) - } - } - - return emailAccount.copy(id = id, version = version) - } - - /** - * Email Accounts are now deprecated in favor of the Notification plugin's configs. - * This method should only be used for checking legacy behavior/Notification migration scenarios. 
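- * Like createDestination above, this indexes the account document straight into the
- * scheduled jobs index; createRandomEmailAccount builds on it to index and then re-read
- * a randomly generated account.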
- */ - protected fun createEmailAccount(emailAccount: EmailAccount = getTestEmailAccount(), refresh: Boolean = true): EmailAccount { - // Create Alerting config index if it doesn't exist to avoid mapping issues with legacy destination indexing - createAlertingConfigIndex() - - val response = indexDocWithAdminClient( - ScheduledJob.SCHEDULED_JOBS_INDEX, - UUIDs.base64UUID(), - emailAccount.toJsonStringWithType(), - refresh - ) - val emailAccountJson = jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - response.entity.content - ).map() - return emailAccount.copy(id = emailAccountJson["_id"] as String) - } - - protected fun createRandomEmailAccount(refresh: Boolean = true): EmailAccount { - val emailAccount = randomEmailAccount() - val emailAccountID = createEmailAccount(emailAccount, refresh).id - return getEmailAccount(emailAccountID = emailAccountID) - } - - protected fun createRandomEmailAccountWithGivenName(refresh: Boolean = true, randomName: String): EmailAccount { - val emailAccount = randomEmailAccount(salt = randomName) - val emailAccountID = createEmailAccount(emailAccount, refresh).id - return getEmailAccount(emailAccountID = emailAccountID) - } - - protected fun getEmailGroup( - emailGroupID: String, - header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"), - ): EmailGroup { - val response = client().makeRequest("GET", "$EMAIL_GROUP_BASE_URI/$emailGroupID", null, header) - assertEquals("Unable to get email group $emailGroupID", RestStatus.OK, response.restStatus()) - - val parser = createParser(XContentType.JSON.xContent(), response.entity.content) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser) - - lateinit var id: String - var version: Long = 0 - lateinit var emailGroup: EmailGroup - - while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - parser.nextToken() - - when (parser.currentName()) { - "_id" -> id = parser.text() - "_version" -> version = parser.longValue() - "email_group" -> emailGroup = EmailGroup.parse(parser) - } - } - - return emailGroup.copy(id = id, version = version) - } - - /** - * Email Groups are now deprecated in favor of the Notification plugin's configs. - * This method should only be used for checking legacy behavior/Notification migration scenarios. 
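- * Mirrors createEmailAccount above; createRandomEmailGroup builds on it to index and then
- * re-read a randomly generated group.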
- */ - protected fun createEmailGroup(emailGroup: EmailGroup = getTestEmailGroup(), refresh: Boolean = true): EmailGroup { - // Create Alerting config index if it doesn't exist to avoid mapping issues with legacy destination indexing - createAlertingConfigIndex() - - val response = indexDocWithAdminClient( - ScheduledJob.SCHEDULED_JOBS_INDEX, - UUIDs.base64UUID(), - emailGroup.toJsonStringWithType(), - refresh - ) - val emailGroupJson = jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - response.entity.content - ).map() - return emailGroup.copy(id = emailGroupJson["_id"] as String) - } - - protected fun createRandomEmailGroup(refresh: Boolean = true): EmailGroup { - val emailGroup = randomEmailGroup() - val emailGroupID = createEmailGroup(emailGroup, refresh).id - return getEmailGroup(emailGroupID = emailGroupID) - } - - protected fun createRandomEmailGroupWithGivenName(refresh: Boolean = true, randomName: String): EmailGroup { - val emailGroup = randomEmailGroup(salt = randomName) - val emailGroupID = createEmailGroup(emailGroup, refresh).id - return getEmailGroup(emailGroupID = emailGroupID) - } - - @Suppress("UNCHECKED_CAST") - protected fun getDestination(destination: Destination): Map { - val response = client().makeRequest( - "GET", - "$DESTINATION_BASE_URI/${destination.id}" - ) - assertEquals("Unable to update a destination", RestStatus.OK, response.restStatus()) - val destinationJson = jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - response.entity.content - ).map() - assertUserNull(destinationJson as HashMap) - return (destinationJson["destinations"] as List)[0] as Map - } - - @Suppress("UNCHECKED_CAST") - protected fun getDestinations(dataMap: Map = emptyMap()): List> { - return getDestinations(client(), dataMap) - } - - @Suppress("UNCHECKED_CAST") - protected fun getDestinations( - client: RestClient, - dataMap: Map = emptyMap(), - header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"), - ): List> { - var baseEndpoint = "$DESTINATION_BASE_URI?" 
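-        // The loop below appends "key=value&" per entry, leaving a trailing '&' (or this '?')
-        // on the URL, which the REST client used by these tests evidently tolerates.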
- for (entry in dataMap.entries) { - baseEndpoint += "${entry.key}=${entry.value}&" - } - - val response = client.makeRequest( - "GET", - baseEndpoint, - null, - header - ) - assertEquals("Unable to update a destination", RestStatus.OK, response.restStatus()) - val destinationJson = jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - response.entity.content - ).map() - return destinationJson["destinations"] as List> - } - - protected fun getTestDestination(): Destination { - return Destination( - type = DestinationType.TEST_ACTION, - name = "test", - user = randomUser(), - lastUpdateTime = Instant.now(), - chime = null, - slack = null, - customWebhook = null, - email = null - ) - } - - fun getSlackDestination(): Destination { - val slack = Slack("https://hooks.slack.com/services/slackId") - return Destination( - type = DestinationType.SLACK, - name = "test", - user = randomUser(), - lastUpdateTime = Instant.now(), - chime = null, - slack = slack, - customWebhook = null, - email = null - ) - } - - fun getChimeDestination(): Destination { - val chime = Chime("https://hooks.chime.aws/incomingwebhooks/chimeId") - return Destination( - type = DestinationType.CHIME, - name = "test", - user = randomUser(), - lastUpdateTime = Instant.now(), - chime = chime, - slack = null, - customWebhook = null, - email = null - ) - } - - fun getCustomWebhookDestination(): Destination { - val customWebhook = CustomWebhook( - "https://hooks.slack.com/services/customWebhookId", - null, - null, - 80, - null, - null, - emptyMap(), - emptyMap(), - null, - null - ) - return Destination( - type = DestinationType.CUSTOM_WEBHOOK, - name = "test", - user = randomUser(), - lastUpdateTime = Instant.now(), - chime = null, - slack = null, - customWebhook = customWebhook, - email = null - ) - } - - private fun getTestEmailAccount(): EmailAccount { - return EmailAccount( - name = "test", - email = "test@email.com", - host = "smtp.com", - port = 25, - method = EmailAccount.MethodType.NONE, - username = null, - password = null - ) - } - - private fun getTestEmailGroup(): EmailGroup { - return EmailGroup( - name = "test", - emails = listOf() - ) - } - - protected fun verifyIndexSchemaVersion(index: String, expectedVersion: Int) { - val indexMapping = client().getIndexMapping(index) - val indexName = indexMapping.keys.toList()[0] - val mappings = indexMapping.stringMap(indexName)?.stringMap("mappings") - var version = 0 - if (mappings!!.containsKey("_meta")) { - val meta = mappings.stringMap("_meta") - if (meta!!.containsKey("schema_version")) version = meta.get("schema_version") as Int - } - assertEquals(expectedVersion, version) - } - - protected fun createAlert(alert: Alert): Alert { - val response = adminClient().makeRequest( - "POST", - "/${AlertIndices.ALERT_INDEX}/_doc?refresh=true&routing=${alert.monitorId}", - emptyMap(), - alert.toHttpEntityWithUser() - ) - assertEquals("Unable to create a new alert", RestStatus.CREATED, response.restStatus()) - - val alertJson = jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - response.entity.content - ).map() - - assertNull(alertJson["monitor_user"]) - return alert.copy(id = alertJson["_id"] as String, version = (alertJson["_version"] as Int).toLong()) - } - - protected fun createRandomMonitor(refresh: Boolean = false, withMetadata: Boolean = false): Monitor { - val monitor = randomQueryLevelMonitor(withMetadata = withMetadata) - val monitorId = createMonitor(monitor, refresh).id - if 
(withMetadata) { - return getMonitor(monitorId = monitorId, header = BasicHeader(HttpHeaders.USER_AGENT, "OpenSearch-Dashboards")) - } - return getMonitor(monitorId = monitorId) - } - - protected fun createRandomDocumentMonitor(refresh: Boolean = false, withMetadata: Boolean = false): Monitor { - val monitor = randomDocumentLevelMonitor(withMetadata = withMetadata) - val monitorId = createMonitor(monitor, refresh).id - if (withMetadata) { - return getMonitor(monitorId = monitorId, header = BasicHeader(HttpHeaders.USER_AGENT, "OpenSearch-Dashboards")) - } - return getMonitor(monitorId = monitorId) - } - - @Suppress("UNCHECKED_CAST") - protected fun updateMonitor(monitor: Monitor, refresh: Boolean = false): Monitor { - val response = client().makeRequest( - "PUT", - "${monitor.relativeUrl()}?refresh=$refresh", - emptyMap(), - monitor.toHttpEntity() - ) - assertEquals("Unable to update a monitor", RestStatus.OK, response.restStatus()) - assertUserNull(response.asMap()["monitor"] as Map) - return getMonitor(monitorId = monitor.id) - } - - @Suppress("UNCHECKED_CAST") - protected fun updateWorkflow(workflow: Workflow, refresh: Boolean = false): Workflow { - val response = client().makeRequest( - "PUT", - "${workflow.relativeUrl()}?refresh=$refresh", - emptyMap(), - workflow.toHttpEntity() - ) - assertEquals("Unable to update a workflow", RestStatus.OK, response.restStatus()) - assertUserNull(response.asMap()["workflow"] as Map) - return getWorkflow(workflowId = workflow.id) - } - - protected fun updateMonitorWithClient( - client: RestClient, - monitor: Monitor, - rbacRoles: List = emptyList(), - refresh: Boolean = true, - ): Monitor { - val response = client.makeRequest( - "PUT", - "${monitor.relativeUrl()}?refresh=$refresh", - emptyMap(), - createMonitorEntityWithBackendRoles(monitor, rbacRoles) - ) - assertEquals("Unable to update a monitor", RestStatus.OK, response.restStatus()) - assertUserNull(response.asMap()["monitor"] as Map) - return getMonitor(monitorId = monitor.id) - } - - protected fun updateWorkflowWithClient( - client: RestClient, - workflow: Workflow, - rbacRoles: List = emptyList(), - refresh: Boolean = true, - ): Workflow { - val response = client.makeRequest( - "PUT", - "${workflow.relativeUrl()}?refresh=$refresh", - emptyMap(), - createWorkflowEntityWithBackendRoles(workflow, rbacRoles) - ) - assertEquals("Unable to update a workflow", RestStatus.OK, response.restStatus()) - assertUserNull(response.asMap()["workflow"] as Map) - return getWorkflow(workflowId = workflow.id) - } - - protected fun getMonitor(monitorId: String, header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json")): Monitor { - val response = client().makeRequest("GET", "$ALERTING_BASE_URI/$monitorId", null, header) - assertEquals("Unable to get monitor $monitorId", RestStatus.OK, response.restStatus()) - - val parser = createParser(XContentType.JSON.xContent(), response.entity.content) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser) - - lateinit var id: String - var version: Long = 0 - lateinit var monitor: Monitor - - while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - parser.nextToken() - - when (parser.currentName()) { - "_id" -> id = parser.text() - "_version" -> version = parser.longValue() - "monitor" -> monitor = Monitor.parse(parser) - "associated_workflows" -> { - XContentParserUtils.ensureExpectedToken( - XContentParser.Token.START_ARRAY, - parser.currentToken(), - parser - ) - while (parser.nextToken() != 
XContentParser.Token.END_ARRAY) { - // do nothing - } - } - } - } - - assertUserNull(monitor) - return monitor.copy(id = id, version = version) - } - - // TODO: understand why doc alerts wont work with the normal search Alerts function - protected fun searchAlertsWithFilter( - monitor: Monitor, - indices: String = AlertIndices.ALERT_INDEX, - refresh: Boolean = true, - ): List { - if (refresh) refreshIndex(indices) - - val request = """ - { "version" : true, - "query": { "match_all": {} } - } - """.trimIndent() - val httpResponse = adminClient().makeRequest("GET", "/$indices/_search", StringEntity(request, APPLICATION_JSON)) - assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) - - val searchResponse = SearchResponse.fromXContent(createParser(jsonXContent, httpResponse.entity.content)) - return searchResponse.hits.hits.map { - val xcp = createParser(jsonXContent, it.sourceRef).also { it.nextToken() } - Alert.parse(xcp, it.id, it.version) - }.filter { alert -> alert.monitorId == monitor.id } - } - - protected fun createFinding( - monitorId: String = "NO_ID", - monitorName: String = "NO_NAME", - index: String = "testIndex", - docLevelQueries: List = listOf( - DocLevelQuery(query = "test_field:\"us-west-2\"", name = "testQuery", fields = listOf()) - ), - matchingDocIds: List, - ): String { - val finding = Finding( - id = UUID.randomUUID().toString(), - relatedDocIds = matchingDocIds, - monitorId = monitorId, - monitorName = monitorName, - index = index, - docLevelQueries = docLevelQueries, - timestamp = Instant.now() - ) - - val findingStr = finding.toXContent(XContentBuilder.builder(XContentType.JSON.xContent()), ToXContent.EMPTY_PARAMS).string() - - indexDoc(FINDING_HISTORY_WRITE_INDEX, finding.id, findingStr) - return finding.id - } - - protected fun searchFindings( - monitor: Monitor, - indices: String = AlertIndices.ALL_FINDING_INDEX_PATTERN, - refresh: Boolean = true, - ): List { - if (refresh) refreshIndex(indices) - - val request = """ - { "version" : true, - "query": { "match_all": {} } - } - """.trimIndent() - val httpResponse = adminClient().makeRequest("GET", "/$indices/_search", StringEntity(request, APPLICATION_JSON)) - assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) - - val searchResponse = SearchResponse.fromXContent(createParser(jsonXContent, httpResponse.entity.content)) - return searchResponse.hits.hits.map { - val xcp = createParser(jsonXContent, it.sourceRef).also { it.nextToken() } - Finding.parse(xcp) - }.filter { finding -> finding.monitorId == monitor.id } - } - - protected fun searchAlerts(monitor: Monitor, indices: String = AlertIndices.ALERT_INDEX, refresh: Boolean = true): List { - try { - if (refresh) refreshIndex(indices) - } catch (e: Exception) { - logger.warn("Could not refresh index $indices because: ${e.message}") - return emptyList() - } - - // If this is a test monitor (it doesn't have an ID) and no alerts will be saved for it. 
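-        // Alerts are indexed with routing set to the monitor id (see createAlert above), so the
-        // routing parameter scopes this search to the shard holding the monitor's alerts.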
- val searchParams = if (monitor.id != Monitor.NO_ID) mapOf("routing" to monitor.id) else mapOf() - val request = """ - { "version" : true, - "query" : { "term" : { "${Alert.MONITOR_ID_FIELD}" : "${monitor.id}" } } - } - """.trimIndent() - val httpResponse = adminClient().makeRequest("GET", "/$indices/_search", searchParams, StringEntity(request, APPLICATION_JSON)) - assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) - - val searchResponse = SearchResponse.fromXContent(createParser(jsonXContent, httpResponse.entity.content)) - return searchResponse.hits.hits.map { - val xcp = createParser(jsonXContent, it.sourceRef).also { it.nextToken() } - Alert.parse(xcp, it.id, it.version) - } - } - - protected fun acknowledgeAlerts(monitor: Monitor, vararg alerts: Alert): Response { - val request = XContentFactory.jsonBuilder().startObject() - .array("alerts", *alerts.map { it.id }.toTypedArray()) - .endObject() - .string() - .let { StringEntity(it, APPLICATION_JSON) } - - val response = client().makeRequest( - "POST", - "${monitor.relativeUrl()}/_acknowledge/alerts?refresh=true", - emptyMap(), - request - ) - assertEquals("Acknowledge call failed.", RestStatus.OK, response.restStatus()) - return response - } - - protected fun acknowledgeChainedAlerts(workflowId: String, vararg alertId: String): Response { - val request = jsonBuilder().startObject() - .array("alerts", *alertId.map { it }.toTypedArray()) - .endObject() - .string() - .let { StringEntity(it, APPLICATION_JSON) } - - val response = client().makeRequest( - "POST", "${AlertingPlugin.WORKFLOW_BASE_URI}/$workflowId/_acknowledge/alerts", - emptyMap(), request - ) - assertEquals("Acknowledge call failed.", RestStatus.OK, response.restStatus()) - return response - } - - protected fun getAlerts( - client: RestClient, - dataMap: Map = emptyMap(), - header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"), - ): Response { - var baseEndpoint = "$ALERTING_BASE_URI/alerts?" - for (entry in dataMap.entries) { - baseEndpoint += "${entry.key}=${entry.value}&" - } - - val response = client.makeRequest("GET", baseEndpoint, null, header) - assertEquals("Get call failed.", RestStatus.OK, response.restStatus()) - return response - } - - protected fun getAlerts( - dataMap: Map = emptyMap(), - header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"), - ): Response { - return getAlerts(client(), dataMap, header) - } - - protected fun refreshIndex(index: String): Response { - val response = client().makeRequest("POST", "/$index/_refresh?expand_wildcards=all") - assertEquals("Unable to refresh index", RestStatus.OK, response.restStatus()) - return response - } - - protected fun deleteIndex(index: String): Response { - val response = adminClient().makeRequest("DELETE", "/$index") - assertEquals("Unable to delete index", RestStatus.OK, response.restStatus()) - return response - } - - protected fun executeMonitor(monitorId: String, params: Map = mutableMapOf()): Response { - return executeMonitor(client(), monitorId, params) - } - - protected fun executeWorkflow(workflowId: String, params: Map = mutableMapOf()): Response { - return executeWorkflow(client(), workflowId, params) - } - - protected fun getWorkflowAlerts( - workflowId: String, - alertId: String? 
= "", - getAssociatedAlerts: Boolean = true, - ): Response { - return getWorkflowAlerts( - client(), - mutableMapOf(Pair("workflowIds", workflowId), Pair("getAssociatedAlerts", getAssociatedAlerts), Pair("alertIds", alertId!!)) - ) - } - - protected fun getWorkflowAlerts( - client: RestClient, - dataMap: Map = emptyMap(), - header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"), - ): Response { - var baseEndpoint = "$WORKFLOW_ALERTING_BASE_URI/alerts?" - for (entry in dataMap.entries) { - baseEndpoint += "${entry.key}=${entry.value}&" - } - - val response = client.makeRequest("GET", baseEndpoint, null, header) - assertEquals("Get call failed.", RestStatus.OK, response.restStatus()) - return response - } - - protected fun executeMonitor(client: RestClient, monitorId: String, params: Map = mutableMapOf()): Response { - return client.makeRequest("POST", "$ALERTING_BASE_URI/$monitorId/_execute", params) - } - - protected fun executeWorkflow(client: RestClient, workflowId: String, params: Map = mutableMapOf()): Response { - return client.makeRequest("POST", "$WORKFLOW_ALERTING_BASE_URI/$workflowId/_execute", params) - } - - protected fun executeMonitor(monitor: Monitor, params: Map = mapOf()): Response { - return executeMonitor(client(), monitor, params) - } - - protected fun executeMonitor(client: RestClient, monitor: Monitor, params: Map = mapOf()): Response = - client.makeRequest("POST", "$ALERTING_BASE_URI/_execute", params, monitor.toHttpEntityWithUser()) - - protected fun searchFindings(params: Map = mutableMapOf()): GetFindingsResponse { - var baseEndpoint = "${AlertingPlugin.FINDING_BASE_URI}/_search?" - for (entry in params.entries) { - baseEndpoint += "${entry.key}=${entry.value}&" - } - - val response = client().makeRequest("GET", baseEndpoint) - - assertEquals("Unable to retrieve findings", RestStatus.OK, response.restStatus()) - - val parser = createParser(XContentType.JSON.xContent(), response.entity.content) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser) - - var totalFindings = 0 - val findings = mutableListOf() - - while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - parser.nextToken() - - when (parser.currentName()) { - "total_findings" -> totalFindings = parser.intValue() - "findings" -> { - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.currentToken(), parser) - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - findings.add(FindingWithDocs.parse(parser)) - } - } - } - } - - return GetFindingsResponse(response.restStatus(), totalFindings, findings) - } - - protected fun searchMonitors(): SearchResponse { - var baseEndpoint = "${AlertingPlugin.MONITOR_BASE_URI}/_search?" 
- val request = """ - { "version" : true, - "query": { "match_all": {} } - } - """.trimIndent() - val httpResponse = adminClient().makeRequest("POST", baseEndpoint, StringEntity(request, APPLICATION_JSON)) - assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) - return SearchResponse.fromXContent(createParser(jsonXContent, httpResponse.entity.content)) - } - - protected fun indexDoc(index: String, id: String, doc: String, refresh: Boolean = true): Response { - return indexDoc(client(), index, id, doc, refresh) - } - - protected fun indexDocWithAdminClient(index: String, id: String, doc: String, refresh: Boolean = true): Response { - return indexDoc(adminClient(), index, id, doc, refresh) - } - - private fun indexDoc(client: RestClient, index: String, id: String, doc: String, refresh: Boolean = true): Response { - val requestBody = StringEntity(doc, APPLICATION_JSON) - val params = if (refresh) mapOf("refresh" to "true") else mapOf() - val response = client.makeRequest("POST", "$index/_doc/$id?op_type=create", params, requestBody) - assertTrue( - "Unable to index doc: '${doc.take(15)}...' to index: '$index'", - listOf(RestStatus.OK, RestStatus.CREATED).contains(response.restStatus()) - ) - return response - } - - protected fun deleteDoc(index: String, id: String, refresh: Boolean = true): Response { - val params = if (refresh) mapOf("refresh" to "true") else mapOf() - val response = client().makeRequest("DELETE", "$index/_doc/$id", params) - assertTrue("Unable to delete doc with ID $id in index: '$index'", listOf(RestStatus.OK).contains(response.restStatus())) - return response - } - - /** A test index that can be used across tests. Feel free to add new fields but don't remove any. */ - protected fun createTestIndex(index: String = randomAlphaOfLength(10).lowercase(Locale.ROOT)): String { - createIndex( - index, - Settings.EMPTY, - """ - "properties" : { - "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" }, - "test_field" : { "type" : "keyword" }, - "number" : { "type" : "keyword" } - } - """.trimIndent() - ) - return index - } - - protected fun createTestIndex(index: String, mapping: String): String { - createIndex(index, Settings.EMPTY, mapping.trimIndent()) - return index - } - - protected fun createTestIndex(index: String, mapping: String?, alias: String): String { - createIndex(index, Settings.EMPTY, mapping?.trimIndent(), alias) - return index - } - - protected fun createTestConfigIndex(index: String = "." 
+ randomAlphaOfLength(10).lowercase(Locale.ROOT)): String { - try { - createIndex( - index, - Settings.builder().build(), - """ - "properties" : { - "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" } - } - """.trimIndent() - ) - } catch (ex: WarningFailureException) { - // ignore - } - return index - } - - protected fun createTestAlias( - alias: String = randomAlphaOfLength(10).lowercase(Locale.ROOT), - numOfAliasIndices: Int = randomIntBetween(1, 10), - includeWriteIndex: Boolean = true, - ): MutableMap> { - return createTestAlias(alias = alias, indices = randomAliasIndices(alias, numOfAliasIndices, includeWriteIndex)) - } - - protected fun createTestAlias( - alias: String = randomAlphaOfLength(10).lowercase(Locale.ROOT), - indices: Map = randomAliasIndices( - alias = alias, - num = randomIntBetween(1, 10), - includeWriteIndex = true - ), - ): MutableMap> { - val indicesMap = mutableMapOf() - val indicesJson = jsonBuilder().startObject().startArray("actions") - indices.keys.map { - val indexName = createTestIndex(index = it, mapping = "") - val isWriteIndex = indices.getOrDefault(indexName, false) - indicesMap[indexName] = isWriteIndex - val indexMap = mapOf( - "add" to mapOf( - "index" to indexName, - "alias" to alias, - "is_write_index" to isWriteIndex - ) - ) - indicesJson.value(indexMap) - } - val requestBody = indicesJson.endArray().endObject().string() - client().makeRequest("POST", "/_aliases", emptyMap(), StringEntity(requestBody, APPLICATION_JSON)) - return mutableMapOf(alias to indicesMap) - } - - protected fun createDataStream(datastream: String, mappings: String?, useComponentTemplate: Boolean) { - val indexPattern = "$datastream*" - var componentTemplateMappings = "\"properties\": {" + - " \"netflow.destination_transport_port\":{ \"type\": \"long\" }," + - " \"netflow.destination_ipv4_address\":{ \"type\": \"ip\" }" + - "}" - if (mappings != null) { - componentTemplateMappings = mappings - } - if (useComponentTemplate) { - // Setup index_template - createComponentTemplateWithMappings( - "my_ds_component_template-$datastream", - componentTemplateMappings - ) - } - createComposableIndexTemplate( - "my_index_template_ds-$datastream", - listOf(indexPattern), - (if (useComponentTemplate) "my_ds_component_template-$datastream" else null), - mappings, - true, - 0 - ) - createDataStream(datastream) - } - - protected fun createDataStream(datastream: String? = randomAlphaOfLength(10).lowercase(Locale.ROOT)) { - client().makeRequest("PUT", "_data_stream/$datastream") - } - - protected fun deleteDataStream(datastream: String) { - client().makeRequest("DELETE", "_data_stream/$datastream") - } - - protected fun createIndexAlias(alias: String, mappings: String?) 
{ - val indexPattern = "$alias*" - var componentTemplateMappings = "\"properties\": {" + - " \"netflow.destination_transport_port\":{ \"type\": \"long\" }," + - " \"netflow.destination_ipv4_address\":{ \"type\": \"ip\" }" + - "}" - if (mappings != null) { - componentTemplateMappings = mappings - } - createComponentTemplateWithMappings( - "my_alias_component_template-$alias", - componentTemplateMappings - ) - createComposableIndexTemplate( - "my_index_template_alias-$alias", - listOf(indexPattern), - "my_alias_component_template-$alias", - mappings, - false, - 0 - ) - createTestIndex( - "$alias-000001", - null, - """ - "$alias": { - "is_write_index": true - } - """.trimIndent() - ) - } - - protected fun deleteIndexAlias(alias: String) { - client().makeRequest("DELETE", "$alias*/_alias/$alias") - } - - protected fun createComponentTemplateWithMappings(componentTemplateName: String, mappings: String?) { - val body = """{"template" : { "mappings": {$mappings} }}""" - client().makeRequest( - "PUT", - "_component_template/$componentTemplateName", - emptyMap(), - StringEntity(body, ContentType.APPLICATION_JSON), - BasicHeader("Content-Type", "application/json") - ) - } - - protected fun createComposableIndexTemplate( - templateName: String, - indexPatterns: List, - componentTemplateName: String?, - mappings: String?, - isDataStream: Boolean, - priority: Int - ) { - var body = "{\n" - if (isDataStream) { - body += "\"data_stream\": { }," - } - body += "\"index_patterns\": [" + - indexPatterns.stream().collect( - Collectors.joining(",", "\"", "\"") - ) + "]," - if (componentTemplateName == null) { - body += "\"template\": {\"mappings\": {$mappings}}," - } - if (componentTemplateName != null) { - body += "\"composed_of\": [\"$componentTemplateName\"]," - } - body += "\"priority\":$priority}" - client().makeRequest( - "PUT", - "_index_template/$templateName", - emptyMap(), - StringEntity(body, APPLICATION_JSON), - BasicHeader("Content-Type", "application/json") - ) - } - - protected fun getDatastreamWriteIndex(datastream: String): String { - val response = client().makeRequest("GET", "_data_stream/$datastream", emptyMap(), null) - var respAsMap = responseAsMap(response) - if (respAsMap.containsKey("data_streams")) { - respAsMap = (respAsMap["data_streams"] as ArrayList>)[0] - val indices = respAsMap["indices"] as List> - val index = indices.last() - return index["index_name"] as String - } else { - respAsMap = respAsMap[datastream] as Map - } - val indices = respAsMap["indices"] as Array - return indices.last() - } - - protected fun rolloverDatastream(datastream: String) { - client().makeRequest( - "POST", - datastream + "/_rollover", - emptyMap(), - null - ) - } - - protected fun randomAliasIndices( - alias: String, - num: Int = randomIntBetween(1, 10), - includeWriteIndex: Boolean = true, - ): Map { - val indices = mutableMapOf() - val writeIndex = randomIntBetween(0, num - 1) - for (i: Int in 0 until num) { - var indexName = randomAlphaOfLength(10).lowercase(Locale.ROOT) - while (indexName.equals(alias) || indices.containsKey(indexName)) - indexName = randomAlphaOfLength(10).lowercase(Locale.ROOT) - indices[indexName] = includeWriteIndex && i == writeIndex - } - return indices - } - - protected fun insertSampleTimeSerializedData(index: String, data: List) { - data.forEachIndexed { i, value -> - val twoMinsAgo = ZonedDateTime.now().minus(2, ChronoUnit.MINUTES).truncatedTo(ChronoUnit.MILLIS) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(twoMinsAgo) - val testDoc = """ - { - 
"test_strict_date_time": "$testTime", - "test_field": "$value", - "number": "$i" - } - """.trimIndent() - // Indexing documents with deterministic doc id to allow for easy selected deletion during testing - indexDoc(index, (i + 1).toString(), testDoc) - } - } - - protected fun deleteDataWithDocIds(index: String, docIds: List) { - docIds.forEach { - deleteDoc(index, it) - } - } - - fun putAlertMappings(mapping: String? = null) { - val mappingHack = if (mapping != null) mapping else AlertIndices.alertMapping().trimStart('{').trimEnd('}') - val encodedHistoryIndex = URLEncoder.encode(AlertIndices.ALERT_HISTORY_INDEX_PATTERN, Charsets.UTF_8.toString()) - val settings = Settings.builder().put("index.hidden", true).build() - createIndex(AlertIndices.ALERT_INDEX, settings, mappingHack) - createIndex(encodedHistoryIndex, settings, mappingHack, "\"${AlertIndices.ALERT_HISTORY_WRITE_INDEX}\" : {}") - } - - fun putFindingMappings(mapping: String? = null) { - val mappingHack = if (mapping != null) mapping else AlertIndices.findingMapping().trimStart('{').trimEnd('}') - val encodedHistoryIndex = URLEncoder.encode(AlertIndices.FINDING_HISTORY_INDEX_PATTERN, Charsets.UTF_8.toString()) - val settings = Settings.builder().put("index.hidden", true).build() -// createIndex(AlertIndices.FINDING_HISTORY_WRITE_INDEX, settings, mappingHack) - createIndex(encodedHistoryIndex, settings, mappingHack, "\"${AlertIndices.FINDING_HISTORY_WRITE_INDEX}\" : {}") - } - - fun scheduledJobMappings(): String { - return javaClass.classLoader.getResource("mappings/scheduled-jobs.json").readText() - } - - /** Creates the Alerting config index if it does not exist */ - fun createAlertingConfigIndex(mapping: String? = null) { - val indexExistsResponse = client().makeRequest("HEAD", ScheduledJob.SCHEDULED_JOBS_INDEX) - if (indexExistsResponse.restStatus() == RestStatus.NOT_FOUND) { - val mappingHack = mapping ?: scheduledJobMappings().trimStart('{').trimEnd('}') - val settings = Settings.builder().put("index.hidden", true).build() - createIndex(ScheduledJob.SCHEDULED_JOBS_INDEX, settings, mappingHack) - } - } - - protected fun Response.restStatus(): RestStatus { - return RestStatus.fromCode(this.statusLine.statusCode) - } - - protected fun Monitor.toHttpEntity(): HttpEntity { - return StringEntity(toJsonString(), APPLICATION_JSON) - } - - private fun Monitor.toJsonString(): String { - val builder = XContentFactory.jsonBuilder() - return shuffleXContent(toXContent(builder, ToXContent.EMPTY_PARAMS)).string() - } - - protected fun Monitor.toHttpEntityWithUser(): HttpEntity { - return StringEntity(toJsonStringWithUser(), APPLICATION_JSON) - } - - private fun Monitor.toJsonStringWithUser(): String { - val builder = jsonBuilder() - return shuffleXContent(toXContentWithUser(builder, ToXContent.EMPTY_PARAMS)).string() - } - - protected fun Destination.toHttpEntity(): HttpEntity { - return StringEntity(toJsonString(), APPLICATION_JSON) - } - - protected fun Destination.toJsonString(): String { - val builder = jsonBuilder() - return shuffleXContent(toXContent(builder)).string() - } - - protected fun Destination.toJsonStringWithType(): String { - val builder = jsonBuilder() - return shuffleXContent( - toXContent(builder, ToXContent.MapParams(mapOf("with_type" to "true"))) - ).string() - } - - protected fun EmailAccount.toHttpEntity(): HttpEntity { - return StringEntity(toJsonString(), APPLICATION_JSON) - } - - protected fun EmailAccount.toJsonString(): String { - val builder = jsonBuilder() - return 
shuffleXContent(toXContent(builder)).string()
-    }
-
-    protected fun EmailAccount.toJsonStringWithType(): String {
-        val builder = jsonBuilder()
-        return shuffleXContent(
-            toXContent(builder, ToXContent.MapParams(mapOf("with_type" to "true")))
-        ).string()
-    }
-
-    protected fun EmailGroup.toHttpEntity(): HttpEntity {
-        return StringEntity(toJsonString(), APPLICATION_JSON)
-    }
-
-    protected fun EmailGroup.toJsonString(): String {
-        val builder = jsonBuilder()
-        return shuffleXContent(toXContent(builder)).string()
-    }
-
-    protected fun EmailGroup.toJsonStringWithType(): String {
-        val builder = jsonBuilder()
-        return shuffleXContent(
-            toXContent(builder, ToXContent.MapParams(mapOf("with_type" to "true")))
-        ).string()
-    }
-
-    protected fun Alert.toHttpEntityWithUser(): HttpEntity {
-        return StringEntity(toJsonStringWithUser(), APPLICATION_JSON)
-    }
-
-    private fun Alert.toJsonStringWithUser(): String {
-        val builder = jsonBuilder()
-        return shuffleXContent(toXContentWithUser(builder)).string()
-    }
-
-    protected fun Monitor.relativeUrl() = "$ALERTING_BASE_URI/$id"
-
-    // Useful settings when debugging to prevent timeouts
-    override fun restClientSettings(): Settings {
-        return if (isDebuggingTest || isDebuggingRemoteCluster) {
-            Settings.builder()
-                .put(CLIENT_SOCKET_TIMEOUT, TimeValue.timeValueMinutes(10))
-                .build()
-        } else {
-            super.restClientSettings()
-        }
-    }
-
-    fun RestClient.getClusterSettings(settings: Map<String, String>): Map<String, Any> {
-        val response = this.makeRequest("GET", "_cluster/settings", settings)
-        assertEquals(RestStatus.OK, response.restStatus())
-        return response.asMap()
-    }
-
-    fun RestClient.getIndexMapping(index: String): Map<String, Any> {
-        val response = this.makeRequest("GET", "$index/_mapping")
-        assertEquals(RestStatus.OK, response.restStatus())
-        return response.asMap()
-    }
-
-    fun RestClient.updateSettings(setting: String, value: Any): Map<String, Any> {
-        val settings = jsonBuilder()
-            .startObject()
-            .startObject("persistent")
-            .field(setting, value)
-            .endObject()
-            .endObject()
-            .string()
-        val response = this.makeRequest("PUT", "_cluster/settings", StringEntity(settings, APPLICATION_JSON))
-        assertEquals(RestStatus.OK, response.restStatus())
-        return response.asMap()
-    }
-
-    @Suppress("UNCHECKED_CAST")
-    fun Map<String, Any>.opendistroSettings(): Map<String, Any>? {
-        val map = this as Map<String, Map<String, Map<String, Map<String, Any>>>>
-        return map["defaults"]?.get("opendistro")?.get("alerting")
-    }
-
-    @Suppress("UNCHECKED_CAST")
-    fun Map<String, Any>.stringMap(key: String): Map<String, Any>?
{ - val map = this as Map> - return map[key] - } - - fun getAlertingStats(metrics: String = ""): Map { - val monitorStatsResponse = client().makeRequest("GET", "/_plugins/_alerting/stats$metrics") - val responseMap = createParser(XContentType.JSON.xContent(), monitorStatsResponse.entity.content).map() - return responseMap - } - - fun enableScheduledJob(): Response { - val updateResponse = client().makeRequest( - "PUT", - "_cluster/settings", - emptyMap(), - StringEntity( - XContentFactory.jsonBuilder().startObject().field("persistent") - .startObject().field(ScheduledJobSettings.SWEEPER_ENABLED.key, true).endObject() - .endObject().string(), - ContentType.APPLICATION_JSON - ) - ) - return updateResponse - } - - fun disableScheduledJob(): Response { - val updateResponse = client().makeRequest( - "PUT", - "_cluster/settings", - emptyMap(), - StringEntity( - XContentFactory.jsonBuilder().startObject().field("persistent") - .startObject().field(ScheduledJobSettings.SWEEPER_ENABLED.key, false).endObject() - .endObject().string(), - ContentType.APPLICATION_JSON - ) - ) - return updateResponse - } - - fun enableFilterBy() { - val updateResponse = client().makeRequest( - "PUT", - "_cluster/settings", - emptyMap(), - StringEntity( - XContentFactory.jsonBuilder().startObject().field("persistent") - .startObject().field(AlertingSettings.FILTER_BY_BACKEND_ROLES.key, true).endObject() - .endObject().string(), - ContentType.APPLICATION_JSON - ) - ) - assertEquals(updateResponse.statusLine.toString(), 200, updateResponse.statusLine.statusCode) - } - - fun disableFilterBy() { - val updateResponse = client().makeRequest( - "PUT", - "_cluster/settings", - emptyMap(), - StringEntity( - XContentFactory.jsonBuilder().startObject().field("persistent") - .startObject().field(AlertingSettings.FILTER_BY_BACKEND_ROLES.key, false).endObject() - .endObject().string(), - ContentType.APPLICATION_JSON - ) - ) - assertEquals(updateResponse.statusLine.toString(), 200, updateResponse.statusLine.statusCode) - } - - fun removeEmailFromAllowList() { - val allowedDestinations = DestinationType.values().toList() - .filter { destinationType -> destinationType != DestinationType.EMAIL } - .joinToString(prefix = "[", postfix = "]") { string -> "\"$string\"" } - client().updateSettings(DestinationSettings.ALLOW_LIST.key, allowedDestinations) - } - - fun createUser(name: String, backendRoles: Array) { - val request = Request("PUT", "/_plugins/_security/api/internalusers/$name") - val broles = backendRoles.joinToString { it -> "\"$it\"" } - var entity = " {\n" + - "\"password\": \"$password\",\n" + - "\"backend_roles\": [$broles],\n" + - "\"attributes\": {\n" + - "}} " - request.setJsonEntity(entity) - client().performRequest(request) - } - - fun patchUserBackendRoles(name: String, backendRoles: Array) { - val request = Request("PATCH", "/_plugins/_security/api/internalusers/$name") - val broles = backendRoles.joinToString { "\"$it\"" } - var entity = " [{\n" + - "\"op\": \"replace\",\n" + - "\"path\": \"/backend_roles\",\n" + - "\"value\": [$broles]\n" + - "}]" - request.setJsonEntity(entity) - client().performRequest(request) - } - - fun createIndexRole(name: String, index: String) { - val request = Request("PUT", "/_plugins/_security/api/roles/$name") - var entity = "{\n" + - "\"cluster_permissions\": [\n" + - "],\n" + - "\"index_permissions\": [\n" + - "{\n" + - "\"index_patterns\": [\n" + - "\"$index\"\n" + - "],\n" + - "\"dls\": \"\",\n" + - "\"fls\": [],\n" + - "\"masked_fields\": [],\n" + - "\"allowed_actions\": [\n" + - 
"\"crud\"\n" + - "]\n" + - "}\n" + - "],\n" + - "\"tenant_permissions\": []\n" + - "}" - request.setJsonEntity(entity) - client().performRequest(request) - } - - fun createCustomIndexRole(name: String, index: String, clusterPermissions: String?) { - val request = Request("PUT", "/_plugins/_security/api/roles/$name") - var entity = "{\n" + - "\"cluster_permissions\": [\n" + - "\"$clusterPermissions\"\n" + - "],\n" + - "\"index_permissions\": [\n" + - "{\n" + - "\"index_patterns\": [\n" + - "\"$index\"\n" + - "],\n" + - "\"dls\": \"\",\n" + - "\"fls\": [],\n" + - "\"masked_fields\": [],\n" + - "\"allowed_actions\": [\n" + - "\"crud\"\n" + - "]\n" + - "}\n" + - "],\n" + - "\"tenant_permissions\": []\n" + - "}" - request.setJsonEntity(entity) - client().performRequest(request) - } - - private fun createCustomIndexRole(name: String, index: String, clusterPermissions: List) { - val request = Request("PUT", "/_plugins/_security/api/roles/$name") - - val clusterPermissionsStr = - clusterPermissions.stream().map { p: String? -> "\"" + p + "\"" }.collect( - Collectors.joining(",") - ) - - var entity = "{\n" + - "\"cluster_permissions\": [\n" + - "$clusterPermissionsStr\n" + - "],\n" + - "\"index_permissions\": [\n" + - "{\n" + - "\"index_patterns\": [\n" + - "\"$index\"\n" + - "],\n" + - "\"dls\": \"\",\n" + - "\"fls\": [],\n" + - "\"masked_fields\": [],\n" + - "\"allowed_actions\": [\n" + - "\"crud\"\n" + - "]\n" + - "}\n" + - "],\n" + - "\"tenant_permissions\": []\n" + - "}" - request.setJsonEntity(entity) - client().performRequest(request) - } - - fun createIndexRoleWithDocLevelSecurity(name: String, index: String, dlsQuery: String, clusterPermissions: String? = "") { - val request = Request("PUT", "/_plugins/_security/api/roles/$name") - var entity = "{\n" + - "\"cluster_permissions\": [\n" + - "\"$clusterPermissions\"\n" + - "],\n" + - "\"index_permissions\": [\n" + - "{\n" + - "\"index_patterns\": [\n" + - "\"$index\"\n" + - "],\n" + - "\"dls\": \"$dlsQuery\",\n" + - "\"fls\": [],\n" + - "\"masked_fields\": [],\n" + - "\"allowed_actions\": [\n" + - "\"crud\"\n" + - "]\n" + - "}\n" + - "],\n" + - "\"tenant_permissions\": []\n" + - "}" - request.setJsonEntity(entity) - client().performRequest(request) - } - - fun createIndexRoleWithDocLevelSecurity(name: String, index: String, dlsQuery: String, clusterPermissions: List) { - val clusterPermissionsStr = - clusterPermissions.stream().map { p: String -> "\"" + getClusterPermissionsFromCustomRole(p) + "\"" }.collect( - Collectors.joining(",") - ) - - val request = Request("PUT", "/_plugins/_security/api/roles/$name") - var entity = "{\n" + - "\"cluster_permissions\": [\n" + - "$clusterPermissionsStr\n" + - "],\n" + - "\"index_permissions\": [\n" + - "{\n" + - "\"index_patterns\": [\n" + - "\"$index\"\n" + - "],\n" + - "\"dls\": \"$dlsQuery\",\n" + - "\"fls\": [],\n" + - "\"masked_fields\": [],\n" + - "\"allowed_actions\": [\n" + - "\"crud\"\n" + - "]\n" + - "}\n" + - "],\n" + - "\"tenant_permissions\": []\n" + - "}" - request.setJsonEntity(entity) - client().performRequest(request) - } - - fun createUserRolesMapping(role: String, users: Array) { - val request = Request("PUT", "/_plugins/_security/api/rolesmapping/$role") - val usersStr = users.joinToString { it -> "\"$it\"" } - var entity = "{ \n" + - " \"backend_roles\" : [ ],\n" + - " \"hosts\" : [ ],\n" + - " \"users\" : [$usersStr]\n" + - "}" - request.setJsonEntity(entity) - client().performRequest(request) - } - - fun updateRoleMapping(role: String, users: List, addUser: Boolean) { - val 
request = Request("PATCH", "/_plugins/_security/api/rolesmapping/$role") - val usersStr = users.joinToString { it -> "\"$it\"" } - - val op = if (addUser) "add" else "remove" - - val entity = "[{\n" + - " \"op\" : \"$op\",\n" + - " \"path\" : \"/users\",\n" + - " \"value\" : [$usersStr]\n" + - "}]" - - request.setJsonEntity(entity) - client().performRequest(request) - } - - fun deleteUser(name: String) { - client().makeRequest("DELETE", "/_plugins/_security/api/internalusers/$name") - } - - fun deleteRole(name: String) { - client().makeRequest("DELETE", "/_plugins/_security/api/roles/$name") - } - - fun deleteRoleMapping(name: String) { - client().makeRequest("DELETE", "/_plugins/_security/api/rolesmapping/$name") - } - - fun deleteRoleAndRoleMapping(role: String) { - deleteRoleMapping(role) - deleteRole(role) - } - - fun createUserWithTestData(user: String, index: String, role: String, backendRole: String) { - createUser(user, arrayOf(backendRole)) - createTestIndex(index) - createIndexRole(role, index) - createUserRolesMapping(role, arrayOf(user)) - } - - fun createUserWithTestDataAndCustomRole( - user: String, - index: String, - role: String, - backendRoles: List, - clusterPermissions: String?, - ) { - createUser(user, backendRoles.toTypedArray()) - createTestIndex(index) - createCustomIndexRole(role, index, clusterPermissions) - createUserRolesMapping(role, arrayOf(user)) - } - - fun createUserWithTestDataAndCustomRole( - user: String, - index: String, - role: String, - backendRoles: List, - clusterPermissions: List, - ) { - createUser(user, backendRoles.toTypedArray()) - createTestIndex(index) - createCustomIndexRole(role, index, clusterPermissions) - createUserRolesMapping(role, arrayOf(user)) - } - - fun createUserWithRoles( - user: String, - roles: List, - backendRoles: List, - isExistingRole: Boolean, - ) { - createUser(user, backendRoles.toTypedArray()) - for (role in roles) { - if (isExistingRole) { - updateRoleMapping(role, listOf(user), true) - } else { - createUserRolesMapping(role, arrayOf(user)) - } - } - } - - fun createUserWithDocLevelSecurityTestData( - user: String, - index: String, - role: String, - backendRole: String, - dlsQuery: String, - ) { - createUser(user, arrayOf(backendRole)) - createTestIndex(index) - createIndexRoleWithDocLevelSecurity(role, index, dlsQuery) - createUserRolesMapping(role, arrayOf(user)) - } - - fun createUserWithDocLevelSecurityTestDataAndCustomRole( - user: String, - index: String, - role: String, - backendRole: String, - dlsQuery: String, - clusterPermissions: String?, - ) { - createUser(user, arrayOf(backendRole)) - createTestIndex(index) - createIndexRoleWithDocLevelSecurity(role, index, dlsQuery) - createCustomIndexRole(role, index, clusterPermissions) - createUserRolesMapping(role, arrayOf(user)) - } - - fun getClusterPermissionsFromCustomRole(clusterPermissions: String): String? { - return ROLE_TO_PERMISSION_MAPPING.get(clusterPermissions) - } - - companion object { - internal interface IProxy { - val version: String? - var sessionId: String? - - fun getExecutionData(reset: Boolean): ByteArray? - fun dump(reset: Boolean) - fun reset() - } - - /* - * We need to be able to dump the jacoco coverage before the cluster shuts down. - * The new internal testing framework removed some gradle tasks we were listening to, - * to choose a good time to do it. This will dump the executionData to file after each test. 
- * TODO: This is also currently just overwriting integTest.exec with the updated execData without - * resetting after writing each time. This can be improved to either write an exec file per test - * or by letting jacoco append to the file. - * */ - @JvmStatic - @AfterClass - fun dumpCoverage() { - // jacoco.dir set in opensearchplugin-coverage.gradle, if it doesn't exist we don't - // want to collect coverage, so we can return early - val jacocoBuildPath = System.getProperty("jacoco.dir") ?: return - val serverUrl = "service:jmx:rmi:///jndi/rmi://127.0.0.1:7777/jmxrmi" - JMXConnectorFactory.connect(JMXServiceURL(serverUrl)).use { connector -> - val proxy = MBeanServerInvocationHandler.newProxyInstance( - connector.mBeanServerConnection, - ObjectName("org.jacoco:type=Runtime"), - IProxy::class.java, - false - ) - proxy.getExecutionData(false)?.let { - val path = PathUtils.get("$jacocoBuildPath/integTest.exec") - Files.write(path, it) - } - } - } - } - - protected fun createRandomWorkflow(monitorIds: List, refresh: Boolean = false): Workflow { - val workflow = randomWorkflow(monitorIds = monitorIds) - return createWorkflow(workflow, refresh) - } - - private fun createWorkflowEntityWithBackendRoles(workflow: Workflow, rbacRoles: List?): HttpEntity { - if (rbacRoles == null) { - return workflow.toHttpEntity() - } - val temp = workflow.toJsonString() - val toReplace = temp.lastIndexOf("}") - val rbacString = rbacRoles.joinToString { "\"$it\"" } - val jsonString = temp.substring(0, toReplace) + ", \"rbac_roles\": [$rbacString] }" - return StringEntity(jsonString, ContentType.APPLICATION_JSON) - } - - protected fun createWorkflowWithClient( - client: RestClient, - workflow: Workflow, - rbacRoles: List? = null, - refresh: Boolean = true, - ): Workflow { - val response = client.makeRequest( - "POST", "$WORKFLOW_ALERTING_BASE_URI?refresh=$refresh", emptyMap(), - createWorkflowEntityWithBackendRoles(workflow, rbacRoles) - ) - assertEquals("Unable to create a new monitor", RestStatus.CREATED, response.restStatus()) - - val workflowJson = jsonXContent.createParser( - NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, - response.entity.content - ).map() - assertUserNull(workflowJson as HashMap) - return workflow.copy(id = workflowJson["_id"] as String) - } - - protected fun createWorkflow(workflow: Workflow, refresh: Boolean = true): Workflow { - return createWorkflowWithClient(client(), workflow, emptyList(), refresh) - } - - protected fun Workflow.toHttpEntity(): HttpEntity { - return StringEntity(toJsonString(), APPLICATION_JSON) - } - - private fun Workflow.toJsonString(): String { - val builder = XContentFactory.jsonBuilder() - return shuffleXContent(toXContent(builder, ToXContent.EMPTY_PARAMS)).string() - } - - protected fun getWorkflow( - workflowId: String, - header: BasicHeader = BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"), - ): Workflow { - val response = client().makeRequest("GET", "$WORKFLOW_ALERTING_BASE_URI/$workflowId", null, header) - assertEquals("Unable to get workflow $workflowId", RestStatus.OK, response.restStatus()) - - val parser = createParser(XContentType.JSON.xContent(), response.entity.content) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser) - - lateinit var id: String - var version: Long = 0 - lateinit var workflow: Workflow - - while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - parser.nextToken() - - when (parser.currentName()) { - "_id" -> id = parser.text() - "_version" -> 
version = parser.longValue() - "workflow" -> workflow = Workflow.parse(parser) - } - } - - assertUserNull(workflow) - return workflow.copy(id = id, version = version) - } - - protected fun Workflow.relativeUrl() = "$WORKFLOW_ALERTING_BASE_URI/$id" -} diff --git a/alerting/bin/test/org/opensearch/alerting/DocumentMonitorRunnerIT.kt b/alerting/bin/test/org/opensearch/alerting/DocumentMonitorRunnerIT.kt deleted file mode 100644 index 44454395e..000000000 --- a/alerting/bin/test/org/opensearch/alerting/DocumentMonitorRunnerIT.kt +++ /dev/null @@ -1,1675 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import org.apache.http.entity.ContentType -import org.apache.http.entity.StringEntity -import org.opensearch.action.search.SearchResponse -import org.opensearch.alerting.alerts.AlertIndices.Companion.ALL_ALERT_INDEX_PATTERN -import org.opensearch.alerting.alerts.AlertIndices.Companion.ALL_FINDING_INDEX_PATTERN -import org.opensearch.client.Response -import org.opensearch.client.ResponseException -import org.opensearch.common.xcontent.json.JsonXContent -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.DataSources -import org.opensearch.commons.alerting.model.DocLevelMonitorInput -import org.opensearch.commons.alerting.model.DocLevelQuery -import org.opensearch.commons.alerting.model.action.ActionExecutionPolicy -import org.opensearch.commons.alerting.model.action.AlertCategory -import org.opensearch.commons.alerting.model.action.PerAlertActionScope -import org.opensearch.commons.alerting.model.action.PerExecutionActionScope -import org.opensearch.core.rest.RestStatus -import org.opensearch.script.Script -import java.time.ZonedDateTime -import java.time.format.DateTimeFormatter -import java.time.temporal.ChronoUnit.MILLIS -import java.util.Locale - -class DocumentMonitorRunnerIT : AlertingRestTestCase() { - - fun `test execute monitor with dryrun`() { - - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - - val index = createTestIndex() - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) - - val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id) - val monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))) - ) - - indexDoc(index, "1", testDoc) - - val response = executeMonitor(monitor, params = DRYRUN_MONITOR) - - val output = entityAsMap(response) - assertEquals(monitor.name, output["monitor_name"]) - - assertEquals(1, output.objectMap("trigger_results").values.size) - - for (triggerResult in output.objectMap("trigger_results").values) { - assertEquals(1, triggerResult.objectMap("action_results").values.size) - for (alertActionResult in triggerResult.objectMap("action_results").values) { - for (actionResult in alertActionResult.values) { - @Suppress("UNCHECKED_CAST") val actionOutput = (actionResult as Map>)["output"] - as Map - assertEquals("Hello ${monitor.name}", actionOutput["subject"]) - assertEquals("Hello ${monitor.name}", 
actionOutput["message"]) - } - } - } - - val alerts = searchAlerts(monitor) - assertEquals("Alert saved for test monitor", 0, alerts.size) - } - - fun `test execute monitor returns search result with dryrun`() { - val testIndex = createTestIndex() - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) - - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val monitor = randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger)) - - indexDoc(testIndex, "1", testDoc) - indexDoc(testIndex, "5", testDoc) - - val response = executeMonitor(monitor, params = DRYRUN_MONITOR) - - val output = entityAsMap(response) - - assertEquals(monitor.name, output["monitor_name"]) - @Suppress("UNCHECKED_CAST") - val searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - val matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) - assertTrue("Incorrect search result", matchingDocsToQuery.contains("1|$testIndex")) - assertTrue("Incorrect search result", matchingDocsToQuery.contains("5|$testIndex")) - } - - fun `test execute monitor generates alerts and findings`() { - val testIndex = createTestIndex() - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) - - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - assertNotNull(monitor.id) - - indexDoc(testIndex, "1", testDoc) - indexDoc(testIndex, "5", testDoc) - - val response = executeMonitor(monitor.id) - - val output = entityAsMap(response) - - assertEquals(monitor.name, output["monitor_name"]) - @Suppress("UNCHECKED_CAST") - val searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - val matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) - assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("1|$testIndex", "5|$testIndex"))) - - val alerts = searchAlertsWithFilter(monitor) - assertEquals("Alert saved for test monitor", 2, alerts.size) - - val findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 2, findings.size) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) - assertTrue("Findings saved for test monitor", findings[1].relatedDocIds.contains("5")) - } - - fun `test execute monitor with tag as trigger condition generates alerts and findings`() { - val testIndex = createTestIndex() - val testTime = 
DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", tags = listOf("test_tag"), fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) - - val trigger = randomDocumentLevelTrigger(condition = Script("query[tag=test_tag]")) - val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - assertNotNull(monitor.id) - - indexDoc(testIndex, "1", testDoc) - indexDoc(testIndex, "5", testDoc) - - val response = executeMonitor(monitor.id) - - val output = entityAsMap(response) - - assertEquals(monitor.name, output["monitor_name"]) - @Suppress("UNCHECKED_CAST") - val searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - val matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) - assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("1|$testIndex", "5|$testIndex"))) - - val alerts = searchAlertsWithFilter(monitor) - assertEquals("Alert saved for test monitor", 2, alerts.size) - - val findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 2, findings.size) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) - assertTrue("Findings saved for test monitor", findings[1].relatedDocIds.contains("5")) - } - - fun `test execute monitor input error`() { - val testIndex = createTestIndex() - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", tags = listOf("test_tag"), fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) - - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - assertNotNull(monitor.id) - - deleteIndex(testIndex) - - val response = executeMonitor(monitor.id) - - val output = entityAsMap(response) - assertEquals(monitor.name, output["monitor_name"]) - @Suppress("UNCHECKED_CAST") - val inputResults = output.stringMap("input_results") - assertTrue("Missing monitor error message", (inputResults?.get("error") as String).isNotEmpty()) - - val alerts = searchAlerts(monitor) - assertEquals("Alert not saved", 1, alerts.size) - assertEquals("Alert status is incorrect", Alert.State.ERROR, alerts[0].state) - } - - fun `test execute monitor generates alerts and findings with per alert execution for actions`() { - val testIndex = createTestIndex() - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", 
listOf(testIndex), listOf(docQuery)) - - val alertCategories = AlertCategory.values() - val actionExecutionScope = PerAlertActionScope( - actionableAlerts = (1..randomInt(alertCategories.size)).map { alertCategories[it - 1] }.toSet() - ) - val actionExecutionPolicy = ActionExecutionPolicy(actionExecutionScope) - val actions = (0..randomInt(10)).map { - randomActionWithPolicy( - template = randomTemplateScript("Hello {{ctx.monitor.name}}"), - destinationId = createDestination().id, - actionExecutionPolicy = actionExecutionPolicy - ) - } - - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = actions) - val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - assertNotNull(monitor.id) - - indexDoc(testIndex, "1", testDoc) - indexDoc(testIndex, "5", testDoc) - - val response = executeMonitor(monitor.id) - - val output = entityAsMap(response) - - assertEquals(monitor.name, output["monitor_name"]) - @Suppress("UNCHECKED_CAST") - val searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - val matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) - assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("1|$testIndex", "5|$testIndex"))) - - for (triggerResult in output.objectMap("trigger_results").values) { - assertEquals(2, triggerResult.objectMap("action_results").values.size) - for (alertActionResult in triggerResult.objectMap("action_results").values) { - assertEquals(actions.size, alertActionResult.values.size) - for (actionResult in alertActionResult.values) { - @Suppress("UNCHECKED_CAST") val actionOutput = (actionResult as Map>)["output"] - as Map - assertEquals("Hello ${monitor.name}", actionOutput["subject"]) - assertEquals("Hello ${monitor.name}", actionOutput["message"]) - } - } - } - - refreshAllIndices() - - val alerts = searchAlertsWithFilter(monitor) - assertEquals("Alert saved for test monitor", 2, alerts.size) - - val findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 2, findings.size) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) - assertTrue("Findings saved for test monitor", findings[1].relatedDocIds.contains("5")) - } - - fun `test execute monitor generates alerts and findings with per trigger execution for actions`() { - val testIndex = createTestIndex() - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) - - val actionExecutionScope = PerExecutionActionScope() - val actionExecutionPolicy = ActionExecutionPolicy(actionExecutionScope) - val actions = (0..randomInt(10)).map { - randomActionWithPolicy( - template = randomTemplateScript("Hello {{ctx.monitor.name}}"), - destinationId = createDestination().id, - actionExecutionPolicy = actionExecutionPolicy - ) - } - - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = actions) - val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - 
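-        // Note: the monitor is persisted here (not dry-run), so the execution below writes
-        // real alerts and findings; in this test the trigger output still groups action
-        // results per generated alert even though the policy scope is per-execution.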
assertNotNull(monitor.id) - - indexDoc(testIndex, "1", testDoc) - indexDoc(testIndex, "5", testDoc) - - val response = executeMonitor(monitor.id) - - val output = entityAsMap(response) - - assertEquals(monitor.name, output["monitor_name"]) - @Suppress("UNCHECKED_CAST") - val searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - val matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) - assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("1|$testIndex", "5|$testIndex"))) - - for (triggerResult in output.objectMap("trigger_results").values) { - assertEquals(2, triggerResult.objectMap("action_results").values.size) - for (alertActionResult in triggerResult.objectMap("action_results").values) { - assertEquals(actions.size, alertActionResult.values.size) - for (actionResult in alertActionResult.values) { - @Suppress("UNCHECKED_CAST") val actionOutput = (actionResult as Map>)["output"] - as Map - assertEquals("Hello ${monitor.name}", actionOutput["subject"]) - assertEquals("Hello ${monitor.name}", actionOutput["message"]) - } - } - } - - val alerts = searchAlertsWithFilter(monitor) - assertEquals("Alert saved for test monitor", 2, alerts.size) - - val findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 2, findings.size) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) - assertTrue("Findings saved for test monitor", findings[1].relatedDocIds.contains("5")) - } - - fun `test execute monitor with wildcard index that generates alerts and findings for EQUALS query operator`() { - val testIndexPrefix = "test-index-${randomAlphaOfLength(10).lowercase(Locale.ROOT)}" - val testQueryName = "wildcard-test-query" - val testIndex = createTestIndex("${testIndexPrefix}1") - val testIndex2 = createTestIndex("${testIndexPrefix}2") - - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = testQueryName, fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf("$testIndexPrefix*"), listOf(docQuery)) - - val trigger = randomDocumentLevelTrigger(condition = Script("query[name=$testQueryName]")) - val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - assertNotNull(monitor.id) - - indexDoc(testIndex, "1", testDoc) - indexDoc(testIndex2, "5", testDoc) - - val response = executeMonitor(monitor.id) - - val output = entityAsMap(response) - - assertEquals(monitor.name, output["monitor_name"]) - @Suppress("UNCHECKED_CAST") - val searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - val matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) - assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("1|$testIndex", "5|$testIndex2"))) - - val alerts = searchAlertsWithFilter(monitor) - assertEquals("Alert saved for test monitor", 2, alerts.size) - - val findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 2, findings.size) - val foundFindings = findings.filter { 
it.relatedDocIds.contains("1") || it.relatedDocIds.contains("5") } - assertEquals("Didn't find findings for docs 1 and 5", 2, foundFindings.size) - } - - fun `test execute monitor with wildcard index that generates alerts and findings for NOT EQUALS query operator`() { - val testIndexPrefix = "test-index-${randomAlphaOfLength(10).lowercase(Locale.ROOT)}" - val testQueryName = "wildcard-test-query" - val testIndex = createTestIndex("${testIndexPrefix}1") - val testIndex2 = createTestIndex("${testIndexPrefix}2") - - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - - val docQuery = DocLevelQuery(query = "NOT (test_field:\"us-west-1\")", name = testQueryName, fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf("$testIndexPrefix*"), listOf(docQuery)) - - val trigger = randomDocumentLevelTrigger(condition = Script("query[name=$testQueryName]")) - val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - assertNotNull(monitor.id) - - indexDoc(testIndex, "1", testDoc) - indexDoc(testIndex2, "5", testDoc) - - val response = executeMonitor(monitor.id) - - val output = entityAsMap(response) - - assertEquals(monitor.name, output["monitor_name"]) - @Suppress("UNCHECKED_CAST") - val searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - val matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) - assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("1|$testIndex", "5|$testIndex2"))) - - val alerts = searchAlertsWithFilter(monitor) - assertEquals("Alert saved for test monitor", 2, alerts.size) - - val findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 2, findings.size) - val foundFindings = findings.filter { it.relatedDocIds.contains("1") || it.relatedDocIds.contains("5") } - assertEquals("Didn't find findings for docs 1 and 5", 2, foundFindings.size) - } - - fun `test execute monitor with new index added after first execution that generates alerts and findings`() { - val testIndex = createTestIndex("test1") - val testIndex2 = createTestIndex("test2") - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery)) - - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - assertNotNull(monitor.id) - - indexDoc(testIndex, "1", testDoc) - indexDoc(testIndex2, "5", testDoc) - executeMonitor(monitor.id) - - var alerts = searchAlertsWithFilter(monitor) - assertEquals("Alert saved for test monitor", 2, alerts.size) - - var findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 2, findings.size) - - var foundFindings = findings.filter { it.relatedDocIds.contains("1") || it.relatedDocIds.contains("5") } - 
assertEquals("Findings saved for test monitor expected 1 and 5", 2, foundFindings.size) - - // clear previous findings and alerts - deleteIndex(ALL_FINDING_INDEX_PATTERN) - deleteIndex(ALL_ALERT_INDEX_PATTERN) - - val testIndex3 = createTestIndex("test3") - indexDoc(testIndex3, "10", testDoc) - indexDoc(testIndex, "14", testDoc) - indexDoc(testIndex2, "51", testDoc) - - val response = executeMonitor(monitor.id) - - val output = entityAsMap(response) - - assertEquals(monitor.name, output["monitor_name"]) - @Suppress("UNCHECKED_CAST") - val searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - val matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 3, matchingDocsToQuery.size) - assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("14|$testIndex", "51|$testIndex2", "10|$testIndex3"))) - - alerts = searchAlertsWithFilter(monitor) - assertEquals("Alert saved for test monitor", 3, alerts.size) - - findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 3, findings.size) - - foundFindings = findings.filter { - it.relatedDocIds.contains("14") || it.relatedDocIds.contains("51") || it.relatedDocIds.contains("10") - } - assertEquals("Findings saved for test monitor expected 14, 51 and 10", 3, foundFindings.size) - } - - fun `test execute monitor with indices having fields with same name but different data types`() { - val testIndex = createTestIndex( - "test1", - """"properties": { - "source.device.port": { "type": "long" }, - "source.device.hwd.id": { "type": "long" }, - "nested_field": { - "type": "nested", - "properties": { - "test1": { - "type": "keyword" - } - } - }, - "my_join_field": { - "type": "join", - "relations": { - "question": "answer" - } - }, - "test_field" : { "type" : "integer" } - } - """.trimIndent() - ) - var testDoc = """{ - "source" : { "device": {"port" : 12345 } }, - "nested_field": { "test1": "some text" }, - "test_field": 12345 - }""" - - val docQuery1 = DocLevelQuery( - query = "(source.device.port:12345 AND test_field:12345) OR source.device.hwd.id:12345", - name = "4", - fields = listOf() - ) - val docQuery2 = DocLevelQuery( - query = "(source.device.port:\"12345\" AND test_field:\"12345\") OR source.device.hwd.id:\"12345\"", - name = "5", - fields = listOf() - ) - val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery1, docQuery2)) - - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - assertNotNull(monitor.id) - - indexDoc(testIndex, "1", testDoc) - executeMonitor(monitor.id) - - var alerts = searchAlertsWithFilter(monitor) - assertEquals("Alert saved for test monitor", 1, alerts.size) - - var findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 1, findings.size) - - // clear previous findings and alerts - deleteIndex(ALL_FINDING_INDEX_PATTERN) - deleteIndex(ALL_ALERT_INDEX_PATTERN) - - indexDoc(testIndex, "2", testDoc) - - // no fields expanded as only index test1 is present - val oldExpectedQueries = listOf( - "(source.device.port_test__${monitor.id}:12345 AND test_field_test__${monitor.id}:12345) OR " + - "source.device.hwd.id_test__${monitor.id}:12345", - "(source.device.port_test__${monitor.id}:\"12345\" AND test_field_test__${monitor.id}:\"12345\") " + - "OR source.device.hwd.id_test__${monitor.id}:\"12345\"" - ) 
- - val request = """{ - "size": 10, - "query": { - "match_all": {} - } - }""" - var httpResponse = adminClient().makeRequest( - "GET", "/${monitor.dataSources.queryIndex}/_search", - StringEntity(request, ContentType.APPLICATION_JSON) - ) - assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) - var searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content)) - searchResponse.hits.forEach { hit -> - val query = ((hit.sourceAsMap["query"] as Map)["query_string"] as Map)["query"] - assertTrue(oldExpectedQueries.contains(query)) - } - - val testIndex2 = createTestIndex( - "test2", - """ - "properties" : { - "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" }, - "test_field" : { "type" : "keyword" }, - "number" : { "type" : "keyword" } - } - """.trimIndent() - ) - testDoc = """{ - "source" : { "device": {"port" : "12345" } }, - "nested_field": { "test1": "some text" }, - "test_field": "12345" - }""" - indexDoc(testIndex2, "1", testDoc) - executeMonitor(monitor.id) - - // only fields source.device.port & test_field is expanded as they have same name but different data types - // in indices test1 & test2 - val newExpectedQueries = listOf( - "(source.device.port_test2_${monitor.id}:12345 AND test_field_test2_${monitor.id}:12345) " + - "OR source.device.hwd.id_test__${monitor.id}:12345", - "(source.device.port_test1_${monitor.id}:12345 AND test_field_test1_${monitor.id}:12345) " + - "OR source.device.hwd.id_test__${monitor.id}:12345", - "(source.device.port_test2_${monitor.id}:\"12345\" AND test_field_test2_${monitor.id}:\"12345\") " + - "OR source.device.hwd.id_test__${monitor.id}:\"12345\"", - "(source.device.port_test1_${monitor.id}:\"12345\" AND test_field_test1_${monitor.id}:\"12345\") " + - "OR source.device.hwd.id_test__${monitor.id}:\"12345\"" - ) - - alerts = searchAlertsWithFilter(monitor) - assertEquals("Alert saved for test monitor", 2, alerts.size) - - findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 2, findings.size) - - httpResponse = adminClient().makeRequest( - "GET", "/${monitor.dataSources.queryIndex}/_search", - StringEntity(request, ContentType.APPLICATION_JSON) - ) - assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) - searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content)) - searchResponse.hits.forEach { hit -> - val query = ((hit.sourceAsMap["query"] as Map)["query_string"] as Map)["query"] - assertTrue(oldExpectedQueries.contains(query) || newExpectedQueries.contains(query)) - } - } - - fun `test execute monitor with indices having fields with same name but with different nesting`() { - val testIndex = createTestIndex( - "test1", - """"properties": { - "nested_field": { - "type": "nested", - "properties": { - "test1": { - "type": "keyword" - } - } - } - } - """.trimIndent() - ) - - val testIndex2 = createTestIndex( - "test2", - """"properties": { - "nested_field": { - "properties": { - "test1": { - "type": "keyword" - } - } - } - } - """.trimIndent() - ) - val testDoc = """{ - "nested_field": { "test1": "12345" } - }""" - - val docQuery = DocLevelQuery( - query = "nested_field.test1:\"12345\"", - name = "5", - fields = listOf() - ) - val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery)) - - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val monitor = createMonitor(randomDocumentLevelMonitor(inputs = 
listOf(docLevelInput), triggers = listOf(trigger))) - assertNotNull(monitor.id) - - indexDoc(testIndex, "1", testDoc) - indexDoc(testIndex2, "1", testDoc) - - executeMonitor(monitor.id) - - val alerts = searchAlertsWithFilter(monitor) - assertEquals("Alert saved for test monitor", 2, alerts.size) - - val findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 2, findings.size) - - // as mappings of source.id & test_field are different so, both of them expands - val expectedQueries = listOf( - "nested_field.test1_test__${monitor.id}:\"12345\"" - ) - - val request = """{ - "size": 10, - "query": { - "match_all": {} - } - }""" - var httpResponse = adminClient().makeRequest( - "GET", "/${monitor.dataSources.queryIndex}/_search", - StringEntity(request, ContentType.APPLICATION_JSON) - ) - assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) - var searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content)) - searchResponse.hits.forEach { hit -> - val query = ((hit.sourceAsMap["query"] as Map)["query_string"] as Map)["query"] - assertTrue(expectedQueries.contains(query)) - } - } - - fun `test execute monitor with indices having fields with same name but different field mappings`() { - val testIndex = createTestIndex( - "test1", - """"properties": { - "source": { - "properties": { - "id": { - "type":"text", - "analyzer":"whitespace" - } - } - }, - "test_field" : { - "type":"text", - "analyzer":"whitespace" - } - } - """.trimIndent() - ) - - val testIndex2 = createTestIndex( - "test2", - """"properties": { - "source": { - "properties": { - "id": { - "type":"text" - } - } - }, - "test_field" : { - "type":"text" - } - } - """.trimIndent() - ) - val testDoc = """{ - "source" : {"id" : "12345" }, - "nested_field": { "test1": "some text" }, - "test_field": "12345" - }""" - - val docQuery = DocLevelQuery( - query = "test_field:\"12345\" AND source.id:\"12345\"", - name = "5", - fields = listOf() - ) - val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery)) - - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - assertNotNull(monitor.id) - - indexDoc(testIndex, "1", testDoc) - indexDoc(testIndex2, "1", testDoc) - - executeMonitor(monitor.id) - - val alerts = searchAlertsWithFilter(monitor) - assertEquals("Alert saved for test monitor", 2, alerts.size) - - val findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 2, findings.size) - - // as mappings of source.id & test_field are different so, both of them expands - val expectedQueries = listOf( - "test_field_test2_${monitor.id}:\"12345\" AND source.id_test2_${monitor.id}:\"12345\"", - "test_field_test1_${monitor.id}:\"12345\" AND source.id_test1_${monitor.id}:\"12345\"" - ) - - val request = """{ - "size": 10, - "query": { - "match_all": {} - } - }""" - var httpResponse = adminClient().makeRequest( - "GET", "/${monitor.dataSources.queryIndex}/_search", - StringEntity(request, ContentType.APPLICATION_JSON) - ) - assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) - var searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content)) - searchResponse.hits.forEach { hit -> - val query = ((hit.sourceAsMap["query"] as Map)["query_string"] as Map)["query"] - 
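-            // Each percolator hit stores its rewritten query under query.query_string.query;
-            // every hit must match one of the two per-index variants listed above.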
assertTrue(expectedQueries.contains(query)) - } - } - - fun `test execute monitor with indices having fields with same name but different field mappings in multiple indices`() { - val testIndex = createTestIndex( - "test1", - """"properties": { - "source": { - "properties": { - "device": { - "properties": { - "hwd": { - "properties": { - "id": { - "type":"text", - "analyzer":"whitespace" - } - } - } - } - } - } - }, - "test_field" : { - "type":"text" - } - } - """.trimIndent() - ) - - val testIndex2 = createTestIndex( - "test2", - """"properties": { - "test_field" : { - "type":"keyword" - } - } - """.trimIndent() - ) - - val testIndex4 = createTestIndex( - "test4", - """"properties": { - "source": { - "properties": { - "device": { - "properties": { - "hwd": { - "properties": { - "id": { - "type":"text" - } - } - } - } - } - } - }, - "test_field" : { - "type":"text" - } - } - """.trimIndent() - ) - - val testDoc1 = """{ - "source" : {"device" : {"hwd" : {"id" : "12345"}} }, - "nested_field": { "test1": "some text" } - }""" - val testDoc2 = """{ - "nested_field": { "test1": "some text" }, - "test_field": "12345" - }""" - - val docQuery1 = DocLevelQuery( - query = "test_field:\"12345\"", - name = "4", - fields = listOf() - ) - val docQuery2 = DocLevelQuery( - query = "source.device.hwd.id:\"12345\"", - name = "5", - fields = listOf() - ) - - val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery1, docQuery2)) - - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - assertNotNull(monitor.id) - - indexDoc(testIndex4, "1", testDoc1) - indexDoc(testIndex2, "1", testDoc2) - indexDoc(testIndex, "1", testDoc1) - indexDoc(testIndex, "2", testDoc2) - - executeMonitor(monitor.id) - - val alerts = searchAlertsWithFilter(monitor) - assertEquals("Alert saved for test monitor", 4, alerts.size) - - val findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 4, findings.size) - - val request = """{ - "size": 0, - "query": { - "match_all": {} - } - }""" - val httpResponse = adminClient().makeRequest( - "GET", "/${monitor.dataSources.queryIndex}/_search", - StringEntity(request, ContentType.APPLICATION_JSON) - ) - assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) - - val searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content)) - searchResponse.hits.totalHits?.let { assertEquals(5L, it.value) } - } - - fun `test no of queries generated for document-level monitor based on wildcard indexes`() { - val testIndex = createTestIndex("test1") - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery)) - - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - assertNotNull(monitor.id) - - indexDoc(testIndex, "1", testDoc) - executeMonitor(monitor.id) - - val request = """{ - "size": 0, - "query": { - "match_all": {} - } - }""" - var 
httpResponse = adminClient().makeRequest( - "GET", "/${monitor.dataSources.queryIndex}/_search", - StringEntity(request, ContentType.APPLICATION_JSON) - ) - assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) - - var searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content)) - searchResponse.hits.totalHits?.let { assertEquals(1L, it.value) } - - val testIndex2 = createTestIndex("test2") - indexDoc(testIndex2, "1", testDoc) - executeMonitor(monitor.id) - - httpResponse = adminClient().makeRequest( - "GET", "/${monitor.dataSources.queryIndex}/_search", - StringEntity(request, ContentType.APPLICATION_JSON) - ) - assertEquals("Search failed", RestStatus.OK, httpResponse.restStatus()) - - searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, httpResponse.entity.content)) - searchResponse.hits.totalHits?.let { assertEquals(1L, it.value) } - } - - fun `test execute monitor with new index added after first execution that generates alerts and findings from new query`() { - val testIndex = createTestIndex("test1") - val testIndex2 = createTestIndex("test2") - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - - val docQuery1 = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docQuery2 = DocLevelQuery(query = "test_field_new:\"us-west-2\"", name = "4", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf("test*"), listOf(docQuery1, docQuery2)) - - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - assertNotNull(monitor.id) - - indexDoc(testIndex, "1", testDoc) - indexDoc(testIndex2, "5", testDoc) - executeMonitor(monitor.id) - - var alerts = searchAlertsWithFilter(monitor) - assertEquals("Alert saved for test monitor", 2, alerts.size) - - var findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 2, findings.size) - - var foundFindings = findings.filter { it.relatedDocIds.contains("1") || it.relatedDocIds.contains("5") } - assertEquals("Findings saved for test monitor expected 1 and 5", 2, foundFindings.size) - - // clear previous findings and alerts - deleteIndex(ALL_FINDING_INDEX_PATTERN) - deleteIndex(ALL_ALERT_INDEX_PATTERN) - - val testDocNew = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field_new" : "us-west-2" - }""" - - val testIndex3 = createTestIndex("test3") - indexDoc(testIndex3, "10", testDocNew) - - val response = executeMonitor(monitor.id) - - val output = entityAsMap(response) - - assertEquals(monitor.name, output["monitor_name"]) - @Suppress("UNCHECKED_CAST") - val searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - val matchingDocsToQuery = searchResult[docQuery2.id] as List - assertEquals("Incorrect search result", 1, matchingDocsToQuery.size) - assertTrue("Incorrect search result", matchingDocsToQuery.containsAll(listOf("10|$testIndex3"))) - - alerts = searchAlertsWithFilter(monitor) - assertEquals("Alert saved for test monitor", 1, alerts.size) - - findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 
1, findings.size) - - foundFindings = findings.filter { - it.relatedDocIds.contains("10") - } - assertEquals("Findings saved for test monitor expected 10", 1, foundFindings.size) - } - - fun `test document-level monitor when alias only has write index with 0 docs`() { - // Monitor should execute, but create 0 findings. - val alias = createTestAlias(includeWriteIndex = true) - val aliasIndex = alias.keys.first() - val query = randomDocLevelQuery(tags = listOf()) - val input = randomDocLevelMonitorInput(indices = listOf(aliasIndex), queries = listOf(query)) - val trigger = randomDocumentLevelTrigger(condition = Script("query[id=\"${query.id}\"]")) - val monitor = createMonitor(randomDocumentLevelMonitor(enabled = false, inputs = listOf(input), triggers = listOf(trigger))) - - val response: Response - try { - response = executeMonitor(monitor.id) - } catch (e: ResponseException) { - assertNotNull("Expected an error message: $e", e.message) - e.message?.let { - assertTrue("Unexpected exception: $e", it.contains("""reason":"no such index [.opensearch-alerting-findings]""")) - } - assertEquals(404, e.response.statusLine.statusCode) - return - } - - val output = entityAsMap(response) - val inputResults = output.stringMap("input_results") - val errorMessage = inputResults?.get("error") - @Suppress("UNCHECKED_CAST") - val searchResult = (inputResults?.get("results") as List>).firstOrNull() - @Suppress("UNCHECKED_CAST") - val findings = searchFindings() - - assertEquals(monitor.name, output["monitor_name"]) - assertNull("Unexpected monitor execution failure: $errorMessage", errorMessage) - findings.findings.forEach { - val queryIds = it.finding.docLevelQueries.map { query -> query.id } - assertFalse("No findings should exist with queryId ${query.id}, but found: $it", queryIds.contains(query.id)) - } - } - - fun `test document-level monitor when docs exist prior to monitor creation`() { - // FIXME: Consider renaming this test case - // Only new docs should create findings. 
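// How "only new docs" is enforced (a sketch of the runner's bookkeeping, inferred from these
// tests rather than spelled out by them): the monitor's metadata records per-shard sequence
// numbers when the monitor is created, and each run only fetches docs beyond that watermark,
// conceptually:
//   val from = lastRunContext[shardId]     // seq_no recorded at creation / previous run
//   val to = currentMaxSeqNo(shardId)      // seq_no at execution time
//   fetchShardDocs(shardId, from + 1, to)  // pre-existing docs fall outside the window
// currentMaxSeqNo and fetchShardDocs are hypothetical names used only for illustration.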
- val alias = createTestAlias(includeWriteIndex = true) - val aliasIndex = alias.keys.first() - val indices = alias[aliasIndex]?.keys?.toList() as List - val query = randomDocLevelQuery(tags = listOf()) - val input = randomDocLevelMonitorInput(indices = listOf(aliasIndex), queries = listOf(query)) - val trigger = randomDocumentLevelTrigger(condition = Script("query[id=\"${query.id}\"]")) - - val preExistingDocIds = mutableSetOf() - indices.forEach { index -> - val docId = index.hashCode().toString() - val doc = """{ "message" : "${query.query}" }""" - preExistingDocIds.add(docId) - indexDoc(index = index, id = docId, doc = doc) - } - assertEquals(indices.size, preExistingDocIds.size) - - val monitor = createMonitor(randomDocumentLevelMonitor(enabled = false, inputs = listOf(input), triggers = listOf(trigger))) - - val response = executeMonitor(monitor.id) - - val output = entityAsMap(response) - val inputResults = output.stringMap("input_results") - val errorMessage = inputResults?.get("error") - @Suppress("UNCHECKED_CAST") - val searchResult = (inputResults?.get("results") as List>).firstOrNull() - @Suppress("UNCHECKED_CAST") - val findings = searchFindings() - - assertEquals(monitor.name, output["monitor_name"]) - assertNull("Unexpected monitor execution failure: $errorMessage", errorMessage) - findings.findings.forEach { - val docIds = it.finding.relatedDocIds - assertTrue( - "Findings index should not contain a pre-existing doc, but found $it", - preExistingDocIds.intersect(docIds).isEmpty() - ) - } - } - - fun `test document-level monitor when alias indices only contain docs that match query`() { - // Only new docs should create findings. - val alias = createTestAlias(includeWriteIndex = true) - val aliasIndex = alias.keys.first() - val indices = alias[aliasIndex]?.keys?.toList() as List - val query = randomDocLevelQuery(tags = listOf()) - val input = randomDocLevelMonitorInput(indices = listOf(aliasIndex), queries = listOf(query)) - val trigger = randomDocumentLevelTrigger(condition = Script("query[id=\"${query.id}\"]")) - - val preExistingDocIds = mutableSetOf() - indices.forEach { index -> - val docId = index.hashCode().toString() - val doc = """{ "message" : "${query.query}" }""" - preExistingDocIds.add(docId) - indexDoc(index = index, id = docId, doc = doc) - } - assertEquals(indices.size, preExistingDocIds.size) - - val monitor = createMonitor(randomDocumentLevelMonitor(enabled = false, inputs = listOf(input), triggers = listOf(trigger))) - executeMonitor(monitor.id) - - val newDocIds = mutableSetOf() - indices.forEach { index -> - (1..5).map { - val docId = "${index.hashCode()}$it" - val doc = """{ "message" : "${query.query}" }""" - newDocIds.add(docId) - indexDoc(index = index, id = docId, doc = doc) - } - } - assertEquals(indices.size * 5, newDocIds.size) - - val response = executeMonitor(monitor.id) - - val output = entityAsMap(response) - val inputResults = output.stringMap("input_results") - val errorMessage = inputResults?.get("error") - @Suppress("UNCHECKED_CAST") - val searchResult = (inputResults?.get("results") as List>).firstOrNull() - @Suppress("UNCHECKED_CAST") - val findings = searchFindings() - - assertEquals(monitor.name, output["monitor_name"]) - assertNull("Unexpected monitor execution failure: $errorMessage", errorMessage) - findings.findings.forEach { - val docIds = it.finding.relatedDocIds - assertTrue( - "Findings index should not contain a pre-existing doc, but found $it", - preExistingDocIds.intersect(docIds).isEmpty() - ) - assertTrue("Found an 
unexpected finding $it", newDocIds.intersect(docIds).isNotEmpty())
-        }
-    }
-
-    fun `test document-level monitor when alias indices contain docs that do and do not match query`() {
-        // Only matching docs should create findings.
-        val alias = createTestAlias(includeWriteIndex = true)
-        val aliasIndex = alias.keys.first()
-        val indices = alias[aliasIndex]?.keys?.toList() as List<String>
-        val query = randomDocLevelQuery(tags = listOf())
-        val input = randomDocLevelMonitorInput(indices = listOf(aliasIndex), queries = listOf(query))
-        val trigger = randomDocumentLevelTrigger(condition = Script("query[id=\"${query.id}\"]"))
-
-        val preExistingDocIds = mutableSetOf<String>()
-        indices.forEach { index ->
-            val docId = index.hashCode().toString()
-            val doc = """{ "message" : "${query.query}" }"""
-            preExistingDocIds.add(docId)
-            indexDoc(index = index, id = docId, doc = doc)
-        }
-        assertEquals(indices.size, preExistingDocIds.size)
-
-        val monitor = createMonitor(randomDocumentLevelMonitor(enabled = false, inputs = listOf(input), triggers = listOf(trigger)))
-        executeMonitor(monitor.id)
-
-        val matchingDocIds = mutableSetOf<String>()
-        val nonMatchingDocIds = mutableSetOf<String>()
-        indices.forEach { index ->
-            (1..5).map {
-                val matchingDocId = "${index.hashCode()}$it"
-                val matchingDoc = """{ "message" : "${query.query}" }"""
-                indexDoc(index = index, id = matchingDocId, doc = matchingDoc)
-                matchingDocIds.add(matchingDocId)
-
-                val nonMatchingDocId = "${index.hashCode()}${it}2"
-                var nonMatchingDoc = StringBuilder(query.query).insert(2, "difference").toString()
-                nonMatchingDoc = """{ "message" : "$nonMatchingDoc" }"""
-                indexDoc(index = index, id = nonMatchingDocId, doc = nonMatchingDoc)
-                nonMatchingDocIds.add(nonMatchingDocId)
-            }
-        }
-        assertEquals(indices.size * 5, matchingDocIds.size)
-
-        val response = executeMonitor(monitor.id)
-
-        val output = entityAsMap(response)
-        val inputResults = output.stringMap("input_results")
-        val errorMessage = inputResults?.get("error")
-        @Suppress("UNCHECKED_CAST")
-        val searchResult = (inputResults?.get("results") as List<Map<String, Any>>).firstOrNull()
-        @Suppress("UNCHECKED_CAST")
-        val findings = searchFindings()
-
-        assertEquals(monitor.name, output["monitor_name"])
-        assertNull("Unexpected monitor execution failure: $errorMessage", errorMessage)
-        findings.findings.forEach {
-            val docIds = it.finding.relatedDocIds
-            assertTrue(
-                "Findings index should not contain a pre-existing doc, but found $it",
-                preExistingDocIds.intersect(docIds).isEmpty()
-            )
-            assertTrue("Found doc that doesn't match query: $it", nonMatchingDocIds.intersect(docIds).isEmpty())
-            assertTrue("Finding $it did not relate to any matching doc", matchingDocIds.intersect(docIds).isNotEmpty())
-        }
-    }
-
-    fun `test document-level monitor when datastreams contain docs that do match query`() {
-        val dataStreamName = "test-datastream"
-        createDataStream(
-            dataStreamName,
-            """
-                "properties" : {
-                  "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" },
-                  "test_field" : { "type" : "keyword" },
-                  "number" : { "type" : "keyword" }
-                }
-            """.trimIndent(),
-            false
-        )
-
-        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
-        val docLevelInput = DocLevelMonitorInput("description", listOf(dataStreamName), listOf(docQuery))
-
-        val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id)
-        val monitor = createMonitor(
-            randomDocumentLevelMonitor(
-                inputs = listOf(docLevelInput),
-                triggers =
listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))) - ) - ) - - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "@timestamp": "$testTime", - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - indexDoc(dataStreamName, "1", testDoc) - var response = executeMonitor(monitor.id) - var output = entityAsMap(response) - var searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - var matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 1, matchingDocsToQuery.size) - - rolloverDatastream(dataStreamName) - indexDoc(dataStreamName, "2", testDoc) - response = executeMonitor(monitor.id) - output = entityAsMap(response) - searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 1, matchingDocsToQuery.size) - - deleteDataStream(dataStreamName) - } - - fun `test document-level monitor when datastreams contain docs across read-only indices that do match query`() { - val dataStreamName = "test-datastream" - createDataStream( - dataStreamName, - """ - "properties" : { - "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" }, - "test_field" : { "type" : "keyword" }, - "number" : { "type" : "keyword" } - } - """.trimIndent(), - false - ) - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(dataStreamName), listOf(docQuery)) - - val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id) - val monitor = createMonitor( - randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))) - ) - ) - - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "@timestamp": "$testTime", - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - indexDoc(dataStreamName, "1", testDoc) - var response = executeMonitor(monitor.id) - var output = entityAsMap(response) - var searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - var matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 1, matchingDocsToQuery.size) - - indexDoc(dataStreamName, "2", testDoc) - rolloverDatastream(dataStreamName) - rolloverDatastream(dataStreamName) - indexDoc(dataStreamName, "4", testDoc) - rolloverDatastream(dataStreamName) - response = executeMonitor(monitor.id) - output = entityAsMap(response) - searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) - - indexDoc(dataStreamName, "5", testDoc) - indexDoc(dataStreamName, "6", testDoc) - response = executeMonitor(monitor.id) - output = entityAsMap(response) - searchResult = (output.objectMap("input_results")["results"] as List>).first() - 
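// Shape of input_results that the casts around here rely on (reconstructed from assertions in
// this file, e.g. the "10|test3"-style values matched earlier):
//   "input_results": {
//     "results": [ { "<queryId>": [ "<docId>|<concreteIndex>", ... ] } ]
//   }
// so searchResult[docQuery.id] is the list of "docId|index" strings matched by that query.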
@Suppress("UNCHECKED_CAST") - matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) - deleteDataStream(dataStreamName) - } - - fun `test document-level monitor when index alias contain docs that do match query`() { - val aliasName = "test-alias" - createIndexAlias( - aliasName, - """ - "properties" : { - "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" }, - "test_field" : { "type" : "keyword" }, - "number" : { "type" : "keyword" } - } - """.trimIndent() - ) - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf("$aliasName"), listOf(docQuery)) - - val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id) - val monitor = createMonitor( - randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))) - ) - ) - - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "@timestamp": "$testTime", - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - indexDoc(aliasName, "1", testDoc) - var response = executeMonitor(monitor.id) - var output = entityAsMap(response) - var searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - var matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 1, matchingDocsToQuery.size) - - rolloverDatastream(aliasName) - indexDoc(aliasName, "2", testDoc) - response = executeMonitor(monitor.id) - output = entityAsMap(response) - searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 1, matchingDocsToQuery.size) - - deleteIndexAlias(aliasName) - } - - fun `test document-level monitor when multiple datastreams contain docs across read-only indices that do match query`() { - val dataStreamName1 = "test-datastream1" - createDataStream( - dataStreamName1, - """ - "properties" : { - "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" }, - "test_field" : { "type" : "keyword" }, - "number" : { "type" : "keyword" } - } - """.trimIndent(), - false - ) - val dataStreamName2 = "test-datastream2" - createDataStream( - dataStreamName2, - """ - "properties" : { - "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" }, - "test_field" : { "type" : "keyword" }, - "number" : { "type" : "keyword" } - } - """.trimIndent(), - false - ) - - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "@timestamp": "$testTime", - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - indexDoc(dataStreamName2, "-1", testDoc) - rolloverDatastream(dataStreamName2) - indexDoc(dataStreamName2, "0", testDoc) - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf("test-datastream*"), listOf(docQuery)) - - val action = randomAction(template = 
randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id) - val monitor = createMonitor( - randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))) - ) - ) - - indexDoc(dataStreamName1, "1", testDoc) - indexDoc(dataStreamName2, "1", testDoc) - var response = executeMonitor(monitor.id) - var output = entityAsMap(response) - var searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - var matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 2, matchingDocsToQuery.size) - - indexDoc(dataStreamName1, "2", testDoc) - indexDoc(dataStreamName2, "2", testDoc) - rolloverDatastream(dataStreamName1) - rolloverDatastream(dataStreamName1) - rolloverDatastream(dataStreamName2) - indexDoc(dataStreamName1, "4", testDoc) - indexDoc(dataStreamName2, "4", testDoc) - rolloverDatastream(dataStreamName1) - response = executeMonitor(monitor.id) - output = entityAsMap(response) - searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 4, matchingDocsToQuery.size) - - indexDoc(dataStreamName1, "5", testDoc) - indexDoc(dataStreamName1, "6", testDoc) - indexDoc(dataStreamName2, "5", testDoc) - indexDoc(dataStreamName2, "6", testDoc) - response = executeMonitor(monitor.id) - output = entityAsMap(response) - searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 4, matchingDocsToQuery.size) - deleteDataStream(dataStreamName1) - deleteDataStream(dataStreamName2) - } - - fun `test document-level monitor ignoring old read-only indices for datastreams`() { - val dataStreamName = "test-datastream" - createDataStream( - dataStreamName, - """ - "properties" : { - "test_strict_date_time" : { "type" : "date", "format" : "strict_date_time" }, - "test_field" : { "type" : "keyword" }, - "number" : { "type" : "keyword" } - } - """.trimIndent(), - false - ) - - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "@timestamp": "$testTime", - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - indexDoc(dataStreamName, "-1", testDoc) - rolloverDatastream(dataStreamName) - indexDoc(dataStreamName, "0", testDoc) - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(dataStreamName), listOf(docQuery)) - - val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id) - val monitor = createMonitor( - randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))) - ) - ) - - indexDoc(dataStreamName, "1", testDoc) - var response = executeMonitor(monitor.id) - var output = entityAsMap(response) - var searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - var matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect 
search result", 1, matchingDocsToQuery.size) - - rolloverDatastream(dataStreamName) - indexDoc(dataStreamName, "2", testDoc) - response = executeMonitor(monitor.id) - output = entityAsMap(response) - searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - matchingDocsToQuery = searchResult[docQuery.id] as List - assertEquals("Incorrect search result", 1, matchingDocsToQuery.size) - - deleteDataStream(dataStreamName) - } - - fun `test execute monitor with non-null data sources`() { - - val testIndex = createTestIndex() - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) - - val alertCategories = AlertCategory.values() - val actionExecutionScope = PerAlertActionScope( - actionableAlerts = (1..randomInt(alertCategories.size)).map { alertCategories[it - 1] }.toSet() - ) - val actionExecutionPolicy = ActionExecutionPolicy(actionExecutionScope) - val actions = (0..randomInt(10)).map { - randomActionWithPolicy( - template = randomTemplateScript("Hello {{ctx.monitor.name}}"), - destinationId = createDestination().id, - actionExecutionPolicy = actionExecutionPolicy - ) - } - - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = actions) - try { - createMonitor( - randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources( - findingsIndex = "custom_findings_index", - alertsIndex = "custom_alerts_index", - ) - ) - ) - fail("Expected create monitor to fail") - } catch (e: ResponseException) { - assertTrue(e.message!!.contains("illegal_argument_exception")) - } - } - - fun `test execute monitor with indices removed after first run`() { - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - - val index1 = createTestIndex() - val index2 = createTestIndex() - val index4 = createTestIndex() - val index5 = createTestIndex() - - val docQuery = DocLevelQuery(query = "\"us-west-2\"", name = "3", fields = listOf()) - var docLevelInput = DocLevelMonitorInput("description", listOf(index1, index2, index4, index5), listOf(docQuery)) - - val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id) - val monitor = createMonitor( - randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))) - ) - ) - - indexDoc(index1, "1", testDoc) - indexDoc(index2, "1", testDoc) - indexDoc(index4, "1", testDoc) - indexDoc(index5, "1", testDoc) - - var response = executeMonitor(monitor.id) - - var output = entityAsMap(response) - assertEquals(monitor.name, output["monitor_name"]) - - assertEquals(1, output.objectMap("trigger_results").values.size) - deleteIndex(index1) - deleteIndex(index2) - - indexDoc(index4, "2", testDoc) - response = executeMonitor(monitor.id) - - output = entityAsMap(response) - assertEquals(1, 
output.objectMap("trigger_results").values.size) - } - - @Suppress("UNCHECKED_CAST") - /** helper that returns a field in a json map whose values are all json objects */ - private fun Map.objectMap(key: String): Map> { - return this[key] as Map> - } - - fun `test execute monitor with non-null owner`() { - - val testIndex = createTestIndex() - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) - - val alertCategories = AlertCategory.values() - val actionExecutionScope = PerAlertActionScope( - actionableAlerts = (1..randomInt(alertCategories.size)).map { alertCategories[it - 1] }.toSet() - ) - val actionExecutionPolicy = ActionExecutionPolicy(actionExecutionScope) - val actions = (0..randomInt(10)).map { - randomActionWithPolicy( - template = randomTemplateScript("Hello {{ctx.monitor.name}}"), - destinationId = createDestination().id, - actionExecutionPolicy = actionExecutionPolicy - ) - } - - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN, actions = actions) - try { - createMonitor( - randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - owner = "owner" - ) - ) - fail("Expected create monitor to fail") - } catch (e: ResponseException) { - assertTrue(e.message!!.contains("illegal_argument_exception")) - } - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/MonitorDataSourcesIT.kt b/alerting/bin/test/org/opensearch/alerting/MonitorDataSourcesIT.kt deleted file mode 100644 index d3f166b13..000000000 --- a/alerting/bin/test/org/opensearch/alerting/MonitorDataSourcesIT.kt +++ /dev/null @@ -1,5923 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import org.junit.Assert -import org.opensearch.action.DocWriteRequest -import org.opensearch.action.admin.cluster.state.ClusterStateRequest -import org.opensearch.action.admin.indices.alias.Alias -import org.opensearch.action.admin.indices.close.CloseIndexRequest -import org.opensearch.action.admin.indices.create.CreateIndexRequest -import org.opensearch.action.admin.indices.delete.DeleteIndexRequest -import org.opensearch.action.admin.indices.get.GetIndexRequest -import org.opensearch.action.admin.indices.get.GetIndexResponse -import org.opensearch.action.admin.indices.mapping.get.GetMappingsRequest -import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest -import org.opensearch.action.admin.indices.open.OpenIndexRequest -import org.opensearch.action.admin.indices.refresh.RefreshRequest -import org.opensearch.action.bulk.BulkRequest -import org.opensearch.action.bulk.BulkResponse -import org.opensearch.action.fieldcaps.FieldCapabilitiesRequest -import org.opensearch.action.index.IndexRequest -import org.opensearch.action.search.SearchRequest -import org.opensearch.action.support.WriteRequest -import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.core.ScheduledJobIndices -import org.opensearch.alerting.model.DocumentLevelTriggerRunResult -import org.opensearch.alerting.model.WorkflowMetadata -import org.opensearch.alerting.transport.AlertingSingleNodeTestCase -import 
org.opensearch.alerting.util.AlertingException -import org.opensearch.alerting.util.DocLevelMonitorQueries -import org.opensearch.alerting.util.DocLevelMonitorQueries.Companion.INDEX_PATTERN_SUFFIX -import org.opensearch.alerting.workflow.CompositeWorkflowRunner -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.action.AcknowledgeAlertRequest -import org.opensearch.commons.alerting.action.AcknowledgeAlertResponse -import org.opensearch.commons.alerting.action.AcknowledgeChainedAlertRequest -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.DeleteMonitorRequest -import org.opensearch.commons.alerting.action.GetAlertsRequest -import org.opensearch.commons.alerting.action.GetAlertsResponse -import org.opensearch.commons.alerting.action.IndexMonitorResponse -import org.opensearch.commons.alerting.action.SearchMonitorRequest -import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.ChainedAlertTrigger -import org.opensearch.commons.alerting.model.ChainedMonitorFindings -import org.opensearch.commons.alerting.model.CompositeInput -import org.opensearch.commons.alerting.model.DataSources -import org.opensearch.commons.alerting.model.Delegate -import org.opensearch.commons.alerting.model.DocLevelMonitorInput -import org.opensearch.commons.alerting.model.DocLevelQuery -import org.opensearch.commons.alerting.model.IntervalSchedule -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.alerting.model.ScheduledJob.Companion.DOC_LEVEL_QUERIES_INDEX -import org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX -import org.opensearch.commons.alerting.model.SearchInput -import org.opensearch.commons.alerting.model.Table -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils -import org.opensearch.index.mapper.MapperService -import org.opensearch.index.query.MatchQueryBuilder -import org.opensearch.index.query.QueryBuilders -import org.opensearch.index.query.TermQueryBuilder -import org.opensearch.rest.RestRequest -import org.opensearch.script.Script -import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder -import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.test.OpenSearchTestCase -import java.time.Instant -import java.time.ZonedDateTime -import java.time.format.DateTimeFormatter -import java.time.temporal.ChronoUnit -import java.time.temporal.ChronoUnit.MILLIS -import java.util.Collections -import java.util.Map -import java.util.UUID -import java.util.concurrent.ExecutionException -import java.util.concurrent.TimeUnit -import java.util.stream.Collectors - -class MonitorDataSourcesIT : AlertingSingleNodeTestCase() { - - fun `test execute monitor with dryrun`() { - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = 
DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - ) - val monitorResponse = createMonitor(monitor) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - indexDoc(index, "1", testDoc) - val id = monitorResponse.id - val executeMonitorResponse = executeMonitor(monitor, id, true) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - searchAlerts(id) - val table = Table("asc", "id", null, 1, 0, "") - var getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 0) - getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, null)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 0) - try { - client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, "wrong_alert_index")) - .get() - fail() - } catch (e: Exception) { - Assert.assertTrue(e.message!!.contains("IndexNotFoundException")) - } - } - - fun `test execute monitor with custom alerts index`() { - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customAlertsIndex = "custom_alerts_index" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources(alertsIndex = customAlertsIndex) - ) - val monitorResponse = createMonitor(monitor) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - indexDoc(index, "1", testDoc) - val id = monitorResponse.id - val executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - val alerts = searchAlerts(id, customAlertsIndex) - assertEquals("Alert saved for test monitor", 1, alerts.size) - val table = Table("asc", "id", null, 1, 0, "") - var getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, customAlertsIndex)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 1) - getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, null)) - .get() - 
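// The positional GetAlertsRequest arguments used throughout this class are, in order:
// (table, severityLevel, alertState, monitorId, alertIndex); "ALL" disables the severity and
// state filters, and a null alertIndex falls back to the default alerts index. (The parameter
// names here are descriptive, inferred from how the values are varied across these tests.)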
Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 1) - val alertId = getAlertsResponse.alerts.get(0).id - val acknowledgeAlertResponse = client().execute( - AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE, - AcknowledgeAlertRequest(id, listOf(alertId), WriteRequest.RefreshPolicy.IMMEDIATE) - ).get() - Assert.assertEquals(acknowledgeAlertResponse.acknowledged.size, 1) - } - - fun `test mappings parsing`() { - - val index1 = "index_123" - val index2 = "index_456" - val index3 = "index_789" - val index4 = "index_012" - val q1 = DocLevelQuery(query = "properties:\"abcd\"", name = "1", fields = listOf()) - val q2 = DocLevelQuery(query = "type.properties:\"abcd\"", name = "2", fields = listOf()) - val q3 = DocLevelQuery(query = "type.something.properties:\"abcd\"", name = "3", fields = listOf()) - val q4 = DocLevelQuery(query = "type.something.properties.lastone:\"abcd\"", name = "4", fields = listOf()) - - createIndex(index1, Settings.EMPTY) - createIndex(index2, Settings.EMPTY) - createIndex(index3, Settings.EMPTY) - createIndex(index4, Settings.EMPTY) - - val m1 = """{ - "properties": { - "properties": { - "type": "keyword" - } - } - } - """.trimIndent() - client().admin().indices().putMapping(PutMappingRequest(index1).source(m1, XContentType.JSON)).get() - - val m2 = """{ - "properties": { - "type": { - "properties": { - "properties": { "type": "keyword" } - } - } - } - } - """.trimIndent() - client().admin().indices().putMapping(PutMappingRequest(index2).source(m2, XContentType.JSON)).get() - - val m3 = """{ - "properties": { - "type": { - "properties": { - "something": { - "properties" : { - "properties": { "type": "keyword" } - } - } - } - } - } - } - """.trimIndent() - client().admin().indices().putMapping(PutMappingRequest(index3).source(m3, XContentType.JSON)).get() - - val m4 = """{ - "properties": { - "type": { - "properties": { - "something": { - "properties" : { - "properties": { - "properties": { - "lastone": { "type": "keyword" } - } - } - } - } - } - } - } - } - """.trimIndent() - client().admin().indices().putMapping(PutMappingRequest(index4).source(m4, XContentType.JSON)).get() - - val docLevelInput = DocLevelMonitorInput( - "description", - listOf(index1, index2, index3, index4), - listOf(q1, q2, q3, q4) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customFindingsIndex = "custom_findings_index" - val customFindingsIndexPattern = "custom_findings_index-1" - val customQueryIndex = "custom_alerts_index" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources( - queryIndex = customQueryIndex, - findingsIndex = customFindingsIndex, - findingsIndexPattern = customFindingsIndexPattern - ) - ) - val monitorResponse = createMonitor(monitor) - - val testDoc1 = """{ - "properties": "abcd" - }""" - indexDoc(index1, "1", testDoc1) - val testDoc2 = """{ - "type.properties": "abcd" - }""" - indexDoc(index2, "1", testDoc2) - val testDoc3 = """{ - "type.something.properties": "abcd" - }""" - indexDoc(index3, "1", testDoc3) - val testDoc4 = """{ - "type.something.properties.lastone": "abcd" - }""" - indexDoc(index4, "1", testDoc4) - - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - val id = monitorResponse.id - val executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - 
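// executeMonitor(monitor, id, dryrun = false) wraps the transport ExecuteMonitor call; the
// returned monitorRunResult holds one triggerResults entry per configured trigger, which is
// why a single ALWAYS_RUN trigger yields size 1 below. (Summary inferred from usage in this
// file, not from the runner implementation.)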
Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - searchAlerts(id) - val table = Table("asc", "id", null, 1, 0, "") - var getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 1) - val findings = searchFindings(id, customFindingsIndex) - assertEquals("Findings saved for test monitor", 4, findings.size) - } - - fun `test execute monitor without triggers`() { - val docQuery = DocLevelQuery(query = "eventType:\"login\"", name = "3", fields = listOf()) - - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(docQuery) - ) - val customFindingsIndex = "custom_findings_index" - val customFindingsIndexPattern = "custom_findings_index-1" - val customQueryIndex = "custom_alerts_index" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(), - dataSources = DataSources( - queryIndex = customQueryIndex, - findingsIndex = customFindingsIndex, - findingsIndexPattern = customFindingsIndexPattern - ) - ) - val monitorResponse = createMonitor(monitor) - assertFalse(monitorResponse?.id.isNullOrEmpty()) - - val testDoc = """{ - "eventType" : "login" - }""" - indexDoc(index, "1", testDoc) - - monitor = monitorResponse!!.monitor - val id = monitorResponse.id - // Execute dry run first and expect no alerts or findings - var executeMonitorResponse = executeMonitor(monitor, id, true) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 0) - searchAlerts(id) - var table = Table("asc", "id", null, 1, 0, "") - var getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.isEmpty()) - var findings = searchFindings(id, customFindingsIndex) - assertEquals("Findings saved for test monitor", 0, findings.size) - - // Execute real run - expect findings, but no alerts - executeMonitorResponse = executeMonitor(monitor, id, false) - - searchAlerts(id) - table = Table("asc", "id", null, 1, 0, "") - getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.isEmpty()) - - findings = searchFindings(id, customFindingsIndex) - assertEquals("Findings saved for test monitor", 1, findings.size) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) - assertEquals("Didn't match query", 1, findings[0].docLevelQueries.size) - } - - fun `test execute monitor with custom query index`() { - val q1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) - val q2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) - val q3 = DocLevelQuery(query = "source.ip.v4.v0:120", name = "5", fields = listOf()) - val q4 = DocLevelQuery(query = "alias.some.fff:\"us-west-2\"", name = "6", fields = listOf()) - val q5 = DocLevelQuery(query = "message:\"This is an error from IAD region\"", name = "7", fields = listOf()) - val q6 = DocLevelQuery(query = "f1.type.f4:\"hello\"", name = "8", fields = listOf()) - val q7 = 
DocLevelQuery(query = "f1.type.f2.f3:\"world\"", name = "9", fields = listOf()) - val q8 = DocLevelQuery(query = "type:\"some type\"", name = "10", fields = listOf()) - val q9 = DocLevelQuery(query = "properties:123", name = "11", fields = listOf()) - - val docLevelInput = DocLevelMonitorInput( - "description", - listOf(index), - listOf(q1, q2, q3, q4, q5, q6, q7, q8, q9) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customFindingsIndex = "custom_findings_index" - val customFindingsIndexPattern = "custom_findings_index-1" - val customQueryIndex = "custom_alerts_index" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources( - queryIndex = customQueryIndex, - findingsIndex = customFindingsIndex, - findingsIndexPattern = customFindingsIndexPattern - ) - ) - val monitorResponse = createMonitor(monitor) - // Trying to test here few different "nesting" situations and "wierd" characters - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v1" : 12345, - "source.ip.v6.v2" : 16645, - "source.ip.v4.v0" : 120, - "test_bad_char" : "\u0000", - "test_strict_date_time" : "$testTime", - "test_field.some_other_field" : "us-west-2", - "f1.type.f2.f3" : "world", - "f1.type.f4" : "hello", - "type" : "some type", - "properties": 123 - }""" - indexDoc(index, "1", testDoc) - client().admin().indices().putMapping( - PutMappingRequest(index).source("alias.some.fff", "type=alias,path=test_field.some_other_field") - ) - val mappings = "{\"properties\":{\"type\":{\"type\":\"text\",\"fields\":{\"keyword\":{\"type\":\"keyword\"," + - "\"ignore_above\":256}}},\"query\":{\"type\":\"text\"}}}" - val mappingsResp = client().admin().indices().putMapping( - PutMappingRequest(index).source(mappings, XContentType.JSON) - ).get() - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - val id = monitorResponse.id - val executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - searchAlerts(id) - val table = Table("asc", "id", null, 1, 0, "") - var getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 1) - val findings = searchFindings(id, customFindingsIndex) - assertEquals("Findings saved for test monitor", 1, findings.size) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) - assertEquals("Didn't match all 9 queries", 9, findings[0].docLevelQueries.size) - } - - fun `test execute monitor with non-flattened json doc as source`() { - val docQuery1 = DocLevelQuery(query = "source.device.port:12345 OR source.device.hwd.id:12345", name = "3", fields = listOf()) - - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(docQuery1) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customFindingsIndex = "custom_findings_index" - val customFindingsIndexPattern = "custom_findings_index-1" - val customQueryIndex = "custom_alerts_index" - var monitor = randomDocumentLevelMonitor( - inputs = 
listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources( - queryIndex = customQueryIndex, - findingsIndex = customFindingsIndex, - findingsIndexPattern = customFindingsIndexPattern - ) - ) - val monitorResponse = createMonitor(monitor) - - val mappings = """{ - "properties": { - "source.device.port": { "type": "long" }, - "source.device.hwd.id": { "type": "long" }, - "nested_field": { - "type": "nested", - "properties": { - "test1": { - "type": "keyword" - } - } - }, - "my_join_field": { - "type": "join", - "relations": { - "question": "answer" - } - } - } - }""" - - client().admin().indices().putMapping(PutMappingRequest(index).source(mappings, XContentType.JSON)).get() - val getFieldCapabilitiesResp = client().fieldCaps(FieldCapabilitiesRequest().indices(index).fields("*")).get() - assertTrue(getFieldCapabilitiesResp.getField("source").containsKey("object")) - assertTrue(getFieldCapabilitiesResp.getField("source.device").containsKey("object")) - assertTrue(getFieldCapabilitiesResp.getField("source.device.hwd").containsKey("object")) - // testing both, nested and flatten documents - val testDocuments = mutableListOf() - testDocuments += """{ - "source" : { "device": {"port" : 12345 } }, - "nested_field": { "test1": "some text" } - }""" - testDocuments += """{ - "source.device.port" : "12345" - }""" - testDocuments += """{ - "source.device.port" : 12345 - }""" - testDocuments += """{ - "source" : { "device": {"hwd": { "id": 12345 } } } - }""" - testDocuments += """{ - "source.device.hwd.id" : 12345 - }""" - // Document with join field - testDocuments += """{ - "source" : { "device" : { "hwd": { "id" : 12345 } } }, - "my_join_field": { "name": "question" } - }""" - // Checking if these pointless but valid documents cause any issues - testDocuments += """{ - "source" : {} - }""" - testDocuments += """{ - "source.device" : null - }""" - testDocuments += """{ - "source.device" : {} - }""" - testDocuments += """{ - "source.device.hwd" : {} - }""" - testDocuments += """{ - "source.device.hwd.id" : null - }""" - testDocuments += """{ - "some.multi.val.field" : [12345, 10, 11] - }""" - // Insert all documents - for (i in testDocuments.indices) { - indexDoc(index, "$i", testDocuments[i]) - } - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - val id = monitorResponse.id - val executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - searchAlerts(id) - val table = Table("asc", "id", null, 1, 0, "") - var getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 1) - val findings = searchFindings(id, customFindingsIndex) - assertEquals("Findings saved for test monitor", 6, findings.size) - assertEquals("Didn't match query", 1, findings[0].docLevelQueries.size) - } - - fun `test execute monitor with custom query index old`() { - val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) - val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) - val docQuery3 = DocLevelQuery(query = "source.ip.v4.v0:120", name = "5", fields = listOf()) - val docQuery4 = DocLevelQuery(query = "alias.some.fff:\"us-west-2\"", name 
= "6", fields = listOf()) - val docQuery5 = DocLevelQuery(query = "message:\"This is an error from IAD region\"", name = "7", fields = listOf()) - val docQuery6 = DocLevelQuery(query = "type.subtype:\"some subtype\"", name = "8", fields = listOf()) - val docQuery7 = DocLevelQuery(query = "supertype.type:\"some type\"", name = "9", fields = listOf()) - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(docQuery1, docQuery2, docQuery3, docQuery4, docQuery5, docQuery6, docQuery7) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customFindingsIndex = "custom_findings_index" - val customFindingsIndexPattern = "custom_findings_index-1" - val customQueryIndex = "custom_alerts_index" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources( - queryIndex = customQueryIndex, - findingsIndex = customFindingsIndex, - findingsIndexPattern = customFindingsIndexPattern - ) - ) - val monitorResponse = createMonitor(monitor) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - // Trying to test here few different "nesting" situations and "wierd" characters - val testDoc = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v1" : 12345, - "source.ip.v6.v2" : 16645, - "source.ip.v4.v0" : 120, - "test_bad_char" : "\u0000", - "test_strict_date_time" : "$testTime", - "test_field.some_other_field" : "us-west-2", - "type.subtype" : "some subtype", - "supertype.type" : "some type" - }""" - indexDoc(index, "1", testDoc) - client().admin().indices().putMapping( - PutMappingRequest(index).source("alias.some.fff", "type=alias,path=test_field.some_other_field") - ) - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - indexDoc(index, "1", testDoc) - val id = monitorResponse.id - val executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - searchAlerts(id) - val table = Table("asc", "id", null, 1, 0, "") - var getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 1) - val findings = searchFindings(id, customFindingsIndex) - assertEquals("Findings saved for test monitor", 1, findings.size) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) - assertEquals("Didn't match all 7 queries", 7, findings[0].docLevelQueries.size) - } - - fun `test monitor error alert created and updated with new error`() { - val docQuery = DocLevelQuery(query = "source:12345", name = "1", fields = listOf()) - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(docQuery) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - - val testDoc = """{ - "message" : "This is an error from IAD region" - }""" - - val monitorResponse = createMonitor(monitor) - assertFalse(monitorResponse?.id.isNullOrEmpty()) - - monitor = monitorResponse!!.monitor - val id = monitorResponse.id - - // Close index to force error alert - 
client().admin().indices().close(CloseIndexRequest(index)).get() - - var executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 0) - searchAlerts(id) - var table = Table("asc", "id", null, 1, 0, "") - var getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 1) - Assert.assertTrue(getAlertsResponse.alerts[0].errorMessage == "IndexClosedException[closed]") - // Reopen index - client().admin().indices().open(OpenIndexRequest(index)).get() - // Close queryIndex - client().admin().indices().close(CloseIndexRequest(DOC_LEVEL_QUERIES_INDEX + INDEX_PATTERN_SUFFIX)).get() - - indexDoc(index, "1", testDoc) - - executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 0) - searchAlerts(id) - table = Table("asc", "id", null, 10, 0, "") - getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 1) - Assert.assertTrue(getAlertsResponse.alerts[0].errorHistory[0].message == "IndexClosedException[closed]") - Assert.assertEquals(1, getAlertsResponse.alerts[0].errorHistory.size) - Assert.assertTrue(getAlertsResponse.alerts[0].errorMessage!!.contains("Failed to run percolate search")) - } - - fun `test monitor error alert created trigger run errored 2 times same error`() { - val docQuery = DocLevelQuery(query = "source:12345", name = "1", fields = listOf()) - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(docQuery) - ) - val trigger = randomDocumentLevelTrigger(condition = Script("invalid script code")) - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - - val monitorResponse = createMonitor(monitor) - assertFalse(monitorResponse?.id.isNullOrEmpty()) - - monitor = monitorResponse!!.monitor - val id = monitorResponse.id - - var executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - searchAlerts(id) - var table = Table("asc", "id", null, 1, 0, "") - var getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 1) - Assert.assertTrue(getAlertsResponse.alerts[0].errorMessage!!.contains("Trigger errors")) - - val oldLastNotificationTime = getAlertsResponse.alerts[0].lastNotificationTime - - executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - searchAlerts(id) - table = Table("asc", "id", null, 10, 0, "") - getAlertsResponse = client() - 
.execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 1) - Assert.assertEquals(0, getAlertsResponse.alerts[0].errorHistory.size) - Assert.assertTrue(getAlertsResponse.alerts[0].errorMessage!!.contains("Trigger errors")) - Assert.assertTrue(getAlertsResponse.alerts[0].lastNotificationTime!!.isAfter(oldLastNotificationTime)) - } - - fun `test monitor error alert cleared after successful monitor run`() { - val customAlertIndex = "custom-alert-index" - val customAlertHistoryIndex = "custom-alert-history-index" - val customAlertHistoryIndexPattern = "" - val docQuery = DocLevelQuery(query = "source:12345", name = "1", fields = listOf()) - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(docQuery) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources( - alertsIndex = customAlertIndex, - alertsHistoryIndex = customAlertHistoryIndex, - alertsHistoryIndexPattern = customAlertHistoryIndexPattern - ) - ) - - val monitorResponse = createMonitor(monitor) - assertFalse(monitorResponse?.id.isNullOrEmpty()) - - monitor = monitorResponse!!.monitor - val id = monitorResponse.id - - // Close index to force error alert - client().admin().indices().close(CloseIndexRequest(index)).get() - - var executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 0) - searchAlerts(id) - var table = Table("asc", "id", null, 1, 0, "") - var getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, customAlertIndex)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertEquals(1, getAlertsResponse.alerts.size) - Assert.assertTrue(getAlertsResponse.alerts[0].errorMessage == "IndexClosedException[closed]") - Assert.assertNull(getAlertsResponse.alerts[0].endTime) - - // Open index to have monitor run successfully - client().admin().indices().open(OpenIndexRequest(index)).get() - // Execute monitor again and expect successful run - executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - // Verify that alert is moved to history index - table = Table("asc", "id", null, 10, 0, "") - getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, customAlertIndex)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertEquals(0, getAlertsResponse.alerts.size) - - table = Table("asc", "id", null, 10, 0, "") - getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, customAlertHistoryIndex)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertEquals(1, getAlertsResponse.alerts.size) - Assert.assertTrue(getAlertsResponse.alerts[0].errorMessage == "IndexClosedException[closed]") - Assert.assertNotNull(getAlertsResponse.alerts[0].endTime) - } - - fun `test multiple monitor error alerts cleared after successful monitor 
run`() { - val customAlertIndex = "custom-alert-index" - val customAlertHistoryIndex = "custom-alert-history-index" - val customAlertHistoryIndexPattern = "" - val docQuery = DocLevelQuery(query = "source:12345", name = "1", fields = listOf()) - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(docQuery) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources( - alertsIndex = customAlertIndex, - alertsHistoryIndex = customAlertHistoryIndex, - alertsHistoryIndexPattern = customAlertHistoryIndexPattern - ) - ) - - val monitorResponse = createMonitor(monitor) - assertFalse(monitorResponse?.id.isNullOrEmpty()) - - monitor = monitorResponse!!.monitor - val monitorId = monitorResponse.id - - // Close index to force error alert - client().admin().indices().close(CloseIndexRequest(index)).get() - - var executeMonitorResponse = executeMonitor(monitor, monitorId, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 0) - // Create 10 old alerts to simulate having "old error alerts"(2.6) - for (i in 1..10) { - val startTimestamp = Instant.now().minusSeconds(3600 * 24 * i.toLong()).toEpochMilli() - val oldErrorAlertAsString = """ - {"id":"$i","version":-1,"monitor_id":"$monitorId", - "schema_version":4,"monitor_version":1,"monitor_name":"geCNcHKTlp","monitor_user":{"name":"","backend_roles":[], - "roles":[],"custom_attribute_names":[],"user_requested_tenant":null},"trigger_id":"_nnk_YcB5pHgSZwYwO2r", - "trigger_name":"NoOp trigger","finding_ids":[],"related_doc_ids":[],"state":"ERROR","error_message":"some monitor error", - "alert_history":[],"severity":"","action_execution_results":[], - "start_time":$startTimestamp,"last_notification_time":$startTimestamp,"end_time":null,"acknowledged_time":null} - """.trimIndent() - - client().index( - IndexRequest(customAlertIndex) - .id("$i") - .routing(monitorId) - .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .source(oldErrorAlertAsString, XContentType.JSON) - ).get() - } - var table = Table("asc", "id", null, 1000, 0, "") - var getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", monitorId, customAlertIndex)) - .get() - - Assert.assertTrue(getAlertsResponse != null) - Assert.assertEquals(1 + 10, getAlertsResponse.alerts.size) - val newErrorAlert = getAlertsResponse.alerts.firstOrNull { it.errorMessage == "IndexClosedException[closed]" } - Assert.assertNotNull(newErrorAlert) - Assert.assertNull(newErrorAlert!!.endTime) - - // Open index to have monitor run successfully - client().admin().indices().open(OpenIndexRequest(index)).get() - // Execute monitor again and expect successful run - executeMonitorResponse = executeMonitor(monitor, monitorId, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - // Verify that alert is moved to history index - table = Table("asc", "id", null, 1000, 0, "") - getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", monitorId, customAlertIndex)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertEquals(0, getAlertsResponse.alerts.size) - - 
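// A minimal sketch of the Table paging object built on the next line; the positional
// parameters are assumed here (hypothetical labels, not confirmed by this patch) to be
// sortOrder, sortString, missing, size, startIndex and searchString:
val pagingSketch = Table(
    "asc", // sortOrder: return alerts in ascending order
    "id",  // sortString: field to sort the alerts on
    null,  // missing: no placeholder value for documents lacking the sort field
    1000,  // size: page large enough to fetch every alert in one call
    0,     // startIndex: offset of the first result
    ""     // searchString: empty string applies no extra filtering
)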
table = Table("asc", "id", null, 1000, 0, "") - getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", monitorId, customAlertHistoryIndex)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertEquals(11, getAlertsResponse.alerts.size) - getAlertsResponse.alerts.forEach { alert -> assertNotNull(alert.endTime) } - } - - fun `test execute monitor with custom query index and nested mappings`() { - val docQuery1 = DocLevelQuery(query = "message:\"msg 1 2 3 4\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customFindingsIndex = "custom_findings_index" - val customFindingsIndexPattern = "custom_findings_index-1" - val customQueryIndex = "custom_alerts_index" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources( - queryIndex = customQueryIndex, - findingsIndex = customFindingsIndex, - findingsIndexPattern = customFindingsIndexPattern - ) - ) - val monitorResponse = createMonitor(monitor) - - // We are verifying here that an index with nested mappings and nested aliases - // won't break query matching - - // Create index mappings - val m: MutableMap<String, Any> = HashMap() - val m1: MutableMap<String, Any> = HashMap() - m1["title"] = Map.of("type", "text") - m1["category"] = Map.of("type", "keyword") - m["rule"] = Map.of("type", "nested", "properties", m1) - val properties = Map.of("properties", m) - - client().admin().indices().putMapping( - PutMappingRequest( - index - ).source(properties) - ).get() - - // Put alias for nested fields - val mm: MutableMap<String, Any> = HashMap() - val mm1: MutableMap<String, Any> = HashMap() - mm1["title_alias"] = Map.of("type", "alias", "path", "rule.title") - mm["rule"] = Map.of("type", "nested", "properties", mm1) - val properties1 = Map.of("properties", mm) - client().admin().indices().putMapping( - PutMappingRequest( - index - ).source(properties1) - ).get() - - val testDoc = """{ - "rule": {"title": "some_title"}, - "message": "msg 1 2 3 4" - }""" - indexDoc(index, "2", testDoc) - - client().admin().indices().putMapping( - PutMappingRequest(index).source("alias.some.fff", "type=alias,path=test_field.some_other_field") - ) - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - val id = monitorResponse.id - val executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - searchAlerts(id) - val table = Table("asc", "id", null, 1, 0, "") - var getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, null)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 1) - val findings = searchFindings(id, customFindingsIndex) - assertEquals("Findings saved for test monitor", 1, findings.size) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("2")) - assertEquals("Didn't match query", 1, findings[0].docLevelQueries.size) - } - - fun `test cleanup monitor on partial create monitor failure`() { - val docQuery = DocLevelQuery(query = "dnbkjndsfkjbnds:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput =
DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customQueryIndex = "custom_alerts_index" - val analyzer = "dfbdfbafd" - val testDoc = """{ - "rule": {"title": "some_title"}, - "message": "msg 1 2 3 4" - }""" - indexDoc(index, "2", testDoc) - client().admin().indices() - .create( - CreateIndexRequest(customQueryIndex + "-000001").alias(Alias(customQueryIndex)) - .mapping( - """ - { - "_meta": { - "schema_version": 1 - }, - "properties": { - "query": { - "type": "percolator_ext" - }, - "monitor_id": { - "type": "text" - }, - "index": { - "type": "text" - } - } - } - """.trimIndent() - ) - ).get() - - client().admin().indices().close(CloseIndexRequest(customQueryIndex + "-000001")).get() - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources( - queryIndex = customQueryIndex, - queryIndexMappingsByType = mapOf(Pair("text", mapOf(Pair("analyzer", analyzer)))), - ) - ) - try { - createMonitor(monitor) - fail("monitor creation should fail due to incorrect analyzer name in test setup") - } catch (e: Exception) { - Assert.assertEquals(client().search(SearchRequest(SCHEDULED_JOBS_INDEX)).get().hits.hits.size, 0) - } - } - - fun `test execute monitor without create when no monitors exists`() { - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customQueryIndex = "custom_alerts_index" - val analyzer = "whitespace" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources( - queryIndex = customQueryIndex, - queryIndexMappingsByType = mapOf(Pair("text", mapOf(Pair("analyzer", analyzer)))), - ) - ) - var executeMonitorResponse = executeMonitor(monitor, null) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - - assertIndexNotExists(SCHEDULED_JOBS_INDEX) - - val createMonitorResponse = createMonitor(monitor) - - assertIndexExists(SCHEDULED_JOBS_INDEX) - - indexDoc(index, "1", testDoc) - - executeMonitorResponse = executeMonitor(monitor, createMonitorResponse?.id, dryRun = false) - - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - Assert.assertEquals( - (executeMonitorResponse.monitorRunResult.triggerResults.iterator().next().value as DocumentLevelTriggerRunResult) - .triggeredDocs.size, - 1 - ) - } - - fun `test execute monitor with custom query index and custom field mappings`() { - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customQueryIndex = "custom_alerts_index" - val analyzer = "whitespace" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources( - queryIndex = customQueryIndex, - queryIndexMappingsByType = mapOf(Pair("text", 
mapOf(Pair("analyzer", analyzer)))), - ) - ) - val monitorResponse = createMonitor(monitor) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - indexDoc(index, "1", testDoc) - val id = monitorResponse.id - val executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - searchAlerts(id) - val mapping = client().admin().indices().getMappings(GetMappingsRequest().indices(customQueryIndex)).get() - Assert.assertTrue(mapping.toString().contains("\"analyzer\":\"$analyzer\"")) - } - - fun `test delete monitor deletes all queries and metadata too`() { - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customQueryIndex = "custom_query_index" - val analyzer = "whitespace" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources( - queryIndex = customQueryIndex, - queryIndexMappingsByType = mapOf(Pair("text", mapOf(Pair("analyzer", analyzer)))), - ) - ) - val monitorResponse = createMonitor(monitor) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - indexDoc(index, "1", testDoc) - val monitorId = monitorResponse.id - val executeMonitorResponse = executeMonitor(monitor, monitorId, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - searchAlerts(monitorId) - val clusterStateResponse = client().admin().cluster().state(ClusterStateRequest().indices(customQueryIndex).metadata(true)).get() - val mapping = client().admin().indices().getMappings(GetMappingsRequest().indices(customQueryIndex)).get() - Assert.assertTrue(mapping.toString().contains("\"analyzer\":\"$analyzer\"") == true) - // Verify queries exist - var searchResponse = client().search( - SearchRequest(customQueryIndex).source(SearchSourceBuilder().query(QueryBuilders.matchAllQuery())) - ).get() - assertNotEquals(0, searchResponse.hits.hits.size) - - deleteMonitor(monitorId) - assertIndexNotExists(customQueryIndex + "*") - assertAliasNotExists(customQueryIndex) - } - - fun `test execute monitor with custom findings index and pattern`() { - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customFindingsIndex = "custom_findings_index" - val customFindingsIndexPattern = "" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), 
- dataSources = DataSources(findingsIndex = customFindingsIndex, findingsIndexPattern = customFindingsIndexPattern) - ) - val monitorResponse = createMonitor(monitor) - client().admin().indices().refresh(RefreshRequest("*")) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - indexDoc(index, "1", testDoc) - val id = monitorResponse.id - var executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - - var findings = searchFindings(id, "custom_findings_index*", true) - assertEquals("Findings saved for test monitor", 1, findings.size) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) - - indexDoc(index, "2", testDoc) - executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - searchAlerts(id) - findings = searchFindings(id, "custom_findings_index*", true) - assertEquals("Findings saved for test monitor", 2, findings.size) - assertTrue("Findings saved for test monitor", findings[1].relatedDocIds.contains("2")) - - val indices = getAllIndicesFromPattern("custom_findings_index*") - Assert.assertTrue(indices.isNotEmpty()) - } - - fun `test execute monitor with multiple indices in input success`() { - - val testSourceIndex1 = "test_source_index1" - val testSourceIndex2 = "test_source_index2" - - createIndex(testSourceIndex1, Settings.EMPTY) - createIndex(testSourceIndex2, Settings.EMPTY) - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex1, testSourceIndex2), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customFindingsIndex = "custom_findings_index" - val customFindingsIndexPattern = "" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources(findingsIndex = customFindingsIndex, findingsIndexPattern = customFindingsIndexPattern) - ) - val monitorResponse = createMonitor(monitor) - client().admin().indices().refresh(RefreshRequest("*")) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - - indexDoc(testSourceIndex1, "1", testDoc) - indexDoc(testSourceIndex2, "1", testDoc) - - val id = monitorResponse.id - var executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - - var findings = searchFindings(id, "custom_findings_index*", true) - assertEquals("Findings saved for test monitor", 2, findings.size) - var 
foundFindings = findings.filter { it.relatedDocIds.contains("1") } - assertEquals("Didn't find 2 findings", 2, foundFindings.size) - - indexDoc(testSourceIndex1, "2", testDoc) - indexDoc(testSourceIndex2, "2", testDoc) - executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - searchAlerts(id) - findings = searchFindings(id, "custom_findings_index*", true) - assertEquals("Findings saved for test monitor", 4, findings.size) - foundFindings = findings.filter { it.relatedDocIds.contains("2") } - assertEquals("Didn't find 2 findings", 2, foundFindings.size) - - val indices = getAllIndicesFromPattern("custom_findings_index*") - Assert.assertTrue(indices.isNotEmpty()) - } - - fun `test execute monitor with multiple indices in input first index gets deleted`() { - // Index #1 gets deleted during the test - val testSourceIndex1 = "test_source_index1" - val testSourceIndex2 = "test_source_index2" - - createIndex(testSourceIndex1, Settings.EMPTY) - createIndex(testSourceIndex2, Settings.EMPTY) - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex1, testSourceIndex2), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customFindingsIndex = "custom_findings_index" - val customFindingsIndexPattern = "" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources(findingsIndex = customFindingsIndex, findingsIndexPattern = customFindingsIndexPattern) - ) - val monitorResponse = createMonitor(monitor) - client().admin().indices().refresh(RefreshRequest("*")) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - - indexDoc(testSourceIndex2, "1", testDoc) - - client().admin().indices().delete(DeleteIndexRequest(testSourceIndex1)).get() - - val id = monitorResponse.id - var executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - - var findings = searchFindings(id, "custom_findings_index*", true) - assertEquals("Findings saved for test monitor", 1, findings.size) - var foundFindings = findings.filter { it.relatedDocIds.contains("1") } - assertEquals("Didn't find 1 finding", 1, foundFindings.size) - - indexDoc(testSourceIndex2, "2", testDoc) - executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - searchAlerts(id) - findings = searchFindings(id, "custom_findings_index*", true) - assertEquals("Findings saved for test monitor", 2, findings.size) - foundFindings = findings.filter { it.relatedDocIds.contains("2") } - assertEquals("Didn't find 1 finding", 1, foundFindings.size) - - val indices = getAllIndicesFromPattern("custom_findings_index*") - 
Assert.assertTrue(indices.isNotEmpty()) - } - - fun `test execute monitor with multiple indices in input second index gets deleted`() { - // Index #2 gets deleted during the test - val testSourceIndex1 = "test_source_index1" - val testSourceIndex2 = "test_source_index2" - - createIndex(testSourceIndex1, Settings.EMPTY) - createIndex(testSourceIndex2, Settings.EMPTY) - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex1, testSourceIndex2), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customFindingsIndex = "custom_findings_index" - val customFindingsIndexPattern = "" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources(findingsIndex = customFindingsIndex, findingsIndexPattern = customFindingsIndexPattern) - ) - val monitorResponse = createMonitor(monitor) - client().admin().indices().refresh(RefreshRequest("*")) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - - indexDoc(testSourceIndex1, "1", testDoc) - - client().admin().indices().delete(DeleteIndexRequest(testSourceIndex2)).get() - - val id = monitorResponse.id - var executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - - var findings = searchFindings(id, "custom_findings_index*", true) - assertEquals("Findings saved for test monitor", 1, findings.size) - var foundFindings = findings.filter { it.relatedDocIds.contains("1") } - assertEquals("Didn't find 1 finding", 1, foundFindings.size) - - indexDoc(testSourceIndex1, "2", testDoc) - - executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - searchAlerts(id) - findings = searchFindings(id, "custom_findings_index*", true) - assertEquals("Findings saved for test monitor", 2, findings.size) - foundFindings = findings.filter { it.relatedDocIds.contains("2") } - assertEquals("Didn't find 1 finding", 1, foundFindings.size) - - val indices = getAllIndicesFromPattern("custom_findings_index*") - Assert.assertTrue(indices.isNotEmpty()) - } - - fun `test execute pre-existing monitor and update`() { - val request = CreateIndexRequest(SCHEDULED_JOBS_INDEX).mapping(ScheduledJobIndices.scheduledJobMappings()) - .settings(Settings.builder().put("index.hidden", true).build()) - client().admin().indices().create(request) - val monitorStringWithoutName = """ - { - "monitor": { - "type": "monitor", - "schema_version": 0, - "name": "UayEuXpZtb", - "monitor_type": "doc_level_monitor", - "user": { - "name": "", - "backend_roles": [], - "roles": [], - "custom_attribute_names": [], - "user_requested_tenant": null - }, - "enabled": true, - "enabled_time": 1662753436791, - "schedule": { - "period": { - "interval": 5, - "unit": "MINUTES" - } - }, - "inputs": [{ - "doc_level_input": { - "description": "description", -
"indices": [ - "$index" - ], - "queries": [{ - "id": "63efdcce-b5a1-49f4-a25f-6b5f9496a755", - "name": "3", - "query": "test_field:\"us-west-2\"", - "tags": [] - }] - } - }], - "triggers": [{ - "document_level_trigger": { - "id": "OGnTI4MBv6qt0ATc9Phk", - "name": "mrbHRMevYI", - "severity": "1", - "condition": { - "script": { - "source": "return true", - "lang": "painless" - } - }, - "actions": [] - } - }], - "last_update_time": 1662753436791 - } - } - """.trimIndent() - val monitorId = "abc" - indexDoc(SCHEDULED_JOBS_INDEX, monitorId, monitorStringWithoutName) - val getMonitorResponse = getMonitorResponse(monitorId) - Assert.assertNotNull(getMonitorResponse) - Assert.assertNotNull(getMonitorResponse.monitor) - val monitor = getMonitorResponse.monitor - - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - indexDoc(index, "1", testDoc) - var executeMonitorResponse = executeMonitor(monitor!!, monitorId, false) - Assert.assertNotNull(executeMonitorResponse) - if (executeMonitorResponse != null) { - Assert.assertNotNull(executeMonitorResponse.monitorRunResult.monitorName) - } - val alerts = searchAlerts(monitorId) - assertEquals(1, alerts.size) - - val customAlertsIndex = "custom_alerts_index" - val customQueryIndex = "custom_query_index" - Assert.assertFalse(client().admin().cluster().state(ClusterStateRequest()).get().state.routingTable.hasIndex(customQueryIndex)) - val customFindingsIndex = "custom_findings_index" - val updateMonitorResponse = updateMonitor( - monitor.copy( - id = monitorId, - owner = "security_analytics_plugin", - dataSources = DataSources( - alertsIndex = customAlertsIndex, - queryIndex = customQueryIndex, - findingsIndex = customFindingsIndex - ) - ), - monitorId - ) - Assert.assertNotNull(updateMonitorResponse) - Assert.assertEquals(updateMonitorResponse!!.monitor.owner, "security_analytics_plugin") - indexDoc(index, "2", testDoc) - if (updateMonitorResponse != null) { - executeMonitorResponse = executeMonitor(updateMonitorResponse.monitor, monitorId, false) - } - val findings = searchFindings(monitorId, customFindingsIndex) - assertEquals("Findings saved for test monitor", 1, findings.size) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("2")) - val customAlertsIndexAlerts = searchAlerts(monitorId, customAlertsIndex) - assertEquals("Alert saved for test monitor", 1, customAlertsIndexAlerts.size) - val table = Table("asc", "id", null, 1, 0, "") - var getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, customAlertsIndex)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 1) - getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", monitorId, null)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 1) - val searchRequest = SearchRequest(SCHEDULED_JOBS_INDEX) - var searchMonitorResponse = - client().execute(AlertingActions.SEARCH_MONITORS_ACTION_TYPE, SearchMonitorRequest(searchRequest)) - .get() - Assert.assertEquals(searchMonitorResponse.hits.hits.size, 0) - searchRequest.source().query(MatchQueryBuilder("monitor.owner", "security_analytics_plugin")) - searchMonitorResponse = - 
client().execute(AlertingActions.SEARCH_MONITORS_ACTION_TYPE, SearchMonitorRequest(searchRequest)) - .get() - Assert.assertEquals(searchMonitorResponse.hits.hits.size, 1) - } - - fun `test execute pre-existing monitor without triggers`() { - val request = CreateIndexRequest(SCHEDULED_JOBS_INDEX).mapping(ScheduledJobIndices.scheduledJobMappings()) - .settings(Settings.builder().put("index.hidden", true).build()) - client().admin().indices().create(request) - val monitorStringWithoutName = """ - { - "monitor": { - "type": "monitor", - "schema_version": 0, - "name": "UayEuXpZtb", - "monitor_type": "doc_level_monitor", - "user": { - "name": "", - "backend_roles": [], - "roles": [], - "custom_attribute_names": [], - "user_requested_tenant": null - }, - "enabled": true, - "enabled_time": 1662753436791, - "schedule": { - "period": { - "interval": 5, - "unit": "MINUTES" - } - }, - "inputs": [{ - "doc_level_input": { - "description": "description", - "indices": [ - "$index" - ], - "queries": [{ - "id": "63efdcce-b5a1-49f4-a25f-6b5f9496a755", - "name": "3", - "query": "test_field:\"us-west-2\"", - "tags": [] - }] - } - }], - "triggers": [], - "last_update_time": 1662753436791 - } - } - """.trimIndent() - val monitorId = "abc" - indexDoc(SCHEDULED_JOBS_INDEX, monitorId, monitorStringWithoutName) - val getMonitorResponse = getMonitorResponse(monitorId) - Assert.assertNotNull(getMonitorResponse) - Assert.assertNotNull(getMonitorResponse.monitor) - val monitor = getMonitorResponse.monitor - - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - indexDoc(index, "1", testDoc) - var executeMonitorResponse = executeMonitor(monitor!!, monitorId, false) - Assert.assertNotNull(executeMonitorResponse) - if (executeMonitorResponse != null) { - Assert.assertNotNull(executeMonitorResponse.monitorRunResult.monitorName) - } - val alerts = searchAlerts(monitorId) - assertEquals(0, alerts.size) - - val findings = searchFindings(monitorId) - assertEquals("Findings saved for test monitor", 1, findings.size) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) - } - - fun `test execute monitor with empty source index`() { - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customFindingsIndex = "custom_findings_index" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources(findingsIndex = customFindingsIndex) - ) - val monitorResponse = createMonitor(monitor) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - - val monitorId = monitorResponse.id - var executeMonitorResponse = executeMonitor(monitor, monitorId, false) - - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - - 
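// The runs above pass dryrun = false so alerts and findings are persisted; a sketch of
// the dry-run variant (same helper with the flag flipped), useful for exercising the
// trigger logic without writing anything to the alert or finding indices:
val dryRunResponse = executeMonitor(monitor, monitorId, dryRun = true)
// A dry run should still report per-trigger results, while the findings search below
// would be expected to stay empty.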
refreshIndex(customFindingsIndex) - - var findings = searchFindings(monitorId, customFindingsIndex) - assertEquals("Findings saved for test monitor", 0, findings.size) - - indexDoc(index, "1", testDoc) - - executeMonitor(monitor, monitorId, false) - - refreshIndex(customFindingsIndex) - - findings = searchFindings(monitorId, customFindingsIndex) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) - } - - fun `test execute GetFindingsAction with monitorId param`() { - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customFindingsIndex = "custom_findings_index" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources(findingsIndex = customFindingsIndex) - ) - val monitorResponse = createMonitor(monitor) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - indexDoc(index, "1", testDoc) - val monitorId = monitorResponse.id - val executeMonitorResponse = executeMonitor(monitor, monitorId, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - searchAlerts(monitorId) - val findings = searchFindings(monitorId, customFindingsIndex) - assertEquals("Findings saved for test monitor", 1, findings.size) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) - // fetch findings - pass monitorId as reference to finding_index - val findingsFromAPI = getFindings(findings.get(0).id, monitorId, null) - assertEquals( - "Findings mismatch between manually searched and fetched via GetFindingsAction", - findings.get(0).id, - findingsFromAPI.get(0).id - ) - } - - fun `test execute GetFindingsAction with unknown monitorId`() { - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customFindingsIndex = "custom_findings_index" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources(findingsIndex = customFindingsIndex) - ) - val monitorResponse = createMonitor(monitor) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - indexDoc(index, "1", testDoc) - val monitorId = monitorResponse.id - val executeMonitorResponse = executeMonitor(monitor, monitorId, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - searchAlerts(monitorId) - val findings = 
searchFindings(monitorId, customFindingsIndex) - assertEquals("Findings saved for test monitor", 1, findings.size) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) - // fetch findings - pass a monitorId that doesn't exist and expect the monitor lookup to fail - try { - getFindings(findings.get(0).id, "unknown_monitor_id_123456789", null) - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception did not contain expected Monitor not found error", - it.contains("Monitor not found") - ) - } - } - } - - fun `test execute monitor with owner field`() { - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customAlertsIndex = "custom_alerts_index" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources(alertsIndex = customAlertsIndex), - owner = "owner" - ) - val monitorResponse = createMonitor(monitor) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - Assert.assertEquals(monitor.owner, "owner") - indexDoc(index, "1", testDoc) - val id = monitorResponse.id - val executeMonitorResponse = executeMonitor(monitor, id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - val alerts = searchAlerts(id, customAlertsIndex) - assertEquals("Alert saved for test monitor", 1, alerts.size) - val table = Table("asc", "id", null, 1, 0, "") - var getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", null, customAlertsIndex)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 1) - getAlertsResponse = client() - .execute(AlertingActions.GET_ALERTS_ACTION_TYPE, GetAlertsRequest(table, "ALL", "ALL", id, null)) - .get() - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 1) - } - - fun `test execute GetFindingsAction with unknown findingIndex param`() { - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customFindingsIndex = "custom_findings_index" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources(findingsIndex = customFindingsIndex) - ) - val monitorResponse = createMonitor(monitor) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - indexDoc(index, "1", testDoc) - val monitorId = monitorResponse.id - val executeMonitorResponse = executeMonitor(monitor, monitorId, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - searchAlerts(monitorId) - val findings = searchFindings(monitorId, customFindingsIndex) - assertEquals("Findings saved for test monitor", 1, findings.size) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) - // fetch findings - pass a findingIndexName that doesn't exist and expect the search to fail - try { - getFindings(findings.get(0).id, null, "unknown_finding_index_123456789") - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception did not contain expected no such index error", - it.contains("no such index") - ) - } - } - } - - fun `test search custom alerts history index`() { - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) - val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customAlertsIndex = "custom_alerts_index" - val customAlertsHistoryIndex = "custom_alerts_history_index" - val customAlertsHistoryIndexPattern = "" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger1, trigger2), - dataSources = DataSources( - alertsIndex = customAlertsIndex, - alertsHistoryIndex = customAlertsHistoryIndex, - alertsHistoryIndexPattern = customAlertsHistoryIndexPattern - ) - ) - val monitorResponse = createMonitor(monitor) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - indexDoc(index, "1", testDoc) - val monitorId = monitorResponse.id - val executeMonitorResponse = executeMonitor(monitor, monitorId, false) - var alertsBefore = searchAlerts(monitorId, customAlertsIndex) - Assert.assertEquals(2, alertsBefore.size) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 2) - // Remove 1 trigger from monitor to force moveAlerts call to move alerts to history index - monitor = monitor.copy(triggers = listOf(trigger1)) - updateMonitor(monitor, monitorId) - - var alerts = listOf<Alert>() - OpenSearchTestCase.waitUntil({ - alerts = searchAlerts(monitorId, customAlertsHistoryIndex) - if (alerts.size == 1) { - return@waitUntil true - } - return@waitUntil false - }, 30, TimeUnit.SECONDS) - assertEquals("Alerts from custom history index", 1, alerts.size) - } - - fun `test search custom alerts history index after alert ack`() { - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) - val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customAlertsIndex = "custom_alerts_index" - val customAlertsHistoryIndex = "custom_alerts_history_index" - val customAlertsHistoryIndexPattern = "" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger1, trigger2), - dataSources = DataSources( - alertsIndex = customAlertsIndex, - alertsHistoryIndex = customAlertsHistoryIndex, - alertsHistoryIndexPattern = customAlertsHistoryIndexPattern - ) - ) - val monitorResponse = createMonitor(monitor) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - indexDoc(index, "1", testDoc) - val monitorId = monitorResponse.id - val executeMonitorResponse = executeMonitor(monitor, monitorId, false) - var alertsBefore = searchAlerts(monitorId, customAlertsIndex) - Assert.assertEquals(2, alertsBefore.size) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 2) - - var alerts = listOf<Alert>() - OpenSearchTestCase.waitUntil({ - alerts = searchAlerts(monitorId, customAlertsIndex) - if (alerts.size == 2) { - return@waitUntil true - } - return@waitUntil false - }, 30, TimeUnit.SECONDS) - assertEquals("Alerts from custom index", 2, alerts.size) - - val ackReq = AcknowledgeAlertRequest(monitorId, alerts.map { it.id }.toMutableList(), WriteRequest.RefreshPolicy.IMMEDIATE) - client().execute(AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE, ackReq).get() - - // verify alerts moved from alert index to alert history index - alerts = listOf() - OpenSearchTestCase.waitUntil({ - alerts = searchAlerts(monitorId, customAlertsHistoryIndex) - if (alerts.size == 2) { - return@waitUntil true - } - return@waitUntil false - }, 30, TimeUnit.SECONDS) - assertEquals("Alerts from custom history index", 2, alerts.size) - - // verify alerts deleted from alert index - alerts = listOf() - OpenSearchTestCase.waitUntil({ - alerts = searchAlerts(monitorId, customAlertsIndex) - if (alerts.isEmpty()) { - return@waitUntil true - } - return@waitUntil false - }, 30, TimeUnit.SECONDS) - assertEquals("Alerts from custom index", 0, alerts.size) - } - - fun `test get alerts by list of monitors containing both existent and non-existent ids`() { - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - ) - val monitorResponse = createMonitor(monitor) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_strict_date_time" : "$testTime", - "test_field" : "us-west-2" - }""" - - monitor = monitorResponse!!.monitor - - val id = monitorResponse.id - - var monitor1 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - ) - val monitorResponse1 = createMonitor(monitor1) - monitor1 = monitorResponse1!!.monitor - val id1 = monitorResponse1.id - indexDoc(index, "1", testDoc) - executeMonitor(monitor1, id1, false) - executeMonitor(monitor, id, false) - val alerts = searchAlerts(id) - assertEquals("Alert saved for test monitor", 1, alerts.size) - val
alerts1 = searchAlerts(id) - assertEquals("Alert saved for test monitor", 1, alerts1.size) - val table = Table("asc", "id", null, 1000, 0, "") - var getAlertsResponse = client() - .execute( - AlertingActions.GET_ALERTS_ACTION_TYPE, - GetAlertsRequest(table, "ALL", "ALL", null, null) - ) - .get() - - Assert.assertTrue(getAlertsResponse != null) - Assert.assertTrue(getAlertsResponse.alerts.size == 2) - - var alertsResponseForRequestWithoutCustomIndex = client() - .execute( - AlertingActions.GET_ALERTS_ACTION_TYPE, - GetAlertsRequest(table, "ALL", "ALL", null, null, monitorIds = listOf(id, id1, "1", "2")) - ) - .get() - Assert.assertTrue(alertsResponseForRequestWithoutCustomIndex != null) - Assert.assertTrue(alertsResponseForRequestWithoutCustomIndex.alerts.size == 2) - val alertIds = getAlertsResponse.alerts.stream().map { alert -> alert.id }.collect(Collectors.toList()) - var getAlertsByAlertIds = client() - .execute( - AlertingActions.GET_ALERTS_ACTION_TYPE, - GetAlertsRequest(table, "ALL", "ALL", null, null, alertIds = alertIds) - ) - .get() - Assert.assertTrue(getAlertsByAlertIds != null) - Assert.assertTrue(getAlertsByAlertIds.alerts.size == 2) - - var getAlertsByWrongAlertIds = client() - .execute( - AlertingActions.GET_ALERTS_ACTION_TYPE, - GetAlertsRequest(table, "ALL", "ALL", null, null, alertIds = listOf("1", "2")) - ) - .get() - - Assert.assertTrue(getAlertsByWrongAlertIds != null) - Assert.assertEquals(getAlertsByWrongAlertIds.alerts.size, 0) - } - - fun `test queryIndex rollover and delete monitor success`() { - - val testSourceIndex = "test_source_index" - createIndex(testSourceIndex, Settings.builder().put("index.mapping.total_fields.limit", "10000").build()) - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - // This doc should create close to 10000 (limit) fields in the index mapping. It's easier to add mappings like this than via the API - val docPayload: StringBuilder = StringBuilder(100000) - docPayload.append("{") - for (i in 1..3300) { - docPayload.append(""" "id$i.somefield.somefield$i":$i,""") - } - docPayload.append("\"test_field\" : \"us-west-2\" }") - indexDoc(testSourceIndex, "1", docPayload.toString()) - // Create monitor #1 - var monitorResponse = createMonitor(monitor) - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - // Execute monitor #1 - var executeMonitorResponse = executeMonitor(monitor, monitorResponse.id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - // Create monitor #2 - var monitorResponse2 = createMonitor(monitor) - assertFalse(monitorResponse2?.id.isNullOrEmpty()) - monitor = monitorResponse2!!.monitor - // Insert doc #2.
This one should trigger creation of alerts during monitor exec - val testDoc = """{ - "test_field" : "us-west-2" - }""" - indexDoc(testSourceIndex, "2", testDoc) - // Execute monitor #2 - var executeMonitorResponse2 = executeMonitor(monitor, monitorResponse2.id, false) - Assert.assertEquals(executeMonitorResponse2!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse2.monitorRunResult.triggerResults.size, 1) - - refreshIndex(AlertIndices.ALERT_INDEX) - var alerts = searchAlerts(monitorResponse2.id) - Assert.assertTrue(alerts != null) - Assert.assertTrue(alerts.size == 1) - - // Both monitors used same queryIndex alias. Since source index has close to limit amount of fields in mappings, - // we expect that creation of second monitor would trigger rollover of queryIndex - var getIndexResponse: GetIndexResponse = - client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() - assertEquals(2, getIndexResponse.indices.size) - assertEquals(DOC_LEVEL_QUERIES_INDEX + "-000001", getIndexResponse.indices[0]) - assertEquals(DOC_LEVEL_QUERIES_INDEX + "-000002", getIndexResponse.indices[1]) - // Now we'll verify that execution of both monitors still works - indexDoc(testSourceIndex, "3", testDoc) - // Exec Monitor #1 - executeMonitorResponse = executeMonitor(monitor, monitorResponse.id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - refreshIndex(AlertIndices.ALERT_INDEX) - alerts = searchAlerts(monitorResponse.id) - Assert.assertTrue(alerts != null) - Assert.assertTrue(alerts.size == 2) - // Exec Monitor #2 - executeMonitorResponse = executeMonitor(monitor, monitorResponse2.id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - refreshIndex(AlertIndices.ALERT_INDEX) - alerts = searchAlerts(monitorResponse2.id) - Assert.assertTrue(alerts != null) - Assert.assertTrue(alerts.size == 2) - // Delete monitor #1 - client().execute( - AlertingActions.DELETE_MONITOR_ACTION_TYPE, DeleteMonitorRequest(monitorResponse.id, WriteRequest.RefreshPolicy.IMMEDIATE) - ).get() - // Expect first concrete queryIndex to be deleted since that one was only used by this monitor - getIndexResponse = - client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() - assertEquals(1, getIndexResponse.indices.size) - assertEquals(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "-000002", getIndexResponse.indices[0]) - // Delete monitor #2 - client().execute( - AlertingActions.DELETE_MONITOR_ACTION_TYPE, DeleteMonitorRequest(monitorResponse2.id, WriteRequest.RefreshPolicy.IMMEDIATE) - ).get() - // Expect second concrete queryIndex to be deleted since that one was only used by this monitor - getIndexResponse = - client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() - assertEquals(0, getIndexResponse.indices.size) - } - - fun `test queryIndex rollover failure source_index field count over limit`() { - - val testSourceIndex = "test_source_index" - createIndex(testSourceIndex, Settings.EMPTY) - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex), 
listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - // This doc should create 999 fields in mapping, only 1 field less then limit - val docPayload: StringBuilder = StringBuilder(100000) - docPayload.append("{") - for (i in 1..998) { - docPayload.append(""" "id$i":$i,""") - } - docPayload.append("\"test_field\" : \"us-west-2\" }") - indexDoc(testSourceIndex, "1", docPayload.toString()) - // Create monitor and expect failure. - // queryIndex has 3 fields in mappings initially so 999 + 3 > 1000(default limit) - try { - createMonitor(monitor) - } catch (e: Exception) { - assertTrue(e.message?.contains("can't process index [$testSourceIndex] due to field mapping limit") ?: false) - } - } - - fun `test queryIndex not rolling over multiple monitors`() { - val testSourceIndex = "test_source_index" - createIndex(testSourceIndex, Settings.EMPTY) - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - // Create doc with 11 fields - val docPayload: StringBuilder = StringBuilder(1000) - docPayload.append("{") - for (i in 1..10) { - docPayload.append(""" "id$i":$i,""") - } - docPayload.append("\"test_field\" : \"us-west-2\" }") - indexDoc(testSourceIndex, "1", docPayload.toString()) - // Create monitor #1 - var monitorResponse = createMonitor(monitor) - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - // Execute monitor #1 - var executeMonitorResponse = executeMonitor(monitor, monitorResponse.id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - // Create monitor #2 - var monitorResponse2 = createMonitor(monitor) - assertFalse(monitorResponse2?.id.isNullOrEmpty()) - monitor = monitorResponse2!!.monitor - // Insert doc #2. This one should trigger creation of alerts during monitor exec - val testDoc = """{ - "test_field" : "us-west-2" - }""" - indexDoc(testSourceIndex, "2", testDoc) - // Execute monitor #2 - var executeMonitorResponse2 = executeMonitor(monitor, monitorResponse2.id, false) - Assert.assertEquals(executeMonitorResponse2!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse2.monitorRunResult.triggerResults.size, 1) - - refreshIndex(AlertIndices.ALERT_INDEX) - var alerts = searchAlerts(monitorResponse2.id) - Assert.assertTrue(alerts != null) - Assert.assertTrue(alerts.size == 1) - - // Both monitors used same queryIndex. 
Since source index has well below limit amount of fields in mappings, - // we expect only 1 backing queryIndex - val getIndexResponse: GetIndexResponse = - client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() - assertEquals(1, getIndexResponse.indices.size) - // Now we'll verify that execution of both monitors work - indexDoc(testSourceIndex, "3", testDoc) - // Exec Monitor #1 - executeMonitorResponse = executeMonitor(monitor, monitorResponse.id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - refreshIndex(AlertIndices.ALERT_INDEX) - alerts = searchAlerts(monitorResponse.id) - Assert.assertTrue(alerts != null) - Assert.assertTrue(alerts.size == 2) - // Exec Monitor #2 - executeMonitorResponse = executeMonitor(monitor, monitorResponse2.id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - refreshIndex(AlertIndices.ALERT_INDEX) - alerts = searchAlerts(monitorResponse2.id) - Assert.assertTrue(alerts != null) - Assert.assertTrue(alerts.size == 2) - } - - /** - * 1. Create monitor with input source_index with 9000 fields in mappings - can fit 1 in queryIndex - * 2. Update monitor and change input source_index to a new one with 9000 fields in mappings - * 3. Expect queryIndex rollover resulting in 2 backing indices - * 4. Delete monitor and expect that all backing indices are deleted - * */ - fun `test updating monitor no execution queryIndex rolling over`() { - val testSourceIndex1 = "test_source_index1" - val testSourceIndex2 = "test_source_index2" - createIndex(testSourceIndex1, Settings.builder().put("index.mapping.total_fields.limit", "10000").build()) - createIndex(testSourceIndex2, Settings.builder().put("index.mapping.total_fields.limit", "10000").build()) - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex1), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - // This doc should create close to 10000 (limit) fields in index mapping. 
It's easier to add mappings like this then via api - val docPayload: StringBuilder = StringBuilder(100000) - docPayload.append("{") - for (i in 1..9000) { - docPayload.append(""" "id$i":$i,""") - } - docPayload.append("\"test_field\" : \"us-west-2\" }") - // Indexing docs here as an easier means to set index mappings - indexDoc(testSourceIndex1, "1", docPayload.toString()) - indexDoc(testSourceIndex2, "1", docPayload.toString()) - // Create monitor - var monitorResponse = createMonitor(monitor) - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - - // Update monitor and change input - val updatedMonitor = monitor.copy( - inputs = listOf( - DocLevelMonitorInput("description", listOf(testSourceIndex2), listOf(docQuery)) - ) - ) - updateMonitor(updatedMonitor, updatedMonitor.id) - assertFalse(monitorResponse?.id.isNullOrEmpty()) - - // Expect queryIndex to rollover after setting new source_index with close to limit amount of fields in mappings - var getIndexResponse: GetIndexResponse = - client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() - assertEquals(2, getIndexResponse.indices.size) - - deleteMonitor(updatedMonitor.id) - waitUntil { - getIndexResponse = - client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() - return@waitUntil getIndexResponse.indices.isEmpty() - } - assertEquals(0, getIndexResponse.indices.size) - } - - fun `test queryIndex gets increased max fields in mappings`() { - val testSourceIndex = "test_source_index" - createIndex(testSourceIndex, Settings.builder().put("index.mapping.total_fields.limit", "10000").build()) - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - // This doc should create 12000 fields in index mapping. 
It's easier to add mappings like this then via api - val docPayload: StringBuilder = StringBuilder(100000) - docPayload.append("{") - for (i in 1..9998) { - docPayload.append(""" "id$i":$i,""") - } - docPayload.append("\"test_field\" : \"us-west-2\" }") - // Indexing docs here as an easier means to set index mappings - indexDoc(testSourceIndex, "1", docPayload.toString()) - // Create monitor - var monitorResponse = createMonitor(monitor) - assertFalse(monitorResponse?.id.isNullOrEmpty()) - monitor = monitorResponse!!.monitor - - // Expect queryIndex to rollover after setting new source_index with close to limit amount of fields in mappings - var getIndexResponse: GetIndexResponse = - client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() - assertEquals(1, getIndexResponse.indices.size) - val field_max_limit = getIndexResponse - .getSetting(DOC_LEVEL_QUERIES_INDEX + "-000001", MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.key).toInt() - - assertEquals(10000 + DocLevelMonitorQueries.QUERY_INDEX_BASE_FIELDS_COUNT, field_max_limit) - - deleteMonitor(monitorResponse.id) - waitUntil { - getIndexResponse = - client().admin().indices().getIndex(GetIndexRequest().indices(ScheduledJob.DOC_LEVEL_QUERIES_INDEX + "*")).get() - return@waitUntil getIndexResponse.indices.isEmpty() - } - assertEquals(0, getIndexResponse.indices.size) - } - - fun `test queryIndex bwc when index was not an alias`() { - createIndex(DOC_LEVEL_QUERIES_INDEX, Settings.builder().put("index.hidden", true).build()) - assertIndexExists(DOC_LEVEL_QUERIES_INDEX) - - val testSourceIndex = "test_source_index" - createIndex(testSourceIndex, Settings.EMPTY) - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testSourceIndex), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - // This doc should create 999 fields in mapping, only 1 field less then limit - val docPayload = "{\"test_field\" : \"us-west-2\" }" - // Create monitor - try { - var monitorResponse = createMonitor(monitor) - indexDoc(testSourceIndex, "1", docPayload) - var executeMonitorResponse = executeMonitor(monitor, monitorResponse!!.id, false) - Assert.assertEquals(executeMonitorResponse!!.monitorRunResult.monitorName, monitor.name) - Assert.assertEquals(executeMonitorResponse.monitorRunResult.triggerResults.size, 1) - refreshIndex(AlertIndices.ALERT_INDEX) - val alerts = searchAlerts(monitorResponse.id) - Assert.assertTrue(alerts != null) - Assert.assertTrue(alerts.size == 1) - // check if DOC_LEVEL_QUERIES_INDEX alias exists - assertAliasExists(DOC_LEVEL_QUERIES_INDEX) - } catch (e: Exception) { - fail("Exception happend but it shouldn't!") - } - } - - // TODO - revisit single node integ tests setup to figure out why we cannot have multiple test classes implementing it - - fun `test execute workflow with custom alerts and finding index when bucket monitor is used in chained finding of doc monitor`() { - val query = QueryBuilders.rangeQuery("test_strict_date_time") - .gt("{{period_end}}||-10d") - .lte("{{period_end}}") - .format("epoch_millis") - val compositeSources = listOf( - TermsValuesSourceBuilder("test_field_1").field("test_field_1") - ) - val customAlertsHistoryIndex = "custom_alerts_history_index" - val customAlertsHistoryIndexPattern = "" - val 
compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) - val input = SearchInput(indices = listOf(index), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) - // Bucket level monitor will reduce the size of matched doc ids on those that belong - // to a bucket that contains more than 1 document after term grouping - val triggerScript = """ - params.docCount > 1 - """.trimIndent() - - var trigger = randomBucketLevelTrigger() - trigger = trigger.copy( - bucketSelector = BucketSelectorExtAggregationBuilder( - name = trigger.id, - bucketsPathsMap = mapOf("docCount" to "_count"), - script = Script(triggerScript), - parentBucketPath = "composite_agg", - filter = null, - ) - ) - val bucketCustomAlertsIndex = "custom_alerts_index" - val bucketCustomFindingsIndex = "custom_findings_index" - val bucketCustomFindingsIndexPattern = "custom_findings_index-1" - - val bucketLevelMonitorResponse = createMonitor( - randomBucketLevelMonitor( - inputs = listOf(input), - enabled = false, - triggers = listOf(trigger), - dataSources = DataSources( - findingsEnabled = true, - alertsIndex = bucketCustomAlertsIndex, - findingsIndex = bucketCustomFindingsIndex, - findingsIndexPattern = bucketCustomFindingsIndexPattern - ) - ) - )!! - - val docQuery1 = DocLevelQuery(query = "test_field_1:\"test_value_2\"", name = "1", fields = listOf()) - val docQuery2 = DocLevelQuery(query = "test_field_1:\"test_value_1\"", name = "2", fields = listOf()) - val docQuery3 = DocLevelQuery(query = "test_field_1:\"test_value_3\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1, docQuery2, docQuery3)) - val docTrigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val docCustomAlertsIndex = "custom_alerts_index" - val docCustomFindingsIndex = "custom_findings_index" - val docCustomFindingsIndexPattern = "custom_findings_index-1" - var docLevelMonitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(docTrigger), - dataSources = DataSources( - alertsIndex = docCustomAlertsIndex, - findingsIndex = docCustomFindingsIndex, - findingsIndexPattern = docCustomFindingsIndexPattern - ) - ) - - val docLevelMonitorResponse = createMonitor(docLevelMonitor)!! - // 1. bucketMonitor (chainedFinding = null) 2. docMonitor (chainedFinding = bucketMonitor) - var workflow = randomWorkflow( - monitorIds = listOf(bucketLevelMonitorResponse.id, docLevelMonitorResponse.id), - enabled = false, - auditDelegateMonitorAlerts = false - ) - val workflowResponse = upsertWorkflow(workflow)!! - val workflowById = searchWorkflow(workflowResponse.id) - assertNotNull(workflowById) - - // Creates 5 documents - insertSampleTimeSerializedData( - index, - listOf( - "test_value_1", - "test_value_1", // adding duplicate to verify aggregation - "test_value_2", - "test_value_2", - "test_value_3" - ) - ) - - val workflowId = workflowResponse.id - // 1. bucket level monitor should reduce the doc findings to 4 (1, 2, 3, 4) - // 2. Doc level monitor will match those 4 documents although it contains rules for matching all 5 documents (docQuery3 matches the fifth) - val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! 
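- // Expected per delegate below: the bucket monitor returns 3 composite buckets and alerts on the 2 buckets holding more than one doc (1 finding covering docs 1-4), and the chained doc monitor triggers on only those same 4 docs.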
- assertNotNull(executeWorkflowResponse) - - for (monitorRunResults in executeWorkflowResponse.workflowRunResult.monitorRunResults) { - if (bucketLevelMonitorResponse.monitor.name == monitorRunResults.monitorName) { - val searchResult = monitorRunResults.inputResults.results.first() - - @Suppress("UNCHECKED_CAST") - val buckets = searchResult.stringMap("aggregations")?.stringMap("composite_agg") - ?.get("buckets") as List<kotlin.collections.Map<String, Any>> - assertEquals("Incorrect search result", 3, buckets.size) - - val getAlertsResponse = assertAlerts(bucketLevelMonitorResponse.id, bucketCustomAlertsIndex, 2, workflowId) - assertAcknowledges(getAlertsResponse.alerts, bucketLevelMonitorResponse.id, 2) - assertFindings(bucketLevelMonitorResponse.id, bucketCustomFindingsIndex, 1, 4, listOf("1", "2", "3", "4")) - } else { - assertEquals(1, monitorRunResults.inputResults.results.size) - val values = monitorRunResults.triggerResults.values - assertEquals(1, values.size) - @Suppress("UNCHECKED_CAST") - val docLevelTrigger = values.iterator().next() as DocumentLevelTriggerRunResult - val triggeredDocIds = docLevelTrigger.triggeredDocs.map { it.split("|")[0] } - val expectedTriggeredDocIds = listOf("1", "2", "3", "4") - assertEquals(expectedTriggeredDocIds, triggeredDocIds.sorted()) - - val getAlertsResponse = assertAlerts(docLevelMonitorResponse.id, docCustomAlertsIndex, 4, workflowId) - assertAcknowledges(getAlertsResponse.alerts, docLevelMonitorResponse.id, 4) - assertFindings(docLevelMonitorResponse.id, docCustomFindingsIndex, 4, 4, listOf("1", "2", "3", "4")) - } - } - } - - fun `test execute workflow with custom alerts and finding index when doc level delegate is used in chained finding`() { - val docQuery1 = DocLevelQuery(query = "test_field_1:\"test_value_2\"", name = "1", fields = listOf()) - val docQuery2 = DocLevelQuery(query = "test_field_1:\"test_value_3\"", name = "2", fields = listOf()) - - var docLevelMonitor = randomDocumentLevelMonitor( - inputs = listOf(DocLevelMonitorInput("description", listOf(index), listOf(docQuery1, docQuery2))), - triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)), - dataSources = DataSources( - alertsIndex = "custom_alerts_index", - findingsIndex = "custom_findings_index", - findingsIndexPattern = "custom_findings_index-1" - ) - ) - - val docLevelMonitorResponse = createMonitor(docLevelMonitor)!!
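- // The doc level monitor above is the entry delegate; a bucket level monitor, a second doc level monitor and a query level monitor are chained behind it below.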
- - val query = QueryBuilders.rangeQuery("test_strict_date_time") - .gt("{{period_end}}||-10d") - .lte("{{period_end}}") - .format("epoch_millis") - val compositeSources = listOf( - TermsValuesSourceBuilder("test_field_1").field("test_field_1") - ) - val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) - val input = SearchInput(indices = listOf(index), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) - // Bucket level monitor will reduce the size of matched doc ids on those that belong to a bucket that contains more than 1 document after term grouping - val triggerScript = """ - params.docCount > 1 - """.trimIndent() - - var trigger = randomBucketLevelTrigger() - trigger = trigger.copy( - bucketSelector = BucketSelectorExtAggregationBuilder( - name = trigger.id, - bucketsPathsMap = mapOf("docCount" to "_count"), - script = Script(triggerScript), - parentBucketPath = "composite_agg", - filter = null, - ) - ) - - val bucketLevelMonitorResponse = createMonitor( - randomBucketLevelMonitor( - inputs = listOf(input), - enabled = false, - triggers = listOf(trigger), - dataSources = DataSources( - findingsEnabled = true, - alertsIndex = "custom_alerts_index", - findingsIndex = "custom_findings_index", - findingsIndexPattern = "custom_findings_index-1" - ) - ) - )!! - - var docLevelMonitor1 = randomDocumentLevelMonitor( - // Match the documents with test_field_1: test_value_3 - inputs = listOf(DocLevelMonitorInput("description", listOf(index), listOf(docQuery2))), - triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)), - dataSources = DataSources( - findingsEnabled = true, - alertsIndex = "custom_alerts_index_1", - findingsIndex = "custom_findings_index_1", - findingsIndexPattern = "custom_findings_index_1-1" - ) - ) - - val docLevelMonitorResponse1 = createMonitor(docLevelMonitor1)!! - - val queryMonitorInput = SearchInput( - indices = listOf(index), - query = SearchSourceBuilder().query( - QueryBuilders - .rangeQuery("test_strict_date_time") - .gt("{{period_end}}||-10d") - .lte("{{period_end}}") - .format("epoch_millis") - ) - ) - val queryTriggerScript = """ - return ctx.results[0].hits.hits.size() > 0 - """.trimIndent() - - val queryLevelTrigger = randomQueryLevelTrigger(condition = Script(queryTriggerScript)) - val queryMonitorResponse = - createMonitor(randomQueryLevelMonitor(inputs = listOf(queryMonitorInput), triggers = listOf(queryLevelTrigger)))!! - - // 1. docMonitor (chainedFinding = null) 2. bucketMonitor (chainedFinding = docMonitor) 3. docMonitor (chainedFinding = bucketMonitor) 4. queryMonitor (chainedFinding = docMonitor 3) - var workflow = randomWorkflow( - monitorIds = listOf( - docLevelMonitorResponse.id, - bucketLevelMonitorResponse.id, - docLevelMonitorResponse1.id, - queryMonitorResponse.id - ), - auditDelegateMonitorAlerts = false - ) - val workflowResponse = upsertWorkflow(workflow)!! - val workflowById = searchWorkflow(workflowResponse.id) - assertNotNull(workflowById) - - // Creates 5 documents - insertSampleTimeSerializedData( - index, - listOf( - "test_value_1", - "test_value_1", // adding duplicate to verify aggregation - "test_value_2", - "test_value_2", - "test_value_3", - "test_value_3" - ) - ) - - val workflowId = workflowResponse.id - // 1. Doc level monitor should reduce the doc findings to 4 (3 - test_value_2, 4 - test_value_2, 5 - test_value_3, 6 - test_value_3) - // 2. 
Bucket level monitor will fetch and match only the docs from the current findings execution, although it contains rules for matching documents which have test_value_2 and test_value_3 - val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! - assertNotNull(executeWorkflowResponse) - - for (monitorRunResults in executeWorkflowResponse.workflowRunResult.monitorRunResults) { - when (monitorRunResults.monitorName) { - // Verify first doc level monitor execution, alerts and findings - docLevelMonitorResponse.monitor.name -> { - assertEquals(1, monitorRunResults.inputResults.results.size) - val values = monitorRunResults.triggerResults.values - assertEquals(1, values.size) - @Suppress("UNCHECKED_CAST") - val docLevelTrigger = values.iterator().next() as DocumentLevelTriggerRunResult - val triggeredDocIds = docLevelTrigger.triggeredDocs.map { it.split("|")[0] } - val expectedTriggeredDocIds = listOf("3", "4", "5", "6") - assertEquals(expectedTriggeredDocIds, triggeredDocIds.sorted()) - - val getAlertsResponse = - assertAlerts(docLevelMonitorResponse.id, docLevelMonitorResponse.monitor.dataSources.alertsIndex, 4, workflowId) - assertAcknowledges(getAlertsResponse.alerts, docLevelMonitorResponse.id, 4) - assertFindings( - docLevelMonitorResponse.id, - docLevelMonitorResponse.monitor.dataSources.findingsIndex, - 4, - 4, - listOf("3", "4", "5", "6") - ) - } - // Verify second bucket level monitor execution, alerts and findings - bucketLevelMonitorResponse.monitor.name -> { - val searchResult = monitorRunResults.inputResults.results.first() - - @Suppress("UNCHECKED_CAST") - val buckets = - searchResult - .stringMap("aggregations")?.stringMap("composite_agg") - ?.get("buckets") as List<kotlin.collections.Map<String, Any>> - assertEquals("Incorrect search result", 2, buckets.size) - - val getAlertsResponse = - assertAlerts( - bucketLevelMonitorResponse.id, - bucketLevelMonitorResponse.monitor.dataSources.alertsIndex, - 2, - workflowId - ) - assertAcknowledges(getAlertsResponse.alerts, bucketLevelMonitorResponse.id, 2) - assertFindings( - bucketLevelMonitorResponse.id, - bucketLevelMonitorResponse.monitor.dataSources.findingsIndex, - 1, - 4, - listOf("3", "4", "5", "6") - ) - } - // Verify third doc level monitor execution, alerts and findings - docLevelMonitorResponse1.monitor.name -> { - assertEquals(1, monitorRunResults.inputResults.results.size) - val values = monitorRunResults.triggerResults.values - assertEquals(1, values.size) - @Suppress("UNCHECKED_CAST") - val docLevelTrigger = values.iterator().next() as DocumentLevelTriggerRunResult - val triggeredDocIds = docLevelTrigger.triggeredDocs.map { it.split("|")[0] } - val expectedTriggeredDocIds = listOf("5", "6") - assertEquals(expectedTriggeredDocIds, triggeredDocIds.sorted()) - - val getAlertsResponse = - assertAlerts(docLevelMonitorResponse1.id, docLevelMonitorResponse1.monitor.dataSources.alertsIndex, 2, workflowId) - assertAcknowledges(getAlertsResponse.alerts, docLevelMonitorResponse1.id, 2) - assertFindings( - docLevelMonitorResponse1.id, - docLevelMonitorResponse1.monitor.dataSources.findingsIndex, - 2, - 2, - listOf("5", "6") - ) - } - // Verify fourth query level monitor execution - queryMonitorResponse.monitor.name -> { - assertEquals(1, monitorRunResults.inputResults.results.size) - val values = monitorRunResults.triggerResults.values - assertEquals(1, values.size) - @Suppress("UNCHECKED_CAST") - val totalHits = - ( - ( - monitorRunResults.inputResults.results[0]["hits"] as kotlin.collections.Map<String, Any> - )["total"] as kotlin.collections.Map<String, Any> - )["value"] - assertEquals(2, totalHits) - @Suppress("UNCHECKED_CAST") - val docIds = - ( - ( - monitorRunResults.inputResults.results[0]["hits"] as kotlin.collections.Map<String, Any> - )["hits"] as List<kotlin.collections.Map<String, Any>> - ).map { it["_id"]!! } - assertEquals(listOf("5", "6"), docIds.sorted()) - } - } - } - } - - private fun assertAlerts( - monitorId: String, - customAlertsIndex: String, - alertSize: Int, - workflowId: String, - ): GetAlertsResponse { - val table = Table("asc", "id", null, alertSize, 0, "") - val getAlertsResponse = client() - .execute( - AlertingActions.GET_ALERTS_ACTION_TYPE, - GetAlertsRequest( - table, "ALL", "ALL", monitorId, customAlertsIndex, - workflowIds = listOf(workflowId) - ) - ) - .get() - assertTrue(getAlertsResponse != null) - assertTrue(getAlertsResponse.alerts.size == alertSize) - return getAlertsResponse - } - - fun `test execute workflow with custom alerts and finding index with doc level delegates`() { - val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) - val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customAlertsIndex1 = "custom_alerts_index" - val customFindingsIndex1 = "custom_findings_index" - val customFindingsIndexPattern1 = "custom_findings_index-1" - var monitor1 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput1), - triggers = listOf(trigger1), - dataSources = DataSources( - alertsIndex = customAlertsIndex1, - findingsIndex = customFindingsIndex1, - findingsIndexPattern = customFindingsIndexPattern1 - ) - ) - val monitorResponse = createMonitor(monitor1)!! - - val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) - val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2)) - val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customAlertsIndex2 = "custom_alerts_index_2" - val customFindingsIndex2 = "custom_findings_index_2" - val customFindingsIndexPattern2 = "custom_findings_index-2" - var monitor2 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput2), - triggers = listOf(trigger2), - dataSources = DataSources( - alertsIndex = customAlertsIndex2, - findingsIndex = customFindingsIndex2, - findingsIndexPattern = customFindingsIndexPattern2 - ) - ) - - val monitorResponse2 = createMonitor(monitor2)!! - - var workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id, monitorResponse2.id), auditDelegateMonitorAlerts = false - ) - val workflowResponse = upsertWorkflow(workflow)!!
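- // Round-trip the workflow through the search API to confirm it was persisted before indexing the test docs.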
- val workflowById = searchWorkflow(workflowResponse.id) - assertNotNull(workflowById) - - var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) - // Matches monitor1 - val testDoc1 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16644, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-west-2" - }""" - indexDoc(index, "1", testDoc1) - - testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) - // Matches monitor1 and monitor2 - val testDoc2 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16645, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-west-2" - }""" - indexDoc(index, "2", testDoc2) - - testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) - // Doesn't match - val testDoc3 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16645, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-east-1" - }""" - indexDoc(index, "3", testDoc3) - - val workflowId = workflowResponse.id - val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! - val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults - assertEquals(2, monitorsRunResults.size) - - assertEquals(monitor1.name, monitorsRunResults[0].monitorName) - assertEquals(1, monitorsRunResults[0].triggerResults.size) - - Assert.assertEquals(monitor2.name, monitorsRunResults[1].monitorName) - Assert.assertEquals(1, monitorsRunResults[1].triggerResults.size) - - val getAlertsResponse = assertAlerts(monitorResponse.id, customAlertsIndex1, alertSize = 2, workflowId = workflowId) - assertAcknowledges(getAlertsResponse.alerts, monitorResponse.id, 2) - assertFindings(monitorResponse.id, customFindingsIndex1, 2, 2, listOf("1", "2")) - - val getAlertsResponse2 = assertAlerts(monitorResponse2.id, customAlertsIndex2, alertSize = 1, workflowId = workflowId) - assertAcknowledges(getAlertsResponse2.alerts, monitorResponse2.id, 1) - assertFindings(monitorResponse2.id, customFindingsIndex2, 1, 1, listOf("2")) - } - - fun `test execute workflow with multiple monitors in chained monitor findings of single monitor`() { - val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) - val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customAlertsIndex1 = "custom_alerts_index" - val customFindingsIndex1 = "custom_findings_index" - val customFindingsIndexPattern1 = "custom_findings_index-1" - var monitor1 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput1), - triggers = listOf(trigger1), - enabled = false, - dataSources = DataSources( - alertsIndex = customAlertsIndex1, - findingsIndex = customFindingsIndex1, - findingsIndexPattern = customFindingsIndexPattern1 - ) - ) - val monitorResponse = createMonitor(monitor1)!! 
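- // monitor2 and monitor3 below deliberately reuse monitor1's custom alerts/findings indices so that all chained-finding assertions run against a single findings index.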
- - val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) - val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2)) - val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor2 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput2), - triggers = listOf(trigger2), - enabled = false, - dataSources = DataSources( - alertsIndex = customAlertsIndex1, - findingsIndex = customFindingsIndex1, - findingsIndexPattern = customFindingsIndexPattern1 - ) - ) - - val monitorResponse2 = createMonitor(monitor2)!! - val docQuery3 = DocLevelQuery(query = "_id:*", name = "5", fields = listOf()) - val docLevelInput3 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery3)) - val trigger3 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - var monitor3 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput3), - triggers = listOf(trigger3), - enabled = false, - dataSources = DataSources( - alertsIndex = customAlertsIndex1, - findingsIndex = customFindingsIndex1, - findingsIndexPattern = customFindingsIndexPattern1 - ) - ) - - val monitorResponse3 = createMonitor(monitor3)!! - val d1 = Delegate(1, monitorResponse.id) - val d2 = Delegate(2, monitorResponse2.id) - val d3 = Delegate( - 3, monitorResponse3.id, - ChainedMonitorFindings(null, listOf(monitorResponse.id, monitorResponse2.id)) - ) - var workflow = Workflow( - id = "", - name = "test", - enabled = false, - schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), - lastUpdateTime = Instant.now(), - enabledTime = null, - workflowType = Workflow.WorkflowType.COMPOSITE, - user = randomUser(), - inputs = listOf(CompositeInput(org.opensearch.commons.alerting.model.Sequence(listOf(d1, d2, d3)))), - version = -1L, - schemaVersion = 0, - triggers = emptyList(), - auditDelegateMonitorAlerts = false - - ) - val workflowResponse = upsertWorkflow(workflow)!! - val workflowById = searchWorkflow(workflowResponse.id) - assertNotNull(workflowById) - - var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) - // Matches monitor1 - val testDoc1 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16644, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-west-2" - }""" - indexDoc(index, "1", testDoc1) - - testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) - // Matches monitor1 and monitor2 - val testDoc2 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16645, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-west-2" - }""" - indexDoc(index, "2", testDoc2) - - testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) - // Matches monitor1 and monitor2 - val testDoc3 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16645, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-east-1" - }""" - indexDoc(index, "3", testDoc3) - - val workflowId = workflowResponse.id - val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! 
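- // Expected findings: monitor1 (us-west-2) matches docs 1 and 2, monitor2 (16645) matches docs 2 and 3, and monitor3 (_id:*), chained to both, matches the union 1, 2 and 3.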
- val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults - assertEquals(3, monitorsRunResults.size) - assertFindings(monitorResponse.id, customFindingsIndex1, 2, 2, listOf("1", "2")) - assertFindings(monitorResponse2.id, customFindingsIndex1, 2, 2, listOf("2", "3")) - assertFindings(monitorResponse3.id, customFindingsIndex1, 3, 3, listOf("1", "2", "3")) - } - - fun `test execute workflows with shared doc level monitor delegate`() { - val docQuery = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customAlertsIndex = "custom_alerts_index" - val customFindingsIndex = "custom_findings_index" - val customFindingsIndexPattern = "custom_findings_index-1" - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources( - alertsIndex = customAlertsIndex, - findingsIndex = customFindingsIndex, - findingsIndexPattern = customFindingsIndexPattern - ) - ) - val monitorResponse = createMonitor(monitor)!! - - var workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id), - auditDelegateMonitorAlerts = false - ) - val workflowResponse = upsertWorkflow(workflow)!! - val workflowById = searchWorkflow(workflowResponse.id) - assertNotNull(workflowById) - - var workflow1 = randomWorkflow( - monitorIds = listOf(monitorResponse.id), - auditDelegateMonitorAlerts = false - ) - val workflowResponse1 = upsertWorkflow(workflow1)!! - val workflowById1 = searchWorkflow(workflowResponse1.id) - assertNotNull(workflowById1) - - var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) - // Matches monitor1 - val testDoc1 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16644, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-west-2" - }""" - indexDoc(index, "1", testDoc1) - - testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) - val testDoc2 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16645, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-west-2" - }""" - indexDoc(index, "2", testDoc2) - - val workflowId = workflowResponse.id - val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! 
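- // First workflow run: expect 2 alerts and 2 findings for the shared delegate, plus freshly created workflow and monitor metadata.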
- val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults - assertEquals(1, monitorsRunResults.size) - - assertEquals(monitor.name, monitorsRunResults[0].monitorName) - assertEquals(1, monitorsRunResults[0].triggerResults.size) - - // Assert and not ack the alerts (in order to verify later on that all the alerts are generated) - assertAlerts(monitorResponse.id, customAlertsIndex, alertSize = 2, workflowId) - assertFindings(monitorResponse.id, customFindingsIndex, 2, 2, listOf("1", "2")) - // Verify workflow and monitor delegate metadata - val workflowMetadata = searchWorkflowMetadata(id = workflowId) - assertNotNull("Workflow metadata not initialized", workflowMetadata) - assertEquals( - "Workflow metadata execution id not correct", - executeWorkflowResponse.workflowRunResult.executionId, - workflowMetadata!!.latestExecutionId - ) - val monitorMetadataId = getDelegateMonitorMetadataId(workflowMetadata, monitorResponse) - val monitorMetadata = searchMonitorMetadata(monitorMetadataId) - assertNotNull(monitorMetadata) - - // Execute second workflow - val workflowId1 = workflowResponse1.id - val executeWorkflowResponse1 = executeWorkflow(workflowById1, workflowId1, false)!! - val monitorsRunResults1 = executeWorkflowResponse1.workflowRunResult.monitorRunResults - assertEquals(1, monitorsRunResults1.size) - - assertEquals(monitor.name, monitorsRunResults1[0].monitorName) - assertEquals(1, monitorsRunResults1[0].triggerResults.size) - - val getAlertsResponse = assertAlerts(monitorResponse.id, customAlertsIndex, alertSize = 2, workflowId1) - assertAcknowledges(getAlertsResponse.alerts, monitorResponse.id, 2) - assertFindings(monitorResponse.id, customFindingsIndex, 4, 4, listOf("1", "2", "1", "2")) - // Verify workflow and monitor delegate metadata - val workflowMetadata1 = searchWorkflowMetadata(id = workflowId1) - assertNotNull("Workflow metadata not initialized", workflowMetadata1) - assertEquals( - "Workflow metadata execution id not correct", - executeWorkflowResponse1.workflowRunResult.executionId, - workflowMetadata1!!.latestExecutionId - ) - val monitorMetadataId1 = getDelegateMonitorMetadataId(workflowMetadata1, monitorResponse) - val monitorMetadata1 = searchMonitorMetadata(monitorMetadataId1) - assertNotNull(monitorMetadata1) - // Verify that for two workflows two different doc level monitor metadata has been created - assertTrue("Different monitor is used in workflows", monitorMetadata!!.monitorId == monitorMetadata1!!.monitorId) - assertTrue(monitorMetadata.id != monitorMetadata1.id) - } - - fun `test execute workflows with shared doc level monitor delegate updating delegate datasource`() { - val docQuery = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - var monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - val monitorResponse = createMonitor(monitor)!! - - val workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id), auditDelegateMonitorAlerts = false - ) - val workflowResponse = upsertWorkflow(workflow)!! - val workflowById = searchWorkflow(workflowResponse.id) - assertNotNull(workflowById) - - val workflow1 = randomWorkflow( - monitorIds = listOf(monitorResponse.id), auditDelegateMonitorAlerts = false - ) - val workflowResponse1 = upsertWorkflow(workflow1)!! 
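- // Two workflows now share the same delegate monitor; each one should get its own monitor metadata entry.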
- val workflowById1 = searchWorkflow(workflowResponse1.id) - assertNotNull(workflowById1) - - var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) - // Matches monitor1 - val testDoc1 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16644, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-west-2" - }""" - indexDoc(index, "1", testDoc1) - - testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) - val testDoc2 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16645, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-west-2" - }""" - indexDoc(index, "2", testDoc2) - - val workflowId = workflowResponse.id - val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! - val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults - assertEquals(1, monitorsRunResults.size) - - assertEquals(monitor.name, monitorsRunResults[0].monitorName) - assertEquals(1, monitorsRunResults[0].triggerResults.size) - - assertAlerts(monitorResponse.id, AlertIndices.ALERT_INDEX, alertSize = 2, workflowId) - assertFindings(monitorResponse.id, AlertIndices.FINDING_HISTORY_WRITE_INDEX, 2, 2, listOf("1", "2")) - // Verify workflow and monitor delegate metadata - val workflowMetadata = searchWorkflowMetadata(id = workflowId) - assertNotNull("Workflow metadata not initialized", workflowMetadata) - assertEquals( - "Workflow metadata execution id not correct", - executeWorkflowResponse.workflowRunResult.executionId, - workflowMetadata!!.latestExecutionId - ) - val monitorMetadataId = getDelegateMonitorMetadataId(workflowMetadata, monitorResponse) - val monitorMetadata = searchMonitorMetadata(monitorMetadataId) - assertNotNull(monitorMetadata) - - val customAlertsIndex = "custom_alerts_index" - val customFindingsIndex = "custom_findings_index" - val customFindingsIndexPattern = "custom_findings_index-1" - val monitorId = monitorResponse.id - updateMonitor( - monitor = monitor.copy( - dataSources = DataSources( - alertsIndex = customAlertsIndex, - findingsIndex = customFindingsIndex, - findingsIndexPattern = customFindingsIndexPattern - ) - ), - monitorId - ) - - // Execute second workflow - val workflowId1 = workflowResponse1.id - val executeWorkflowResponse1 = executeWorkflow(workflowById1, workflowId1, false)!! 
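- // The datasource update above redirects this second run's alerts and findings to the custom indices; verified below.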
- val monitorsRunResults1 = executeWorkflowResponse1.workflowRunResult.monitorRunResults - assertEquals(1, monitorsRunResults1.size) - - assertEquals(monitor.name, monitorsRunResults1[0].monitorName) - assertEquals(1, monitorsRunResults1[0].triggerResults.size) - - // Verify alerts for the custom index - val getAlertsResponse = assertAlerts(monitorResponse.id, customAlertsIndex, alertSize = 2, workflowId1) - assertAcknowledges(getAlertsResponse.alerts, monitorResponse.id, 2) - assertFindings(monitorResponse.id, customFindingsIndex, 2, 2, listOf("1", "2")) - - // Verify workflow and monitor delegate metadata - val workflowMetadata1 = searchWorkflowMetadata(id = workflowId1) - assertNotNull("Workflow metadata not initialized", workflowMetadata1) - assertEquals( - "Workflow metadata execution id not correct", - executeWorkflowResponse1.workflowRunResult.executionId, - workflowMetadata1!!.latestExecutionId - ) - val monitorMetadataId1 = getDelegateMonitorMetadataId(workflowMetadata1, monitorResponse) - val monitorMetadata1 = searchMonitorMetadata(monitorMetadataId1) - assertNotNull(monitorMetadata1) - // Verify that for two workflows two different doc level monitor metadata has been created - assertTrue("Different monitor is used in workflows", monitorMetadata!!.monitorId == monitorMetadata1!!.monitorId) - assertTrue(monitorMetadata.id != monitorMetadata1.id) - } - - fun `test execute workflow verify workflow metadata`() { - val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) - val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor1 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput1), - triggers = listOf(trigger1) - ) - val monitorResponse = createMonitor(monitor1)!! - - val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) - val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2)) - val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor2 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput2), - triggers = listOf(trigger2), - ) - - val monitorResponse2 = createMonitor(monitor2)!! - - var workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id, monitorResponse2.id) - ) - val workflowResponse = upsertWorkflow(workflow)!! - val workflowById = searchWorkflow(workflowResponse.id) - assertNotNull(workflowById) - - var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) - // Matches monitor1 - val testDoc1 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16644, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-west-2" - }""" - indexDoc(index, "1", testDoc1) - // First execution - val workflowId = workflowResponse.id - val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! 
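- // Each execution should refresh latestExecutionId in the workflow metadata while reusing the same delegate monitor metadata doc.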
- val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults - assertEquals(2, monitorsRunResults.size) - - val workflowMetadata = searchWorkflowMetadata(id = workflowId) - assertNotNull("Workflow metadata not initialized", workflowMetadata) - assertEquals( - "Workflow metadata execution id not correct", - executeWorkflowResponse.workflowRunResult.executionId, - workflowMetadata!!.latestExecutionId - ) - val monitorMetadataId = getDelegateMonitorMetadataId(workflowMetadata, monitorResponse) - val monitorMetadata = searchMonitorMetadata(monitorMetadataId) - assertNotNull(monitorMetadata) - - // Second execution - val executeWorkflowResponse1 = executeWorkflow(workflowById, workflowId, false)!! - val monitorsRunResults1 = executeWorkflowResponse1.workflowRunResult.monitorRunResults - assertEquals(2, monitorsRunResults1.size) - - val workflowMetadata1 = searchWorkflowMetadata(id = workflowId) - assertNotNull("Workflow metadata not initialized", workflowMetadata) - assertEquals( - "Workflow metadata execution id not correct", - executeWorkflowResponse1.workflowRunResult.executionId, - workflowMetadata1!!.latestExecutionId - ) - val monitorMetadataId1 = getDelegateMonitorMetadataId(workflowMetadata1, monitorResponse) - assertTrue(monitorMetadataId == monitorMetadataId1) - val monitorMetadata1 = searchMonitorMetadata(monitorMetadataId1) - assertNotNull(monitorMetadata1) - } - - fun `test execute workflow dryrun verify workflow metadata not created`() { - val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) - val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor1 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput1), - triggers = listOf(trigger1) - ) - val monitorResponse = createMonitor(monitor1)!! - - val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) - val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2)) - val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor2 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput2), - triggers = listOf(trigger2), - ) - - val monitorResponse2 = createMonitor(monitor2)!! - - var workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id, monitorResponse2.id) - ) - val workflowResponse = upsertWorkflow(workflow)!! - val workflowById = searchWorkflow(workflowResponse.id) - assertNotNull(workflowById) - - var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) - // Matches monitor1 - val testDoc1 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16644, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-west-2" - }""" - indexDoc(index, "1", testDoc1) - // First execution - val workflowId = workflowResponse.id - val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, true) - - assertNotNull("Workflow run result is null", executeWorkflowResponse) - val monitorsRunResults = executeWorkflowResponse!!.workflowRunResult.monitorRunResults - assertEquals(2, monitorsRunResults.size) - - var exception: java.lang.Exception? 
= null - try { - searchWorkflowMetadata(id = workflowId) - } catch (ex: java.lang.Exception) { - exception = ex - assertTrue(exception is java.util.NoSuchElementException) - } - } - - fun `test execute workflow with custom alerts and finding index with bucket and doc monitor bucket monitor used as chained finding`() { - val query = QueryBuilders.rangeQuery("test_strict_date_time") - .gt("{{period_end}}||-10d") - .lte("{{period_end}}") - .format("epoch_millis") - val compositeSources = listOf( - TermsValuesSourceBuilder("test_field_1").field("test_field_1") - ) - val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) - val input = SearchInput(indices = listOf(index), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) - // Bucket level monitor will reduce the size of matched doc ids on those that belong to a bucket that contains more than 1 document after term grouping - val triggerScript = """ - params.docCount > 1 - """.trimIndent() - - var trigger = randomBucketLevelTrigger() - trigger = trigger.copy( - bucketSelector = BucketSelectorExtAggregationBuilder( - name = trigger.id, - bucketsPathsMap = mapOf("docCount" to "_count"), - script = Script(triggerScript), - parentBucketPath = "composite_agg", - filter = null, - ) - ) - val bucketCustomAlertsIndex = "custom_alerts_index" - val bucketCustomFindingsIndex = "custom_findings_index" - val bucketCustomFindingsIndexPattern = "custom_findings_index-1" - - val bucketLevelMonitorResponse = createMonitor( - randomBucketLevelMonitor( - inputs = listOf(input), - enabled = false, - triggers = listOf(trigger), - dataSources = DataSources( - findingsEnabled = true, - alertsIndex = bucketCustomAlertsIndex, - findingsIndex = bucketCustomFindingsIndex, - findingsIndexPattern = bucketCustomFindingsIndexPattern - ) - ) - )!! - - val docQuery1 = DocLevelQuery(query = "test_field_1:\"test_value_2\"", name = "1", fields = listOf()) - val docQuery2 = DocLevelQuery(query = "test_field_1:\"test_value_1\"", name = "2", fields = listOf()) - val docQuery3 = DocLevelQuery(query = "test_field_1:\"test_value_3\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1, docQuery2, docQuery3)) - val docTrigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val docCustomAlertsIndex = "custom_alerts_index" - val docCustomFindingsIndex = "custom_findings_index" - val docCustomFindingsIndexPattern = "custom_findings_index-1" - var docLevelMonitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(docTrigger), - dataSources = DataSources( - alertsIndex = docCustomAlertsIndex, - findingsIndex = docCustomFindingsIndex, - findingsIndexPattern = docCustomFindingsIndexPattern - ) - ) - - val docLevelMonitorResponse = createMonitor(docLevelMonitor)!! - // 1. bucketMonitor (chainedFinding = null) 2. docMonitor (chainedFinding = bucketMonitor) - var workflow = randomWorkflow( - monitorIds = listOf(bucketLevelMonitorResponse.id, docLevelMonitorResponse.id), auditDelegateMonitorAlerts = false - ) - val workflowResponse = upsertWorkflow(workflow)!! - val workflowById = searchWorkflow(workflowResponse.id) - assertNotNull(workflowById) - - // Creates 5 documents - insertSampleTimeSerializedData( - index, - listOf( - "test_value_1", - "test_value_1", // adding duplicate to verify aggregation - "test_value_2", - "test_value_2", - "test_value_3" - ) - ) - - val workflowId = workflowResponse.id - // 1. 
bucket level monitor should reduce the doc findings to 4 (1, 2, 3, 4) - // 2. Doc level monitor will match those 4 documents although it contains rules for matching all 5 documents (docQuery3 matches the fifth) - val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! - assertNotNull(executeWorkflowResponse) - - for (monitorRunResults in executeWorkflowResponse.workflowRunResult.monitorRunResults) { - if (bucketLevelMonitorResponse.monitor.name == monitorRunResults.monitorName) { - val searchResult = monitorRunResults.inputResults.results.first() - - @Suppress("UNCHECKED_CAST") - val buckets = searchResult.stringMap("aggregations")?.stringMap("composite_agg") - ?.get("buckets") as List<kotlin.collections.Map<String, Any>> - assertEquals("Incorrect search result", 3, buckets.size) - - val getAlertsResponse = assertAlerts(bucketLevelMonitorResponse.id, bucketCustomAlertsIndex, alertSize = 2, workflowId) - assertAcknowledges(getAlertsResponse.alerts, bucketLevelMonitorResponse.id, 2) - assertFindings(bucketLevelMonitorResponse.id, bucketCustomFindingsIndex, 1, 4, listOf("1", "2", "3", "4")) - } else { - assertEquals(1, monitorRunResults.inputResults.results.size) - val values = monitorRunResults.triggerResults.values - assertEquals(1, values.size) - @Suppress("UNCHECKED_CAST") - val docLevelTrigger = values.iterator().next() as DocumentLevelTriggerRunResult - val triggeredDocIds = docLevelTrigger.triggeredDocs.map { it.split("|")[0] } - val expectedTriggeredDocIds = listOf("1", "2", "3", "4") - assertEquals(expectedTriggeredDocIds, triggeredDocIds.sorted()) - - val getAlertsResponse = assertAlerts(docLevelMonitorResponse.id, docCustomAlertsIndex, alertSize = 4, workflowId) - assertAcknowledges(getAlertsResponse.alerts, docLevelMonitorResponse.id, 4) - assertFindings(docLevelMonitorResponse.id, docCustomFindingsIndex, 4, 4, listOf("1", "2", "3", "4")) - } - } - } - - fun `test chained alerts for bucket level monitors generating audit alerts custom alerts index`() { - val customAlertIndex = "custom-alert-index" - val customAlertHistoryIndex = "custom-alert-history-index" - val customAlertHistoryIndexPattern = "" - val query = QueryBuilders.rangeQuery("test_strict_date_time") - .gt("{{period_end}}||-10d") - .lte("{{period_end}}") - .format("epoch_millis") - val compositeSources = listOf( - TermsValuesSourceBuilder("test_field_1").field("test_field_1") - ) - val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) - val input = SearchInput(indices = listOf(index), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) - // Bucket level monitor will reduce the size of matched doc ids on those that belong to a bucket that contains more than 1 document after term grouping - val triggerScript = """ - params.docCount > 1 - """.trimIndent() - - var trigger = randomBucketLevelTrigger() - trigger = trigger.copy( - bucketSelector = BucketSelectorExtAggregationBuilder( - name = trigger.id, - bucketsPathsMap = mapOf("docCount" to "_count"), - script = Script(triggerScript), - parentBucketPath = "composite_agg", - filter = null, - ) - ) - - val bucketLevelMonitorResponse = createMonitor( - randomBucketLevelMonitor( - inputs = listOf(input), - enabled = false, - triggers = listOf(trigger), - dataSources = DataSources( - alertsIndex = customAlertIndex, - alertsHistoryIndexPattern = customAlertHistoryIndexPattern, - alertsHistoryIndex = customAlertHistoryIndex - - ) - ) - )!!
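- // A second, identical bucket level monitor follows; a chained alert trigger then ANDs the two, so audit alerts for both delegates should land in the custom history index.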
-
-        val bucketLevelMonitorResponse2 = createMonitor(
-            randomBucketLevelMonitor(
-                inputs = listOf(input),
-                enabled = false,
-                triggers = listOf(trigger),
-                dataSources = DataSources(
-                    alertsIndex = customAlertIndex,
-                    alertsHistoryIndexPattern = customAlertHistoryIndexPattern,
-                    alertsHistoryIndex = customAlertHistoryIndex
-                )
-            )
-        )!!
-
-        val andTrigger = randomChainedAlertTrigger(
-            name = "1And2",
-            condition = Script("monitor[id=${bucketLevelMonitorResponse.id}] && monitor[id=${bucketLevelMonitorResponse2.id}]")
-        )
-        // 1. bucketMonitor (chainedFinding = null) 2. bucketMonitor2 (chainedFinding = bucketMonitor)
-        var workflow = randomWorkflow(
-            monitorIds = listOf(bucketLevelMonitorResponse.id, bucketLevelMonitorResponse2.id),
-            triggers = listOf(andTrigger)
-        )
-        val workflowResponse = upsertWorkflow(workflow)!!
-        val workflowById = searchWorkflow(workflowResponse.id)
-        assertNotNull(workflowById)
-
-        // Creates 5 documents
-        insertSampleTimeSerializedData(
-            index,
-            listOf(
-                "test_value_1",
-                "test_value_1", // adding duplicate to verify aggregation
-                "test_value_2",
-                "test_value_2",
-                "test_value_3"
-            )
-        )
-
-        val workflowId = workflowResponse.id
-        // 1. Each bucket level monitor matches the 2 buckets (test_value_1, test_value_2) that contain more than one document
-        // 2. Because the monitors run as workflow delegates, their alerts are written to the custom history index in AUDIT state
-        val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!!
-        assertNotNull(executeWorkflowResponse)
-
-        Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults.isNotEmpty())
-        Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults.containsKey(andTrigger.id))
-        Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults[andTrigger.id]!!.triggered)
-
-        val auditStateAlerts = getAuditStateAlerts(
-            alertsIndex = customAlertHistoryIndex,
-            monitorId = bucketLevelMonitorResponse.id,
-            executionId = executeWorkflowResponse.workflowRunResult.executionId
-        )
-        Assert.assertEquals(auditStateAlerts.size, 2)
-
-        val auditStateAlerts2 = getAuditStateAlerts(
-            alertsIndex = customAlertHistoryIndex,
-            monitorId = bucketLevelMonitorResponse2.id,
-            executionId = executeWorkflowResponse.workflowRunResult.executionId
-        )
-        Assert.assertEquals(auditStateAlerts2.size, 2)
-    }
-
-    fun `test chained alerts for bucket level monitors generating audit alerts`() {
-        val query = QueryBuilders.rangeQuery("test_strict_date_time")
-            .gt("{{period_end}}||-10d")
-            .lte("{{period_end}}")
-            .format("epoch_millis")
-        val compositeSources = listOf(
-            TermsValuesSourceBuilder("test_field_1").field("test_field_1")
-        )
-        val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources)
-        val input = SearchInput(indices = listOf(index), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg))
-        // Bucket level monitor will reduce the matched doc ids to those that belong to a bucket containing more than 1 document after term grouping
-        val triggerScript = """
-            params.docCount > 1
-        """.trimIndent()
-
-        var trigger = randomBucketLevelTrigger()
-        trigger = trigger.copy(
-            bucketSelector = BucketSelectorExtAggregationBuilder(
-                name = trigger.id,
-                bucketsPathsMap = mapOf("docCount" to "_count"),
-                script = Script(triggerScript),
-                parentBucketPath = "composite_agg",
-                filter = null,
-            )
-        )
-
-        val bucketLevelMonitorResponse = createMonitor(
-            randomBucketLevelMonitor(
-                inputs = listOf(input),
-                enabled = false,
-                triggers = listOf(trigger)
-            )
-        )!!
-
-        val bucketLevelMonitorResponse2 = createMonitor(
-            randomBucketLevelMonitor(
-                inputs = listOf(input),
-                enabled = false,
-                triggers = listOf(trigger)
-            )
-        )!!
-
-        val andTrigger = randomChainedAlertTrigger(
-            name = "1And2",
-            condition = Script("monitor[id=${bucketLevelMonitorResponse.id}] && monitor[id=${bucketLevelMonitorResponse2.id}]")
-        )
-        // 1. bucketMonitor (chainedFinding = null) 2. bucketMonitor2 (chainedFinding = bucketMonitor)
-        var workflow = randomWorkflow(
-            monitorIds = listOf(bucketLevelMonitorResponse.id, bucketLevelMonitorResponse2.id),
-            triggers = listOf(andTrigger)
-        )
-        val workflowResponse = upsertWorkflow(workflow)!!
-        val workflowById = searchWorkflow(workflowResponse.id)
-        assertNotNull(workflowById)
-
-        // Creates 5 documents
-        insertSampleTimeSerializedData(
-            index,
-            listOf(
-                "test_value_1",
-                "test_value_1", // adding duplicate to verify aggregation
-                "test_value_2",
-                "test_value_2",
-                "test_value_3"
-            )
-        )
-
-        val workflowId = workflowResponse.id
-        // 1. Each bucket level monitor matches the 2 buckets (test_value_1, test_value_2) that contain more than one document
-        // 2. Because the monitors run as workflow delegates, their alerts are written to the default alerts history index in AUDIT state
-        val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!!
-        assertNotNull(executeWorkflowResponse)
-
-        Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults.isNotEmpty())
-        Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults.containsKey(andTrigger.id))
-        Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults[andTrigger.id]!!.triggered)
-
-        val auditStateAlerts = getAuditStateAlerts(
-            alertsIndex = bucketLevelMonitorResponse.monitor.dataSources.alertsHistoryIndex,
-            monitorId = bucketLevelMonitorResponse.id,
-            executionId = executeWorkflowResponse.workflowRunResult.executionId
-        )
-        Assert.assertEquals(auditStateAlerts.size, 2)
-
-        val auditStateAlerts2 = getAuditStateAlerts(
-            alertsIndex = bucketLevelMonitorResponse.monitor.dataSources.alertsHistoryIndex,
-            monitorId = bucketLevelMonitorResponse2.id,
-            executionId = executeWorkflowResponse.workflowRunResult.executionId
-        )
-        Assert.assertEquals(auditStateAlerts2.size, 2)
-    }
-
-    fun `test execute with custom alerts and finding index with bucket and doc monitor when doc monitor is used in chained finding`() {
-        val docQuery1 = DocLevelQuery(query = "test_field_1:\"test_value_2\"", name = "1", fields = listOf())
-        val docQuery2 = DocLevelQuery(query = "test_field_1:\"test_value_3\"", name = "2", fields = listOf())
-
-        var docLevelMonitor = randomDocumentLevelMonitor(
-            inputs = listOf(DocLevelMonitorInput("description", listOf(index), listOf(docQuery1, docQuery2))),
-            triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)),
-            dataSources = DataSources(
-                alertsIndex = "custom_alerts_index",
-                findingsIndex = "custom_findings_index",
-                findingsIndexPattern = "custom_findings_index-1"
-            )
-        )
-
-        val docLevelMonitorResponse = createMonitor(docLevelMonitor)!!
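-
-        // Editor's note (illustrative, not part of the original change): in the workflow built below,
-        // every delegate after the first only evaluates documents referenced by the previous delegate's
-        // findings (chained monitor findings), which is why the later assertions expect a shrinking set
-        // of document ids instead of all six inserted documents.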
- - val query = QueryBuilders.rangeQuery("test_strict_date_time") - .gt("{{period_end}}||-10d") - .lte("{{period_end}}") - .format("epoch_millis") - val compositeSources = listOf( - TermsValuesSourceBuilder("test_field_1").field("test_field_1") - ) - val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) - val input = SearchInput(indices = listOf(index), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) - // Bucket level monitor will reduce the size of matched doc ids on those that belong to a bucket that contains more than 1 document after term grouping - val triggerScript = """ - params.docCount > 1 - """.trimIndent() - - var trigger = randomBucketLevelTrigger() - trigger = trigger.copy( - bucketSelector = BucketSelectorExtAggregationBuilder( - name = trigger.id, - bucketsPathsMap = mapOf("docCount" to "_count"), - script = Script(triggerScript), - parentBucketPath = "composite_agg", - filter = null, - ) - ) - - val bucketLevelMonitorResponse = createMonitor( - randomBucketLevelMonitor( - inputs = listOf(input), - enabled = false, - triggers = listOf(trigger), - dataSources = DataSources( - findingsEnabled = true, - alertsIndex = "custom_alerts_index", - findingsIndex = "custom_findings_index", - findingsIndexPattern = "custom_findings_index-1" - ) - ) - )!! - - var docLevelMonitor1 = randomDocumentLevelMonitor( - // Match the documents with test_field_1: test_value_3 - inputs = listOf(DocLevelMonitorInput("description", listOf(index), listOf(docQuery2))), - triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)), - dataSources = DataSources( - findingsEnabled = true, - alertsIndex = "custom_alerts_index_1", - findingsIndex = "custom_findings_index_1", - findingsIndexPattern = "custom_findings_index_1-1" - ) - ) - - val docLevelMonitorResponse1 = createMonitor(docLevelMonitor1)!! - - val queryMonitorInput = SearchInput( - indices = listOf(index), - query = SearchSourceBuilder().query( - QueryBuilders - .rangeQuery("test_strict_date_time") - .gt("{{period_end}}||-10d") - .lte("{{period_end}}") - .format("epoch_millis") - ) - ) - val queryTriggerScript = """ - return ctx.results[0].hits.hits.size() > 0 - """.trimIndent() - - val queryLevelTrigger = randomQueryLevelTrigger(condition = Script(queryTriggerScript)) - val queryMonitorResponse = - createMonitor(randomQueryLevelMonitor(inputs = listOf(queryMonitorInput), triggers = listOf(queryLevelTrigger)))!! - - // 1. docMonitor (chainedFinding = null) 2. bucketMonitor (chainedFinding = docMonitor) 3. docMonitor (chainedFinding = bucketMonitor) 4. queryMonitor (chainedFinding = docMonitor 3) - var workflow = randomWorkflow( - monitorIds = listOf( - docLevelMonitorResponse.id, - bucketLevelMonitorResponse.id, - docLevelMonitorResponse1.id, - queryMonitorResponse.id - ), - auditDelegateMonitorAlerts = false - ) - val workflowResponse = upsertWorkflow(workflow)!! - val workflowById = searchWorkflow(workflowResponse.id) - assertNotNull(workflowById) - - // Creates 5 documents - insertSampleTimeSerializedData( - index, - listOf( - "test_value_1", - "test_value_1", // adding duplicate to verify aggregation - "test_value_2", - "test_value_2", - "test_value_3", - "test_value_3" - ) - ) - - val workflowId = workflowResponse.id - // 1. Doc level monitor should reduce the doc findings to 4 (3 - test_value_2, 4 - test_value_2, 5 - test_value_3, 6 - test_value_3) - // 2. 
Bucket level monitor will only fetch and match the docs from the current findings execution, although it contains rules for matching documents that have test_value_2 and test_value_3
-        val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!!
-        assertNotNull(executeWorkflowResponse)
-
-        for (monitorRunResults in executeWorkflowResponse.workflowRunResult.monitorRunResults) {
-            when (monitorRunResults.monitorName) {
-                // Verify first doc level monitor execution, alerts and findings
-                docLevelMonitorResponse.monitor.name -> {
-                    assertEquals(1, monitorRunResults.inputResults.results.size)
-                    val values = monitorRunResults.triggerResults.values
-                    assertEquals(1, values.size)
-                    @Suppress("UNCHECKED_CAST")
-                    val docLevelTrigger = values.iterator().next() as DocumentLevelTriggerRunResult
-                    val triggeredDocIds = docLevelTrigger.triggeredDocs.map { it.split("|")[0] }
-                    val expectedTriggeredDocIds = listOf("3", "4", "5", "6")
-                    assertEquals(expectedTriggeredDocIds, triggeredDocIds.sorted())
-
-                    val getAlertsResponse =
-                        assertAlerts(
-                            docLevelMonitorResponse.id,
-                            docLevelMonitorResponse.monitor.dataSources.alertsIndex,
-                            alertSize = 4,
-                            workflowId = workflowId
-                        )
-                    assertAcknowledges(getAlertsResponse.alerts, docLevelMonitorResponse.id, 4)
-                    assertFindings(
-                        docLevelMonitorResponse.id,
-                        docLevelMonitorResponse.monitor.dataSources.findingsIndex,
-                        4,
-                        4,
-                        listOf("3", "4", "5", "6")
-                    )
-                }
-                // Verify second bucket level monitor execution, alerts and findings
-                bucketLevelMonitorResponse.monitor.name -> {
-                    val searchResult = monitorRunResults.inputResults.results.first()
-
-                    @Suppress("UNCHECKED_CAST")
-                    val buckets =
-                        searchResult
-                            .stringMap("aggregations")?.stringMap("composite_agg")
-                            ?.get("buckets") as List<kotlin.collections.Map<String, Any>>
-                    assertEquals("Incorrect search result", 2, buckets.size)
-
-                    val getAlertsResponse =
-                        assertAlerts(
-                            bucketLevelMonitorResponse.id,
-                            bucketLevelMonitorResponse.monitor.dataSources.alertsIndex,
-                            alertSize = 2,
-                            workflowId
-                        )
-                    assertAcknowledges(getAlertsResponse.alerts, bucketLevelMonitorResponse.id, 2)
-                    assertFindings(
-                        bucketLevelMonitorResponse.id,
-                        bucketLevelMonitorResponse.monitor.dataSources.findingsIndex,
-                        1,
-                        4,
-                        listOf("3", "4", "5", "6")
-                    )
-                }
-                // Verify third doc level monitor execution, alerts and findings
-                docLevelMonitorResponse1.monitor.name -> {
-                    assertEquals(1, monitorRunResults.inputResults.results.size)
-                    val values = monitorRunResults.triggerResults.values
-                    assertEquals(1, values.size)
-                    @Suppress("UNCHECKED_CAST")
-                    val docLevelTrigger = values.iterator().next() as DocumentLevelTriggerRunResult
-                    val triggeredDocIds = docLevelTrigger.triggeredDocs.map { it.split("|")[0] }
-                    val expectedTriggeredDocIds = listOf("5", "6")
-                    assertEquals(expectedTriggeredDocIds, triggeredDocIds.sorted())
-
-                    val getAlertsResponse =
-                        assertAlerts(
-                            docLevelMonitorResponse1.id,
-                            docLevelMonitorResponse1.monitor.dataSources.alertsIndex,
-                            alertSize = 2,
-                            workflowId
-                        )
-                    assertAcknowledges(getAlertsResponse.alerts, docLevelMonitorResponse1.id, 2)
-                    assertFindings(
-                        docLevelMonitorResponse1.id,
-                        docLevelMonitorResponse1.monitor.dataSources.findingsIndex,
-                        2,
-                        2,
-                        listOf("5", "6")
-                    )
-                }
-                // Verify fourth query level monitor execution
-                queryMonitorResponse.monitor.name -> {
-                    assertEquals(1, monitorRunResults.inputResults.results.size)
-                    val values = monitorRunResults.triggerResults.values
-                    assertEquals(1, values.size)
-                    @Suppress("UNCHECKED_CAST")
-                    val totalHits =
-                        (
-                            (
-                                monitorRunResults.inputResults.results[0]["hits"]
-                                    as kotlin.collections.Map<String, Any>
-                                )["total"] as kotlin.collections.Map<String, Any>
-                            )["value"]
-                    assertEquals(2, totalHits)
-                    @Suppress("UNCHECKED_CAST")
-                    val docIds =
-                        (
-                            (
-                                monitorRunResults.inputResults.results[0]["hits"]
-                                    as kotlin.collections.Map<String, Any>
-                                )["hits"] as List<kotlin.collections.Map<String, String>>
-                            )
-                            .map { it["_id"]!! }
-                    assertEquals(listOf("5", "6"), docIds.sorted())
-                }
-            }
-        }
-    }
-
-    fun `test execute workflow input error`() {
-        val docLevelInput = DocLevelMonitorInput(
-            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
-        )
-        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-
-        val monitor = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput),
-            triggers = listOf(trigger)
-        )
-
-        val monitorResponse = createMonitor(monitor)!!
-        var workflow = randomWorkflow(
-            monitorIds = listOf(monitorResponse.id), auditDelegateMonitorAlerts = false
-        )
-        val workflowResponse = upsertWorkflow(workflow)!!
-        val workflowById = searchWorkflow(workflowResponse.id)
-        assertNotNull(workflowById)
-
-        deleteIndex(index)
-
-        val response = executeWorkflow(workflowById, workflowById!!.id, false)!!
-        val error = response.workflowRunResult.monitorRunResults[0].error
-        assertNotNull(error)
-        assertTrue(error is AlertingException)
-        assertEquals(RestStatus.INTERNAL_SERVER_ERROR, (error as AlertingException).status)
-        assertTrue(error.message!!.contains("no such index [$index]"))
-    }
-
-    fun `test execute workflow wrong workflow id`() {
-        val docLevelInput = DocLevelMonitorInput(
-            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
-        )
-        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-
-        val monitor = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput),
-            triggers = listOf(trigger)
-        )
-
-        val monitorResponse = createMonitor(monitor)!!
-
-        val workflowRequest = randomWorkflow(
-            monitorIds = listOf(monitorResponse.id)
-        )
-        val workflowResponse = upsertWorkflow(workflowRequest)!!
-        val workflowId = workflowResponse.id
-        val getWorkflowResponse = getWorkflowById(id = workflowResponse.id)
-
-        assertNotNull(getWorkflowResponse)
-        assertEquals(workflowId, getWorkflowResponse.id)
-
-        var exception: java.lang.Exception? = null
-        val badWorkflowId = getWorkflowResponse.id + "bad"
-        try {
-            executeWorkflow(id = badWorkflowId)
-        } catch (ex: java.lang.Exception) {
-            exception = ex
-        }
-        assertTrue(exception is ExecutionException)
-        assertTrue(exception!!.cause is AlertingException)
-        assertEquals(RestStatus.NOT_FOUND, (exception.cause as AlertingException).status)
-        assertEquals("Can't find workflow with id: $badWorkflowId", exception.cause!!.message)
-    }
-
-    private fun assertFindings(
-        monitorId: String,
-        customFindingsIndex: String,
-        findingSize: Int,
-        matchedQueryNumber: Int,
-        relatedDocIds: List<String>,
-    ) {
-        val findings = searchFindings(monitorId, customFindingsIndex)
-        assertEquals("Findings saved for test monitor", findingSize, findings.size)
-
-        val findingDocIds = findings.flatMap { it.relatedDocIds }
-
-        assertEquals("Didn't match $matchedQueryNumber query", matchedQueryNumber, findingDocIds.size)
-        assertTrue("Findings saved for test monitor", relatedDocIds.containsAll(findingDocIds))
-    }
-
-    private fun getAuditStateAlerts(
-        alertsIndex: String? = AlertIndices.ALERT_INDEX,
-        monitorId: String,
-        executionId: String? = null,
-    ): List<Alert> {
-        val searchRequest = SearchRequest(alertsIndex)
-        val boolQueryBuilder = QueryBuilders.boolQuery()
-        boolQueryBuilder.must(TermQueryBuilder("monitor_id", monitorId))
-        if (executionId.isNullOrEmpty() == false)
-            boolQueryBuilder.must(TermQueryBuilder("execution_id", executionId))
-        searchRequest.source().query(boolQueryBuilder)
-        val searchResponse = client().search(searchRequest).get()
-        return searchResponse.hits.map { hit ->
-            val xcp = XContentHelper.createParser(
-                xContentRegistry(), LoggingDeprecationHandler.INSTANCE,
-                hit.sourceRef, XContentType.JSON
-            )
-            XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp)
-            val alert = Alert.parse(xcp, hit.id, hit.version)
-            alert
-        }
-    }
-
-    private fun assertAlerts(
-        monitorId: String,
-        alertsIndex: String? = AlertIndices.ALERT_INDEX,
-        executionId: String? = null,
-        alertSize: Int,
-        workflowId: String,
-    ): GetAlertsResponse {
-        val alerts = searchAlerts(monitorId, alertsIndex!!, executionId = executionId)
-        assertEquals("Alert saved for test monitor", alertSize, alerts.size)
-        val table = Table("asc", "id", null, alertSize, 0, "")
-        var getAlertsResponse = client()
-            .execute(
-                AlertingActions.GET_ALERTS_ACTION_TYPE,
-                GetAlertsRequest(table, "ALL", "ALL", null, alertsIndex)
-            )
-            .get()
-        assertTrue(getAlertsResponse != null)
-        assertTrue(getAlertsResponse.alerts.size == alertSize)
-        getAlertsResponse = client()
-            .execute(
-                AlertingActions.GET_ALERTS_ACTION_TYPE,
-                GetAlertsRequest(table, "ALL", "ALL", monitorId, null, workflowIds = listOf(workflowId))
-            )
-            .get()
-        assertTrue(getAlertsResponse != null)
-        assertTrue(getAlertsResponse.alerts.size == alertSize)
-
-        return getAlertsResponse
-    }
-
-    private fun assertAcknowledges(
-        alerts: List<Alert>,
-        monitorId: String,
-        alertSize: Int,
-    ) {
-        val alertIds = alerts.map { it.id }
-        val acknowledgeAlertResponse = client().execute(
-            AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE,
-            AcknowledgeAlertRequest(monitorId, alertIds, WriteRequest.RefreshPolicy.IMMEDIATE)
-        ).get()
-
-        assertEquals(alertSize, acknowledgeAlertResponse.acknowledged.size)
-    }
-
-    private fun verifyAcknowledgeChainedAlerts(
-        alerts: List<Alert>,
-        workflowId: String,
-        alertSize: Int,
-    ) {
-        val alertIds = alerts.map { it.id }.toMutableList()
-        val acknowledgeAlertResponse = ackChainedAlerts(alertIds, workflowId)
-        assertTrue(acknowledgeAlertResponse.acknowledged.stream().map { it.id }.collect(Collectors.toList()).containsAll(alertIds))
-        assertEquals(alertSize, acknowledgeAlertResponse.acknowledged.size)
-        alertIds.add("dummy")
-        val redundantAck = ackChainedAlerts(alertIds, workflowId)
-        Assert.assertTrue(redundantAck.acknowledged.isEmpty())
-        Assert.assertTrue(redundantAck.missing.contains("dummy"))
-        alertIds.remove("dummy")
-        Assert.assertTrue(redundantAck.failed.map { it.id }.toList().containsAll(alertIds))
-    }
-
-    private fun ackChainedAlerts(alertIds: List<String>, workflowId: String): AcknowledgeAlertResponse {
-
-        return client().execute(
-            AlertingActions.ACKNOWLEDGE_CHAINED_ALERTS_ACTION_TYPE,
-            AcknowledgeChainedAlertRequest(workflowId, alertIds)
-        ).get()
-    }
-
-    private fun assertAuditStateAlerts(
-        monitorId: String,
-        alerts: List<Alert>,
-    ) {
-        alerts.forEach { Assert.assertEquals(it.state, Alert.State.AUDIT) }
-        val alertIds = alerts.stream().map { it.id }.collect(Collectors.toList())
-        val ack = client().execute(
-            AlertingActions.ACKNOWLEDGE_ALERTS_ACTION_TYPE,
-            AcknowledgeAlertRequest(monitorId, alertIds, WriteRequest.RefreshPolicy.IMMEDIATE)
-
).get() - Assert.assertTrue(ack.acknowledged.isEmpty()) - Assert.assertTrue(ack.missing.containsAll(alertIds)) - Assert.assertTrue(ack.failed.isEmpty()) - } - - fun `test execute workflow with bucket-level and doc-level chained monitors`() { - createTestIndex(TEST_HR_INDEX) - - val compositeSources = listOf( - TermsValuesSourceBuilder("test_field_1").field("test_field_1") - ) - val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) - val input = SearchInput( - indices = listOf(TEST_HR_INDEX), - query = SearchSourceBuilder().size(0).query(QueryBuilders.matchAllQuery()).aggregation(compositeAgg) - ) - val triggerScript = """ - params.docCount > 0 - """.trimIndent() - - var trigger = randomBucketLevelTrigger() - trigger = trigger.copy( - bucketSelector = BucketSelectorExtAggregationBuilder( - name = trigger.id, - bucketsPathsMap = mapOf("docCount" to "_count"), - script = Script(triggerScript), - parentBucketPath = "composite_agg", - filter = null - ), - actions = listOf() - ) - val bucketMonitor = createMonitor( - randomBucketLevelMonitor( - inputs = listOf(input), - enabled = false, - triggers = listOf(trigger) - ) - ) - assertNotNull("The bucket monitor was not created", bucketMonitor) - - val docQuery1 = DocLevelQuery(query = "test_field_1:\"a\"", name = "3", fields = listOf()) - var monitor1 = randomDocumentLevelMonitor( - inputs = listOf(DocLevelMonitorInput("description", listOf(TEST_HR_INDEX), listOf(docQuery1))), - triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)) - ) - val docMonitor = createMonitor(monitor1)!! - assertNotNull("The doc level monitor was not created", docMonitor) - - val workflow = randomWorkflow(monitorIds = listOf(bucketMonitor!!.id, docMonitor.id)) - val workflowResponse = upsertWorkflow(workflow) - assertNotNull("The workflow was not created", workflowResponse) - - // Add a doc that is accessible to the user - indexDoc( - TEST_HR_INDEX, - "1", - """ - { - "test_field_1": "a", - "accessible": true - } - """.trimIndent() - ) - - // Add a second doc that is not accessible to the user - indexDoc( - TEST_HR_INDEX, - "2", - """ - { - "test_field_1": "b", - "accessible": false - } - """.trimIndent() - ) - - indexDoc( - TEST_HR_INDEX, - "3", - """ - { - "test_field_1": "c", - "accessible": true - } - """.trimIndent() - ) - - val executeResult = executeWorkflow(id = workflowResponse!!.id) - assertNotNull(executeResult) - assertEquals(2, executeResult!!.workflowRunResult.monitorRunResults.size) - } - - fun `test chained alerts for AND OR and NOT conditions with custom alerts indices`() { - val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) - val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customFindingsIndex1 = "custom_findings_index" - val customFindingsIndexPattern1 = "custom_findings_index-1" - val customAlertsIndex = "custom_alerts_index" - val customAlertsHistoryIndex = "custom_alerts_history_index" - val customAlertsHistoryIndexPattern = "" - var monitor1 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput1), - triggers = listOf(trigger1), - dataSources = DataSources( - findingsIndex = customFindingsIndex1, - findingsIndexPattern = customFindingsIndexPattern1, - alertsIndex = customAlertsIndex, - alertsHistoryIndex = customAlertsHistoryIndex, - alertsHistoryIndexPattern = customAlertsHistoryIndexPattern - ) - ) - val monitorResponse = 
createMonitor(monitor1)!! - - val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) - val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2)) - val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customFindingsIndex2 = "custom_findings_index_2" - val customFindingsIndexPattern2 = "custom_findings_index-2" - var monitor2 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput2), - triggers = listOf(trigger2), - dataSources = DataSources( - findingsIndex = customFindingsIndex2, - findingsIndexPattern = customFindingsIndexPattern2, - alertsIndex = customAlertsIndex, - alertsHistoryIndex = customAlertsHistoryIndex, - alertsHistoryIndexPattern = customAlertsHistoryIndexPattern - ) - ) - - val monitorResponse2 = createMonitor(monitor2)!! - val andTrigger = randomChainedAlertTrigger( - name = "1And2", - condition = Script("monitor[id=${monitorResponse.id}] && monitor[id=${monitorResponse2.id}]") - ) - val notTrigger = randomChainedAlertTrigger( - name = "Not1OrNot2", - condition = Script("!monitor[id=${monitorResponse.id}] || !monitor[id=${monitorResponse2.id}]") - ) - var workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id, monitorResponse2.id), - triggers = listOf(andTrigger, notTrigger) - ) - val workflowResponse = upsertWorkflow(workflow)!! - val workflowById = searchWorkflow(workflowResponse.id) - assertNotNull(workflowById) - val workflowId = workflowResponse.id - - var executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! - var triggerResults = executeWorkflowResponse.workflowRunResult.triggerResults - Assert.assertEquals(triggerResults.size, 2) - Assert.assertTrue(triggerResults.containsKey(andTrigger.id)) - Assert.assertTrue(triggerResults.containsKey(notTrigger.id)) - var andTriggerResult = triggerResults[andTrigger.id] - var notTriggerResult = triggerResults[notTrigger.id] - Assert.assertTrue(notTriggerResult!!.triggered) - Assert.assertFalse(andTriggerResult!!.triggered) - var res = - getWorkflowAlerts(workflowId = workflowId, alertIndex = customAlertsIndex, associatedAlertsIndex = customAlertsHistoryIndex) - var chainedAlerts = res.alerts - Assert.assertTrue(chainedAlerts.size == 1) - Assert.assertTrue(res.associatedAlerts.isEmpty()) - verifyAcknowledgeChainedAlerts(chainedAlerts, workflowId, 1) - Assert.assertTrue(chainedAlerts[0].executionId == executeWorkflowResponse.workflowRunResult.executionId) - Assert.assertTrue(chainedAlerts[0].monitorId == "") - Assert.assertTrue(chainedAlerts[0].triggerId == notTrigger.id) - var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) - // Matches monitor1 - val testDoc1 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16644, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-west-2" - }""" - indexDoc(index, "1", testDoc1) - - testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) - // Matches monitor1 and monitor2 - val testDoc2 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16645, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-west-2" - }""" - indexDoc(index, "2", testDoc2) - - testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) - // Doesn't match - val testDoc3 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16645, - 
"test_strict_date_time" : "$testTime", - "test_field_1" : "us-east-1" - }""" - indexDoc(index, "3", testDoc3) - executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! - triggerResults = executeWorkflowResponse.workflowRunResult.triggerResults - Assert.assertEquals(triggerResults.size, 2) - Assert.assertTrue(triggerResults.containsKey(andTrigger.id)) - Assert.assertTrue(triggerResults.containsKey(notTrigger.id)) - andTriggerResult = triggerResults[andTrigger.id] - notTriggerResult = triggerResults[notTrigger.id] - Assert.assertFalse(notTriggerResult!!.triggered) - Assert.assertTrue(andTriggerResult!!.triggered) - res = getWorkflowAlerts(workflowId, alertIndex = customAlertsIndex, associatedAlertsIndex = customAlertsHistoryIndex) - chainedAlerts = res.alerts - val numChainedAlerts = 1 - Assert.assertTrue(res.associatedAlerts.isNotEmpty()) - Assert.assertTrue(chainedAlerts.size == numChainedAlerts) - Assert.assertTrue(chainedAlerts[0].executionId == executeWorkflowResponse.workflowRunResult.executionId) - Assert.assertTrue(chainedAlerts[0].monitorId == "") - Assert.assertTrue(chainedAlerts[0].triggerId == andTrigger.id) - val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults - assertEquals(2, monitorsRunResults.size) - - assertEquals(monitor1.name, monitorsRunResults[0].monitorName) - assertEquals(1, monitorsRunResults[0].triggerResults.size) - - Assert.assertEquals(monitor2.name, monitorsRunResults[1].monitorName) - Assert.assertEquals(1, monitorsRunResults[1].triggerResults.size) - - Assert.assertEquals( - monitor1.dataSources.alertsHistoryIndex, - CompositeWorkflowRunner.getDelegateMonitorAlertIndex(dataSources = monitor1.dataSources, workflow, true) - ) - val alerts = getAuditStateAlerts( - monitorId = monitorResponse.id, executionId = executeWorkflowResponse.workflowRunResult.executionId, - alertsIndex = monitor1.dataSources.alertsHistoryIndex, - ) - assertAuditStateAlerts(monitorResponse.id, alerts) - assertFindings(monitorResponse.id, customFindingsIndex1, 2, 2, listOf("1", "2")) - val associatedAlertIds = res.associatedAlerts.map { it.id }.toList() - associatedAlertIds.containsAll(alerts.map { it.id }.toList()) - val alerts1 = getAuditStateAlerts( - alertsIndex = monitor2.dataSources.alertsHistoryIndex, monitorId = monitorResponse2.id, - executionId = executeWorkflowResponse.workflowRunResult.executionId, - ) - assertAuditStateAlerts(monitorResponse2.id, alerts1) - assertFindings(monitorResponse2.id, customFindingsIndex2, 1, 1, listOf("2")) - associatedAlertIds.containsAll(alerts1.map { it.id }.toList()) - verifyAcknowledgeChainedAlerts(chainedAlerts, workflowId, numChainedAlerts) - } - - fun `test chained alerts for AND OR and NOT conditions`() { - val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) - val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customFindingsIndex1 = "custom_findings_index" - val customFindingsIndexPattern1 = "custom_findings_index-1" - var monitor1 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput1), - triggers = listOf(trigger1), - dataSources = DataSources( - findingsIndex = customFindingsIndex1, - findingsIndexPattern = customFindingsIndexPattern1 - ) - ) - val monitorResponse = createMonitor(monitor1)!! 
- - val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) - val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2)) - val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customFindingsIndex2 = "custom_findings_index_2" - val customFindingsIndexPattern2 = "custom_findings_index-2" - var monitor2 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput2), - triggers = listOf(trigger2), - dataSources = DataSources( - findingsIndex = customFindingsIndex2, - findingsIndexPattern = customFindingsIndexPattern2 - ) - ) - - val monitorResponse2 = createMonitor(monitor2)!! - val andTrigger = randomChainedAlertTrigger( - name = "1And2", - condition = Script("monitor[id=${monitorResponse.id}] && monitor[id=${monitorResponse2.id}]") - ) - val notTrigger = randomChainedAlertTrigger( - name = "Not1OrNot2", - condition = Script("!monitor[id=${monitorResponse.id}] || !monitor[id=${monitorResponse2.id}]") - ) - var workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id, monitorResponse2.id), - triggers = listOf(andTrigger, notTrigger) - ) - val workflowResponse = upsertWorkflow(workflow)!! - val workflowById = searchWorkflow(workflowResponse.id) - assertNotNull(workflowById) - val workflowId = workflowResponse.id - - var executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! - var triggerResults = executeWorkflowResponse.workflowRunResult.triggerResults - Assert.assertEquals(triggerResults.size, 2) - Assert.assertTrue(triggerResults.containsKey(andTrigger.id)) - Assert.assertTrue(triggerResults.containsKey(notTrigger.id)) - var andTriggerResult = triggerResults[andTrigger.id] - var notTriggerResult = triggerResults[notTrigger.id] - Assert.assertTrue(notTriggerResult!!.triggered) - Assert.assertFalse(andTriggerResult!!.triggered) - var res = getWorkflowAlerts( - workflowId, - ) - var chainedAlerts = res.alerts - Assert.assertTrue(chainedAlerts.size == 1) - - // verify get alerts api with defaults set in query params returns only chained alerts and not audit alerts - val table = Table("asc", "id", null, 1, 0, "") - val getAlertsDefaultParamsResponse = client().execute( - AlertingActions.GET_ALERTS_ACTION_TYPE, - GetAlertsRequest( - table = table, - severityLevel = "ALL", - alertState = "ALL", - monitorId = null, - alertIndex = null, - monitorIds = null, - workflowIds = null, - alertIds = null - ) - ).get() - Assert.assertEquals(getAlertsDefaultParamsResponse.alerts.size, 1) - Assert.assertTrue(res.associatedAlerts.isEmpty()) - verifyAcknowledgeChainedAlerts(chainedAlerts, workflowId, 1) - Assert.assertTrue(chainedAlerts[0].executionId == executeWorkflowResponse.workflowRunResult.executionId) - Assert.assertTrue(chainedAlerts[0].monitorId == "") - Assert.assertTrue(chainedAlerts[0].triggerId == notTrigger.id) - var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) - // Matches monitor1 - val testDoc1 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16644, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-west-2" - }""" - indexDoc(index, "1", testDoc1) - - testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) - // Matches monitor1 and monitor2 - val testDoc2 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16645, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-west-2" - }""" - 
indexDoc(index, "2", testDoc2) - - testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) - // Doesn't match - val testDoc3 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16645, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-east-1" - }""" - indexDoc(index, "3", testDoc3) - executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! - triggerResults = executeWorkflowResponse.workflowRunResult.triggerResults - Assert.assertEquals(triggerResults.size, 2) - Assert.assertTrue(triggerResults.containsKey(andTrigger.id)) - Assert.assertTrue(triggerResults.containsKey(notTrigger.id)) - andTriggerResult = triggerResults[andTrigger.id] - notTriggerResult = triggerResults[notTrigger.id] - Assert.assertFalse(notTriggerResult!!.triggered) - Assert.assertTrue(andTriggerResult!!.triggered) - val getAuditAlertsForMonitor1 = client().execute( - AlertingActions.GET_ALERTS_ACTION_TYPE, - GetAlertsRequest( - table = table, - severityLevel = "ALL", - alertState = "AUDIT", - monitorId = monitorResponse.id, - alertIndex = null, - monitorIds = null, - workflowIds = listOf(workflowId), - alertIds = null - ) - ).get() - Assert.assertEquals(getAuditAlertsForMonitor1.alerts.size, 1) - res = getWorkflowAlerts(workflowId) - chainedAlerts = res.alerts - Assert.assertTrue(chainedAlerts.size == 1) - Assert.assertTrue(res.associatedAlerts.isNotEmpty()) - Assert.assertTrue(chainedAlerts[0].executionId == executeWorkflowResponse.workflowRunResult.executionId) - Assert.assertTrue(chainedAlerts[0].monitorId == "") - Assert.assertTrue(chainedAlerts[0].triggerId == andTrigger.id) - val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults - assertEquals(2, monitorsRunResults.size) - - assertEquals(monitor1.name, monitorsRunResults[0].monitorName) - assertEquals(1, monitorsRunResults[0].triggerResults.size) - - Assert.assertEquals(monitor2.name, monitorsRunResults[1].monitorName) - Assert.assertEquals(1, monitorsRunResults[1].triggerResults.size) - - Assert.assertEquals( - monitor1.dataSources.alertsHistoryIndex, - CompositeWorkflowRunner.getDelegateMonitorAlertIndex(dataSources = monitor1.dataSources, workflow, true) - ) - val alerts = getAuditStateAlerts( - alertsIndex = monitor1.dataSources.alertsHistoryIndex, monitorId = monitorResponse.id, - executionId = executeWorkflowResponse.workflowRunResult.executionId - ) - val associatedAlertIds = res.associatedAlerts.map { it.id }.toList() - associatedAlertIds.containsAll(alerts.map { it.id }.toList()) - assertAuditStateAlerts(monitorResponse.id, alerts) - assertFindings(monitorResponse.id, customFindingsIndex1, 2, 2, listOf("1", "2")) - - val alerts1 = getAuditStateAlerts( - alertsIndex = monitor2.dataSources.alertsHistoryIndex, monitorId = monitorResponse2.id, - executionId = executeWorkflowResponse.workflowRunResult.executionId - ) - associatedAlertIds.containsAll(alerts1.map { it.id }.toList()) - assertAuditStateAlerts(monitorResponse2.id, alerts1) - assertFindings(monitorResponse2.id, customFindingsIndex2, 1, 1, listOf("2")) - verifyAcknowledgeChainedAlerts(chainedAlerts, workflowId, 1) - // test redundant executions of workflow dont query old data again to verify metadata updation works fine - val redundantExec = executeWorkflow(workflow) - Assert.assertFalse(redundantExec?.workflowRunResult!!.triggerResults[andTrigger.id]!!.triggered) - Assert.assertTrue(redundantExec.workflowRunResult.triggerResults[notTrigger.id]!!.triggered) - } 
-
-    private fun getDelegateMonitorMetadataId(
-        workflowMetadata: WorkflowMetadata?,
-        monitorResponse: IndexMonitorResponse,
-    ) = "${workflowMetadata!!.id}-${monitorResponse.id}-metadata"
-
-    fun `test create workflow success`() {
-        val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())
-        val docLevelInput = DocLevelMonitorInput(
-            "description", listOf(index), listOf(docQuery1)
-        )
-        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-        val customFindingsIndex = "custom_findings_index"
-        val customFindingsIndexPattern = "custom_findings_index-1"
-        val customQueryIndex = "custom_alerts_index"
-        val monitor1 = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput),
-            triggers = listOf(trigger),
-            dataSources = DataSources(
-                queryIndex = customQueryIndex,
-                findingsIndex = customFindingsIndex,
-                findingsIndexPattern = customFindingsIndexPattern
-            )
-        )
-
-        val monitor2 = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput),
-            triggers = listOf(trigger),
-            dataSources = DataSources(
-                queryIndex = customQueryIndex,
-                findingsIndex = customFindingsIndex,
-                findingsIndexPattern = customFindingsIndexPattern
-            )
-        )
-
-        val monitorResponse1 = createMonitor(monitor1)!!
-        val monitorResponse2 = createMonitor(monitor2)!!
-
-        val workflow = randomWorkflow(
-            monitorIds = listOf(monitorResponse1.id, monitorResponse2.id)
-        )
-
-        val workflowResponse = upsertWorkflow(workflow)!!
-        assertNotNull("Workflow creation failed", workflowResponse)
-        assertNotNull(workflowResponse.workflow)
-        assertNotEquals("response is missing Id", Monitor.NO_ID, workflowResponse.id)
-        assertTrue("incorrect version", workflowResponse.version > 0)
-
-        val workflowById = searchWorkflow(workflowResponse.id)!!
-        assertNotNull(workflowById)
-
-        // Verify workflow
-        assertNotEquals("response is missing Id", Monitor.NO_ID, workflowById.id)
-        assertTrue("incorrect version", workflowById.version > 0)
-        assertEquals("Workflow name not correct", workflow.name, workflowById.name)
-        assertEquals("Workflow owner not correct", workflow.owner, workflowById.owner)
-        assertEquals("Workflow input not correct", workflow.inputs, workflowById.inputs)
-
-        // Delegate verification
-        @Suppress("UNCHECKED_CAST")
-        val delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order }
-        assertEquals("Delegates size not correct", 2, delegates.size)
-
-        val delegate1 = delegates[0]
-        assertNotNull(delegate1)
-        assertEquals("Delegate1 order not correct", 1, delegate1.order)
-        assertEquals("Delegate1 id not correct", monitorResponse1.id, delegate1.monitorId)
-
-        val delegate2 = delegates[1]
-        assertNotNull(delegate2)
-        assertEquals("Delegate2 order not correct", 2, delegate2.order)
-        assertEquals("Delegate2 id not correct", monitorResponse2.id, delegate2.monitorId)
-        assertEquals(
-            "Delegate2 Chained finding not correct", monitorResponse1.id, delegate2.chainedMonitorFindings!!.monitorId
-        )
-    }
-
-    fun `test update workflow add monitor success`() {
-        val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())
-        val docLevelInput = DocLevelMonitorInput(
-            "description", listOf(index), listOf(docQuery1)
-        )
-        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-        val customFindingsIndex = "custom_findings_index"
-        val customFindingsIndexPattern = "custom_findings_index-1"
-        val customQueryIndex = "custom_alerts_index"
-        val monitor1 = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput),
-            triggers = listOf(trigger),
-
dataSources = DataSources( - queryIndex = customQueryIndex, - findingsIndex = customFindingsIndex, - findingsIndexPattern = customFindingsIndexPattern - ) - ) - - val monitor2 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources( - queryIndex = customQueryIndex, - findingsIndex = customFindingsIndex, - findingsIndexPattern = customFindingsIndexPattern - ) - ) - - val monitorResponse1 = createMonitor(monitor1)!! - val monitorResponse2 = createMonitor(monitor2)!! - - val workflow = randomWorkflow( - monitorIds = listOf(monitorResponse1.id, monitorResponse2.id) - ) - - val workflowResponse = upsertWorkflow(workflow)!! - assertNotNull("Workflow creation failed", workflowResponse) - assertNotNull(workflowResponse.workflow) - assertNotEquals("response is missing Id", Monitor.NO_ID, workflowResponse.id) - assertTrue("incorrect version", workflowResponse.version > 0) - - var workflowById = searchWorkflow(workflowResponse.id)!! - assertNotNull(workflowById) - - val monitor3 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources( - queryIndex = customQueryIndex, - findingsIndex = customFindingsIndex, - findingsIndexPattern = customFindingsIndexPattern - ) - ) - val monitorResponse3 = createMonitor(monitor3)!! - - val updatedWorkflowResponse = upsertWorkflow( - randomWorkflow( - monitorIds = listOf(monitorResponse1.id, monitorResponse2.id, monitorResponse3.id) - ), - workflowResponse.id, - RestRequest.Method.PUT - )!! - - assertNotNull("Workflow creation failed", updatedWorkflowResponse) - assertNotNull(updatedWorkflowResponse.workflow) - assertEquals("Workflow id changed", workflowResponse.id, updatedWorkflowResponse.id) - assertTrue("incorrect version", updatedWorkflowResponse.version > 0) - - workflowById = searchWorkflow(updatedWorkflowResponse.id)!! 
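-
-        // Editor's note (inferred from the assertions that follow, not part of the original change):
-        // on update, the workflow rebuilds the delegate sequence, assigning order 1..N and pointing each
-        // delegate's chainedMonitorFindings at the previous delegate's monitor id; the block below
-        // verifies exactly that.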
-
-        // Verify workflow
-        assertNotEquals("response is missing Id", Monitor.NO_ID, workflowById.id)
-        assertTrue("incorrect version", workflowById.version > 0)
-        assertEquals("Workflow name not correct", updatedWorkflowResponse.workflow.name, workflowById.name)
-        assertEquals("Workflow owner not correct", updatedWorkflowResponse.workflow.owner, workflowById.owner)
-        assertEquals("Workflow input not correct", updatedWorkflowResponse.workflow.inputs, workflowById.inputs)
-
-        // Delegate verification
-        @Suppress("UNCHECKED_CAST")
-        val delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order }
-        assertEquals("Delegates size not correct", 3, delegates.size)
-
-        val delegate1 = delegates[0]
-        assertNotNull(delegate1)
-        assertEquals("Delegate1 order not correct", 1, delegate1.order)
-        assertEquals("Delegate1 id not correct", monitorResponse1.id, delegate1.monitorId)
-
-        val delegate2 = delegates[1]
-        assertNotNull(delegate2)
-        assertEquals("Delegate2 order not correct", 2, delegate2.order)
-        assertEquals("Delegate2 id not correct", monitorResponse2.id, delegate2.monitorId)
-        assertEquals(
-            "Delegate2 Chained finding not correct", monitorResponse1.id, delegate2.chainedMonitorFindings!!.monitorId
-        )
-
-        val delegate3 = delegates[2]
-        assertNotNull(delegate3)
-        assertEquals("Delegate3 order not correct", 3, delegate3.order)
-        assertEquals("Delegate3 id not correct", monitorResponse3.id, delegate3.monitorId)
-        assertEquals(
-            "Delegate3 Chained finding not correct", monitorResponse2.id, delegate3.chainedMonitorFindings!!.monitorId
-        )
-    }
-
-    fun `test update workflow change order of delegate monitors`() {
-        val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())
-        val docLevelInput = DocLevelMonitorInput(
-            "description", listOf(index), listOf(docQuery1)
-        )
-        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-        val customFindingsIndex = "custom_findings_index"
-        val customFindingsIndexPattern = "custom_findings_index-1"
-        val customQueryIndex = "custom_alerts_index"
-        val monitor1 = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput),
-            triggers = listOf(trigger),
-            dataSources = DataSources(
-                queryIndex = customQueryIndex,
-                findingsIndex = customFindingsIndex,
-                findingsIndexPattern = customFindingsIndexPattern
-            )
-        )
-
-        val monitor2 = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput),
-            triggers = listOf(trigger),
-            dataSources = DataSources(
-                queryIndex = customQueryIndex,
-                findingsIndex = customFindingsIndex,
-                findingsIndexPattern = customFindingsIndexPattern
-            )
-        )
-
-        val monitorResponse1 = createMonitor(monitor1)!!
-        val monitorResponse2 = createMonitor(monitor2)!!
-
-        val workflow = randomWorkflow(
-            monitorIds = listOf(monitorResponse1.id, monitorResponse2.id)
-        )
-
-        val workflowResponse = upsertWorkflow(workflow)!!
-        assertNotNull("Workflow creation failed", workflowResponse)
-        assertNotNull(workflowResponse.workflow)
-        assertNotEquals("response is missing Id", Monitor.NO_ID, workflowResponse.id)
-        assertTrue("incorrect version", workflowResponse.version > 0)
-
-        var workflowById = searchWorkflow(workflowResponse.id)!!
-        assertNotNull(workflowById)
-
-        val updatedWorkflowResponse = upsertWorkflow(
-            randomWorkflow(
-                monitorIds = listOf(monitorResponse2.id, monitorResponse1.id)
-            ),
-            workflowResponse.id,
-            RestRequest.Method.PUT
-        )!!
- - assertNotNull("Workflow creation failed", updatedWorkflowResponse) - assertNotNull(updatedWorkflowResponse.workflow) - assertEquals("Workflow id changed", workflowResponse.id, updatedWorkflowResponse.id) - assertTrue("incorrect version", updatedWorkflowResponse.version > 0) - - workflowById = searchWorkflow(updatedWorkflowResponse.id)!! - - // Verify workflow - assertNotEquals("response is missing Id", Monitor.NO_ID, workflowById.id) - assertTrue("incorrect version", workflowById.version > 0) - assertEquals("Workflow name not correct", updatedWorkflowResponse.workflow.name, workflowById.name) - assertEquals("Workflow owner not correct", updatedWorkflowResponse.workflow.owner, workflowById.owner) - assertEquals("Workflow input not correct", updatedWorkflowResponse.workflow.inputs, workflowById.inputs) - - // Delegate verification - @Suppress("UNCHECKED_CAST") - val delegates = (workflowById.inputs as List)[0].sequence.delegates.sortedBy { it.order } - assertEquals("Delegates size not correct", 2, delegates.size) - - val delegate1 = delegates[0] - assertNotNull(delegate1) - assertEquals("Delegate1 order not correct", 1, delegate1.order) - assertEquals("Delegate1 id not correct", monitorResponse2.id, delegate1.monitorId) - - val delegate2 = delegates[1] - assertNotNull(delegate2) - assertEquals("Delegate2 order not correct", 2, delegate2.order) - assertEquals("Delegate2 id not correct", monitorResponse1.id, delegate2.monitorId) - assertEquals( - "Delegate2 Chained finding not correct", monitorResponse2.id, delegate2.chainedMonitorFindings!!.monitorId - ) - } - - fun `test update workflow remove monitor success`() { - val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(docQuery1) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customFindingsIndex = "custom_findings_index" - val customFindingsIndexPattern = "custom_findings_index-1" - val customQueryIndex = "custom_alerts_index" - val monitor1 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources( - queryIndex = customQueryIndex, - findingsIndex = customFindingsIndex, - findingsIndexPattern = customFindingsIndexPattern - ) - ) - - val monitor2 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources( - queryIndex = customQueryIndex, - findingsIndex = customFindingsIndex, - findingsIndexPattern = customFindingsIndexPattern - ) - ) - - val monitorResponse1 = createMonitor(monitor1)!! - val monitorResponse2 = createMonitor(monitor2)!! - - val workflow = randomWorkflow( - monitorIds = listOf(monitorResponse1.id, monitorResponse2.id) - ) - - val workflowResponse = upsertWorkflow(workflow)!! - assertNotNull("Workflow creation failed", workflowResponse) - assertNotNull(workflowResponse.workflow) - assertNotEquals("response is missing Id", Monitor.NO_ID, workflowResponse.id) - assertTrue("incorrect version", workflowResponse.version > 0) - - var workflowById = searchWorkflow(workflowResponse.id)!! - assertNotNull(workflowById) - - val updatedWorkflowResponse = upsertWorkflow( - randomWorkflow( - monitorIds = listOf(monitorResponse1.id) - ), - workflowResponse.id, - RestRequest.Method.PUT - )!! 
- - assertNotNull("Workflow creation failed", updatedWorkflowResponse) - assertNotNull(updatedWorkflowResponse.workflow) - assertEquals("Workflow id changed", workflowResponse.id, updatedWorkflowResponse.id) - assertTrue("incorrect version", updatedWorkflowResponse.version > 0) - - workflowById = searchWorkflow(updatedWorkflowResponse.id)!! - - // Verify workflow - assertNotEquals("response is missing Id", Monitor.NO_ID, workflowById.id) - assertTrue("incorrect version", workflowById.version > 0) - assertEquals("Workflow name not correct", updatedWorkflowResponse.workflow.name, workflowById.name) - assertEquals("Workflow owner not correct", updatedWorkflowResponse.workflow.owner, workflowById.owner) - assertEquals("Workflow input not correct", updatedWorkflowResponse.workflow.inputs, workflowById.inputs) - - // Delegate verification - @Suppress("UNCHECKED_CAST") - val delegates = (workflowById.inputs as List)[0].sequence.delegates.sortedBy { it.order } - assertEquals("Delegates size not correct", 1, delegates.size) - - val delegate1 = delegates[0] - assertNotNull(delegate1) - assertEquals("Delegate1 order not correct", 1, delegate1.order) - assertEquals("Delegate1 id not correct", monitorResponse1.id, delegate1.monitorId) - } - - fun `test update workflow doesn't exist failure`() { - val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(docQuery1) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val customFindingsIndex = "custom_findings_index" - val customFindingsIndexPattern = "custom_findings_index-1" - val customQueryIndex = "custom_alerts_index" - val monitor1 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - dataSources = DataSources( - queryIndex = customQueryIndex, - findingsIndex = customFindingsIndex, - findingsIndexPattern = customFindingsIndexPattern - ) - ) - - val monitorResponse1 = createMonitor(monitor1)!! - - val workflow = randomWorkflow( - monitorIds = listOf(monitorResponse1.id) - ) - val workflowResponse = upsertWorkflow(workflow)!! - assertNotNull("Workflow creation failed", workflowResponse) - - try { - upsertWorkflow(workflow, "testId", RestRequest.Method.PUT) - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception not returning GetWorkflow Action error ", - it.contains("Workflow with testId is not found") - ) - } - } - } - - fun `test get workflow`() { - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - ) - - val monitorResponse = createMonitor(monitor)!! - - val workflowRequest = randomWorkflow( - monitorIds = listOf(monitorResponse.id) - ) - - val workflowResponse = upsertWorkflow(workflowRequest)!! - assertNotNull("Workflow creation failed", workflowResponse) - assertNotNull(workflowResponse.workflow) - assertNotEquals("response is missing Id", Monitor.NO_ID, workflowResponse.id) - assertTrue("incorrect version", workflowResponse.version > 0) - - val getWorkflowResponse = getWorkflowById(id = workflowResponse.id) - assertNotNull(getWorkflowResponse) - - val workflowById = getWorkflowResponse.workflow!! 
-        // Verify workflow
-        assertNotEquals("response is missing Id", Monitor.NO_ID, getWorkflowResponse.id)
-        assertTrue("incorrect version", getWorkflowResponse.version > 0)
-        assertEquals("Workflow name not correct", workflowRequest.name, workflowById.name)
-        assertEquals("Workflow owner not correct", workflowRequest.owner, workflowById.owner)
-        assertEquals("Workflow input not correct", workflowRequest.inputs, workflowById.inputs)
-
-        // Delegate verification
-        @Suppress("UNCHECKED_CAST")
-        val delegates = (workflowById.inputs as List<CompositeInput>)[0].sequence.delegates.sortedBy { it.order }
-        assertEquals("Delegates size not correct", 1, delegates.size)
-
-        val delegate = delegates[0]
-        assertNotNull(delegate)
-        assertEquals("Delegate order not correct", 1, delegate.order)
-        assertEquals("Delegate id not correct", monitorResponse.id, delegate.monitorId)
-    }
-
-    fun `test get workflow for invalid id monitor index doesn't exist`() {
-        // Get workflow for non existing workflow id
-        try {
-            getWorkflowById(id = "-1")
-        } catch (e: Exception) {
-            e.message?.let {
-                assertTrue(
-                    "Exception not returning GetWorkflow Action error ",
-                    it.contains("Workflow not found")
-                )
-            }
-        }
-    }
-
-    fun `test get workflow for invalid id monitor index exists`() {
-        val docLevelInput = DocLevelMonitorInput(
-            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
-        )
-        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-        val monitor = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput),
-            triggers = listOf(trigger),
-        )
-        createMonitor(monitor)
-        // Get workflow for non existing workflow id
-        try {
-            getWorkflowById(id = "-1")
-        } catch (e: Exception) {
-            e.message?.let {
-                assertTrue(
-                    "Exception not returning GetWorkflow Action error ",
-                    it.contains("Workflow not found")
-                )
-            }
-        }
-    }
-
-    fun `test delete workflow keeping delegate monitor`() {
-        val docLevelInput = DocLevelMonitorInput(
-            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
-        )
-        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-
-        val monitor = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput),
-            triggers = listOf(trigger)
-        )
-
-        val monitorResponse = createMonitor(monitor)!!
-
-        val workflowRequest = randomWorkflow(
-            monitorIds = listOf(monitorResponse.id)
-        )
-        val workflowResponse = upsertWorkflow(workflowRequest)!!
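-
-        // Editor's note (not part of the original change): the boolean passed to deleteWorkflow below
-        // toggles deletion of the delegate monitors; this test passes false (monitor kept), the next
-        // test passes true (monitor deleted), and a later test verifies that a delegate shared with
-        // another workflow is never deleted.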
- val workflowId = workflowResponse.id - val getWorkflowResponse = getWorkflowById(id = workflowResponse.id) - - assertNotNull(getWorkflowResponse) - assertEquals(workflowId, getWorkflowResponse.id) - - deleteWorkflow(workflowId, false) - // Verify that the workflow is deleted - try { - getWorkflowById(workflowId) - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception not returning GetWorkflow Action error ", - it.contains("Workflow not found.") - ) - } - } - // Verify that the monitor is not deleted - val existingDelegate = getMonitorResponse(monitorResponse.id) - assertNotNull(existingDelegate) - } - - fun `test delete workflow delegate monitor deleted`() { - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - - val monitorResponse = createMonitor(monitor)!! - - val workflowRequest = randomWorkflow( - monitorIds = listOf(monitorResponse.id) - ) - val workflowResponse = upsertWorkflow(workflowRequest)!! - val workflowId = workflowResponse.id - val getWorkflowResponse = getWorkflowById(id = workflowResponse.id) - - assertNotNull(getWorkflowResponse) - assertEquals(workflowId, getWorkflowResponse.id) - - deleteWorkflow(workflowId, true) - // Verify that the workflow is deleted - try { - getWorkflowById(workflowId) - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception not returning GetWorkflow Action error ", - it.contains("Workflow not found.") - ) - } - } - // Verify that the monitor is deleted - try { - getMonitorResponse(monitorResponse.id) - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception not returning GetMonitor Action error ", - it.contains("Monitor not found") - ) - } - } - } - - fun `test delete executed workflow with metadata deleted`() { - val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) - val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor1 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput1), - triggers = listOf(trigger1) - ) - val monitorResponse = createMonitor(monitor1)!! - - val docQuery2 = DocLevelQuery(query = "source.ip.v6.v2:16645", name = "4", fields = listOf()) - val docLevelInput2 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery2)) - val trigger2 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor2 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput2), - triggers = listOf(trigger2), - ) - - val monitorResponse2 = createMonitor(monitor2)!! - - var workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id, monitorResponse2.id) - ) - val workflowResponse = upsertWorkflow(workflow)!! 
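-
-        // Editor's note (not part of the original change): delegate monitor metadata documents use the
-        // id pattern "<workflowMetadataId>-<monitorId>-metadata" (see getDelegateMonitorMetadataId
-        // defined above); the deletion assertions below look the metadata up by that id.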
- val workflowById = searchWorkflow(workflowResponse.id) - assertNotNull(workflowById) - - var testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(ChronoUnit.MILLIS)) - // Matches monitor1 - val testDoc1 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16644, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-west-2" - }""" - indexDoc(index, "1", testDoc1) - - val workflowId = workflowResponse.id - val executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! - val monitorsRunResults = executeWorkflowResponse.workflowRunResult.monitorRunResults - assertEquals(2, monitorsRunResults.size) - - val workflowMetadata = searchWorkflowMetadata(workflowId) - assertNotNull(workflowMetadata) - - val monitorMetadataId1 = getDelegateMonitorMetadataId(workflowMetadata, monitorResponse) - val monitorMetadata1 = searchMonitorMetadata(monitorMetadataId1) - assertNotNull(monitorMetadata1) - - val monitorMetadataId2 = getDelegateMonitorMetadataId(workflowMetadata, monitorResponse2) - val monitorMetadata2 = searchMonitorMetadata(monitorMetadataId2) - assertNotNull(monitorMetadata2) - - assertFalse(monitorMetadata1!!.id == monitorMetadata2!!.id) - - deleteWorkflow(workflowId, true) - // Verify that the workflow is deleted - try { - getWorkflowById(workflowId) - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception not returning GetWorkflow Action error ", - it.contains("Workflow not found.") - ) - } - } - // Verify that the workflow metadata is deleted - try { - searchWorkflowMetadata(workflowId) - fail("expected searchWorkflowMetadata method to throw exception") - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception not returning GetMonitor Action error ", - it.contains("List is empty") - ) - } - } - // Verify that the monitors metadata are deleted - try { - searchMonitorMetadata(monitorMetadataId1) - fail("expected searchMonitorMetadata method to throw exception") - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception not returning GetMonitor Action error ", - it.contains("List is empty") - ) - } - } - - try { - searchMonitorMetadata(monitorMetadataId2) - fail("expected searchMonitorMetadata method to throw exception") - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception not returning GetMonitor Action error ", - it.contains("List is empty") - ) - } - } - } - - fun `test delete workflow delegate monitor part of another workflow not deleted`() { - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - - val monitorResponse = createMonitor(monitor)!! - - val workflowRequest = randomWorkflow( - monitorIds = listOf(monitorResponse.id) - ) - val workflowResponse = upsertWorkflow(workflowRequest)!! - val workflowId = workflowResponse.id - val getWorkflowResponse = getWorkflowById(id = workflowResponse.id) - - assertNotNull(getWorkflowResponse) - assertEquals(workflowId, getWorkflowResponse.id) - - val workflowRequest2 = randomWorkflow( - monitorIds = listOf(monitorResponse.id) - ) - val workflowResponse2 = upsertWorkflow(workflowRequest2)!! 
-        val workflowId2 = workflowResponse2.id
-        val getWorkflowResponse2 = getWorkflowById(id = workflowResponse2.id)
-
-        assertNotNull(getWorkflowResponse2)
-        assertEquals(workflowId2, getWorkflowResponse2.id)
-
-        try {
-            deleteWorkflow(workflowId, true)
-        } catch (e: Exception) {
-            e.message?.let {
-                assertTrue(
-                    "Exception not returning GetWorkflow Action error ",
-                    it.contains("[Not allowed to delete ${monitorResponse.id} monitors")
-                )
-            }
-        }
-        val existingMonitor = getMonitorResponse(monitorResponse.id)
-        assertNotNull(existingMonitor)
-    }
-
-    fun `test trying to delete monitor that is part of workflow sequence`() {
-        val docLevelInput = DocLevelMonitorInput(
-            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
-        )
-        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-
-        val monitor = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput),
-            triggers = listOf(trigger)
-        )
-
-        val monitorResponse = createMonitor(monitor)!!
-
-        val workflowRequest = randomWorkflow(
-            monitorIds = listOf(monitorResponse.id)
-        )
-
-        val workflowResponse = upsertWorkflow(workflowRequest)!!
-        val workflowId = workflowResponse.id
-        val getWorkflowResponse = getWorkflowById(id = workflowResponse.id)
-
-        assertNotNull(getWorkflowResponse)
-        assertEquals(workflowId, getWorkflowResponse.id)
-
-        // Verify that the monitor can't be deleted because it's included in the workflow
-        try {
-            deleteMonitor(monitorResponse.id)
-        } catch (e: Exception) {
-            e.message?.let {
-                assertTrue(
-                    "Exception not returning DeleteMonitor Action error ",
-                    it.contains("Monitor can't be deleted because it is a part of workflow(s)")
-                )
-            }
-        }
-    }
-
-    fun `test delete workflow for invalid id monitor index doesn't exists`() {
-        // Try deleting non-existing workflow
-        try {
-            deleteWorkflow("-1")
-        } catch (e: Exception) {
-            e.message?.let {
-                assertTrue(
-                    "Exception not returning DeleteWorkflow Action error ",
-                    it.contains("Workflow not found.")
-                )
-            }
-        }
-    }
-
-    fun `test delete workflow for invalid id monitor index exists`() {
-        val docLevelInput = DocLevelMonitorInput(
-            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
-        )
-        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-
-        val monitor = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput),
-            triggers = listOf(trigger),
-        )
-        createMonitor(monitor)
-        // Try deleting non-existing workflow
-        try {
-            deleteWorkflow("-1")
-        } catch (e: Exception) {
-            e.message?.let {
-                assertTrue(
-                    "Exception not returning DeleteWorkflow Action error ",
-                    it.contains("Workflow not found.")
-                )
-            }
-        }
-    }
-
-    fun `test create workflow without delegate failure`() {
-        val workflow = randomWorkflow(
-            monitorIds = Collections.emptyList()
-        )
-        try {
-            upsertWorkflow(workflow)
-        } catch (e: Exception) {
-            e.message?.let {
-                assertTrue(
-                    "Exception not returning IndexWorkflow Action error ",
-                    it.contains("Delegates list can not be empty.")
-                )
-            }
-        }
-    }
-
-    fun `test create workflow with 26 delegates failure`() {
-        val monitorsIds = mutableListOf<String>()
-        for (i in 0..25) {
-            monitorsIds.add(UUID.randomUUID().toString())
-        }
-        val workflow = randomWorkflow(
-            monitorIds = monitorsIds
-        )
-        try {
-            upsertWorkflow(workflow)
-        } catch (e: Exception) {
-            e.message?.let {
-                assertTrue(
-                    "Exception not returning IndexWorkflow Action error ",
-                    it.contains("Delegates list can not be larger then 25.")
-                )
-            }
-        }
-    }
-
-    fun `test update workflow without delegate failure`() {
-        val docLevelInput = DocLevelMonitorInput(
-            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
-        )
-        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-        val monitor1 = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput),
-            triggers = listOf(trigger)
-        )
-
-        val monitor2 = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput),
-            triggers = listOf(trigger),
-        )
-
-        val monitorResponse1 = createMonitor(monitor1)!!
-        val monitorResponse2 = createMonitor(monitor2)!!
-
-        var workflow = randomWorkflow(
-            monitorIds = listOf(monitorResponse1.id, monitorResponse2.id)
-        )
-
-        val workflowResponse = upsertWorkflow(workflow)!!
-        assertNotNull("Workflow creation failed", workflowResponse)
-
-        workflow = randomWorkflow(
-            id = workflowResponse.id,
-            monitorIds = Collections.emptyList()
-        )
-        try {
-            upsertWorkflow(workflow)
-        } catch (e: Exception) {
-            e.message?.let {
-                assertTrue(
-                    "Exception not returning IndexWorkflow Action error ",
-                    it.contains("Delegates list can not be empty.")
-                )
-            }
-        }
-    }
-
-    fun `test create workflow duplicate delegate failure`() {
-        val workflow = randomWorkflow(
-            monitorIds = listOf("1", "1", "2")
-        )
-        try {
-            upsertWorkflow(workflow)
-        } catch (e: Exception) {
-            e.message?.let {
-                assertTrue(
-                    "Exception not returning IndexWorkflow Action error ",
-                    it.contains("Duplicate delegates not allowed")
-                )
-            }
-        }
-    }
-
-    fun `test update workflow duplicate delegate failure`() {
-        val docLevelInput = DocLevelMonitorInput(
-            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
-        )
-        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-        val monitor = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput),
-            triggers = listOf(trigger)
-        )
-
-        val monitorResponse = createMonitor(monitor)!!
-
-        var workflow = randomWorkflow(
-            monitorIds = listOf(monitorResponse.id)
-        )
-
-        val workflowResponse = upsertWorkflow(workflow)!!
-        assertNotNull("Workflow creation failed", workflowResponse)
-
-        workflow = randomWorkflow(
-            id = workflowResponse.id,
-            monitorIds = listOf("1", "1", "2")
-        )
-        try {
-            upsertWorkflow(workflow)
-        } catch (e: Exception) {
-            e.message?.let {
-                assertTrue(
-                    "Exception not returning IndexWorkflow Action error ",
-                    it.contains("Duplicate delegates not allowed")
-                )
-            }
-        }
-    }
-
-    fun `test create workflow delegate monitor doesn't exist failure`() {
-        val docLevelInput = DocLevelMonitorInput(
-            "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()))
-        )
-        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-
-        val monitor = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput),
-            triggers = listOf(trigger)
-        )
-        val monitorResponse = createMonitor(monitor)!!
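-        // "-1" below is not the ID of any existing monitor, so indexing the workflow must fail.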
- - val workflow = randomWorkflow( - monitorIds = listOf("-1", monitorResponse.id) - ) - try { - upsertWorkflow(workflow) - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("are not valid monitor ids") - ) - } - } - } - - fun `test update workflow delegate monitor doesn't exist failure`() { - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - val monitorResponse = createMonitor(monitor)!! - - var workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id) - ) - val workflowResponse = upsertWorkflow(workflow)!! - assertNotNull("Workflow creation failed", workflowResponse) - - workflow = randomWorkflow( - id = workflowResponse.id, - monitorIds = listOf("-1", monitorResponse.id) - ) - - try { - upsertWorkflow(workflow) - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("are not valid monitor ids") - ) - } - } - } - - fun `test create workflow sequence order not correct failure`() { - val delegates = listOf( - Delegate(1, "monitor-1"), - Delegate(1, "monitor-2"), - Delegate(2, "monitor-3") - ) - val workflow = randomWorkflowWithDelegates( - delegates = delegates - ) - try { - upsertWorkflow(workflow) - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("Sequence ordering of delegate monitor shouldn't contain duplicate order values") - ) - } - } - } - - fun `test update workflow sequence order not correct failure`() { - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - val monitorResponse = createMonitor(monitor)!! - - var workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id) - ) - val workflowResponse = upsertWorkflow(workflow)!! 
- assertNotNull("Workflow creation failed", workflowResponse) - - val delegates = listOf( - Delegate(1, "monitor-1"), - Delegate(1, "monitor-2"), - Delegate(2, "monitor-3") - ) - workflow = randomWorkflowWithDelegates( - id = workflowResponse.id, - delegates = delegates - ) - try { - upsertWorkflow(workflow) - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("Sequence ordering of delegate monitor shouldn't contain duplicate order values") - ) - } - } - } - - fun `test create workflow chained findings monitor not in sequence failure`() { - val delegates = listOf( - Delegate(1, "monitor-1"), - Delegate(2, "monitor-2", ChainedMonitorFindings("monitor-1")), - Delegate(3, "monitor-3", ChainedMonitorFindings("monitor-x")) - ) - val workflow = randomWorkflowWithDelegates( - delegates = delegates - ) - - try { - upsertWorkflow(workflow) - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("Chained Findings Monitor monitor-x doesn't exist in sequence") - ) - } - } - } - - fun `test create workflow query monitor chained findings monitor failure`() { - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val docMonitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - val docMonitorResponse = createMonitor(docMonitor)!! - - val queryMonitor = randomQueryLevelMonitor() - val queryMonitorResponse = createMonitor(queryMonitor)!! - - val workflow = randomWorkflow( - monitorIds = listOf(queryMonitorResponse.id, docMonitorResponse.id) - ) - try { - upsertWorkflow(workflow) - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("Query level monitor can't be part of chained findings") - ) - } - } - } - - fun `test create workflow delegate and chained finding monitor different indices failure`() { - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val docMonitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - val docMonitorResponse = createMonitor(docMonitor)!! - - val index1 = "$index-1" - createTestIndex(index1) - - val docLevelInput1 = DocLevelMonitorInput( - "description", listOf(index1), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) - ) - - val docMonitor1 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput1), - triggers = listOf(trigger) - ) - val docMonitorResponse1 = createMonitor(docMonitor1)!! 
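-        // The second delegate chains findings from a monitor that queries a different index,
-        // so creating this workflow is expected to be rejected.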
- - val workflow = randomWorkflow( - monitorIds = listOf(docMonitorResponse1.id, docMonitorResponse.id) - ) - try { - upsertWorkflow(workflow) - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("doesn't query all of chained findings monitor's indices") - ) - } - } - } - - fun `test create workflow when monitor index not initialized failure`() { - val delegates = listOf( - Delegate(1, "monitor-1") - ) - val workflow = randomWorkflowWithDelegates( - delegates = delegates - ) - - try { - upsertWorkflow(workflow) - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("Monitors not found") - ) - } - } - } - - fun `test update workflow chained findings monitor not in sequence failure`() { - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - val monitorResponse = createMonitor(monitor)!! - - var workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id) - ) - val workflowResponse = upsertWorkflow(workflow)!! - assertNotNull("Workflow creation failed", workflowResponse) - - val delegates = listOf( - Delegate(1, "monitor-1"), - Delegate(2, "monitor-2", ChainedMonitorFindings("monitor-1")), - Delegate(3, "monitor-3", ChainedMonitorFindings("monitor-x")) - ) - workflow = randomWorkflowWithDelegates( - id = workflowResponse.id, - delegates = delegates - ) - - try { - upsertWorkflow(workflow) - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("Chained Findings Monitor monitor-x doesn't exist in sequence") - ) - } - } - } - - fun `test create workflow chained findings order not correct failure`() { - val delegates = listOf( - Delegate(1, "monitor-1"), - Delegate(3, "monitor-2", ChainedMonitorFindings("monitor-1")), - Delegate(2, "monitor-3", ChainedMonitorFindings("monitor-2")) - ) - val workflow = randomWorkflowWithDelegates( - delegates = delegates - ) - - try { - upsertWorkflow(workflow) - } catch (e: Exception) { - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("Chained Findings Monitor monitor-2 should be executed before monitor monitor-3") - ) - } - } - } - - fun `test update workflow chained findings order not correct failure`() { - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - val monitorResponse = createMonitor(monitor)!! - - var workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id) - ) - val workflowResponse = upsertWorkflow(workflow)!! 
-        assertNotNull("Workflow creation failed", workflowResponse)
-
-        val delegates = listOf(
-            Delegate(1, "monitor-1"),
-            Delegate(3, "monitor-2", ChainedMonitorFindings("monitor-1")),
-            Delegate(2, "monitor-3", ChainedMonitorFindings("monitor-2"))
-        )
-        workflow = randomWorkflowWithDelegates(
-            delegates = delegates
-        )
-
-        try {
-            upsertWorkflow(workflow)
-        } catch (e: Exception) {
-            e.message?.let {
-                assertTrue(
-                    "Exception not returning IndexWorkflow Action error ",
-                    it.contains("Chained Findings Monitor monitor-2 should be executed before monitor monitor-3")
-                )
-            }
-        }
-    }
-
-    fun `test create workflow with chained alert triggers`() {
-        val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())
-        val docLevelInput = DocLevelMonitorInput(
-            "description", listOf(index), listOf(docQuery1)
-        )
-        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-        val customFindingsIndex = "custom_findings_index"
-        val customFindingsIndexPattern = "custom_findings_index-1"
-        val customQueryIndex = "custom_alerts_index"
-        val monitor1 = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput),
-            triggers = listOf(trigger),
-            dataSources = DataSources(
-                queryIndex = customQueryIndex,
-                findingsIndex = customFindingsIndex,
-                findingsIndexPattern = customFindingsIndexPattern
-            )
-        )
-
-        val monitor2 = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput),
-            triggers = listOf(trigger),
-            dataSources = DataSources(
-                queryIndex = customQueryIndex,
-                findingsIndex = customFindingsIndex,
-                findingsIndexPattern = customFindingsIndexPattern
-            )
-        )
-
-        val monitorResponse1 = createMonitor(monitor1)!!
-        val monitorResponse2 = createMonitor(monitor2)!!
-
-        val chainedAlertTrigger1 = randomChainedAlertTrigger(
-            condition = Script("monitor[id=${monitorResponse1.id}] && monitor[id=${monitorResponse2.id}]")
-        )
-        val chainedAlertTrigger2 = randomChainedAlertTrigger(
-            condition = Script("monitor[id=${monitorResponse1.id}] || monitor[id=${monitorResponse2.id}]")
-        )
-        val workflow = randomWorkflow(
-            monitorIds = listOf(monitorResponse1.id, monitorResponse2.id),
-            triggers = listOf(
-                chainedAlertTrigger1,
-                chainedAlertTrigger2
-            )
-        )
-        val workflowResponse = upsertWorkflow(workflow)!!
-        val workflowById = searchWorkflow(workflowResponse.id)!!
- - assertEquals("Workflow input not correct", workflowById.triggers.size, 2) - assertEquals("Workflow input not correct", workflowById.triggers.get(0).name, chainedAlertTrigger1.name) - assertEquals("Workflow input not correct", workflowById.triggers.get(1).name, chainedAlertTrigger2.name) - assertEquals("Workflow input not correct", workflowById.triggers.get(0).id, chainedAlertTrigger1.id) - assertEquals("Workflow input not correct", workflowById.triggers.get(1).id, chainedAlertTrigger2.id) - assertEquals( - "Workflow input not correct", - (workflowById.triggers.get(0) as ChainedAlertTrigger).condition.idOrCode, - chainedAlertTrigger1.condition.idOrCode - ) - assertEquals( - "Workflow input not correct", - (workflowById.triggers.get(1) as ChainedAlertTrigger).condition.idOrCode, - chainedAlertTrigger2.condition.idOrCode - ) - } - - fun `test postIndex on workflow update with trigger deletion`() { - val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) - val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor1 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput1), - triggers = listOf(trigger1) - ) - var monitor2 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput1), - triggers = listOf(trigger1) - ) - val monitorResponse = createMonitor(monitor1)!! - val monitorResponse2 = createMonitor(monitor2)!! - - val andTrigger = randomChainedAlertTrigger( - name = "1And2", - condition = Script("monitor[id=${monitorResponse.id}] && monitor[id=${monitorResponse2.id}]") - ) - val notTrigger = randomChainedAlertTrigger( - name = "Not1OrNot2", - condition = Script("!monitor[id=${monitorResponse.id}] || !monitor[id=${monitorResponse2.id}]") - ) - var workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id, monitorResponse2.id), - triggers = listOf(andTrigger) - ) - val workflowResponse = upsertWorkflow(workflow)!! - val workflowById = searchWorkflow(workflowResponse.id) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc1 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16644, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-west-2" - }""" - indexDoc(index, "1", testDoc1) - val workflowId = workflowById!!.id - var executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! - var res = getWorkflowAlerts( - workflowId, - ) - var chainedAlerts = res.alerts - Assert.assertTrue(chainedAlerts.size == 1) - val updatedWorkflowResponse = upsertWorkflow( - workflowById.copy(triggers = listOf(notTrigger)), - workflowResponse.id, - RestRequest.Method.PUT - )!! 
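-        // Replacing the workflow's triggers should audit-delete the alert created for the removed
-        // trigger: it is moved to the alert history index in the DELETED state, as polled below.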
- val updatedWorkflow = searchWorkflow(workflowResponse.id) - Assert.assertTrue(updatedWorkflow!!.triggers.size == 1) - Assert.assertTrue(updatedWorkflow.triggers[0].id == notTrigger.id) - OpenSearchTestCase.waitUntil({ - val searchRequest = SearchRequest(AlertIndices.ALERT_HISTORY_ALL) - val sr = client().search(searchRequest).get() - sr.hits.hits.size == 3 - }, 5, TimeUnit.MINUTES) - val searchRequest = SearchRequest(AlertIndices.ALERT_HISTORY_ALL) - val sr = client().search(searchRequest).get() - Assert.assertTrue(sr.hits.hits.size == 3) - val alerts = sr.hits.map { hit -> - val xcp = XContentHelper.createParser( - xContentRegistry(), - LoggingDeprecationHandler.INSTANCE, - hit.sourceRef, - XContentType.JSON - ) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - val alert = Alert.parse(xcp, hit.id, hit.version) - alert - } - Assert.assertTrue(alerts.stream().anyMatch { it.state == Alert.State.DELETED && chainedAlerts[0].id == it.id }) - } - - fun `test postDelete on workflow deletion`() { - val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) - val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor1 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput1), - triggers = listOf(trigger1) - ) - var monitor2 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput1), - triggers = listOf(trigger1) - ) - val monitorResponse = createMonitor(monitor1)!! - val monitorResponse2 = createMonitor(monitor2)!! - - val andTrigger = randomChainedAlertTrigger( - name = "1And2", - condition = Script("monitor[id=${monitorResponse.id}] && monitor[id=${monitorResponse2.id}]") - ) - val notTrigger = randomChainedAlertTrigger( - name = "Not1OrNot2", - condition = Script("!monitor[id=${monitorResponse.id}] || !monitor[id=${monitorResponse2.id}]") - ) - var workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id, monitorResponse2.id), - triggers = listOf(andTrigger) - ) - val workflowResponse = upsertWorkflow(workflow)!! - val workflowById = searchWorkflow(workflowResponse.id) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS)) - val testDoc1 = """{ - "message" : "This is an error from IAD region", - "source.ip.v6.v2" : 16644, - "test_strict_date_time" : "$testTime", - "test_field_1" : "us-west-2" - }""" - indexDoc(index, "1", testDoc1) - val workflowId = workflowById!!.id - var executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!! 
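-        // Deleting the workflow should likewise move its chained alert to the alert history index
-        // in the DELETED state, which the remainder of this test polls for and asserts.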
-        var res = getWorkflowAlerts(
-            workflowId,
-        )
-        var chainedAlerts = res.alerts
-        Assert.assertTrue(chainedAlerts.size == 1)
-        val deleteRes = deleteWorkflow(workflowId, false)
-        logger.info(deleteRes)
-        OpenSearchTestCase.waitUntil({
-            val searchRequest = SearchRequest(AlertIndices.ALERT_HISTORY_ALL)
-            val sr = client().search(searchRequest).get()
-            sr.hits.hits.size == 3
-        }, 5, TimeUnit.MINUTES)
-        val searchRequest = SearchRequest(AlertIndices.ALERT_HISTORY_ALL)
-        val sr = client().search(searchRequest).get()
-        Assert.assertTrue(sr.hits.hits.size == 3)
-        val alerts = sr.hits.map { hit ->
-            val xcp = XContentHelper.createParser(
-                xContentRegistry(),
-                LoggingDeprecationHandler.INSTANCE,
-                hit.sourceRef,
-                XContentType.JSON
-            )
-            XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp)
-            val alert = Alert.parse(xcp, hit.id, hit.version)
-            alert
-        }
-        Assert.assertTrue(alerts.stream().anyMatch { it.state == Alert.State.DELETED && chainedAlerts[0].id == it.id })
-    }
-
-    fun `test get chained alerts with alertId paginating for associated alerts`() {
-        val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf())
-        val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1))
-        val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-        var monitor1 = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput1),
-            triggers = listOf(trigger1)
-        )
-        var monitor2 = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput1),
-            triggers = listOf(trigger1)
-        )
-        val monitorResponse = createMonitor(monitor1)!!
-        val monitorResponse2 = createMonitor(monitor2)!!
-
-        val andTrigger = randomChainedAlertTrigger(
-            name = "1And2",
-            condition = Script("monitor[id=${monitorResponse.id}] && monitor[id=${monitorResponse2.id}]")
-        )
-        var workflow = randomWorkflow(
-            monitorIds = listOf(monitorResponse.id, monitorResponse2.id),
-            triggers = listOf(andTrigger)
-        )
-        val workflowResponse = upsertWorkflow(workflow)!!
-        val workflowById = searchWorkflow(workflowResponse.id)
-        val workflowId = workflowById!!.id
-        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
-        val testDoc1 = """{
-            "message" : "This is an error from IAD region",
-            "source.ip.v6.v2" : 16644,
-            "test_strict_date_time" : "$testTime",
-            "test_field_1" : "us-west-2"
-        }"""
-        var i = 1
-        val indexRequests = mutableListOf<IndexRequest>()
-        while (i++ < 300) {
-            indexRequests += IndexRequest(index).source(testDoc1, XContentType.JSON).id("$i").opType(DocWriteRequest.OpType.INDEX)
-        }
-        val bulkResponse: BulkResponse =
-            client().bulk(BulkRequest().add(indexRequests).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)).get()
-        if (bulkResponse.hasFailures()) {
-            fail("Bulk request to index to test index has failed")
-        }
-        var executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!!
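-        // getWorkflowAlerts pages associated alerts through the Table argument: the calls below
-        // fetch three 100-alert windows (offsets 0, 100, and 201) and later verify that the
-        // returned ID sets are pairwise disjoint.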
-        var res = getWorkflowAlerts(
-            workflowId = workflowId
-        )
-        Assert.assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults[andTrigger.id]!!.triggered)
-        var chainedAlerts = res.alerts
-        Assert.assertTrue(chainedAlerts.size == 1)
-        Assert.assertEquals(res.associatedAlerts.size, 10)
-        var res100to200 = getWorkflowAlerts(
-            workflowId = workflowId,
-            alertIds = listOf(res.alerts[0].id),
-            table = Table("asc", "monitor_id", null, 100, 100, null)
-        )
-        Assert.assertEquals(res100to200.associatedAlerts.size, 100)
-        var res200to300 = getWorkflowAlerts(
-            workflowId = workflowId,
-            alertIds = listOf(res.alerts[0].id),
-            table = Table("asc", "monitor_id", null, 100, 201, null)
-        )
-        Assert.assertEquals(res200to300.associatedAlerts.size, 100)
-        var res0to99 = getWorkflowAlerts(
-            workflowId = workflowId,
-            alertIds = listOf(res.alerts[0].id),
-            table = Table("asc", "monitor_id", null, 100, 0, null)
-        )
-        Assert.assertEquals(res0to99.associatedAlerts.size, 100)
-
-        val ids100to200 = res100to200.associatedAlerts.stream().map { it.id }.collect(Collectors.toSet())
-        val idsSet0to99 = res0to99.associatedAlerts.stream().map { it.id }.collect(Collectors.toSet())
-        val idsSet200to300 = res200to300.associatedAlerts.stream().map { it.id }.collect(Collectors.toSet())
-
-        Assert.assertTrue(idsSet0to99.all { it !in ids100to200 })
-        Assert.assertTrue(idsSet0to99.all { it !in idsSet200to300 })
-        Assert.assertTrue(ids100to200.all { it !in idsSet200to300 })
-    }
-
-    fun `test existing chained alert active alert is updated on consecutive trigger condition match`() {
-        val docQuery1 = DocLevelQuery(query = "test_field_1:\"us-west-2\"", name = "3", fields = listOf())
-        val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1))
-        val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-        var monitor1 = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput1),
-            triggers = listOf(trigger1)
-        )
-        var monitor2 = randomDocumentLevelMonitor(
-            inputs = listOf(docLevelInput1),
-            triggers = listOf(trigger1)
-        )
-        val monitorResponse = createMonitor(monitor1)!!
-        val monitorResponse2 = createMonitor(monitor2)!!
-        val notTrigger = randomChainedAlertTrigger(
-            name = "Not1OrNot2",
-            condition = Script("!monitor[id=${monitorResponse.id}] || !monitor[id=${monitorResponse2.id}]")
-        )
-        var workflow = randomWorkflow(
-            monitorIds = listOf(monitorResponse.id, monitorResponse2.id),
-            triggers = listOf(notTrigger)
-        )
-        val workflowResponse = upsertWorkflow(workflow)!!
-        val workflowById = searchWorkflow(workflowResponse.id)
-        val workflowId = workflowById!!.id
-
-        /** no ACTIVE alert exists and chained alert trigger matches. Expect: new ACTIVE alert created**/
-        var executeWorkflowResponse = executeWorkflow(workflowById, workflowId, false)!!
-        assertTrue(executeWorkflowResponse.workflowRunResult.triggerResults[notTrigger.id]!!.triggered)
-        val workflowAlerts = getWorkflowAlerts(workflowId)
-        Assert.assertTrue(workflowAlerts.alerts.size == 1)
-        Assert.assertEquals(workflowAlerts.alerts[0].state, Alert.State.ACTIVE)
-        /** ACTIVE alert exists and chained alert trigger matched again. Expect: existing alert updated and remains in ACTIVE*/
-        var executeWorkflowResponse1 = executeWorkflow(workflowById, workflowId, false)!!
-        assertTrue(executeWorkflowResponse1.workflowRunResult.triggerResults[notTrigger.id]!!.triggered)
-        val updatedActiveAlerts = getWorkflowAlerts(workflowId)
-        Assert.assertTrue(updatedActiveAlerts.alerts.size == 1)
-        Assert.assertEquals(updatedActiveAlerts.alerts[0].state, Alert.State.ACTIVE)
-        Assert.assertTrue(updatedActiveAlerts.alerts[0].lastNotificationTime!! > workflowAlerts.alerts[0].lastNotificationTime!!)
-
-        /** Acknowledge ACTIVE alert*/
-        val ackChainedAlerts = ackChainedAlerts(updatedActiveAlerts.alerts.stream().map { it.id }.collect(Collectors.toList()), workflowId)
-        Assert.assertTrue(ackChainedAlerts.acknowledged.size == 1)
-        Assert.assertTrue(ackChainedAlerts.missing.size == 0)
-        Assert.assertTrue(ackChainedAlerts.failed.size == 0)
-
-        /** ACKNOWLEDGED alert exists and chained alert trigger matched again. Expect: existing alert updated and remains ACKNOWLEDGED*/
-        var executeWorkflowResponse2 = executeWorkflow(workflowById, workflowId, false)!!
-        assertTrue(executeWorkflowResponse2.workflowRunResult.triggerResults[notTrigger.id]!!.triggered)
-        val acknowledgedAlert = getWorkflowAlerts(workflowId, alertState = Alert.State.ACKNOWLEDGED)
-        Assert.assertTrue(acknowledgedAlert.alerts.size == 1)
-        Assert.assertEquals(acknowledgedAlert.alerts[0].state, Alert.State.ACKNOWLEDGED)
-        Assert.assertTrue(acknowledgedAlert.alerts[0].lastNotificationTime!! == updatedActiveAlerts.alerts[0].lastNotificationTime!!)
-
-        /** ACKNOWLEDGED alert exists and chained alert trigger NOT matched. Expect: ACKNOWLEDGED alert marked as COMPLETED**/
-        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(ZonedDateTime.now().truncatedTo(MILLIS))
-        val testDoc1 = """{
-            "message" : "This is an error from IAD region",
-            "source.ip.v6.v2" : 16644,
-            "test_strict_date_time" : "$testTime",
-            "test_field_1" : "us-west-2"
-        }"""
-        indexDoc(index, "1", testDoc1)
-        var executeWorkflowResponse3 = executeWorkflow(workflowById, workflowId, false)!!
-        assertFalse(executeWorkflowResponse3.workflowRunResult.triggerResults[notTrigger.id]!!.triggered)
-        val completedAlert = getWorkflowAlerts(workflowId, alertState = Alert.State.COMPLETED)
-        Assert.assertTrue(completedAlert.alerts.size == 1)
-        Assert.assertEquals(completedAlert.alerts[0].state, Alert.State.COMPLETED)
-        Assert.assertTrue(completedAlert.alerts[0].endTime!! > acknowledgedAlert.alerts[0].lastNotificationTime!!)
-
-        /** COMPLETED state alert exists and trigger matches. Expect: new ACTIVE state chained alert created*/
-        var executeWorkflowResponse4 = executeWorkflow(workflowById, workflowId, false)!!
-        assertTrue(executeWorkflowResponse4.workflowRunResult.triggerResults[notTrigger.id]!!.triggered)
-        val newActiveAlert = getWorkflowAlerts(workflowId, alertState = Alert.State.ACTIVE)
-        Assert.assertTrue(newActiveAlert.alerts.size == 1)
-        Assert.assertEquals(newActiveAlert.alerts[0].state, Alert.State.ACTIVE)
-        Assert.assertTrue(newActiveAlert.alerts[0].lastNotificationTime!! > acknowledgedAlert.alerts[0].lastNotificationTime!!)
-        val completedAlert1 = getWorkflowAlerts(workflowId, alertState = Alert.State.COMPLETED)
-        Assert.assertTrue(completedAlert1.alerts.size == 1)
-        Assert.assertEquals(completedAlert1.alerts[0].state, Alert.State.COMPLETED)
-        Assert.assertTrue(completedAlert1.alerts[0].endTime!! > acknowledgedAlert.alerts[0].lastNotificationTime!!)
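-        // Taken together, the runs above exercise the chained-alert lifecycle:
-        // ACTIVE -> updated ACTIVE -> ACKNOWLEDGED -> COMPLETED -> new ACTIVE.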
-    }
-}
diff --git a/alerting/bin/test/org/opensearch/alerting/MonitorRunnerServiceIT.kt b/alerting/bin/test/org/opensearch/alerting/MonitorRunnerServiceIT.kt
deleted file mode 100644
index 72b7c0423..000000000
--- a/alerting/bin/test/org/opensearch/alerting/MonitorRunnerServiceIT.kt
+++ /dev/null
@@ -1,2093 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting
-
-import org.junit.Assert
-import org.opensearch.alerting.alerts.AlertIndices
-import org.opensearch.alerting.model.destination.CustomWebhook
-import org.opensearch.alerting.model.destination.Destination
-import org.opensearch.alerting.model.destination.email.Email
-import org.opensearch.alerting.model.destination.email.Recipient
-import org.opensearch.alerting.util.DestinationType
-import org.opensearch.alerting.util.getBucketKeysHash
-import org.opensearch.client.ResponseException
-import org.opensearch.client.WarningFailureException
-import org.opensearch.common.settings.Settings
-import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder
-import org.opensearch.commons.alerting.alerts.AlertError
-import org.opensearch.commons.alerting.model.ActionExecutionResult
-import org.opensearch.commons.alerting.model.Alert
-import org.opensearch.commons.alerting.model.Alert.State
-import org.opensearch.commons.alerting.model.Alert.State.ACKNOWLEDGED
-import org.opensearch.commons.alerting.model.Alert.State.ACTIVE
-import org.opensearch.commons.alerting.model.Alert.State.COMPLETED
-import org.opensearch.commons.alerting.model.Alert.State.ERROR
-import org.opensearch.commons.alerting.model.DataSources
-import org.opensearch.commons.alerting.model.DocLevelMonitorInput
-import org.opensearch.commons.alerting.model.DocLevelQuery
-import org.opensearch.commons.alerting.model.IntervalSchedule
-import org.opensearch.commons.alerting.model.Monitor
-import org.opensearch.commons.alerting.model.SearchInput
-import org.opensearch.commons.alerting.model.action.ActionExecutionPolicy
-import org.opensearch.commons.alerting.model.action.AlertCategory
-import org.opensearch.commons.alerting.model.action.PerAlertActionScope
-import org.opensearch.commons.alerting.model.action.PerExecutionActionScope
-import org.opensearch.commons.alerting.model.action.Throttle
-import org.opensearch.commons.authuser.User
-import org.opensearch.core.rest.RestStatus
-import org.opensearch.index.query.QueryBuilders
-import org.opensearch.script.Script
-import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder
-import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder
-import org.opensearch.search.aggregations.bucket.terms.MultiTermsAggregationBuilder
-import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder
-import org.opensearch.search.aggregations.metrics.CardinalityAggregationBuilder
-import org.opensearch.search.aggregations.support.MultiTermsValuesSourceConfig
-import org.opensearch.search.builder.SearchSourceBuilder
-import java.net.URLEncoder
-import java.time.Instant
-import java.time.ZonedDateTime
-import java.time.format.DateTimeFormatter
-import java.time.temporal.ChronoUnit
-import java.time.temporal.ChronoUnit.DAYS
-import java.time.temporal.ChronoUnit.MILLIS
-import java.time.temporal.ChronoUnit.MINUTES
-import kotlin.collections.HashMap
-
-class MonitorRunnerServiceIT : AlertingRestTestCase() {
-
-    fun `test execute monitor with dryrun`() {
-        val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id)
-        val monitor = randomQueryLevelMonitor(
-            triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action)))
-        )
-
-        val response = executeMonitor(monitor, params = DRYRUN_MONITOR)
-
-        val output = entityAsMap(response)
-        assertEquals(monitor.name, output["monitor_name"])
-        for (triggerResult in output.objectMap("trigger_results").values) {
-            for (actionResult in triggerResult.objectMap("action_results").values) {
-                @Suppress("UNCHECKED_CAST") val actionOutput = actionResult["output"] as Map<String, Any>
-                assertEquals("Hello ${monitor.name}", actionOutput["subject"])
-                assertEquals("Hello ${monitor.name}", actionOutput["message"])
-            }
-        }
-
-        val alerts = searchAlerts(monitor)
-        assertEquals("Alert saved for test monitor", 0, alerts.size)
-    }
-
-    fun `test execute monitor returns search result`() {
-        val testIndex = createTestIndex()
-        val twoMinsAgo = ZonedDateTime.now().minus(2, MINUTES).truncatedTo(MILLIS)
-        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(twoMinsAgo)
-        val testDoc = """{ "test_strict_date_time" : "$testTime" }"""
-        indexDoc(testIndex, "1", testDoc)
-
-        val query = QueryBuilders.rangeQuery("test_strict_date_time")
-            .gt("{{period_end}}||-10d")
-            .lte("{{period_end}}")
-            .format("epoch_millis")
-        val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().query(query))
-        val triggerScript = """
-            // make sure there is exactly one hit
-            return ctx.results[0].hits.hits.size() == 1
-        """.trimIndent()
-
-        val trigger = randomQueryLevelTrigger(condition = Script(triggerScript))
-        val monitor = randomQueryLevelMonitor(inputs = listOf(input), triggers = listOf(trigger))
-        val response = executeMonitor(monitor, params = DRYRUN_MONITOR)
-
-        val output = entityAsMap(response)
-
-        assertEquals(monitor.name, output["monitor_name"])
-        @Suppress("UNCHECKED_CAST")
-        val searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
-        @Suppress("UNCHECKED_CAST")
-        val total = searchResult.stringMap("hits")?.get("total") as Map<String, Any>
-        assertEquals("Incorrect search result", 1, total["value"])
-    }
-
-    fun `test execute monitor not triggered`() {
-        val monitor = randomQueryLevelMonitor(triggers = listOf(randomQueryLevelTrigger(condition = NEVER_RUN)))
-
-        val response = executeMonitor(monitor)
-
-        val output = entityAsMap(response)
-        assertEquals(monitor.name, output["monitor_name"])
-        for (triggerResult in output.objectMap("trigger_results").values) {
-            assertTrue("Unexpected trigger was run", triggerResult.objectMap("action_results").isEmpty())
-        }
-
-        val alerts = searchAlerts(monitor)
-        assertEquals("Alert saved for test monitor", 0, alerts.size)
-    }
-
-    fun `test active alert is updated on each run`() {
-        val monitor = createMonitor(
-            randomQueryLevelMonitor(
-                triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN, destinationId = createDestination().id))
-            )
-        )
-
-        executeMonitor(monitor.id)
-        val firstRunAlert = searchAlerts(monitor).single()
-        verifyAlert(firstRunAlert, monitor)
-        // Runner uses ThreadPool.CachedTimeThread thread which only updates once every 200 ms. Wait a bit to
-        // see lastNotificationTime change.
-        Thread.sleep(200)
-        executeMonitor(monitor.id)
-        val secondRunAlert = searchAlerts(monitor).single()
-        verifyAlert(secondRunAlert, monitor)
-
-        assertEquals("New alert was created, instead of updating existing alert.", firstRunAlert.id, secondRunAlert.id)
-        assertEquals("Start time shouldn't change", firstRunAlert.startTime, secondRunAlert.startTime)
-        assertNotEquals(
-            "Last notification should be different.",
-            firstRunAlert.lastNotificationTime, secondRunAlert.lastNotificationTime
-        )
-    }
-
-    fun `test execute monitor input error`() {
-        // use a non-existent index to trigger an input error
-        createIndex("foo", Settings.EMPTY)
-        val input = SearchInput(indices = listOf("foo"), query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()))
-        val monitor = createMonitor(
-            randomQueryLevelMonitor(
-                inputs = listOf(input),
-                triggers = listOf(randomQueryLevelTrigger(condition = NEVER_RUN))
-            )
-        )
-
-        deleteIndex("foo")
-        val response = executeMonitor(monitor.id)
-
-        val output = entityAsMap(response)
-        assertEquals(monitor.name, output["monitor_name"])
-        @Suppress("UNCHECKED_CAST")
-        val inputResults = output.stringMap("input_results")
-        assertTrue("Missing monitor error message", (inputResults?.get("error") as String).isNotEmpty())
-
-        val alerts = searchAlerts(monitor)
-        assertEquals("Alert not saved", 1, alerts.size)
-        verifyAlert(alerts.single(), monitor, ERROR)
-    }
-
-    fun `test execute monitor wrong monitorid`() {
-        // use a non-existent monitor ID to trigger a 404.
-        createIndex("foo", Settings.EMPTY)
-        val input = SearchInput(indices = listOf("foo"), query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()))
-        val monitor = createMonitor(
-            randomQueryLevelMonitor(
-                inputs = listOf(input),
-                triggers = listOf(randomQueryLevelTrigger(condition = NEVER_RUN))
-            )
-        )
-
-        var exception: ResponseException? = null
-        try {
-            executeMonitor(monitor.id + "bad")
-        } catch (ex: ResponseException) {
-            exception = ex
-        }
-        Assert.assertEquals(404, exception?.response?.statusLine?.statusCode)
-    }
-
-    fun `test execute doclevel monitor without triggers success`() {
- val index = "foo" - createIndex(index, Settings.EMPTY) - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "1", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(index), listOf(docQuery)) - val monitor = createMonitor( - randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf() - ) - ) - val doc = """ - { "test_field": "us-west-2" } - """.trimIndent() - indexDoc(index, "1", doc) - - val response = executeMonitor(monitor.id) - var output = entityAsMap(response) - assertEquals(monitor.name, output["monitor_name"]) - assertTrue("Unexpected monitor error message", (output["error"] as String?).isNullOrEmpty()) - assertTrue(searchFindings(monitor).size == 1) - assertTrue(searchAlerts(monitor).isEmpty()) - } - - fun `test acknowledged alert does not suppress subsequent errors`() { - val destinationId = createDestination().id - - createIndex("foo", Settings.EMPTY) - val input = SearchInput(indices = listOf("foo"), query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery())) - val monitor = createMonitor( - randomQueryLevelMonitor( - inputs = listOf(input), - triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN, destinationId = destinationId)) - ) - ) - - var response = executeMonitor(monitor.id) - - var output = entityAsMap(response) - assertEquals(monitor.name, output["monitor_name"]) - assertTrue("Unexpected monitor error message", (output["error"] as String?).isNullOrEmpty()) - val activeAlert = searchAlerts(monitor).single() - verifyAlert(activeAlert, monitor) - - // Now acknowledge the alert and delete the index to cause the next run of the monitor to fail... - acknowledgeAlerts(monitor, activeAlert) - deleteIndex("foo") - response = executeMonitor(monitor.id) - - output = entityAsMap(response) - assertEquals(monitor.name, output["monitor_name"]) - val errorAlert = searchAlerts(monitor).single { it.state == ERROR } - verifyAlert(errorAlert, monitor, ERROR) - } - - fun `test acknowledged alert is not updated unnecessarily`() { - val monitor = createMonitor( - randomQueryLevelMonitor( - triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN, destinationId = createDestination().id)) - ) - ) - executeMonitor(monitor.id) - acknowledgeAlerts(monitor, searchAlerts(monitor).single()) - val acknowledgedAlert = searchAlerts(monitor).single() - verifyAlert(acknowledgedAlert, monitor, ACKNOWLEDGED) - - // Runner uses ThreadPool.CachedTimeThread thread which only updates once every 200 ms. Wait a bit to - // let lastNotificationTime change. W/o this sleep the test can result in a false negative. 
-        Thread.sleep(200)
-        val response = executeMonitor(monitor.id)
-
-        val output = entityAsMap(response)
-        assertEquals(monitor.name, output["monitor_name"])
-        val currentAlert = searchAlerts(monitor).single()
-        assertEquals("Acknowledged alert was updated when nothing changed", currentAlert, acknowledgedAlert)
-        for (triggerResult in output.objectMap("trigger_results").values) {
-            assertTrue("Action run when alert is acknowledged.", triggerResult.objectMap("action_results").isEmpty())
-        }
-    }
-
-    fun `test alert completion`() {
-        val trigger = randomQueryLevelTrigger(condition = Script("ctx.alert == null"), destinationId = createDestination().id)
-        val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger)))
-
-        executeMonitor(monitor.id)
-        val activeAlert = searchAlerts(monitor).single()
-        verifyAlert(activeAlert, monitor)
-
-        executeMonitor(monitor.id)
-        assertTrue("There's still an active alert", searchAlerts(monitor, AlertIndices.ALERT_INDEX).isEmpty())
-        val completedAlert = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN).single()
-        verifyAlert(completedAlert, monitor, COMPLETED)
-    }
-
-    fun `test execute monitor script error`() {
-        // This painless script should cause a syntax error
-        val trigger = randomQueryLevelTrigger(condition = Script("foo bar baz"))
-        val monitor = randomQueryLevelMonitor(triggers = listOf(trigger))
-
-        val response = executeMonitor(monitor)
-
-        val output = entityAsMap(response)
-        assertEquals(monitor.name, output["monitor_name"])
-        for (triggerResult in output.objectMap("trigger_results").values) {
-            assertTrue("Missing trigger error message", (triggerResult["error"] as String).isNotEmpty())
-        }
-
-        val alerts = searchAlerts(monitor)
-        assertEquals("Alert saved for test monitor", 0, alerts.size)
-    }
-
-    fun `test execute action template error`() {
-        // Intentional syntax error in mustache template
-        val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name"))
-        val monitor = randomQueryLevelMonitor(
-            triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action)))
-        )
-
-        val response = executeMonitor(monitor)
-
-        val output = entityAsMap(response)
-        assertEquals(monitor.name, output["monitor_name"])
-        for (triggerResult in output.objectMap("trigger_results").values) {
-            for (actionResult in triggerResult.objectMap("action_results").values) {
-                assertTrue("Missing action error message", (actionResult["error"] as String).isNotEmpty())
-            }
-        }
-
-        val alerts = searchAlerts(monitor)
-        assertEquals("Alert saved for test monitor", 0, alerts.size)
-    }
-
-    fun `test execute monitor search with period`() {
-        // We can't query .opendistro-alerting-config as it's a system index. Create a test index starting with "."
-        val testIndex = createTestConfigIndex()
-        val fiveDaysAgo = ZonedDateTime.now().minus(5, DAYS).truncatedTo(MILLIS)
-        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(fiveDaysAgo)
-        val testDoc = """{ "test_strict_date_time" : "$testTime" }"""
-        indexDoc(testIndex, "1", testDoc)
-
-        val query = QueryBuilders.rangeQuery("test_strict_date_time")
-            .gt("{{period_end}}||-10d")
-            .lte("{{period_end}}")
-            .format("epoch_millis")
-        val input = SearchInput(indices = listOf(".*"), query = SearchSourceBuilder().query(query))
-        val triggerScript = """
-            // make sure there is at least one monitor
-            return ctx.results[0].hits.hits.size() > 0
-        """.trimIndent()
-        val destinationId = createDestination().id
-        val trigger = randomQueryLevelTrigger(condition = Script(triggerScript), destinationId = destinationId)
-        val monitor = createMonitor(randomQueryLevelMonitor(inputs = listOf(input), triggers = listOf(trigger)))
-
-        val response = executeMonitor(monitor.id)
-
-        val output = entityAsMap(response)
-        assertEquals(monitor.name, output["monitor_name"])
-        val triggerResult = output.objectMap("trigger_results").objectMap(trigger.id)
-        assertEquals(true, triggerResult["triggered"].toString().toBoolean())
-        assertTrue("Unexpected trigger error message", triggerResult["error"]?.toString().isNullOrEmpty())
-
-        val alerts = searchAlerts(monitor)
-        assertEquals("Alert not saved", 1, alerts.size)
-        verifyAlert(alerts.single(), monitor)
-    }
-
-    fun `test execute monitor search with period date math`() {
-        val testIndex = createTestIndex()
-        val fiveDaysAgo = ZonedDateTime.now().minus(5, DAYS).truncatedTo(MILLIS)
-        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(fiveDaysAgo)
-        val testDoc = """{ "test_strict_date_time" : "$testTime" }"""
-        indexDoc(testIndex, "1", testDoc)
-
-        // Queries that use period_start/end should expect these values to always be formatted as 'epoch_millis'. Either
-        // the query should specify the format (like below) or the mapping for the index/field being queried should allow
-        // epoch_millis as an alternative (OpenSearch's default mapping for date fields "strict_date_optional_time||epoch_millis")
-        val query = QueryBuilders.rangeQuery("test_strict_date_time")
-            .gt("{{period_end}}||-10d")
-            .lte("{{period_end}}")
-            .format("epoch_millis")
-        val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().query(query))
-        val triggerScript = """
-            // make sure there is exactly one hit
-            return ctx.results[0].hits.hits.size() == 1
-        """.trimIndent()
-        val trigger = randomQueryLevelTrigger(condition = Script(triggerScript))
-        val monitor = randomQueryLevelMonitor(inputs = listOf(input), triggers = listOf(trigger))
-
-        val response = executeMonitor(monitor)
-
-        val output = entityAsMap(response)
-        assertEquals(monitor.name, output["monitor_name"])
-        val triggerResult = output.objectMap("trigger_results").objectMap(trigger.id)
-        assertEquals(true, triggerResult["triggered"].toString().toBoolean())
-        assertTrue("Unexpected trigger error message", triggerResult["error"]?.toString().isNullOrEmpty())
-        assertNotEquals("period incorrect", output["period_start"], output["period_end"])
-
-        // Don't expect any alerts for this monitor as it has not been saved
-        val alerts = searchAlerts(monitor)
-        assertEquals("Alert saved for test monitor", 0, alerts.size)
-    }
-
-    fun `test execute monitor search with date math`() {
-        // Give the index name in the date math format.
-        val testIndex = "<my-index-{now/d}>"
-        // Add percent encoding for the http client to resolve the format.
-        val encodedTestIndex = createTestIndex(
-            URLEncoder.encode(testIndex, "utf-8")
-        )
-
-        val fiveDaysAgo = ZonedDateTime.now().minus(5, DAYS).truncatedTo(MILLIS)
-        val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(fiveDaysAgo)
-        val testDoc = """{ "test_strict_date_time" : "$testTime" }"""
-        indexDoc(encodedTestIndex, "1", testDoc)
-
-        // Queries that use period_start/end should expect these values to always be formatted as 'epoch_millis'. Either
-        // the query should specify the format (like below) or the mapping for the index/field being queried should allow
-        // epoch_millis as an alternative (OpenSearch's default mapping for date fields "strict_date_optional_time||epoch_millis")
-        val query = QueryBuilders.rangeQuery("test_strict_date_time")
-            .gt("{{period_end}}||-10d")
-            .lte("{{period_end}}")
-            .format("epoch_millis")
-        val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().query(query))
-        val triggerScript = """
-            // make sure there is exactly one hit
-            return ctx.results[0].hits.hits.size() == 1
-        """.trimIndent()
-        val trigger = randomQueryLevelTrigger(condition = Script(triggerScript))
-        val monitor = randomQueryLevelMonitor(inputs = listOf(input), triggers = listOf(trigger))
-
-        val response = executeMonitor(monitor, params = DRYRUN_MONITOR)
-
-        val output = entityAsMap(response)
-
-        assertEquals(monitor.name, output["monitor_name"])
-        @Suppress("UNCHECKED_CAST")
-        val searchResult = (output.objectMap("input_results")["results"] as List<Map<String, Any>>).first()
-        @Suppress("UNCHECKED_CAST")
-        val total = searchResult.stringMap("hits")?.get("total") as Map<String, Any>
-        assertEquals("Incorrect search result", 1, total["value"])
-    }
-
-    fun `test monitor with one bad action and one good action`() {
-        val goodAction = randomAction(
-            template = randomTemplateScript("Hello {{ctx.monitor.name}}"),
-            destinationId = createDestination().id
-        )
-        val syntaxErrorAction = randomAction(
-            name = "bad syntax",
-            template = randomTemplateScript("{{foo"),
-            destinationId = createDestination().id
-        )
-        val actions = listOf(goodAction, syntaxErrorAction)
-        val monitor = createMonitor(
-            randomQueryLevelMonitor(triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = actions)))
-        )
-
-        val output = entityAsMap(executeMonitor(monitor.id))
-
-        assertEquals(monitor.name, output["monitor_name"])
-        for (triggerResult in output.objectMap("trigger_results").values) {
-            for (actionResult in triggerResult.objectMap("action_results").values) {
-                @Suppress("UNCHECKED_CAST") val actionOutput = actionResult["output"] as Map<String, Any>
-                if (actionResult["name"] == goodAction.name) {
-                    assertEquals("Hello ${monitor.name}", actionOutput["message"])
-                } else if (actionResult["name"] == syntaxErrorAction.name) {
-                    assertTrue("Missing action error message", (actionResult["error"] as String).isNotEmpty())
-                } else {
-                    fail("Unknown action: ${actionResult["name"]}")
-                }
-            }
-        }
-
-        val alerts = searchAlerts(monitor)
-        assertEquals("Alert not saved", 1, alerts.size)
-        verifyAlert(alerts.single(), monitor, ERROR)
-    }
-
-    fun `test execute monitor adds to alert error history`() {
-        putAlertMappings() // Required as we do not have a create alert API.
- // This template script has a parsing error to purposefully create an errorMessage during runMonitor - val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name")) - val trigger = randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action)) - val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger))) - val listOfFiveErrorMessages = (1..5).map { i -> AlertError(timestamp = Instant.now(), message = "error message $i") } - val activeAlert = createAlert( - randomAlert(monitor).copy( - state = ACTIVE, errorHistory = listOfFiveErrorMessages, - triggerId = trigger.id, triggerName = trigger.name, severity = trigger.severity - ) - ) - - val response = executeMonitor(monitor.id) - - val updatedAlert = searchAlerts(monitor).single() - assertEquals("Existing active alert was not updated", activeAlert.id, updatedAlert.id) - val output = entityAsMap(response) - assertEquals(monitor.name, output["monitor_name"]) - for (triggerResult in output.objectMap("trigger_results").values) { - for (actionResult in triggerResult.objectMap("action_results").values) { - assertTrue("Missing action error message", (actionResult["error"] as String).isNotEmpty()) - } - } - assertEquals("Wrong number of error messages in history", 6, updatedAlert.errorHistory.size) - } - - fun `test latest error is not lost when alert is completed`() { - // Creates an active alert the first time it's run and completes it the second time the monitor is run. - val trigger = randomQueryLevelTrigger( - condition = Script( - """ - if (ctx.alert == null) { - throw new RuntimeException("foo"); - } else { - return false; - } - """.trimIndent() - ) - ) - val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger))) - - executeMonitor(monitor.id) - val errorAlert = searchAlerts(monitor).single() - verifyAlert(errorAlert, monitor, ERROR) - executeMonitor(monitor.id) - val completedAlert = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN).single() - verifyAlert(completedAlert, monitor, COMPLETED) - - assertNull("Completed alert still has error message.", completedAlert.errorMessage) - assertTrue("Missing error history.", completedAlert.errorHistory.isNotEmpty()) - val latestError = completedAlert.errorHistory.single().message - assertTrue("Latest error is missing from history.", latestError.contains("RuntimeException(\"foo\")")) - } - - fun `test throw script exception`() { - // Creates an active alert the first time it's run and completes it the second time the monitor is run. - val trigger = randomQueryLevelTrigger( - condition = Script( - """ - param[0]; return true - """.trimIndent() - ) - ) - val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger))) - - executeMonitor(monitor.id) - val errorAlert = searchAlerts(monitor).single() - verifyAlert(errorAlert, monitor, ERROR) - executeMonitor(monitor.id) - assertEquals( - "Error does not match", - "Failed evaluating trigger:\nparam[0]; return true\n ^---- HERE", errorAlert.errorMessage - ) - } - - fun `test execute monitor limits alert error history to 10 error messages`() { - putAlertMappings() // Required as we do not have a create alert API. 
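-        // The alert is seeded with 10 errors; after one more failing run the history is expected
-        // to still contain only 10 entries, i.e. stay at the cap.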
- // This template script has a parsing error to purposefully create an errorMessage during runMonitor - val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name")) - val trigger = randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action)) - val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger))) - val listOfTenErrorMessages = (1..10).map { i -> AlertError(timestamp = Instant.now(), message = "error message $i") } - val activeAlert = createAlert( - randomAlert(monitor).copy( - state = ACTIVE, errorHistory = listOfTenErrorMessages, - triggerId = trigger.id, triggerName = trigger.name, severity = trigger.severity - ) - ) - - val response = executeMonitor(monitor.id) - - val output = entityAsMap(response) - assertEquals(monitor.name, output["monitor_name"]) - for (triggerResult in output.objectMap("trigger_results").values) { - for (actionResult in triggerResult.objectMap("action_results").values) { - assertTrue("Missing action error message", (actionResult["error"] as String).isNotEmpty()) - } - } - val updatedAlert = searchAlerts(monitor).single() - assertEquals("Existing active alert was not updated", activeAlert.id, updatedAlert.id) - assertEquals("Wrong number of error messages in history", 10, updatedAlert.errorHistory.size) - } - - fun `test execute monitor creates alert for trigger with no actions`() { - putAlertMappings() // Required as we do not have a create alert API. - - val trigger = randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = emptyList(), destinationId = createDestination().id) - val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger))) - - executeMonitor(monitor.id) - - val alerts = searchAlerts(monitor) - assertEquals("Alert not saved", 1, alerts.size) - verifyAlert(alerts.single(), monitor, ACTIVE) - } - - fun `test execute monitor non-dryrun`() { - val monitor = createMonitor( - randomQueryLevelMonitor( - triggers = listOf( - randomQueryLevelTrigger( - condition = ALWAYS_RUN, - actions = listOf(randomAction(destinationId = createDestination().id)) - ) - ) - ) - ) - - val response = executeMonitor(monitor.id, mapOf("dryrun" to "false")) - - assertEquals("failed dryrun", RestStatus.OK, response.restStatus()) - val alerts = searchAlerts(monitor) - assertEquals("Alert not saved", 1, alerts.size) - verifyAlert(alerts.single(), monitor, ACTIVE) - } - - fun `test execute monitor with already active alert`() { - val monitor = createMonitor( - randomQueryLevelMonitor( - triggers = listOf( - randomQueryLevelTrigger( - condition = ALWAYS_RUN, - actions = listOf(randomAction(destinationId = createDestination().id)) - ) - ) - ) - ) - - val firstExecuteResponse = executeMonitor(monitor.id, mapOf("dryrun" to "false")) - - assertEquals("failed dryrun", RestStatus.OK, firstExecuteResponse.restStatus()) - val alerts = searchAlerts(monitor) - assertEquals("Alert not saved", 1, alerts.size) - verifyAlert(alerts.single(), monitor, ACTIVE) - - val secondExecuteResponse = executeMonitor(monitor.id, mapOf("dryrun" to "false")) - - assertEquals("failed dryrun", RestStatus.OK, secondExecuteResponse.restStatus()) - val newAlerts = searchAlerts(monitor) - assertEquals("Second alert not saved", 1, newAlerts.size) - verifyAlert(newAlerts.single(), monitor, ACTIVE) - } - - fun `test delete monitor with no alerts after alert indices is initialized`() { - putAlertMappings() - - val newMonitor = createMonitor( - randomQueryLevelMonitor( - triggers = listOf(randomQueryLevelTrigger(condition = NEVER_RUN, 
actions = listOf(randomAction()))) - ) - ) - val deleteNewMonitorResponse = client().makeRequest("DELETE", "$ALERTING_BASE_URI/${newMonitor.id}") - - assertEquals("Delete request not successful", RestStatus.OK, deleteNewMonitorResponse.restStatus()) - } - - fun `test update monitor stays on schedule`() { - val monitor = createMonitor(randomQueryLevelMonitor(enabled = true)) - - updateMonitor(monitor.copy(enabledTime = Instant.now())) - - val retrievedMonitor = getMonitor(monitorId = monitor.id) - assertEquals("Monitor enabled time changed.", monitor.enabledTime, retrievedMonitor.enabledTime) - } - - fun `test enabled time by disabling and re-enabling monitor`() { - val monitor = createMonitor(randomQueryLevelMonitor(enabled = true)) - assertNotNull("Enabled time is null on an enabled monitor.", getMonitor(monitor.id).enabledTime) - - val disabledMonitor = updateMonitor(randomQueryLevelMonitor(enabled = false).copy(id = monitor.id)) - assertNull("Enabled time is not null on a disabled monitor.", disabledMonitor.enabledTime) - - val enabledMonitor = updateMonitor(randomQueryLevelMonitor(enabled = true).copy(id = monitor.id)) - assertNotNull("Enabled time is null on an enabled monitor.", enabledMonitor.enabledTime) - } - - fun `test enabled time by providing enabled time`() { - val enabledTime = Instant.ofEpochSecond(1538164858L) // This is 2018-09-28 20:00:58 GMT - val monitor = createMonitor(randomQueryLevelMonitor(enabled = true, enabledTime = enabledTime)) - - val retrievedMonitor = getMonitor(monitorId = monitor.id) - assertTrue("Monitor is not enabled", retrievedMonitor.enabled) - assertEquals("Enabled times do not match", monitor.enabledTime, retrievedMonitor.enabledTime) - } - - fun `test monitor with throttled action for same alert`() { - val actionThrottleEnabled = randomAction( - template = randomTemplateScript("Hello {{ctx.monitor.name}}"), - destinationId = createDestination().id, - throttleEnabled = true, throttle = Throttle(value = 5, unit = MINUTES) - ) - val actionThrottleNotEnabled = randomAction( - template = randomTemplateScript("Hello {{ctx.monitor.name}}"), - destinationId = createDestination().id, - throttleEnabled = false, throttle = Throttle(value = 5, unit = MINUTES) - ) - val actions = listOf(actionThrottleEnabled, actionThrottleNotEnabled) - val monitor = createMonitor( - randomQueryLevelMonitor( - triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = actions)), - schedule = IntervalSchedule(interval = 1, unit = MINUTES) - ) - ) - val monitorRunResultNotThrottled = entityAsMap(executeMonitor(monitor.id)) - verifyActionThrottleResults( - monitorRunResultNotThrottled, - mutableMapOf( - Pair(actionThrottleEnabled.id, false), - Pair(actionThrottleNotEnabled.id, false) - ) - ) - - val notThrottledAlert = searchAlerts(monitor) - assertEquals("1 alert should be returned", 1, notThrottledAlert.size) - verifyAlert(notThrottledAlert.single(), monitor, ACTIVE) - val notThrottledActionResults = verifyActionExecutionResultInAlert( - notThrottledAlert[0], - mutableMapOf(Pair(actionThrottleEnabled.id, 0), Pair(actionThrottleNotEnabled.id, 0)) - ) - - assertEquals(2, notThrottledActionResults.size) - val monitorRunResultThrottled = entityAsMap(executeMonitor(monitor.id)) - verifyActionThrottleResults( - monitorRunResultThrottled, - mutableMapOf( - Pair(actionThrottleEnabled.id, true), - Pair(actionThrottleNotEnabled.id, false) - ) - ) - - val throttledAlert = searchAlerts(monitor) - assertEquals("1 alert should be returned", 1, throttledAlert.size) -
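// Illustrative sketch of the throttling rule the assertions above and below exercise: with
// throttleEnabled, an action is suppressed while its last execution falls inside the throttle
// window (5 minutes here), so the second run reports throttled = true for it. Simplified
// stand-in logic, not the runner's actual implementation.
fun isThrottled(lastExecutionTime: java.time.Instant?, throttleMinutes: Long, now: java.time.Instant): Boolean =
    lastExecutionTime != null &&
        java.time.Duration.between(lastExecutionTime, now).toMinutes() < throttleMinutes
// First run: lastExecutionTime == null -> not throttled. Second run moments later -> throttled.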
verifyAlert(throttledAlert.single(), monitor, ACTIVE) - val throttledActionResults = verifyActionExecutionResultInAlert( - throttledAlert[0], - mutableMapOf(Pair(actionThrottleEnabled.id, 1), Pair(actionThrottleNotEnabled.id, 0)) - ) - - assertEquals(2, throttledActionResults.size) - - assertEquals( - notThrottledActionResults[actionThrottleEnabled.id]!!.lastExecutionTime, - throttledActionResults[actionThrottleEnabled.id]!!.lastExecutionTime - ) - } - - fun `test monitor with throttled action for different alerts`() { - val actionThrottleEnabled = randomAction( - template = randomTemplateScript("Hello {{ctx.monitor.name}}"), - destinationId = createDestination().id, - throttleEnabled = true, throttle = Throttle(value = 5, unit = MINUTES) - ) - val actions = listOf(actionThrottleEnabled) - val trigger = randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = actions) - val monitor = createMonitor( - randomQueryLevelMonitor( - triggers = listOf(trigger), - schedule = IntervalSchedule(interval = 1, unit = ChronoUnit.MINUTES) - ) - ) - val monitorRunResult1 = entityAsMap(executeMonitor(monitor.id)) - verifyActionThrottleResults(monitorRunResult1, mutableMapOf(Pair(actionThrottleEnabled.id, false))) - - val activeAlert1 = searchAlerts(monitor) - assertEquals("1 alert should be returned", 1, activeAlert1.size) - verifyAlert(activeAlert1.single(), monitor, ACTIVE) - val actionResults1 = verifyActionExecutionResultInAlert(activeAlert1[0], mutableMapOf(Pair(actionThrottleEnabled.id, 0))) - - Thread.sleep(200) - updateMonitor(monitor.copy(triggers = listOf(trigger.copy(condition = NEVER_RUN)), id = monitor.id)) - executeMonitor(monitor.id) - val completedAlert = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN).single() - verifyAlert(completedAlert, monitor, COMPLETED) - - updateMonitor(monitor.copy(triggers = listOf(trigger.copy(condition = ALWAYS_RUN)), id = monitor.id)) - val monitorRunResult2 = entityAsMap(executeMonitor(monitor.id)) - verifyActionThrottleResults(monitorRunResult2, mutableMapOf(Pair(actionThrottleEnabled.id, false))) - val activeAlert2 = searchAlerts(monitor) - assertEquals("1 alert should be returned", 1, activeAlert2.size) - assertNotEquals(activeAlert1[0].id, activeAlert2[0].id) - - val actionResults2 = verifyActionExecutionResultInAlert(activeAlert2[0], mutableMapOf(Pair(actionThrottleEnabled.id, 0))) - assertNotEquals( - actionResults1[actionThrottleEnabled.id]!!.lastExecutionTime, - actionResults2[actionThrottleEnabled.id]!!.lastExecutionTime - ) - } - - fun `test execute monitor with email destination creates alert in error state`() { - putAlertMappings() // Required as we do not have a create alert API.
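// Illustrative sketch of why the test above sees throttling reset: an action's
// lastExecutionTime is tracked per alert (in its actionExecutionResults), so completing the
// old alert and raising a new one starts from a clean slate. Types are simplified stand-ins,
// not the plugin's models.
data class SketchActionState(val lastExecutionTime: java.time.Instant?)
fun stateFor(statesByAlertId: Map<String, SketchActionState>, alertId: String): SketchActionState =
    statesByAlertId[alertId] ?: SketchActionState(lastExecutionTime = null) // new alert -> never throttled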
- - val emailAccount = createRandomEmailAccount() - val emailGroup = createRandomEmailGroup() - val email = Email( - emailAccountID = emailAccount.id, - recipients = listOf( - Recipient(type = Recipient.RecipientType.EMAIL, emailGroupID = null, email = "test@email.com"), - Recipient(type = Recipient.RecipientType.EMAIL_GROUP, emailGroupID = emailGroup.id, email = null) - ) - ) - - val destination = createDestination( - Destination( - type = DestinationType.EMAIL, - name = "testDesination", - user = randomUser(), - lastUpdateTime = Instant.now(), - chime = null, - slack = null, - customWebhook = null, - email = email - ) - ) - val action = randomAction(destinationId = destination.id) - val trigger = randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action)) - val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger))) - - executeMonitor(monitor.id) - - val alerts = searchAlerts(monitor) - assertEquals("Alert not saved", 1, alerts.size) - verifyAlert(alerts.single(), monitor, ERROR) - Assert.assertTrue(alerts.single().errorMessage?.contains("Failed running action") as Boolean) - } - - /* - TODO: https://github.com/opensearch-project/alerting/issues/300 - fun `test execute monitor with custom webhook destination`() { - val customWebhook = CustomWebhook("http://15.16.17.18", null, null, 80, null, "PUT", emptyMap(), emptyMap(), null, null) - val destination = createDestination( - Destination( - type = DestinationType.CUSTOM_WEBHOOK, - name = "testDesination", - user = randomUser(), - lastUpdateTime = Instant.now(), - chime = null, - slack = null, - customWebhook = customWebhook, - email = null - ) - ) - val action = randomAction(destinationId = destination.id) - val trigger = randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action)) - val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger))) - executeMonitor(adminClient(), monitor.id) - - val alerts = searchAlerts(monitor) - assertEquals("Alert not saved", 1, alerts.size) - verifyAlert(alerts.single(), monitor, ERROR) - Assert.assertTrue(alerts.single().errorMessage?.contains("Connect timed out") as Boolean) - } - */ - - fun `test create ClusterMetricsInput monitor with ClusterHealth API`() { - // GIVEN - val path = "/_cluster/health" - val input = randomClusterMetricsInput(path = path) - val monitor = createMonitor(randomClusterMetricsMonitor(inputs = listOf(input))) - - // WHEN - val response = executeMonitor(monitor.id) - - // THEN - val output = entityAsMap(response) - val inputResults = output.stringMap("input_results") - val resultsContent = (inputResults?.get("results") as ArrayList<*>)[0] - val errorMessage = inputResults["error"] - - assertEquals(monitor.name, output["monitor_name"]) - assertTrue( - "Monitor results should contain cluster_name, but found: $resultsContent", - resultsContent.toString().contains("cluster_name") - ) - assertNull("There should not be an error message, but found: $errorMessage", errorMessage) - } - - fun `test create ClusterMetricsInput monitor with ClusterStats API`() { - // GIVEN - val path = "/_cluster/stats" - val input = randomClusterMetricsInput(path = path) - val monitor = createMonitor(randomClusterMetricsMonitor(inputs = listOf(input))) - - // WHEN - val response = executeMonitor(monitor.id) - - // THEN - val output = entityAsMap(response) - val inputResults = output.stringMap("input_results") - val resultsContent = (inputResults?.get("results") as ArrayList<*>)[0] - val errorMessage = inputResults["error"] - - 
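// Illustrative sketch of the run-output shape the assertions below navigate: input_results
// carries one entry per input under "results", plus an optional "error". The field values
// here are invented for illustration; only the keys come from the assertions in these tests.
val sketchInputResults: Map<String, Any?> = mapOf(
    "results" to listOf(mapOf("cluster_name" to "example-cluster", "status" to "green")),
    "error" to null
)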
assertEquals(monitor.name, output["monitor_name"]) - assertTrue( - "Monitor results should contain memory_size_in_bytes, but found: $resultsContent", - resultsContent.toString().contains("memory_size_in_bytes") - ) - assertNull("There should not be an error message, but found: $errorMessage", errorMessage) - } - - fun `test create ClusterMetricsInput monitor with alert triggered`() { - // GIVEN - putAlertMappings() - val trigger = randomQueryLevelTrigger( - condition = Script( - """ - return ctx.results[0].number_of_pending_tasks >= 0 - """.trimIndent() - ), - destinationId = createDestination().id - ) - val path = "/_cluster/health" - val input = randomClusterMetricsInput(path = path) - val monitor = createMonitor(randomClusterMetricsMonitor(inputs = listOf(input), triggers = listOf(trigger))) - - // WHEN - val response = executeMonitor(monitor.id) - - // THEN - val output = entityAsMap(response) - assertEquals(monitor.name, output["monitor_name"]) - - val triggerResults = output.objectMap("trigger_results").values - for (triggerResult in triggerResults) { - assertTrue( - "This triggerResult should be triggered: $triggerResult", - triggerResult.objectMap("action_results").isNotEmpty() - ) - } - - val alerts = searchAlerts(monitor) - assertEquals("Alert not saved, $output", 1, alerts.size) - verifyAlert(alerts.single(), monitor, ACTIVE) - } - - fun `test create ClusterMetricsInput monitor with no alert triggered`() { - // GIVEN - putAlertMappings() - val trigger = randomQueryLevelTrigger( - condition = Script( - """ - return ctx.results[0].status.equals("red") - """.trimIndent() - ) - ) - val path = "/_cluster/stats" - val input = randomClusterMetricsInput(path = path) - val monitor = createMonitor(randomClusterMetricsMonitor(inputs = listOf(input), triggers = listOf(trigger))) - - // WHEN - val response = executeMonitor(monitor.id) - - // THEN - val output = entityAsMap(response) - assertEquals(monitor.name, output["monitor_name"]) - - val triggerResults = output.objectMap("trigger_results").values - for (triggerResult in triggerResults) { - assertTrue( - "This triggerResult should not be triggered: $triggerResult", - triggerResult.objectMap("action_results").isEmpty() - ) - } - - val alerts = searchAlerts(monitor) - assertEquals("Alert saved for test monitor, output: $output", 0, alerts.size) - } - - fun `test create ClusterMetricsInput monitor for ClusterHealth API with path parameters`() { - // GIVEN - val indices = (1..5).map { createTestIndex() }.toTypedArray() - val pathParams = indices.joinToString(",") - val path = "/_cluster/health/" - val input = randomClusterMetricsInput( - path = path, - pathParams = pathParams - ) - val monitor = createMonitor(randomClusterMetricsMonitor(inputs = listOf(input))) - - // WHEN - val response = executeMonitor(monitor.id) - - // THEN - val output = entityAsMap(response) - val inputResults = output.stringMap("input_results") - val resultsContent = (inputResults?.get("results") as ArrayList<*>)[0] - val errorMessage = inputResults["error"] - - assertEquals(monitor.name, output["monitor_name"]) - assertTrue( - "Monitor results should contain cluster_name, but found: $resultsContent", - resultsContent.toString().contains("cluster_name") - ) - assertNull("There should not be an error message, but found: $errorMessage", errorMessage) - } - - // TODO: Once an API is implemented that supports adding/removing entries on the - // SupportedApiSettings::supportedApiList, create a test that simulates executing - // a preexisting ClusterMetricsInput monitor for an API 
that has been removed from the supportedApiList. - // This will likely involve adding an API to the list before creating the monitor, and then removing - // the API from the list before executing the monitor. - - fun `test execute monitor with custom webhook destination and denied host`() { - - listOf("http://10.1.1.1", "127.0.0.1").forEach { - val customWebhook = CustomWebhook(it, null, null, 80, null, "PUT", emptyMap(), emptyMap(), null, null) - val destination = createDestination( - Destination( - type = DestinationType.CUSTOM_WEBHOOK, - name = "testDesination", - user = randomUser(), - lastUpdateTime = Instant.now(), - chime = null, - slack = null, - customWebhook = customWebhook, - email = null - ) - ) - val action = randomAction(destinationId = destination.id) - val trigger = randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action)) - val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger))) - executeMonitor(adminClient(), monitor.id) - - val alerts = searchAlerts(monitor) - assertEquals("Alert not saved", 1, alerts.size) - verifyAlert(alerts.single(), monitor, ERROR) - - Assert.assertNotNull(alerts.single().errorMessage) - } - } - - fun `test execute AD monitor returns search result without user`() { - // TODO: change to REST API call to test security enabled case - if (!securityEnabled()) { - val user = randomUser() - val detectorId = randomAlphaOfLength(5) - prepareTestAnomalyResult(detectorId, user) - // for old monitor before enable FGAC, the user field is empty - val monitor = randomADMonitor(inputs = listOf(adSearchInput(detectorId)), triggers = listOf(adMonitorTrigger()), user = null) - val response = executeMonitor(monitor, params = DRYRUN_MONITOR) - val output = entityAsMap(response) - @Suppress("UNCHECKED_CAST") - (output["trigger_results"] as HashMap).forEach { - _, v -> - assertTrue((v as HashMap)["triggered"] as Boolean) - } - assertEquals(monitor.name, output["monitor_name"]) - @Suppress("UNCHECKED_CAST") - val searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - val total = searchResult.stringMap("hits")?.get("total") as Map - assertEquals("Incorrect search result", 5, total["value"]) - @Suppress("UNCHECKED_CAST") - val maxAnomalyGrade = searchResult.stringMap("aggregations")?.get("max_anomaly_grade") as Map - assertEquals("Incorrect search result", 0.9, maxAnomalyGrade["value"]) - } - } - - fun `test execute AD monitor returns search result with empty backend role`() { - // TODO: change to REST API call to test security enabled case - if (!securityEnabled()) { - val user = randomUser() - val detectorId = randomAlphaOfLength(5) - prepareTestAnomalyResult(detectorId, user) - // for old monitor before enable FGAC, the user field is empty - val monitor = randomADMonitor( - inputs = listOf(adSearchInput(detectorId)), triggers = listOf(adMonitorTrigger()), - user = User(user.name, listOf(), user.roles, user.customAttNames) - ) - val response = executeMonitor(monitor, params = DRYRUN_MONITOR) - val output = entityAsMap(response) - @Suppress("UNCHECKED_CAST") - (output["trigger_results"] as HashMap).forEach { - _, v -> - assertTrue((v as HashMap)["triggered"] as Boolean) - } - assertEquals(monitor.name, output["monitor_name"]) - @Suppress("UNCHECKED_CAST") - val searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - val total = searchResult.stringMap("hits")?.get("total") as Map - assertEquals("Incorrect search result", 5, 
total["value"]) - @Suppress("UNCHECKED_CAST") - val maxAnomalyGrade = searchResult.stringMap("aggregations")?.get("max_anomaly_grade") as Map - assertEquals("Incorrect search result", 0.9, maxAnomalyGrade["value"]) - } - } - - fun `test execute AD monitor returns search result with same backend role`() { - // TODO: change to REST API call to test security enabled case - if (!securityEnabled()) { - val detectorId = randomAlphaOfLength(5) - val user = randomUser() - prepareTestAnomalyResult(detectorId, user) - // Test monitor with same user - val monitor = randomADMonitor(inputs = listOf(adSearchInput(detectorId)), triggers = listOf(adMonitorTrigger()), user = user) - val response = executeMonitor(monitor, params = DRYRUN_MONITOR) - val output = entityAsMap(response) - @Suppress("UNCHECKED_CAST") - (output["trigger_results"] as HashMap).forEach { - _, v -> - assertTrue((v as HashMap)["triggered"] as Boolean) - } - @Suppress("UNCHECKED_CAST") - val searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - val total = searchResult.stringMap("hits")?.get("total") as Map - assertEquals("Incorrect search result", 5, total["value"]) - @Suppress("UNCHECKED_CAST") - val maxAnomalyGrade = searchResult.stringMap("aggregations")?.get("max_anomaly_grade") as Map - assertEquals("Incorrect search result", 0.9, maxAnomalyGrade["value"]) - } - } - - fun `test execute AD monitor returns no search result with different backend role`() { - // TODO: change to REST API call to test security enabled case - if (!securityEnabled()) { - val detectorId = randomAlphaOfLength(5) - val user = randomUser() - prepareTestAnomalyResult(detectorId, user) - // Test monitor with different user - val monitor = randomADMonitor( - inputs = listOf(adSearchInput(detectorId)), - triggers = listOf(adMonitorTrigger()), user = randomUser() - ) - val response = executeMonitor(monitor, params = DRYRUN_MONITOR) - val output = entityAsMap(response) - @Suppress("UNCHECKED_CAST") - (output["trigger_results"] as HashMap).forEach { - _, v -> - assertTrue((v as HashMap)["triggered"] as Boolean) - } - @Suppress("UNCHECKED_CAST") - val searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - val total = searchResult.stringMap("hits")?.get("total") as Map - assertEquals("Incorrect search result", 5, total["value"]) - } - } - - fun `test execute bucket-level monitor returns search result`() { - val testIndex = createTestIndex() - insertSampleTimeSerializedData( - testIndex, - listOf( - "test_value_1", - "test_value_1", // adding duplicate to verify aggregation - "test_value_2" - ) - ) - - val query = QueryBuilders.rangeQuery("test_strict_date_time") - .gt("{{period_end}}||-10d") - .lte("{{period_end}}") - .format("epoch_millis") - val compositeSources = listOf( - TermsValuesSourceBuilder("test_field").field("test_field") - ) - val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) - val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) - val triggerScript = """ - params.docCount > 0 - """.trimIndent() - - var trigger = randomBucketLevelTrigger() - trigger = trigger.copy( - bucketSelector = BucketSelectorExtAggregationBuilder( - name = trigger.id, - bucketsPathsMap = mapOf("docCount" to "_count"), - script = Script(triggerScript), - parentBucketPath = "composite_agg", - filter = null - ) - ) - val monitor = 
createMonitor(randomBucketLevelMonitor(inputs = listOf(input), enabled = false, triggers = listOf(trigger))) - val response = executeMonitor(monitor.id, params = DRYRUN_MONITOR) - val output = entityAsMap(response) - - assertEquals(monitor.name, output["monitor_name"]) - @Suppress("UNCHECKED_CAST") - val searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - val buckets = searchResult.stringMap("aggregations")?.stringMap("composite_agg")?.get("buckets") as List> - assertEquals("Incorrect search result", 2, buckets.size) - } - - fun `test execute bucket-level monitor returns search result with multi term agg`() { - val index = "test_index_1234" - indexDoc( - index, - "1", - """{"user_id": "1", - "ip_addr": "12345678", - "user_agent": "chrome" - } - """.trimIndent() - ) - indexDoc( - index, - "2", - """{"user_id": "2", - "ip_addr": "12345678", - "user_agent": "chrome" - } - """.trimIndent() - ) - indexDoc( - index, - "3", - """{"user_id": "2", - "ip_addr": "3443534", - "user_agent": "chrome" - } - """.trimIndent() - ) - - val triggerScript = """ - params.docCount > 0 - """.trimIndent() - - var trigger = randomBucketLevelTrigger() - trigger = trigger.copy( - bucketSelector = BucketSelectorExtAggregationBuilder( - name = trigger.id, - bucketsPathsMap = mapOf("_value" to "distinct_user_count", "docCount" to "_count"), - script = Script(triggerScript), - parentBucketPath = "hot", - filter = null - ) - ) - - val m = randomBucketLevelMonitor( - triggers = listOf(trigger), - inputs = listOf( - SearchInput( - listOf(index), - SearchSourceBuilder().aggregation( - MultiTermsAggregationBuilder("hot") - .terms( - listOf( - MultiTermsValuesSourceConfig.Builder().setFieldName("ip_addr.keyword").build(), - MultiTermsValuesSourceConfig.Builder().setFieldName("user_agent.keyword").build() - ) - ) - .subAggregation(CardinalityAggregationBuilder("distinct_user_count").field("user_id.keyword")) - ) - ) - ) - ) - val monitor = createMonitor(m) - val response = executeMonitor(monitor.id, params = DRYRUN_MONITOR) - val output = entityAsMap(response) - - assertEquals(monitor.name, output["monitor_name"]) - @Suppress("UNCHECKED_CAST") - val searchResult = (output.objectMap("input_results")["results"] as List>).first() - @Suppress("UNCHECKED_CAST") - val buckets = searchResult.stringMap("aggregations")?.stringMap("hot")?.get("buckets") as List> - assertEquals("Incorrect search result", 2, buckets.size) - val distinctUserCountAgg1 = buckets.find { - it.get("key_as_string") == "12345678|chrome" - }!!.get("distinct_user_count") as Map - assertEquals(2, distinctUserCountAgg1.get("value")) - val distinctUserCountAgg2 = buckets.find { - it.get("key_as_string") == "3443534|chrome" - }!!.get("distinct_user_count") as Map - assertEquals(1, distinctUserCountAgg2.get("value")) - } - - fun `test bucket-level monitor alert creation and completion`() { - val testIndex = createTestIndex() - insertSampleTimeSerializedData( - testIndex, - listOf( - "test_value_1", - "test_value_1", // adding duplicate to verify aggregation - "test_value_2" - ) - ) - - val query = QueryBuilders.rangeQuery("test_strict_date_time") - .gt("{{period_end}}||-10d") - .lte("{{period_end}}") - .format("epoch_millis") - val compositeSources = listOf( - TermsValuesSourceBuilder("test_field").field("test_field") - ) - val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) - val input = SearchInput(indices = listOf(testIndex), query = 
SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) - val triggerScript = """ - params.docCount > 0 - """.trimIndent() - - var trigger = randomBucketLevelTrigger() - trigger = trigger.copy( - bucketSelector = BucketSelectorExtAggregationBuilder( - name = trigger.id, - bucketsPathsMap = mapOf("docCount" to "_count"), - script = Script(triggerScript), - parentBucketPath = "composite_agg", - filter = null - ) - ) - val monitor = createMonitor(randomBucketLevelMonitor(inputs = listOf(input), enabled = false, triggers = listOf(trigger))) - executeMonitor(monitor.id) - - // Check created alerts - var alerts = searchAlerts(monitor) - assertEquals("Alerts not saved", 2, alerts.size) - alerts.forEach { - // Given the random configuration of the Bucket-Level Trigger for the test, it's possible to get - // an action configuration that leads to no notifications (meaning the field for the Alert is null). - // Since testing action execution is not relevant to this test, verifyAlert is asked to ignore it. - verifyAlert(it, monitor, ACTIVE, expectNotification = false) - } - - // Delete documents of a particular value - deleteDataWithDocIds( - testIndex, - listOf( - "1", // test_value_1 - "2" // test_value_1 - ) - ) - - // Execute monitor again - executeMonitor(monitor.id) - - // Verify expected alert was completed - alerts = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN) - val activeAlerts = alerts.filter { it.state == ACTIVE } - val completedAlerts = alerts.filter { it.state == COMPLETED } - assertEquals("Incorrect number of active alerts", 1, activeAlerts.size) - assertEquals("Incorrect number of completed alerts", 1, completedAlerts.size) - } - - fun `test bucket-level monitor with acknowledged alert`() { - val testIndex = createTestIndex() - insertSampleTimeSerializedData( - testIndex, - listOf( - "test_value_1", - "test_value_2" - ) - ) - - val query = QueryBuilders.rangeQuery("test_strict_date_time") - .gt("{{period_end}}||-10d") - .lte("{{period_end}}") - .format("epoch_millis") - val compositeSources = listOf( - TermsValuesSourceBuilder("test_field").field("test_field") - ) - val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) - val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) - val triggerScript = """ - params.docCount > 0 - """.trimIndent() - - // For the Actions ensure that there is at least one and any PER_ALERT actions contain ACTIVE, DEDUPED and COMPLETED in its policy - // so that the assertions done later in this test don't fail. - // The config is being mutated this way to still maintain the randomness in configuration (like including other ActionExecutionScope). 
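// Illustrative sketch of the bucket-key hash this test selects alerts by: an aggregation
// alert is identified by a hash built from its composite bucket key values, so a
// single-field bucket hashes to the value itself ("test_value_1" below). The "#" separator
// is an assumption for illustration; the real helper is getBucketKeysHash in AlertingUtils.
fun sketchBucketKeysHash(bucketKeys: List<String>): String = bucketKeys.joinToString("#")
// sketchBucketKeysHash(listOf("test_value_1")) == "test_value_1"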
- val actions = randomActionsForBucketLevelTrigger(min = 1).map { - if (it.actionExecutionPolicy?.actionExecutionScope is PerAlertActionScope) { - it.copy( - actionExecutionPolicy = ActionExecutionPolicy( - PerAlertActionScope(setOf(AlertCategory.NEW, AlertCategory.DEDUPED, AlertCategory.COMPLETED)) - ) - ) - } else { - it - } - } - var trigger = randomBucketLevelTrigger(actions = actions) - trigger = trigger.copy( - bucketSelector = BucketSelectorExtAggregationBuilder( - name = trigger.id, - bucketsPathsMap = mapOf("docCount" to "_count"), - script = Script(triggerScript), - parentBucketPath = "composite_agg", - filter = null - ) - ) - val monitor = createMonitor(randomBucketLevelMonitor(inputs = listOf(input), enabled = false, triggers = listOf(trigger))) - executeMonitor(monitor.id) - - // Check created Alerts - var currentAlerts = searchAlerts(monitor) - assertEquals("Alerts not saved", 2, currentAlerts.size) - currentAlerts.forEach { - verifyAlert(it, monitor, ACTIVE) - } - - // Acknowledge one of the Alerts - val alertToAcknowledge = currentAlerts.single { it.aggregationResultBucket?.getBucketKeysHash().equals("test_value_1") } - acknowledgeAlerts(monitor, alertToAcknowledge) - currentAlerts = searchAlerts(monitor) - val acknowledgedAlert = currentAlerts.single { it.state == ACKNOWLEDGED } - val activeAlert = currentAlerts.single { it.state == ACTIVE } - - // Runner uses ThreadPool.CachedTimeThread thread which only updates once every 200 ms. Wait a bit to - // let lastNotificationTime change. W/o this sleep the test can result in a false negative. - Thread.sleep(200) - executeMonitor(monitor.id) - - // Check that the lastNotification time of the acknowledged Alert wasn't updated and the active Alert's was - currentAlerts = searchAlerts(monitor) - val acknowledgedAlert2 = currentAlerts.single { it.state == ACKNOWLEDGED } - val activeAlert2 = currentAlerts.single { it.state == ACTIVE } - assertEquals("Acknowledged alert was updated", acknowledgedAlert.lastNotificationTime, acknowledgedAlert2.lastNotificationTime) - assertTrue("Active alert was not updated", activeAlert2.lastNotificationTime!! > activeAlert.lastNotificationTime) - - // Remove data so that both Alerts are moved into completed - deleteDataWithDocIds( - testIndex, - listOf( - "1", // test_value_1 - "2" // test_value_2 - ) - ) - - // Execute Monitor and check that both Alerts were updated - Thread.sleep(200) - executeMonitor(monitor.id) - currentAlerts = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN) - val completedAlerts = currentAlerts.filter { it.state == COMPLETED } - assertEquals("Incorrect number of completed alerts", 2, completedAlerts.size) - val previouslyAcknowledgedAlert = completedAlerts.single { it.aggregationResultBucket?.getBucketKeysHash().equals("test_value_1") } - val previouslyActiveAlert = completedAlerts.single { it.aggregationResultBucket?.getBucketKeysHash().equals("test_value_2") } - assertTrue( - "Previously acknowledged alert was not updated when it moved to completed", - previouslyAcknowledgedAlert.lastNotificationTime!! > acknowledgedAlert2.lastNotificationTime - ) - assertTrue( - "Previously active alert was not updated when it moved to completed", - previouslyActiveAlert.lastNotificationTime!! 
> activeAlert2.lastNotificationTime - ) - } - - fun `test bucket-level monitor with findings enabled on term agg`() { - val testIndex = createTestIndex() - insertSampleTimeSerializedData( - testIndex, - listOf( - "test_value_1", - "test_value_2" - ) - ) - - val query = QueryBuilders.rangeQuery("test_strict_date_time") - .gt("{{period_end}}||-10d") - .lte("{{period_end}}") - .format("epoch_millis") - val termAgg = TermsAggregationBuilder("test_field").field("test_field") - val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(termAgg)) - val triggerScript = """ - params.docCount > 0 - """.trimIndent() - - // For the Actions ensure that there is at least one and any PER_ALERT actions contain ACTIVE, DEDUPED and COMPLETED in its policy - // so that the assertions done later in this test don't fail. - // The config is being mutated this way to still maintain the randomness in configuration (like including other ActionExecutionScope). - val actions = randomActionsForBucketLevelTrigger(min = 1).map { - if (it.actionExecutionPolicy?.actionExecutionScope is PerAlertActionScope) { - it.copy( - actionExecutionPolicy = ActionExecutionPolicy( - PerAlertActionScope(setOf(AlertCategory.NEW, AlertCategory.DEDUPED, AlertCategory.COMPLETED)) - ) - ) - } else { - it - } - } - var trigger = randomBucketLevelTrigger(actions = actions) - trigger = trigger.copy( - bucketSelector = BucketSelectorExtAggregationBuilder( - name = trigger.id, - bucketsPathsMap = mapOf("docCount" to "_count"), - script = Script(triggerScript), - parentBucketPath = "test_field", - filter = null - ) - ) - val monitor = createMonitor( - randomBucketLevelMonitor( - inputs = listOf(input), - enabled = false, - triggers = listOf(trigger), - dataSources = DataSources(findingsEnabled = true) - ) - ) - executeMonitor(monitor.id) - - // Check created Alerts - var currentAlerts = searchAlerts(monitor) - assertEquals("Alerts not saved", 2, currentAlerts.size) - currentAlerts.forEach { alert -> - Assert.assertEquals("expected findings for alert", alert.findingIds.size, 1) - } - val findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 1, findings.size) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("2")) - } - - fun `test bucket-level monitor with findings enabled on composite agg`() { - val testIndex = createTestIndex() - insertSampleTimeSerializedData( - testIndex, - listOf( - "test_value_1", - "test_value_2" - ) - ) - - val query = QueryBuilders.rangeQuery("test_strict_date_time") - .gt("{{period_end}}||-10d") - .lte("{{period_end}}") - .format("epoch_millis") - val compositeSources = listOf( - TermsValuesSourceBuilder("test_field").field("test_field") - ) - val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) - val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) - val triggerScript = """ - params.docCount > 0 - """.trimIndent() - - // For the Actions ensure that there is at least one and any PER_ALERT actions contain ACTIVE, DEDUPED and COMPLETED in its policy - // so that the assertions done later in this test don't fail. - // The config is being mutated this way to still maintain the randomness in configuration (like including other ActionExecutionScope). 
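// Illustrative sketch of the bucket-selector evaluation these triggers rely on: bucketsPathsMap
// wires a metric path (here "_count") to a script variable (params.docCount), and only buckets
// where the script returns true become alert/finding candidates. Simplified stand-in for
// BucketSelectorExtAggregationBuilder's behavior.
fun sketchSelectBuckets(buckets: List<Map<String, Any>>, condition: (Long) -> Boolean): List<Map<String, Any>> =
    buckets.filter { bucket -> condition((bucket["doc_count"] as Number).toLong()) }
// sketchSelectBuckets(listOf(mapOf("key" to "test_value_1", "doc_count" to 1L))) { it > 0 }.size == 1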
- val actions = randomActionsForBucketLevelTrigger(min = 1).map { - if (it.actionExecutionPolicy?.actionExecutionScope is PerAlertActionScope) { - it.copy( - actionExecutionPolicy = ActionExecutionPolicy( - PerAlertActionScope(setOf(AlertCategory.NEW, AlertCategory.DEDUPED, AlertCategory.COMPLETED)) - ) - ) - } else { - it - } - } - var trigger = randomBucketLevelTrigger(actions = actions) - trigger = trigger.copy( - bucketSelector = BucketSelectorExtAggregationBuilder( - name = trigger.id, - bucketsPathsMap = mapOf("docCount" to "_count"), - script = Script(triggerScript), - parentBucketPath = "composite_agg", - filter = null - ) - ) - val monitor = createMonitor( - randomBucketLevelMonitor( - inputs = listOf(input), - enabled = false, - triggers = listOf(trigger), - dataSources = DataSources(findingsEnabled = true) - ) - ) - executeMonitor(monitor.id) - - // Check created Alerts - var currentAlerts = searchAlerts(monitor) - assertEquals("Alerts not saved", 2, currentAlerts.size) - currentAlerts.forEach { alert -> - Assert.assertEquals("expected findings for alert", alert.findingIds.size, 1) - } - val findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 1, findings.size) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("1")) - assertTrue("Findings saved for test monitor", findings[0].relatedDocIds.contains("2")) - } - - fun `test bucket-level monitor with findings enabled for multiple group by fields`() { - val testIndex = createTestIndex() - insertSampleTimeSerializedData( - testIndex, - listOf( - "test_value_1", - "test_value_2" - ) - ) - - val query = QueryBuilders.rangeQuery("test_strict_date_time") - .gt("{{period_end}}||-10d") - .lte("{{period_end}}") - .format("epoch_millis") - val compositeSources = listOf( - TermsValuesSourceBuilder("test_field").field("test_field"), - TermsValuesSourceBuilder("number").field("number") - ) - val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) - val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) - val triggerScript = """ - params.docCount > 0 - """.trimIndent() - - // For the Actions ensure that there is at least one and any PER_ALERT actions contain ACTIVE, DEDUPED and COMPLETED in its policy - // so that the assertions done later in this test don't fail. - // The config is being mutated this way to still maintain the randomness in configuration (like including other ActionExecutionScope). 
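// Illustrative note on the test below: unlike the single-field cases above, grouping by two
// composite sources ("test_field" and "number") yields no findings, and the test expects
// findingIds.size == 0. A one-line model of that observed rule, inferred from these
// assertions rather than from the runner's source.
fun sketchFindingsSupported(groupByFields: List<String>): Boolean = groupByFields.size == 1
// sketchFindingsSupported(listOf("test_field")) == true
// sketchFindingsSupported(listOf("test_field", "number")) == false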
- val actions = randomActionsForBucketLevelTrigger(min = 1).map { - if (it.actionExecutionPolicy?.actionExecutionScope is PerAlertActionScope) { - it.copy( - actionExecutionPolicy = ActionExecutionPolicy( - PerAlertActionScope(setOf(AlertCategory.NEW, AlertCategory.DEDUPED, AlertCategory.COMPLETED)) - ) - ) - } else { - it - } - } - var trigger = randomBucketLevelTrigger(actions = actions) - trigger = trigger.copy( - bucketSelector = BucketSelectorExtAggregationBuilder( - name = trigger.id, - bucketsPathsMap = mapOf("docCount" to "_count"), - script = Script(triggerScript), - parentBucketPath = "composite_agg", - filter = null - ) - ) - val monitor = createMonitor( - randomBucketLevelMonitor( - inputs = listOf(input), - enabled = false, - triggers = listOf(trigger), - dataSources = DataSources(findingsEnabled = true) - ) - ) - executeMonitor(monitor.id) - - // Check created Alerts - var currentAlerts = searchAlerts(monitor) - assertEquals("Alerts not saved", 2, currentAlerts.size) - currentAlerts.forEach { alert -> - Assert.assertEquals("expected findings for alert", alert.findingIds.size, 0) - } - val findings = searchFindings(monitor) - assertEquals("Findings saved for test monitor", 0, findings.size) - } - - @Suppress("UNCHECKED_CAST") - fun `test bucket-level monitor with one good action and one bad action`() { - val testIndex = createTestIndex() - insertSampleTimeSerializedData( - testIndex, - listOf( - "test_value_1", - "test_value_1", - "test_value_3", - "test_value_2", - "test_value_2" - ) - ) - - val query = QueryBuilders.rangeQuery("test_strict_date_time") - .gt("{{period_end}}||-10d") - .lte("{{period_end}}") - .format("epoch_millis") - val compositeSources = listOf( - TermsValuesSourceBuilder("test_field").field("test_field") - ) - val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) - val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) - // Trigger script should only create Alerts for 'test_value_1' and 'test_value_2' - val triggerScript = """ - params.docCount > 1 - """.trimIndent() - - val goodAction = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id) - val syntaxErrorAction = randomAction( - name = "bad syntax", - template = randomTemplateScript("{{foo"), - destinationId = createDestination().id - ) - val actions = listOf(goodAction, syntaxErrorAction) - - var trigger = randomBucketLevelTrigger(actions = actions) - trigger = trigger.copy( - bucketSelector = BucketSelectorExtAggregationBuilder( - name = trigger.id, - bucketsPathsMap = mapOf("docCount" to "_count"), - script = Script(triggerScript), - parentBucketPath = "composite_agg", - filter = null - ) - ) - val monitor = createMonitor(randomBucketLevelMonitor(inputs = listOf(input), enabled = false, triggers = listOf(trigger))) - - val output = entityAsMap(executeMonitor(monitor.id)) - // The 'events' in this case are the bucketKeys hashes representing the Alert events - val expectedEvents = setOf("test_value_1", "test_value_2") - - assertEquals(monitor.name, output["monitor_name"]) - for (triggerResult in output.objectMap("trigger_results").values) { - for (alertEvent in triggerResult.objectMap("action_results")) { - assertTrue(expectedEvents.contains(alertEvent.key)) - val actionResults = alertEvent.value.values as Collection> - for (actionResult in actionResults) { - val actionOutput = actionResult["output"] as Map - if (actionResult["name"] == 
goodAction.name) { - assertEquals("Hello ${monitor.name}", actionOutput["message"]) - } else if (actionResult["name"] == syntaxErrorAction.name) { - assertTrue("Missing action error message", (actionResult["error"] as String).isNotEmpty()) - } else { - fail("Unknown action: ${actionResult["name"]}") - } - } - } - } - - // Check created alerts - val alerts = searchAlerts(monitor) - assertEquals("Alerts not saved", 2, alerts.size) - alerts.forEach { - verifyAlert(it, monitor, ACTIVE) - } - } - - @Suppress("UNCHECKED_CAST") - fun `test bucket-level monitor with per execution action scope`() { - val testIndex = createTestIndex() - insertSampleTimeSerializedData( - testIndex, - listOf( - "test_value_1", - "test_value_1", - "test_value_3", - "test_value_2", - "test_value_2" - ) - ) - - val query = QueryBuilders.rangeQuery("test_strict_date_time") - .gt("{{period_end}}||-10d") - .lte("{{period_end}}") - .format("epoch_millis") - val compositeSources = listOf( - TermsValuesSourceBuilder("test_field").field("test_field") - ) - val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) - val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) - // Trigger script should only create Alerts for 'test_value_1' and 'test_value_2' - val triggerScript = """ - params.docCount > 1 - """.trimIndent() - - val action = randomActionWithPolicy( - template = randomTemplateScript("Hello {{ctx.monitor.name}}"), - destinationId = createDestination().id, - actionExecutionPolicy = ActionExecutionPolicy(PerExecutionActionScope()) - ) - var trigger = randomBucketLevelTrigger(actions = listOf(action)) - trigger = trigger.copy( - bucketSelector = BucketSelectorExtAggregationBuilder( - name = trigger.id, - bucketsPathsMap = mapOf("docCount" to "_count"), - script = Script(triggerScript), - parentBucketPath = "composite_agg", - filter = null - ) - ) - val monitor = createMonitor(randomBucketLevelMonitor(inputs = listOf(input), enabled = false, triggers = listOf(trigger))) - - val output = entityAsMap(executeMonitor(monitor.id)) - // The 'events' in this case are the bucketKeys hashes representing the Alert events - val expectedEvents = setOf("test_value_1", "test_value_2") - - assertEquals(monitor.name, output["monitor_name"]) - for (triggerResult in output.objectMap("trigger_results").values) { - for (alertEvent in triggerResult.objectMap("action_results")) { - assertTrue(expectedEvents.contains(alertEvent.key)) - val actionResults = alertEvent.value.values as Collection> - for (actionResult in actionResults) { - val actionOutput = actionResult["output"] as Map - assertEquals("Unknown action: ${actionResult["name"]}", action.name, actionResult["name"]) - assertEquals("Hello ${monitor.name}", actionOutput["message"]) - } - } - } - - // Check created alerts - val alerts = searchAlerts(monitor) - assertEquals("Alerts not saved", 2, alerts.size) - alerts.forEach { - verifyAlert(it, monitor, ACTIVE) - } - } - - fun `test bucket-level monitor with per alert action scope saves completed alerts even if not actionable`() { - val testIndex = createTestIndex() - insertSampleTimeSerializedData( - testIndex, - listOf( - "test_value_1", - "test_value_1", - "test_value_2", - "test_value_2" - ) - ) - - val query = QueryBuilders.rangeQuery("test_strict_date_time") - .gt("{{period_end}}||-10d") - .lte("{{period_end}}") - .format("epoch_millis") - val compositeSources = listOf( - TermsValuesSourceBuilder("test_field").field("test_field") - ) - 
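// Illustrative sketch of the two action execution scopes these bucket-level tests contrast:
// PER_EXECUTION fires an action once per monitor run, while PER_ALERT fires it once per alert
// whose category (NEW, DEDUPED, COMPLETED) is in the policy. Simplified stand-ins for the
// commons ActionExecutionScope classes.
sealed class SketchScope {
    object PerExecution : SketchScope()
    data class PerAlert(val categories: Set<String>) : SketchScope()
}
fun sketchActionRuns(scope: SketchScope, alertCountsByCategory: Map<String, Int>): Int = when (scope) {
    is SketchScope.PerExecution -> 1
    is SketchScope.PerAlert -> scope.categories.sumOf { alertCountsByCategory[it] ?: 0 }
}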
val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) - val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) - val triggerScript = """ - params.docCount > 1 - """.trimIndent() - - val action = randomActionWithPolicy( - template = randomTemplateScript("Hello {{ctx.monitor.name}}"), - destinationId = createDestination().id, - actionExecutionPolicy = ActionExecutionPolicy(PerAlertActionScope(setOf(AlertCategory.DEDUPED, AlertCategory.NEW))) - ) - var trigger = randomBucketLevelTrigger(actions = listOf(action)) - trigger = trigger.copy( - bucketSelector = BucketSelectorExtAggregationBuilder( - name = trigger.id, - bucketsPathsMap = mapOf("docCount" to "_count"), - script = Script(triggerScript), - parentBucketPath = "composite_agg", - filter = null - ) - ) - val monitor = createMonitor(randomBucketLevelMonitor(inputs = listOf(input), enabled = false, triggers = listOf(trigger))) - executeMonitor(monitor.id) - - // Check created Alerts - var currentAlerts = searchAlerts(monitor) - assertEquals("Alerts not saved", 2, currentAlerts.size) - currentAlerts.forEach { - verifyAlert(it, monitor, ACTIVE) - } - - // Remove data so that both Alerts are moved into completed - deleteDataWithDocIds( - testIndex, - listOf( - "1", // test_value_1 - "2", // test_value_1 - "3", // test_value_2 - "4" // test_value_2 - ) - ) - - // Execute Monitor and check that both Alerts were moved to COMPLETED - executeMonitor(monitor.id) - currentAlerts = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN) - val completedAlerts = currentAlerts.filter { it.state == COMPLETED } - assertEquals("Incorrect number of completed alerts", 2, completedAlerts.size) - } - - @Suppress("UNCHECKED_CAST") - fun `test bucket-level monitor throttling with per alert action scope`() { - val testIndex = createTestIndex() - insertSampleTimeSerializedData( - testIndex, - listOf( - "test_value_1", - "test_value_2" - ) - ) - - val query = QueryBuilders.rangeQuery("test_strict_date_time") - .gt("{{period_end}}||-10d") - .lte("{{period_end}}") - .format("epoch_millis") - val compositeSources = listOf( - TermsValuesSourceBuilder("test_field").field("test_field") - ) - val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) - val input = SearchInput(indices = listOf(testIndex), query = SearchSourceBuilder().size(0).query(query).aggregation(compositeAgg)) - val triggerScript = """ - params.docCount > 0 - """.trimIndent() - - val actionThrottleEnabled = randomActionWithPolicy( - template = randomTemplateScript("Hello {{ctx.monitor.name}}"), - destinationId = createDestination().id, - throttleEnabled = true, - throttle = Throttle(value = 5, unit = MINUTES), - actionExecutionPolicy = ActionExecutionPolicy( - actionExecutionScope = PerAlertActionScope(setOf(AlertCategory.DEDUPED, AlertCategory.NEW)) - ) - ) - val actionThrottleNotEnabled = randomActionWithPolicy( - template = randomTemplateScript("Hello {{ctx.monitor.name}}"), - destinationId = createDestination().id, - throttleEnabled = false, - throttle = Throttle(value = 5, unit = MINUTES), - actionExecutionPolicy = ActionExecutionPolicy( - actionExecutionScope = PerAlertActionScope(setOf(AlertCategory.DEDUPED, AlertCategory.NEW)) - ) - ) - val actions = listOf(actionThrottleEnabled, actionThrottleNotEnabled) - var trigger = randomBucketLevelTrigger(actions = actions) - trigger = trigger.copy( - bucketSelector = BucketSelectorExtAggregationBuilder( - name = trigger.id, 
- bucketsPathsMap = mapOf("docCount" to "_count"), - script = Script(triggerScript), - parentBucketPath = "composite_agg", - filter = null - ) - ) - val monitor = createMonitor(randomBucketLevelMonitor(inputs = listOf(input), enabled = false, triggers = listOf(trigger))) - - val monitorRunResultNotThrottled = entityAsMap(executeMonitor(monitor.id)) - verifyActionThrottleResultsForBucketLevelMonitor( - monitorRunResult = monitorRunResultNotThrottled, - expectedEvents = setOf("test_value_1", "test_value_2"), - expectedActionResults = mapOf( - Pair(actionThrottleEnabled.id, false), - Pair(actionThrottleNotEnabled.id, false) - ) - ) - - val notThrottledAlerts = searchAlerts(monitor) - assertEquals("Alerts may not have been saved correctly", 2, notThrottledAlerts.size) - val previousAlertExecutionTime: MutableMap> = mutableMapOf() - notThrottledAlerts.forEach { - verifyAlert(it, monitor, ACTIVE) - val notThrottledActionResults = verifyActionExecutionResultInAlert( - it, - mutableMapOf(Pair(actionThrottleEnabled.id, 0), Pair(actionThrottleNotEnabled.id, 0)) - ) - assertEquals(notThrottledActionResults.size, 2) - // Save the lastExecutionTimes of the actions for the Alert to be compared later against - // the next Monitor execution run - previousAlertExecutionTime[it.id] = mutableMapOf() - previousAlertExecutionTime[it.id]!![actionThrottleEnabled.id] = - notThrottledActionResults[actionThrottleEnabled.id]!!.lastExecutionTime - previousAlertExecutionTime[it.id]!![actionThrottleNotEnabled.id] = - notThrottledActionResults[actionThrottleNotEnabled.id]!!.lastExecutionTime - } - - // Runner uses ThreadPool.CachedTimeThread thread which only updates once every 200 ms. Wait a bit to - // let Action executionTime change. W/o this sleep the test can result in a false negative. - Thread.sleep(200) - val monitorRunResultThrottled = entityAsMap(executeMonitor(monitor.id)) - verifyActionThrottleResultsForBucketLevelMonitor( - monitorRunResult = monitorRunResultThrottled, - expectedEvents = setOf("test_value_1", "test_value_2"), - expectedActionResults = mapOf( - Pair(actionThrottleEnabled.id, true), - Pair(actionThrottleNotEnabled.id, false) - ) - ) - - val throttledAlerts = searchAlerts(monitor) - assertEquals("Alerts may not have been saved correctly", 2, throttledAlerts.size) - throttledAlerts.forEach { - verifyAlert(it, monitor, ACTIVE) - val throttledActionResults = verifyActionExecutionResultInAlert( - it, - mutableMapOf(Pair(actionThrottleEnabled.id, 1), Pair(actionThrottleNotEnabled.id, 0)) - ) - assertEquals(throttledActionResults.size, 2) - - val prevthrottledActionLastExecutionTime = previousAlertExecutionTime[it.id]!![actionThrottleEnabled.id] - val prevNotThrottledActionLastExecutionTime = previousAlertExecutionTime[it.id]!![actionThrottleNotEnabled.id] - assertEquals( - "Last execution time of a throttled action was updated for one of the Alerts", - prevthrottledActionLastExecutionTime, - throttledActionResults[actionThrottleEnabled.id]!!.lastExecutionTime - ) - assertTrue( - "Last execution time of a non-throttled action was not updated for one of the Alerts", - throttledActionResults[actionThrottleNotEnabled.id]!!.lastExecutionTime!! 
> prevNotThrottledActionLastExecutionTime - ) - } - } - - private fun prepareTestAnomalyResult(detectorId: String, user: User) { - val adResultIndex = ".opendistro-anomaly-results-history-2020.10.17" - try { - createTestIndex(adResultIndex, anomalyResultIndexMapping()) - } catch (e: Exception) { - // WarningFailureException is expected as we are creating a system index (name starting with a dot) - assertTrue(e is WarningFailureException) - } - - val twoMinsAgo = ZonedDateTime.now().minus(2, MINUTES).truncatedTo(MILLIS) - val testTime = twoMinsAgo.toEpochSecond() * 1000 - val testResult1 = randomAnomalyResult( - detectorId = detectorId, executionEndTime = testTime, user = user, - anomalyGrade = 0.1 - ) - indexDoc(adResultIndex, "1", testResult1) - val testResult2 = randomAnomalyResult( - detectorId = detectorId, executionEndTime = testTime, user = user, - anomalyGrade = 0.8 - ) - indexDoc(adResultIndex, "2", testResult2) - val testResult3 = randomAnomalyResult( - detectorId = detectorId, executionEndTime = testTime, user = user, - anomalyGrade = 0.5 - ) - indexDoc(adResultIndex, "3", testResult3) - val testResult4 = randomAnomalyResult( - detectorId = detectorId, executionEndTime = testTime, - user = User(user.name, listOf(), user.roles, user.customAttNames), - anomalyGrade = 0.9 - ) - indexDoc(adResultIndex, "4", testResult4) - // User is null - val testResult5 = randomAnomalyResultWithoutUser( - detectorId = detectorId, executionEndTime = testTime, - anomalyGrade = 0.75 - ) - indexDoc(adResultIndex, "5", testResult5) - } - - private fun verifyActionExecutionResultInAlert(alert: Alert, expectedResult: Map<String, Int>): - MutableMap<String, ActionExecutionResult> { - val actionResult = mutableMapOf<String, ActionExecutionResult>() - for (result in alert.actionExecutionResults) { - val expected = expectedResult[result.actionId] - assertEquals(expected, result.throttledCount) - actionResult.put(result.actionId, result) - } - return actionResult - } - - private fun verifyActionThrottleResults(output: MutableMap<String, Any>, expectedResult: Map<String, Boolean>) { - for (triggerResult in output.objectMap("trigger_results").values) { - for (actionResult in triggerResult.objectMap("action_results").values) { - val expected = expectedResult[actionResult["id"]] - assertEquals(expected, actionResult["throttled"]) - } - } - } - - @Suppress("UNCHECKED_CAST") - private fun verifyActionThrottleResultsForBucketLevelMonitor( - monitorRunResult: MutableMap<String, Any>, - expectedEvents: Set<String>, - expectedActionResults: Map<String, Boolean> - ) { - for (triggerResult in monitorRunResult.objectMap("trigger_results").values) { - for (alertEvent in triggerResult.objectMap("action_results")) { - assertTrue(expectedEvents.contains(alertEvent.key)) - val actionResults = alertEvent.value.values as Collection<Map<String, Any>> - for (actionResult in actionResults) { - val expected = expectedActionResults[actionResult["id"]] - assertEquals(expected, actionResult["throttled"]) - } - } - } - } - - private fun verifyAlert( - alert: Alert, - monitor: Monitor, - expectedState: State = ACTIVE, - expectNotification: Boolean = true - ) { - assertNotNull(alert.id) - assertNotNull(alert.startTime) - if (expectNotification) { - assertNotNull(alert.lastNotificationTime) - } - assertEquals("Alert in wrong state", expectedState, alert.state) - if (expectedState == ERROR) { - assertNotNull("Missing error message", alert.errorMessage) - } else { - assertNull("Unexpected error message", alert.errorMessage) - } - if (expectedState == COMPLETED) { - assertNotNull("End time missing for completed alert.", alert.endTime) - } else { - assertNull("End time set for active alert", alert.endTime) - } - 
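// Illustrative restatement of the two invariants checked just above, as a standalone predicate
// over a simplified alert: an error message exists exactly in the ERROR state, and an end time
// exists exactly in the COMPLETED state. SketchAlert is a stand-in, not the real Alert model.
data class SketchAlert(val state: String, val errorMessage: String?, val endTime: java.time.Instant?)
fun sketchInvariantsHold(a: SketchAlert): Boolean =
    (a.errorMessage != null) == (a.state == "ERROR") && (a.endTime != null) == (a.state == "COMPLETED")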
assertEquals(monitor.id, alert.monitorId) - assertEquals(monitor.name, alert.monitorName) - assertEquals(monitor.version, alert.monitorVersion) - - // assert trigger exists for alert - val trigger = monitor.triggers.single { it.id == alert.triggerId } - assertEquals(trigger.name, alert.triggerName) - } - - @Suppress("UNCHECKED_CAST") - /** helper that returns a field in a JSON map whose values are all JSON objects */ - private fun Map<String, Any>.objectMap(key: String): Map<String, Map<String, Any>> { - return this[key] as Map<String, Map<String, Any>> - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/MonitorTests.kt b/alerting/bin/test/org/opensearch/alerting/MonitorTests.kt deleted file mode 100644 index f6ed78541..000000000 --- a/alerting/bin/test/org/opensearch/alerting/MonitorTests.kt +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import org.opensearch.commons.alerting.model.Trigger -import org.opensearch.test.OpenSearchTestCase -import java.lang.IllegalArgumentException -import java.time.Instant - -class MonitorTests : OpenSearchTestCase() { - - fun `test enabled time`() { - val monitor = randomQueryLevelMonitor() - val enabledMonitor = monitor.copy(enabled = true, enabledTime = Instant.now()) - try { - enabledMonitor.copy(enabled = false) - fail("Disabling monitor with enabled time set should fail.") - } catch (e: IllegalArgumentException) { - } - - val disabledMonitor = monitor.copy(enabled = false, enabledTime = null) - - try { - disabledMonitor.copy(enabled = true) - fail("Enabling monitor without enabled time should fail") - } catch (e: IllegalArgumentException) { - } - } - - fun `test max triggers`() { - val monitor = randomQueryLevelMonitor() - - val tooManyTriggers = mutableListOf<Trigger>() - for (i in 0..10) { - tooManyTriggers.add(randomQueryLevelTrigger()) - } - - try { - monitor.copy(triggers = tooManyTriggers) - fail("Monitor with too many triggers should be rejected.") - } catch (e: IllegalArgumentException) { - } - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/ODFERestTestCase.kt b/alerting/bin/test/org/opensearch/alerting/ODFERestTestCase.kt deleted file mode 100644 index 5cfcf430f..000000000 --- a/alerting/bin/test/org/opensearch/alerting/ODFERestTestCase.kt +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import org.apache.http.HttpHost -import org.junit.After -import org.opensearch.client.Request -import org.opensearch.client.RequestOptions -import org.opensearch.client.RestClient -import org.opensearch.client.WarningsHandler -import org.opensearch.common.io.PathUtils -import org.opensearch.common.settings.Settings -import org.opensearch.commons.ConfigConstants.OPENSEARCH_SECURITY_SSL_HTTP_ENABLED -import org.opensearch.commons.ConfigConstants.OPENSEARCH_SECURITY_SSL_HTTP_KEYSTORE_FILEPATH -import org.opensearch.commons.ConfigConstants.OPENSEARCH_SECURITY_SSL_HTTP_KEYSTORE_KEYPASSWORD -import org.opensearch.commons.ConfigConstants.OPENSEARCH_SECURITY_SSL_HTTP_KEYSTORE_PASSWORD -import org.opensearch.commons.ConfigConstants.OPENSEARCH_SECURITY_SSL_HTTP_PEMCERT_FILEPATH -import org.opensearch.commons.rest.SecureRestClientBuilder -import org.opensearch.core.xcontent.DeprecationHandler -import org.opensearch.core.xcontent.MediaType -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.test.rest.OpenSearchRestTestCase -import java.io.IOException - -/** - * Must support 
the 3 scenario runs below: - * 1. Without Security plugin - * 2. With Security plugin and https - * 3. With Security plugin and http - * It's possible to have security enabled with the http transport. - * client() -> admin user - * adminClient() -> adminDN/super-admin user - */ - -abstract class ODFERestTestCase : OpenSearchRestTestCase() { - - fun isHttps(): Boolean { - return System.getProperty("https", "false")!!.toBoolean() - } - - fun securityEnabled(): Boolean { - return System.getProperty("security", "false")!!.toBoolean() - } - - @Suppress("UNCHECKED_CAST") - fun isNotificationPluginInstalled(): Boolean { - val response = entityAsMap(client().makeRequest("GET", "_nodes/plugins")) - val nodesInfo = response["nodes"] as Map<String, Map<String, Any>> - for (nodeInfo in nodesInfo.values) { - val plugins = nodeInfo["plugins"] as List<Map<String, Any>> - for (plugin in plugins) { - if (plugin["name"] == "opensearch-notifications") { - return true - } - } - } - return false - } - - override fun getProtocol(): String { - return if (isHttps()) { - "https" - } else { - "http" - } - } - - override fun preserveIndicesUponCompletion(): Boolean { - return true - } - - open fun preserveODFEIndicesAfterTest(): Boolean = false - - @Throws(IOException::class) - @After - open fun wipeAllODFEIndices() { - if (preserveODFEIndicesAfterTest()) return - - val response = client().performRequest(Request("GET", "/_cat/indices?format=json&expand_wildcards=all")) - - val xContentType = MediaType.fromMediaType(response.entity.contentType.value) - xContentType.xContent().createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - response.entity.content - ).use { parser -> - for (index in parser.list()) { - val jsonObject: Map<*, *> = index as java.util.HashMap<*, *> - val indexName: String = jsonObject["index"] as String - // the .opendistro_security index isn't allowed to be deleted from the cluster - if (".opendistro_security" != indexName) { - val request = Request("DELETE", "/$indexName") - // TODO: remove PERMISSIVE option after moving system index access to REST API call - val options = RequestOptions.DEFAULT.toBuilder() - options.setWarningsHandler(WarningsHandler.PERMISSIVE) - request.options = options.build() - adminClient().performRequest(request) - } - } - } - } - - /** - * Returns the REST client settings used for super-admin actions like cleaning up after the test has completed.
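 * (A hedged note, not in the original file: with -Dsecurity=true and the demo keystore
 * "test-kirk.jks" on the test classpath, buildClient() below takes the
 * SecureRestClientBuilder(settings, configPath) branch and yields the adminDN/super-admin
 * client these settings describe; with -Duser/-Dpassword set instead, it builds a plain
 * basic-auth client. The exact -D flags are an assumption about how the suite is invoked.)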
- */ - override fun restAdminSettings(): Settings { - return Settings - .builder() - .put("http.port", 9200) - .put(OPENSEARCH_SECURITY_SSL_HTTP_ENABLED, isHttps()) - .put(OPENSEARCH_SECURITY_SSL_HTTP_PEMCERT_FILEPATH, "sample.pem") - .put(OPENSEARCH_SECURITY_SSL_HTTP_KEYSTORE_FILEPATH, "test-kirk.jks") - .put(OPENSEARCH_SECURITY_SSL_HTTP_KEYSTORE_PASSWORD, "changeit") - .put(OPENSEARCH_SECURITY_SSL_HTTP_KEYSTORE_KEYPASSWORD, "changeit") - .build() - } - - @Throws(IOException::class) - override fun buildClient(settings: Settings, hosts: Array): RestClient { - if (securityEnabled()) { - val keystore = settings.get(OPENSEARCH_SECURITY_SSL_HTTP_KEYSTORE_FILEPATH) - return when (keystore != null) { - true -> { - // create adminDN (super-admin) client - val uri = javaClass.classLoader.getResource("sample.pem").toURI() - val configPath = PathUtils.get(uri).parent.toAbsolutePath() - SecureRestClientBuilder(settings, configPath).setSocketTimeout(60000).build() - } - false -> { - // create client with passed user - val userName = System.getProperty("user") - val password = System.getProperty("password") - SecureRestClientBuilder(hosts, isHttps(), userName, password).setSocketTimeout(60000).build() - } - } - } else { - val builder = RestClient.builder(*hosts) - configureClient(builder, settings) - builder.setStrictDeprecationMode(true) - return builder.build() - } - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/TestHelpers.kt b/alerting/bin/test/org/opensearch/alerting/TestHelpers.kt deleted file mode 100644 index 47d9f522f..000000000 --- a/alerting/bin/test/org/opensearch/alerting/TestHelpers.kt +++ /dev/null @@ -1,797 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import junit.framework.TestCase.assertNull -import org.apache.http.Header -import org.apache.http.HttpEntity -import org.opensearch.alerting.model.ActionRunResult -import org.opensearch.alerting.model.BucketLevelTriggerRunResult -import org.opensearch.alerting.model.DocumentLevelTriggerRunResult -import org.opensearch.alerting.model.InputRunResults -import org.opensearch.alerting.model.MonitorRunResult -import org.opensearch.alerting.model.QueryLevelTriggerRunResult -import org.opensearch.alerting.model.destination.email.EmailAccount -import org.opensearch.alerting.model.destination.email.EmailEntry -import org.opensearch.alerting.model.destination.email.EmailGroup -import org.opensearch.alerting.util.getBucketKeysHash -import org.opensearch.client.Request -import org.opensearch.client.RequestOptions -import org.opensearch.client.Response -import org.opensearch.client.RestClient -import org.opensearch.client.WarningsHandler -import org.opensearch.common.UUIDs -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentFactory -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder -import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtFilter -import org.opensearch.commons.alerting.model.ActionExecutionResult -import org.opensearch.commons.alerting.model.AggregationResultBucket -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.BucketLevelTrigger -import org.opensearch.commons.alerting.model.ChainedAlertTrigger -import 
org.opensearch.commons.alerting.model.ChainedMonitorFindings -import org.opensearch.commons.alerting.model.ClusterMetricsInput -import org.opensearch.commons.alerting.model.CompositeInput -import org.opensearch.commons.alerting.model.DataSources -import org.opensearch.commons.alerting.model.Delegate -import org.opensearch.commons.alerting.model.DocLevelMonitorInput -import org.opensearch.commons.alerting.model.DocLevelQuery -import org.opensearch.commons.alerting.model.DocumentLevelTrigger -import org.opensearch.commons.alerting.model.Finding -import org.opensearch.commons.alerting.model.Input -import org.opensearch.commons.alerting.model.IntervalSchedule -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.QueryLevelTrigger -import org.opensearch.commons.alerting.model.Schedule -import org.opensearch.commons.alerting.model.SearchInput -import org.opensearch.commons.alerting.model.Sequence -import org.opensearch.commons.alerting.model.Trigger -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.commons.alerting.model.Workflow.WorkflowType -import org.opensearch.commons.alerting.model.action.Action -import org.opensearch.commons.alerting.model.action.ActionExecutionPolicy -import org.opensearch.commons.alerting.model.action.ActionExecutionScope -import org.opensearch.commons.alerting.model.action.AlertCategory -import org.opensearch.commons.alerting.model.action.PerAlertActionScope -import org.opensearch.commons.alerting.model.action.PerExecutionActionScope -import org.opensearch.commons.alerting.model.action.Throttle -import org.opensearch.commons.alerting.util.string -import org.opensearch.commons.authuser.User -import org.opensearch.core.common.settings.SecureString -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.index.query.QueryBuilders -import org.opensearch.script.Script -import org.opensearch.script.ScriptType -import org.opensearch.search.SearchModule -import org.opensearch.search.aggregations.bucket.terms.IncludeExclude -import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.test.OpenSearchTestCase.randomBoolean -import org.opensearch.test.OpenSearchTestCase.randomInt -import org.opensearch.test.OpenSearchTestCase.randomIntBetween -import org.opensearch.test.rest.OpenSearchRestTestCase -import java.time.Instant -import java.time.temporal.ChronoUnit - -fun randomQueryLevelMonitor( - name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - user: User = randomUser(), - inputs: List = listOf(SearchInput(emptyList(), SearchSourceBuilder().query(QueryBuilders.matchAllQuery()))), - schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), - enabled: Boolean = randomBoolean(), - triggers: List = (1..randomInt(10)).map { randomQueryLevelTrigger() }, - enabledTime: Instant? 
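    // The default that follows keeps the Monitor invariant exercised by MonitorTests above:
    // enabledTime is non-null exactly when enabled == true.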
= if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, - lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), - withMetadata: Boolean = false -): Monitor { - return Monitor( - name = name, monitorType = Monitor.MonitorType.QUERY_LEVEL_MONITOR, enabled = enabled, inputs = inputs, - schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, - uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() - ) -} - -// Monitor of older versions without security. -fun randomQueryLevelMonitorWithoutUser( - name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - inputs: List = listOf(SearchInput(emptyList(), SearchSourceBuilder().query(QueryBuilders.matchAllQuery()))), - schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), - enabled: Boolean = randomBoolean(), - triggers: List = (1..randomInt(10)).map { randomQueryLevelTrigger() }, - enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, - lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), - withMetadata: Boolean = false -): Monitor { - return Monitor( - name = name, monitorType = Monitor.MonitorType.QUERY_LEVEL_MONITOR, enabled = enabled, inputs = inputs, - schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = null, - uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() - ) -} - -fun randomBucketLevelMonitor( - name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - user: User = randomUser(), - inputs: List = listOf( - SearchInput( - emptyList(), - SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) - .aggregation(TermsAggregationBuilder("test_agg").field("test_field")) - ) - ), - schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), - enabled: Boolean = randomBoolean(), - triggers: List = (1..randomInt(10)).map { randomBucketLevelTrigger() }, - enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, - lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), - withMetadata: Boolean = false -): Monitor { - return Monitor( - name = name, monitorType = Monitor.MonitorType.BUCKET_LEVEL_MONITOR, enabled = enabled, inputs = inputs, - schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, - uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() - ) -} - -fun randomBucketLevelMonitor( - name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - user: User = randomUser(), - inputs: List = listOf( - SearchInput( - emptyList(), - SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) - .aggregation(TermsAggregationBuilder("test_agg").field("test_field")) - ) - ), - schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), - enabled: Boolean = randomBoolean(), - triggers: List = (1..randomInt(10)).map { randomBucketLevelTrigger() }, - enabledTime: Instant? 
= if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, - lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), - withMetadata: Boolean = false, - dataSources: DataSources -): Monitor { - return Monitor( - name = name, monitorType = Monitor.MonitorType.BUCKET_LEVEL_MONITOR, enabled = enabled, inputs = inputs, - schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, - uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf(), - dataSources = dataSources - ) -} - -fun randomClusterMetricsMonitor( - name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - user: User = randomUser(), - inputs: List = listOf(randomClusterMetricsInput()), - schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), - enabled: Boolean = randomBoolean(), - triggers: List = (1..randomInt(10)).map { randomQueryLevelTrigger() }, - enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, - lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), - withMetadata: Boolean = false -): Monitor { - return Monitor( - name = name, monitorType = Monitor.MonitorType.CLUSTER_METRICS_MONITOR, enabled = enabled, inputs = inputs, - schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, - uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf() - ) -} - -fun randomDocumentLevelMonitor( - name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - user: User? = randomUser(), - inputs: List = listOf(DocLevelMonitorInput("description", listOf("index"), emptyList())), - schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), - enabled: Boolean = randomBoolean(), - triggers: List = (1..randomInt(10)).map { randomQueryLevelTrigger() }, - enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, - lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), - withMetadata: Boolean = false, - owner: String? = null -): Monitor { - return Monitor( - name = name, monitorType = Monitor.MonitorType.DOC_LEVEL_MONITOR, enabled = enabled, inputs = inputs, - schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, - uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf(), owner = owner - ) -} - -fun randomDocumentLevelMonitor( - name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - user: User? = randomUser(), - inputs: List = listOf(DocLevelMonitorInput("description", listOf("index"), emptyList())), - schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), - enabled: Boolean = randomBoolean(), - triggers: List = (1..randomInt(10)).map { randomQueryLevelTrigger() }, - enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, - lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), - withMetadata: Boolean = false, - dataSources: DataSources, - owner: String? 
= null -): Monitor { - return Monitor( - name = name, monitorType = Monitor.MonitorType.DOC_LEVEL_MONITOR, enabled = enabled, inputs = inputs, - schedule = schedule, triggers = triggers, enabledTime = enabledTime, lastUpdateTime = lastUpdateTime, user = user, - uiMetadata = if (withMetadata) mapOf("foo" to "bar") else mapOf(), dataSources = dataSources, owner = owner - ) -} - -fun randomWorkflow( - id: String = Workflow.NO_ID, - monitorIds: List<String>, - name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - user: User? = randomUser(), - schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), - enabled: Boolean = randomBoolean(), - enabledTime: Instant? = if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, - lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), - triggers: List<Trigger> = emptyList(), - auditDelegateMonitorAlerts: Boolean? = true -): Workflow { - val delegates = mutableListOf<Delegate>() - if (monitorIds.isNotEmpty()) { - delegates.add(Delegate(1, monitorIds[0])) - for (i in 1 until monitorIds.size) { - // Monitors run in the order they are passed in: each delegate after the first chains on the findings of the previous monitor (see the sketch below) - delegates.add(Delegate(i + 1, monitorIds[i], ChainedMonitorFindings(monitorIds[i - 1]))) - } - } - - return Workflow( - id = id, - name = name, - enabled = enabled, - schedule = schedule, - lastUpdateTime = lastUpdateTime, - enabledTime = enabledTime, - workflowType = WorkflowType.COMPOSITE, - user = user, - inputs = listOf(CompositeInput(Sequence(delegates))), - version = -1L, - schemaVersion = 0, - triggers = triggers, - auditDelegateMonitorAlerts = auditDelegateMonitorAlerts - ) -} - -fun randomWorkflowWithDelegates( - id: String = Workflow.NO_ID, - delegates: List<Delegate>, - name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - user: User? = randomUser(), - schedule: Schedule = IntervalSchedule(interval = 5, unit = ChronoUnit.MINUTES), - enabled: Boolean = randomBoolean(), - enabledTime: Instant?
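    // A minimal sketch of what randomWorkflow builds above (illustrative monitor ids):
    //   randomWorkflow(monitorIds = listOf("m1", "m2", "m3")) produces the delegates
    //     Delegate(1, "m1"),
    //     Delegate(2, "m2", ChainedMonitorFindings("m1")),
    //     Delegate(3, "m3", ChainedMonitorFindings("m2"))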
= if (enabled) Instant.now().truncatedTo(ChronoUnit.MILLIS) else null, - lastUpdateTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), - triggers: List = emptyList() -): Workflow { - return Workflow( - id = id, - name = name, - enabled = enabled, - schedule = schedule, - lastUpdateTime = lastUpdateTime, - enabledTime = enabledTime, - workflowType = WorkflowType.COMPOSITE, - user = user, - inputs = listOf(CompositeInput(Sequence(delegates))), - version = -1L, - schemaVersion = 0, - triggers = triggers - ) -} - -fun randomQueryLevelTrigger( - id: String = UUIDs.base64UUID(), - name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - severity: String = "1", - condition: Script = randomScript(), - actions: List = mutableListOf(), - destinationId: String = "" -): QueryLevelTrigger { - return QueryLevelTrigger( - id = id, - name = name, - severity = severity, - condition = condition, - actions = if (actions.isEmpty()) (0..randomInt(10)).map { randomAction(destinationId = destinationId) } else actions - ) -} - -fun randomBucketLevelTrigger( - id: String = UUIDs.base64UUID(), - name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - severity: String = "1", - bucketSelector: BucketSelectorExtAggregationBuilder = randomBucketSelectorExtAggregationBuilder(name = id), - actions: List = mutableListOf(), - destinationId: String = "" -): BucketLevelTrigger { - return BucketLevelTrigger( - id = id, - name = name, - severity = severity, - bucketSelector = bucketSelector, - actions = if (actions.isEmpty()) randomActionsForBucketLevelTrigger(destinationId = destinationId) else actions - ) -} - -fun randomActionsForBucketLevelTrigger(min: Int = 0, max: Int = 10, destinationId: String = ""): List = - (min..randomInt(max)).map { randomActionWithPolicy(destinationId = destinationId) } - -fun randomDocumentLevelTrigger( - id: String = UUIDs.base64UUID(), - name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - severity: String = "1", - condition: Script = randomScript(), - actions: List = mutableListOf(), - destinationId: String = "" -): DocumentLevelTrigger { - return DocumentLevelTrigger( - id = id, - name = name, - severity = severity, - condition = condition, - actions = if (actions.isEmpty() && destinationId.isNotBlank()) { - (0..randomInt(10)).map { randomAction(destinationId = destinationId) } - } else actions - ) -} - -fun randomBucketSelectorExtAggregationBuilder( - name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - bucketsPathsMap: MutableMap = mutableMapOf("avg" to "10"), - script: Script = randomBucketSelectorScript(params = bucketsPathsMap), - parentBucketPath: String = "testPath", - filter: BucketSelectorExtFilter = BucketSelectorExtFilter(IncludeExclude("foo*", "bar*")) -): BucketSelectorExtAggregationBuilder { - return BucketSelectorExtAggregationBuilder(name, bucketsPathsMap, script, parentBucketPath, filter) -} - -fun randomBucketSelectorScript( - idOrCode: String = "params.avg >= 0", - params: Map = mutableMapOf("avg" to "10") -): Script { - return Script(Script.DEFAULT_SCRIPT_TYPE, Script.DEFAULT_SCRIPT_LANG, idOrCode, emptyMap(), params) -} - -fun randomEmailAccount( - salt: String = "", - name: String = salt + OpenSearchRestTestCase.randomAlphaOfLength(10), - email: String = salt + OpenSearchRestTestCase.randomAlphaOfLength(5) + "@email.com", - host: String = salt + OpenSearchRestTestCase.randomAlphaOfLength(10), - port: Int = randomIntBetween(1, 100), - method: EmailAccount.MethodType = randomEmailAccountMethod(), - username: SecureString? 
= null, - password: SecureString? = null -): EmailAccount { - return EmailAccount( - name = name, - email = email, - host = host, - port = port, - method = method, - username = username, - password = password - ) -} - -fun randomEmailGroup( - salt: String = "", - name: String = salt + OpenSearchRestTestCase.randomAlphaOfLength(10), - emails: List = (1..randomInt(10)).map { - EmailEntry(email = salt + OpenSearchRestTestCase.randomAlphaOfLength(5) + "@email.com") - } -): EmailGroup { - return EmailGroup(name = name, emails = emails) -} - -fun randomScript(source: String = "return " + OpenSearchRestTestCase.randomBoolean().toString()): Script = Script(source) - -val ADMIN = "admin" -val ALERTING_BASE_URI = "/_plugins/_alerting/monitors" -val WORKFLOW_ALERTING_BASE_URI = "/_plugins/_alerting/workflows" -val DESTINATION_BASE_URI = "/_plugins/_alerting/destinations" -val LEGACY_OPENDISTRO_ALERTING_BASE_URI = "/_opendistro/_alerting/monitors" -val LEGACY_OPENDISTRO_DESTINATION_BASE_URI = "/_opendistro/_alerting/destinations" -val ALWAYS_RUN = Script("return true") -val NEVER_RUN = Script("return false") -val DRYRUN_MONITOR = mapOf("dryrun" to "true") -val TEST_HR_INDEX = "hr_data" -val TEST_NON_HR_INDEX = "not_hr_data" -val TEST_HR_ROLE = "hr_role" -val TEST_HR_BACKEND_ROLE = "HR" - -// Using a triple-quote string for the query so escaped quotes are kept as-is -// in the request made using triple-quote strings (i.e. createIndexRoleWithDocLevelSecurity). -// Removing the escape slash in the request causes the security API role request to fail with parsing exception. -val TERM_DLS_QUERY = """{\"term\": { \"accessible\": true}}""" - -fun randomTemplateScript( - source: String, - params: Map = emptyMap() -): Script = Script(ScriptType.INLINE, Script.DEFAULT_TEMPLATE_LANG, source, params) - -fun randomAction( - name: String = OpenSearchRestTestCase.randomUnicodeOfLength(10), - template: Script = randomTemplateScript("Hello World"), - destinationId: String = "", - throttleEnabled: Boolean = false, - throttle: Throttle = randomThrottle() -) = Action(name, destinationId, template, template, throttleEnabled, throttle, actionExecutionPolicy = null) - -fun randomActionWithPolicy( - name: String = OpenSearchRestTestCase.randomUnicodeOfLength(10), - template: Script = randomTemplateScript("Hello World"), - destinationId: String = "", - throttleEnabled: Boolean = false, - throttle: Throttle = randomThrottle(), - actionExecutionPolicy: ActionExecutionPolicy? 
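    // Note on the body below: throttling is only supported for per-alert scopes, so when the
    // generated policy is PerExecutionActionScope the throttle argument is discarded and the
    // Action is built with throttle = null. Illustrative consequence (assumed call):
    //   randomActionWithPolicy(actionExecutionPolicy = ActionExecutionPolicy(PerExecutionActionScope())).throttle == null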
= randomActionExecutionPolicy() -): Action { - return if (actionExecutionPolicy?.actionExecutionScope is PerExecutionActionScope) { - // Return null for throttle when using PerExecutionActionScope since throttling is currently not supported for it - Action(name, destinationId, template, template, throttleEnabled, null, actionExecutionPolicy = actionExecutionPolicy) - } else { - Action(name, destinationId, template, template, throttleEnabled, throttle, actionExecutionPolicy = actionExecutionPolicy) - } -} - -fun randomThrottle( - value: Int = randomIntBetween(60, 120), - unit: ChronoUnit = ChronoUnit.MINUTES -) = Throttle(value, unit) - -fun randomActionExecutionPolicy( - actionExecutionScope: ActionExecutionScope = randomActionExecutionScope() -) = ActionExecutionPolicy(actionExecutionScope) - -fun randomActionExecutionScope(): ActionExecutionScope { - return if (randomBoolean()) { - val alertCategories = AlertCategory.values() - PerAlertActionScope(actionableAlerts = (1..randomInt(alertCategories.size)).map { alertCategories[it - 1] }.toSet()) - } else { - PerExecutionActionScope() - } -} - -fun randomAlert(monitor: Monitor = randomQueryLevelMonitor()): Alert { - val trigger = randomQueryLevelTrigger() - val actionExecutionResults = mutableListOf(randomActionExecutionResult(), randomActionExecutionResult()) - return Alert( - monitor, - trigger, - Instant.now().truncatedTo(ChronoUnit.MILLIS), - null, - actionExecutionResults = actionExecutionResults - ) -} - -fun randomDocLevelQuery( - id: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - query: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - name: String = "${randomInt(5)}", - tags: List = mutableListOf(0..randomInt(10)).map { OpenSearchRestTestCase.randomAlphaOfLength(10) } -): DocLevelQuery { - return DocLevelQuery(id = id, query = query, name = name, tags = tags, fields = listOf()) -} - -fun randomDocLevelMonitorInput( - description: String = OpenSearchRestTestCase.randomAlphaOfLength(randomInt(10)), - indices: List = listOf(1..randomInt(10)).map { OpenSearchRestTestCase.randomAlphaOfLength(10) }, - queries: List = listOf(1..randomInt(10)).map { randomDocLevelQuery() } -): DocLevelMonitorInput { - return DocLevelMonitorInput(description = description, indices = indices, queries = queries) -} - -fun randomFinding( - id: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - relatedDocIds: List = listOf(OpenSearchRestTestCase.randomAlphaOfLength(10)), - monitorId: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - monitorName: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - index: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - docLevelQueries: List = listOf(randomDocLevelQuery()), - timestamp: Instant = Instant.now() -): Finding { - return Finding( - id = id, - relatedDocIds = relatedDocIds, - monitorId = monitorId, - monitorName = monitorName, - index = index, - docLevelQueries = docLevelQueries, - timestamp = timestamp - ) -} - -fun randomAlertWithAggregationResultBucket(monitor: Monitor = randomBucketLevelMonitor()): Alert { - val trigger = randomBucketLevelTrigger() - val actionExecutionResults = mutableListOf(randomActionExecutionResult(), randomActionExecutionResult()) - return Alert( - monitor, - trigger, - Instant.now().truncatedTo(ChronoUnit.MILLIS), - null, - actionExecutionResults = actionExecutionResults, - aggregationResultBucket = AggregationResultBucket( - "parent_bucket_path_1", - listOf("bucket_key_1"), - mapOf("k1" to "val1", "k2" to "val2") - ) - ) -} - -fun 
randomEmailAccountMethod(): EmailAccount.MethodType { - val methodValues = EmailAccount.MethodType.values().map { it.value } - val randomValue = methodValues[randomInt(methodValues.size - 1)] - return EmailAccount.MethodType.getByValue(randomValue)!! -} - -fun randomActionExecutionResult( - actionId: String = UUIDs.base64UUID(), - lastExecutionTime: Instant = Instant.now().truncatedTo(ChronoUnit.MILLIS), - throttledCount: Int = randomInt() -) = ActionExecutionResult(actionId, lastExecutionTime, throttledCount) - -fun randomQueryLevelMonitorRunResult(): MonitorRunResult<QueryLevelTriggerRunResult> { - val triggerResults = mutableMapOf<String, QueryLevelTriggerRunResult>() - val triggerRunResult = randomQueryLevelTriggerRunResult() - triggerResults["test"] = triggerRunResult - - return MonitorRunResult( - "test-monitor", - Instant.now(), - Instant.now(), - null, - randomInputRunResults(), - triggerResults - ) -} - -fun randomBucketLevelMonitorRunResult(): MonitorRunResult<BucketLevelTriggerRunResult> { - val triggerResults = mutableMapOf<String, BucketLevelTriggerRunResult>() - val triggerRunResult = randomBucketLevelTriggerRunResult() - triggerResults["test"] = triggerRunResult - - return MonitorRunResult( - "test-monitor", - Instant.now(), - Instant.now(), - null, - randomInputRunResults(), - triggerResults - ) -} - -fun randomDocumentLevelMonitorRunResult(): MonitorRunResult<DocumentLevelTriggerRunResult> { - val triggerResults = mutableMapOf<String, DocumentLevelTriggerRunResult>() - val triggerRunResult = randomDocumentLevelTriggerRunResult() - triggerResults["test"] = triggerRunResult - - return MonitorRunResult( - "test-monitor", - Instant.now(), - Instant.now(), - null, - randomInputRunResults(), - triggerResults - ) -} - -fun randomInputRunResults(): InputRunResults { - return InputRunResults(listOf(), null) -} - -fun randomQueryLevelTriggerRunResult(): QueryLevelTriggerRunResult { - val map = mutableMapOf<String, ActionRunResult>() - map["key1"] = randomActionRunResult() - map["key2"] = randomActionRunResult() - return QueryLevelTriggerRunResult("trigger-name", true, null, map) -} - -fun randomClusterMetricsInput( - path: String = ClusterMetricsInput.ClusterMetricType.CLUSTER_HEALTH.defaultPath, - pathParams: String = "", - url: String = "" -): ClusterMetricsInput { - return ClusterMetricsInput(path, pathParams, url) -} - -fun randomBucketLevelTriggerRunResult(): BucketLevelTriggerRunResult { - val map = mutableMapOf<String, ActionRunResult>() - map["key1"] = randomActionRunResult() - map["key2"] = randomActionRunResult() - - val aggBucket1 = AggregationResultBucket( - "parent_bucket_path_1", - listOf("bucket_key_1"), - mapOf("k1" to "val1", "k2" to "val2") - ) - val aggBucket2 = AggregationResultBucket( - "parent_bucket_path_2", - listOf("bucket_key_2"), - mapOf("k1" to "val1", "k2" to "val2") - ) - - val actionResultsMap: MutableMap<String, MutableMap<String, ActionRunResult>> = mutableMapOf() - actionResultsMap[aggBucket1.getBucketKeysHash()] = map - actionResultsMap[aggBucket2.getBucketKeysHash()] = map - - return BucketLevelTriggerRunResult( - "trigger-name", - null, - mapOf( - aggBucket1.getBucketKeysHash() to aggBucket1, - aggBucket2.getBucketKeysHash() to aggBucket2 - ), - actionResultsMap - ) -} - -fun randomDocumentLevelTriggerRunResult(): DocumentLevelTriggerRunResult { - val map = mutableMapOf<String, ActionRunResult>() - map["key1"] = randomActionRunResult() - map["key2"] = randomActionRunResult() - return DocumentLevelTriggerRunResult( - "trigger-name", - mutableListOf(UUIDs.randomBase64UUID().toString()), - null, - mutableMapOf(Pair("alertId", map)) - ) -} - -fun randomActionRunResult(): ActionRunResult { - val map = mutableMapOf<String, String>() - map["key1"] = "val1" - map["key2"] = "val2" - return ActionRunResult( - "1234", - "test-action", - map, - false, - Instant.now(), - null - ) -} - -fun Alert.toJsonString(): String { - val builder = XContentFactory.jsonBuilder() - return this.toXContent(builder, ToXContent.EMPTY_PARAMS).string() -} - -fun randomUser(): User { - return User( - OpenSearchRestTestCase.randomAlphaOfLength(10), - listOf( - OpenSearchRestTestCase.randomAlphaOfLength(10), - OpenSearchRestTestCase.randomAlphaOfLength(10) - ), - listOf(OpenSearchRestTestCase.randomAlphaOfLength(10), ALL_ACCESS_ROLE), - listOf("test_attr=test") - ) -} - -fun randomUserEmpty(): User { - return User("", listOf(), listOf(), listOf()) -} - -fun EmailAccount.toJsonString(): String { - val builder = XContentFactory.jsonBuilder() - return this.toXContent(builder).string() -} - -fun EmailGroup.toJsonString(): String { - val builder = XContentFactory.jsonBuilder() - return this.toXContent(builder).string() -} - -/** - * Wrapper for [RestClient.performRequest] which was deprecated in ES 6.5 and is used in tests. This provides - * a single place to suppress deprecation warnings. This will probably need further work when the API is removed entirely - * but that's an exercise for another day. - */ -@Suppress("DEPRECATION") -fun RestClient.makeRequest( - method: String, - endpoint: String, - params: Map<String, String> = emptyMap(), - entity: HttpEntity? = null, - vararg headers: Header -): Response { - val request = Request(method, endpoint) - // TODO: remove PERMISSIVE option after moving system index access to REST API call - val options = RequestOptions.DEFAULT.toBuilder() - options.setWarningsHandler(WarningsHandler.PERMISSIVE) - headers.forEach { options.addHeader(it.name, it.value) } - request.options = options.build() - params.forEach { request.addParameter(it.key, it.value) } - if (entity != null) { - request.entity = entity - } - return performRequest(request) -} - -/** - * Wrapper for [RestClient.performRequest] which was deprecated in ES 6.5 and is used in tests. This provides - * a single place to suppress deprecation warnings. This will probably need further work when the API is removed entirely - * but that's an exercise for another day. - */ -@Suppress("DEPRECATION") -fun RestClient.makeRequest( - method: String, - endpoint: String, - entity: HttpEntity?
= null, - vararg headers: Header -): Response { - val request = Request(method, endpoint) - val options = RequestOptions.DEFAULT.toBuilder() - // TODO: remove PERMISSIVE option after moving system index access to REST API call - options.setWarningsHandler(WarningsHandler.PERMISSIVE) - headers.forEach { options.addHeader(it.name, it.value) } - request.options = options.build() - if (entity != null) { - request.entity = entity - } - return performRequest(request) -} - -fun builder(): XContentBuilder { - return XContentBuilder.builder(XContentType.JSON.xContent()) -} - -fun parser(xc: String): XContentParser { - val parser = XContentType.JSON.xContent().createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, xc) - parser.nextToken() - return parser -} - -fun xContentRegistry(): NamedXContentRegistry { - return NamedXContentRegistry( - listOf( - SearchInput.XCONTENT_REGISTRY, - DocLevelMonitorInput.XCONTENT_REGISTRY, - QueryLevelTrigger.XCONTENT_REGISTRY, - BucketLevelTrigger.XCONTENT_REGISTRY, - DocumentLevelTrigger.XCONTENT_REGISTRY - ) + SearchModule(Settings.EMPTY, emptyList()).namedXContents - ) -} - -fun assertUserNull(map: Map) { - val user = map["user"] - assertNull("User is not null", user) -} - -fun assertUserNull(monitor: Monitor) { - assertNull("User is not null", monitor.user) -} - -fun assertUserNull(workflow: Workflow) { - assertNull("User is not null", workflow.user) -} - -fun randomChainedAlertTrigger( - id: String = UUIDs.base64UUID(), - name: String = OpenSearchRestTestCase.randomAlphaOfLength(10), - severity: String = "1", - condition: Script = randomScript(), - actions: List = mutableListOf(), - destinationId: String = "" -): ChainedAlertTrigger { - return ChainedAlertTrigger( - id = id, - name = name, - severity = severity, - condition = condition, - actions = if (actions.isEmpty() && destinationId.isNotBlank()) { - (0..randomInt(10)).map { randomAction(destinationId = destinationId) } - } else actions - ) -} diff --git a/alerting/bin/test/org/opensearch/alerting/TriggerServiceTests.kt b/alerting/bin/test/org/opensearch/alerting/TriggerServiceTests.kt deleted file mode 100644 index 6076ebac6..000000000 --- a/alerting/bin/test/org/opensearch/alerting/TriggerServiceTests.kt +++ /dev/null @@ -1,260 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting - -import org.junit.Before -import org.mockito.Mockito -import org.opensearch.alerting.model.BucketLevelTriggerRunResult -import org.opensearch.alerting.model.InputRunResults -import org.opensearch.alerting.model.MonitorRunResult -import org.opensearch.alerting.script.BucketLevelTriggerExecutionContext -import org.opensearch.common.xcontent.XContentType -import org.opensearch.core.xcontent.DeprecationHandler -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.script.ScriptService -import org.opensearch.test.OpenSearchTestCase -import java.time.Instant - -class TriggerServiceTests : OpenSearchTestCase() { - private lateinit var scriptService: ScriptService - private lateinit var triggerService: TriggerService - - @Before - fun setup() { - scriptService = Mockito.mock(ScriptService::class.java) - triggerService = TriggerService(scriptService) - } - - fun `test run bucket level trigger with bucket key as int`() { - val bucketSelectorExtAggregationBuilder = randomBucketSelectorExtAggregationBuilder( - bucketsPathsMap = mutableMapOf("_count" to "_count", "_key" to "_key"), - script = randomScript(source = 
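            // The source on the next line keeps every bucket with at least one document; the
            // canned response below then reports the selected buckets under the trigger id as
            // "bucket_indices": [0, 1, 2] against parent_bucket_path "status_code", which is
            // the shape this test feeds to runBucketLevelTrigger().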
"params._count > 0"), - parentBucketPath = "status_code" - ) - val trigger = randomBucketLevelTrigger(bucketSelector = bucketSelectorExtAggregationBuilder) - val monitor = randomBucketLevelMonitor(triggers = listOf(trigger)) - - val inputResultsStr = "{\n" + - " \"_shards\": {\n" + - " \"total\": 1,\n" + - " \"failed\": 0,\n" + - " \"successful\": 1,\n" + - " \"skipped\": 0\n" + - " },\n" + - " \"hits\": {\n" + - " \"hits\": [\n" + - " {\n" + - " \"_index\": \"sample-http-responses\",\n" + - " \"_type\": \"http\",\n" + - " \"_source\": {\n" + - " \"status_code\": 100,\n" + - " \"http_4xx\": 0,\n" + - " \"http_3xx\": 0,\n" + - " \"http_5xx\": 0,\n" + - " \"http_2xx\": 0,\n" + - " \"timestamp\": 100000,\n" + - " \"http_1xx\": 1\n" + - " },\n" + - " \"_id\": 1,\n" + - " \"_score\": 1\n" + - " }\n" + - " ],\n" + - " \"total\": {\n" + - " \"value\": 4,\n" + - " \"relation\": \"eq\"\n" + - " },\n" + - " \"max_score\": 1\n" + - " },\n" + - " \"took\": 37,\n" + - " \"timed_out\": false,\n" + - " \"aggregations\": {\n" + - " \"status_code\": {\n" + - " \"doc_count_error_upper_bound\": 0,\n" + - " \"sum_other_doc_count\": 0,\n" + - " \"buckets\": [\n" + - " {\n" + - " \"doc_count\": 2,\n" + - " \"key\": 100\n" + - " },\n" + - " {\n" + - " \"doc_count\": 1,\n" + - " \"key\": 102\n" + - " },\n" + - " {\n" + - " \"doc_count\": 1,\n" + - " \"key\": 201\n" + - " }\n" + - " ]\n" + - " },\n" + - " \"${trigger.id}\": {\n" + - " \"parent_bucket_path\": \"status_code\",\n" + - " \"bucket_indices\": [\n" + - " 0,\n" + - " 1,\n" + - " 2\n" + - " ]\n" + - " }\n" + - " }\n" + - "}" - - val parser = XContentType.JSON.xContent() - .createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - inputResultsStr - ) - - val inputResults = parser.map() - - var monitorRunResult = MonitorRunResult(monitor.name, Instant.now(), Instant.now()) - monitorRunResult = monitorRunResult.copy(inputResults = InputRunResults(listOf(inputResults))) - val triggerCtx = BucketLevelTriggerExecutionContext(monitor, trigger, monitorRunResult) - - val bucketLevelTriggerRunResult = triggerService.runBucketLevelTrigger(monitor, trigger, triggerCtx) - assertNull(bucketLevelTriggerRunResult.error) - } - - fun `test run bucket level trigger with bucket key as map`() { - val bucketSelectorExtAggregationBuilder = randomBucketSelectorExtAggregationBuilder( - bucketsPathsMap = mutableMapOf("_count" to "_count", "_key" to "_key"), - script = randomScript(source = "params._count > 0"), - parentBucketPath = "status_code" - ) - val trigger = randomBucketLevelTrigger(bucketSelector = bucketSelectorExtAggregationBuilder) - val monitor = randomBucketLevelMonitor(triggers = listOf(trigger)) - - val inputResultsStr = "{\n" + - " \"_shards\": {\n" + - " \"total\": 1,\n" + - " \"failed\": 0,\n" + - " \"successful\": 1,\n" + - " \"skipped\": 0\n" + - " },\n" + - " \"hits\": {\n" + - " \"hits\": [\n" + - " {\n" + - " \"_index\": \"sample-http-responses\",\n" + - " \"_type\": \"http\",\n" + - " \"_source\": {\n" + - " \"status_code\": 100,\n" + - " \"http_4xx\": 0,\n" + - " \"http_3xx\": 0,\n" + - " \"http_5xx\": 0,\n" + - " \"http_2xx\": 0,\n" + - " \"timestamp\": 100000,\n" + - " \"http_1xx\": 1\n" + - " },\n" + - " \"_id\": 1,\n" + - " \"_score\": 1\n" + - " },\n" + - " {\n" + - " \"_index\": \"sample-http-responses\",\n" + - " \"_type\": \"http\",\n" + - " \"_source\": {\n" + - " \"status_code\": 102,\n" + - " \"http_4xx\": 0,\n" + - " \"http_3xx\": 0,\n" + - " \"http_5xx\": 0,\n" + - " \"http_2xx\": 0,\n" + - " 
\"timestamp\": 160000,\n" + - " \"http_1xx\": 1\n" + - " },\n" + - " \"_id\": 2,\n" + - " \"_score\": 1\n" + - " },\n" + - " {\n" + - " \"_index\": \"sample-http-responses\",\n" + - " \"_type\": \"http\",\n" + - " \"_source\": {\n" + - " \"status_code\": 100,\n" + - " \"http_4xx\": 0,\n" + - " \"http_3xx\": 0,\n" + - " \"http_5xx\": 0,\n" + - " \"http_2xx\": 0,\n" + - " \"timestamp\": 220000,\n" + - " \"http_1xx\": 1\n" + - " },\n" + - " \"_id\": 4,\n" + - " \"_score\": 1\n" + - " },\n" + - " {\n" + - " \"_index\": \"sample-http-responses\",\n" + - " \"_type\": \"http\",\n" + - " \"_source\": {\n" + - " \"status_code\": 201,\n" + - " \"http_4xx\": 0,\n" + - " \"http_3xx\": 0,\n" + - " \"http_5xx\": 0,\n" + - " \"http_2xx\": 1,\n" + - " \"timestamp\": 280000,\n" + - " \"http_1xx\": 0\n" + - " },\n" + - " \"_id\": 5,\n" + - " \"_score\": 1\n" + - " }\n" + - " ],\n" + - " \"total\": {\n" + - " \"value\": 4,\n" + - " \"relation\": \"eq\"\n" + - " },\n" + - " \"max_score\": 1\n" + - " },\n" + - " \"took\": 15,\n" + - " \"timed_out\": false,\n" + - " \"aggregations\": {\n" + - " \"${trigger.id}\": {\n" + - " \"parent_bucket_path\": \"status_code\",\n" + - " \"bucket_indices\": [\n" + - " 0,\n" + - " 1,\n" + - " 2\n" + - " ]\n" + - " },\n" + - " \"status_code\": {\n" + - " \"buckets\": [\n" + - " {\n" + - " \"doc_count\": 2,\n" + - " \"key\": {\n" + - " \"status_code\": 100\n" + - " }\n" + - " },\n" + - " {\n" + - " \"doc_count\": 1,\n" + - " \"key\": {\n" + - " \"status_code\": 102\n" + - " }\n" + - " },\n" + - " {\n" + - " \"doc_count\": 1,\n" + - " \"key\": {\n" + - " \"status_code\": 201\n" + - " }\n" + - " }\n" + - " ],\n" + - " \"after_key\": {\n" + - " \"status_code\": 201\n" + - " }\n" + - " }\n" + - " }\n" + - "}" - - val parser = XContentType.JSON.xContent() - .createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - inputResultsStr - ) - - val inputResults = parser.map() - - var monitorRunResult = MonitorRunResult(monitor.name, Instant.now(), Instant.now()) - monitorRunResult = monitorRunResult.copy(inputResults = InputRunResults(listOf(inputResults))) - val triggerCtx = BucketLevelTriggerExecutionContext(monitor, trigger, monitorRunResult) - - val bucketLevelTriggerRunResult = triggerService.runBucketLevelTrigger(monitor, trigger, triggerCtx) - assertNull(bucketLevelTriggerRunResult.error) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorActionTests.kt b/alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorActionTests.kt deleted file mode 100644 index d284fc70e..000000000 --- a/alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorActionTests.kt +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ -package org.opensearch.alerting.action - -import org.opensearch.test.OpenSearchTestCase - -class ExecuteMonitorActionTests : OpenSearchTestCase() { - - fun `test execute monitor action name`() { - assertNotNull(ExecuteMonitorAction.INSTANCE.name()) - assertEquals(ExecuteMonitorAction.INSTANCE.name(), ExecuteMonitorAction.NAME) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorRequestTests.kt b/alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorRequestTests.kt deleted file mode 100644 index f54b6fea6..000000000 --- a/alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorRequestTests.kt +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * 
SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.alerting.randomQueryLevelMonitor -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.common.unit.TimeValue -import org.opensearch.commons.alerting.model.SearchInput -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.test.OpenSearchTestCase - -class ExecuteMonitorRequestTests : OpenSearchTestCase() { - - fun `test execute monitor request with id`() { - - val req = ExecuteMonitorRequest(false, TimeValue.timeValueSeconds(100L), "1234", null) - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = ExecuteMonitorRequest(sin) - assertEquals("1234", newReq.monitorId) - assertEquals(false, newReq.dryrun) - assertNull(newReq.monitor) - assertEquals(req.monitor, newReq.monitor) - } - - fun `test execute monitor request with monitor`() { - val monitor = randomQueryLevelMonitor().copy(inputs = listOf(SearchInput(emptyList(), SearchSourceBuilder()))) - val req = ExecuteMonitorRequest(false, TimeValue.timeValueSeconds(100L), null, monitor) - assertNotNull(req.monitor) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = ExecuteMonitorRequest(sin) - assertNull(newReq.monitorId) - assertEquals(false, newReq.dryrun) - assertNotNull(newReq.monitor) - assertEquals(req.monitor, newReq.monitor) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorResponseTests.kt b/alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorResponseTests.kt deleted file mode 100644 index 10ccd7038..000000000 --- a/alerting/bin/test/org/opensearch/alerting/action/ExecuteMonitorResponseTests.kt +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.junit.Assert -import org.opensearch.alerting.randomBucketLevelMonitorRunResult -import org.opensearch.alerting.randomQueryLevelMonitorRunResult -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.test.OpenSearchTestCase - -class ExecuteMonitorResponseTests : OpenSearchTestCase() { - - fun `test exec query-level monitor response`() { - val req = ExecuteMonitorResponse(randomQueryLevelMonitorRunResult()) - Assert.assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = ExecuteMonitorResponse(sin) - assertNotNull(newReq.monitorRunResult) - assertEquals("test-monitor", newReq.monitorRunResult.monitorName) - assertNotNull(newReq.monitorRunResult.inputResults) - } - - fun `test exec bucket-level monitor response`() { - val req = ExecuteMonitorResponse(randomBucketLevelMonitorRunResult()) - Assert.assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = ExecuteMonitorResponse(sin) - assertNotNull(newReq.monitorRunResult) - assertEquals("test-monitor", newReq.monitorRunResult.monitorName) - assertNotNull(newReq.monitorRunResult.inputResults) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetDestinationsActionTests.kt 
b/alerting/bin/test/org/opensearch/alerting/action/GetDestinationsActionTests.kt deleted file mode 100644 index c18e33790..000000000 --- a/alerting/bin/test/org/opensearch/alerting/action/GetDestinationsActionTests.kt +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.test.OpenSearchTestCase - -class GetDestinationsActionTests : OpenSearchTestCase() { - - fun `test get destinations action name`() { - assertNotNull(GetDestinationsAction.INSTANCE.name()) - assertEquals(GetDestinationsAction.INSTANCE.name(), GetDestinationsAction.NAME) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetDestinationsRequestTests.kt b/alerting/bin/test/org/opensearch/alerting/action/GetDestinationsRequestTests.kt deleted file mode 100644 index 7c76621f9..000000000 --- a/alerting/bin/test/org/opensearch/alerting/action/GetDestinationsRequestTests.kt +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.commons.alerting.model.Table -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.search.fetch.subphase.FetchSourceContext -import org.opensearch.test.OpenSearchTestCase - -class GetDestinationsRequestTests : OpenSearchTestCase() { - - fun `test get destination request`() { - - val table = Table("asc", "sortString", null, 1, 0, "") - val req = GetDestinationsRequest("1234", 1L, FetchSourceContext.FETCH_SOURCE, table, "slack") - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = GetDestinationsRequest(sin) - assertEquals("1234", newReq.destinationId) - assertEquals(1L, newReq.version) - assertEquals(FetchSourceContext.FETCH_SOURCE, newReq.srcContext) - assertEquals(table, newReq.table) - assertEquals("slack", newReq.destinationType) - } - - fun `test get destination request without src context`() { - - val table = Table("asc", "sortString", null, 1, 0, "") - val req = GetDestinationsRequest("1234", 1L, null, table, "slack") - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = GetDestinationsRequest(sin) - assertEquals("1234", newReq.destinationId) - assertEquals(1L, newReq.version) - assertEquals(null, newReq.srcContext) - assertEquals(table, newReq.table) - assertEquals("slack", newReq.destinationType) - } - - fun `test get destination request without destinationId`() { - - val table = Table("asc", "sortString", null, 1, 0, "") - val req = GetDestinationsRequest(null, 1L, FetchSourceContext.FETCH_SOURCE, table, "slack") - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = GetDestinationsRequest(sin) - assertEquals(null, newReq.destinationId) - assertEquals(1L, newReq.version) - assertEquals(FetchSourceContext.FETCH_SOURCE, newReq.srcContext) - assertEquals(table, newReq.table) - assertEquals("slack", newReq.destinationType) - } - - fun `test get destination request with filter`() { - - val table = Table("asc", "sortString", null, 1, 0, "") - val req = GetDestinationsRequest(null, 1L, FetchSourceContext.FETCH_SOURCE, table, "slack") - assertNotNull(req) - - val out = 
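        // All of these action tests repeat the same write/read round trip; a possible shared
        // helper (hypothetical, not part of this change) would be:
        //   fun <T> roundTrip(writeable: Writeable, reader: (StreamInput) -> T): T {
        //       val out = BytesStreamOutput()
        //       writeable.writeTo(out)
        //       return reader(StreamInput.wrap(out.bytes().toBytesRef().bytes))
        //   }
        //   val newReq = roundTrip(req, ::GetDestinationsRequest)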
BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = GetDestinationsRequest(sin) - assertEquals(null, newReq.destinationId) - assertEquals(1L, newReq.version) - assertEquals(FetchSourceContext.FETCH_SOURCE, newReq.srcContext) - assertEquals(table, newReq.table) - assertEquals("slack", newReq.destinationType) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetDestinationsResponseTests.kt b/alerting/bin/test/org/opensearch/alerting/action/GetDestinationsResponseTests.kt deleted file mode 100644 index ed837bdce..000000000 --- a/alerting/bin/test/org/opensearch/alerting/action/GetDestinationsResponseTests.kt +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.alerting.model.destination.Destination -import org.opensearch.alerting.model.destination.Slack -import org.opensearch.alerting.util.DestinationType -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.rest.RestStatus -import org.opensearch.test.OpenSearchTestCase -import java.time.Instant -import java.util.Collections - -class GetDestinationsResponseTests : OpenSearchTestCase() { - - fun `test get destination response with no destinations`() { - val req = GetDestinationsResponse(RestStatus.OK, 0, Collections.emptyList()) - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = GetDestinationsResponse(sin) - assertEquals(0, newReq.totalDestinations) - assertTrue(newReq.destinations.isEmpty()) - assertEquals(RestStatus.OK, newReq.status) - } - - fun `test get destination response with a destination`() { - val slack = Slack("url") - val destination = Destination( - "id", - 0L, - 0, - 0, - 0, - DestinationType.SLACK, - "name", - null, - Instant.MIN, - null, - slack, - null, - null - ) - - val req = GetDestinationsResponse(RestStatus.OK, 1, listOf(destination)) - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = GetDestinationsResponse(sin) - assertEquals(1, newReq.totalDestinations) - assertEquals(destination, newReq.destinations[0]) - assertEquals(RestStatus.OK, newReq.status) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountActionTests.kt b/alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountActionTests.kt deleted file mode 100644 index cb26b182e..000000000 --- a/alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountActionTests.kt +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.test.OpenSearchTestCase - -class GetEmailAccountActionTests : OpenSearchTestCase() { - - fun `test get email account name`() { - assertNotNull(GetEmailAccountAction.INSTANCE.name()) - assertEquals(GetEmailAccountAction.INSTANCE.name(), GetEmailAccountAction.NAME) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountRequestTests.kt b/alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountRequestTests.kt deleted file mode 100644 index 02631a38b..000000000 --- a/alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountRequestTests.kt +++ 
/dev/null @@ -1,45 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.rest.RestRequest -import org.opensearch.search.fetch.subphase.FetchSourceContext -import org.opensearch.test.OpenSearchTestCase - -class GetEmailAccountRequestTests : OpenSearchTestCase() { - - fun `test get email account request`() { - - val req = GetEmailAccountRequest("1234", 1L, RestRequest.Method.GET, FetchSourceContext.FETCH_SOURCE) - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = GetEmailAccountRequest(sin) - assertEquals("1234", newReq.emailAccountID) - assertEquals(1L, newReq.version) - assertEquals(RestRequest.Method.GET, newReq.method) - assertEquals(FetchSourceContext.FETCH_SOURCE, newReq.srcContext) - } - - fun `test head email account request`() { - - val req = GetEmailAccountRequest("1234", 2L, RestRequest.Method.HEAD, FetchSourceContext.FETCH_SOURCE) - assertNotNull(req) - - val out = BytesStreamOutput() - req.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newReq = GetEmailAccountRequest(sin) - assertEquals("1234", newReq.emailAccountID) - assertEquals(2L, newReq.version) - assertEquals(RestRequest.Method.HEAD, newReq.method) - assertEquals(FetchSourceContext.FETCH_SOURCE, newReq.srcContext) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountResponseTests.kt b/alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountResponseTests.kt deleted file mode 100644 index ed60c3439..000000000 --- a/alerting/bin/test/org/opensearch/alerting/action/GetEmailAccountResponseTests.kt +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.action - -import org.opensearch.alerting.randomEmailAccount -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.rest.RestStatus -import org.opensearch.test.OpenSearchTestCase - -class GetEmailAccountResponseTests : OpenSearchTestCase() { - - fun `test get email account response`() { - - val res = GetEmailAccountResponse("1234", 1L, 2L, 0L, RestStatus.OK, null) - assertNotNull(res) - - val out = BytesStreamOutput() - res.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newRes = GetEmailAccountResponse(sin) - assertEquals("1234", newRes.id) - assertEquals(1L, newRes.version) - assertEquals(RestStatus.OK, newRes.status) - assertEquals(null, newRes.emailAccount) - } - - fun `test get email account with email account`() { - - val emailAccount = randomEmailAccount(name = "test_email_account") - val res = GetEmailAccountResponse("1234", 1L, 2L, 0L, RestStatus.OK, emailAccount) - assertNotNull(res) - - val out = BytesStreamOutput() - res.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newRes = GetEmailAccountResponse(sin) - assertEquals("1234", newRes.id) - assertEquals(1L, newRes.version) - assertEquals(RestStatus.OK, newRes.status) - assertNotNull(newRes.emailAccount) - assertEquals("test_email_account", newRes.emailAccount?.name) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupActionTests.kt 
deleted file mode 100644
index 647de76c3..000000000
--- a/alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupActionTests.kt
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.action
-
-import org.opensearch.test.OpenSearchTestCase
-
-class GetEmailGroupActionTests : OpenSearchTestCase() {
-
-    fun `test get email group name`() {
-        assertNotNull(GetEmailGroupAction.INSTANCE.name())
-        assertEquals(GetEmailGroupAction.INSTANCE.name(), GetEmailGroupAction.NAME)
-    }
-}
diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupRequestTests.kt b/alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupRequestTests.kt
deleted file mode 100644
index 7fa8b2037..000000000
--- a/alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupRequestTests.kt
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.action
-
-import org.opensearch.common.io.stream.BytesStreamOutput
-import org.opensearch.core.common.io.stream.StreamInput
-import org.opensearch.rest.RestRequest
-import org.opensearch.search.fetch.subphase.FetchSourceContext
-import org.opensearch.test.OpenSearchTestCase
-
-class GetEmailGroupRequestTests : OpenSearchTestCase() {
-
-    fun `test get email group request`() {
-
-        val req = GetEmailGroupRequest("1234", 1L, RestRequest.Method.GET, FetchSourceContext.FETCH_SOURCE)
-        assertNotNull(req)
-
-        val out = BytesStreamOutput()
-        req.writeTo(out)
-        val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes)
-        val newReq = GetEmailGroupRequest(sin)
-        assertEquals("1234", newReq.emailGroupID)
-        assertEquals(1L, newReq.version)
-        assertEquals(RestRequest.Method.GET, newReq.method)
-        assertEquals(FetchSourceContext.FETCH_SOURCE, newReq.srcContext)
-    }
-
-    fun `test head email group request`() {
-
-        val req = GetEmailGroupRequest("1234", 1L, RestRequest.Method.HEAD, FetchSourceContext.FETCH_SOURCE)
-        assertNotNull(req)
-
-        val out = BytesStreamOutput()
-        req.writeTo(out)
-        val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes)
-        val newReq = GetEmailGroupRequest(sin)
-        assertEquals("1234", newReq.emailGroupID)
-        assertEquals(1L, newReq.version)
-        assertEquals(RestRequest.Method.HEAD, newReq.method)
-        assertEquals(FetchSourceContext.FETCH_SOURCE, newReq.srcContext)
-    }
-}
diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupResponseTests.kt b/alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupResponseTests.kt
deleted file mode 100644
index 19612fe4a..000000000
--- a/alerting/bin/test/org/opensearch/alerting/action/GetEmailGroupResponseTests.kt
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.action
-
-import org.opensearch.alerting.randomEmailGroup
-import org.opensearch.common.io.stream.BytesStreamOutput
-import org.opensearch.core.common.io.stream.StreamInput
-import org.opensearch.core.rest.RestStatus
-import org.opensearch.test.OpenSearchTestCase
-
-class GetEmailGroupResponseTests : OpenSearchTestCase() {
-
-    fun `test get email group response`() {
-
-        val res = GetEmailGroupResponse("1234", 1L, 2L, 0L, RestStatus.OK, null)
-        assertNotNull(res)
-
-        val out = BytesStreamOutput()
-        res.writeTo(out)
-        val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes)
-        val newRes = GetEmailGroupResponse(sin)
-        assertEquals("1234", newRes.id)
-        assertEquals(1L, newRes.version)
-        assertEquals(RestStatus.OK, newRes.status)
-        assertEquals(null, newRes.emailGroup)
-    }
-
-    fun `test get email group with email group`() {
-
-        val emailGroup = randomEmailGroup(name = "test-email-group")
-        val res = GetEmailGroupResponse("1234", 1L, 2L, 0L, RestStatus.OK, emailGroup)
-        assertNotNull(res)
-
-        val out = BytesStreamOutput()
-        res.writeTo(out)
-        val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes)
-        val newRes = GetEmailGroupResponse(sin)
-        assertEquals("1234", newRes.id)
-        assertEquals(1L, newRes.version)
-        assertEquals(RestStatus.OK, newRes.status)
-        assertNotNull(newRes.emailGroup)
-        assertEquals("test-email-group", newRes.emailGroup?.name)
-    }
-}
diff --git a/alerting/bin/test/org/opensearch/alerting/action/GetFindingsRequestTests.kt b/alerting/bin/test/org/opensearch/alerting/action/GetFindingsRequestTests.kt
deleted file mode 100644
index d1bd6f7e3..000000000
--- a/alerting/bin/test/org/opensearch/alerting/action/GetFindingsRequestTests.kt
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.action
-
-import org.opensearch.common.io.stream.BytesStreamOutput
-import org.opensearch.commons.alerting.action.GetFindingsRequest
-import org.opensearch.commons.alerting.model.Table
-import org.opensearch.core.common.io.stream.StreamInput
-import org.opensearch.test.OpenSearchTestCase
-
-class GetFindingsRequestTests : OpenSearchTestCase() {
-
-    fun `test get findings request`() {
-
-        val table = Table("asc", "sortString", null, 1, 0, "")
-
-        val req = GetFindingsRequest("2121", table, "1", "finding_index_name")
-        assertNotNull(req)
-
-        val out = BytesStreamOutput()
-        req.writeTo(out)
-        val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes)
-        val newReq = GetFindingsRequest(sin)
-
-        assertEquals("1", newReq.monitorId)
-        assertEquals("2121", newReq.findingId)
-        assertEquals("finding_index_name", newReq.findingIndex)
-        assertEquals(table, newReq.table)
-    }
-
-    fun `test validate returns null`() {
-        val table = Table("asc", "sortString", null, 1, 0, "")
-
-        val req = GetFindingsRequest("2121", table, "1", "active")
-        assertNotNull(req)
-        assertNull(req.validate())
-    }
-}
diff --git a/alerting/bin/test/org/opensearch/alerting/action/SearchEmailAccountActionTests.kt b/alerting/bin/test/org/opensearch/alerting/action/SearchEmailAccountActionTests.kt
deleted file mode 100644
index 14942c977..000000000
--- a/alerting/bin/test/org/opensearch/alerting/action/SearchEmailAccountActionTests.kt
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.action
-
-import org.opensearch.test.OpenSearchTestCase
-
-class SearchEmailAccountActionTests : OpenSearchTestCase() {
-
-    fun `test search email account action name`() {
-        assertNotNull(SearchEmailAccountAction.INSTANCE.name())
-        assertEquals(SearchEmailAccountAction.INSTANCE.name(), SearchEmailAccountAction.NAME)
-    }
-}
diff --git a/alerting/bin/test/org/opensearch/alerting/action/SearchEmailGroupActionTests.kt b/alerting/bin/test/org/opensearch/alerting/action/SearchEmailGroupActionTests.kt
deleted file mode 100644
index 6cd01cf9d..000000000
--- a/alerting/bin/test/org/opensearch/alerting/action/SearchEmailGroupActionTests.kt
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.action
-
-import org.opensearch.test.OpenSearchTestCase
-
-class SearchEmailGroupActionTests : OpenSearchTestCase() {
-
-    fun `test search email group action name`() {
-        assertNotNull(SearchEmailGroupAction.INSTANCE.name())
-        assertEquals(SearchEmailGroupAction.INSTANCE.name(), SearchEmailGroupAction.NAME)
-    }
-}
diff --git a/alerting/bin/test/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregationBuilderTests.kt b/alerting/bin/test/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregationBuilderTests.kt
deleted file mode 100644
index 60021e20b..000000000
--- a/alerting/bin/test/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregationBuilderTests.kt
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.aggregation.bucketselectorext
-
-import org.opensearch.alerting.AlertingPlugin
-import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder
-import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtFilter
-import org.opensearch.plugins.SearchPlugin
-import org.opensearch.script.Script
-import org.opensearch.script.ScriptType
-import org.opensearch.search.aggregations.BasePipelineAggregationTestCase
-import org.opensearch.search.aggregations.bucket.terms.IncludeExclude
-import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy
-
-class BucketSelectorExtAggregationBuilderTests : BasePipelineAggregationTestCase<BucketSelectorExtAggregationBuilder>() {
-    override fun plugins(): List<SearchPlugin> {
-        return listOf(AlertingPlugin())
-    }
-
-    override fun createTestAggregatorFactory(): BucketSelectorExtAggregationBuilder {
-        val name = randomAlphaOfLengthBetween(3, 20)
-        val bucketsPaths: MutableMap<String, String> = HashMap()
-        val numBucketPaths = randomIntBetween(1, 10)
-        for (i in 0 until numBucketPaths) {
-            bucketsPaths[randomAlphaOfLengthBetween(1, 20)] = randomAlphaOfLengthBetween(1, 40)
-        }
-        val script: Script
-        if (randomBoolean()) {
-            script = mockScript("script")
-        } else {
-            val params: MutableMap<String, Any> = HashMap()
-            if (randomBoolean()) {
-                params["foo"] = "bar"
-            }
-            val type = randomFrom(*ScriptType.values())
-            script = Script(
-                type,
-                if (type == ScriptType.STORED) null else randomFrom("my_lang", Script.DEFAULT_SCRIPT_LANG),
-                "script", params
-            )
-        }
-        val parentBucketPath = randomAlphaOfLengthBetween(3, 20)
-        val filter = BucketSelectorExtFilter(IncludeExclude("foo.*", "bar.*"))
-        val factory = BucketSelectorExtAggregationBuilder(
-            name, bucketsPaths,
-            script, parentBucketPath, filter
-        )
-        if (randomBoolean()) {
-            factory.gapPolicy(randomFrom(*GapPolicy.values()))
-        }
-        return factory
-    }
-}
diff --git a/alerting/bin/test/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregatorTests.kt b/alerting/bin/test/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregatorTests.kt
deleted file mode 100644
index 257a0a705..000000000
--- a/alerting/bin/test/org/opensearch/alerting/aggregation/bucketselectorext/BucketSelectorExtAggregatorTests.kt
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.aggregation.bucketselectorext
-
-import org.apache.lucene.document.Document
-import org.apache.lucene.document.SortedNumericDocValuesField
-import org.apache.lucene.document.SortedSetDocValuesField
-import org.apache.lucene.index.DirectoryReader
-import org.apache.lucene.search.MatchAllDocsQuery
-import org.apache.lucene.search.Query
-import org.apache.lucene.tests.index.RandomIndexWriter
-import org.apache.lucene.util.BytesRef
-import org.hamcrest.CoreMatchers
-import org.opensearch.common.CheckedConsumer
-import org.opensearch.common.settings.Settings
-import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder
-import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtFilter
-import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorIndices
-import org.opensearch.index.mapper.KeywordFieldMapper.KeywordFieldType
-import org.opensearch.index.mapper.MappedFieldType
-import org.opensearch.index.mapper.NumberFieldMapper
-import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType
-import org.opensearch.index.query.MatchAllQueryBuilder
-import org.opensearch.script.MockScriptEngine
-import org.opensearch.script.Script
-import org.opensearch.script.ScriptEngine
-import org.opensearch.script.ScriptModule
-import org.opensearch.script.ScriptService
-import org.opensearch.script.ScriptType
-import org.opensearch.search.aggregations.Aggregation
-import org.opensearch.search.aggregations.Aggregator
-import org.opensearch.search.aggregations.AggregatorTestCase
-import org.opensearch.search.aggregations.bucket.filter.FilterAggregationBuilder
-import org.opensearch.search.aggregations.bucket.filter.FiltersAggregationBuilder
-import org.opensearch.search.aggregations.bucket.filter.InternalFilter
-import org.opensearch.search.aggregations.bucket.filter.InternalFilters
-import org.opensearch.search.aggregations.bucket.terms.IncludeExclude
-import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder
-import org.opensearch.search.aggregations.metrics.AvgAggregationBuilder
-import org.opensearch.search.aggregations.metrics.ValueCountAggregationBuilder
-import java.io.IOException
-import java.util.Collections
-import java.util.function.Consumer
-import java.util.function.Function
-
-class BucketSelectorExtAggregatorTests : AggregatorTestCase() {
-
-    private var SCRIPTNAME = "bucket_selector_script"
-    private var paramName = "the_avg"
-    private var paramValue = 19.0
-
-    override fun getMockScriptService(): ScriptService {
-
-        val scriptEngine = MockScriptEngine(
-            MockScriptEngine.NAME,
-            Collections.singletonMap(
-                SCRIPTNAME,
-                Function<Map<String, Any>, Any> { script: Map<String, Any> ->
-                    script[paramName].toString().toDouble() == paramValue
-                }
-            ),
-            emptyMap()
-        )
-        val engines: Map<String, ScriptEngine> = Collections.singletonMap(scriptEngine.type, scriptEngine)
-        return ScriptService(Settings.EMPTY, engines, ScriptModule.CORE_CONTEXTS)
-    }
-
-    @Throws(Exception::class)
-    fun `test bucket selector script`() {
-        val fieldType: MappedFieldType = NumberFieldType("number_field", NumberFieldMapper.NumberType.INTEGER)
-        val fieldType1: MappedFieldType = KeywordFieldType("the_field")
-
-        val filters: FiltersAggregationBuilder = FiltersAggregationBuilder("placeholder", MatchAllQueryBuilder())
-            .subAggregation(
-                TermsAggregationBuilder("the_terms").field("the_field")
-                    .subAggregation(AvgAggregationBuilder("the_avg").field("number_field"))
-            )
-            .subAggregation(
-                BucketSelectorExtAggregationBuilder(
-                    "test_bucket_selector_ext",
-                    Collections.singletonMap("the_avg", "the_avg.value"),
-                    Script(ScriptType.INLINE, MockScriptEngine.NAME, SCRIPTNAME, emptyMap()),
-                    "the_terms",
-                    null
-                )
-            )
-        paramName = "the_avg"
-        paramValue = 19.0
-        testCase(
-            filters, MatchAllDocsQuery(),
-            CheckedConsumer<RandomIndexWriter, IOException> { iw: RandomIndexWriter ->
-                var doc = Document()
-                doc.add(SortedSetDocValuesField("the_field", BytesRef("test1")))
-                doc.add(SortedNumericDocValuesField("number_field", 20))
-                iw.addDocument(doc)
-                doc = Document()
-                doc.add(SortedSetDocValuesField("the_field", BytesRef("test2")))
-                doc.add(SortedNumericDocValuesField("number_field", 19))
-                iw.addDocument(doc)
-            },
-            Consumer { f: InternalFilters ->
-                assertThat(
-                    (f.buckets[0].aggregations.get("test_bucket_selector_ext") as BucketSelectorIndices).bucketIndices[0],
-                    CoreMatchers.equalTo(1)
-                )
-            },
-            fieldType, fieldType1
-        )
-    }
-
-    @Throws(Exception::class)
-    fun `test bucket selector filter include`() {
-        val fieldType: MappedFieldType = NumberFieldType("number_field", NumberFieldMapper.NumberType.INTEGER)
-        val fieldType1: MappedFieldType = KeywordFieldType("the_field")
-
-        val selectorAgg1: FiltersAggregationBuilder = FiltersAggregationBuilder("placeholder", MatchAllQueryBuilder())
-            .subAggregation(
-                TermsAggregationBuilder("the_terms").field("the_field")
-                    .subAggregation(AvgAggregationBuilder("the_avg").field("number_field"))
-            )
-            .subAggregation(
-                BucketSelectorExtAggregationBuilder(
-                    "test_bucket_selector_ext",
-                    Collections.singletonMap("the_avg", "the_avg.value"),
-                    Script(ScriptType.INLINE, MockScriptEngine.NAME, SCRIPTNAME, emptyMap()),
-                    "the_terms",
-                    BucketSelectorExtFilter(IncludeExclude(arrayOf("test1"), arrayOf()))
-                )
-            )
-
-        val selectorAgg2: FiltersAggregationBuilder = FiltersAggregationBuilder("placeholder", MatchAllQueryBuilder())
-            .subAggregation(
-                TermsAggregationBuilder("the_terms").field("the_field")
-                    .subAggregation(AvgAggregationBuilder("the_avg").field("number_field"))
-            )
-            .subAggregation(
-                BucketSelectorExtAggregationBuilder(
-                    "test_bucket_selector_ext",
-                    Collections.singletonMap("the_avg", "the_avg.value"),
-                    Script(ScriptType.INLINE, MockScriptEngine.NAME, SCRIPTNAME, emptyMap()),
-                    "the_terms",
-                    BucketSelectorExtFilter(IncludeExclude(arrayOf("test2"), arrayOf()))
-                )
-            )
-
-        paramName = "the_avg"
-        paramValue = 19.0
-
-        testCase(
-            selectorAgg1, MatchAllDocsQuery(),
-            CheckedConsumer<RandomIndexWriter, IOException> { iw: RandomIndexWriter ->
-                var doc = Document()
-                doc.add(SortedSetDocValuesField("the_field", BytesRef("test1")))
-                doc.add(SortedNumericDocValuesField("number_field", 20))
-                iw.addDocument(doc)
-                doc = Document()
-                doc.add(SortedSetDocValuesField("the_field", BytesRef("test2")))
-                doc.add(SortedNumericDocValuesField("number_field", 19))
-                iw.addDocument(doc)
-            },
-            Consumer { f: InternalFilters ->
-                assertThat(
-                    (f.buckets[0].aggregations.get("test_bucket_selector_ext") as BucketSelectorIndices).bucketIndices.size,
-                    CoreMatchers.equalTo(0)
-                )
-            },
-            fieldType, fieldType1
-        )
-
-        testCase(
-            selectorAgg2, MatchAllDocsQuery(),
-            CheckedConsumer<RandomIndexWriter, IOException> { iw: RandomIndexWriter ->
-                var doc = Document()
-                doc.add(SortedSetDocValuesField("the_field", BytesRef("test1")))
-                doc.add(SortedNumericDocValuesField("number_field", 20))
-                iw.addDocument(doc)
-                doc = Document()
-                doc.add(SortedSetDocValuesField("the_field", BytesRef("test2")))
-                doc.add(SortedNumericDocValuesField("number_field", 19))
-                iw.addDocument(doc)
-            },
-            Consumer { f: InternalFilters ->
-                assertThat(
-                    (f.buckets[0].aggregations.get("test_bucket_selector_ext") as BucketSelectorIndices).bucketIndices[0],
-                    CoreMatchers.equalTo(1)
-                )
-            },
-            fieldType, fieldType1
-        )
-    }
-
-    @Throws(Exception::class)
-    fun `test bucket selector filter exclude`() {
-        val fieldType: MappedFieldType = NumberFieldType("number_field", NumberFieldMapper.NumberType.INTEGER)
-        val fieldType1: MappedFieldType = KeywordFieldType("the_field")
-
-        val selectorAgg1: FiltersAggregationBuilder = FiltersAggregationBuilder("placeholder", MatchAllQueryBuilder())
-            .subAggregation(
-                TermsAggregationBuilder("the_terms").field("the_field")
-                    .subAggregation(AvgAggregationBuilder("the_avg").field("number_field"))
-            )
-            .subAggregation(
-                BucketSelectorExtAggregationBuilder(
-                    "test_bucket_selector_ext",
-                    Collections.singletonMap("the_avg", "the_avg.value"),
-                    Script(ScriptType.INLINE, MockScriptEngine.NAME, SCRIPTNAME, emptyMap()),
-                    "the_terms",
-                    BucketSelectorExtFilter(IncludeExclude(arrayOf(), arrayOf("test2")))
-                )
-            )
-        paramName = "the_avg"
-        paramValue = 19.0
-        testCase(
-            selectorAgg1, MatchAllDocsQuery(),
-            CheckedConsumer<RandomIndexWriter, IOException> { iw: RandomIndexWriter ->
-                var doc = Document()
-                doc.add(SortedSetDocValuesField("the_field", BytesRef("test1")))
-                doc.add(SortedNumericDocValuesField("number_field", 20))
-                iw.addDocument(doc)
-                doc = Document()
-                doc.add(SortedSetDocValuesField("the_field", BytesRef("test2")))
-                doc.add(SortedNumericDocValuesField("number_field", 19))
-                iw.addDocument(doc)
-            },
-            Consumer { f: InternalFilters ->
-                assertThat(
-                    (f.buckets[0].aggregations.get("test_bucket_selector_ext") as BucketSelectorIndices).bucketIndices.size,
-                    CoreMatchers.equalTo(0)
-                )
-            },
-            fieldType, fieldType1
-        )
-    }
-
-    @Throws(Exception::class)
-    fun `test bucket selector filter numeric key`() {
-        val fieldType: MappedFieldType = NumberFieldType("number_field", NumberFieldMapper.NumberType.INTEGER)
-        val fieldType1: MappedFieldType = KeywordFieldType("the_field")
-
-        val selectorAgg1: FiltersAggregationBuilder = FiltersAggregationBuilder("placeholder", MatchAllQueryBuilder())
-            .subAggregation(
-                TermsAggregationBuilder("number_agg").field("number_field")
-                    .subAggregation(ValueCountAggregationBuilder("count").field("number_field"))
-            )
-            .subAggregation(
-                BucketSelectorExtAggregationBuilder(
-                    "test_bucket_selector_ext",
-                    Collections.singletonMap("count", "count"),
-                    Script(ScriptType.INLINE, MockScriptEngine.NAME, SCRIPTNAME, emptyMap()),
-                    "number_agg",
-                    BucketSelectorExtFilter(IncludeExclude(doubleArrayOf(19.0), doubleArrayOf()))
-                )
-            )
-
-        paramName = "count"
-        paramValue = 1.0
-        testCase(
-            selectorAgg1, MatchAllDocsQuery(),
-            CheckedConsumer<RandomIndexWriter, IOException> { iw: RandomIndexWriter ->
-                var doc = Document()
-                doc.add(SortedSetDocValuesField("the_field", BytesRef("test1")))
-                doc.add(SortedNumericDocValuesField("number_field", 20))
-                iw.addDocument(doc)
-                doc = Document()
-                doc.add(SortedSetDocValuesField("the_field", BytesRef("test2")))
-                doc.add(SortedNumericDocValuesField("number_field", 19))
-                iw.addDocument(doc)
-            },
-            Consumer { f: InternalFilters ->
-                assertThat(
-                    (f.buckets[0].aggregations.get("test_bucket_selector_ext") as BucketSelectorIndices).bucketIndices[0],
-                    CoreMatchers.equalTo(0)
-                )
-            },
-            fieldType, fieldType1
-        )
-    }
-
-    @Throws(Exception::class)
-    fun `test bucket selector nested parent path`() {
-        val fieldType: MappedFieldType = NumberFieldType("number_field", NumberFieldMapper.NumberType.INTEGER)
-        val fieldType1: MappedFieldType = KeywordFieldType("the_field")
-
-        val selectorAgg1: FilterAggregationBuilder = FilterAggregationBuilder("placeholder", MatchAllQueryBuilder())
-            .subAggregation(
-                FilterAggregationBuilder("parent_agg", MatchAllQueryBuilder())
-                    .subAggregation(
-                        TermsAggregationBuilder("term_agg").field("the_field")
-                            .subAggregation(AvgAggregationBuilder("the_avg").field("number_field"))
-                    )
-            )
-            .subAggregation(
-                BucketSelectorExtAggregationBuilder(
-                    "test_bucket_selector_ext",
-                    Collections.singletonMap("the_avg", "the_avg.value"),
-                    Script(ScriptType.INLINE, MockScriptEngine.NAME, SCRIPTNAME, emptyMap()),
-                    "parent_agg>term_agg",
-                    null
-                )
-            )
-        paramName = "the_avg"
-        paramValue = 19.0
-        testCaseInternalFilter(
-            selectorAgg1, MatchAllDocsQuery(),
-            CheckedConsumer<RandomIndexWriter, IOException> { iw: RandomIndexWriter ->
-                var doc = Document()
-                doc.add(SortedSetDocValuesField("the_field", BytesRef("test1")))
-
-                doc.add(SortedNumericDocValuesField("number_field", 20))
-                iw.addDocument(doc)
-                doc = Document()
-                doc.add(SortedSetDocValuesField("the_field", BytesRef("test2")))
-
-                doc.add(SortedNumericDocValuesField("number_field", 19))
-                iw.addDocument(doc)
-            },
-            Consumer { f: InternalFilter ->
-                assertThat(
-                    (f.aggregations.get("test_bucket_selector_ext") as BucketSelectorIndices).bucketIndices[0],
-                    CoreMatchers.equalTo(1)
-                )
-            },
-            fieldType, fieldType1
-        )
-    }
-
-    @Throws(IOException::class)
-    private fun testCase(
-        aggregationBuilder: FiltersAggregationBuilder,
-        query: Query,
-        buildIndex: CheckedConsumer<RandomIndexWriter, IOException>,
-        verify: Consumer<InternalFilters>,
-        vararg fieldType: MappedFieldType
-    ) {
-        newDirectory().use { directory ->
-            val indexWriter = RandomIndexWriter(random(), directory)
-            buildIndex.accept(indexWriter)
-            indexWriter.close()
-            DirectoryReader.open(directory).use { indexReader ->
-                val indexSearcher = newIndexSearcher(indexReader)
-                val filters: InternalFilters
-                filters = searchAndReduce(indexSearcher, query, aggregationBuilder, *fieldType)
-                verify.accept(filters)
-            }
-        }
-    }
-
-    @Throws(IOException::class)
-    private fun testCaseInternalFilter(
-        aggregationBuilder: FilterAggregationBuilder,
-        query: Query,
-        buildIndex: CheckedConsumer<RandomIndexWriter, IOException>,
-        verify: Consumer<InternalFilter>,
-        vararg fieldType: MappedFieldType
-    ) {
-        newDirectory().use { directory ->
-            val indexWriter = RandomIndexWriter(random(), directory)
-            buildIndex.accept(indexWriter)
-            indexWriter.close()
-            DirectoryReader.open(directory).use { indexReader ->
-                val indexSearcher = newIndexSearcher(indexReader)
-                val filters: InternalFilter
-                filters = searchAndReduce(indexSearcher, query, aggregationBuilder, *fieldType)
-                verify.accept(filters)
-            }
-        }
-    }
-}
diff --git a/alerting/bin/test/org/opensearch/alerting/alerts/AlertIndicesIT.kt b/alerting/bin/test/org/opensearch/alerting/alerts/AlertIndicesIT.kt
deleted file mode 100644
index da402c5b7..000000000
--- a/alerting/bin/test/org/opensearch/alerting/alerts/AlertIndicesIT.kt
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.alerts
-
-import org.apache.http.entity.ContentType.APPLICATION_JSON
-import org.apache.http.entity.StringEntity
-import org.opensearch.action.search.SearchResponse
-import org.opensearch.alerting.ALWAYS_RUN
-import org.opensearch.alerting.AlertingRestTestCase
-import org.opensearch.alerting.NEVER_RUN
-import org.opensearch.alerting.makeRequest
-import org.opensearch.alerting.randomDocumentLevelMonitor
-import org.opensearch.alerting.randomDocumentLevelTrigger
-import org.opensearch.alerting.randomQueryLevelMonitor
-import org.opensearch.alerting.randomQueryLevelTrigger
-import org.opensearch.alerting.settings.AlertingSettings
-import org.opensearch.common.xcontent.XContentType
-import org.opensearch.common.xcontent.json.JsonXContent.jsonXContent
-import org.opensearch.commons.alerting.model.DocLevelMonitorInput
-import org.opensearch.commons.alerting.model.DocLevelQuery
-import org.opensearch.commons.alerting.model.ScheduledJob
-import org.opensearch.core.rest.RestStatus
-import org.opensearch.test.OpenSearchTestCase
-import java.util.concurrent.TimeUnit
-
-class AlertIndicesIT : AlertingRestTestCase() {
-
-    fun `test create alert index`() {
-        executeMonitor(randomQueryLevelMonitor(triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN))))
-
-        assertIndexExists(AlertIndices.ALERT_INDEX)
-        assertIndexExists(AlertIndices.ALERT_HISTORY_WRITE_INDEX)
-    }
-
-    fun `test create finding index`() {
-        val testIndex = createTestIndex()
-        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
-        val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery))
-        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-        val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger)))
-
-        executeMonitor(monitor.id)
-
-        assertIndexExists(AlertIndices.FINDING_HISTORY_WRITE_INDEX)
-    }
-
-    fun `test update alert index mapping with new schema version`() {
-        wipeAllODFEIndices()
-        assertIndexDoesNotExist(AlertIndices.ALERT_INDEX)
-        assertIndexDoesNotExist(AlertIndices.ALERT_HISTORY_WRITE_INDEX)
-
-        putAlertMappings(
-            AlertIndices.alertMapping().trimStart('{').trimEnd('}')
-                .replace("\"schema_version\": 5", "\"schema_version\": 0")
-        )
-        assertIndexExists(AlertIndices.ALERT_INDEX)
-        assertIndexExists(AlertIndices.ALERT_HISTORY_WRITE_INDEX)
-        verifyIndexSchemaVersion(AlertIndices.ALERT_INDEX, 0)
-        verifyIndexSchemaVersion(AlertIndices.ALERT_HISTORY_WRITE_INDEX, 0)
-        wipeAllODFEIndices()
-        executeMonitor(createRandomMonitor())
-        assertIndexExists(AlertIndices.ALERT_INDEX)
-        assertIndexExists(AlertIndices.ALERT_HISTORY_WRITE_INDEX)
-        verifyIndexSchemaVersion(ScheduledJob.SCHEDULED_JOBS_INDEX, 8)
-        verifyIndexSchemaVersion(AlertIndices.ALERT_INDEX, 5)
-        verifyIndexSchemaVersion(AlertIndices.ALERT_HISTORY_WRITE_INDEX, 5)
-    }
-
-    fun `test update finding index mapping with new schema version`() {
-        wipeAllODFEIndices()
-        assertIndexDoesNotExist(AlertIndices.FINDING_HISTORY_WRITE_INDEX)
-
-        putFindingMappings(
-            AlertIndices.findingMapping().trimStart('{').trimEnd('}')
-                .replace("\"schema_version\": 4", "\"schema_version\": 0")
-        )
-        assertIndexExists(AlertIndices.FINDING_HISTORY_WRITE_INDEX)
-        verifyIndexSchemaVersion(AlertIndices.FINDING_HISTORY_WRITE_INDEX, 0)
-        wipeAllODFEIndices()
-
-        val testIndex = createTestIndex()
-        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
-        val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery))
-        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-        val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger)))
-        executeMonitor(trueMonitor.id)
-        assertIndexExists(AlertIndices.FINDING_HISTORY_WRITE_INDEX)
-        verifyIndexSchemaVersion(ScheduledJob.SCHEDULED_JOBS_INDEX, 8)
-        verifyIndexSchemaVersion(AlertIndices.FINDING_HISTORY_WRITE_INDEX, 4)
-    }
-
-    fun `test alert index gets recreated automatically if deleted`() {
-        wipeAllODFEIndices()
-        assertIndexDoesNotExist(AlertIndices.ALERT_INDEX)
-        val trueMonitor = randomQueryLevelMonitor(triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN)))
-
-        executeMonitor(trueMonitor)
-        assertIndexExists(AlertIndices.ALERT_INDEX)
-        assertIndexExists(AlertIndices.ALERT_HISTORY_WRITE_INDEX)
-        wipeAllODFEIndices()
-        assertIndexDoesNotExist(AlertIndices.ALERT_INDEX)
-        assertIndexDoesNotExist(AlertIndices.ALERT_HISTORY_WRITE_INDEX)
-
-        val executeResponse = executeMonitor(trueMonitor)
-        val xcp = createParser(XContentType.JSON.xContent(), executeResponse.entity.content)
-        val output = xcp.map()
-        assertNull("Error running a monitor after wiping alert indices", output["error"])
-    }
-
-    fun `test finding index gets recreated automatically if deleted`() {
-        wipeAllODFEIndices()
-        assertIndexDoesNotExist(AlertIndices.FINDING_HISTORY_WRITE_INDEX)
-        val testIndex = createTestIndex()
-        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
-        val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery))
-        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-        val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger)))
-
-        executeMonitor(trueMonitor.id)
-        assertIndexExists(AlertIndices.FINDING_HISTORY_WRITE_INDEX)
-        wipeAllODFEIndices()
-        assertIndexDoesNotExist(AlertIndices.FINDING_HISTORY_WRITE_INDEX)
-
-        createTestIndex(testIndex)
-        val executeResponse = executeMonitor(trueMonitor)
-        val xcp = createParser(XContentType.JSON.xContent(), executeResponse.entity.content)
-        val output = xcp.map()
-        assertNull("Error running a monitor after wiping finding indices", output["error"])
-    }
-
-    fun `test rollover alert history index`() {
-        // Update the rollover check to be every 1 second and the index max age to be 1 second
-        client().updateSettings(AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.key, "1s")
-        client().updateSettings(AlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE.key, "1s")
-
-        val trueMonitor = randomQueryLevelMonitor(triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN)))
-        executeMonitor(trueMonitor)
-
-        // Allow for a rollover index.
-        Thread.sleep(2000)
-        assertTrue("Did not find 3 alert indices", getAlertIndices().size >= 3)
-    }
-
-    fun `test rollover finding history index`() {
-        // Update the rollover check to be every 1 second and the index max age to be 1 second
-        client().updateSettings(AlertingSettings.FINDING_HISTORY_ROLLOVER_PERIOD.key, "1s")
-        client().updateSettings(AlertingSettings.FINDING_HISTORY_INDEX_MAX_AGE.key, "1s")
-
-        val testIndex = createTestIndex()
-        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
-        val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery))
-        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-        val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger)))
-        executeMonitor(trueMonitor.id)
-
-        // Allow for a rollover index.
-        Thread.sleep(2000)
-        assertTrue("Did not find 2 alert indices", getFindingIndices().size >= 2)
-    }
-
-    fun `test alert history disabled`() {
-        resetHistorySettings()
-
-        val trigger1 = randomQueryLevelTrigger(condition = ALWAYS_RUN)
-        val monitor1 = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger1)))
-        executeMonitor(monitor1.id)
-
-        // Check if alert is active
-        val activeAlert1 = searchAlerts(monitor1)
-        assertEquals("1 alert should be active", 1, activeAlert1.size)
-
-        // Change trigger and re-execute monitor to mark alert as COMPLETED
-        updateMonitor(monitor1.copy(triggers = listOf(trigger1.copy(condition = NEVER_RUN)), id = monitor1.id), true)
-        executeMonitor(monitor1.id)
-
-        val completedAlert1 = searchAlerts(monitor1, AlertIndices.ALL_ALERT_INDEX_PATTERN).single()
-        assertNotNull("Alert is not completed", completedAlert1.endTime)
-
-        assertEquals(1, getAlertHistoryDocCount())
-
-        // Disable alert history
-        client().updateSettings(AlertingSettings.ALERT_HISTORY_ENABLED.key, "false")
-
-        val trigger2 = randomQueryLevelTrigger(condition = ALWAYS_RUN)
-        val monitor2 = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger2)))
-        executeMonitor(monitor2.id)
-
-        // Check if second alert is active
-        val activeAlert2 = searchAlerts(monitor2)
-        assertEquals("1 alert should be active", 1, activeAlert2.size)
-
-        // Mark second alert as COMPLETED
-        updateMonitor(monitor2.copy(triggers = listOf(trigger2.copy(condition = NEVER_RUN)), id = monitor2.id), true)
-        executeMonitor(monitor2.id)
-
-        // For the second alert, since history is now disabled, searching for the completed alert should return an empty List
-        // since a COMPLETED alert will be removed from the alert index and not added to the history index
-        val completedAlert2 = searchAlerts(monitor2, AlertIndices.ALL_ALERT_INDEX_PATTERN)
-        assertTrue("Alert is not completed", completedAlert2.isEmpty())
-
-        // Get history entry count again and ensure the new alert was not added
-        assertEquals(1, getAlertHistoryDocCount())
-    }
-
-    fun `test short retention period`() {
-        resetHistorySettings()
-
-        // Create monitor and execute
-        val trigger = randomQueryLevelTrigger(condition = ALWAYS_RUN)
-        val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger)))
-        executeMonitor(monitor.id)
-
-        // Check if alert is active and alert index is created
-        val activeAlert = searchAlerts(monitor)
-        assertEquals("1 alert should be active", 1, activeAlert.size)
-
-        waitUntil { return@waitUntil getAlertIndices().size == 2 }
-
-        assertEquals("Did not find 2 alert indices", 2, getAlertIndices().size)
-        // History index is created but is empty
-        assertEquals(0, getAlertHistoryDocCount())
-
-        // Mark alert as COMPLETED
-        updateMonitor(monitor.copy(triggers = listOf(trigger.copy(condition = NEVER_RUN)), id = monitor.id), true)
-        executeMonitor(monitor.id)
-
-        // Verify alert is completed
-        val completedAlert = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN).single()
-        assertNotNull("Alert is not completed", completedAlert.endTime)
-
-        // The completed alert should be removed from the active alert index and added to the history index
-        assertEquals(1, getAlertHistoryDocCount())
-
-        // Update rollover check and max docs as well as decreasing the retention period
-        client().updateSettings(AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.key, "1s")
-        client().updateSettings(AlertingSettings.ALERT_HISTORY_MAX_DOCS.key, 1)
-        client().updateSettings(AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD.key, "1s")
-
-        // Give some time for history to be rolled over and cleared
-        OpenSearchTestCase.waitUntil({
-            val alertIndices = getAlertIndices().size
-            val docCount = getAlertHistoryDocCount()
-            if (alertIndices > 2 || docCount > 0) {
-                return@waitUntil false
-            }
-            return@waitUntil true
-        }, 30, TimeUnit.SECONDS)
-
-        // Given the max_docs and retention settings above, the history index will rollover and the non-write index will be deleted.
-        // This leaves two indices: alert index and an empty history write index
-        assertEquals("Did not find 2 alert indices", 2, getAlertIndices().size)
-        assertEquals(0, getAlertHistoryDocCount())
-    }
-
-    fun `test short finding retention period`() {
-        resetHistorySettings()
-
-        // Create monitor and execute
-        val testIndex = createTestIndex()
-        val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf())
-        val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery))
-        val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN)
-        val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger)))
-
-        val testDoc = """{
-            "message" : "This is an error from IAD region",
-            "test_field" : "us-west-2"
-        }"""
-        indexDoc(testIndex, "1", testDoc)
-
-        executeMonitor(monitor.id)
-
-        // Check if alert is active and alert index is created
-        val activeAlert = searchAlerts(monitor)
-        assertEquals("1 alert should be active", 1, activeAlert.size)
-
-        waitUntil { return@waitUntil getAlertIndices().size == 2 }
-
-        assertEquals("Did not find 2 alert indices", 2, getAlertIndices().size)
-        // History index is created but is empty
-        assertEquals(0, getAlertHistoryDocCount())
-
-        // Mark doc level alert as Acknowledged
-        acknowledgeAlerts(monitor, activeAlert[0])
-
-        // Verify alert is completed
-        val ackAlert = searchAlerts(monitor, AlertIndices.ALL_ALERT_INDEX_PATTERN).single()
-        assertNotNull("Alert is not acknowledged", ackAlert.acknowledgedTime)
-
-        // The completed alert should be removed from the active alert index and added to the history index
-        assertEquals(1, getAlertHistoryDocCount())
-
-        // Update rollover check and max docs as well as decreasing the retention period
-        client().updateSettings(AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.key, "1s")
-        client().updateSettings(AlertingSettings.ALERT_HISTORY_MAX_DOCS.key, 1)
-        client().updateSettings(AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD.key, "1s")
-
-        // Give some time for history to be rolled over and cleared
-        OpenSearchTestCase.waitUntil({
-            val alertIndices = getAlertIndices().size
-            val docCount = getAlertHistoryDocCount()
-            if (alertIndices > 2 || docCount > 0) {
-                return@waitUntil false
-            }
-            return@waitUntil true
-        }, 30, TimeUnit.SECONDS)
-
-        // Given the max_docs and retention settings above, the history index will rollover and the non-write index will be deleted.
-        // This leaves two indices: alert index and an empty history write index
-        assertEquals("Did not find 2 alert indices", 2, getAlertIndices().size)
-        assertEquals(0, getAlertHistoryDocCount())
-    }
-
-    private fun assertIndexExists(index: String) {
-        val response = client().makeRequest("HEAD", index)
-        assertEquals("Index $index does not exist.", RestStatus.OK, response.restStatus())
-    }
-
-    private fun assertIndexDoesNotExist(index: String) {
-        val response = client().makeRequest("HEAD", index)
-        assertEquals("Index $index does not exist.", RestStatus.NOT_FOUND, response.restStatus())
-    }
-
-    private fun resetHistorySettings() {
-        client().updateSettings(AlertingSettings.ALERT_HISTORY_ENABLED.key, "true")
-        client().updateSettings(AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.key, "60s")
-        client().updateSettings(AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD.key, "60s")
-        client().updateSettings(AlertingSettings.FINDING_HISTORY_ENABLED.key, "true")
-        client().updateSettings(AlertingSettings.FINDING_HISTORY_ROLLOVER_PERIOD.key, "60s")
-        client().updateSettings(AlertingSettings.FINDING_HISTORY_RETENTION_PERIOD.key, "60s")
-    }
-
-    private fun getAlertIndices(): List<String> {
-        val response = client().makeRequest("GET", "/_cat/indices/${AlertIndices.ALL_ALERT_INDEX_PATTERN}?format=json")
-        val xcp = createParser(XContentType.JSON.xContent(), response.entity.content)
-        val responseList = xcp.list()
-        val indices = mutableListOf<String>()
-        responseList.filterIsInstance<Map<String, Any>>().forEach { indices.add(it["index"] as String) }
-
-        return indices
-    }
-
-    private fun getFindingIndices(): List<String> {
-        val response = client().makeRequest("GET", "/_cat/indices/${AlertIndices.ALL_FINDING_INDEX_PATTERN}?format=json")
-        val xcp = createParser(XContentType.JSON.xContent(), response.entity.content)
-        val responseList = xcp.list()
-        val indices = mutableListOf<String>()
-        responseList.filterIsInstance<Map<String, Any>>().forEach { indices.add(it["index"] as String) }
-
-        return indices
-    }
-
-    private fun getAlertHistoryDocCount(): Long {
-        val request = """
-            {
-                "query": {
-                    "match_all": {}
-                }
-            }
-        """.trimIndent()
-        val response = adminClient().makeRequest(
-            "POST", "${AlertIndices.ALERT_HISTORY_ALL}/_search", emptyMap(),
-            StringEntity(request, APPLICATION_JSON)
-        )
-        assertEquals("Request to get alert history failed", RestStatus.OK, response.restStatus())
-        return SearchResponse.fromXContent(createParser(jsonXContent, response.entity.content)).hits.totalHits!!.value
-    }
-}
diff --git a/alerting/bin/test/org/opensearch/alerting/bwc/AlertingBackwardsCompatibilityIT.kt b/alerting/bin/test/org/opensearch/alerting/bwc/AlertingBackwardsCompatibilityIT.kt
deleted file mode 100644
index 937be869d..000000000
--- a/alerting/bin/test/org/opensearch/alerting/bwc/AlertingBackwardsCompatibilityIT.kt
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.bwc
-
-import org.apache.http.entity.ContentType.APPLICATION_JSON
-import org.apache.http.entity.StringEntity
-import org.opensearch.alerting.ALERTING_BASE_URI
-import org.opensearch.alerting.AlertingRestTestCase
-import org.opensearch.alerting.makeRequest
-import org.opensearch.common.settings.Settings
-import org.opensearch.common.xcontent.XContentType
-import org.opensearch.commons.alerting.model.Monitor
-import org.opensearch.core.rest.RestStatus
-import org.opensearch.index.query.QueryBuilders
-import org.opensearch.search.builder.SearchSourceBuilder
-
-class AlertingBackwardsCompatibilityIT : AlertingRestTestCase() {
-
-    companion object {
-        private val CLUSTER_TYPE = ClusterType.parse(System.getProperty("tests.rest.bwcsuite"))
-        private val CLUSTER_NAME = System.getProperty("tests.clustername")
-    }
-
-    override fun preserveIndicesUponCompletion(): Boolean = true
-
-    override fun preserveReposUponCompletion(): Boolean = true
-
-    override fun preserveTemplatesUponCompletion(): Boolean = true
-
-    override fun preserveODFEIndicesAfterTest(): Boolean = true
-
-    override fun restClientSettings(): Settings {
-        return Settings.builder()
-            .put(super.restClientSettings())
-            // increase the timeout here to 90 seconds to handle long waits for a green
-            // cluster health. the waits for green need to be longer than a minute to
-            // account for delayed shards
-            .put(CLIENT_SOCKET_TIMEOUT, "90s")
-            .build()
-    }
-
-    @Throws(Exception::class)
-    @Suppress("UNCHECKED_CAST")
-    fun `test backwards compatibility`() {
-        val uri = getPluginUri()
-        val responseMap = getAsMap(uri)["nodes"] as Map<String, Map<String, Any>>
-        for (response in responseMap.values) {
-            val plugins = response["plugins"] as List<Map<String, Any>>
-            val pluginNames = plugins.map { plugin -> plugin["name"] }.toSet()
-            when (CLUSTER_TYPE) {
-                ClusterType.OLD -> {
-                    assertTrue(pluginNames.contains("opensearch-alerting"))
-                    createBasicMonitor()
-                }
-                ClusterType.MIXED -> {
-                    assertTrue(pluginNames.contains("opensearch-alerting"))
-                    verifyMonitorExists(ALERTING_BASE_URI)
-                    // TODO: Need to move the base URI being used here into a constant and rename ALERTING_BASE_URI to
-                    // MONITOR_BASE_URI
-                    verifyMonitorStats("/_plugins/_alerting")
-                }
-                ClusterType.UPGRADED -> {
-                    assertTrue(pluginNames.contains("opensearch-alerting"))
-                    verifyMonitorExists(ALERTING_BASE_URI)
-                    // TODO: Change the next execution time of the Monitor manually instead since this inflates
-                    // the test execution by a lot (might have to wait for Job Scheduler plugin integration first)
-                    // Waiting a minute to ensure the Monitor ran again at least once before checking if the job is running
-                    // on time
-                    Thread.sleep(60000)
-                    verifyMonitorStats("/_plugins/_alerting")
-                }
-            }
-            break
-        }
-    }
-
-    private enum class ClusterType {
-        OLD,
-        MIXED,
-        UPGRADED;
-
-        companion object {
-            fun parse(value: String): ClusterType {
-                return when (value) {
-                    "old_cluster" -> OLD
-                    "mixed_cluster" -> MIXED
-                    "upgraded_cluster" -> UPGRADED
-                    else -> throw AssertionError("Unknown cluster type: $value")
-                }
-            }
-        }
-    }
-
-    private fun getPluginUri(): String {
-        return when (CLUSTER_TYPE) {
-            ClusterType.OLD -> "_nodes/$CLUSTER_NAME-0/plugins"
-            ClusterType.MIXED -> {
-                when (System.getProperty("tests.rest.bwcsuite_round")) {
-                    "second" -> "_nodes/$CLUSTER_NAME-1/plugins"
-                    "third" -> "_nodes/$CLUSTER_NAME-2/plugins"
-                    else -> "_nodes/$CLUSTER_NAME-0/plugins"
-                }
-            }
-            ClusterType.UPGRADED -> "_nodes/plugins"
-        }
-    }
-
-    @Throws(Exception::class)
-    private fun createBasicMonitor() {
-        val indexName = "test_bwc_index"
-        val bwcMonitorString = """
-        {
-            "type": "monitor",
-            "name": "test_bwc_monitor",
-            "enabled": true,
-            "schedule": {
-                "period": {
-                    "interval": 1,
-                    "unit": "MINUTES"
-                }
-            },
-            "inputs": [{
-                "search": {
-                    "indices": ["$indexName"],
-                    "query": {
-                        "size": 0,
-                        "aggregations": {},
-                        "query": {
-                            "match_all": {}
-                        }
-                    }
-                }
-            }],
-            "triggers": [{
-                "name": "abc",
-                "severity": "1",
-                "condition": {
-                    "script": {
-                        "source": "ctx.results[0].hits.total.value > 100000",
-                        "lang": "painless"
-                    }
-                },
-                "actions": []
-            }]
-        }
-        """.trimIndent()
-        createIndex(indexName, Settings.EMPTY)
-
-        val createResponse = client().makeRequest(
-            method = "POST",
-            endpoint = "$ALERTING_BASE_URI?refresh=true",
-            params = emptyMap(),
-            entity = StringEntity(bwcMonitorString, APPLICATION_JSON)
-        )
-
-        assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus())
-        val responseBody = createResponse.asMap()
-        val createdId = responseBody["_id"] as String
-        val createdVersion = responseBody["_version"] as Int
-        assertNotEquals("Create monitor response is missing id", Monitor.NO_ID, createdId)
-        assertTrue("Create monitor response has incorrect version", createdVersion > 0)
-    }
-
-    @Throws(Exception::class)
-    @Suppress("UNCHECKED_CAST")
-    private fun verifyMonitorExists(uri: String) {
-        val search = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).toString()
-        val searchResponse = client().makeRequest(
-            "GET",
-            "$uri/_search",
-            emptyMap(),
-            StringEntity(search, APPLICATION_JSON)
-        )
-        assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus())
-        val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content)
-        val hits = xcp.map()["hits"]!! as Map<String, Map<String, Any>>
-        val numberDocsFound = hits["total"]?.get("value")
-        assertEquals("Unexpected number of Monitors returned", 1, numberDocsFound)
-    }
-
-    @Throws(Exception::class)
-    @Suppress("UNCHECKED_CAST")
-    /**
-     * Monitor stats will check if the Monitor scheduled job is running on time but does not necessarily mean that the
-     * Monitor execution itself did not fail.
-     */
-    private fun verifyMonitorStats(uri: String) {
-        val statsResponse = client().makeRequest(
-            "GET",
-            "$uri/stats",
-            emptyMap()
-        )
-        assertEquals("Monitor stats failed", RestStatus.OK, statsResponse.restStatus())
-        val xcp = createParser(XContentType.JSON.xContent(), statsResponse.entity.content)
-        val responseMap = xcp.map()
-        val nodesCount = responseMap["_nodes"]!! as Map<String, Any>
-        val totalNodes = nodesCount["total"]
-        val successfulNodes = nodesCount["successful"]
-        val nodesOnSchedule = responseMap["nodes_on_schedule"]!!
- assertEquals("Incorrect number of total nodes", 3, totalNodes) - assertEquals("Some nodes in stats response failed", totalNodes, successfulNodes) - assertEquals("Not all nodes are on schedule", totalNodes, nodesOnSchedule) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionParserTests.kt b/alerting/bin/test/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionParserTests.kt deleted file mode 100644 index 7ebc82697..000000000 --- a/alerting/bin/test/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionParserTests.kt +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.chainedAlertCondition - -import org.junit.Assert -import org.opensearch.alerting.chainedAlertCondition.parsers.ChainedAlertExpressionParser -import org.opensearch.test.OpenSearchTestCase - -class ChainedAlertsExpressionParserTests : OpenSearchTestCase() { - - fun `test trigger expression posix parsing simple AND`() { - val eqString = "(monitor[id=abc] && monitor[id=xyz])" - val equation = ChainedAlertExpressionParser(eqString).parse() - val expectedEquation = "monitor[id=abc] monitor[id=xyz] && " - Assert.assertTrue(expectedEquation == equation.toString()) - } - - fun `test trigger expression posix parsing simple AND without parentheses`() { - val eqString = "monitor[id=abc] && monitor[id=xyz]" - val equation = ChainedAlertExpressionParser(eqString).parse() - val expectedEquation = "monitor[id=abc] monitor[id=xyz] && " - Assert.assertTrue(expectedEquation == equation.toString()) - } - - fun `test trigger expression posix parsing multiple AND`() { - val eqString = "(monitor[id=abc] && monitor[id=def]) && monitor[id=ghi]" - val equation = ChainedAlertExpressionParser(eqString).parse() - Assert.assertEquals("monitor[id=abc] monitor[id=def] && monitor[id=ghi] && ", equation.toString()) - } - - fun `test trigger expression posix parsing multiple AND with parenthesis`() { - val eqString = "(monitor[id=sigma-123] && monitor[id=sigma-456]) && (monitor[id=sigma-789] && monitor[id=id-2aw34])" - val equation = ChainedAlertExpressionParser(eqString).parse() - Assert.assertEquals( - "monitor[id=sigma-123] monitor[id=sigma-456] && monitor[id=sigma-789] monitor[id=id-2aw34] && && ", - equation.toString() - ) - } - - fun `test trigger expression posix parsing simple OR`() { - val eqString = "(monitor[id=sigma-123] || monitor[id=sigma-456])" - val equation = ChainedAlertExpressionParser(eqString).parse() - Assert.assertEquals("monitor[id=sigma-123] monitor[id=sigma-456] || ", equation.toString()) - } - - fun `test trigger expression posix parsing multiple OR`() { - val eqString = "(monitor[id=sigma-123] || monitor[id=sigma-456]) || monitor[id=sigma-789]" - val equation = ChainedAlertExpressionParser(eqString).parse() - Assert.assertEquals("monitor[id=sigma-123] monitor[id=sigma-456] || monitor[id=sigma-789] || ", equation.toString()) - } - - fun `test trigger expression posix parsing multiple OR with parenthesis`() { - val eqString = "(monitor[id=sigma-123] || monitor[id=sigma-456]) || (monitor[id=sigma-789] || monitor[id=id-2aw34])" - val equation = ChainedAlertExpressionParser(eqString).parse() - Assert.assertEquals( - "monitor[id=sigma-123] monitor[id=sigma-456] || monitor[id=sigma-789] monitor[id=id-2aw34] || || ", - equation.toString() - ) - } - - fun `test trigger expression posix parsing simple NOT`() { - val eqString = "(monitor[id=sigma-123] || 
!monitor[id=sigma-456])" - val equation = ChainedAlertExpressionParser(eqString).parse() - Assert.assertEquals("monitor[id=sigma-123] monitor[id=sigma-456] ! || ", equation.toString()) - } - - fun `test trigger expression posix parsing multiple NOT`() { - val eqString = "(monitor[id=sigma-123] && !monitor[tag=tag-456]) && !(monitor[id=sigma-789])" - val equation = ChainedAlertExpressionParser(eqString).parse() - Assert.assertEquals("monitor[id=sigma-123] monitor[tag=tag-456] ! && monitor[id=sigma-789] ! && ", equation.toString()) - } - - fun `test trigger expression posix parsing multiple operators with parenthesis`() { - val eqString = "(monitor[id=sigma-123] && monitor[tag=sev1]) || !(!monitor[id=sigma-789] || monitor[id=id-2aw34])" - val equation = ChainedAlertExpressionParser(eqString).parse() - Assert.assertEquals( - "monitor[id=sigma-123] monitor[tag=sev1] && monitor[id=sigma-789] ! monitor[id=id-2aw34] || ! || ", - equation.toString() - ) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionResolveTests.kt b/alerting/bin/test/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionResolveTests.kt deleted file mode 100644 index a0851d58d..000000000 --- a/alerting/bin/test/org/opensearch/alerting/chainedAlertCondition/ChainedAlertsExpressionResolveTests.kt +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.chainedAlertCondition - -import org.junit.Assert -import org.opensearch.alerting.chainedAlertCondition.parsers.ChainedAlertExpressionParser -import org.opensearch.test.OpenSearchTestCase - -class ChainedAlertsExpressionResolveTests : OpenSearchTestCase() { - - fun `test chained alert trigger expression evaluation simple AND`() { - val eqString = "(monitor[id=123] && monitor[id=456])" - val equation = ChainedAlertExpressionParser(eqString).parse() - Assert.assertEquals("monitor[id=123] monitor[id=456] && ", equation.toString()) - val alertGeneratingMonitors: Set = setOf( - "123", - "456" - ) - Assert.assertTrue(equation.evaluate(alertGeneratingMonitors)) - val alertGeneratingMonitors2: Set = setOf( - "123", - "789" - ) - Assert.assertFalse(equation.evaluate(alertGeneratingMonitors2)) - } - - fun `test chained alert trigger expression evaluation AND with NOT`() { - val eqString = "(monitor[id=123] && !monitor[id=456])" - val equation = ChainedAlertExpressionParser(eqString).parse() - Assert.assertEquals("monitor[id=123] monitor[id=456] ! 
&& ", equation.toString()) - val alertGeneratingMonitors: Set = setOf( - "123", - "456" - ) - Assert.assertFalse(equation.evaluate(alertGeneratingMonitors)) - val alertGeneratingMonitors1: Set = setOf( - "123", - "223" - ) - Assert.assertTrue(equation.evaluate(alertGeneratingMonitors1)) - } - - fun `test chained alert trigger expression evaluation simple OR`() { - val eqString = "(monitor[id=123] || monitor[id=456])" - val equation = ChainedAlertExpressionParser(eqString).parse() - Assert.assertEquals("monitor[id=123] monitor[id=456] || ", equation.toString()) - val alertGeneratingMonitors: Set = setOf( - "123", - "456" - ) - Assert.assertTrue(equation.evaluate(alertGeneratingMonitors)) - val alertGeneratingMonitors2: Set = setOf( - "234", - "567" - ) - Assert.assertFalse(equation.evaluate(alertGeneratingMonitors2)) - } - - fun `test chained alert trigger expression evaluation OR with NOT`() { - val eqString = "(monitor[id=123] || !monitor[id=456])" - val equation = ChainedAlertExpressionParser(eqString).parse() - Assert.assertEquals("monitor[id=123] monitor[id=456] ! || ", equation.toString()) - val alertGeneratingMonitors: Set = setOf( - "123", - "456" - ) - Assert.assertTrue(equation.evaluate(alertGeneratingMonitors)) - val alertGeneratingMonitors2: Set = setOf( - "456" - ) - Assert.assertFalse(equation.evaluate(alertGeneratingMonitors2)) - } - - fun `test chained alert trigger expression evaluation simple NOT`() { - val eqString = "!(monitor[id=456])" - val equation = ChainedAlertExpressionParser(eqString).parse() - Assert.assertEquals("monitor[id=456] ! ", equation.toString()) - val alertGeneratingMonitors: Set = setOf( - "123" - ) - Assert.assertTrue(equation.evaluate(alertGeneratingMonitors)) - val alertGeneratingMonitors2: Set = setOf( - "456" - ) - Assert.assertFalse(equation.evaluate(alertGeneratingMonitors2)) - } - - fun `test chained alert trigger expression evaluation with multiple operators with parenthesis`() { - val eqString = "(monitor[id=123] && monitor[id=456]) || !(!monitor[id=789] || monitor[id=abc])" - val equation = ChainedAlertExpressionParser(eqString).parse() - Assert.assertEquals( - "monitor[id=123] monitor[id=456] && monitor[id=789] ! monitor[id=abc] || ! 
|| ", - equation.toString() - ) - // part 1 evaluates, part 2 evaluates - val alertGeneratingMonitors1: Set = setOf( - "123", - "456", - "789", - "abc" - ) - Assert.assertTrue(equation.evaluate(alertGeneratingMonitors1)) - // part 1 not evaluates, part 2 not evaluates - val alertGeneratingMonitors2: Set = setOf( - "789", - "abc" - ) - Assert.assertFalse(equation.evaluate(alertGeneratingMonitors2)) - // part 1 not evaluates, part 2 evaluates - val alertGeneratingMonitors3: Set = setOf( - "789" - ) - Assert.assertTrue(equation.evaluate(alertGeneratingMonitors3)) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/model/AlertTests.kt b/alerting/bin/test/org/opensearch/alerting/model/AlertTests.kt deleted file mode 100644 index 08fba74cb..000000000 --- a/alerting/bin/test/org/opensearch/alerting/model/AlertTests.kt +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.junit.Assert -import org.opensearch.alerting.randomAlert -import org.opensearch.alerting.randomAlertWithAggregationResultBucket -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.test.OpenSearchTestCase - -class AlertTests : OpenSearchTestCase() { - fun `test alert as template args`() { - val alert = randomAlert().copy(acknowledgedTime = null, lastNotificationTime = null) - - val templateArgs = alert.asTemplateArg() - - assertEquals("Template args id does not match", templateArgs[Alert.ALERT_ID_FIELD], alert.id) - assertEquals("Template args version does not match", templateArgs[Alert.ALERT_VERSION_FIELD], alert.version) - assertEquals("Template args state does not match", templateArgs[Alert.STATE_FIELD], alert.state.toString()) - assertEquals("Template args error message does not match", templateArgs[Alert.ERROR_MESSAGE_FIELD], alert.errorMessage) - assertEquals("Template args acknowledged time does not match", templateArgs[Alert.ACKNOWLEDGED_TIME_FIELD], null) - assertEquals("Template args end time does not", templateArgs[Alert.END_TIME_FIELD], alert.endTime?.toEpochMilli()) - assertEquals("Template args start time does not", templateArgs[Alert.START_TIME_FIELD], alert.startTime.toEpochMilli()) - assertEquals("Template args last notification time does not match", templateArgs[Alert.LAST_NOTIFICATION_TIME_FIELD], null) - assertEquals("Template args severity does not match", templateArgs[Alert.SEVERITY_FIELD], alert.severity) - } - - fun `test agg alert as template args`() { - val alert = randomAlertWithAggregationResultBucket().copy(acknowledgedTime = null, lastNotificationTime = null) - - val templateArgs = alert.asTemplateArg() - - assertEquals("Template args id does not match", templateArgs[Alert.ALERT_ID_FIELD], alert.id) - assertEquals("Template args version does not match", templateArgs[Alert.ALERT_VERSION_FIELD], alert.version) - assertEquals("Template args state does not match", templateArgs[Alert.STATE_FIELD], alert.state.toString()) - assertEquals("Template args error message does not match", templateArgs[Alert.ERROR_MESSAGE_FIELD], alert.errorMessage) - assertEquals("Template args acknowledged time does not match", templateArgs[Alert.ACKNOWLEDGED_TIME_FIELD], null) - assertEquals("Template args end time does not", templateArgs[Alert.END_TIME_FIELD], alert.endTime?.toEpochMilli()) - assertEquals("Template args start time does not", templateArgs[Alert.START_TIME_FIELD], alert.startTime.toEpochMilli()) - assertEquals("Template args last notification time does not match", 
templateArgs[Alert.LAST_NOTIFICATION_TIME_FIELD], null) - assertEquals("Template args severity does not match", templateArgs[Alert.SEVERITY_FIELD], alert.severity) - Assert.assertEquals( - "Template args bucketKeys do not match", - templateArgs[Alert.BUCKET_KEYS], alert.aggregationResultBucket?.bucketKeys?.joinToString(",") - ) - Assert.assertEquals( - "Template args parentBucketPath does not match", - templateArgs[Alert.PARENTS_BUCKET_PATH], alert.aggregationResultBucket?.parentBucketPath - ) - } - - fun `test alert acknowledged`() { - val ackAlert = randomAlert().copy(state = Alert.State.ACKNOWLEDGED) - assertTrue("Alert is not acknowledged", ackAlert.isAcknowledged()) - - val activeAlert = randomAlert().copy(state = Alert.State.ACTIVE) - assertFalse("Alert is acknowledged", activeAlert.isAcknowledged()) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/model/DestinationTests.kt b/alerting/bin/test/org/opensearch/alerting/model/DestinationTests.kt deleted file mode 100644 index 7dac05b2a..000000000 --- a/alerting/bin/test/org/opensearch/alerting/model/DestinationTests.kt +++ /dev/null @@ -1,310 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.alerting.ADMIN -import org.opensearch.alerting.model.destination.Chime -import org.opensearch.alerting.model.destination.CustomWebhook -import org.opensearch.alerting.model.destination.Destination -import org.opensearch.alerting.model.destination.Slack -import org.opensearch.alerting.model.destination.email.Email -import org.opensearch.alerting.model.destination.email.Recipient -import org.opensearch.alerting.parser -import org.opensearch.alerting.randomUser -import org.opensearch.alerting.util.DestinationType -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.test.OpenSearchTestCase -import java.time.Instant - -class DestinationTests : OpenSearchTestCase() { - - fun `test chime destination`() { - val chime = Chime("http://abc.com") - assertEquals("Url is manipulated", chime.url, "http://abc.com") - } - - fun `test chime destination with out url`() { - try { - Chime("") - fail("Creating a chime destination with empty url did not fail.") - } catch (ignored: IllegalArgumentException) { - } - } - - fun `test slack destination`() { - val slack = Slack("http://abc.com") - assertEquals("Url is manipulated", slack.url, "http://abc.com") - } - - fun `test slack destination with out url`() { - try { - Slack("") - fail("Creating a slack destination with empty url did not fail.") - } catch (ignored: IllegalArgumentException) { - } - } - - fun `test email destination without recipients`() { - try { - Email("", emptyList()) - fail("Creating an email destination with empty recipients did not fail.") - } catch (ignored: IllegalArgumentException) { - } - } - - fun `test email recipient with valid email`() { - Recipient( - Recipient.RecipientType.EMAIL, - null, - "test@email.com" - ) - } - - fun `test email recipient with invalid email fails`() { - try { - Recipient( - Recipient.RecipientType.EMAIL, - null, - "invalid@email" - ) - fail("Creating an email recipient with an invalid email did not fail.") - } catch (ignored: IllegalArgumentException) { - } - } - - fun `test custom webhook destination with url and no host`() { - val customWebhook = CustomWebhook("http://abc.com", null, null, -1, null, null, emptyMap(), emptyMap(), null, null) - assertEquals("Url 
is manipulated", customWebhook.url, "http://abc.com") - } - - fun `test custom webhook destination with host and no url`() { - try { - val customWebhook = CustomWebhook(null, null, "abc.com", 80, null, null, emptyMap(), emptyMap(), null, null) - assertEquals("host is manipulated", customWebhook.host, "abc.com") - } catch (ignored: IllegalArgumentException) { - } - } - - fun `test custom webhook destination with url and host`() { - // In this case, url will be given priority - val customWebhook = CustomWebhook("http://abc.com", null, null, -1, null, null, emptyMap(), emptyMap(), null, null) - assertEquals("Url is manipulated", customWebhook.url, "http://abc.com") - } - - fun `test custom webhook destination with no url and no host`() { - try { - CustomWebhook("", null, null, 80, null, null, emptyMap(), emptyMap(), null, null) - fail("Creating a custom webhook destination with empty url did not fail.") - } catch (ignored: IllegalArgumentException) { - } - } - - fun `test chime destination create using stream`() { - val chimeDest = Destination( - "1234", 0L, 1, 1, 1, DestinationType.CHIME, "TestChimeDest", - randomUser(), Instant.now(), Chime("test.com"), null, null, null - ) - - val out = BytesStreamOutput() - chimeDest.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newDest = Destination.readFrom(sin) - - assertNotNull(newDest) - assertEquals("1234", newDest.id) - assertEquals(0, newDest.version) - assertEquals(1, newDest.schemaVersion) - assertEquals(DestinationType.CHIME, newDest.type) - assertEquals("TestChimeDest", newDest.name) - assertNotNull(newDest.lastUpdateTime) - assertNotNull(newDest.chime) - assertNull(newDest.slack) - assertNull(newDest.customWebhook) - assertNull(newDest.email) - } - - fun `test slack destination create using stream`() { - val slackDest = Destination( - "2345", 1L, 2, 1, 1, DestinationType.SLACK, "TestSlackDest", - randomUser(), Instant.now(), null, Slack("mytest.com"), null, null - ) - - val out = BytesStreamOutput() - slackDest.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newDest = Destination.readFrom(sin) - - assertNotNull(newDest) - assertEquals("2345", newDest.id) - assertEquals(1, newDest.version) - assertEquals(2, newDest.schemaVersion) - assertEquals(DestinationType.SLACK, newDest.type) - assertEquals("TestSlackDest", newDest.name) - assertNotNull(newDest.lastUpdateTime) - assertNull(newDest.chime) - assertNotNull(newDest.slack) - assertNull(newDest.customWebhook) - assertNull(newDest.email) - } - - fun `test customwebhook destination create using stream`() { - val customWebhookDest = Destination( - "2345", - 1L, - 2, - 1, - 1, - DestinationType.SLACK, - "TestSlackDest", - randomUser(), - Instant.now(), - null, - null, - CustomWebhook( - "test.com", - "schema", - "localhost", - 162, - "/tmp/", - "POST", - mutableMapOf(), - mutableMapOf(), - ADMIN, - ADMIN - ), - null - ) - val out = BytesStreamOutput() - customWebhookDest.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newDest = Destination.readFrom(sin) - - assertNotNull(newDest) - assertEquals("2345", newDest.id) - assertEquals(1, newDest.version) - assertEquals(2, newDest.schemaVersion) - assertEquals(DestinationType.SLACK, newDest.type) - assertEquals("TestSlackDest", newDest.name) - assertNotNull(newDest.lastUpdateTime) - assertNull(newDest.chime) - assertNull(newDest.slack) - assertNotNull(newDest.customWebhook) - assertNull(newDest.email) - } - - fun `test customwebhook destination create using 
stream with optionals`() { - val customWebhookDest = Destination( - "2345", - 1L, - 2, - 1, - 1, - DestinationType.SLACK, - "TestSlackDest", - randomUser(), - Instant.now(), - null, - null, - CustomWebhook( - "test.com", - null, - "localhost", - 162, - null, - "POST", - mutableMapOf(), - mutableMapOf(), - null, - null - ), - null - ) - val out = BytesStreamOutput() - customWebhookDest.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newDest = Destination.readFrom(sin) - - assertNotNull(newDest) - assertEquals("2345", newDest.id) - assertEquals(1, newDest.version) - assertEquals(2, newDest.schemaVersion) - assertEquals(DestinationType.SLACK, newDest.type) - assertEquals("TestSlackDest", newDest.name) - assertNotNull(newDest.lastUpdateTime) - assertNull(newDest.chime) - assertNull(newDest.slack) - assertNotNull(newDest.customWebhook) - assertNull(newDest.email) - } - - fun `test email destination create using stream`() { - val recipients = listOf( - Recipient( - Recipient.RecipientType.EMAIL, - null, - "test@email.com" - ) - ) - val mailDest = Destination( - "2345", - 1L, - 2, - 1, - 1, - DestinationType.EMAIL, - "TestEmailDest", - randomUser(), - Instant.now(), - null, - null, - null, - Email("3456", recipients) - ) - - val out = BytesStreamOutput() - mailDest.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newDest = Destination.readFrom(sin) - - assertNotNull(newDest) - assertEquals("2345", newDest.id) - assertEquals(1, newDest.version) - assertEquals(2, newDest.schemaVersion) - assertEquals(DestinationType.EMAIL, newDest.type) - assertEquals("TestEmailDest", newDest.name) - assertNotNull(newDest.lastUpdateTime) - assertNull(newDest.chime) - assertNull(newDest.slack) - assertNull(newDest.customWebhook) - assertNotNull(newDest.email) - - assertEquals("3456", newDest.email!!.emailAccountID) - assertEquals(recipients, newDest.email!!.recipients) - } - - fun `test chime destination without user`() { - val userString = "{\"type\":\"chime\",\"name\":\"TestChimeDest\",\"schema_version\":1," + - "\"last_update_time\":1600063313658,\"chime\":{\"url\":\"test.com\"}}" - val parsedDest = Destination.parse(parser(userString)) - assertNull(parsedDest.user) - } - - fun `test chime destination with user`() { - val userString = "{\"type\":\"chime\",\"name\":\"TestChimeDest\",\"user\":{\"name\":\"joe\",\"backend_roles\"" + - ":[\"ops\",\"backup\"],\"roles\":[\"ops_role, backup_role\"],\"custom_attribute_names\":[\"test_attr=test\"]}," + - "\"schema_version\":1,\"last_update_time\":1600063313658,\"chime\":{\"url\":\"test.com\"}}" - val parsedDest = Destination.parse(parser(userString)) - assertNotNull(parsedDest.user) - } - - fun `test chime destination with user as null`() { - val userString = "{\"type\":\"chime\",\"name\":\"TestChimeDest\",\"user\":null,\"schema_version\":1," + - "\"last_update_time\":1600063313658,\"chime\":{\"url\":\"test.com\"}}" - val parsedDest = Destination.parse(parser(userString)) - assertNull(parsedDest.user) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/model/EmailAccountTests.kt b/alerting/bin/test/org/opensearch/alerting/model/EmailAccountTests.kt deleted file mode 100644 index d3e436378..000000000 --- a/alerting/bin/test/org/opensearch/alerting/model/EmailAccountTests.kt +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.alerting.model.destination.email.EmailAccount 
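Every stream-serialization test in these deleted files follows one round-trip pattern: serialize the model to a BytesStreamOutput, rewrap the raw bytes as a StreamInput, and read back a copy that must equal the original. A minimal runnable sketch of that pattern, assuming only the stream APIs already imported in this patch (EmailAccount stands in for any of the models):

    import org.opensearch.alerting.model.destination.email.EmailAccount
    import org.opensearch.common.io.stream.BytesStreamOutput
    import org.opensearch.core.common.io.stream.StreamInput

    // Write to the transport wire format, then read a fresh copy back from the raw bytes.
    fun roundTrip(account: EmailAccount): EmailAccount {
        val out = BytesStreamOutput()
        account.writeTo(out)
        val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes)
        return EmailAccount.readFrom(sin)
    }

The tests then assert the copy equals the original, exercising both writeTo and readFrom in a single pass.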
-import org.opensearch.test.OpenSearchTestCase - -class EmailAccountTests : OpenSearchTestCase() { - - fun `test email account`() { - val emailAccount = EmailAccount( - name = "test", - email = "test@email.com", - host = "smtp.com", - port = 25, - method = EmailAccount.MethodType.NONE, - username = null, - password = null - ) - assertEquals("Email account name was changed", emailAccount.name, "test") - assertEquals("Email account email was changed", emailAccount.email, "test@email.com") - assertEquals("Email account host was changed", emailAccount.host, "smtp.com") - assertEquals("Email account port was changed", emailAccount.port, 25) - assertEquals("Email account method was changed", emailAccount.method, EmailAccount.MethodType.NONE) - } - - fun `test email account with invalid name`() { - try { - EmailAccount( - name = "invalid-name", - email = "test@email.com", - host = "smtp.com", - port = 25, - method = EmailAccount.MethodType.NONE, - username = null, - password = null - ) - fail("Creating an email account with an invalid name did not fail.") - } catch (ignored: IllegalArgumentException) { - } - } - - fun `test email account with invalid email`() { - try { - EmailAccount( - name = "test", - email = "test@.com", - host = "smtp.com", - port = 25, - method = EmailAccount.MethodType.NONE, - username = null, - password = null - ) - fail("Creating an email account with an invalid email did not fail.") - } catch (ignored: IllegalArgumentException) { - } - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/model/EmailGroupTests.kt b/alerting/bin/test/org/opensearch/alerting/model/EmailGroupTests.kt deleted file mode 100644 index a0c215059..000000000 --- a/alerting/bin/test/org/opensearch/alerting/model/EmailGroupTests.kt +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.alerting.model.destination.email.EmailEntry -import org.opensearch.alerting.model.destination.email.EmailGroup -import org.opensearch.test.OpenSearchTestCase - -class EmailGroupTests : OpenSearchTestCase() { - - fun `test email group`() { - val emailGroup = EmailGroup( - name = "test", - emails = listOf(EmailEntry("test@email.com")) - ) - assertEquals("Email group name was changed", emailGroup.name, "test") - assertEquals("Email group emails count was changed", emailGroup.emails.size, 1) - assertEquals("Email group email entry was changed", emailGroup.emails[0].email, "test@email.com") - } - - fun `test email group get emails as list of string`() { - val emailGroup = EmailGroup( - name = "test", - emails = listOf( - EmailEntry("test@email.com"), - EmailEntry("test2@email.com") - ) - ) - - assertEquals( - "List of email strings does not match email entries", - listOf("test@email.com", "test2@email.com"), emailGroup.getEmailsAsListOfString() - ) - } - - fun `test email group with invalid name fails`() { - try { - EmailGroup( - name = "invalid name", - emails = listOf(EmailEntry("test@email.com")) - ) - fail("Creating an email group with an invalid name did not fail.") - } catch (ignored: IllegalArgumentException) { - } - } - - fun `test email group with invalid email fails`() { - try { - EmailGroup( - name = "test", - emails = listOf(EmailEntry("invalid.com")) - ) - fail("Creating an email group with an invalid email did not fail.") - } catch (ignored: IllegalArgumentException) { - } - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/model/FindingTests.kt 
b/alerting/bin/test/org/opensearch/alerting/model/FindingTests.kt deleted file mode 100644 index f77ca3ddc..000000000 --- a/alerting/bin/test/org/opensearch/alerting/model/FindingTests.kt +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.alerting.randomFinding -import org.opensearch.commons.alerting.model.Finding -import org.opensearch.test.OpenSearchTestCase - -class FindingTests : OpenSearchTestCase() { - fun `test finding asTemplateArgs`() { - // GIVEN - val finding = randomFinding() - - // WHEN - val templateArgs = finding.asTemplateArg() - - // THEN - assertEquals("Template args 'id' field does not match:", templateArgs[Finding.FINDING_ID_FIELD], finding.id) - assertEquals( - "Template args 'relatedDocIds' field does not match:", - templateArgs[Finding.RELATED_DOC_IDS_FIELD], - finding.relatedDocIds - ) - assertEquals("Template args 'monitorId' field does not match:", templateArgs[Finding.MONITOR_ID_FIELD], finding.monitorId) - assertEquals( - "Template args 'monitorName' field does not match:", - templateArgs[Finding.MONITOR_NAME_FIELD], - finding.monitorName - ) - assertEquals("Template args 'queries' field does not match:", templateArgs[Finding.QUERIES_FIELD], finding.docLevelQueries) - assertEquals( - "Template args 'timestamp' field does not match:", - templateArgs[Finding.TIMESTAMP_FIELD], - finding.timestamp.toEpochMilli() - ) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/model/WriteableTests.kt b/alerting/bin/test/org/opensearch/alerting/model/WriteableTests.kt deleted file mode 100644 index 6851c471d..000000000 --- a/alerting/bin/test/org/opensearch/alerting/model/WriteableTests.kt +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.alerting.model.destination.email.EmailAccount -import org.opensearch.alerting.model.destination.email.EmailGroup -import org.opensearch.alerting.randomActionRunResult -import org.opensearch.alerting.randomBucketLevelMonitorRunResult -import org.opensearch.alerting.randomBucketLevelTriggerRunResult -import org.opensearch.alerting.randomDocumentLevelMonitorRunResult -import org.opensearch.alerting.randomDocumentLevelTriggerRunResult -import org.opensearch.alerting.randomEmailAccount -import org.opensearch.alerting.randomEmailGroup -import org.opensearch.alerting.randomInputRunResults -import org.opensearch.alerting.randomQueryLevelMonitorRunResult -import org.opensearch.alerting.randomQueryLevelTriggerRunResult -import org.opensearch.common.io.stream.BytesStreamOutput -import org.opensearch.commons.alerting.model.SearchInput -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.test.OpenSearchTestCase - -class WriteableTests : OpenSearchTestCase() { - - fun `test actionrunresult as stream`() { - val actionRunResult = randomActionRunResult() - val out = BytesStreamOutput() - actionRunResult.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newActionRunResult = ActionRunResult(sin) - assertEquals("Round tripping ActionRunResult doesn't work", actionRunResult, newActionRunResult) - } - - fun `test query-level triggerrunresult as stream`() { - val runResult = randomQueryLevelTriggerRunResult() - val out = BytesStreamOutput() - runResult.writeTo(out) - val sin = 
StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newRunResult = QueryLevelTriggerRunResult(sin) - assertEquals("Round tripping QueryLevelTriggerRunResult doesn't work", runResult, newRunResult) - } - - fun `test bucket-level triggerrunresult as stream`() { - val runResult = randomBucketLevelTriggerRunResult() - val out = BytesStreamOutput() - runResult.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newRunResult = BucketLevelTriggerRunResult(sin) - assertEquals("Round tripping BucketLevelTriggerRunResult doesn't work", runResult, newRunResult) - } - - fun `test doc-level triggerrunresult as stream`() { - val runResult = randomDocumentLevelTriggerRunResult() - val out = BytesStreamOutput() - runResult.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newRunResult = DocumentLevelTriggerRunResult(sin) - assertEquals("Round tripping DocumentLevelTriggerRunResult doesn't work", runResult, newRunResult) - } - - fun `test inputrunresult as stream`() { - val runResult = randomInputRunResults() - val out = BytesStreamOutput() - runResult.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newRunResult = InputRunResults.readFrom(sin) - assertEquals("Round tripping InputRunResults doesn't work", runResult, newRunResult) - } - - fun `test query-level monitorrunresult as stream`() { - val runResult = randomQueryLevelMonitorRunResult() - val out = BytesStreamOutput() - runResult.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newRunResult = MonitorRunResult<QueryLevelTriggerRunResult>(sin) - assertEquals("Round tripping MonitorRunResult doesn't work", runResult, newRunResult) - } - - fun `test bucket-level monitorrunresult as stream`() { - val runResult = randomBucketLevelMonitorRunResult() - val out = BytesStreamOutput() - runResult.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newRunResult = MonitorRunResult<BucketLevelTriggerRunResult>(sin) - assertEquals("Round tripping MonitorRunResult doesn't work", runResult, newRunResult) - } - - fun `test doc-level monitorrunresult as stream`() { - val runResult = randomDocumentLevelMonitorRunResult() - val out = BytesStreamOutput() - runResult.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newRunResult = MonitorRunResult<DocumentLevelTriggerRunResult>(sin) - assertEquals("Round tripping MonitorRunResult doesn't work", runResult, newRunResult) - } - - fun `test searchinput as stream`() { - val input = SearchInput(emptyList(), SearchSourceBuilder()) - val out = BytesStreamOutput() - input.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newInput = SearchInput(sin) - assertEquals("Round tripping SearchInput doesn't work", input, newInput) - } - - fun `test emailaccount as stream`() { - val emailAccount = randomEmailAccount() - val out = BytesStreamOutput() - emailAccount.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newEmailAccount = EmailAccount.readFrom(sin) - assertEquals("Round tripping EmailAccount doesn't work", emailAccount, newEmailAccount) - } - - fun `test emailgroup as stream`() { - val emailGroup = randomEmailGroup() - val out = BytesStreamOutput() - emailGroup.writeTo(out) - val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes) - val newEmailGroup = EmailGroup.readFrom(sin) - assertEquals("Round tripping EmailGroup doesn't work", emailGroup, newEmailGroup) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/model/XContentTests.kt b/alerting/bin/test/org/opensearch/alerting/model/XContentTests.kt 
deleted file mode 100644 index 7d07af331..000000000 --- a/alerting/bin/test/org/opensearch/alerting/model/XContentTests.kt +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.model - -import org.opensearch.alerting.builder -import org.opensearch.alerting.model.destination.email.EmailAccount -import org.opensearch.alerting.model.destination.email.EmailGroup -import org.opensearch.alerting.parser -import org.opensearch.alerting.randomActionExecutionResult -import org.opensearch.alerting.randomAlert -import org.opensearch.alerting.randomEmailAccount -import org.opensearch.alerting.randomEmailGroup -import org.opensearch.alerting.toJsonString -import org.opensearch.common.xcontent.XContentFactory -import org.opensearch.commons.alerting.model.ActionExecutionResult -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.util.string -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.test.OpenSearchTestCase - -class XContentTests : OpenSearchTestCase() { - - fun `test alert parsing`() { - val alert = randomAlert() - - val alertString = alert.toXContentWithUser(builder()).string() - val parsedAlert = Alert.parse(parser(alertString)) - - assertEquals("Round tripping alert doesn't work", alert, parsedAlert) - } - - fun `test alert parsing without user`() { - val alertStr = "{\"id\":\"\",\"version\":-1,\"monitor_id\":\"\",\"schema_version\":0,\"monitor_version\":1," + - "\"monitor_name\":\"ARahqfRaJG\",\"trigger_id\":\"fhe1-XQBySl0wQKDBkOG\",\"trigger_name\":\"ffELMuhlro\"," + - "\"state\":\"ACTIVE\",\"error_message\":null,\"alert_history\":[],\"severity\":\"1\",\"action_execution_results\"" + - ":[{\"action_id\":\"ghe1-XQBySl0wQKDBkOG\",\"last_execution_time\":1601917224583,\"throttled_count\":-1478015168}," + - "{\"action_id\":\"gxe1-XQBySl0wQKDBkOH\",\"last_execution_time\":1601917224583,\"throttled_count\":-768533744}]," + - "\"start_time\":1601917224599,\"last_notification_time\":null,\"end_time\":null,\"acknowledged_time\":null}" - val parsedAlert = Alert.parse(parser(alertStr)) - assertNull(parsedAlert.monitorUser) - } - - fun `test alert parsing with user as null`() { - val alertStr = "{\"id\":\"\",\"version\":-1,\"monitor_id\":\"\",\"schema_version\":0,\"monitor_version\":1,\"monitor_user\":null," + - "\"monitor_name\":\"ARahqfRaJG\",\"trigger_id\":\"fhe1-XQBySl0wQKDBkOG\",\"trigger_name\":\"ffELMuhlro\"," + - "\"state\":\"ACTIVE\",\"error_message\":null,\"alert_history\":[],\"severity\":\"1\",\"action_execution_results\"" + - ":[{\"action_id\":\"ghe1-XQBySl0wQKDBkOG\",\"last_execution_time\":1601917224583,\"throttled_count\":-1478015168}," + - "{\"action_id\":\"gxe1-XQBySl0wQKDBkOH\",\"last_execution_time\":1601917224583,\"throttled_count\":-768533744}]," + - "\"start_time\":1601917224599,\"last_notification_time\":null,\"end_time\":null,\"acknowledged_time\":null}" - val parsedAlert = Alert.parse(parser(alertStr)) - assertNull(parsedAlert.monitorUser) - } - - fun `test action execution result parsing`() { - val actionExecutionResult = randomActionExecutionResult() - - val actionExecutionResultString = actionExecutionResult.toXContent(builder(), ToXContent.EMPTY_PARAMS).string() - val parsedActionExecutionResultString = ActionExecutionResult.parse(parser(actionExecutionResultString)) - - assertEquals("Round tripping alert doesn't work", actionExecutionResult, parsedActionExecutionResultString) - } - - fun `test email account parsing`() { 
- val emailAccount = randomEmailAccount() - - val emailAccountString = emailAccount.toJsonString() - val parsedEmailAccount = EmailAccount.parse(parser(emailAccountString)) - assertEquals("Round tripping EmailAccount doesn't work", emailAccount, parsedEmailAccount) - } - - fun `test email group parsing`() { - val emailGroup = randomEmailGroup() - - val emailGroupString = emailGroup.toJsonString() - val parsedEmailGroup = EmailGroup.parse(parser(emailGroupString)) - assertEquals("Round tripping EmailGroup doesn't work", emailGroup, parsedEmailGroup) - } - - fun `test MonitorMetadata`() { - val monitorMetadata = MonitorMetadata( - id = "monitorId-metadata", - monitorId = "monitorId", - lastActionExecutionTimes = emptyList(), - lastRunContext = emptyMap(), - sourceToQueryIndexMapping = mutableMapOf() - ) - val monitorMetadataString = monitorMetadata.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS).string() - val parsedMonitorMetadata = MonitorMetadata.parse(parser(monitorMetadataString)) - assertEquals("Round tripping MonitorMetadata doesn't work", monitorMetadata, parsedMonitorMetadata) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/DestinationRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/DestinationRestApiIT.kt deleted file mode 100644 index 00e7c197d..000000000 --- a/alerting/bin/test/org/opensearch/alerting/resthandler/DestinationRestApiIT.kt +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.junit.Assert -import org.opensearch.alerting.AlertingRestTestCase -import org.opensearch.alerting.model.destination.Chime -import org.opensearch.alerting.model.destination.CustomWebhook -import org.opensearch.alerting.model.destination.Destination -import org.opensearch.alerting.model.destination.Slack -import org.opensearch.alerting.model.destination.email.Email -import org.opensearch.alerting.model.destination.email.Recipient -import org.opensearch.alerting.randomUser -import org.opensearch.alerting.util.DestinationType -import org.opensearch.test.junit.annotations.TestLogging -import java.time.Instant - -@TestLogging("level:DEBUG", reason = "Debug for tests.") -@Suppress("UNCHECKED_CAST") -class DestinationRestApiIT : AlertingRestTestCase() { - - fun `test creating a chime destination`() { - val chime = Chime("http://abc.com") - val destination = Destination( - type = DestinationType.CHIME, - name = "test", - user = randomUser(), - lastUpdateTime = Instant.now(), - chime = chime, - slack = null, - customWebhook = null, - email = null - ) - val createdDestination = createDestination(destination = destination) - assertEquals("Incorrect destination name", createdDestination.name, "test") - assertEquals("Incorrect destination type", createdDestination.type, DestinationType.CHIME) - Assert.assertNotNull("chime object should not be null", createdDestination.chime) - } - - fun `test creating a custom webhook destination with url`() { - val customWebhook = CustomWebhook("http://abc.com", null, null, 80, null, "PUT", emptyMap(), emptyMap(), null, null) - val destination = Destination( - type = DestinationType.CUSTOM_WEBHOOK, - name = "test", - user = randomUser(), - lastUpdateTime = Instant.now(), - chime = null, - slack = null, - customWebhook = customWebhook, - email = null - ) - val createdDestination = createDestination(destination = destination) - assertEquals("Incorrect destination name", 
createdDestination.name, "test") - assertEquals("Incorrect destination type", createdDestination.type, DestinationType.CUSTOM_WEBHOOK) - Assert.assertNotNull("custom webhook object should not be null", createdDestination.customWebhook) - } - - fun `test creating a custom webhook destination with host`() { - val customWebhook = CustomWebhook( - "", "http", "abc.com", 80, "a/b/c", "PATCH", - mapOf("foo" to "1", "bar" to "2"), mapOf("h1" to "1", "h2" to "2"), null, null - ) - val destination = Destination( - type = DestinationType.CUSTOM_WEBHOOK, - name = "test", - user = randomUser(), - lastUpdateTime = Instant.now(), - chime = null, - slack = null, - customWebhook = customWebhook, - email = null - ) - val createdDestination = createDestination(destination = destination) - assertEquals("Incorrect destination name", createdDestination.name, "test") - assertEquals("Incorrect destination type", createdDestination.type, DestinationType.CUSTOM_WEBHOOK) - assertEquals("Incorrect destination host", createdDestination.customWebhook?.host, "abc.com") - assertEquals("Incorrect destination port", createdDestination.customWebhook?.port, 80) - assertEquals("Incorrect destination path", createdDestination.customWebhook?.path, "a/b/c") - assertEquals("Incorrect destination scheme", createdDestination.customWebhook?.scheme, "http") - assertEquals("Incorrect destination method", createdDestination.customWebhook?.method, "PATCH") - Assert.assertNotNull("custom webhook object should not be null", createdDestination.customWebhook) - } - - fun `test creating an email destination`() { - val recipient = Recipient(type = Recipient.RecipientType.EMAIL, emailGroupID = null, email = "test@email.com") - val email = Email("", listOf(recipient)) - val destination = Destination( - type = DestinationType.EMAIL, - name = "test", - user = randomUser(), - lastUpdateTime = Instant.now(), - chime = null, - slack = null, - customWebhook = null, - email = email - ) - - val createdDestination = createDestination(destination = destination) - Assert.assertNotNull("Email object should not be null", createdDestination.email) - assertEquals("Incorrect destination name", createdDestination.name, "test") - assertEquals("Incorrect destination type", createdDestination.type, DestinationType.EMAIL) - assertEquals( - "Incorrect email destination recipient type", - createdDestination.email?.recipients?.get(0)?.type, - Recipient.RecipientType.EMAIL - ) - assertEquals( - "Incorrect email destination recipient email", - createdDestination.email?.recipients?.get(0)?.email, - "test@email.com" - ) - } - - fun `test get destination`() { - val destination = createDestination() - val getDestinationResponse = getDestination(destination) - assertEquals(destination.id, getDestinationResponse["id"]) - assertEquals(destination.type.value, getDestinationResponse["type"]) - assertEquals(destination.seqNo, getDestinationResponse["seq_no"]) - assertEquals(destination.lastUpdateTime.toEpochMilli(), getDestinationResponse["last_update_time"]) - assertEquals(destination.primaryTerm, getDestinationResponse["primary_term"]) - } - - fun `test get destinations with slack destination type`() { - val slack = Slack("url") - val dest = Destination( - type = DestinationType.SLACK, - name = "testSlack", - user = randomUser(), - lastUpdateTime = Instant.now(), - chime = null, - slack = slack, - customWebhook = null, - email = null - ) - - val inputMap = HashMap() - inputMap["missing"] = "_last" - inputMap["destinationType"] = "slack" - - val destination = 
createDestination(dest) - val destination2 = createDestination() - val getDestinationsResponse = getDestinations(inputMap) - - assertEquals(1, getDestinationsResponse.size) - val getDestinationResponse = getDestinationsResponse[0] - - assertEquals(destination.id, getDestinationResponse["id"]) - assertNotEquals(destination2.id, getDestinationResponse["id"]) - assertEquals(destination.type.value, getDestinationResponse["type"]) - assertEquals(destination.seqNo, getDestinationResponse["seq_no"]) - assertEquals(destination.lastUpdateTime.toEpochMilli(), getDestinationResponse["last_update_time"]) - assertEquals(destination.primaryTerm, getDestinationResponse["primary_term"]) - } - - fun `test get destinations matching a given name`() { - val slack = Slack("url") - val dest = Destination( - type = DestinationType.SLACK, - name = "testSlack", - user = randomUser(), - lastUpdateTime = Instant.now(), - chime = null, - slack = slack, - customWebhook = null, - email = null - ) - - val inputMap = HashMap<String, Any>() - inputMap["searchString"] = "testSlack" - - val destination = createDestination(dest) - val destination2 = createDestination() - val getDestinationsResponse = getDestinations(inputMap) - - assertEquals(1, getDestinationsResponse.size) - val getDestinationResponse = getDestinationsResponse[0] - - assertEquals(destination.id, getDestinationResponse["id"]) - assertNotEquals(destination2.id, getDestinationResponse["id"]) - assertEquals(destination.type.value, getDestinationResponse["type"]) - assertEquals(destination.seqNo, getDestinationResponse["seq_no"]) - assertEquals(destination.lastUpdateTime.toEpochMilli(), getDestinationResponse["last_update_time"]) - assertEquals(destination.primaryTerm, getDestinationResponse["primary_term"]) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/EmailAccountRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/EmailAccountRestApiIT.kt deleted file mode 100644 index fc730f20b..000000000 --- a/alerting/bin/test/org/opensearch/alerting/resthandler/EmailAccountRestApiIT.kt +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.apache.http.entity.ContentType -import org.apache.http.nio.entity.NStringEntity -import org.opensearch.alerting.AlertingPlugin.Companion.EMAIL_ACCOUNT_BASE_URI -import org.opensearch.alerting.AlertingRestTestCase -import org.opensearch.alerting.makeRequest -import org.opensearch.alerting.model.destination.email.EmailAccount -import org.opensearch.alerting.randomEmailAccount -import org.opensearch.client.ResponseException -import org.opensearch.common.xcontent.XContentType -import org.opensearch.core.rest.RestStatus -import org.opensearch.index.query.QueryBuilders -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.test.OpenSearchTestCase -import org.opensearch.test.junit.annotations.TestLogging - -@TestLogging("level:DEBUG", reason = "Debug for tests.") -@Suppress("UNCHECKED_CAST") -class EmailAccountRestApiIT : AlertingRestTestCase() { - - fun `test creating an email account`() { - val emailAccount = EmailAccount( - name = "test", - email = "test@email.com", - host = "smtp.com", - port = 25, - method = EmailAccount.MethodType.NONE, - username = null, - password = null - ) - val createdEmailAccount = createEmailAccount(emailAccount = emailAccount) - assertEquals("Incorrect email account name", createdEmailAccount.name, "test") - assertEquals("Incorrect 
email account email", createdEmailAccount.email, "test@email.com") - assertEquals("Incorrect email account host", createdEmailAccount.host, "smtp.com") - assertEquals("Incorrect email account port", createdEmailAccount.port, 25) - assertEquals("Incorrect email account method", createdEmailAccount.method, EmailAccount.MethodType.NONE) - } - - fun `test creating an email account with PUT fails`() { - try { - val emailAccount = randomEmailAccount() - client().makeRequest("PUT", EMAIL_ACCOUNT_BASE_URI, emptyMap(), emailAccount.toHttpEntity()) - fail("Expected 405 Method Not Allowed response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.METHOD_NOT_ALLOWED, e.response.restStatus()) - } - } - - fun `test creating an email account when email destination is disallowed fails`() { - try { - removeEmailFromAllowList() - createRandomEmailAccount() - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } - } - - fun `test getting an email account`() { - val emailAccount = createRandomEmailAccount() - val storedEmailAccount = getEmailAccount(emailAccount.id) - assertEquals("Indexed and retrieved email account differ", emailAccount, storedEmailAccount) - } - - fun `test getting an email account that doesn't exist`() { - try { - getEmailAccount(randomAlphaOfLength(20)) - fail("Expected response exception") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.NOT_FOUND, e.response.restStatus()) - } - } - - fun `test getting an email account when email destination is disallowed fails`() { - val emailAccount = createRandomEmailAccount() - - try { - removeEmailFromAllowList() - getEmailAccount(emailAccount.id) - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } - } - - fun `test checking if an email account exists`() { - val emailAccount = createRandomEmailAccount() - - val headResponse = client().makeRequest("HEAD", "$EMAIL_ACCOUNT_BASE_URI/${emailAccount.id}") - assertEquals("Unable to HEAD email account", RestStatus.OK, headResponse.restStatus()) - assertNull("Response contains unexpected body", headResponse.entity) - } - - fun `test checking if a non-existent email account exists`() { - val headResponse = client().makeRequest("HEAD", "$EMAIL_ACCOUNT_BASE_URI/foobar") - assertEquals("Unexpected status", RestStatus.NOT_FOUND, headResponse.restStatus()) - } - - fun `test querying an email account that exists`() { - val emailAccount = createRandomEmailAccount() - - val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", emailAccount.id)).toString() - val searchResponse = client().makeRequest( - "GET", - "$EMAIL_ACCOUNT_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search email account failed", RestStatus.OK, searchResponse.restStatus()) - val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) - val hits = xcp.map()["hits"]!! 
as Map<String, Map<String, Any>> - val numberOfDocsFound = hits["total"]?.get("value") - assertEquals("Email account not found during search", 1, numberOfDocsFound) - } - - fun `test querying an email account that exists with POST`() { - val emailAccount = createRandomEmailAccount() - - val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", emailAccount.id)).toString() - val searchResponse = client().makeRequest( - "POST", - "$EMAIL_ACCOUNT_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search email account failed", RestStatus.OK, searchResponse.restStatus()) - val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) - val hits = xcp.map()["hits"]!! as Map<String, Map<String, Any>> - val numberOfDocsFound = hits["total"]?.get("value") - assertEquals("Email account not found during search", 1, numberOfDocsFound) - } - - fun `test querying an email account that doesn't exist`() { - // Create a random email account to create the ScheduledJob index. Otherwise the test will fail with a 404 index not found error. - createRandomEmailAccount() - val search = SearchSourceBuilder() - .query( - QueryBuilders.termQuery( - OpenSearchTestCase.randomAlphaOfLength(5), - OpenSearchTestCase.randomAlphaOfLength(5) - ) - ).toString() - - val searchResponse = client().makeRequest( - "GET", - "$EMAIL_ACCOUNT_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search email account failed", RestStatus.OK, searchResponse.restStatus()) - val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) - val hits = xcp.map()["hits"]!! as Map<String, Map<String, Any>> - val numberOfDocsFound = hits["total"]?.get("value") - assertEquals("Email account found during search when no document was present", 0, numberOfDocsFound) - } - - fun `test querying an email account when email destination is disallowed fails`() { - val emailAccount = createRandomEmailAccount() - - try { - removeEmailFromAllowList() - val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", emailAccount.id)).toString() - client().makeRequest( - "GET", - "$EMAIL_ACCOUNT_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/EmailGroupRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/EmailGroupRestApiIT.kt deleted file mode 100644 index 94c880c61..000000000 --- a/alerting/bin/test/org/opensearch/alerting/resthandler/EmailGroupRestApiIT.kt +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.apache.http.entity.ContentType -import org.apache.http.nio.entity.NStringEntity -import org.opensearch.alerting.AlertingPlugin.Companion.EMAIL_GROUP_BASE_URI -import org.opensearch.alerting.AlertingRestTestCase -import org.opensearch.alerting.makeRequest -import org.opensearch.alerting.model.destination.email.EmailEntry -import org.opensearch.alerting.model.destination.email.EmailGroup -import org.opensearch.alerting.randomEmailGroup -import org.opensearch.client.ResponseException -import org.opensearch.common.xcontent.XContentType -import org.opensearch.core.rest.RestStatus -import org.opensearch.index.query.QueryBuilders -import 
org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.test.OpenSearchTestCase -import org.opensearch.test.junit.annotations.TestLogging - -@TestLogging("level:DEBUG", reason = "Debug for tests.") -@Suppress("UNCHECKED_CAST") -class EmailGroupRestApiIT : AlertingRestTestCase() { - - fun `test creating an email group`() { - val emailGroup = EmailGroup( - name = "test", - emails = listOf(EmailEntry("test@email.com")) - ) - val createdEmailGroup = createEmailGroup(emailGroup = emailGroup) - assertEquals("Incorrect email group name", createdEmailGroup.name, "test") - assertEquals("Incorrect email group email entry", createdEmailGroup.emails[0].email, "test@email.com") - } - - fun `test creating an email group with PUT fails`() { - try { - val emailGroup = randomEmailGroup() - client().makeRequest("PUT", EMAIL_GROUP_BASE_URI, emptyMap(), emailGroup.toHttpEntity()) - fail("Expected 405 Method Not Allowed response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.METHOD_NOT_ALLOWED, e.response.restStatus()) - } - } - - fun `test creating an email group when email destination is disallowed fails`() { - try { - removeEmailFromAllowList() - createRandomEmailGroup() - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } - } - - fun `test getting an email group`() { - val emailGroup = createRandomEmailGroup() - val storedEmailGroup = getEmailGroup(emailGroup.id) - assertEquals("Indexed and retrieved email group differ", emailGroup, storedEmailGroup) - } - - fun `test getting an email group that doesn't exist`() { - try { - getEmailGroup(randomAlphaOfLength(20)) - fail("Expected response exception") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.NOT_FOUND, e.response.restStatus()) - } - } - - fun `test getting an email group when email destination is disallowed fails`() { - val emailGroup = createRandomEmailGroup() - - try { - removeEmailFromAllowList() - getEmailGroup(emailGroup.id) - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } - } - - fun `test checking if an email group exists`() { - val emailGroup = createRandomEmailGroup() - - val headResponse = client().makeRequest("HEAD", "$EMAIL_GROUP_BASE_URI/${emailGroup.id}") - assertEquals("Unable to HEAD email group", RestStatus.OK, headResponse.restStatus()) - assertNull("Response contains unexpected body", headResponse.entity) - } - - fun `test checking if a non-existent email group exists`() { - val headResponse = client().makeRequest("HEAD", "$EMAIL_GROUP_BASE_URI/foobar") - assertEquals("Unexpected status", RestStatus.NOT_FOUND, headResponse.restStatus()) - } - - fun `test querying an email group that exists`() { - val emailGroup = createRandomEmailGroup() - - val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", emailGroup.id)).toString() - val searchResponse = client().makeRequest( - "GET", - "$EMAIL_GROUP_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search email group failed", RestStatus.OK, searchResponse.restStatus()) - val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) - val hits = xcp.map()["hits"]!! 
as Map<String, Map<String, Any>> - val numberOfDocsFound = hits["total"]?.get("value") - assertEquals("Email group not found during search", 1, numberOfDocsFound) - } - - fun `test querying an email group that exists with POST`() { - val emailGroup = createRandomEmailGroup() - - val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", emailGroup.id)).toString() - val searchResponse = client().makeRequest( - "POST", - "$EMAIL_GROUP_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search email group failed", RestStatus.OK, searchResponse.restStatus()) - val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) - val hits = xcp.map()["hits"]!! as Map<String, Map<String, Any>> - val numberOfDocsFound = hits["total"]?.get("value") - assertEquals("Email group not found during search", 1, numberOfDocsFound) - } - - fun `test querying an email group that doesn't exist`() { - // Create a random email group to create the ScheduledJob index. Otherwise the test will fail with a 404 index not found error. - createRandomEmailGroup() - val search = SearchSourceBuilder() - .query( - QueryBuilders.termQuery( - OpenSearchTestCase.randomAlphaOfLength(5), - OpenSearchTestCase.randomAlphaOfLength(5) - ) - ).toString() - - val searchResponse = client().makeRequest( - "GET", - "$EMAIL_GROUP_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search email group failed", RestStatus.OK, searchResponse.restStatus()) - val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) - val hits = xcp.map()["hits"]!! as Map<String, Map<String, Any>> - val numberOfDocsFound = hits["total"]?.get("value") - assertEquals("Email group found during search when no document was present", 0, numberOfDocsFound) - } - - fun `test querying an email group when email destination is disallowed fails`() { - val emailGroup = createRandomEmailGroup() - - try { - removeEmailFromAllowList() - val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", emailGroup.id)).toString() - client().makeRequest( - "GET", - "$EMAIL_GROUP_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/FindingsRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/FindingsRestApiIT.kt deleted file mode 100644 index 1839bc807..000000000 --- a/alerting/bin/test/org/opensearch/alerting/resthandler/FindingsRestApiIT.kt +++ /dev/null @@ -1,210 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.opensearch.alerting.ALWAYS_RUN -import org.opensearch.alerting.AlertingRestTestCase -import org.opensearch.alerting.randomDocumentLevelMonitor -import org.opensearch.alerting.randomDocumentLevelTrigger -import org.opensearch.commons.alerting.model.DocLevelMonitorInput -import org.opensearch.commons.alerting.model.DocLevelQuery -import org.opensearch.test.junit.annotations.TestLogging - -@TestLogging("level:DEBUG", reason = "Debug for tests.") -@Suppress("UNCHECKED_CAST") -class FindingsRestApiIT : AlertingRestTestCase() { - - fun `test find Finding where doc is not retrieved`() { - val testIndex = createTestIndex() - val docQuery = DocLevelQuery(query = 
"test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - executeMonitor(trueMonitor.id, mapOf(Pair("dryrun", "true"))) - createFinding(matchingDocIds = listOf("someId")) - val response = searchFindings() - assertEquals(1, response.totalFindings) - assertEquals(1, response.findings[0].documents.size) - assertFalse(response.findings[0].documents[0].found) - } - - fun `test find Finding where doc is retrieved`() { - val testIndex = createTestIndex() - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_field" : "us-west-2" - }""" - indexDoc(testIndex, "someId", testDoc) - val testDoc2 = """{ - "message" : "This is an error2 from IAD region", - "test_field" : "us-west-3" - }""" - indexDoc(testIndex, "someId2", testDoc2) - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - executeMonitor(trueMonitor.id, mapOf(Pair("dryrun", "true"))) - - val findingWith1 = createFinding(matchingDocIds = listOf("someId"), index = testIndex) - val findingWith2 = createFinding(matchingDocIds = listOf("someId", "someId2"), index = testIndex) - val response = searchFindings() - assertEquals(2, response.totalFindings) - for (findingWithDoc in response.findings) { - if (findingWithDoc.finding.id == findingWith1) { - assertEquals(1, findingWithDoc.documents.size) - assertTrue(findingWithDoc.documents[0].found) - assertEquals(testDoc, findingWithDoc.documents[0].document) - } else if (findingWithDoc.finding.id == findingWith2) { - assertEquals(2, findingWithDoc.documents.size) - assertTrue(findingWithDoc.documents[0].found) - assertTrue(findingWithDoc.documents[1].found) - assertEquals(testDoc, findingWithDoc.documents[0].document) - assertEquals(testDoc2, findingWithDoc.documents[1].document) - } else { - fail("Found a finding that should not have been retrieved") - } - } - } - - fun `test find Finding for specific finding by id`() { - val testIndex = createTestIndex() - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_field" : "us-west-2" - }""" - indexDoc(testIndex, "someId", testDoc) - val testDoc2 = """{ - "message" : "This is an error2 from IAD region", - "test_field" : "us-west-3" - }""" - indexDoc(testIndex, "someId2", testDoc2) - - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - executeMonitor(trueMonitor.id, mapOf(Pair("dryrun", "true"))) - - createFinding(matchingDocIds = listOf("someId"), index = testIndex) - val findingId = createFinding(matchingDocIds = listOf("someId", "someId2"), index = testIndex) - val response = searchFindings(mapOf(Pair("findingId", findingId))) - assertEquals(1, response.totalFindings) - 
assertEquals(findingId, response.findings[0].finding.id) - assertEquals(2, response.findings[0].documents.size) - assertTrue(response.findings[0].documents[0].found) - assertTrue(response.findings[0].documents[1].found) - assertEquals(testDoc, response.findings[0].documents[0].document) - assertEquals(testDoc2, response.findings[0].documents[1].document) - } - - fun `test find Finding by tag`() { - val testIndex = createTestIndex() - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_field" : "us-west-2" - }""" - indexDoc(testIndex, "someId", testDoc) - val testDoc2 = """{ - "message" : "This is an error2 from IAD region", - "test_field" : "us-west-3" - }""" - indexDoc(testIndex, "someId2", testDoc2) - - val docLevelQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "realQuery", fields = listOf(), tags = listOf("sigma")) - val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docLevelQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - executeMonitor(trueMonitor.id, mapOf(Pair("dryrun", "true"))) - - createFinding(matchingDocIds = listOf("someId"), index = testIndex) - val findingId = createFinding( - matchingDocIds = listOf("someId", "someId2"), - index = testIndex, - docLevelQueries = listOf(docLevelQuery) - ) - val response = searchFindings(mapOf(Pair("searchString", "sigma"))) - assertEquals(1, response.totalFindings) - assertEquals(findingId, response.findings[0].finding.id) - assertEquals(2, response.findings[0].documents.size) - assertTrue(response.findings[0].documents[0].found) - assertTrue(response.findings[0].documents[1].found) - assertEquals(testDoc, response.findings[0].documents[0].document) - assertEquals(testDoc2, response.findings[0].documents[1].document) - } - - fun `test find Finding by name`() { - val testIndex = createTestIndex() - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_field" : "us-west-2" - }""" - indexDoc(testIndex, "someId", testDoc) - val testDoc2 = """{ - "message" : "This is an error2 from IAD region", - "test_field" : "us-west-3" - }""" - indexDoc(testIndex, "someId2", testDoc2) - - val docLevelQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "realQuery", fields = listOf(), tags = listOf("sigma")) - val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docLevelQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - executeMonitor(trueMonitor.id, mapOf(Pair("dryrun", "true"))) - - createFinding(matchingDocIds = listOf("someId"), index = testIndex) - val findingId = createFinding( - matchingDocIds = listOf("someId", "someId2"), - index = testIndex, - docLevelQueries = listOf(docLevelQuery) - ) - val response = searchFindings(mapOf(Pair("searchString", "realQuery"))) - assertEquals(1, response.totalFindings) - assertEquals(findingId, response.findings[0].finding.id) - assertEquals(2, response.findings[0].documents.size) - assertTrue(response.findings[0].documents[0].found) - assertTrue(response.findings[0].documents[1].found) - assertEquals(testDoc, response.findings[0].documents[0].document) - assertEquals(testDoc2, response.findings[0].documents[1].document) - } - - fun `test find Finding by monitor id`() { - val testIndex = 
createTestIndex() - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_field" : "us-west-2" - }""" - indexDoc(testIndex, "someId", testDoc) - val testDoc2 = """{ - "message" : "This is an error2 from IAD region", - "test_field" : "us-west-3" - }""" - indexDoc(testIndex, "someId2", testDoc2) - - val docLevelQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "realQuery", fields = listOf(), tags = listOf("sigma")) - val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docLevelQuery)) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val trueMonitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - executeMonitor(trueMonitor.id, mapOf(Pair("dryrun", "true"))) - - createFinding(matchingDocIds = listOf("someId"), index = testIndex) - val findingId = createFinding( - monitorId = "monitorToFind", - matchingDocIds = listOf("someId", "someId2"), - index = testIndex, - docLevelQueries = listOf(docLevelQuery) - ) - val response = searchFindings(mapOf(Pair("searchString", "monitorToFind"))) - assertEquals(1, response.totalFindings) - assertEquals(findingId, response.findings[0].finding.id) - assertEquals(2, response.findings[0].documents.size) - assertTrue(response.findings[0].documents[0].found) - assertTrue(response.findings[0].documents[1].found) - assertEquals(testDoc, response.findings[0].documents[0].document) - assertEquals(testDoc2, response.findings[0].documents[1].document) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt deleted file mode 100644 index c4ed196c7..000000000 --- a/alerting/bin/test/org/opensearch/alerting/resthandler/MonitorRestApiIT.kt +++ /dev/null @@ -1,1412 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ -package org.opensearch.alerting.resthandler - -import org.apache.http.HttpHeaders -import org.apache.http.entity.ContentType -import org.apache.http.message.BasicHeader -import org.apache.http.nio.entity.NStringEntity -import org.opensearch.alerting.ALERTING_BASE_URI -import org.opensearch.alerting.ALWAYS_RUN -import org.opensearch.alerting.ANOMALY_DETECTOR_INDEX -import org.opensearch.alerting.AlertingRestTestCase -import org.opensearch.alerting.LEGACY_OPENDISTRO_ALERTING_BASE_URI -import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.anomalyDetectorIndexMapping -import org.opensearch.alerting.core.settings.ScheduledJobSettings -import org.opensearch.alerting.makeRequest -import org.opensearch.alerting.model.destination.Chime -import org.opensearch.alerting.model.destination.Destination -import org.opensearch.alerting.randomADMonitor -import org.opensearch.alerting.randomAction -import org.opensearch.alerting.randomAlert -import org.opensearch.alerting.randomAnomalyDetector -import org.opensearch.alerting.randomAnomalyDetectorWithUser -import org.opensearch.alerting.randomBucketLevelMonitor -import org.opensearch.alerting.randomBucketLevelTrigger -import org.opensearch.alerting.randomDocumentLevelMonitor -import org.opensearch.alerting.randomDocumentLevelTrigger -import org.opensearch.alerting.randomQueryLevelMonitor -import org.opensearch.alerting.randomQueryLevelTrigger -import org.opensearch.alerting.randomThrottle -import org.opensearch.alerting.randomUser -import org.opensearch.alerting.settings.AlertingSettings -import 
org.opensearch.alerting.toJsonString -import org.opensearch.alerting.util.DestinationType -import org.opensearch.client.ResponseException -import org.opensearch.client.WarningFailureException -import org.opensearch.common.unit.TimeValue -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.CronSchedule -import org.opensearch.commons.alerting.model.DocLevelMonitorInput -import org.opensearch.commons.alerting.model.DocLevelQuery -import org.opensearch.commons.alerting.model.DocumentLevelTrigger -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.QueryLevelTrigger -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.alerting.model.SearchInput -import org.opensearch.core.common.bytes.BytesReference -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.index.query.QueryBuilders -import org.opensearch.script.Script -import org.opensearch.search.aggregations.AggregationBuilders -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.search.sort.SortOrder -import org.opensearch.test.OpenSearchTestCase -import org.opensearch.test.junit.annotations.TestLogging -import org.opensearch.test.rest.OpenSearchRestTestCase -import java.time.Instant -import java.time.ZoneId -import java.time.temporal.ChronoUnit -import java.util.concurrent.TimeUnit - -@TestLogging("level:DEBUG", reason = "Debug for tests.") -@Suppress("UNCHECKED_CAST") -class MonitorRestApiIT : AlertingRestTestCase() { - - val USE_TYPED_KEYS = ToXContent.MapParams(mapOf("with_type" to "true")) - - @Throws(Exception::class) - fun `test plugin is loaded`() { - val response = entityAsMap(OpenSearchRestTestCase.client().makeRequest("GET", "_nodes/plugins")) - val nodesInfo = response["nodes"] as Map<String, Map<String, Any>> - for (nodeInfo in nodesInfo.values) { - val plugins = nodeInfo["plugins"] as List<Map<String, Any>> - for (plugin in plugins) { - if (plugin["name"] == "opensearch-alerting") { - return - } - } - } - fail("Plugin not installed") - } - - fun `test parsing monitor as a scheduled job`() { - val monitor = createRandomMonitor() - - val builder = monitor.toXContentWithUser(XContentBuilder.builder(XContentType.JSON.xContent()), USE_TYPED_KEYS) - val string = BytesReference.bytes(builder).utf8ToString() - val xcp = createParser(XContentType.JSON.xContent(), string) - val scheduledJob = ScheduledJob.parse(xcp, monitor.id, monitor.version) - assertEquals(monitor, scheduledJob) - } - - @Throws(Exception::class) - fun `test creating a monitor`() { - val monitor = randomQueryLevelMonitor() - - val createResponse = client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - - assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus()) - val responseBody = createResponse.asMap() - val createdId = responseBody["_id"] as String - val createdVersion = responseBody["_version"] as Int - assertNotEquals("response is missing Id", Monitor.NO_ID, createdId) - assertTrue("incorrect version", createdVersion > 0) - assertEquals("Incorrect Location header", "$ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) - } - - @Throws(Exception::class) - fun `test creating a bucket monitor`() { - val monitor = randomBucketLevelMonitor() - - val createResponse = client().makeRequest("POST", ALERTING_BASE_URI, 
emptyMap(), monitor.toHttpEntity()) - - assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus()) - val responseBody = createResponse.asMap() - val createdId = responseBody["_id"] as String - val createdVersion = responseBody["_version"] as Int - assertNotEquals("response is missing Id", Monitor.NO_ID, createdId) - assertTrue("incorrect version", createdVersion > 0) - assertEquals("Incorrect Location header", "$ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) - } - - fun `test creating a monitor with legacy ODFE`() { - val monitor = randomQueryLevelMonitor() - val createResponse = client().makeRequest("POST", LEGACY_OPENDISTRO_ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus()) - val responseBody = createResponse.asMap() - val createdId = responseBody["_id"] as String - val createdVersion = responseBody["_version"] as Int - assertNotEquals("response is missing Id", Monitor.NO_ID, createdId) - assertTrue("incorrect version", createdVersion > 0) - } - - fun `test creating a monitor with action threshold greater than max threshold`() { - val monitor = randomMonitorWithThrottle(100000, ChronoUnit.MINUTES) - - try { - client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - } - } - - fun `test creating a monitor with action threshold less than min threshold`() { - val monitor = randomMonitorWithThrottle(-1) - - try { - client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - } - } - - fun `test creating a monitor with updating action threshold`() { - adminClient().updateSettings("plugins.alerting.action_throttle_max_value", TimeValue.timeValueHours(1)) - - val monitor = randomMonitorWithThrottle(2, ChronoUnit.HOURS) - - try { - client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - } - adminClient().updateSettings("plugins.alerting.action_throttle_max_value", TimeValue.timeValueHours(24)) - } - - fun `test creating a monitor with PUT fails`() { - try { - val monitor = randomQueryLevelMonitor() - client().makeRequest("PUT", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - fail("Expected 405 Method Not Allowed response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.METHOD_NOT_ALLOWED, e.response.restStatus()) - } - } - - fun `test creating a monitor with illegal index name`() { - try { - val si = SearchInput(listOf("_#*IllegalIndexCharacters"), SearchSourceBuilder().query(QueryBuilders.matchAllQuery())) - val monitor = randomQueryLevelMonitor() - client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.copy(inputs = listOf(si)).toHttpEntity()) - } catch (e: ResponseException) { - // When an index with invalid name is mentioned, instead of returning invalid_index_name_exception security plugin throws security_exception. - // Refer: https://github.com/opendistro-for-elasticsearch/security/issues/718 - // Without security plugin we get BAD_REQUEST correctly. With security_plugin we get INTERNAL_SERVER_ERROR, till above issue is fixed. 
- assertTrue( - "Unexpected status", - listOf(RestStatus.BAD_REQUEST, RestStatus.FORBIDDEN).contains(e.response.restStatus()) - ) - } - } - - fun `test creating an AD monitor without detector index`() { - try { - val monitor = randomADMonitor() - - client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - } catch (e: ResponseException) { - // When a user creates an AD monitor without the detector index, an index not found exception is thrown - assertTrue("Unexpected error", e.message!!.contains("Configured indices are not found")) - assertTrue( - "Unexpected status", - listOf(RestStatus.NOT_FOUND).contains(e.response.restStatus()) - ) - } - } - - fun `test creating an AD monitor with detector index created but no detectors`() { - createAnomalyDetectorIndex() - try { - val monitor = randomADMonitor() - client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - } catch (e: ResponseException) { - // When a user creates an AD monitor with no detectors, an exception is thrown - assertTrue("Unexpected error", e.message!!.contains("User has no available detectors")) - assertTrue( - "Unexpected status", - listOf(RestStatus.NOT_FOUND).contains(e.response.restStatus()) - ) - } - } - - fun `test creating an AD monitor with no detector has monitor backend role`() { - if (!securityEnabled()) { - createAnomalyDetectorIndex() - // TODO: change to REST API call to test security enabled case - indexDoc(ANOMALY_DETECTOR_INDEX, "1", randomAnomalyDetector()) - indexDoc(ANOMALY_DETECTOR_INDEX, "2", randomAnomalyDetectorWithUser(randomAlphaOfLength(5))) - try { - val monitor = randomADMonitor() - client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - } catch (e: ResponseException) { - // When a user creates an AD monitor and no detector has the monitor's backend role, an exception is thrown - assertTrue("Unexpected error", e.message!!.contains("User has no available detectors")) - assertTrue( - "Unexpected status", - listOf(RestStatus.NOT_FOUND).contains(e.response.restStatus()) - ) - } - } - } - - /* - fun `test creating an AD monitor with detector has monitor backend role`() { - createAnomalyDetectorIndex() - val backendRole = "test-role" - val user = randomADUser(backendRole) - indexDoc(ANOMALY_DETECTOR_INDEX, "1", randomAnomalyDetector()) - indexDoc(ANOMALY_DETECTOR_INDEX, "2", randomAnomalyDetectorWithUser(randomAlphaOfLength(5))) - indexDoc(ANOMALY_DETECTOR_INDEX, "3", randomAnomalyDetectorWithUser(backendRole = backendRole), refresh = true) - - val monitor = randomADMonitor(user = user) - val createResponse = client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus()) - val responseBody = createResponse.asMap() - val createdId = responseBody["_id"] as String - val createdVersion = responseBody["_version"] as Int - assertNotEquals("response is missing Id", Monitor.NO_ID, createdId) - assertTrue("incorrect version", createdVersion > 0) - assertEquals("Incorrect Location header", "$ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) - }*/ - - private fun createAnomalyDetectorIndex() { - try { - createTestIndex(ANOMALY_DETECTOR_INDEX, anomalyDetectorIndexMapping()) - } catch (e: Exception) { - // WarningFailureException is expected as we are creating a system index starting with a dot - assertTrue(e is WarningFailureException) - } - } - - /* Enable this test case after checking for disallowed destination during Monitor creation is added in - 
fun `test creating a monitor with a disallowed destination type fails`() { - try { - // Create a Chime Destination - val chime = Chime("http://abc.com") - val destination = Destination( - type = DestinationType.CHIME, - name = "test", - user = randomUser(), - lastUpdateTime = Instant.now(), - chime = chime, - slack = null, - customWebhook = null, - email = null - ) - val chimeDestination = createDestination(destination = destination) - - // Remove Chime from the allow_list - val allowedDestinations = DestinationType.values().toList() - .filter { destinationType -> destinationType != DestinationType.CHIME } - .joinToString(prefix = "[", postfix = "]") { string -> "\"$string\"" } - client().updateSettings(DestinationSettings.ALLOW_LIST.key, allowedDestinations) - - createMonitor(randomQueryLevelMonitor(triggers = listOf(randomQueryLevelTrigger(destinationId = chimeDestination.id)))) - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } - } - */ - - @Throws(Exception::class) - fun `test updating search for a monitor`() { - val monitor = createRandomMonitor() - - val updatedSearch = SearchInput( - emptyList(), - SearchSourceBuilder().query(QueryBuilders.termQuery("foo", "bar")) - ) - val updateResponse = client().makeRequest( - "PUT", - monitor.relativeUrl(), - emptyMap(), - monitor.copy(inputs = listOf(updatedSearch)).toHttpEntity() - ) - - assertEquals("Update monitor failed", RestStatus.OK, updateResponse.restStatus()) - val responseBody = updateResponse.asMap() - assertEquals("Updated monitor id doesn't match", monitor.id, responseBody["_id"] as String) - assertEquals("Version not incremented", (monitor.version + 1).toInt(), responseBody["_version"] as Int) - - val updatedMonitor = getMonitor(monitor.id) - assertEquals("Monitor search not updated", listOf(updatedSearch), updatedMonitor.inputs) - } - - @Throws(Exception::class) - fun `test updating conditions for a monitor`() { - val monitor = createRandomMonitor() - - val updatedTriggers = listOf( - QueryLevelTrigger( - name = "foo", - severity = "1", - condition = Script("return true"), - actions = emptyList() - ) - ) - val updateResponse = client().makeRequest( - "PUT", - monitor.relativeUrl(), - emptyMap(), - monitor.copy(triggers = updatedTriggers).toHttpEntity() - ) - - assertEquals("Update monitor failed", RestStatus.OK, updateResponse.restStatus()) - val responseBody = updateResponse.asMap() - assertEquals("Updated monitor id doesn't match", monitor.id, responseBody["_id"] as String) - assertEquals("Version not incremented", (monitor.version + 1).toInt(), responseBody["_version"] as Int) - - val updatedMonitor = getMonitor(monitor.id) - assertEquals("Monitor trigger not updated", updatedTriggers, updatedMonitor.triggers) - } - - @Throws(Exception::class) - fun `test updating schedule for a monitor`() { - val monitor = createRandomMonitor() - - val updatedSchedule = CronSchedule(expression = "0 9 * * *", timezone = ZoneId.of("UTC")) - val updateResponse = client().makeRequest( - "PUT", - monitor.relativeUrl(), - emptyMap(), - monitor.copy(schedule = updatedSchedule).toHttpEntity() - ) - - assertEquals("Update monitor failed", RestStatus.OK, updateResponse.restStatus()) - val responseBody = updateResponse.asMap() - assertEquals("Updated monitor id doesn't match", monitor.id, responseBody["_id"] as String) - assertEquals("Version not incremented", (monitor.version + 1).toInt(), responseBody["_version"] as Int) - - val 
updatedMonitor = getMonitor(monitor.id) - assertEquals("Monitor trigger not updated", updatedSchedule, updatedMonitor.schedule) - } - - @Throws(Exception::class) - fun `test getting a monitor`() { - val monitor = createRandomMonitor() - - val storedMonitor = getMonitor(monitor.id) - - assertEquals("Indexed and retrieved monitor differ", monitor, storedMonitor) - } - - @Throws(Exception::class) - fun `test getting a monitor that doesn't exist`() { - try { - getMonitor(randomAlphaOfLength(20)) - fail("expected response exception") - } catch (e: ResponseException) { - assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) - } - } - - @Throws(Exception::class) - fun `test checking if a monitor exists`() { - val monitor = createRandomMonitor() - - val headResponse = client().makeRequest("HEAD", monitor.relativeUrl()) - assertEquals("Unable to HEAD monitor", RestStatus.OK, headResponse.restStatus()) - assertNull("Response contains unexpected body", headResponse.entity) - } - - fun `test checking if a non-existent monitor exists`() { - val headResponse = client().makeRequest("HEAD", "$ALERTING_BASE_URI/foobarbaz") - assertEquals("Unexpected status", RestStatus.NOT_FOUND, headResponse.restStatus()) - } - - @Throws(Exception::class) - fun `test deleting a monitor`() { - val monitor = createRandomMonitor() - - val deleteResponse = client().makeRequest("DELETE", monitor.relativeUrl()) - assertEquals("Delete failed", RestStatus.OK, deleteResponse.restStatus()) - - val getResponse = client().makeRequest("HEAD", monitor.relativeUrl()) - assertEquals("Deleted monitor still exists", RestStatus.NOT_FOUND, getResponse.restStatus()) - } - - @Throws(Exception::class) - fun `test deleting a monitor that doesn't exist`() { - try { - client().makeRequest("DELETE", "$ALERTING_BASE_URI/foobarbaz") - fail("expected 404 ResponseException") - } catch (e: ResponseException) { - assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) - } - } - - fun `test getting UI metadata monitor not from OpenSearch Dashboards`() { - val monitor = createRandomMonitor(withMetadata = true) - val getMonitor = getMonitor(monitorId = monitor.id) - assertEquals( - "UI Metadata returned but request did not come from OpenSearch Dashboards.", - getMonitor.uiMetadata, - mapOf() - ) - } - - fun `test getting UI metadata monitor from OpenSearch Dashboards`() { - val monitor = createRandomMonitor(refresh = true, withMetadata = true) - val header = BasicHeader(HttpHeaders.USER_AGENT, "OpenSearch-Dashboards") - val getMonitor = getMonitor(monitorId = monitor.id, header = header) - assertEquals("", monitor.uiMetadata, getMonitor.uiMetadata) - } - - fun `test query a monitor that exists`() { - val monitor = createRandomMonitor(true) - - val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() - val searchResponse = client().makeRequest( - "GET", - "$ALERTING_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) - val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) - val hits = xcp.map()["hits"]!! 
as Map<String, Map<String, Any>> - val numberDocsFound = hits["total"]?.get("value") - assertEquals("Monitor not found during search", 1, numberDocsFound) - } - - fun `test query a monitor that exists POST`() { - val monitor = createRandomMonitor(true) - - val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() - val searchResponse = client().makeRequest( - "POST", - "$ALERTING_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) - val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) - val hits = xcp.map()["hits"]!! as Map<String, Map<String, Any>> - val numberDocsFound = hits["total"]?.get("value") - assertEquals("Monitor not found during search", 1, numberDocsFound) - } - - fun `test query a monitor that doesn't exist`() { - // Create a random monitor to create the ScheduledJob index. Otherwise the test will fail with 404 index not found. - createRandomMonitor(refresh = true) - val search = SearchSourceBuilder().query( - QueryBuilders.termQuery( - OpenSearchTestCase.randomAlphaOfLength(5), - OpenSearchTestCase.randomAlphaOfLength(5) - ) - ).toString() - - val searchResponse = client().makeRequest( - "GET", - "$ALERTING_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) - val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) - val hits = xcp.map()["hits"]!! as Map<String, Map<String, Any>> - val numberDocsFound = hits["total"]?.get("value") - assertEquals("Monitor found during search when no document present.", 0, numberDocsFound) - } - - fun `test query a monitor with UI metadata from OpenSearch Dashboards`() { - val monitor = createRandomMonitor(refresh = true, withMetadata = true) - val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() - val header = BasicHeader(HttpHeaders.USER_AGENT, "OpenSearch-Dashboards") - val searchResponse = client().makeRequest( - "GET", - "$ALERTING_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON), - header - ) - assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) - - val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) - val hits = xcp.map()["hits"] as Map<String, Map<String, Any>> - val numberDocsFound = hits["total"]?.get("value") - assertEquals("Monitor not found during search", 1, numberDocsFound) - - val searchHits = hits["hits"] as List<Any> - val hit = searchHits[0] as Map<String, Any> - val monitorHit = hit["_source"] as Map<String, Any> - assertNotNull( - "UI Metadata returned from search but request did not come from OpenSearchDashboards", - monitorHit[Monitor.UI_METADATA_FIELD] - ) - } - - fun `test query a monitor with UI metadata as user`() { - val monitor = createRandomMonitor(refresh = true, withMetadata = true) - val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() - val searchResponse = client().makeRequest( - "GET", - "$ALERTING_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) - - val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) - val hits = xcp.map()["hits"] as Map<String, Map<String, Any>> - val numberDocsFound = hits["total"]?.get("value") - assertEquals("Monitor not found during search", 1, numberDocsFound) - - 
val searchHits = hits["hits"] as List<Any> - val hit = searchHits[0] as Map<String, Any> - val monitorHit = hit["_source"] as Map<String, Any> - assertNull( - "UI Metadata returned from search but request did not come from OpenSearchDashboards", - monitorHit[Monitor.UI_METADATA_FIELD] - ) - } - - fun `test acknowledge all alert states`() { - putAlertMappings() // Required as we do not have a create alert API. - val monitor = createRandomMonitor(refresh = true) - val acknowledgedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) - val completedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.COMPLETED)) - val errorAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) - val activeAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) - val invalidAlert = randomAlert(monitor).copy(id = "foobar") - - val response = acknowledgeAlerts(monitor, acknowledgedAlert, completedAlert, errorAlert, activeAlert, invalidAlert) - val responseMap = response.asMap() - - val activeAlertAcknowledged = searchAlerts(monitor).single { it.id == activeAlert.id } - assertNotNull("Unsuccessful acknowledgement", responseMap["success"] as List<String>) - assertTrue("Alert not in acknowledged response", responseMap["success"].toString().contains(activeAlert.id)) - assertEquals("Alert not acknowledged.", Alert.State.ACKNOWLEDGED, activeAlertAcknowledged.state) - assertNotNull("Alert acknowledged time is NULL", activeAlertAcknowledged.acknowledgedTime) - - val failedResponseList = responseMap["failed"].toString() - assertTrue("Alert in state ${acknowledgedAlert.state} not found in failed list", failedResponseList.contains(acknowledgedAlert.id)) - assertTrue("Alert in state ${errorAlert.state} not found in failed list", failedResponseList.contains(errorAlert.id)) - assertTrue("Alert in state ${completedAlert.state} not found in failed list", failedResponseList.contains(completedAlert.id)) - assertTrue("Invalid alert not found in failed list", failedResponseList.contains(invalidAlert.id)) - assertFalse("Alert in state ${activeAlert.state} found in failed list", failedResponseList.contains(activeAlert.id)) - } - - fun `test acknowledging more than 10 alerts at once`() { - // GIVEN - putAlertMappings() // Required as we do not have a create alert API. - val monitor = createRandomMonitor(refresh = true) - val alertsToAcknowledge = (1..15).map { createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) }.toTypedArray() - - // WHEN - val response = acknowledgeAlerts(monitor, *alertsToAcknowledge) - - // THEN - val responseMap = response.asMap() - val expectedAcknowledgedCount = alertsToAcknowledge.size - - val acknowledgedAlerts = responseMap["success"] as List<String> - assertTrue( - "Expected $expectedAcknowledgedCount alerts to be acknowledged successfully.", - acknowledgedAlerts.size == expectedAcknowledgedCount - ) - - val acknowledgedAlertsList = acknowledgedAlerts.toString() - alertsToAcknowledge.forEach { alert -> - assertTrue("Alert with ID ${alert.id} not found in acknowledged list.", acknowledgedAlertsList.contains(alert.id)) - } - - val failedResponse = responseMap["failed"] as List<String> - assertTrue("Expected 0 alerts to fail acknowledgment.", failedResponse.isEmpty()) - } - - fun `test acknowledging more than 10 alerts at once, including acknowledged alerts`() { - // GIVEN - putAlertMappings() // Required as we do not have a create alert API. 
- val monitor = createRandomMonitor(refresh = true) - val alertsGroup1 = (1..15).map { createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) }.toTypedArray() - acknowledgeAlerts(monitor, *alertsGroup1) // Acknowledging the first array of alerts. - - val alertsGroup2 = (1..15).map { createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) }.toTypedArray() - - // Creating an array of alerts that includes alerts that have been already acknowledged, and new alerts. - val alertsToAcknowledge = arrayOf(*alertsGroup1, *alertsGroup2) - - // WHEN - val response = acknowledgeAlerts(monitor, *alertsToAcknowledge) - - // THEN - val responseMap = response.asMap() - val expectedAcknowledgedCount = alertsToAcknowledge.size - alertsGroup1.size - - val acknowledgedAlerts = responseMap["success"] as List<String> - assertTrue( - "Expected $expectedAcknowledgedCount alerts to be acknowledged successfully.", - acknowledgedAlerts.size == expectedAcknowledgedCount - ) - - val acknowledgedAlertsList = acknowledgedAlerts.toString() - alertsGroup2.forEach { alert -> - assertTrue("Alert with ID ${alert.id} not found in acknowledged list.", acknowledgedAlertsList.contains(alert.id)) - } - alertsGroup1.forEach { alert -> - assertFalse("Alert with ID ${alert.id} found in acknowledged list.", acknowledgedAlertsList.contains(alert.id)) - } - - val failedResponse = responseMap["failed"] as List<String> - assertTrue("Expected ${alertsGroup1.size} alerts to fail acknowledgment.", failedResponse.size == alertsGroup1.size) - - val failedResponseList = failedResponse.toString() - alertsGroup1.forEach { alert -> - assertTrue("Alert with ID ${alert.id} not found in failed list.", failedResponseList.contains(alert.id)) - } - alertsGroup2.forEach { alert -> - assertFalse("Alert with ID ${alert.id} found in failed list.", failedResponseList.contains(alert.id)) - } - } - - @Throws(Exception::class) - fun `test acknowledging 0 alerts`() { - // GIVEN - putAlertMappings() // Required as we do not have a create alert API. - val monitor = createRandomMonitor(refresh = true) - val alertsToAcknowledge = arrayOf<Alert>() - - // WHEN & THEN - try { - acknowledgeAlerts(monitor, *alertsToAcknowledge) - fail("Expected acknowledgeAlerts to throw an exception.") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - } - } - - fun `test get all alerts in all states`() { - putAlertMappings() // Required as we do not have a create alert API. 
- val monitor = createRandomMonitor(refresh = true) - val acknowledgedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) - val completedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.COMPLETED)) - val errorAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) - val activeAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) - val invalidAlert = randomAlert(monitor).copy(id = "foobar") - - val inputMap = HashMap<String, Any>() - inputMap["missing"] = "_last" - - val responseMap = getAlerts(inputMap).asMap() - val alerts = responseMap["alerts"].toString() - - assertEquals(4, responseMap["totalAlerts"]) - assertTrue("Acknowledged alert with id, ${acknowledgedAlert.id}, not found in alert list", alerts.contains(acknowledgedAlert.id)) - assertTrue("Completed alert with id, ${completedAlert.id}, not found in alert list", alerts.contains(completedAlert.id)) - assertTrue("Error alert with id, ${errorAlert.id}, not found in alert list", alerts.contains(errorAlert.id)) - assertTrue("Active alert with id, ${activeAlert.id}, not found in alert list", alerts.contains(activeAlert.id)) - assertFalse("Invalid alert with id, ${invalidAlert.id}, found in alert list", alerts.contains(invalidAlert.id)) - } - - fun `test get all alerts with active states`() { - putAlertMappings() // Required as we do not have a create alert API. - val monitor = createRandomMonitor(refresh = true) - val acknowledgedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) - val completedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.COMPLETED)) - val errorAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) - val activeAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) - val invalidAlert = randomAlert(monitor).copy(id = "foobar") - - val inputMap = HashMap<String, Any>() - inputMap["alertState"] = Alert.State.ACTIVE.name - - val responseMap = getAlerts(inputMap).asMap() - val alerts = responseMap["alerts"].toString() - - assertEquals(1, responseMap["totalAlerts"]) - assertFalse("Acknowledged alert with id, ${acknowledgedAlert.id}, found in alert list", alerts.contains(acknowledgedAlert.id)) - assertFalse("Completed alert with id, ${completedAlert.id}, found in alert list", alerts.contains(completedAlert.id)) - assertFalse("Error alert with id, ${errorAlert.id}, found in alert list", alerts.contains(errorAlert.id)) - assertTrue("Active alert with id, ${activeAlert.id}, not found in alert list", alerts.contains(activeAlert.id)) - assertFalse("Invalid alert with id, ${invalidAlert.id}, found in alert list", alerts.contains(invalidAlert.id)) - } - - fun `test get all alerts with severity 1`() { - putAlertMappings() // Required as we do not have a create alert API. 
- val monitor = createRandomMonitor(refresh = true) - val acknowledgedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED, severity = "1")) - val completedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.COMPLETED, severity = "3")) - val errorAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR, severity = "1")) - val activeAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE, severity = "2")) - - val inputMap = HashMap<String, Any>() - inputMap["severityLevel"] = "1" - - val responseMap = getAlerts(inputMap).asMap() - val alerts = responseMap["alerts"].toString() - - assertEquals(2, responseMap["totalAlerts"]) - assertTrue( - "Acknowledged sev 1 alert with id, ${acknowledgedAlert.id}, not found in alert list", - alerts.contains(acknowledgedAlert.id) - ) - assertFalse("Completed sev 3 alert with id, ${completedAlert.id}, found in alert list", alerts.contains(completedAlert.id)) - assertTrue("Error sev 1 alert with id, ${errorAlert.id}, not found in alert list", alerts.contains(errorAlert.id)) - assertFalse("Active sev 2 alert with id, ${activeAlert.id}, found in alert list", alerts.contains(activeAlert.id)) - } - - fun `test get all alerts for a specific monitor by id`() { - putAlertMappings() // Required as we do not have a create alert API. - val monitor = createRandomMonitor(refresh = true) - val monitor2 = createRandomMonitor(refresh = true) - val acknowledgedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) - val completedAlert = createAlert(randomAlert(monitor2).copy(state = Alert.State.COMPLETED)) - val errorAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) - val activeAlert = createAlert(randomAlert(monitor2).copy(state = Alert.State.ACTIVE)) - - val inputMap = HashMap<String, Any>() - inputMap["monitorId"] = monitor.id - - val responseMap = getAlerts(inputMap).asMap() - val alerts = responseMap["alerts"].toString() - - assertEquals(2, responseMap["totalAlerts"]) - assertTrue( - "Acknowledged alert for chosen monitor with id, ${acknowledgedAlert.id}, not found in alert list", - alerts.contains(acknowledgedAlert.id) - ) - assertFalse("Completed sev 3 alert with id, ${completedAlert.id}, found in alert list", alerts.contains(completedAlert.id)) - assertTrue("Error alert for chosen monitor with id, ${errorAlert.id}, not found in alert list", alerts.contains(errorAlert.id)) - assertFalse("Active alert sev 2 with id, ${activeAlert.id}, found in alert list", alerts.contains(activeAlert.id)) - } - - fun `test get alerts by searching monitor name`() { - putAlertMappings() // Required as we do not have a create alert API. 
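// A sketch of the full set of getAlerts query parameters these tests exercise, assuming the
// AlertingRestTestCase.getAlerts helper forwards them to the GET alerts API unchanged:
//   inputMap["alertState"] = Alert.State.ACTIVE.name  // filter by alert state
//   inputMap["severityLevel"] = "1"                   // filter by trigger severity
//   inputMap["monitorId"] = monitor.id                // filter by owning monitor
//   inputMap["searchString"] = monitor.name           // free-text match on monitor name
//   inputMap["missing"] = "_last"                     // sort placement for missing fields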
- - val monitor = createRandomMonitor(refresh = true) - val monitor2 = createRandomMonitor(refresh = true) - val acknowledgedAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) - val completedAlert = createAlert(randomAlert(monitor2).copy(state = Alert.State.COMPLETED)) - val errorAlert = createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) - val activeAlert = createAlert(randomAlert(monitor2).copy(state = Alert.State.ACTIVE)) - - val inputMap = HashMap<String, Any>() - inputMap["searchString"] = monitor.name - - val responseMap = getAlerts(inputMap).asMap() - val alerts = responseMap["alerts"].toString() - - assertEquals(2, responseMap["totalAlerts"]) - assertTrue( - "Acknowledged alert for matching monitor with id, ${acknowledgedAlert.id}, not found in alert list", - alerts.contains(acknowledgedAlert.id) - ) - assertFalse("Completed sev 3 alert with id, ${completedAlert.id}, found in alert list", alerts.contains(completedAlert.id)) - assertTrue("Error alert for matching monitor with id, ${errorAlert.id}, not found in alert list", alerts.contains(errorAlert.id)) - assertFalse("Active alert sev 2 with id, ${activeAlert.id}, found in alert list", alerts.contains(activeAlert.id)) - } - - fun `test mappings after monitor creation`() { - createRandomMonitor(refresh = true) - - val response = client().makeRequest("GET", "/${ScheduledJob.SCHEDULED_JOBS_INDEX}/_mapping") - val parserMap = createParser(XContentType.JSON.xContent(), response.entity.content).map() as Map<String, Map<String, Any>> - val mappingsMap = parserMap[ScheduledJob.SCHEDULED_JOBS_INDEX]!!["mappings"] as Map<String, Any> - val expected = createParser( - XContentType.JSON.xContent(), - javaClass.classLoader.getResource("mappings/scheduled-jobs.json").readText() - ) - val expectedMap = expected.map() - - assertEquals("Mappings are different", expectedMap, mappingsMap) - } - - fun `test delete monitor moves alerts`() { - client().updateSettings(ScheduledJobSettings.SWEEPER_ENABLED.key, true) - putAlertMappings() - val monitor = createRandomMonitor(true) - val alert = createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) - refreshIndex("*") - val deleteResponse = client().makeRequest("DELETE", "$ALERTING_BASE_URI/${monitor.id}") - assertEquals("Delete request not successful", RestStatus.OK, deleteResponse.restStatus()) - - // Wait 5 seconds for event to be processed and alerts moved - Thread.sleep(5000) - - val alerts = searchAlerts(monitor) - assertEquals("Active alert was not deleted", 0, alerts.size) - - val historyAlerts = searchAlerts(monitor, AlertIndices.ALERT_HISTORY_WRITE_INDEX) - assertEquals("Alert was not moved to history", 1, historyAlerts.size) - assertEquals( - "Alert data incorrect", - alert.copy(state = Alert.State.DELETED).toJsonString(), - historyAlerts.single().toJsonString() - ) - } - - fun `test delete trigger moves alerts then try to search alert by monitorId to find alert in history index`() { - client().updateSettings(ScheduledJobSettings.SWEEPER_ENABLED.key, true) - putAlertMappings() - val trigger = randomQueryLevelTrigger() - val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger))) - val alert = createAlert(randomAlert(monitor).copy(triggerId = trigger.id, state = Alert.State.ACTIVE)) - refreshIndex("*") - val updatedMonitor = monitor.copy(triggers = emptyList()) - val updateResponse = client().makeRequest( - "PUT", - "$ALERTING_BASE_URI/${monitor.id}", - emptyMap(), - updatedMonitor.toHttpEntity() - ) - assertEquals("Update request not successful", RestStatus.OK, 
updateResponse.restStatus()) - - // Wait 5 seconds for event to be processed and alerts moved - Thread.sleep(5000) - - val alerts = searchAlerts(monitor) - assertEquals("Active alert was not deleted", 0, alerts.size) - - // Find alert by id and make sure it checks the history of alerts as well - val inputMap = HashMap<String, Any>() - inputMap["monitorId"] = monitor.id - val responseMap = getAlerts(inputMap).asMap() - - assertEquals(1, responseMap["totalAlerts"]) - } - - fun `test delete trigger moves alerts`() { - client().updateSettings(ScheduledJobSettings.SWEEPER_ENABLED.key, true) - putAlertMappings() - val trigger = randomQueryLevelTrigger() - val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(trigger))) - val alert = createAlert(randomAlert(monitor).copy(triggerId = trigger.id, state = Alert.State.ACTIVE)) - refreshIndex("*") - val updatedMonitor = monitor.copy(triggers = emptyList()) - val updateResponse = client().makeRequest( - "PUT", - "$ALERTING_BASE_URI/${monitor.id}", - emptyMap(), - updatedMonitor.toHttpEntity() - ) - assertEquals("Update request not successful", RestStatus.OK, updateResponse.restStatus()) - - // Wait 5 seconds for event to be processed and alerts moved - Thread.sleep(5000) - - val alerts = searchAlerts(monitor) - assertEquals("Active alert was not deleted", 0, alerts.size) - - val historyAlerts = searchAlerts(monitor, AlertIndices.ALERT_HISTORY_WRITE_INDEX) - assertEquals("Alert was not moved to history", 1, historyAlerts.size) - assertEquals( - "Alert data incorrect", - alert.copy(state = Alert.State.DELETED).toJsonString(), - historyAlerts.single().toJsonString() - ) - } - - fun `test delete trigger moves alerts only for deleted trigger`() { - client().updateSettings(ScheduledJobSettings.SWEEPER_ENABLED.key, true) - putAlertMappings() - val triggerToDelete = randomQueryLevelTrigger() - val triggerToKeep = randomQueryLevelTrigger() - val monitor = createMonitor(randomQueryLevelMonitor(triggers = listOf(triggerToDelete, triggerToKeep))) - val alertKeep = createAlert(randomAlert(monitor).copy(triggerId = triggerToKeep.id, state = Alert.State.ACTIVE)) - val alertDelete = createAlert(randomAlert(monitor).copy(triggerId = triggerToDelete.id, state = Alert.State.ACTIVE)) - refreshIndex("*") - val updatedMonitor = monitor.copy(triggers = listOf(triggerToKeep)) - val updateResponse = client().makeRequest( - "PUT", - "$ALERTING_BASE_URI/${monitor.id}", - emptyMap(), - updatedMonitor.toHttpEntity() - ) - assertEquals("Update request not successful", RestStatus.OK, updateResponse.restStatus()) - // Wait until postIndex hook is executed due to monitor update - waitUntil({ - val alerts = searchAlerts(monitor) - if (alerts.size == 1) { - return@waitUntil true - } - return@waitUntil false - }, 60, TimeUnit.SECONDS) - val alerts = searchAlerts(monitor) - // We have two alerts from above, 1 for each trigger, there should be only 1 left in active index - assertEquals("One alert should be in active index", 1, alerts.size) - assertEquals("Wrong alert in active index", alertKeep.toJsonString(), alerts.single().toJsonString()) - - waitUntil({ - val alerts = searchAlerts(monitor, AlertIndices.ALERT_HISTORY_WRITE_INDEX) - if (alerts.size == 1) { - return@waitUntil true - } - return@waitUntil false - }, 60, TimeUnit.SECONDS) - - val historyAlerts = searchAlerts(monitor, AlertIndices.ALERT_HISTORY_WRITE_INDEX) - // Only alertDelete should have been moved to history index - assertEquals("One alert should be in history index", 1, historyAlerts.size) - assertEquals( - "Alert data 
incorrect", - alertDelete.copy(state = Alert.State.DELETED).toJsonString(), - historyAlerts.single().toJsonString() - ) - } - - fun `test update monitor with wrong version`() { - val monitor = createRandomMonitor(refresh = true) - try { - client().makeRequest( - "PUT", - "${monitor.relativeUrl()}?refresh=true&if_seq_no=1234&if_primary_term=1234", - emptyMap(), - monitor.toHttpEntity() - ) - fail("expected 409 ResponseException") - } catch (e: ResponseException) { - assertEquals(RestStatus.CONFLICT, e.response.restStatus()) - } - } - - fun `test monitor stats disable plugin`() { - // Disable the Monitor plugin. - disableScheduledJob() - - val responseMap = getAlertingStats() - assertAlertingStatsSweeperEnabled(responseMap, false) - assertEquals("Scheduled job index exists but there are no scheduled jobs.", false, responseMap["scheduled_job_index_exists"]) - val _nodes = responseMap["_nodes"] as Map - validateAlertingStatsNodeResponse(_nodes) - } - - fun `test monitor stats when disabling and re-enabling scheduled jobs with existing monitor`() { - // Enable Monitor jobs - enableScheduledJob() - val monitorId = createMonitor(randomQueryLevelMonitor(enabled = true), refresh = true).id - - var alertingStats = getAlertingStats() - assertAlertingStatsSweeperEnabled(alertingStats, true) - assertEquals("Scheduled job index does not exist", true, alertingStats["scheduled_job_index_exists"]) - assertEquals("Scheduled job index is not yellow", "yellow", alertingStats["scheduled_job_index_status"]) - assertEquals("Nodes are not on schedule", numberOfNodes, alertingStats["nodes_on_schedule"]) - - val _nodes = alertingStats["_nodes"] as Map - validateAlertingStatsNodeResponse(_nodes) - - assertTrue( - "Monitor [$monitorId] was not found scheduled based on the alerting stats response: $alertingStats", - isMonitorScheduled(monitorId, alertingStats) - ) - - // Disable Monitor jobs - disableScheduledJob() - - alertingStats = getAlertingStats() - assertAlertingStatsSweeperEnabled(alertingStats, false) - assertFalse( - "Monitor [$monitorId] was still scheduled based on the alerting stats response: $alertingStats", - isMonitorScheduled(monitorId, alertingStats) - ) - - // Re-enable Monitor jobs - enableScheduledJob() - - // Sleep briefly so sweep can reschedule the Monitor - Thread.sleep(2000) - - alertingStats = getAlertingStats() - assertAlertingStatsSweeperEnabled(alertingStats, true) - assertTrue( - "Monitor [$monitorId] was not re-scheduled based on the alerting stats response: $alertingStats", - isMonitorScheduled(monitorId, alertingStats) - ) - } - - fun `test monitor stats no jobs`() { - // Enable the Monitor plugin. - enableScheduledJob() - - val responseMap = getAlertingStats() - assertAlertingStatsSweeperEnabled(responseMap, true) - assertEquals("Scheduled job index exists but there are no scheduled jobs.", false, responseMap["scheduled_job_index_exists"]) - val _nodes = responseMap["_nodes"] as Map - validateAlertingStatsNodeResponse(_nodes) - } - - fun `test monitor stats jobs`() { - // Enable the Monitor plugin. 
- enableScheduledJob() - createRandomMonitor(refresh = true) - - val responseMap = getAlertingStats() - assertAlertingStatsSweeperEnabled(responseMap, true) - assertEquals("Scheduled job index does not exist", true, responseMap["scheduled_job_index_exists"]) - assertEquals("Scheduled job index is not yellow", "yellow", responseMap["scheduled_job_index_status"]) - assertEquals("Nodes are not on schedule", numberOfNodes, responseMap["nodes_on_schedule"]) - - val _nodes = responseMap["_nodes"] as Map - validateAlertingStatsNodeResponse(_nodes) - } - - @Throws(Exception::class) - fun `test max number of monitors`() { - client().updateSettings(AlertingSettings.ALERTING_MAX_MONITORS.key, "1") - - createRandomMonitor(refresh = true) - try { - createRandomMonitor(refresh = true) - fail("Request should be rejected as there are too many monitors.") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - } - } - - fun `test monitor specific metric`() { - // Enable the Monitor plugin. - enableScheduledJob() - createRandomMonitor(refresh = true) - - val responseMap = getAlertingStats("/jobs_info") - assertAlertingStatsSweeperEnabled(responseMap, true) - assertEquals("Scheduled job index does not exist", true, responseMap["scheduled_job_index_exists"]) - assertEquals("Scheduled job index is not yellow", "yellow", responseMap["scheduled_job_index_status"]) - assertEquals("Nodes not on schedule", numberOfNodes, responseMap["nodes_on_schedule"]) - - val _nodes = responseMap["_nodes"] as Map - validateAlertingStatsNodeResponse(_nodes) - } - - fun `test monitor stats incorrect metric`() { - try { - getAlertingStats("/foobarzzz") - fail("Incorrect stats metric should have failed") - } catch (e: ResponseException) { - assertEquals("Failed", RestStatus.BAD_REQUEST, e.response.restStatus()) - } - } - - fun `test monitor stats _all and other metric`() { - try { - getAlertingStats("/_all,jobs_info") - fail("Incorrect stats metric should have failed") - } catch (e: ResponseException) { - assertEquals("Failed", RestStatus.BAD_REQUEST, e.response.restStatus()) - } - } - - private fun randomMonitorWithThrottle(value: Int, unit: ChronoUnit = ChronoUnit.MINUTES): Monitor { - val throttle = randomThrottle(value, unit) - val action = randomAction().copy(throttle = throttle) - val trigger = randomQueryLevelTrigger(actions = listOf(action)) - return randomQueryLevelMonitor(triggers = listOf(trigger)) - } - - @Throws(Exception::class) - fun `test search monitors only`() { - // 1. create monitor - val monitor = randomQueryLevelMonitor() - val createResponse = client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus()) - - // 2. create destination - val chime = Chime("http://abc.com") - val destination = Destination( - type = DestinationType.CHIME, - name = "test", - user = randomUser(), - lastUpdateTime = Instant.now(), - chime = chime, - slack = null, - customWebhook = null, - email = null - ) - createDestination(destination) - - // 3. search - must return only monitors. 
- val search = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).toString() - val searchResponse = client().makeRequest( - "GET", - "$ALERTING_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) - val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) - val hits = xcp.map()["hits"]!! as Map> - val numberDocsFound = hits["total"]?.get("value") - assertEquals("Destination objects are also returned by /_search.", 1, numberDocsFound) - - val searchHits = hits["hits"] as List - val hit = searchHits[0] as Map - val monitorHit = hit["_source"] as Map - assertEquals("Type is not monitor", monitorHit[Monitor.TYPE_FIELD], "monitor") - } - - @Throws(Exception::class) - fun `test search monitor with alerting indices only`() { - // 1. search - must return error as invalid index is passed - val search = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).toString() - val params: MutableMap = HashMap() - params["index"] = "data-logs" - try { - client().makeRequest( - "GET", - "$ALERTING_BASE_URI/_search", - params, - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - } - } - - @Throws(Exception::class) - fun `test creating a document monitor`() { - val testIndex = createTestIndex() - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) - - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - - val createResponse = client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - - assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus()) - val responseBody = createResponse.asMap() - val createdId = responseBody["_id"] as String - val createdVersion = responseBody["_version"] as Int - assertNotEquals("response is missing Id", Monitor.NO_ID, createdId) - assertTrue("incorrect version", createdVersion > 0) - val actualLocation = createResponse.getHeader("Location") - assertEquals("Incorrect Location header", "$ALERTING_BASE_URI/$createdId", actualLocation) - } - - @Throws(Exception::class) - fun `test getting a document level monitor`() { - val testIndex = createTestIndex() - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) - - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val monitor = createMonitor( - randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger), user = null) - ) - - val storedMonitor = getMonitor(monitor.id) - - assertEquals("Indexed and retrieved monitor differ", monitor, storedMonitor) - } - - @Throws(Exception::class) - fun `test updating conditions for a doc-level monitor`() { - val testIndex = createTestIndex() - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) - - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val monitor = 
createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - - val updatedTriggers = listOf( - DocumentLevelTrigger( - name = "foo", - severity = "1", - condition = Script("return true"), - actions = emptyList() - ) - ) - val updateResponse = client().makeRequest( - "PUT", - monitor.relativeUrl(), - emptyMap(), - monitor.copy(triggers = updatedTriggers).toHttpEntity() - ) - - assertEquals("Update monitor failed", RestStatus.OK, updateResponse.restStatus()) - val responseBody = updateResponse.asMap() - assertEquals("Updated monitor id doesn't match", monitor.id, responseBody["_id"] as String) - assertEquals("Version not incremented", (monitor.version + 1).toInt(), responseBody["_version"] as Int) - - val updatedMonitor = getMonitor(monitor.id) - assertEquals("Monitor trigger not updated", updatedTriggers, updatedMonitor.triggers) - } - - @Throws(Exception::class) - fun `test deleting a document level monitor`() { - val testIndex = createTestIndex() - val docQuery = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput("description", listOf(testIndex), listOf(docQuery)) - - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val monitor = createMonitor(randomDocumentLevelMonitor(inputs = listOf(docLevelInput), triggers = listOf(trigger))) - - val deleteResponse = client().makeRequest("DELETE", monitor.relativeUrl()) - assertEquals("Delete failed", RestStatus.OK, deleteResponse.restStatus()) - - val getResponse = client().makeRequest("HEAD", monitor.relativeUrl()) - assertEquals("Deleted monitor still exists", RestStatus.NOT_FOUND, getResponse.restStatus()) - } - - fun `test creating a document monitor with error trigger`() { - val trigger = randomQueryLevelTrigger() - try { - val monitor = randomDocumentLevelMonitor(triggers = listOf(trigger)) - client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - fail("Monitor with illegal trigger should be rejected.") - } catch (e: IllegalArgumentException) { - assertEquals( - "a document monitor with error trigger", - "Incompatible trigger [${trigger.id}] for monitor type [${Monitor.MonitorType.DOC_LEVEL_MONITOR}]", - e.message - ) - } - } - - fun `test creating a query monitor with error trigger`() { - val trigger = randomBucketLevelTrigger() - try { - val monitor = randomQueryLevelMonitor(triggers = listOf(trigger)) - client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - fail("Monitor with illegal trigger should be rejected.") - } catch (e: IllegalArgumentException) { - assertEquals( - "a query monitor with error trigger", - "Incompatible trigger [${trigger.id}] for monitor type [${Monitor.MonitorType.QUERY_LEVEL_MONITOR}]", - e.message - ) - } - } - - /** - * This use case is needed by the frontend plugin for displaying alert counts on the Monitors list page. 
- * https://github.com/opensearch-project/alerting-dashboards-plugin/blob/main/server/services/MonitorService.js#L235 - */ - fun `test get acknowledged, active, error, and ignored alerts counts`() { - putAlertMappings() - val monitorAlertCounts = hashMapOf<String, HashMap<String, Int>>() - val numMonitors = randomIntBetween(1, 10) - repeat(numMonitors) { - val monitor = createRandomMonitor(refresh = true) - - val numAcknowledgedAlerts = randomIntBetween(1, 10) - val numActiveAlerts = randomIntBetween(1, 10) - var numCompletedAlerts = randomIntBetween(1, 10) - val numErrorAlerts = randomIntBetween(1, 10) - val numIgnoredAlerts = randomIntBetween(1, numCompletedAlerts) - numCompletedAlerts -= numIgnoredAlerts - - val alertCounts = hashMapOf( - Alert.State.ACKNOWLEDGED.name to numAcknowledgedAlerts, - Alert.State.ACTIVE.name to numActiveAlerts, - Alert.State.COMPLETED.name to numCompletedAlerts, - Alert.State.ERROR.name to numErrorAlerts, - "IGNORED" to numIgnoredAlerts - ) - monitorAlertCounts[monitor.id] = alertCounts - - repeat(numAcknowledgedAlerts) { - createAlert(randomAlert(monitor).copy(acknowledgedTime = Instant.now(), state = Alert.State.ACKNOWLEDGED)) - } - repeat(numActiveAlerts) { - createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) - } - repeat(numCompletedAlerts) { - createAlert(randomAlert(monitor).copy(acknowledgedTime = Instant.now(), state = Alert.State.COMPLETED)) - } - repeat(numErrorAlerts) { - createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) - } - repeat(numIgnoredAlerts) { - createAlert(randomAlert(monitor).copy(acknowledgedTime = null, state = Alert.State.COMPLETED)) - } - } - - val sourceBuilder = SearchSourceBuilder() - .size(0) - .query(QueryBuilders.termsQuery("monitor_id", monitorAlertCounts.keys)) - .aggregation( - AggregationBuilders - .terms("uniq_monitor_ids").field("monitor_id") - .subAggregation(AggregationBuilders.filter("active", QueryBuilders.termQuery("state", "ACTIVE"))) - .subAggregation(AggregationBuilders.filter("acknowledged", QueryBuilders.termQuery("state", "ACKNOWLEDGED"))) - .subAggregation(AggregationBuilders.filter("errors", QueryBuilders.termQuery("state", "ERROR"))) - .subAggregation( - AggregationBuilders.filter( - "ignored", - QueryBuilders.boolQuery() - .filter(QueryBuilders.termQuery("state", "COMPLETED")) - .mustNot(QueryBuilders.existsQuery("acknowledged_time")) - ) - ) - .subAggregation(AggregationBuilders.max("last_notification_time").field("last_notification_time")) - .subAggregation( - AggregationBuilders.topHits("latest_alert") - .size(1) - .sort("start_time", SortOrder.DESC) - .fetchSource(arrayOf("last_notification_time", "trigger_name"), null) - ) - ) - - val searchResponse = client().makeRequest( - "GET", - "$ALERTING_BASE_URI/_search", - hashMapOf("index" to AlertIndices.ALL_ALERT_INDEX_PATTERN), - NStringEntity(sourceBuilder.toString(), ContentType.APPLICATION_JSON) - ) - val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content).map() - val aggregations = (xcp["aggregations"]!! as Map<String, Map<String, Any>>) - val uniqMonitorIds = aggregations["uniq_monitor_ids"]!! - val buckets = uniqMonitorIds["buckets"]!! as ArrayList<Map<String, Any>> - - assertEquals("Incorrect number of monitors returned", monitorAlertCounts.keys.size, buckets.size) - buckets.forEach { bucket -> - val id = bucket["key"]!! - val monitorCounts = monitorAlertCounts[id]!! - - val acknowledged = (bucket["acknowledged"]!! as Map<String, Any>)["doc_count"]!! 
- assertEquals( - "Incorrect ${Alert.State.ACKNOWLEDGED} count returned for monitor $id", - monitorCounts[Alert.State.ACKNOWLEDGED.name], acknowledged - ) - - val active = (bucket["active"]!! as Map<String, Any>)["doc_count"]!! - assertEquals( - "Incorrect ${Alert.State.ACTIVE} count returned for monitor $id", - monitorCounts[Alert.State.ACTIVE.name], active - ) - - val errors = (bucket["errors"]!! as Map<String, Any>)["doc_count"]!! - assertEquals( - "Incorrect ${Alert.State.ERROR} count returned for monitor $id", - monitorCounts[Alert.State.ERROR.name], errors - ) - - val ignored = (bucket["ignored"]!! as Map<String, Any>)["doc_count"]!! - assertEquals( - "Incorrect IGNORED count returned for monitor $id", - monitorCounts["IGNORED"], ignored - ) - } - } - - private fun validateAlertingStatsNodeResponse(nodesResponse: Map<String, Any>) { - assertEquals("Incorrect number of nodes", numberOfNodes, nodesResponse["total"]) - assertEquals("Failed nodes found during monitor stats call", 0, nodesResponse["failed"]) - assertEquals("More than $numberOfNodes successful node", numberOfNodes, nodesResponse["successful"]) - } - - private fun isMonitorScheduled(monitorId: String, alertingStatsResponse: Map<String, Any>): Boolean { - val nodesInfo = alertingStatsResponse["nodes"] as Map<String, Any> - for (nodeId in nodesInfo.keys) { - val nodeInfo = nodesInfo[nodeId] as Map<String, Any> - val jobsInfo = nodeInfo["jobs_info"] as Map<String, Any> - if (jobsInfo.keys.contains(monitorId)) { - return true - } - } - - return false - } - - private fun assertAlertingStatsSweeperEnabled(alertingStatsResponse: Map<String, Any>, expected: Boolean) { - assertEquals( - "Legacy scheduled job enabled field is not set to $expected", - expected, - alertingStatsResponse[statsResponseOpendistroSweeperEnabledField] - ) - assertEquals( - "Scheduled job is not ${if (expected) "enabled" else "disabled"}", - expected, - alertingStatsResponse[statsResponseOpenSearchSweeperEnabledField] - ) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/SecureDestinationRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/SecureDestinationRestApiIT.kt deleted file mode 100644 index ecfdd6b4f..000000000 --- a/alerting/bin/test/org/opensearch/alerting/resthandler/SecureDestinationRestApiIT.kt +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.apache.http.HttpHeaders -import org.apache.http.message.BasicHeader -import org.junit.After -import org.junit.Before -import org.junit.BeforeClass -import org.opensearch.alerting.ALERTING_GET_DESTINATION_ACCESS -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.AlertingRestTestCase -import org.opensearch.alerting.TEST_HR_BACKEND_ROLE -import org.opensearch.alerting.TEST_HR_INDEX -import org.opensearch.alerting.TEST_HR_ROLE -import org.opensearch.alerting.makeRequest -import org.opensearch.alerting.model.destination.Chime -import org.opensearch.alerting.model.destination.Destination -import org.opensearch.alerting.model.destination.Slack -import org.opensearch.alerting.randomUser -import org.opensearch.alerting.util.DestinationType -import org.opensearch.client.RestClient -import org.opensearch.commons.rest.SecureRestClientBuilder -import org.opensearch.core.rest.RestStatus -import org.opensearch.test.junit.annotations.TestLogging -import java.time.Instant - -@TestLogging("level:DEBUG", reason = "Debug for tests.") -@Suppress("UNCHECKED_CAST") -class SecureDestinationRestApiIT : AlertingRestTestCase() { - - companion object { - 
- @BeforeClass - @JvmStatic - fun setup() { - // things to execute once and keep around for the class - org.junit.Assume.assumeTrue(System.getProperty("security", "false")!!.toBoolean()) - } - } - - val user = "userA" - var userClient: RestClient? = null - - @Before - fun create() { - if (userClient == null) { - createUser(user, arrayOf()) - userClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), user, password).setSocketTimeout(60000).build() - } - } - - @After - fun cleanup() { - userClient?.close() - deleteUser(user) - } - - fun `test create destination with disable filter by`() { - disableFilterBy() - - val chime = Chime("http://abc.com") - val destination = Destination( - type = DestinationType.CHIME, - name = "test", - user = randomUser(), - lastUpdateTime = Instant.now(), - chime = chime, - slack = null, - customWebhook = null, - email = null - ) - val createdDestination = createDestination(destination = destination) - assertEquals("Incorrect destination name", createdDestination.name, "test") - assertEquals("Incorrect destination type", createdDestination.type, DestinationType.CHIME) - } - - fun `test get destinations with a destination type and disable filter by`() { - disableFilterBy() - val slack = Slack("url") - val destination = Destination( - type = DestinationType.SLACK, - name = "testSlack", - user = randomUser(), - lastUpdateTime = Instant.now(), - chime = null, - slack = slack, - customWebhook = null, - email = null - ) - - // 1. create a destination as admin user - createDestination(destination, true) - - val inputMap = HashMap<String, Any>() - inputMap["missing"] = "_last" - inputMap["destinationType"] = "slack" - - // 2. get destinations as admin user - val adminResponse = getDestinations(client(), inputMap) - assertEquals(1, adminResponse.size) - } - - fun `test get destinations with a destination type and filter by`() { - enableFilterBy() - - val slack = Slack("url") - val destination = Destination( - type = DestinationType.SLACK, - name = "testSlack", - user = randomUser(), - lastUpdateTime = Instant.now(), - chime = null, - slack = slack, - customWebhook = null, - email = null - ) - - // 1. create a destination as admin user - createDestination(destination, true) - - val inputMap = HashMap<String, Any>() - inputMap["missing"] = "_last" - inputMap["destinationType"] = "slack" - - // 2. 
get destinations as admin user - val adminResponse = getDestinations(client(), inputMap) - assertEquals(1, adminResponse.size) - } - - // Destination related tests - - fun `test get destination with an user with get destination role`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_GET_DESTINATION_ACCESS) - ) - - createDestination(getTestDestination()) - - try { - val getDestinationResponse = userClient?.makeRequest( - "GET", - AlertingPlugin.DESTINATION_BASE_URI, - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - assertEquals("Get destinations failed", RestStatus.OK, getDestinationResponse?.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt deleted file mode 100644 index 00b5475c5..000000000 --- a/alerting/bin/test/org/opensearch/alerting/resthandler/SecureEmailAccountRestApiIT.kt +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.apache.http.HttpHeaders -import org.apache.http.entity.ContentType -import org.apache.http.entity.StringEntity -import org.apache.http.message.BasicHeader -import org.junit.After -import org.junit.Before -import org.junit.BeforeClass -import org.opensearch.alerting.ALERTING_GET_EMAIL_ACCOUNT_ACCESS -import org.opensearch.alerting.ALERTING_NO_ACCESS_ROLE -import org.opensearch.alerting.ALERTING_SEARCH_EMAIL_ACCOUNT_ACCESS -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.AlertingRestTestCase -import org.opensearch.alerting.TEST_HR_BACKEND_ROLE -import org.opensearch.alerting.TEST_HR_INDEX -import org.opensearch.alerting.TEST_HR_ROLE -import org.opensearch.alerting.makeRequest -import org.opensearch.client.ResponseException -import org.opensearch.client.RestClient -import org.opensearch.commons.rest.SecureRestClientBuilder -import org.opensearch.core.rest.RestStatus - -val SEARCH_EMAIL_ACCOUNT_DSL = """ - { - "from": 0, - "size": 20, - "sort": { "email_account.name.keyword": "desc" }, - "query": { - "bool": { - "must": { - "match_all": {} - } - } - } - } -""".trimIndent() - -class SecureEmailAccountRestApiIT : AlertingRestTestCase() { - - companion object { -
- - fun `test get email accounts with an user with get email account role`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_GET_EMAIL_ACCOUNT_ACCESS) - ) - - val emailAccount = createRandomEmailAccountWithGivenName(true, randomAlphaOfLength(5)) - - try { - val emailAccountResponse = userClient?.makeRequest( - "GET", - "${AlertingPlugin.EMAIL_ACCOUNT_BASE_URI}/${emailAccount.id}", - StringEntity( - emailAccount.toJsonString(), - ContentType.APPLICATION_JSON - ), - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - - assertEquals("Get Email failed", RestStatus.OK, emailAccountResponse?.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - fun `test search email accounts with an user with search email account role`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_SEARCH_EMAIL_ACCOUNT_ACCESS) - ) - - createRandomEmailAccountWithGivenName(true, randomAlphaOfLength(10)) - - try { - val searchEmailAccountResponse = userClient?.makeRequest( - "POST", - "${AlertingPlugin.EMAIL_ACCOUNT_BASE_URI}/_search", - StringEntity(SEARCH_EMAIL_ACCOUNT_DSL, ContentType.APPLICATION_JSON), - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - assertEquals("Search Email failed", RestStatus.OK, searchEmailAccountResponse?.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - /* - TODO: https://github.com/opensearch-project/alerting/issues/300 - */ - fun `test get email accounts with an user without get email account role`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) - ) - val emailAccount = createRandomEmailAccountWithGivenName(true, randomAlphaOfLength(5)) - try { - userClient?.makeRequest( - "GET", - "${AlertingPlugin.EMAIL_ACCOUNT_BASE_URI}/${emailAccount.id}", - StringEntity( - emailAccount.toJsonString(), - ContentType.APPLICATION_JSON - ), - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - fun `test search email accounts with an user without search email account role`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) - ) - createRandomEmailAccountWithGivenName(true, randomAlphaOfLength(5)) - try { - userClient?.makeRequest( - "POST", - "${AlertingPlugin.EMAIL_ACCOUNT_BASE_URI}/_search", - StringEntity(SEARCH_EMAIL_ACCOUNT_DSL, ContentType.APPLICATION_JSON), - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt deleted file mode 100644 index 617f652ee..000000000 --- 
a/alerting/bin/test/org/opensearch/alerting/resthandler/SecureEmailGroupsRestApiIT.kt +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.apache.http.HttpHeaders -import org.apache.http.entity.ContentType -import org.apache.http.entity.StringEntity -import org.apache.http.message.BasicHeader -import org.junit.After -import org.junit.Before -import org.junit.BeforeClass -import org.opensearch.alerting.ALERTING_GET_EMAIL_GROUP_ACCESS -import org.opensearch.alerting.ALERTING_SEARCH_EMAIL_GROUP_ACCESS -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.AlertingRestTestCase -import org.opensearch.alerting.TEST_HR_BACKEND_ROLE -import org.opensearch.alerting.TEST_HR_INDEX -import org.opensearch.alerting.TEST_HR_ROLE -import org.opensearch.alerting.makeRequest -import org.opensearch.client.RestClient -import org.opensearch.commons.rest.SecureRestClientBuilder -import org.opensearch.core.rest.RestStatus -import org.opensearch.test.junit.annotations.TestLogging - -val SEARCH_EMAIL_GROUP_DSL = """ - { - "from": 0, - "size": 20, - "sort": { "email_group.name.keyword": "desc" }, - "query": { - "bool": { - "must": { - "match_all": {} - } - } - } - } -""".trimIndent() - -@TestLogging("level:DEBUG", reason = "Debug for tests.") -@Suppress("UNCHECKED_CAST") -class SecureEmailGroupsRestApiIT : AlertingRestTestCase() { - companion object { - - @BeforeClass - @JvmStatic - fun setup() { - // things to execute once and keep around for the class - org.junit.Assume.assumeTrue(System.getProperty("security", "false")!!.toBoolean()) - } - } - - val user = "userC" - var userClient: RestClient? = null - - @Before - fun create() { - if (userClient == null) { - createUser(user, arrayOf()) - userClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), user, password).setSocketTimeout(60000).build() - } - } - - @After - fun cleanup() { - userClient?.close() - deleteUser(user) - } - - // Email groups related tests. 
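// Note: these secure REST suites all rely on the same client pattern: a dedicated client is built for
// the test user so that requests are evaluated against that user's roles rather than admin's. A
// minimal sketch of the pattern, reusing only helpers that appear in these files (the `testClient`
// name is illustrative):
//
//     val testClient: RestClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), user, password)
//         .setSocketTimeout(60000)
//         .build()
//     try {
//         // authenticated as `user`, not as the admin client returned by client()
//         testClient.makeRequest("GET", AlertingPlugin.EMAIL_GROUP_BASE_URI, null, BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"))
//     } finally {
//         testClient.close()
//     }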
- - fun `test get email groups with an user with get email group role`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_GET_EMAIL_GROUP_ACCESS) - ) - - val emailGroup = createRandomEmailGroupWithGivenName(true, randomAlphaOfLength(5)) - - try { - val getEmailGroupResponse = userClient?.makeRequest( - "GET", - "${AlertingPlugin.EMAIL_GROUP_BASE_URI}/${emailGroup.id}", - StringEntity( - emailGroup.toJsonString(), - ContentType.APPLICATION_JSON - ), - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - assertEquals("Get Email Group failed", RestStatus.OK, getEmailGroupResponse?.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - fun `test search email groups with an user with search email group role`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_SEARCH_EMAIL_GROUP_ACCESS) - ) - - createRandomEmailGroupWithGivenName(true, randomAlphaOfLength(10)) - - try { - val searchEmailGroupsResponse = userClient?.makeRequest( - "POST", - "${AlertingPlugin.EMAIL_GROUP_BASE_URI}/_search", - StringEntity( - SEARCH_EMAIL_GROUP_DSL, - ContentType.APPLICATION_JSON - ), - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - assertEquals("Search Email Group failed", RestStatus.OK, searchEmailGroupsResponse?.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/SecureMonitorRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/SecureMonitorRestApiIT.kt deleted file mode 100644 index 86207bab8..000000000 --- a/alerting/bin/test/org/opensearch/alerting/resthandler/SecureMonitorRestApiIT.kt +++ /dev/null @@ -1,1577 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.apache.http.HttpHeaders -import org.apache.http.entity.ContentType -import org.apache.http.entity.StringEntity -import org.apache.http.message.BasicHeader -import org.apache.http.nio.entity.NStringEntity -import org.junit.After -import org.junit.Before -import org.junit.BeforeClass -import org.opensearch.alerting.ADMIN -import org.opensearch.alerting.ALERTING_BASE_URI -import org.opensearch.alerting.ALERTING_DELETE_MONITOR_ACCESS -import org.opensearch.alerting.ALERTING_EXECUTE_MONITOR_ACCESS -import org.opensearch.alerting.ALERTING_FULL_ACCESS_ROLE -import org.opensearch.alerting.ALERTING_GET_ALERTS_ACCESS -import org.opensearch.alerting.ALERTING_GET_MONITOR_ACCESS -import org.opensearch.alerting.ALERTING_INDEX_MONITOR_ACCESS -import org.opensearch.alerting.ALERTING_NO_ACCESS_ROLE -import org.opensearch.alerting.ALERTING_READ_ONLY_ACCESS -import org.opensearch.alerting.ALERTING_SEARCH_MONITOR_ONLY_ACCESS -import org.opensearch.alerting.ALL_ACCESS_ROLE -import org.opensearch.alerting.ALWAYS_RUN -import org.opensearch.alerting.AlertingRestTestCase -import org.opensearch.alerting.DRYRUN_MONITOR -import org.opensearch.alerting.READALL_AND_MONITOR_ROLE -import org.opensearch.alerting.TERM_DLS_QUERY -import org.opensearch.alerting.TEST_HR_BACKEND_ROLE -import org.opensearch.alerting.TEST_HR_INDEX -import org.opensearch.alerting.TEST_HR_ROLE -import org.opensearch.alerting.TEST_NON_HR_INDEX -import org.opensearch.alerting.assertUserNull -import 
org.opensearch.alerting.makeRequest -import org.opensearch.alerting.randomAction -import org.opensearch.alerting.randomAlert -import org.opensearch.alerting.randomBucketLevelMonitor -import org.opensearch.alerting.randomBucketLevelTrigger -import org.opensearch.alerting.randomDocumentLevelMonitor -import org.opensearch.alerting.randomQueryLevelMonitor -import org.opensearch.alerting.randomQueryLevelTrigger -import org.opensearch.alerting.randomTemplateScript -import org.opensearch.client.Response -import org.opensearch.client.ResponseException -import org.opensearch.client.RestClient -import org.opensearch.common.settings.Settings -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentType -import org.opensearch.common.xcontent.json.JsonXContent -import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.DocLevelMonitorInput -import org.opensearch.commons.alerting.model.SearchInput -import org.opensearch.commons.authuser.User -import org.opensearch.commons.rest.SecureRestClientBuilder -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.index.query.QueryBuilders -import org.opensearch.script.Script -import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder -import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.test.junit.annotations.TestLogging - -@TestLogging("level:DEBUG", reason = "Debug for tests.") -@Suppress("UNCHECKED_CAST") -class SecureMonitorRestApiIT : AlertingRestTestCase() { - - companion object { - - @BeforeClass - @JvmStatic - fun setup() { - // things to execute once and keep around for the class - org.junit.Assume.assumeTrue(System.getProperty("security", "false")!!.toBoolean()) - } - } - - val user = "userD" - var userClient: RestClient? = null - - @Before - fun create() { - if (userClient == null) { - createUser(user, arrayOf()) - userClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), user, password).setSocketTimeout(60000).build() - } - } - - @After - fun cleanup() { - userClient?.close() - deleteUser(user) - } - - // Create Monitor related security tests - fun `test create monitor with an user with alerting role`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_INDEX_MONITOR_ACCESS) - ) - try { - // randomMonitor has a dummy user, api ignores the User passed as part of monitor, it picks user info from the logged-in user. 
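// In other words, the user carried in the request body is not trusted; the server stamps the monitor
// with the authenticated caller. A minimal sketch of what the tests below assert, using the same
// helpers as this file:
//
//     val createResponse = userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), randomQueryLevelMonitor().toHttpEntity())
//     // the stored monitor must not echo a user object back to the client
//     assertUserNull(createResponse?.asMap()!!["monitor"] as HashMap<String, Any>)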
- val monitor = randomQueryLevelMonitor().copy( - inputs = listOf( - SearchInput( - indices = listOf(TEST_HR_INDEX), - query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) - ) - ) - ) - val createResponse = userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - assertEquals("Create monitor failed", RestStatus.CREATED, createResponse?.restStatus()) - - assertUserNull(createResponse?.asMap()!!["monitor"] as HashMap) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - /* - TODO: https://github.com/opensearch-project/alerting/issues/300 - */ - fun `test create monitor with an user without alerting role`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) - ) - try { - val monitor = randomQueryLevelMonitor().copy( - inputs = listOf( - SearchInput( - indices = listOf(TEST_HR_INDEX), - query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) - ) - ) - ) - userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - fun `test create monitor with an user with read-only role`() { - createUserWithTestData(user, TEST_HR_INDEX, TEST_HR_ROLE, TEST_HR_BACKEND_ROLE) - createUserRolesMapping(ALERTING_READ_ONLY_ACCESS, arrayOf(user)) - - try { - val monitor = randomQueryLevelMonitor().copy( - inputs = listOf( - SearchInput( - indices = listOf(TEST_HR_INDEX), - query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) - ) - ) - ) - userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - deleteRoleMapping(ALERTING_READ_ONLY_ACCESS) - } - } - - fun `test query monitors with an user with only search monitor cluster permission`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_SEARCH_MONITOR_ONLY_ACCESS) - ) - val monitor = createRandomMonitor(true) - - val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() - val searchResponse = client().makeRequest( - "GET", - "$ALERTING_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - - assertEquals("Search monitor failed", RestStatus.OK, searchResponse.restStatus()) - val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) - val hits = xcp.map()["hits"]!! 
as Map<String, Map<String, Any>> - val numberDocsFound = hits["total"]?.get("value") - assertEquals("Monitor not found during search", 1, numberDocsFound) - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - - /* - TODO: https://github.com/opensearch-project/alerting/issues/300 - */ - fun `test query monitors with an user without search monitor cluster permission`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) - ) - try { - val monitor = randomQueryLevelMonitor().copy( - inputs = listOf( - SearchInput( - indices = listOf(TEST_HR_INDEX), - query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) - ) - ) - ) - userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - fun `test create monitor with an user without index read role`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_INDEX_MONITOR_ACCESS) - ) - try { - val monitor = randomQueryLevelMonitor().copy( - inputs = listOf( - SearchInput( - indices = listOf(TEST_NON_HR_INDEX), - query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) - ) - ) - ) - userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - fun `test create monitor with disable filter by`() { - disableFilterBy() - val monitor = randomQueryLevelMonitor() - val createResponse = client().makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - assertEquals("Create monitor failed", RestStatus.CREATED, createResponse.restStatus()) - assertUserNull(createResponse.asMap()["monitor"] as HashMap<String, Any>) - } - - fun `test get monitor with an user with get monitor role`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) - ) - - val monitor = createRandomMonitor(true) - - try { - val getMonitorResponse = userClient?.makeRequest( - "GET", - "$ALERTING_BASE_URI/${monitor.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - /* - TODO: https://github.com/opensearch-project/alerting/issues/300 - */ - fun `test get monitor with an user without get monitor role`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) - ) - - val monitor = createRandomMonitor(true) - - try { - userClient?.makeRequest( - "GET", - "$ALERTING_BASE_URI/${monitor.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) {
assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - fun getDocs(response: Response?): Any? { - val hits = createParser( - XContentType.JSON.xContent(), - response?.entity?.content - ).map()["hits"]!! as Map> - return hits["total"]?.get("value") - } - - // Query Monitors related security tests - fun `test update monitor with disable filter by`() { - disableFilterBy() - val monitor = randomQueryLevelMonitor(enabled = true) - - val createdMonitor = createMonitor(monitor = monitor) - - assertNotNull("The monitor was not created", createdMonitor) - assertTrue("The monitor was not enabled", createdMonitor.enabled) - - val monitorV2 = createdMonitor.copy(enabled = false, enabledTime = null) - val updatedMonitor = updateMonitor(monitor = monitorV2) - - assertFalse("The monitor was not disabled", updatedMonitor.enabled) - } - - fun `test update monitor with enable filter by`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = randomQueryLevelMonitor(enabled = true) - - val createdMonitor = createMonitor(monitor = monitor) - - assertNotNull("The monitor was not created", createdMonitor) - assertTrue("The monitor was not enabled", createdMonitor.enabled) - - val monitorV2 = createdMonitor.copy(enabled = false, enabledTime = null) - val updatedMonitor = updateMonitor(monitor = monitorV2) - - assertFalse("The monitor was not disabled", updatedMonitor.enabled) - } - - fun `test create monitor with enable filter by with a user have access and without role has no access`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = randomQueryLevelMonitor(enabled = true) - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) - assertNotNull("The monitor was not created", createdMonitor) - - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - - // getUser should have access to the monitor - val getUser = "getUser" - createUserWithTestDataAndCustomRole( - getUser, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf("role2"), - getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) - ) - val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) - .setSocketTimeout(60000).build() - - val getMonitorResponse = getUserClient?.makeRequest( - "GET", - "$ALERTING_BASE_URI/${createdMonitor.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) - - // Remove backend role and ensure no access is granted after - patchUserBackendRoles(getUser, arrayOf("role1")) - try { - getUserClient?.makeRequest( - "GET", - "$ALERTING_BASE_URI/${createdMonitor.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected Forbidden exception") - } catch (e: ResponseException) { - assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } 
finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - deleteUser(getUser) - getUserClient?.close() - } - } - - fun `test create monitor with enable filter by with no backend roles`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = randomQueryLevelMonitor(enabled = true) - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - try { - createMonitorWithClient(userClient!!, monitor = monitor, listOf()) - fail("Expected exception since a non-admin user is trying to create a monitor with no backend roles") - } catch (e: ResponseException) { - assertEquals("Create monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - } - } - - fun `test create monitor as admin with enable filter by with no backend roles`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = randomQueryLevelMonitor(enabled = true) - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - val createdMonitor = createMonitorWithClient(client(), monitor = monitor, listOf()) - assertNotNull("The monitor was not created", createdMonitor) - - try { - userClient?.makeRequest( - "GET", - "$ALERTING_BASE_URI/${createdMonitor.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected Forbidden exception") - } catch (e: ResponseException) { - assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - } - } - - fun `test create monitor with enable filter by with roles user has no access and throw exception`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = randomQueryLevelMonitor(enabled = true) - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - try { - createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role1", "role2")) - fail("Expected create monitor to fail as user does not have role1 backend role") - } catch (e: ResponseException) { - assertEquals("Create monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - } - } - - fun `test create monitor as admin with enable filter by with a user have access and without role has no access`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = randomQueryLevelMonitor(enabled = true) - - val createdMonitor = 
createMonitorWithClient(client(), monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role1", "role2")) - assertNotNull("The monitor was not created", createdMonitor) - - // user should have access to the admin monitor - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) - ) - - val getMonitorResponse = userClient?.makeRequest( - "GET", - "$ALERTING_BASE_URI/${createdMonitor.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) - - // Remove good backend role and ensure no access is granted after - patchUserBackendRoles(user, arrayOf("role5")) - try { - userClient?.makeRequest( - "GET", - "$ALERTING_BASE_URI/${createdMonitor.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected Forbidden exception") - } catch (e: ResponseException) { - assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - fun `test update monitor with enable filter by with removing a permission`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = randomQueryLevelMonitor(enabled = true) - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) - assertNotNull("The monitor was not created", createdMonitor) - - // getUser should have access to the monitor - val getUser = "getUser" - createUserWithTestDataAndCustomRole( - getUser, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf("role2"), - getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) - ) - val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) - .setSocketTimeout(60000).build() - - val getMonitorResponse = getUserClient?.makeRequest( - "GET", - "$ALERTING_BASE_URI/${createdMonitor.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) - - // Remove backend role from monitor - val updatedMonitor = updateMonitorWithClient(userClient!!, createdMonitor, listOf(TEST_HR_BACKEND_ROLE)) - - // getUser should no longer have access - try { - getUserClient?.makeRequest( - "GET", - "$ALERTING_BASE_URI/${updatedMonitor.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected Forbidden exception") - } catch (e: ResponseException) { - assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - deleteUser(getUser) - getUserClient?.close() - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - } - } - - fun `test update monitor with enable filter by with no backend roles`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = 
randomQueryLevelMonitor(enabled = true) - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf("role2")) - assertNotNull("The monitor was not created", createdMonitor) - - try { - updateMonitorWithClient(userClient!!, createdMonitor, listOf()) - } catch (e: ResponseException) { - assertEquals("Update monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - } - } - - fun `test update monitor as admin with enable filter by with no backend roles`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = randomQueryLevelMonitor(enabled = true) - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - val createdMonitor = createMonitorWithClient(client(), monitor = monitor, listOf(TEST_HR_BACKEND_ROLE)) - assertNotNull("The monitor was not created", createdMonitor) - - val getMonitorResponse = userClient?.makeRequest( - "GET", - "$ALERTING_BASE_URI/${createdMonitor.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) - - val updatedMonitor = updateMonitorWithClient(client(), createdMonitor, listOf()) - - try { - userClient?.makeRequest( - "GET", - "$ALERTING_BASE_URI/${updatedMonitor.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected Forbidden exception") - } catch (e: ResponseException) { - assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - } - } - - fun `test update monitor with enable filter by with updating with a permission user has no access to and throw exception`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = randomQueryLevelMonitor(enabled = true) - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) - assertNotNull("The monitor was not created", createdMonitor) - - // getUser should have access to the monitor - val getUser = "getUser" - createUserWithTestDataAndCustomRole( - getUser, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf("role2"), - getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) - ) - val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) - .setSocketTimeout(60000).build() - - val getMonitorResponse = getUserClient?.makeRequest( - "GET", - "$ALERTING_BASE_URI/${createdMonitor.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - assertEquals("Get monitor failed", RestStatus.OK, 
getMonitorResponse?.restStatus()) - - try { - updateMonitorWithClient(userClient!!, createdMonitor, listOf(TEST_HR_BACKEND_ROLE, "role1")) - fail("Expected update monitor to fail as user doesn't have access to role1") - } catch (e: ResponseException) { - assertEquals("Update monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - deleteUser(getUser) - getUserClient?.close() - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - } - } - - fun `test update monitor as another user with enable filter by with removing a permission and adding permission`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = randomQueryLevelMonitor(enabled = true) - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE)) - assertNotNull("The monitor was not created", createdMonitor) - - // Remove backend role from monitor with new user and add role5 - val updateUser = "updateUser" - createUserWithRoles( - updateUser, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role5"), - false - ) - - val updateUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), updateUser, password) - .setSocketTimeout(60000).build() - val updatedMonitor = updateMonitorWithClient(updateUserClient, createdMonitor, listOf("role5")) - - // old user should no longer have access - try { - userClient?.makeRequest( - "GET", - "$ALERTING_BASE_URI/${updatedMonitor.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected Forbidden exception") - } catch (e: ResponseException) { - assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - deleteUser(updateUser) - updateUserClient?.close() - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - } - } - - fun `test update monitor as admin with enable filter by with removing a permission`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = randomQueryLevelMonitor(enabled = true) - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) - assertNotNull("The monitor was not created", createdMonitor) - - // getUser should have access to the monitor - val getUser = "getUser" - createUserWithTestDataAndCustomRole( - getUser, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf("role1", "role2"), - getClusterPermissionsFromCustomRole(ALERTING_GET_MONITOR_ACCESS) - ) - val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) - .setSocketTimeout(60000).build() - - val getMonitorResponse = getUserClient?.makeRequest( - "GET", - "$ALERTING_BASE_URI/${createdMonitor.id}", - null, - 
BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - assertEquals("Get monitor failed", RestStatus.OK, getMonitorResponse?.restStatus()) - - // Remove backend role from monitor - val updatedMonitor = updateMonitorWithClient(client(), createdMonitor, listOf("role4")) - - // original user should no longer have access - try { - userClient?.makeRequest( - "GET", - "$ALERTING_BASE_URI/${updatedMonitor.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected Forbidden exception") - } catch (e: ResponseException) { - assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - } - - // get user should no longer have access - try { - getUserClient?.makeRequest( - "GET", - "$ALERTING_BASE_URI/${updatedMonitor.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected Forbidden exception") - } catch (e: ResponseException) { - assertEquals("Get monitor failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - deleteUser(getUser) - getUserClient?.close() - } - } - - fun `test delete monitor with disable filter by`() { - disableFilterBy() - val monitor = randomQueryLevelMonitor(enabled = true) - - val createdMonitor = createMonitor(monitor = monitor) - - assertNotNull("The monitor was not created", createdMonitor) - assertTrue("The monitor was not enabled", createdMonitor.enabled) - - deleteMonitor(monitor = createdMonitor) - - val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", createdMonitor.id)).toString() - // search as "admin" - must get 0 docs - val adminSearchResponse = client().makeRequest( - "POST", - "$ALERTING_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) - - val adminHits = createParser( - XContentType.JSON.xContent(), - adminSearchResponse.entity.content - ).map()["hits"]!! as Map> - val adminDocsFound = adminHits["total"]?.get("value") - assertEquals("Monitor found during search", 0, adminDocsFound) - } - - fun `test delete monitor with enable filter by`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = randomQueryLevelMonitor(enabled = true) - - val createdMonitor = createMonitor(monitor = monitor) - - assertNotNull("The monitor was not created", createdMonitor) - assertTrue("The monitor was not enabled", createdMonitor.enabled) - - deleteMonitor(monitor = createdMonitor) - - val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", createdMonitor.id)).toString() - // search as "admin" - must get 0 docs - val adminSearchResponse = client().makeRequest( - "POST", - "$ALERTING_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) - - val adminHits = createParser( - XContentType.JSON.xContent(), - adminSearchResponse.entity.content - ).map()["hits"]!! 
as Map<String, Map<String, Any>> - val adminDocsFound = adminHits["total"]?.get("value") - assertEquals("Monitor found during search", 0, adminDocsFound) - } - - /* - TODO: https://github.com/opensearch-project/alerting/issues/300 - */ - fun `test query monitors with disable filter by`() { - disableFilterBy() - - // creates monitor as "admin" user. - val monitor = createRandomMonitor(true) - val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() - - // search as "admin" - must get 1 doc - val adminSearchResponse = client().makeRequest( - "POST", - "$ALERTING_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) - assertEquals("Monitor not found during search", 1, getDocs(adminSearchResponse)) - - // search as userOne without alerting roles - must return 403 Forbidden - try { - userClient?.makeRequest( - "POST", - "$ALERTING_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - fail("Expected 403 FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } - // add alerting roles and search as userOne - must return 1 doc - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_SEARCH_MONITOR_ONLY_ACCESS) - ) - try { - val userOneSearchResponse = userClient?.makeRequest( - "POST", - "$ALERTING_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search monitor failed", RestStatus.OK, userOneSearchResponse?.restStatus()) - assertEquals("Monitor not found during search", 1, getDocs(userOneSearchResponse)) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - fun `test query monitors with enable filter by`() { - enableFilterBy() - - // creates monitor as "admin" user.
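// (With filter-by enabled, monitor visibility requires sharing a backend role with the monitor's
// creator; ALERTING_FULL_ACCESS_ROLE only grants cluster permissions. That is why the same search
// that returns 1 hit for admin is expected to return 0 hits for the test user below.)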
- val monitor = createRandomMonitor(true) - val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() - - // search as "admin" - must get 1 doc - val adminSearchResponse = client().makeRequest( - "POST", - "$ALERTING_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) - assertEquals("Monitor not found during search", 1, getDocs(adminSearchResponse)) - - // search as userOne without alerting roles - must return 403 Forbidden - try { - userClient?.makeRequest( - "POST", - "$ALERTING_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - fail("Expected 403 FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } - - // add alerting roles and search as userOne - must return 0 docs - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf(user)) - try { - val userOneSearchResponse = userClient?.makeRequest( - "POST", - "$ALERTING_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search monitor failed", RestStatus.OK, userOneSearchResponse?.restStatus()) - assertEquals("Monitor not found during search", 0, getDocs(userOneSearchResponse)) - } finally { - deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) - } - } - - fun `test execute monitor with an user with execute monitor access`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_EXECUTE_MONITOR_ACCESS) - ) - - val monitor = createRandomMonitor(true) - - try { - val executeMonitorResponse = userClient?.makeRequest( - "POST", - "$ALERTING_BASE_URI/${monitor.id}/_execute", - mutableMapOf() - ) - assertEquals("Execute monitor failed", RestStatus.OK, executeMonitorResponse?.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - /* - TODO: https://github.com/opensearch-project/alerting/issues/300 - */ - fun `test execute monitor with an user without execute monitor access`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) - ) - - val monitor = createRandomMonitor(true) - - try { - userClient?.makeRequest( - "POST", - "$ALERTING_BASE_URI/${monitor.id}/_execute", - mutableMapOf() - ) - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - fun `test delete monitor with an user with delete monitor access`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_DELETE_MONITOR_ACCESS) - ) - - val monitor = createRandomMonitor(true) - val refresh = true - - try { - val deleteMonitorResponse = userClient?.makeRequest( - "DELETE", - "$ALERTING_BASE_URI/${monitor.id}?refresh=$refresh", - emptyMap(), - monitor.toHttpEntity() - ) - assertEquals("Delete monitor failed", RestStatus.OK, deleteMonitorResponse?.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - /* - TODO: https://github.com/opensearch-project/alerting/issues/300 - */ - fun `test delete monitor
with an user without delete monitor access`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) - ) - - val monitor = createRandomMonitor(true) - val refresh = true - - try { - userClient?.makeRequest( - "DELETE", - "$ALERTING_BASE_URI/${monitor.id}?refresh=$refresh", - emptyMap(), - monitor.toHttpEntity() - ) - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - fun `test query all alerts in all states with disabled filter by`() { - disableFilterBy() - putAlertMappings() - val monitor = createRandomMonitor(refresh = true) - createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) - createAlert(randomAlert(monitor).copy(state = Alert.State.COMPLETED)) - createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) - createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) - randomAlert(monitor).copy(id = "foobar") - - val inputMap = HashMap<String, Any>() - inputMap["missing"] = "_last" - - // search as "admin" - must get 4 docs - val adminResponseMap = getAlerts(client(), inputMap).asMap() - assertEquals(4, adminResponseMap["totalAlerts"]) - - // search as userOne without alerting roles - must return 403 Forbidden - try { - getAlerts(userClient as RestClient, inputMap).asMap() - fail("Expected 403 FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } - - // add alerting roles and search as userOne - must return 4 docs - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf(user)) - try { - val responseMap = getAlerts(userClient as RestClient, inputMap).asMap() - assertEquals(4, responseMap["totalAlerts"]) - } finally { - deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) - } - } - - fun `test query all alerts in all states with filter by`() { - enableFilterBy() - putAlertMappings() - val adminUser = User(ADMIN, listOf(ADMIN), listOf(ALL_ACCESS_ROLE), listOf()) - var monitor = createRandomMonitor(refresh = true).copy(user = adminUser) - createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) - createAlert(randomAlert(monitor).copy(state = Alert.State.COMPLETED)) - createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) - createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) - randomAlert(monitor).copy(id = "foobar") - - val inputMap = HashMap<String, Any>() - inputMap["missing"] = "_last" - - // search as "admin" - must get 4 docs - val adminResponseMap = getAlerts(client(), inputMap).asMap() - assertEquals(4, adminResponseMap["totalAlerts"]) - - // search as userOne without alerting roles - must return 403 Forbidden - try { - getAlerts(userClient as RestClient, inputMap).asMap() - fail("Expected 403 FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } - // add alerting roles and search as userOne - must return 0 docs - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf(user)) - try { - val responseMap = getAlerts(userClient as RestClient, inputMap).asMap() - assertEquals(0, responseMap["totalAlerts"]) - } finally { - deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) - } - } - - fun `test query all alerts in all states with filter by1`() { - enableFilterBy() -
putAlertMappings() - val adminUser = User(ADMIN, listOf(ADMIN), listOf(ALL_ACCESS_ROLE), listOf()) - var monitor = createRandomMonitor(refresh = true).copy(user = adminUser) - createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) - createAlert(randomAlert(monitor).copy(state = Alert.State.COMPLETED)) - createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) - createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) - randomAlert(monitor).copy(id = "foobar") - - val inputMap = HashMap<String, Any>() - inputMap["missing"] = "_last" - inputMap["monitorId"] = monitor.id - - // search as "admin" - must get 4 docs - val adminResponseMap = getAlerts(client(), inputMap).asMap() - assertEquals(4, adminResponseMap["totalAlerts"]) - - // search as userOne without alerting roles - must return 403 Forbidden - try { - getAlerts(userClient as RestClient, inputMap).asMap() - fail("Expected 403 FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } - // map userOne to the ADMIN backend role and search again - must return 4 docs - createUserWithRoles(user, listOf(ALERTING_FULL_ACCESS_ROLE), listOf(ADMIN), false) - try { - val responseMap = getAlerts(userClient as RestClient, inputMap).asMap() - assertEquals(4, responseMap["totalAlerts"]) - } finally { - deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) - } - } - - fun `test get alerts with an user with get alerts role`() { - putAlertMappings() - val ackAlertsUser = User(ADMIN, listOf(ADMIN), listOf(ALERTING_GET_ALERTS_ACCESS), listOf()) - var monitor = createRandomMonitor(refresh = true).copy(user = ackAlertsUser) - createAlert(randomAlert(monitor).copy(state = Alert.State.ACKNOWLEDGED)) - createAlert(randomAlert(monitor).copy(state = Alert.State.COMPLETED)) - createAlert(randomAlert(monitor).copy(state = Alert.State.ERROR)) - createAlert(randomAlert(monitor).copy(state = Alert.State.ACTIVE)) - randomAlert(monitor).copy(id = "foobar") - - val inputMap = HashMap<String, Any>() - inputMap["missing"] = "_last" - - // search as "admin" - must get 4 docs - val adminResponseMap = getAlerts(client(), inputMap).asMap() - assertEquals(4, adminResponseMap["totalAlerts"]) - - // add the get alerts role and search as userOne - must return 4 docs - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_GET_ALERTS_ACCESS) - ) - try { - val responseMap = getAlerts(userClient as RestClient, inputMap).asMap() - assertEquals(4, responseMap["totalAlerts"]) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - // Execute Monitor related security tests - - fun `test execute monitor with elevate permissions`() { - val action = randomAction(template = randomTemplateScript("Hello {{ctx.monitor.name}}"), destinationId = createDestination().id) - val inputs = listOf( - SearchInput( - indices = kotlin.collections.listOf(TEST_NON_HR_INDEX), - query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) - ) - ) - val monitor = randomQueryLevelMonitor( - triggers = listOf(randomQueryLevelTrigger(condition = ALWAYS_RUN, actions = listOf(action))), - inputs = inputs - ) - - // Make sure that elevating the permissions fails the execution.
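// (The monitor below claims the admin user, but a dry-run execution still runs the search input with
// the calling user's permissions; since userD cannot read TEST_NON_HR_INDEX, the input result is
// expected to carry a "no permissions for [indices:data/read/search]" error instead of data.)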
- val adminUser = User(ADMIN, listOf(ADMIN), listOf(ALL_ACCESS_ROLE), listOf()) - var modifiedMonitor = monitor.copy(user = adminUser) - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf(user)) - - try { - val response = executeMonitor(userClient as RestClient, modifiedMonitor, params = DRYRUN_MONITOR) - val output = entityAsMap(response) - val inputResults = output.stringMap("input_results") - assertTrue("Missing monitor error message", (inputResults?.get("error") as String).isNotEmpty()) - assertTrue((inputResults.get("error") as String).contains("no permissions for [indices:data/read/search]")) - } finally { - deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) - } - } - - fun `test admin all access with enable filter by`() { - enableFilterBy() - createUserWithTestData(user, TEST_HR_INDEX, TEST_HR_ROLE, TEST_HR_BACKEND_ROLE) - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf(user)) - try { - // randomMonitor has a dummy user; the API ignores the User passed as part of the monitor and picks up the user info from the logged-in user. - val monitor = randomQueryLevelMonitor().copy( - inputs = listOf( - SearchInput( - indices = listOf(TEST_HR_INDEX), - query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) - ) - ) - ) - - val createResponse = userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - assertEquals("Create monitor failed", RestStatus.CREATED, createResponse?.restStatus()) - val monitorJson = JsonXContent.jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - createResponse?.entity?.content - ).map() - - val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitorJson["_id"])).toString() - - // search as "admin" - must get 1 doc - val adminSearchResponse = client().makeRequest( - "POST", - "$ALERTING_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) - assertEquals("Monitor not found during search", 1, getDocs(adminSearchResponse)) - - // get as "admin" - must get 1 doc - val id: String = monitorJson["_id"] as String - val adminGetResponse = client().makeRequest( - "GET", - "$ALERTING_BASE_URI/$id", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Get monitor failed", RestStatus.OK, adminGetResponse.restStatus()) - - // delete as "admin" - val adminDeleteResponse = client().makeRequest( - "DELETE", - "$ALERTING_BASE_URI/$id", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Delete monitor failed", RestStatus.OK, adminDeleteResponse.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) - } - } - - /* - TODO: https://github.com/opensearch-project/alerting/issues/300 - */ - fun `test execute query-level monitor with user having partial index permissions`() { - createUser(user, arrayOf(TEST_HR_BACKEND_ROLE)) - createTestIndex(TEST_HR_INDEX) - createIndexRoleWithDocLevelSecurity( - TEST_HR_ROLE, - TEST_HR_INDEX, - TERM_DLS_QUERY, - getClusterPermissionsFromCustomRole(ALERTING_INDEX_MONITOR_ACCESS) - ) - createUserRolesMapping(TEST_HR_ROLE, arrayOf(user)) - - // Add a doc that is accessible to the user - indexDoc( - TEST_HR_INDEX, - "1", - """ - { - "test_field": "a", - "accessible": true - } - """.trimIndent() - ) - - // Add a second doc that is not accessible to the user - indexDoc( - TEST_HR_INDEX, - "2", - """ - { - 
"test_field": "b", - "accessible": false - } - """.trimIndent() - ) - - val input = SearchInput(indices = listOf(TEST_HR_INDEX), query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery())) - val triggerScript = """ - // make sure there is exactly one hit - return ctx.results[0].hits.hits.size() == 1 - """.trimIndent() - - val trigger = randomQueryLevelTrigger(condition = Script(triggerScript)).copy(actions = listOf()) - val monitor = createMonitorWithClient( - userClient!!, - randomQueryLevelMonitor(inputs = listOf(input), triggers = listOf(trigger)) - ) - - try { - executeMonitor(monitor.id) - val alerts = searchAlerts(monitor) - assertEquals("Incorrect number of alerts", 1, alerts.size) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - fun `test execute bucket-level monitor with user having partial index permissions`() { - createUser(user, arrayOf(TEST_HR_BACKEND_ROLE)) - createTestIndex(TEST_HR_INDEX) - createIndexRoleWithDocLevelSecurity( - TEST_HR_ROLE, - TEST_HR_INDEX, - TERM_DLS_QUERY, - getClusterPermissionsFromCustomRole(ALERTING_INDEX_MONITOR_ACCESS) - ) - createUserRolesMapping(TEST_HR_ROLE, arrayOf(user)) - - // Add a doc that is accessible to the user - indexDoc( - TEST_HR_INDEX, - "1", - """ - { - "test_field": "a", - "accessible": true - } - """.trimIndent() - ) - - // Add a second doc that is not accessible to the user - indexDoc( - TEST_HR_INDEX, - "2", - """ - { - "test_field": "b", - "accessible": false - } - """.trimIndent() - ) - - val compositeSources = listOf( - TermsValuesSourceBuilder("test_field").field("test_field") - ) - val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) - val input = SearchInput( - indices = listOf(TEST_HR_INDEX), - query = SearchSourceBuilder().size(0).query(QueryBuilders.matchAllQuery()).aggregation(compositeAgg) - ) - val triggerScript = """ - params.docCount > 0 - """.trimIndent() - - var trigger = randomBucketLevelTrigger() - trigger = trigger.copy( - bucketSelector = BucketSelectorExtAggregationBuilder( - name = trigger.id, - bucketsPathsMap = mapOf("docCount" to "_count"), - script = Script(triggerScript), - parentBucketPath = "composite_agg", - filter = null - ), - actions = listOf() - ) - val monitor = createMonitorWithClient( - userClient!!, - randomBucketLevelMonitor(inputs = listOf(input), enabled = false, triggers = listOf(trigger)) - ) - - try { - executeMonitor(monitor.id) - val alerts = searchAlerts(monitor) - assertEquals("Incorrect number of alerts", 1, alerts.size) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - /** - * We want to verify that user roles/permissions do not affect clean up of monitors during partial monitor creation failure - */ - fun `test create monitor failure clean up with a user without delete monitor access`() { - enableFilterBy() - createUser(user, listOf(TEST_HR_BACKEND_ROLE, "role2").toTypedArray()) - createTestIndex(TEST_HR_INDEX) - createCustomIndexRole( - ALERTING_INDEX_MONITOR_ACCESS, - TEST_HR_INDEX, - getClusterPermissionsFromCustomRole(ALERTING_INDEX_MONITOR_ACCESS) - ) - createUserWithRoles( - user, - listOf(ALERTING_INDEX_MONITOR_ACCESS, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - val docLevelQueryIndex = ".opensearch-alerting-queries-000001" - createIndex( - docLevelQueryIndex, Settings.EMPTY, - """ - "properties" : { - "query": { - "type": "percolator_ext" - }, - "monitor_id": { - "type": "text" - }, - "index": { - "type": "text" - } - } - } - """.trimIndent(), - 
".opensearch-alerting-queries" - ) - closeIndex(docLevelQueryIndex) // close index to simulate doc level query indexing failure - try { - val monitor = randomDocumentLevelMonitor( - withMetadata = false, - triggers = listOf(), - inputs = listOf(DocLevelMonitorInput("description", listOf(TEST_HR_INDEX), emptyList())) - ) - userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - fail("Monitor creation should have failed due to error in indexing doc level queries") - } catch (e: ResponseException) { - val search = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).size(10).toString() - val searchResponse = client().makeRequest( - "GET", "$ALERTING_BASE_URI/_search", - emptyMap(), - StringEntity(search, ContentType.APPLICATION_JSON) - ) - val xcp = createParser(XContentType.JSON.xContent(), searchResponse.entity.content) - val hits = xcp.map()["hits"]!! as Map<String, Map<String, Any>> - val numberDocsFound = hits["total"]?.get("value") - assertEquals("Monitors found. Clean up unsuccessful", 0, numberDocsFound) - } finally { - deleteRoleAndRoleMapping(ALERTING_INDEX_MONITOR_ACCESS) - } - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/SecureWorkflowRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/SecureWorkflowRestApiIT.kt deleted file mode 100644 index 6d0112c52..000000000 --- a/alerting/bin/test/org/opensearch/alerting/resthandler/SecureWorkflowRestApiIT.kt +++ /dev/null @@ -1,1421 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.apache.http.HttpHeaders -import org.apache.http.entity.ContentType -import org.apache.http.message.BasicHeader -import org.apache.http.nio.entity.NStringEntity -import org.junit.After -import org.junit.Before -import org.junit.BeforeClass -import org.opensearch.alerting.ALERTING_BASE_URI -import org.opensearch.alerting.ALERTING_DELETE_WORKFLOW_ACCESS -import org.opensearch.alerting.ALERTING_EXECUTE_WORKFLOW_ACCESS -import org.opensearch.alerting.ALERTING_FULL_ACCESS_ROLE -import org.opensearch.alerting.ALERTING_GET_WORKFLOW_ACCESS -import org.opensearch.alerting.ALERTING_INDEX_MONITOR_ACCESS -import org.opensearch.alerting.ALERTING_INDEX_WORKFLOW_ACCESS -import org.opensearch.alerting.ALERTING_NO_ACCESS_ROLE -import org.opensearch.alerting.ALERTING_READ_ONLY_ACCESS -import org.opensearch.alerting.ALWAYS_RUN -import org.opensearch.alerting.AlertingRestTestCase -import org.opensearch.alerting.READALL_AND_MONITOR_ROLE -import org.opensearch.alerting.TERM_DLS_QUERY -import org.opensearch.alerting.TEST_HR_BACKEND_ROLE -import org.opensearch.alerting.TEST_HR_INDEX -import org.opensearch.alerting.TEST_HR_ROLE -import org.opensearch.alerting.TEST_NON_HR_INDEX -import org.opensearch.alerting.WORKFLOW_ALERTING_BASE_URI -import org.opensearch.alerting.assertUserNull -import org.opensearch.alerting.makeRequest -import org.opensearch.alerting.randomBucketLevelMonitor -import org.opensearch.alerting.randomBucketLevelTrigger -import org.opensearch.alerting.randomDocLevelQuery -import org.opensearch.alerting.randomDocumentLevelMonitor -import org.opensearch.alerting.randomDocumentLevelTrigger -import org.opensearch.alerting.randomQueryLevelMonitor -import org.opensearch.alerting.randomWorkflow -import org.opensearch.client.Response -import org.opensearch.client.ResponseException -import org.opensearch.client.RestClient -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import 
org.opensearch.common.xcontent.XContentType -import org.opensearch.common.xcontent.json.JsonXContent -import org.opensearch.commons.alerting.aggregation.bucketselectorext.BucketSelectorExtAggregationBuilder -import org.opensearch.commons.alerting.model.DataSources -import org.opensearch.commons.alerting.model.DocLevelMonitorInput -import org.opensearch.commons.alerting.model.DocLevelQuery -import org.opensearch.commons.alerting.model.SearchInput -import org.opensearch.commons.rest.SecureRestClientBuilder -import org.opensearch.core.rest.RestStatus -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.index.query.QueryBuilders -import org.opensearch.script.Script -import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder -import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.test.junit.annotations.TestLogging -import java.time.Instant - -@TestLogging("level:DEBUG", reason = "Debug for tests.") -@Suppress("UNCHECKED_CAST") -class SecureWorkflowRestApiIT : AlertingRestTestCase() { - - companion object { - - @BeforeClass - @JvmStatic - fun setup() { - // things to execute once and keep around for the class - org.junit.Assume.assumeTrue(System.getProperty("security", "false")!!.toBoolean()) - } - } - - val user = "userD" - var userClient: RestClient? = null - - @Before - fun create() { - if (userClient == null) { - createUser(user, arrayOf()) - userClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), user, password).setSocketTimeout(60000).build() - } - } - - @After - fun cleanup() { - userClient?.close() - deleteUser(user) - } - - // Create Workflow related security tests - fun `test create workflow with an user with alerting role`() { - val clusterPermissions = listOf( - getClusterPermissionsFromCustomRole(ALERTING_INDEX_WORKFLOW_ACCESS) - ) - - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - clusterPermissions - ) - try { - val monitor = createMonitor( - randomQueryLevelMonitor( - inputs = listOf(SearchInput(listOf(TEST_HR_INDEX), SearchSourceBuilder().query(QueryBuilders.matchAllQuery()))), - ), - true - ) - - val workflow = randomWorkflow( - monitorIds = listOf(monitor.id) - ) - - val createResponse = userClient?.makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) - assertEquals("Create workflow failed", RestStatus.CREATED, createResponse?.restStatus()) - - assertUserNull(createResponse?.asMap()!!["workflow"] as HashMap<String, Any>) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - fun `test create workflow with an user without alerting role`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) - ) - try { - val monitor = createRandomMonitor(true) - - val workflow = randomWorkflow( - monitorIds = listOf(monitor.id) - ) - - userClient?.makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - fun `test create workflow with an user with read-only role`() { - createUserWithTestData(user, TEST_HR_INDEX, TEST_HR_ROLE, 
TEST_HR_BACKEND_ROLE) - createUserRolesMapping(ALERTING_READ_ONLY_ACCESS, arrayOf(user)) - - try { - val monitor = createRandomMonitor(true) - val workflow = randomWorkflow( - monitorIds = listOf(monitor.id) - ) - userClient?.makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - deleteRoleMapping(ALERTING_READ_ONLY_ACCESS) - } - } - - fun `test create workflow with delegate with an user without index read role`() { - createTestIndex(TEST_NON_HR_INDEX) - val clusterPermissions = listOf( - getClusterPermissionsFromCustomRole(ALERTING_INDEX_WORKFLOW_ACCESS) - ) - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - clusterPermissions - ) - try { - val query = randomDocLevelQuery(tags = listOf()) - val triggers = listOf(randomDocumentLevelTrigger(condition = Script("query[id=\"${query.id}\"]"))) - - val monitor = createMonitor( - randomDocumentLevelMonitor( - inputs = listOf( - DocLevelMonitorInput( - indices = listOf(TEST_NON_HR_INDEX), - queries = listOf(query) - ) - ), - triggers = triggers - ), - true - ) - - val workflow = randomWorkflow( - monitorIds = listOf(monitor.id) - ) - - userClient?.makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - deleteIndex(TEST_NON_HR_INDEX) - } - } - - fun `test create workflow with disable filter by`() { - disableFilterBy() - val monitor = createRandomMonitor(true) - val workflow = randomWorkflow( - monitorIds = listOf(monitor.id) - ) - val createResponse = client().makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) - assertEquals("Create workflow failed", RestStatus.CREATED, createResponse.restStatus()) - assertUserNull(createResponse.asMap()["workflow"] as HashMap<String, Any>) - } - - fun `test get workflow with an user with get workflow role`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) - ) - - val monitor = createRandomMonitor(true) - val workflow = createWorkflow(randomWorkflow(monitorIds = listOf(monitor.id))) - - try { - val getWorkflowResponse = userClient?.makeRequest( - "GET", - "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - assertEquals("Get workflow failed", RestStatus.OK, getWorkflowResponse?.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - /* - TODO: https://github.com/opensearch-project/alerting/issues/300 - */ - fun `test get workflow with an user without get monitor role`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) - ) - - val monitor = createRandomMonitor(true) - val workflow = createWorkflow(randomWorkflow(monitorIds = listOf(monitor.id))) - - try { - userClient?.makeRequest( - "GET", - "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected 403 Method 
FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.FORBIDDEN, e.response.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - fun getDocs(response: Response?): Any? { - val hits = createParser( - XContentType.JSON.xContent(), - response?.entity?.content - ).map()["hits"]!! as Map> - return hits["total"]?.get("value") - } - - // Query Monitors related security tests - fun `test update workflow with disable filter by`() { - disableFilterBy() - - val createdMonitor = createMonitor(monitor = randomQueryLevelMonitor(enabled = true)) - val createdWorkflow = createWorkflow( - randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true, enabledTime = Instant.now()) - ) - - assertNotNull("The workflow was not created", createdWorkflow) - assertTrue("The workflow was not enabled", createdWorkflow.enabled) - - val workflowV2 = createdWorkflow.copy(enabled = false, enabledTime = null) - val updatedWorkflow = updateWorkflow(workflowV2) - - assertFalse("The monitor was not disabled", updatedWorkflow.enabled) - } - - fun `test update workflow with enable filter by`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - - val createdMonitor = createMonitorWithClient( - client = client(), - monitor = randomQueryLevelMonitor(enabled = true), - rbacRoles = listOf("admin") - ) - val createdWorkflow = createWorkflow( - randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true, enabledTime = Instant.now()) - ) - - assertNotNull("The workflow was not created", createdWorkflow) - assertTrue("The workflow was not enabled", createdWorkflow.enabled) - - val workflowV2 = createdWorkflow.copy(enabled = false, enabledTime = null) - val updatedWorkflow = updateWorkflow(workflow = workflowV2) - - assertFalse("The monitor was not disabled", updatedWorkflow.enabled) - } - - fun `test create workflow with enable filter by with a user have access and without role has no access`() { - enableFilterBy() - if (!isHttps()) { - return - } - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - val createdMonitor = createMonitorWithClient( - userClient!!, - monitor = randomQueryLevelMonitor(enabled = true), - listOf(TEST_HR_BACKEND_ROLE, "role2") - ) - - assertNotNull("The monitor was not created", createdMonitor) - - val createdWorkflow = createWorkflowWithClient( - userClient!!, - workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true), - listOf(TEST_HR_BACKEND_ROLE, "role2") - ) - assertNotNull("The workflow was not created", createdWorkflow) - - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - - // getUser should have access to the monitor - val getUser = "getUser" - createUserWithTestDataAndCustomRole( - getUser, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf("role2"), - getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) - ) - val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) - .setSocketTimeout(60000).build() - - val getWorkflowResponse = getUserClient?.makeRequest( - "GET", - "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - assertEquals("Get workflow 
failed", RestStatus.OK, getWorkflowResponse?.restStatus()) - - // Remove backend role and ensure no access is granted after - patchUserBackendRoles(getUser, arrayOf("role1")) - try { - getUserClient?.makeRequest( - "GET", - "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected Forbidden exception") - } catch (e: ResponseException) { - assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) - deleteUser(getUser) - getUserClient?.close() - } - } - - fun `test create workflow with enable filter by with a user with a backend role doesn't have access to monitor`() { - enableFilterBy() - if (!isHttps()) { - return - } - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - val createdMonitor = createMonitorWithClient( - userClient!!, - monitor = randomQueryLevelMonitor(enabled = true), - listOf("role2") - ) - - assertNotNull("The monitor was not created", createdMonitor) - - val userWithDifferentRole = "role3User" - - createUserWithRoles( - userWithDifferentRole, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role3"), - false - ) - - val userWithDifferentRoleClient = SecureRestClientBuilder( - clusterHosts.toTypedArray(), isHttps(), userWithDifferentRole, password - ) - .setSocketTimeout(60000).build() - - try { - createWorkflowWithClient( - userWithDifferentRoleClient!!, - workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true), - listOf("role3") - ) - fail("Expected Forbidden exception") - } catch (e: ResponseException) { - assertEquals("Create workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) - deleteUser(userWithDifferentRole) - userWithDifferentRoleClient?.close() - } - } - - fun `test create workflow with enable filter by with no backend roles`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = createMonitor(randomQueryLevelMonitor(enabled = true)) - - val workflow = randomWorkflow(monitorIds = listOf(monitor.id)) - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - try { - createWorkflowWithClient(userClient!!, workflow, listOf()) - fail("Expected exception since a non-admin user is trying to create a workflow with no backend roles") - } catch (e: ResponseException) { - assertEquals("Create workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - } - } - - fun `test create workflow as admin with enable filter by with no backend roles`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = randomQueryLevelMonitor(enabled = true) - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - 
listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - val createdMonitor = createMonitor(monitor = monitor) - val createdWorkflow = createWorkflow(randomWorkflow(monitorIds = listOf(createdMonitor.id))) - assertNotNull("The workflow was not created", createdWorkflow) - - try { - - userClient?.makeRequest( - "GET", - "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected Forbidden exception") - } catch (e: ResponseException) { - assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - } - } - - fun `test create workflow with enable filter by with roles user has no access and throw exception`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = createMonitor(randomQueryLevelMonitor(enabled = true)) - val workflow = randomWorkflow(monitorIds = listOf(monitor.id)) - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - try { - createWorkflowWithClient(userClient!!, workflow = workflow, listOf(TEST_HR_BACKEND_ROLE, "role1", "role2")) - fail("Expected create workflow to fail as user does not have role1 backend role") - } catch (e: ResponseException) { - assertEquals("Create workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - } - } - - fun `test create workflow as admin with enable filter by with a user have access and without role has no access`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = randomQueryLevelMonitor(enabled = true) - - val createdMonitor = createMonitorWithClient(client(), monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role1", "role2")) - val createdWorkflow = createWorkflowWithClient( - client(), - randomWorkflow(monitorIds = listOf(createdMonitor.id)), - listOf(TEST_HR_BACKEND_ROLE, "role1", "role2") - ) - assertNotNull("The workflow was not created", createdWorkflow) - - // user should have access to the admin monitor - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) - ) - - val getWorkflowResponse = userClient?.makeRequest( - "GET", - "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - assertEquals("Get workflow failed", RestStatus.OK, getWorkflowResponse?.restStatus()) - - // Remove good backend role and ensure no access is granted after - patchUserBackendRoles(user, arrayOf("role5")) - try { - userClient?.makeRequest( - "GET", - "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected Forbidden exception") - } catch (e: ResponseException) { - assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally 
{ - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - fun `test update workflow with enable filter by with removing a permission`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - val createdMonitor = createMonitorWithClient(userClient!!, randomQueryLevelMonitor(), listOf(TEST_HR_BACKEND_ROLE, "role2")) - val createdWorkflow = createWorkflowWithClient( - client = userClient!!, workflow = randomWorkflow(enabled = true, monitorIds = listOf(createdMonitor.id)), - rbacRoles = listOf(TEST_HR_BACKEND_ROLE, "role2") - ) - assertNotNull("The workflow was not created", createdWorkflow) - - // getUser should have access to the workflow - val getUser = "getUser" - createUserWithTestDataAndCustomRole( - getUser, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf("role2"), - getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) - ) - val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) - .setSocketTimeout(60000).build() - - val getWorkflowResponse = getUserClient?.makeRequest( - "GET", - "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - assertEquals("Get workflow failed", RestStatus.OK, getWorkflowResponse?.restStatus()) - - // Remove the "role2" backend role from the workflow - val updatedWorkflow = updateWorkflowWithClient(userClient!!, createdWorkflow, listOf(TEST_HR_BACKEND_ROLE)) - - // getUser should no longer have access - try { - getUserClient?.makeRequest( - "GET", - "$WORKFLOW_ALERTING_BASE_URI/${updatedWorkflow.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected Forbidden exception") - } catch (e: ResponseException) { - assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - deleteUser(getUser) - getUserClient?.close() - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - } - } - - fun `test update workflow with enable filter by with no backend roles`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = randomQueryLevelMonitor(enabled = true) - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf("role2")) - assertNotNull("The monitor was not created", createdMonitor) - - val createdWorkflow = createWorkflowWithClient( - userClient!!, - randomWorkflow(monitorIds = listOf(createdMonitor.id)), - listOf("role2") - ) - - assertNotNull("The workflow was not created", createdWorkflow) - - try { - updateWorkflowWithClient(userClient!!, createdWorkflow, listOf()) - fail("Expected exception since a non-admin user is trying to remove all backend roles from the workflow") - } catch (e: ResponseException) { - assertEquals("Update workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - } - } - - fun `test 
update workflow as admin with enable filter by with no backend roles`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = randomQueryLevelMonitor(enabled = true) - val createdMonitorResponse = createMonitor(monitor, true) - assertNotNull("The monitor was not created", createdMonitorResponse) - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - val workflow = randomWorkflow( - monitorIds = listOf(createdMonitorResponse.id) - ) - - val createdWorkflow = createWorkflowWithClient( - client(), - workflow = workflow, - rbacRoles = listOf(TEST_HR_BACKEND_ROLE) - ) - - assertNotNull("The workflow was not created", createdWorkflow) - - val getWorkflowResponse = userClient?.makeRequest( - "GET", - "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - assertEquals("Get workflow failed", RestStatus.OK, getWorkflowResponse?.restStatus()) - - val updatedWorkflow = updateWorkflowWithClient(client(), createdWorkflow, listOf()) - - try { - userClient?.makeRequest( - "GET", - "$WORKFLOW_ALERTING_BASE_URI/${updatedWorkflow.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected Forbidden exception") - } catch (e: ResponseException) { - assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - } - } - - fun `test update workflow with enable filter by with updating with a permission user has no access to and throw exception`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = randomQueryLevelMonitor(enabled = true) - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) - assertNotNull("The monitor was not created", createdMonitor) - - val createdWorkflow = createWorkflowWithClient( - userClient!!, - workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id)), listOf(TEST_HR_BACKEND_ROLE, "role2") - ) - - assertNotNull("The workflow was not created", createdWorkflow) - - // getUser should have access to the monitor - val getUser = "getUser" - createUserWithTestDataAndCustomRole( - getUser, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf("role2"), - getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) - ) - val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) - .setSocketTimeout(60000).build() - - val getWorkflowResponse = getUserClient?.makeRequest( - "GET", - "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - assertEquals("Get workflow failed", RestStatus.OK, getWorkflowResponse?.restStatus()) - - try { - updateWorkflowWithClient(userClient!!, createdWorkflow, listOf(TEST_HR_BACKEND_ROLE, "role1")) - fail("Expected update workflow to fail as user doesn't have access to 
role1") - } catch (e: ResponseException) { - assertEquals("Update workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - deleteUser(getUser) - getUserClient?.close() - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - } - } - - fun `test update workflow as another user with enable filter by with removing a permission and adding permission`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = randomQueryLevelMonitor(enabled = true) - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE)) - assertNotNull("The monitor was not created", createdMonitor) - - val createdWorkflow = createWorkflowWithClient( - userClient!!, - workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true) - ) - - assertNotNull("The workflow was not created", createdWorkflow) - - // Remove backend role from workflow with new user and add role5 - val updateUser = "updateUser" - createUserWithRoles( - updateUser, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role5"), - false - ) - - val updateUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), updateUser, password) - .setSocketTimeout(60000).build() - val updatedWorkflow = updateWorkflowWithClient(updateUserClient, createdWorkflow, listOf("role5")) - - // old user should no longer have access - try { - userClient?.makeRequest( - "GET", - "$WORKFLOW_ALERTING_BASE_URI/${updatedWorkflow.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected Forbidden exception") - } catch (e: ResponseException) { - assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - deleteUser(updateUser) - updateUserClient?.close() - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - } - } - - fun `test update workflow as admin with enable filter by with removing a permission`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val monitor = randomQueryLevelMonitor(enabled = true) - - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - val createdMonitor = createMonitorWithClient(userClient!!, monitor = monitor, listOf(TEST_HR_BACKEND_ROLE, "role2")) - assertNotNull("The monitor was not created", createdMonitor) - - val createdWorkflow = createWorkflowWithClient( - userClient!!, - workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id)), - listOf(TEST_HR_BACKEND_ROLE, "role2") - ) - assertNotNull("The workflow was not created", createdWorkflow) - - // getUser should have access to the monitor - val getUser = "getUser" - createUserWithTestDataAndCustomRole( - getUser, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf("role1", "role2"), - 
getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) - ) - val getUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), getUser, password) - .setSocketTimeout(60000).build() - - val getWorkflowResponse = getUserClient?.makeRequest( - "GET", - "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - assertEquals("Get workflow failed", RestStatus.OK, getWorkflowResponse?.restStatus()) - - // As admin, replace the workflow's backend roles with "role4" - val updatedWorkflow = updateWorkflowWithClient(client(), createdWorkflow, listOf("role4")) - - // original user should no longer have access - try { - userClient?.makeRequest( - "GET", - "$WORKFLOW_ALERTING_BASE_URI/${updatedWorkflow.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected Forbidden exception") - } catch (e: ResponseException) { - assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf()) - createUserRolesMapping(READALL_AND_MONITOR_ROLE, arrayOf()) - } - - // get user should no longer have access - try { - getUserClient?.makeRequest( - "GET", - "$WORKFLOW_ALERTING_BASE_URI/${updatedWorkflow.id}", - null, - BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json") - ) - fail("Expected Forbidden exception") - } catch (e: ResponseException) { - assertEquals("Get workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - deleteUser(getUser) - getUserClient?.close() - } - } - - fun `test delete workflow with disable filter by`() { - disableFilterBy() - val monitor = randomQueryLevelMonitor(enabled = true) - - val createdMonitor = createMonitor(monitor = monitor) - val createdWorkflow = createWorkflow(workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true)) - - assertNotNull("The workflow was not created", createdWorkflow) - assertTrue("The workflow was not enabled", createdWorkflow.enabled) - - deleteWorkflow(workflow = createdWorkflow, deleteDelegates = true) - - val searchMonitor = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", createdMonitor.id)).toString() - // Verify if the delegate monitors are deleted - // search as "admin" - must get 0 docs - val adminMonitorSearchResponse = client().makeRequest( - "POST", - "$ALERTING_BASE_URI/_search", - emptyMap(), - NStringEntity(searchMonitor, ContentType.APPLICATION_JSON) - ) - assertEquals("Search monitor failed", RestStatus.OK, adminMonitorSearchResponse.restStatus()) - - val adminMonitorHits = createParser( - XContentType.JSON.xContent(), - adminMonitorSearchResponse.entity.content - ).map()["hits"]!! 
as Map<String, Map<String, Any>> - val adminMonitorDocsFound = adminMonitorHits["total"]?.get("value") - assertEquals("Monitor found during search", 0, adminMonitorDocsFound) - - // Verify workflow deletion - try { - client().makeRequest( - "GET", - "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", - emptyMap(), - null - ) - fail("Workflow found during search") - } catch (e: ResponseException) { - assertEquals("Get workflow failed", RestStatus.NOT_FOUND.status, e.response.statusLine.statusCode) - } - } - - fun `test delete workflow with enable filter by`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - val createdMonitor = createMonitorWithClient( - monitor = randomQueryLevelMonitor(), - client = client(), - rbacRoles = listOf("admin") - ) - - assertNotNull("The monitor was not created", createdMonitor) - - val createdWorkflow = createWorkflow(workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true)) - assertNotNull("The workflow was not created", createdWorkflow) - assertTrue("The workflow was not enabled", createdWorkflow.enabled) - - deleteWorkflow(workflow = createdWorkflow, true) - - // Verify underlying delegates deletion - val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", createdMonitor.id)).toString() - // search as "admin" - must get 0 docs - val adminSearchResponse = client().makeRequest( - "POST", - "$ALERTING_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) - - val adminHits = createParser( - XContentType.JSON.xContent(), - adminSearchResponse.entity.content - ).map()["hits"]!! 
as Map<String, Map<String, Any>> - val adminDocsFound = adminHits["total"]?.get("value") - assertEquals("Monitor found during search", 0, adminDocsFound) - - // Verify workflow deletion - try { - client().makeRequest( - "GET", - "$WORKFLOW_ALERTING_BASE_URI/${createdWorkflow.id}", - emptyMap(), - null - ) - fail("Workflow found during search") - } catch (e: ResponseException) { - assertEquals("Get workflow failed", RestStatus.NOT_FOUND.status, e.response.statusLine.statusCode) - } - } - - fun `test delete workflow with enable filter with user that doesn't have delete_monitor cluster privilege failed`() { - enableFilterBy() - if (!isHttps()) { - // if security is disabled and filter by is enabled, we can't create monitor - // refer: `test create monitor with enable filter by` - return - } - createUserWithRoles( - user, - listOf(ALERTING_FULL_ACCESS_ROLE, READALL_AND_MONITOR_ROLE), - listOf(TEST_HR_BACKEND_ROLE, "role2"), - false - ) - - val deleteUser = "deleteUser" - createUserWithTestDataAndCustomRole( - deleteUser, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf("role1", "role3"), - listOf( - getClusterPermissionsFromCustomRole(ALERTING_DELETE_WORKFLOW_ACCESS), - getClusterPermissionsFromCustomRole(ALERTING_GET_WORKFLOW_ACCESS) - ) - ) - val deleteUserClient = SecureRestClientBuilder(clusterHosts.toTypedArray(), isHttps(), deleteUser, password) - .setSocketTimeout(60000).build() - - try { - val createdMonitor = createMonitorWithClient(userClient!!, monitor = randomQueryLevelMonitor()) - - assertNotNull("The monitor was not created", createdMonitor) - - val createdWorkflow = createWorkflowWithClient( - client = userClient!!, - workflow = randomWorkflow(monitorIds = listOf(createdMonitor.id), enabled = true) - ) - assertNotNull("The workflow was not created", createdWorkflow) - assertTrue("The workflow was not enabled", createdWorkflow.enabled) - - try { - deleteWorkflowWithClient(deleteUserClient, workflow = createdWorkflow, true) - fail("Expected Forbidden exception") - } catch (e: ResponseException) { - assertEquals("Delete workflow failed", RestStatus.FORBIDDEN.status, e.response.statusLine.statusCode) - } - patchUserBackendRoles(deleteUser, arrayOf("role2")) - - val response = deleteWorkflowWithClient(deleteUserClient!!, workflow = createdWorkflow, true) - assertEquals("Delete workflow failed", RestStatus.OK, response?.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) - deleteUser(deleteUser) - deleteUserClient?.close() - } - } - - fun `test execute workflow with an user with execute workflow access`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_EXECUTE_WORKFLOW_ACCESS) - ) - - val monitor = createRandomMonitor(true) - val workflow = createRandomWorkflow(listOf(monitor.id), true) - - try { - val executeWorkflowResponse = userClient?.makeRequest( - "POST", - "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}/_execute", - mutableMapOf() - ) - assertEquals("Executing workflow failed", RestStatus.OK, executeWorkflowResponse?.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - fun `test execute workflow with an user without execute workflow access`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) - ) - - val monitor = createRandomMonitor(true) - val workflow = 
createRandomWorkflow(listOf(monitor.id), true) - - try { - userClient?.makeRequest( - "POST", - "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}/_execute", - mutableMapOf() - ) - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("Execute workflow failed", RestStatus.FORBIDDEN, e.response.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - fun `test delete workflow with an user with delete workflow access`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_DELETE_WORKFLOW_ACCESS) - ) - - val monitor = createRandomMonitor(true) - val workflow = createRandomWorkflow(monitorIds = listOf(monitor.id)) - val refresh = true - - try { - val deleteWorkflowResponse = userClient?.makeRequest( - "DELETE", - "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}?refresh=$refresh", - emptyMap(), - monitor.toHttpEntity() - ) - assertEquals("DELETE workflow failed", RestStatus.OK, deleteWorkflowResponse?.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - fun `test delete workflow with deleting delegates with an user with delete workflow access`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_DELETE_WORKFLOW_ACCESS) - ) - - val monitor = createRandomMonitor(true) - val workflow = createRandomWorkflow(monitorIds = listOf(monitor.id)) - - try { - val deleteWorkflowResponse = deleteWorkflowWithClient( - userClient!!, - workflow, - deleteDelegates = true, - refresh = true - ) - assertEquals("DELETE workflow failed", RestStatus.OK, deleteWorkflowResponse?.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - // Verify delegate deletion - val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", monitor.id)).toString() - // search as "admin" - must get 0 docs - val adminSearchResponse = client().makeRequest( - "POST", - "$ALERTING_BASE_URI/_search", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Search monitor failed", RestStatus.OK, adminSearchResponse.restStatus()) - - val adminHits = createParser( - XContentType.JSON.xContent(), - adminSearchResponse.entity.content - ).map()["hits"]!! 
as Map<String, Map<String, Any>> - val adminDocsFound = adminHits["total"]?.get("value") - assertEquals("Monitor found during search", 0, adminDocsFound) - } - - fun `test delete workflow with an user without delete monitor access`() { - createUserWithTestDataAndCustomRole( - user, - TEST_HR_INDEX, - TEST_HR_ROLE, - listOf(TEST_HR_BACKEND_ROLE), - getClusterPermissionsFromCustomRole(ALERTING_NO_ACCESS_ROLE) - ) - - val monitor = createRandomMonitor(true) - val workflow = createRandomWorkflow(monitorIds = listOf(monitor.id)) - - try { - userClient?.makeRequest( - "DELETE", - "$WORKFLOW_ALERTING_BASE_URI/${workflow.id}?refresh=true", - emptyMap(), - monitor.toHttpEntity() - ) - fail("Expected 403 Method FORBIDDEN response") - } catch (e: ResponseException) { - assertEquals("DELETE workflow failed", RestStatus.FORBIDDEN, e.response.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } - - fun `test admin all access with enable filter by`() { - enableFilterBy() - createUserWithTestData(user, TEST_HR_INDEX, TEST_HR_ROLE, TEST_HR_BACKEND_ROLE) - createUserRolesMapping(ALERTING_FULL_ACCESS_ROLE, arrayOf(user)) - try { - // randomMonitor has a dummy user; the API ignores the User passed as part of the monitor and picks up the user info from the logged-in user. - val monitor = randomQueryLevelMonitor().copy( - inputs = listOf( - SearchInput( - indices = listOf(TEST_HR_INDEX), - query = SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) - ) - ) - ) - - val createResponse = userClient?.makeRequest("POST", ALERTING_BASE_URI, emptyMap(), monitor.toHttpEntity()) - assertEquals("Create monitor failed", RestStatus.CREATED, createResponse?.restStatus()) - val monitorJson = JsonXContent.jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - createResponse?.entity?.content - ).map() - val monitorId = monitorJson["_id"] as String - - val workflow = randomWorkflow(monitorIds = listOf(monitorId)) - val createWorkflowResponse = userClient?.makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) - assertEquals("Create workflow failed", RestStatus.CREATED, createWorkflowResponse?.restStatus()) - - val workflowJson = JsonXContent.jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - createWorkflowResponse?.entity?.content - ).map() - - val id: String = workflowJson["_id"] as String - val search = SearchSourceBuilder().query(QueryBuilders.termQuery("_id", id)).toString() - - // get as "admin" - must get 1 doc - val adminGetResponse = client().makeRequest( - "GET", - "$WORKFLOW_ALERTING_BASE_URI/$id", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Get workflow failed", RestStatus.OK, adminGetResponse.restStatus()) - - // delete as "admin" - val adminDeleteResponse = client().makeRequest( - "DELETE", - "$WORKFLOW_ALERTING_BASE_URI/$id", - emptyMap(), - NStringEntity(search, ContentType.APPLICATION_JSON) - ) - assertEquals("Delete workflow failed", RestStatus.OK, adminDeleteResponse.restStatus()) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - deleteRoleMapping(ALERTING_FULL_ACCESS_ROLE) - } - } - - fun `test execute workflow with bucket-level and doc-level chained monitors with user having partial index permissions`() { - createUser(user, arrayOf(TEST_HR_BACKEND_ROLE)) - createTestIndex(TEST_HR_INDEX) - - createIndexRoleWithDocLevelSecurity( - TEST_HR_ROLE, - TEST_HR_INDEX, - TERM_DLS_QUERY, - listOf(ALERTING_INDEX_WORKFLOW_ACCESS, ALERTING_INDEX_MONITOR_ACCESS) 
- ) - createUserRolesMapping(TEST_HR_ROLE, arrayOf(user)) - - // Add a doc that is accessible to the user - indexDoc( - TEST_HR_INDEX, - "1", - """ - { - "test_field": "a", - "accessible": true - } - """.trimIndent() - ) - - // Add a second doc that is not accessible to the user - indexDoc( - TEST_HR_INDEX, - "2", - """ - { - "test_field": "b", - "accessible": false - } - """.trimIndent() - ) - - indexDoc( - TEST_HR_INDEX, - "3", - """ - { - "test_field": "c", - "accessible": true - } - """.trimIndent() - ) - - val compositeSources = listOf( - TermsValuesSourceBuilder("test_field").field("test_field") - ) - val compositeAgg = CompositeAggregationBuilder("composite_agg", compositeSources) - val input = SearchInput( - indices = listOf(TEST_HR_INDEX), - query = SearchSourceBuilder().size(0).query(QueryBuilders.matchAllQuery()).aggregation(compositeAgg) - ) - val triggerScript = """ - params.docCount > 0 - """.trimIndent() - - var trigger = randomBucketLevelTrigger() - trigger = trigger.copy( - bucketSelector = BucketSelectorExtAggregationBuilder( - name = trigger.id, - bucketsPathsMap = mapOf("docCount" to "_count"), - script = Script(triggerScript), - parentBucketPath = "composite_agg", - filter = null - ), - actions = listOf() - ) - val bucketMonitor = createMonitorWithClient( - userClient!!, - randomBucketLevelMonitor( - inputs = listOf(input), - enabled = false, - triggers = listOf(trigger), - dataSources = DataSources(findingsEnabled = true) - ) - ) - assertNotNull("The bucket monitor was not created", bucketMonitor) - - val docQuery1 = DocLevelQuery(query = "test_field:\"a\"", name = "3", fields = listOf()) - var monitor1 = randomDocumentLevelMonitor( - inputs = listOf(DocLevelMonitorInput("description", listOf(TEST_HR_INDEX), listOf(docQuery1))), - triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)) - ) - val docMonitor = createMonitorWithClient(userClient!!, monitor1)!! 
- assertNotNull("The doc level monitor was not created", docMonitor) - - val workflow = randomWorkflow(monitorIds = listOf(bucketMonitor.id, docMonitor.id)) - val workflowResponse = createWorkflowWithClient(userClient!!, workflow) - assertNotNull("The workflow was not created", workflowResponse) - - try { - executeWorkflow(workflowId = workflowResponse.id) - val bucketAlerts = searchAlerts(bucketMonitor) - assertEquals("Incorrect number of alerts", 0, bucketAlerts.size) - - val docAlerts = searchAlerts(docMonitor) - assertEquals("Incorrect number of alerts", 0, docAlerts.size) - } finally { - deleteRoleAndRoleMapping(TEST_HR_ROLE) - } - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/resthandler/WorkflowRestApiIT.kt b/alerting/bin/test/org/opensearch/alerting/resthandler/WorkflowRestApiIT.kt deleted file mode 100644 index 8c073c4b6..000000000 --- a/alerting/bin/test/org/opensearch/alerting/resthandler/WorkflowRestApiIT.kt +++ /dev/null @@ -1,1188 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.resthandler - -import org.junit.Assert -import org.opensearch.alerting.ALWAYS_RUN -import org.opensearch.alerting.AlertingRestTestCase -import org.opensearch.alerting.WORKFLOW_ALERTING_BASE_URI -import org.opensearch.alerting.makeRequest -import org.opensearch.alerting.randomBucketLevelMonitor -import org.opensearch.alerting.randomChainedAlertTrigger -import org.opensearch.alerting.randomDocumentLevelMonitor -import org.opensearch.alerting.randomDocumentLevelTrigger -import org.opensearch.alerting.randomQueryLevelMonitor -import org.opensearch.alerting.randomQueryLevelTrigger -import org.opensearch.alerting.randomUser -import org.opensearch.alerting.randomWorkflow -import org.opensearch.alerting.randomWorkflowWithDelegates -import org.opensearch.client.ResponseException -import org.opensearch.commons.alerting.model.ChainedAlertTrigger -import org.opensearch.commons.alerting.model.ChainedMonitorFindings -import org.opensearch.commons.alerting.model.CompositeInput -import org.opensearch.commons.alerting.model.Delegate -import org.opensearch.commons.alerting.model.DocLevelMonitorInput -import org.opensearch.commons.alerting.model.DocLevelQuery -import org.opensearch.commons.alerting.model.IntervalSchedule -import org.opensearch.commons.alerting.model.Monitor -import org.opensearch.commons.alerting.model.SearchInput -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.core.rest.RestStatus -import org.opensearch.index.query.QueryBuilders -import org.opensearch.script.Script -import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.test.junit.annotations.TestLogging -import java.time.Instant -import java.time.temporal.ChronoUnit -import java.util.Collections -import java.util.Locale -import java.util.UUID - -@TestLogging("level:DEBUG", reason = "Debug for tests.") -@Suppress("UNCHECKED_CAST") -class WorkflowRestApiIT : AlertingRestTestCase() { - - fun `test create workflow success`() { - val index = createTestIndex() - val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(docQuery1) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - 
val monitorResponse = createMonitor(monitor) - - val workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id) - ) - - val createResponse = client().makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) - - assertEquals("Create workflow failed", RestStatus.CREATED, createResponse.restStatus()) - - val responseBody = createResponse.asMap() - val createdId = responseBody["_id"] as String - val createdVersion = responseBody["_version"] as Int - - assertNotEquals("response is missing Id", Workflow.NO_ID, createdId) - assertTrue("incorrect version", createdVersion > 0) - assertEquals("Incorrect Location header", "$WORKFLOW_ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) - } - - fun `test create workflow with different monitor types success`() { - val index = createTestIndex() - val docQuery = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(docQuery) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - val docLevelMonitorResponse = createMonitor(monitor) - - val bucketLevelMonitor = randomBucketLevelMonitor( - inputs = listOf( - SearchInput( - listOf(index), - SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) - .aggregation(TermsAggregationBuilder("test_agg").field("test_field")) - ) - ) - ) - val bucketLevelMonitorResponse = createMonitor(bucketLevelMonitor) - - val workflow = randomWorkflow( - monitorIds = listOf(docLevelMonitorResponse.id, bucketLevelMonitorResponse.id), - triggers = listOf( - randomChainedAlertTrigger(condition = Script("trigger1")), - randomChainedAlertTrigger(condition = Script("trigger2")) - ) - ) - - val createResponse = client().makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) - - assertEquals("Create workflow failed", RestStatus.CREATED, createResponse.restStatus()) - - val responseBody = createResponse.asMap() - val createdId = responseBody["_id"] as String - val createdVersion = responseBody["_version"] as Int - - assertNotEquals("response is missing Id", Workflow.NO_ID, createdId) - assertTrue("incorrect version", createdVersion > 0) - assertEquals("Incorrect Location header", "$WORKFLOW_ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) - - val workflowById = getWorkflow(createdId) - assertNotNull(workflowById) - - // Verify workflow - assertNotEquals("response is missing Id", Monitor.NO_ID, workflowById.id) - assertTrue("incorrect version", workflowById.version > 0) - assertEquals("Workflow name not correct", workflow.name, workflowById.name) - assertEquals("Workflow owner not correct", workflow.owner, workflowById.owner) - assertEquals("Workflow input not correct", workflow.inputs, workflowById.inputs) - - // Delegate verification - @Suppress("UNCHECKED_CAST") - val delegates = (workflowById.inputs as List)[0].sequence.delegates.sortedBy { it.order } - assertEquals("Delegates size not correct", 2, delegates.size) - - val delegate1 = delegates[0] - assertNotNull(delegate1) - assertEquals("Delegate1 order not correct", 1, delegate1.order) - assertEquals("Delegate1 id not correct", docLevelMonitorResponse.id, delegate1.monitorId) - - val delegate2 = delegates[1] - assertNotNull(delegate2) - assertEquals("Delegate2 order not correct", 2, delegate2.order) - assertEquals("Delegate2 id not correct", 
bucketLevelMonitorResponse.id, delegate2.monitorId) - assertEquals( - "Delegate2 Chained finding not correct", docLevelMonitorResponse.id, delegate2.chainedMonitorFindings!!.monitorId - ) - - assertEquals(workflowById.triggers.size, 2) - assertTrue(workflowById.triggers[0] is ChainedAlertTrigger) - assertTrue(workflowById.triggers[1] is ChainedAlertTrigger) - assertTrue((workflowById.triggers[0] as ChainedAlertTrigger).condition == Script("trigger1")) - assertTrue((workflowById.triggers[1] as ChainedAlertTrigger).condition == Script("trigger2")) - } - - fun `test create workflow without delegate failure`() { - val workflow = randomWorkflow( - monitorIds = Collections.emptyList() - ) - try { - createWorkflow(workflow) - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("Delegates list can not be empty.") - ) - } - } - } - - fun `test create workflow duplicate delegate failure`() { - val workflow = randomWorkflow( - monitorIds = listOf("1", "1", "2") - ) - try { - createWorkflow(workflow) - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("Duplicate delegates not allowed") - ) - } - } - } - - fun `test create workflow delegate monitor doesn't exist failure`() { - val index = createTestIndex() - val docQuery = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(docQuery) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - val docLevelMonitorResponse = createMonitor(monitor) - - val workflow = randomWorkflow( - monitorIds = listOf("-1", docLevelMonitorResponse.id) - ) - try { - createWorkflow(workflow) - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("are not valid monitor ids") - ) - } - } - } - - fun `test create workflow sequence order not correct failure`() { - val delegates = listOf( - Delegate(1, "monitor-1"), - Delegate(1, "monitor-2"), - Delegate(2, "monitor-3") - ) - val workflow = randomWorkflowWithDelegates( - delegates = delegates - ) - try { - createWorkflow(workflow) - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("Sequence ordering of delegate monitor shouldn't contain duplicate order values") - ) - } - } - } - - fun `test create workflow chained findings monitor not in sequence failure`() { - val delegates = listOf( - Delegate(1, "monitor-1"), - Delegate(2, "monitor-2", ChainedMonitorFindings("monitor-1")), - Delegate(3, "monitor-3", ChainedMonitorFindings("monitor-x")) - ) - val workflow = randomWorkflowWithDelegates( - delegates = delegates - ) - - try { - createWorkflow(workflow) - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not 
returning IndexWorkflow Action error ", - it.contains("Chained Findings Monitor monitor-x doesn't exist in sequence") - ) - } - } - } - - fun `test create workflow chained findings order not correct failure`() { - val delegates = listOf( - Delegate(1, "monitor-1"), - Delegate(3, "monitor-2", ChainedMonitorFindings("monitor-1")), - Delegate(2, "monitor-3", ChainedMonitorFindings("monitor-2")) - ) - val workflow = randomWorkflowWithDelegates( - delegates = delegates - ) - - try { - createWorkflow(workflow) - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("Chained Findings Monitor monitor-2 should be executed before monitor monitor-3") - ) - } - } - } - - fun `test create workflow when monitor index not initialized failure`() { - val delegates = listOf( - Delegate(1, "monitor-1") - ) - val workflow = randomWorkflowWithDelegates( - delegates = delegates - ) - - try { - createWorkflow(workflow) - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.NOT_FOUND, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("Monitors not found") - ) - } - } - } - - fun `test create workflow delegate and chained finding monitor different indices failure`() { - val index = randomAlphaOfLength(10).lowercase(Locale.ROOT) - createTestIndex(index) - - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val docMonitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - val docMonitorResponse = createMonitor(docMonitor) - - val index1 = "$index-1" - createTestIndex(index1) - - val docLevelInput1 = DocLevelMonitorInput( - "description", listOf(index1), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) - ) - - val docMonitor1 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput1), - triggers = listOf(trigger) - ) - val docMonitorResponse1 = createMonitor(docMonitor1) - - val workflow = randomWorkflow( - monitorIds = listOf(docMonitorResponse1.id, docMonitorResponse.id) - ) - try { - createWorkflow(workflow) - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("doesn't query all of chained findings monitor's indices") - ) - } - } - } - - fun `test create workflow query monitor chained findings monitor failure`() { - val index = createTestIndex() - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val docMonitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - val docMonitorResponse = createMonitor(docMonitor) - - val queryMonitor = randomQueryLevelMonitor() - val queryMonitorResponse = createMonitor(queryMonitor) - - val workflow = randomWorkflow( - monitorIds = listOf(queryMonitorResponse.id, docMonitorResponse.id) - ) - try { - createWorkflow(workflow) - } catch (e: 
ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("Query level monitor can't be part of chained findings") - ) - } - } - } - - fun `test create workflow with 26 delegates failure`() { - val monitorsIds = mutableListOf() - for (i in 0..25) { - monitorsIds.add(UUID.randomUUID().toString()) - } - val workflow = randomWorkflow( - monitorIds = monitorsIds - ) - try { - createWorkflow(workflow) - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("Delegates list can not be larger then 25.") - ) - } - } - } - - fun `test update workflow add monitor success`() { - val index = createTestIndex() - val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(docQuery1) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - val monitorResponse = createMonitor(monitor) - - val workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id) - ) - - val createResponse = client().makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) - - assertEquals("Create workflow failed", RestStatus.CREATED, createResponse.restStatus()) - - val responseBody = createResponse.asMap() - val createdId = responseBody["_id"] as String - val createdVersion = responseBody["_version"] as Int - - assertNotEquals("response is missing Id", Workflow.NO_ID, createdId) - assertTrue("incorrect version", createdVersion > 0) - assertEquals("Incorrect Location header", "$WORKFLOW_ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) - - val monitor2 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - - val monitorResponse2 = createMonitor(monitor2) - - val updatedWorkflow = randomWorkflow( - id = createdId, - monitorIds = listOf(monitorResponse.id, monitorResponse2.id) - ) - - val updateResponse = client().makeRequest("PUT", updatedWorkflow.relativeUrl(), emptyMap(), updatedWorkflow.toHttpEntity()) - - assertEquals("Update workflow failed", RestStatus.OK, updateResponse.restStatus()) - - val updateResponseBody = updateResponse.asMap() - val updatedId = updateResponseBody["_id"] as String - val updatedVersion = updateResponseBody["_version"] as Int - - assertNotEquals("response is missing Id", Workflow.NO_ID, updatedId) - assertTrue("incorrect version", updatedVersion > 0) - - val workflowById = getWorkflow(updatedId) - assertNotNull(workflowById) - // Delegate verification - @Suppress("UNCHECKED_CAST") - val delegates = (workflowById.inputs as List)[0].sequence.delegates.sortedBy { it.order } - assertEquals("Delegates size not correct", 2, delegates.size) - - val delegate1 = delegates[0] - assertNotNull(delegate1) - assertEquals("Delegate1 order not correct", 1, delegate1.order) - assertEquals("Delegate1 id not correct", monitorResponse.id, delegate1.monitorId) - - val delegate2 = delegates[1] - assertNotNull(delegate2) - assertEquals("Delegate2 order not correct", 2, delegate2.order) - assertEquals("Delegate2 id not correct", monitorResponse2.id, delegate2.monitorId) - 
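// The delegate assertions above and below capture the workflow contract: when a workflow is built
// from a monitorIds list, delegate order follows list position and each delegate after the first
// chains the findings of its predecessor. Expressed directly (illustrative, mirroring the
// Delegate/ChainedMonitorFindings usage elsewhere in this file):
val expectedSequence = listOf(
    Delegate(1, monitorResponse.id),                                              // runs first
    Delegate(2, monitorResponse2.id, ChainedMonitorFindings(monitorResponse.id))  // consumes delegate 1's findings
)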
assertEquals( - "Delegate2 Chained finding not correct", monitorResponse.id, delegate2.chainedMonitorFindings!!.monitorId - ) - } - - fun `test update workflow remove monitor success`() { - val index = createTestIndex() - val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(docQuery1) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - val monitorResponse = createMonitor(monitor) - - val monitor2 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - - val monitorResponse2 = createMonitor(monitor2) - - val workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id, monitorResponse2.id) - ) - - val createResponse = client().makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) - - assertEquals("Create workflow failed", RestStatus.CREATED, createResponse.restStatus()) - - val responseBody = createResponse.asMap() - val createdId = responseBody["_id"] as String - val createdVersion = responseBody["_version"] as Int - - assertNotEquals("response is missing Id", Workflow.NO_ID, createdId) - assertTrue("incorrect version", createdVersion > 0) - assertEquals("Incorrect Location header", "$WORKFLOW_ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) - - var workflowById = getWorkflow(createdId) - assertNotNull(workflowById) - // Delegate verification - @Suppress("UNCHECKED_CAST") - var delegates = (workflowById.inputs as List)[0].sequence.delegates.sortedBy { it.order } - assertEquals("Delegates size not correct", 2, delegates.size) - - val updatedWorkflow = randomWorkflow( - id = createdId, - monitorIds = listOf(monitorResponse.id) - ) - - val updateResponse = client().makeRequest("PUT", updatedWorkflow.relativeUrl(), emptyMap(), updatedWorkflow.toHttpEntity()) - - assertEquals("Update workflow failed", RestStatus.OK, updateResponse.restStatus()) - - val updateResponseBody = updateResponse.asMap() - val updatedId = updateResponseBody["_id"] as String - val updatedVersion = updateResponseBody["_version"] as Int - - assertNotEquals("response is missing Id", Workflow.NO_ID, updatedId) - assertTrue("incorrect version", updatedVersion > 0) - - workflowById = getWorkflow(updatedId) - assertNotNull(workflowById) - // Delegate verification - @Suppress("UNCHECKED_CAST") - delegates = (workflowById.inputs as List)[0].sequence.delegates.sortedBy { it.order } - assertEquals("Delegates size not correct", 1, delegates.size) - - val delegate1 = delegates[0] - assertNotNull(delegate1) - assertEquals("Delegate1 order not correct", 1, delegate1.order) - assertEquals("Delegate1 id not correct", monitorResponse.id, delegate1.monitorId) - } - - fun `test update workflow change order of delegate monitors`() { - val index = createTestIndex() - val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(docQuery1) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val monitor1 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - - val monitor2 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - - val monitorResponse1 = 
createMonitor(monitor1) - val monitorResponse2 = createMonitor(monitor2) - - val workflow = randomWorkflow( - monitorIds = listOf(monitorResponse1.id, monitorResponse2.id) - ) - - val workflowResponse = createWorkflow(workflow) - assertNotNull("Workflow creation failed", workflowResponse) - assertNotNull(workflow) - assertNotEquals("response is missing Id", Monitor.NO_ID, workflowResponse.id) - - var workflowById = getWorkflow(workflowResponse.id) - assertNotNull(workflowById) - - val updatedWorkflowResponse = updateWorkflow( - randomWorkflow( - id = workflowById.id, - monitorIds = listOf(monitorResponse2.id, monitorResponse1.id) - ) - ) - - assertNotNull("Workflow creation failed", updatedWorkflowResponse) - assertNotNull(updatedWorkflowResponse) - assertEquals( - "Workflow id changed", - workflowResponse.id, - updatedWorkflowResponse.id - ) - assertTrue("incorrect version", updatedWorkflowResponse.version > 0) - - workflowById = getWorkflow(updatedWorkflowResponse.id) - - // Verify workflow - assertNotEquals("response is missing Id", Monitor.NO_ID, workflowById.id) - assertTrue("incorrect version", workflowById.version > 0) - assertEquals( - "Workflow name not correct", - updatedWorkflowResponse.name, - workflowById.name - ) - assertEquals( - "Workflow owner not correct", - updatedWorkflowResponse.owner, - workflowById.owner - ) - assertEquals( - "Workflow input not correct", - updatedWorkflowResponse.inputs, - workflowById.inputs - ) - - // Delegate verification - @Suppress("UNCHECKED_CAST") - val delegates = (workflowById.inputs as List)[0].sequence.delegates.sortedBy { it.order } - assertEquals("Delegates size not correct", 2, delegates.size) - - val delegate1 = delegates[0] - assertNotNull(delegate1) - assertEquals("Delegate1 order not correct", 1, delegate1.order) - assertEquals("Delegate1 id not correct", monitorResponse2.id, delegate1.monitorId) - - val delegate2 = delegates[1] - assertNotNull(delegate2) - assertEquals("Delegate2 order not correct", 2, delegate2.order) - assertEquals("Delegate2 id not correct", monitorResponse1.id, delegate2.monitorId) - assertEquals( - "Delegate2 Chained finding not correct", monitorResponse2.id, delegate2.chainedMonitorFindings!!.monitorId - ) - } - - fun `test update workflow doesn't exist failure`() { - val index = createTestIndex() - val docQuery1 = DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(docQuery1) - ) - val monitor1 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(randomDocumentLevelTrigger(condition = ALWAYS_RUN)) - ) - - val monitorResponse1 = createMonitor(monitor1) - - val workflow = randomWorkflow( - monitorIds = listOf(monitorResponse1.id) - ) - val workflowResponse = createWorkflow(workflow) - assertNotNull("Workflow creation failed", workflowResponse) - - try { - updateWorkflow(workflow.copy(id = "testId")) - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.NOT_FOUND, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not returning GetWorkflow Action error ", - it.contains("Workflow with testId is not found") - ) - } - } - val updatedWorkflow = updateWorkflow(workflowResponse.copy(enabled = true, enabledTime = Instant.now())) - assertNotNull(updatedWorkflow) - val getWorkflow = getWorkflow(workflowId = updatedWorkflow.id) - assertTrue(getWorkflow.enabled) - } - - fun `test update workflow duplicate delegate failure`() { - val 
index = createTestIndex() - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - val monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - - val monitorResponse = createMonitor(monitor) - - var workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id) - ) - - val workflowResponse = createWorkflow(workflow) - assertNotNull("Workflow creation failed", workflowResponse) - - workflow = randomWorkflow( - id = workflowResponse.id, - monitorIds = listOf("1", "1", "2") - ) - try { - updateWorkflow(workflow) - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("Duplicate delegates not allowed") - ) - } - } - } - - fun `test update workflow delegate monitor doesn't exist failure`() { - val index = createTestIndex() - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - val monitorResponse = createMonitor(monitor) - - var workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id) - ) - val workflowResponse = createWorkflow(workflow) - assertNotNull("Workflow creation failed", workflowResponse) - - workflow = randomWorkflow( - id = workflowResponse.id, - monitorIds = listOf("-1", monitorResponse.id) - ) - - try { - updateWorkflow(workflow) - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("are not valid monitor ids") - ) - } - } - } - - fun `test update workflow sequence order not correct failure`() { - val index = createTestIndex() - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - val monitorResponse = createMonitor(monitor) - - var workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id) - ) - val workflowResponse = createWorkflow(workflow) - assertNotNull("Workflow creation failed", workflowResponse) - - val delegates = listOf( - Delegate(1, "monitor-1"), - Delegate(1, "monitor-2"), - Delegate(2, "monitor-3") - ) - workflow = randomWorkflowWithDelegates( - id = workflowResponse.id, - delegates = delegates - ) - try { - updateWorkflow(workflow) - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("Sequence ordering of delegate monitor shouldn't contain duplicate order values") - ) - } - } - } - - fun `test update workflow chained findings monitor not in sequence failure`() { - val index = createTestIndex() - val docLevelInput = DocLevelMonitorInput( - 
"description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - val monitorResponse = createMonitor(monitor) - - var workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id) - ) - val workflowResponse = createWorkflow(workflow) - assertNotNull("Workflow creation failed", workflowResponse) - - val delegates = listOf( - Delegate(1, "monitor-1"), - Delegate(2, "monitor-2", ChainedMonitorFindings("monitor-1")), - Delegate(3, "monitor-3", ChainedMonitorFindings("monitor-x")) - ) - workflow = randomWorkflowWithDelegates( - id = workflowResponse.id, - delegates = delegates - ) - - try { - updateWorkflow(workflow) - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("Chained Findings Monitor monitor-x doesn't exist in sequence") - ) - } - } - } - - fun `test update workflow chained findings order not correct failure`() { - val index = createTestIndex() - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(DocLevelQuery(query = "source.ip.v6.v1:12345", name = "3", fields = listOf())) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger) - ) - val monitorResponse = createMonitor(monitor) - - var workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id) - ) - val workflowResponse = createWorkflow(workflow) - assertNotNull("Workflow creation failed", workflowResponse) - - val delegates = listOf( - Delegate(1, "monitor-1"), - Delegate(3, "monitor-2", ChainedMonitorFindings("monitor-1")), - Delegate(2, "monitor-3", ChainedMonitorFindings("monitor-2")) - ) - workflow = randomWorkflowWithDelegates( - id = workflowResponse.id, - delegates = delegates - ) - - try { - updateWorkflow(workflow) - } catch (e: ResponseException) { - assertEquals("Unexpected status", RestStatus.BAD_REQUEST, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not returning IndexWorkflow Action error ", - it.contains("Chained Findings Monitor monitor-2 should be executed before monitor monitor-3") - ) - } - } - } - - @Throws(Exception::class) - fun `test getting a workflow`() { - val query = randomQueryLevelMonitor() - val monitor = createMonitor(query) - val storedMonitor = getMonitor(monitor.id) - - assertEquals("Indexed and retrieved monitor differ", monitor, storedMonitor) - - val workflow = createRandomWorkflow(monitorIds = listOf(monitor.id)) - - val storedWorkflow = getWorkflow(workflow.id) - - assertEquals("Indexed and retrieved workflow differ", workflow.id, storedWorkflow.id) - val delegates = (storedWorkflow.inputs[0] as CompositeInput).sequence.delegates - assertEquals("Delegate list not correct", 1, delegates.size) - assertEquals("Delegate order id not correct", 1, delegates[0].order) - assertEquals("Delegate id list not correct", monitor.id, delegates[0].monitorId) - } - - @Throws(Exception::class) - fun `test getting a workflow that doesn't exist`() { - try { - getWorkflow(randomAlphaOfLength(20)) - fail("expected response exception") - } catch (e: ResponseException) { - assertEquals(RestStatus.NOT_FOUND, 
e.response.restStatus()) - } - } - - fun `test delete workflow`() { - val query = randomQueryLevelMonitor() - val monitor = createMonitor(query) - - val workflowRequest = randomWorkflow( - monitorIds = listOf(monitor.id) - ) - val workflowResponse = createWorkflow(workflowRequest) - val workflowId = workflowResponse.id - val getWorkflowResponse = getWorkflow(workflowResponse.id) - - assertNotNull(getWorkflowResponse) - assertEquals(workflowId, getWorkflowResponse.id) - - client().makeRequest("DELETE", getWorkflowResponse.relativeUrl()) - - // Verify that the workflow is deleted - try { - getWorkflow(workflowId) - } catch (e: ResponseException) { - assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not returning GetWorkflow Action error ", - it.contains("Workflow not found.") - ) - } - } - } - - fun `test delete workflow delete delegate monitors`() { - val query = randomQueryLevelMonitor() - val monitor = createMonitor(query) - - val workflowRequest = randomWorkflow( - monitorIds = listOf(monitor.id) - ) - val workflowResponse = createWorkflow(workflowRequest) - val workflowId = workflowResponse.id - val getWorkflowResponse = getWorkflow(workflowResponse.id) - - assertNotNull(getWorkflowResponse) - assertEquals(workflowId, getWorkflowResponse.id) - - client().makeRequest("DELETE", getWorkflowResponse.relativeUrl().plus("?deleteDelegateMonitors=true")) - - // Verify that the workflow is deleted - try { - getWorkflow(workflowId) - } catch (e: ResponseException) { - assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not returning GetWorkflow Action error ", - it.contains("Workflow not found.") - ) - } - } - - // Verify that delegate monitor is deleted - try { - getMonitor(monitor.id) - } catch (e: ResponseException) { - assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not returning GetWorkflow Action error ", - it.contains("Monitor not found.") - ) - } - } - } - - fun `test delete workflow preserve delegate monitors`() { - val query = randomQueryLevelMonitor() - val monitor = createMonitor(query) - - val workflowRequest = randomWorkflow( - monitorIds = listOf(monitor.id) - ) - val workflowResponse = createWorkflow(workflowRequest) - val workflowId = workflowResponse.id - val getWorkflowResponse = getWorkflow(workflowResponse.id) - - assertNotNull(getWorkflowResponse) - assertEquals(workflowId, getWorkflowResponse.id) - - client().makeRequest("DELETE", getWorkflowResponse.relativeUrl().plus("?deleteDelegateMonitors=false")) - - // Verify that the workflow is deleted - try { - getWorkflow(workflowId) - } catch (e: ResponseException) { - assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) - e.message?.let { - assertTrue( - "Exception not returning GetWorkflow Action error ", - it.contains("Workflow not found.") - ) - } - } - - // Verify that delegate monitor is not deleted - val delegateMonitor = getMonitor(monitor.id) - assertNotNull(delegateMonitor) - } - - @Throws(Exception::class) - fun `test deleting a workflow that doesn't exist`() { - try { - client().makeRequest("DELETE", "$WORKFLOW_ALERTING_BASE_URI/foobarbaz") - fail("expected 404 ResponseException") - } catch (e: ResponseException) { - assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) - } - } - - fun `test chained alerts and audit alerts for workflows with query level monitor`() { - val index = createTestIndex() - val docQuery1 = DocLevelQuery(query = 
"test_field:\"test_value_1\"", name = "3", fields = listOf()) - val docLevelInput1 = DocLevelMonitorInput("description", listOf(index), listOf(docQuery1)) - val trigger1 = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - var monitor1 = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput1), - triggers = listOf(trigger1), - enabled = false - ) - val monitorResponse = createMonitor(monitor1)!! - var monitor2 = randomQueryLevelMonitor( - triggers = listOf(randomQueryLevelTrigger(condition = Script("return true"))), - enabled = false - ) - - val monitorResponse2 = createMonitor(monitor2)!! - val andTrigger = randomChainedAlertTrigger( - name = "1And2", - condition = Script("monitor[id=${monitorResponse.id}] && monitor[id=${monitorResponse2.id}]") - ) - - val workflow = Workflow( - id = "", - version = 2, - name = "test", - enabled = false, - schedule = IntervalSchedule(5, ChronoUnit.MINUTES), - lastUpdateTime = Instant.now(), - enabledTime = null, - workflowType = Workflow.WorkflowType.COMPOSITE, - user = randomUser(), - schemaVersion = -1, - inputs = listOf( - CompositeInput( - org.opensearch.commons.alerting.model.Sequence( - delegates = listOf( - Delegate(1, monitorResponse.id), - Delegate(2, monitorResponse2.id) - ) - ) - ) - ), - owner = "alerting", - triggers = listOf(andTrigger) - ) - val workflowById = createWorkflow(workflow) - assertNotNull(workflowById) - val workflowId = workflowById.id - - insertSampleTimeSerializedData( - index, - listOf( - "test_value_1" - ) - ) - val searchMonitorResponse = searchMonitors() - logger.error(searchMonitorResponse) - val jobsList = searchMonitorResponse.hits.toList() - var numMonitors = 0 - var numWorkflows = 0 - jobsList.forEach { - val map = it.sourceAsMap - if (map["type"] == "workflow") numWorkflows++ - else if (map["type"] == "monitor") numMonitors++ - } - Assert.assertEquals(numMonitors, 2) - Assert.assertEquals(numWorkflows, 1) - val response = executeWorkflow(workflowId = workflowId, params = emptyMap()) - val executeWorkflowResponse = entityAsMap(response) - logger.info(executeWorkflowResponse) - val executionId = executeWorkflowResponse["execution_id"] - Assert.assertTrue(executeWorkflowResponse.containsKey("trigger_results")) - val workflowTriggerResults = executeWorkflowResponse["trigger_results"] as Map - assertEquals(workflowTriggerResults.size, 1) - assertTrue( - (workflowTriggerResults[andTrigger.id] as Map)["triggered"] as Boolean - ) - val res = getWorkflowAlerts(workflowId = workflowId, getAssociatedAlerts = true) - val getWorkflowAlerts = entityAsMap(res) - Assert.assertTrue(getWorkflowAlerts.containsKey("alerts")) - Assert.assertTrue(getWorkflowAlerts.containsKey("associatedAlerts")) - val alerts = getWorkflowAlerts["alerts"] as List> - assertEquals(alerts.size, 1) - Assert.assertEquals(alerts[0]["execution_id"], executionId) - Assert.assertEquals(alerts[0]["workflow_id"], workflowId) - Assert.assertEquals(alerts[0]["monitor_id"], "") - val associatedAlerts = getWorkflowAlerts["associatedAlerts"] as List> - assertEquals(associatedAlerts.size, 2) - - val res1 = getWorkflowAlerts(workflowId = workflowId, alertId = alerts[0]["id"].toString(), getAssociatedAlerts = true) - val getWorkflowAlerts1 = entityAsMap(res1) - Assert.assertTrue(getWorkflowAlerts1.containsKey("alerts")) - Assert.assertTrue(getWorkflowAlerts1.containsKey("associatedAlerts")) - val alerts1 = getWorkflowAlerts1["alerts"] as List> - assertEquals(alerts1.size, 1) - Assert.assertEquals(alerts1[0]["execution_id"], executionId) - 
Assert.assertEquals(alerts1[0]["workflow_id"], workflowId) - Assert.assertEquals(alerts1[0]["monitor_id"], "") - val associatedAlerts1 = getWorkflowAlerts1["associatedAlerts"] as List> - assertEquals(associatedAlerts1.size, 2) - - val getAlertsRes = getAlerts() - val getAlertsMap = getAlertsRes.asMap() - Assert.assertTrue(getAlertsMap.containsKey("alerts")) - val getAlertsAlerts = (getAlertsMap["alerts"] as ArrayList>) - assertEquals(getAlertsAlerts.size, 1) - Assert.assertEquals(getAlertsAlerts[0]["execution_id"], executionId) - Assert.assertEquals(getAlertsAlerts[0]["workflow_id"], workflowId) - Assert.assertEquals(getAlertsAlerts[0]["monitor_id"], "") - Assert.assertEquals(getAlertsAlerts[0]["id"], alerts1[0]["id"]) - - val ackRes = acknowledgeChainedAlerts(workflowId, alerts1[0]["id"].toString()) - val acknowledgeChainedAlertsResponse = entityAsMap(ackRes) - val acknowledged = acknowledgeChainedAlertsResponse["success"] as List - Assert.assertEquals(acknowledged[0], alerts1[0]["id"]) - } - - fun `test run workflow as scheduled job success`() { - val index = createTestIndex() - val docQuery1 = DocLevelQuery(query = "test_field:\"us-west-2\"", name = "3", fields = listOf()) - val docLevelInput = DocLevelMonitorInput( - "description", listOf(index), listOf(docQuery1) - ) - val trigger = randomDocumentLevelTrigger(condition = ALWAYS_RUN) - - val monitor = randomDocumentLevelMonitor( - inputs = listOf(docLevelInput), - triggers = listOf(trigger), - enabled = false - ) - val monitorResponse = createMonitor(monitor) - - val workflow = randomWorkflow( - monitorIds = listOf(monitorResponse.id), - enabled = true, - schedule = IntervalSchedule(1, ChronoUnit.MINUTES) - ) - - val createResponse = client().makeRequest("POST", WORKFLOW_ALERTING_BASE_URI, emptyMap(), workflow.toHttpEntity()) - - assertEquals("Create workflow failed", RestStatus.CREATED, createResponse.restStatus()) - - val responseBody = createResponse.asMap() - val createdId = responseBody["_id"] as String - val createdVersion = responseBody["_version"] as Int - - assertNotEquals("response is missing Id", Workflow.NO_ID, createdId) - assertTrue("incorrect version", createdVersion > 0) - assertEquals("Incorrect Location header", "$WORKFLOW_ALERTING_BASE_URI/$createdId", createResponse.getHeader("Location")) - - val testDoc = """{ - "message" : "This is an error from IAD region", - "test_field" : "us-west-2" - }""" - - indexDoc(index, "1", testDoc) - Thread.sleep(80000) - - val findings = searchFindings(monitor.copy(id = monitorResponse.id)) - assertEquals("Findings saved for test monitor", 1, findings.size) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/settings/AlertingSettingsTests.kt b/alerting/bin/test/org/opensearch/alerting/settings/AlertingSettingsTests.kt deleted file mode 100644 index 6ee8c4997..000000000 --- a/alerting/bin/test/org/opensearch/alerting/settings/AlertingSettingsTests.kt +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.settings - -import org.junit.Before -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.core.settings.LegacyOpenDistroScheduledJobSettings -import org.opensearch.alerting.core.settings.ScheduledJobSettings -import org.opensearch.common.settings.Settings -import org.opensearch.common.unit.TimeValue -import org.opensearch.test.OpenSearchTestCase -import java.util.concurrent.TimeUnit - -class AlertingSettingsTests : OpenSearchTestCase() { - - private 
lateinit var plugin: AlertingPlugin - - @Before - fun setup() { - plugin = AlertingPlugin() - } - - fun `test all opendistro settings returned`() { - val settings = plugin.settings - assertTrue( - "Legacy Settings are not returned", - settings.containsAll( - listOf( - LegacyOpenDistroAlertingSettings.ALERTING_MAX_MONITORS, - LegacyOpenDistroAlertingSettings.INPUT_TIMEOUT, - LegacyOpenDistroAlertingSettings.INDEX_TIMEOUT, - LegacyOpenDistroAlertingSettings.BULK_TIMEOUT, - LegacyOpenDistroAlertingSettings.ALERT_BACKOFF_MILLIS, - LegacyOpenDistroAlertingSettings.ALERT_BACKOFF_COUNT, - LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_MILLIS, - LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_COUNT, - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_ENABLED, - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD, - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE, - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_MAX_DOCS, - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_RETENTION_PERIOD, - LegacyOpenDistroAlertingSettings.REQUEST_TIMEOUT, - LegacyOpenDistroAlertingSettings.MAX_ACTION_THROTTLE_VALUE, - LegacyOpenDistroAlertingSettings.FILTER_BY_BACKEND_ROLES, - LegacyOpenDistroScheduledJobSettings.SWEEP_PERIOD, - LegacyOpenDistroScheduledJobSettings.SWEEP_PAGE_SIZE, - LegacyOpenDistroScheduledJobSettings.SWEEP_BACKOFF_RETRY_COUNT, - LegacyOpenDistroScheduledJobSettings.SWEEP_BACKOFF_MILLIS, - LegacyOpenDistroScheduledJobSettings.SWEEPER_ENABLED, - LegacyOpenDistroScheduledJobSettings.REQUEST_TIMEOUT - ) - ) - ) - } - - fun `test all opensearch settings returned`() { - val settings = plugin.settings - assertTrue( - "Opensearch settings not returned", - settings.containsAll( - listOf( - DestinationSettings.ALLOW_LIST, - DestinationSettings.HOST_DENY_LIST, - AlertingSettings.ALERTING_MAX_MONITORS, - AlertingSettings.INPUT_TIMEOUT, - AlertingSettings.INDEX_TIMEOUT, - AlertingSettings.BULK_TIMEOUT, - AlertingSettings.ALERT_BACKOFF_MILLIS, - AlertingSettings.ALERT_BACKOFF_COUNT, - AlertingSettings.MOVE_ALERTS_BACKOFF_MILLIS, - AlertingSettings.MOVE_ALERTS_BACKOFF_COUNT, - AlertingSettings.ALERT_HISTORY_ENABLED, - AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD, - AlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE, - AlertingSettings.ALERT_HISTORY_MAX_DOCS, - AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD, - AlertingSettings.REQUEST_TIMEOUT, - AlertingSettings.MAX_ACTION_THROTTLE_VALUE, - AlertingSettings.FILTER_BY_BACKEND_ROLES, - ScheduledJobSettings.SWEEP_PERIOD, - ScheduledJobSettings.SWEEP_PAGE_SIZE, - ScheduledJobSettings.SWEEP_BACKOFF_RETRY_COUNT, - ScheduledJobSettings.SWEEP_BACKOFF_MILLIS, - ScheduledJobSettings.SWEEPER_ENABLED, - ScheduledJobSettings.REQUEST_TIMEOUT - ) - ) - ) - } - - fun `test opendistro settings fallback`() { - assertEquals( - AlertingSettings.MOVE_ALERTS_BACKOFF_COUNT.get(Settings.EMPTY), - LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_COUNT.get(Settings.EMPTY) - ) - assertEquals( - ScheduledJobSettings.REQUEST_TIMEOUT.get(Settings.EMPTY), - LegacyOpenDistroScheduledJobSettings.REQUEST_TIMEOUT.get(Settings.EMPTY) - ) - } - - fun `test settings get Value`() { - val settings = Settings.builder().put("plugins.alerting.move_alerts_backoff_count", 1).build() - assertEquals(AlertingSettings.MOVE_ALERTS_BACKOFF_COUNT.get(settings), 1) - assertEquals(LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_COUNT.get(settings), 3) - val scheduledJobSettings = Settings.builder().put("plugins.scheduled_jobs.enabled", false).build() - 
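// The fallback behavior asserted above comes from how each plugins.* setting is declared: the legacy
// opendistro.* Setting is passed as the fallback, so reads consult the new key first, then the
// legacy key, then the default. A minimal sketch of that declaration pattern (assumed, not the
// verbatim source):
val moveAlertsBackoffCount: Setting<Int> = Setting.intSetting(
    "plugins.alerting.move_alerts_backoff_count",
    LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_COUNT,  // legacy setting acts as fallback
    Setting.Property.NodeScope,
    Setting.Property.Dynamic
)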
assertEquals(ScheduledJobSettings.SWEEPER_ENABLED.get(scheduledJobSettings), false) - assertEquals(LegacyOpenDistroScheduledJobSettings.SWEEPER_ENABLED.get(scheduledJobSettings), true) - } - - fun `test settings get value with legacy Fallback`() { - val settings = Settings.builder() - .put("opendistro.alerting.monitor.max_monitors", 1000) - .put("opendistro.alerting.input_timeout", TimeValue.timeValueSeconds(30)) - .put("opendistro.alerting.index_timeout", TimeValue.timeValueSeconds(60)) - .put("opendistro.alerting.bulk_timeout", TimeValue.timeValueSeconds(120)) - .put("opendistro.alerting.alert_backoff_millis", TimeValue.timeValueMillis(50)) - .put("opendistro.alerting.alert_backoff_count", 2) - .put("opendistro.alerting.move_alerts_backoff_millis", TimeValue.timeValueMillis(250)) - .put("opendistro.alerting.move_alerts_backoff_count", 3) - .put("opendistro.alerting.alert_history_enabled", true) - .put("opendistro.alerting.alert_history_rollover_period", TimeValue.timeValueHours(12)) - .put("opendistro.alerting.alert_history_max_age", TimeValue(30, TimeUnit.DAYS)) - .put("opendistro.alerting.alert_history_max_docs", 1000L) - .put("opendistro.alerting.alert_history_retention_period", TimeValue(60, TimeUnit.DAYS)) - .put("opendistro.alerting.request_timeout", TimeValue.timeValueSeconds(10)) - .put("opendistro.alerting.action_throttle_max_value", TimeValue.timeValueHours(24)) - .put("opendistro.alerting.filter_by_backend_roles", false) - .put("opendistro.scheduled_jobs.enabled", false) - .put("opendistro.scheduled_jobs.request_timeout", TimeValue.timeValueSeconds(10)) - .put("opendistro.scheduled_jobs.sweeper.backoff_millis", TimeValue.timeValueMillis(50)) - .put("opendistro.scheduled_jobs.retry_count", 3) - .put("opendistro.scheduled_jobs.sweeper.period", TimeValue.timeValueMinutes(5)) - .put("opendistro.scheduled_jobs.sweeper.page_size", 100).build() - - assertEquals(AlertingSettings.ALERTING_MAX_MONITORS.get(settings), 1000) - assertEquals(AlertingSettings.INPUT_TIMEOUT.get(settings), TimeValue.timeValueSeconds(30)) - assertEquals(AlertingSettings.INDEX_TIMEOUT.get(settings), TimeValue.timeValueSeconds(60)) - assertEquals(AlertingSettings.BULK_TIMEOUT.get(settings), TimeValue.timeValueSeconds(120)) - assertEquals(AlertingSettings.ALERT_BACKOFF_MILLIS.get(settings), TimeValue.timeValueMillis(50)) - assertEquals(AlertingSettings.ALERT_BACKOFF_COUNT.get(settings), 2) - assertEquals(AlertingSettings.MOVE_ALERTS_BACKOFF_MILLIS.get(settings), TimeValue.timeValueMillis(250)) - assertEquals(AlertingSettings.MOVE_ALERTS_BACKOFF_COUNT.get(settings), 3) - assertEquals(AlertingSettings.ALERT_HISTORY_ENABLED.get(settings), true) - assertEquals(AlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD.get(settings), TimeValue.timeValueHours(12)) - assertEquals(AlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE.get(settings), TimeValue(30, TimeUnit.DAYS)) - assertEquals(AlertingSettings.ALERT_HISTORY_MAX_DOCS.get(settings), 1000L) - assertEquals(AlertingSettings.ALERT_HISTORY_RETENTION_PERIOD.get(settings), TimeValue(60, TimeUnit.DAYS)) - assertEquals(AlertingSettings.REQUEST_TIMEOUT.get(settings), TimeValue.timeValueSeconds(10)) - assertEquals(AlertingSettings.MAX_ACTION_THROTTLE_VALUE.get(settings), TimeValue.timeValueHours(24)) - assertEquals(AlertingSettings.FILTER_BY_BACKEND_ROLES.get(settings), false) - assertEquals(ScheduledJobSettings.SWEEPER_ENABLED.get(settings), false) - assertEquals(ScheduledJobSettings.REQUEST_TIMEOUT.get(settings), TimeValue.timeValueSeconds(10)) - 
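// Note: resolving values through legacy opendistro.* keys (as above) still works but marks each
// legacy Setting as used and deprecated; the test framework requires those deprecations to be
// acknowledged, which the assertSettingDeprecationsAndWarnings(...) call at the end of this test does.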
assertEquals(ScheduledJobSettings.SWEEP_BACKOFF_MILLIS.get(settings), TimeValue.timeValueMillis(50)) - assertEquals(ScheduledJobSettings.SWEEP_BACKOFF_RETRY_COUNT.get(settings), 3) - assertEquals(ScheduledJobSettings.SWEEP_PERIOD.get(settings), TimeValue.timeValueMinutes(5)) - assertEquals(ScheduledJobSettings.SWEEP_PAGE_SIZE.get(settings), 100) - - assertSettingDeprecationsAndWarnings( - arrayOf( - LegacyOpenDistroAlertingSettings.ALERTING_MAX_MONITORS, - LegacyOpenDistroAlertingSettings.INPUT_TIMEOUT, - LegacyOpenDistroAlertingSettings.INDEX_TIMEOUT, - LegacyOpenDistroAlertingSettings.BULK_TIMEOUT, - LegacyOpenDistroAlertingSettings.ALERT_BACKOFF_MILLIS, - LegacyOpenDistroAlertingSettings.ALERT_BACKOFF_COUNT, - LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_MILLIS, - LegacyOpenDistroAlertingSettings.MOVE_ALERTS_BACKOFF_COUNT, - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_ENABLED, - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_ROLLOVER_PERIOD, - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_INDEX_MAX_AGE, - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_MAX_DOCS, - LegacyOpenDistroAlertingSettings.ALERT_HISTORY_RETENTION_PERIOD, - LegacyOpenDistroAlertingSettings.REQUEST_TIMEOUT, - LegacyOpenDistroAlertingSettings.MAX_ACTION_THROTTLE_VALUE, - LegacyOpenDistroAlertingSettings.FILTER_BY_BACKEND_ROLES, - LegacyOpenDistroScheduledJobSettings.SWEEPER_ENABLED, - LegacyOpenDistroScheduledJobSettings.REQUEST_TIMEOUT, - LegacyOpenDistroScheduledJobSettings.SWEEP_BACKOFF_MILLIS, - LegacyOpenDistroScheduledJobSettings.SWEEP_BACKOFF_RETRY_COUNT, - LegacyOpenDistroScheduledJobSettings.SWEEP_PAGE_SIZE, - LegacyOpenDistroScheduledJobSettings.SWEEP_PERIOD - ) - ) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/settings/DestinationSettingsTests.kt b/alerting/bin/test/org/opensearch/alerting/settings/DestinationSettingsTests.kt deleted file mode 100644 index 2e96c1fad..000000000 --- a/alerting/bin/test/org/opensearch/alerting/settings/DestinationSettingsTests.kt +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.settings - -import org.junit.Before -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.common.settings.Settings -import org.opensearch.test.OpenSearchTestCase - -class DestinationSettingsTests : OpenSearchTestCase() { - private lateinit var plugin: AlertingPlugin - - @Before - fun setup() { - plugin = AlertingPlugin() - } - - fun `test all opendistro destination settings returned`() { - val settings = plugin.settings - assertTrue( - "Legacy Settings are not returned", - settings.containsAll( - listOf( - LegacyOpenDistroDestinationSettings.ALLOW_LIST, - LegacyOpenDistroDestinationSettings.HOST_DENY_LIST - ) - ) - ) - } - - fun `test all opensearch destination settings returned`() { - val settings = plugin.settings - assertTrue( - "Opensearch settings not returned", - settings.containsAll( - listOf( - DestinationSettings.ALLOW_LIST, - DestinationSettings.HOST_DENY_LIST - ) - ) - ) - } - - fun `test opendistro settings fallback`() { - assertEquals( - DestinationSettings.ALLOW_LIST.get(Settings.EMPTY), - LegacyOpenDistroDestinationSettings.ALLOW_LIST.get(Settings.EMPTY) - ) - assertEquals( - DestinationSettings.HOST_DENY_LIST.get(Settings.EMPTY), - LegacyOpenDistroDestinationSettings.HOST_DENY_LIST.get(Settings.EMPTY) - ) - } - - fun `test settings get Value with legacy fallback`() { - val settings = Settings.builder() - 
.putList("opendistro.alerting.destination.allow_list", listOf("1")) - .putList("opendistro.destination.host.deny_list", emptyList()).build() - - assertEquals(DestinationSettings.ALLOW_LIST.get(settings), listOf("1")) - assertEquals(DestinationSettings.HOST_DENY_LIST.get(settings), emptyList()) - - assertSettingDeprecationsAndWarnings( - arrayOf( - LegacyOpenDistroDestinationSettings.ALLOW_LIST, - LegacyOpenDistroDestinationSettings.HOST_DENY_LIST - ) - ) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/transport/AlertingSingleNodeTestCase.kt b/alerting/bin/test/org/opensearch/alerting/transport/AlertingSingleNodeTestCase.kt deleted file mode 100644 index f1f8882f7..000000000 --- a/alerting/bin/test/org/opensearch/alerting/transport/AlertingSingleNodeTestCase.kt +++ /dev/null @@ -1,503 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.transport - -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope -import org.opensearch.action.admin.indices.alias.get.GetAliasesRequest -import org.opensearch.action.admin.indices.delete.DeleteIndexRequest -import org.opensearch.action.admin.indices.get.GetIndexRequest -import org.opensearch.action.admin.indices.get.GetIndexRequestBuilder -import org.opensearch.action.admin.indices.get.GetIndexResponse -import org.opensearch.action.admin.indices.refresh.RefreshAction -import org.opensearch.action.admin.indices.refresh.RefreshRequest -import org.opensearch.action.support.IndicesOptions -import org.opensearch.action.support.WriteRequest -import org.opensearch.alerting.AlertingPlugin -import org.opensearch.alerting.action.ExecuteMonitorAction -import org.opensearch.alerting.action.ExecuteMonitorRequest -import org.opensearch.alerting.action.ExecuteMonitorResponse -import org.opensearch.alerting.action.ExecuteWorkflowAction -import org.opensearch.alerting.action.ExecuteWorkflowRequest -import org.opensearch.alerting.action.ExecuteWorkflowResponse -import org.opensearch.alerting.alerts.AlertIndices -import org.opensearch.alerting.model.MonitorMetadata -import org.opensearch.alerting.model.WorkflowMetadata -import org.opensearch.common.settings.Settings -import org.opensearch.common.unit.TimeValue -import org.opensearch.common.xcontent.XContentFactory -import org.opensearch.common.xcontent.XContentType -import org.opensearch.common.xcontent.json.JsonXContent -import org.opensearch.commons.alerting.action.AlertingActions -import org.opensearch.commons.alerting.action.DeleteMonitorRequest -import org.opensearch.commons.alerting.action.DeleteWorkflowRequest -import org.opensearch.commons.alerting.action.GetFindingsRequest -import org.opensearch.commons.alerting.action.GetFindingsResponse -import org.opensearch.commons.alerting.action.GetMonitorRequest -import org.opensearch.commons.alerting.action.GetWorkflowAlertsRequest -import org.opensearch.commons.alerting.action.GetWorkflowAlertsResponse -import org.opensearch.commons.alerting.action.GetWorkflowRequest -import org.opensearch.commons.alerting.action.GetWorkflowResponse -import org.opensearch.commons.alerting.action.IndexMonitorRequest -import org.opensearch.commons.alerting.action.IndexMonitorResponse -import org.opensearch.commons.alerting.action.IndexWorkflowRequest -import org.opensearch.commons.alerting.action.IndexWorkflowResponse -import org.opensearch.commons.alerting.model.Alert -import org.opensearch.commons.alerting.model.Finding -import org.opensearch.commons.alerting.model.Monitor -import 
org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.commons.alerting.model.Table -import org.opensearch.commons.alerting.model.Workflow -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.index.IndexService -import org.opensearch.index.query.BoolQueryBuilder -import org.opensearch.index.query.TermQueryBuilder -import org.opensearch.index.reindex.ReindexPlugin -import org.opensearch.index.seqno.SequenceNumbers -import org.opensearch.join.ParentJoinPlugin -import org.opensearch.painless.PainlessPlugin -import org.opensearch.plugins.Plugin -import org.opensearch.rest.RestRequest -import org.opensearch.script.mustache.MustachePlugin -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.search.fetch.subphase.FetchSourceContext -import org.opensearch.test.OpenSearchSingleNodeTestCase -import java.time.Instant -import java.time.ZonedDateTime -import java.time.format.DateTimeFormatter -import java.time.temporal.ChronoUnit -import java.util.Locale -import java.util.concurrent.TimeUnit - -/** - * A test that keep a singleton node started for all tests that can be used to get - * references to Guice injectors in unit tests. - */ -@ThreadLeakScope(ThreadLeakScope.Scope.NONE) -abstract class AlertingSingleNodeTestCase : OpenSearchSingleNodeTestCase() { - - protected val index: String = randomAlphaOfLength(10).lowercase(Locale.ROOT) - - override fun setUp() { - super.setUp() - createTestIndex() - } - - protected fun getAllIndicesFromPattern(pattern: String): List { - val getIndexResponse = ( - client().admin().indices().prepareGetIndex() - .setIndices(pattern) as GetIndexRequestBuilder - ).get() as GetIndexResponse - getIndexResponse - return getIndexResponse.indices().toList() - } - - protected fun executeMonitor(monitor: Monitor, id: String?, dryRun: Boolean = true): ExecuteMonitorResponse? { - val request = ExecuteMonitorRequest(dryRun, TimeValue(Instant.now().toEpochMilli()), id, monitor) - return client().execute(ExecuteMonitorAction.INSTANCE, request).get() - } - - protected fun insertSampleTimeSerializedData(index: String, data: List) { - data.forEachIndexed { i, value -> - val twoMinsAgo = ZonedDateTime.now().minus(2, ChronoUnit.MINUTES).truncatedTo(ChronoUnit.MILLIS) - val testTime = DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(twoMinsAgo) - val testDoc = """ - { - "test_strict_date_time": "$testTime", - "test_field_1": "$value", - "number": "$i" - } - """.trimIndent() - // Indexing documents with deterministic doc id to allow for easy selected deletion during testing - indexDoc(index, (i + 1).toString(), testDoc) - } - } - - @Suppress("UNCHECKED_CAST") - fun Map.stringMap(key: String): Map? { - val map = this as Map> - return map[key] - } - - /** A test index that can be used across tests. Feel free to add new fields but don't remove any. 
*/ - protected fun createTestIndex() { - val mapping = XContentFactory.jsonBuilder() - mapping.startObject() - .startObject("properties") - .startObject("test_strict_date_time") - .field("type", "date") - .field("format", "strict_date_time") - .endObject() - .startObject("test_field_1") - .field("type", "keyword") - .endObject() - .endObject() - .endObject() - - createIndex( - index, Settings.EMPTY, mapping - ) - } - - protected fun createTestIndex(index: String) { - val mapping = XContentFactory.jsonBuilder() - mapping.startObject() - .startObject("properties") - .startObject("test_strict_date_time") - .field("type", "date") - .field("format", "strict_date_time") - .endObject() - .startObject("test_field_1") - .field("type", "keyword") - .endObject() - .endObject() - .endObject() - - createIndex( - index, Settings.EMPTY, mapping - ) - } - - private fun createIndex( - index: String?, - settings: Settings?, - mappings: XContentBuilder?, - ): IndexService? { - val createIndexRequestBuilder = client().admin().indices().prepareCreate(index).setSettings(settings) - if (mappings != null) { - createIndexRequestBuilder.setMapping(mappings) - } - return this.createIndex(index, createIndexRequestBuilder) - } - - protected fun indexDoc(index: String, id: String, doc: String) { - client().prepareIndex(index).setId(id) - .setSource(doc, XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get() - } - - protected fun assertIndexExists(index: String) { - val getIndexResponse = - client().admin().indices().getIndex( - GetIndexRequest().indices(index).indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_HIDDEN) - ).get() - assertTrue(getIndexResponse.indices.size > 0) - } - - protected fun assertIndexNotExists(index: String) { - val getIndexResponse = - client().admin().indices().getIndex( - GetIndexRequest().indices(index).indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_HIDDEN) - ).get() - assertFalse(getIndexResponse.indices.size > 0) - } - - protected fun assertAliasNotExists(alias: String) { - val aliasesResponse = client().admin().indices().getAliases(GetAliasesRequest()).get() - val foundAlias = aliasesResponse.aliases.values.forEach { - it.forEach { - if (it.alias == alias) { - fail("alias exists, but it shouldn't") - } - } - } - } - - protected fun assertAliasExists(alias: String) { - val aliasesResponse = client().admin().indices().getAliases(GetAliasesRequest()).get() - val foundAlias = aliasesResponse.aliases.values.forEach { - it.forEach { - if (it.alias == alias) { - return - } - } - } - fail("alias doesn't exists, but it should") - } - - protected fun createMonitor(monitor: Monitor): IndexMonitorResponse? { - val request = IndexMonitorRequest( - monitorId = Monitor.NO_ID, - seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO, - primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM, - refreshPolicy = WriteRequest.RefreshPolicy.parse("true"), - method = RestRequest.Method.POST, - monitor = monitor - ) - return client().execute(AlertingActions.INDEX_MONITOR_ACTION_TYPE, request).actionGet() - } - - protected fun updateMonitor(monitor: Monitor, monitorId: String): IndexMonitorResponse? 
{ - val request = IndexMonitorRequest( - monitorId = monitorId, - seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO, - primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM, - refreshPolicy = WriteRequest.RefreshPolicy.parse("true"), - method = RestRequest.Method.PUT, - monitor = monitor - ) - return client().execute(AlertingActions.INDEX_MONITOR_ACTION_TYPE, request).actionGet() - } - - protected fun deleteMonitor(monitorId: String): Boolean { - client().execute( - AlertingActions.DELETE_MONITOR_ACTION_TYPE, DeleteMonitorRequest(monitorId, WriteRequest.RefreshPolicy.IMMEDIATE) - ).get() - return true - } - - protected fun searchAlerts( - monitorId: String, - indices: String = AlertIndices.ALERT_INDEX, - refresh: Boolean = true, - executionId: String? = null, - ): List<Alert> { - try { - if (refresh) refreshIndex(indices) - } catch (e: Exception) { - logger.warn("Could not refresh index $indices because: ${e.message}") - return emptyList() - } - val ssb = SearchSourceBuilder() - ssb.version(true) - val bqb = BoolQueryBuilder() - bqb.must(TermQueryBuilder(Alert.MONITOR_ID_FIELD, monitorId)) - if (!executionId.isNullOrEmpty()) { - bqb.must(TermQueryBuilder(Alert.EXECUTION_ID_FIELD, executionId)) - } - ssb.query(bqb) - val searchResponse = client().prepareSearch(indices).setRouting(monitorId).setSource(ssb).get() - - return searchResponse.hits.hits.map { - val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() } - Alert.parse(xcp, it.id, it.version) - } - } - - protected fun getWorkflowAlerts( - workflowId: String, - getAssociatedAlerts: Boolean? = true, - alertState: Alert.State? = Alert.State.ACTIVE, - alertIndex: String? = "", - associatedAlertsIndex: String? = "", - alertIds: List<String>? = emptyList(), - table: Table? = Table("asc", "monitor_id", null, 100, 0, null), - ): GetWorkflowAlertsResponse { - return client().execute( - AlertingActions.GET_WORKFLOW_ALERTS_ACTION_TYPE, - GetWorkflowAlertsRequest( - table = table!!, - severityLevel = "ALL", - alertState = alertState!!.name, - alertIndex = alertIndex, - associatedAlertsIndex = associatedAlertsIndex, - monitorIds = emptyList(), - workflowIds = listOf(workflowId), - alertIds = alertIds, - getAssociatedAlerts = getAssociatedAlerts!!
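/* the non-null assertions here rely on the defaulted parameters above */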
- ) - ).get() - } - - protected fun refreshIndex(index: String) { - client().execute(RefreshAction.INSTANCE, RefreshRequest(index)).get() - } - - protected fun searchFindings( - id: String, - indices: String = AlertIndices.ALL_FINDING_INDEX_PATTERN, - refresh: Boolean = true, - ): List<Finding> { - if (refresh) refreshIndex(indices) - - val ssb = SearchSourceBuilder() - ssb.version(true) - ssb.query(TermQueryBuilder(Alert.MONITOR_ID_FIELD, id)) - val searchResponse = client().prepareSearch(indices).setRouting(id).setSource(ssb).get() - - return searchResponse.hits.hits.map { - val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() } - Finding.parse(xcp) - }.filter { finding -> finding.monitorId == id } - } - - protected fun getFindings( - findingId: String, - monitorId: String?, - findingIndexName: String?, - ): List<Finding> { - - val getFindingsRequest = GetFindingsRequest( - findingId, - Table("asc", "monitor_id", null, 100, 0, null), - monitorId, - findingIndexName - ) - val getFindingsResponse: GetFindingsResponse = client().execute(AlertingActions.GET_FINDINGS_ACTION_TYPE, getFindingsRequest).get() - - return getFindingsResponse.findings.map { it.finding }.toList() - } - - protected fun getMonitorResponse( - monitorId: String, - version: Long = 1L, - fetchSourceContext: FetchSourceContext = FetchSourceContext.FETCH_SOURCE, - ) = client().execute( - AlertingActions.GET_MONITOR_ACTION_TYPE, - GetMonitorRequest(monitorId, version, RestRequest.Method.GET, fetchSourceContext) - ).get() - - override fun getPlugins(): List<Class<out Plugin>> { - return listOf( - AlertingPlugin::class.java, - ReindexPlugin::class.java, - MustachePlugin::class.java, - PainlessPlugin::class.java, - ParentJoinPlugin::class.java - ) - } - - protected fun deleteIndex(index: String) { - val response = client().admin().indices().delete(DeleteIndexRequest(index)).get() - assertTrue("Unable to delete index", response.isAcknowledged()) - } - - override fun resetNodeAfterTest(): Boolean { - return false - } - - // merged WorkflowSingleNodeTestCase with this class as we are seeing test setup failures - // when multiple test classes implement AlertingSingleNodeTestCase or its child class - protected fun searchWorkflow( - id: String, - indices: String = ScheduledJob.SCHEDULED_JOBS_INDEX, - refresh: Boolean = true, - ): Workflow? { - try { - if (refresh) refreshIndex(indices) - } catch (e: Exception) { - logger.warn("Could not refresh index $indices because: ${e.message}") - return null - } - val ssb = SearchSourceBuilder() - ssb.version(true) - ssb.query(TermQueryBuilder("_id", id)) - val searchResponse = client().prepareSearch(indices).setRouting(id).setSource(ssb).get() - - return searchResponse.hits.hits.map { it -> - val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() } - lateinit var workflow: Workflow - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - xcp.nextToken() - when (xcp.currentName()) { - "workflow" -> workflow = Workflow.parse(xcp) - } - } - workflow.copy(id = it.id, version = it.version) - }.first() - } - - protected fun searchWorkflowMetadata( - id: String, - indices: String = ScheduledJob.SCHEDULED_JOBS_INDEX, - refresh: Boolean = true, - ): WorkflowMetadata?
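/* resolves the metadata document through its workflow_metadata.workflow_id field */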
{ - try { - if (refresh) refreshIndex(indices) - } catch (e: Exception) { - logger.warn("Could not refresh index $indices because: ${e.message}") - return null - } - val ssb = SearchSourceBuilder() - ssb.version(true) - ssb.query(TermQueryBuilder("workflow_metadata.workflow_id", id)) - val searchResponse = client().prepareSearch(indices).setRouting(id).setSource(ssb).get() - - return searchResponse.hits.hits.map { it -> - val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() } - lateinit var workflowMetadata: WorkflowMetadata - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - xcp.nextToken() - when (xcp.currentName()) { - "workflow_metadata" -> workflowMetadata = WorkflowMetadata.parse(xcp) - } - } - workflowMetadata.copy(id = it.id) - }.first() - } - - protected fun searchMonitorMetadata( - id: String, - indices: String = ScheduledJob.SCHEDULED_JOBS_INDEX, - refresh: Boolean = true, - ): MonitorMetadata? { - try { - if (refresh) refreshIndex(indices) - } catch (e: Exception) { - logger.warn("Could not refresh index $indices because: ${e.message}") - return null - } - val ssb = SearchSourceBuilder() - ssb.version(true) - ssb.query(TermQueryBuilder("_id", id)) - val searchResponse = client().prepareSearch(indices).setRouting(id).setSource(ssb).get() - - return searchResponse.hits.hits.map { it -> - val xcp = createParser(JsonXContent.jsonXContent, it.sourceRef).also { it.nextToken() } - lateinit var monitorMetadata: MonitorMetadata - while (xcp.nextToken() != XContentParser.Token.END_OBJECT) { - xcp.nextToken() - when (xcp.currentName()) { - "metadata" -> monitorMetadata = MonitorMetadata.parse(xcp) - } - } - monitorMetadata.copy(id = it.id) - }.first() - } - - protected fun upsertWorkflow( - workflow: Workflow, - id: String = Workflow.NO_ID, - method: RestRequest.Method = RestRequest.Method.POST, - ): IndexWorkflowResponse? { - val request = IndexWorkflowRequest( - workflowId = id, - seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO, - primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM, - refreshPolicy = WriteRequest.RefreshPolicy.parse("true"), - method = method, - workflow = workflow - ) - - return client().execute(AlertingActions.INDEX_WORKFLOW_ACTION_TYPE, request).actionGet() - } - - protected fun getWorkflowById(id: String): GetWorkflowResponse { - return client().execute( - AlertingActions.GET_WORKFLOW_ACTION_TYPE, - GetWorkflowRequest(id, RestRequest.Method.GET) - ).get() - } - - protected fun deleteWorkflow(workflowId: String, deleteDelegateMonitors: Boolean? = null) { - client().execute( - AlertingActions.DELETE_WORKFLOW_ACTION_TYPE, - DeleteWorkflowRequest(workflowId, deleteDelegateMonitors) - ).get() - } - - protected fun executeWorkflow(workflow: Workflow? = null, id: String? = null, dryRun: Boolean = true): ExecuteWorkflowResponse? 
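/* dryRun defaults to true, which presumably keeps test executions from persisting alerts */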
{ - val request = ExecuteWorkflowRequest(dryRun, TimeValue(Instant.now().toEpochMilli()), id, workflow) - return client().execute(ExecuteWorkflowAction.INSTANCE, request).get() - } - - override fun nodeSettings(): Settings { - return Settings.builder() - .put(super.nodeSettings()) - .put("opendistro.scheduled_jobs.sweeper.period", TimeValue(5, TimeUnit.SECONDS)) - .put("opendistro.scheduled_jobs.enabled", true) - .build() - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/triggeraction/TriggerExpressionParserTests.kt b/alerting/bin/test/org/opensearch/alerting/triggeraction/TriggerExpressionParserTests.kt deleted file mode 100644 index d3f4613fe..000000000 --- a/alerting/bin/test/org/opensearch/alerting/triggeraction/TriggerExpressionParserTests.kt +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.triggeraction - -import org.junit.Assert -import org.opensearch.alerting.triggercondition.parsers.TriggerExpressionParser -import org.opensearch.test.OpenSearchTestCase - -class TriggerExpressionParserTests : OpenSearchTestCase() { - - fun `test trigger expression posix parsing simple AND`() { - val eqString = "(query[name=sigma-123] && query[name=sigma-456])" - val equation = TriggerExpressionParser(eqString).parse() - Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] && ", equation.toString()) - } - - fun `test trigger expression posix parsing multiple AND`() { - val eqString = "(query[name=sigma-123] && query[name=sigma-456]) && query[name=sigma-789]" - val equation = TriggerExpressionParser(eqString).parse() - Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] && query[name=sigma-789] && ", equation.toString()) - } - - fun `test trigger expression posix parsing multiple AND with parenthesis`() { - val eqString = "(query[name=sigma-123] && query[name=sigma-456]) && (query[name=sigma-789] && query[name=id-2aw34])" - val equation = TriggerExpressionParser(eqString).parse() - Assert.assertEquals( - "query[name=sigma-123] query[name=sigma-456] && query[name=sigma-789] query[name=id-2aw34] && && ", - equation.toString() - ) - } - - fun `test trigger expression posix parsing simple OR`() { - val eqString = "(query[name=sigma-123] || query[name=sigma-456])" - val equation = TriggerExpressionParser(eqString).parse() - Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] || ", equation.toString()) - } - - fun `test trigger expression posix parsing multiple OR`() { - val eqString = "(query[name=sigma-123] || query[name=sigma-456]) || query[name=sigma-789]" - val equation = TriggerExpressionParser(eqString).parse() - Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] || query[name=sigma-789] || ", equation.toString()) - } - - fun `test trigger expression posix parsing multiple OR with parenthesis`() { - val eqString = "(query[name=sigma-123] || query[name=sigma-456]) || (query[name=sigma-789] || query[name=id-2aw34])" - val equation = TriggerExpressionParser(eqString).parse() - Assert.assertEquals( - "query[name=sigma-123] query[name=sigma-456] || query[name=sigma-789] query[name=id-2aw34] || || ", - equation.toString() - ) - } - - fun `test trigger expression posix parsing simple NOT`() { - val eqString = "(query[name=sigma-123] || !query[name=sigma-456])" - val equation = TriggerExpressionParser(eqString).parse() - Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] ! 
|| ", equation.toString()) - } - - fun `test trigger expression posix parsing multiple NOT`() { - val eqString = "(query[name=sigma-123] && !query[tag=tag-456]) && !(query[name=sigma-789])" - val equation = TriggerExpressionParser(eqString).parse() - Assert.assertEquals("query[name=sigma-123] query[tag=tag-456] ! && query[name=sigma-789] ! && ", equation.toString()) - } - - fun `test trigger expression posix parsing multiple operators with parenthesis`() { - val eqString = "(query[name=sigma-123] && query[tag=sev1]) || !(!query[name=sigma-789] || query[name=id-2aw34])" - val equation = TriggerExpressionParser(eqString).parse() - Assert.assertEquals( - "query[name=sigma-123] query[tag=sev1] && query[name=sigma-789] ! query[name=id-2aw34] || ! || ", - equation.toString() - ) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/triggeraction/TriggerExpressionResolverTests.kt b/alerting/bin/test/org/opensearch/alerting/triggeraction/TriggerExpressionResolverTests.kt deleted file mode 100644 index 1dd19d9d1..000000000 --- a/alerting/bin/test/org/opensearch/alerting/triggeraction/TriggerExpressionResolverTests.kt +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.triggeraction - -import org.junit.Assert -import org.opensearch.alerting.triggercondition.parsers.TriggerExpressionParser -import org.opensearch.commons.alerting.model.DocLevelQuery -import org.opensearch.test.OpenSearchTestCase - -class TriggerExpressionResolverTests : OpenSearchTestCase() { - - fun `test trigger expression evaluation simple AND`() { - val eqString = "(query[name=sigma-123] && query[name=sigma-456])" - val equation = TriggerExpressionParser(eqString).parse() - val queryToDocIds = mutableMapOf>() - queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") - queryToDocIds[DocLevelQuery("", "sigma-456", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") - Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] && ", equation.toString()) - Assert.assertEquals(mutableSetOf("1", "2", "3"), equation.evaluate(queryToDocIds)) - } - - fun `test trigger expression evaluation simple AND scenario2`() { - val eqString = "(query[name=sigma-123] && query[id=id1456])" - val equation = TriggerExpressionParser(eqString).parse() - val queryToDocIds = mutableMapOf>() - queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("6", "3", "7") - queryToDocIds[DocLevelQuery("id1456", "", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") - Assert.assertEquals("query[name=sigma-123] query[id=id1456] && ", equation.toString()) - Assert.assertEquals(mutableSetOf("3"), equation.evaluate(queryToDocIds)) - } - - fun `test trigger expression evaluation simple AND scenario3`() { - val eqString = "(query[name=sigma-123] && query[tag=sev2])" - val equation = TriggerExpressionParser(eqString).parse() - val queryToDocIds = mutableMapOf>() - queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("6", "8", "7") - queryToDocIds[DocLevelQuery("", "", listOf(), "", mutableListOf("tag=sev2"))] = mutableSetOf("1", "2", "3") - Assert.assertEquals("query[name=sigma-123] query[tag=sev2] && ", equation.toString()) - Assert.assertEquals(emptySet(), equation.evaluate(queryToDocIds)) - } - - fun `test trigger expression evaluation simple OR`() { - val eqString = "(query[name=sigma-123] || query[name=sigma-456])" - val equation = 
TriggerExpressionParser(eqString).parse() - val queryToDocIds = mutableMapOf<DocLevelQuery, Set<String>>() - queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") - queryToDocIds[DocLevelQuery("", "sigma-456", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") - Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] || ", equation.toString()) - Assert.assertEquals(mutableSetOf("1", "2", "3"), equation.evaluate(queryToDocIds)) - } - - fun `test trigger expression evaluation simple OR scenario2`() { - val eqString = "(query[name=sigma-123] || query[id=id1456])" - val equation = TriggerExpressionParser(eqString).parse() - val queryToDocIds = mutableMapOf<DocLevelQuery, Set<String>>() - queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("6", "3", "7") - queryToDocIds[DocLevelQuery("id1456", "", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") - Assert.assertEquals("query[name=sigma-123] query[id=id1456] || ", equation.toString()) - Assert.assertEquals(mutableSetOf("6", "3", "7", "1", "2", "3"), equation.evaluate(queryToDocIds)) - } - - fun `test trigger expression evaluation simple OR scenario3`() { - val eqString = "(query[name=sigma-123] || query[tag=sev2])" - val equation = TriggerExpressionParser(eqString).parse() - val queryToDocIds = mutableMapOf<DocLevelQuery, Set<String>>() - queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("6", "8", "7") - queryToDocIds[DocLevelQuery("", "", listOf(), "", mutableListOf("tag=sev2"))] = emptySet() - Assert.assertEquals("query[name=sigma-123] query[tag=sev2] || ", equation.toString()) - Assert.assertEquals(mutableSetOf("6", "8", "7"), equation.evaluate(queryToDocIds)) - } - - fun `test trigger expression evaluation simple NOT`() { - val eqString = "!(query[name=sigma-456])" - val equation = TriggerExpressionParser(eqString).parse() - val queryToDocIds = mutableMapOf<DocLevelQuery, Set<String>>() - queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") - queryToDocIds[DocLevelQuery("", "sigma-456", listOf(), "", emptyList())] = mutableSetOf("4", "5", "6") - Assert.assertEquals("query[name=sigma-456] ! ", equation.toString()) - Assert.assertEquals(mutableSetOf("1", "2", "3"), equation.evaluate(queryToDocIds)) - } - - fun `test trigger expression evaluation AND with NOT`() { - val eqString = "(query[name=sigma-123] && !query[name=sigma-456])" - val equation = TriggerExpressionParser(eqString).parse() - val queryToDocIds = mutableMapOf<DocLevelQuery, Set<String>>() - queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3", "11") - queryToDocIds[DocLevelQuery("", "sigma-456", listOf(), "", emptyList())] = mutableSetOf("3", "4", "5") - queryToDocIds[DocLevelQuery("id_new", "", listOf(), "", emptyList())] = mutableSetOf("11", "12", "13") - Assert.assertEquals("query[name=sigma-123] query[name=sigma-456] !
&& ", equation.toString()) - Assert.assertEquals(mutableSetOf("1", "2", "11"), equation.evaluate(queryToDocIds)) - } - - fun `test trigger expression evaluation OR with NOT`() { - val eqString = "(query[name=sigma-123] || !query[id=id1456])" - val equation = TriggerExpressionParser(eqString).parse() - val queryToDocIds = mutableMapOf>() - queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("6", "3", "7") - queryToDocIds[DocLevelQuery("id1456", "", listOf(), "", emptyList())] = mutableSetOf("11", "12", "15") - queryToDocIds[DocLevelQuery("id_new", "", listOf(), "", emptyList())] = mutableSetOf("11", "12", "13") - Assert.assertEquals("query[name=sigma-123] query[id=id1456] ! || ", equation.toString()) - Assert.assertEquals(mutableSetOf("6", "3", "7", "13"), equation.evaluate(queryToDocIds)) - } - - fun `test trigger expression evaluation with multiple operators with parenthesis`() { - val eqString = "(query[name=sigma-123] && query[tag=sev1]) || !(!query[name=sigma-789] || query[id=id-2aw34])" - val equation = TriggerExpressionParser(eqString).parse() - - val queryToDocIds = mutableMapOf>() - queryToDocIds[DocLevelQuery("", "sigma-123", listOf(), "", emptyList())] = mutableSetOf("1", "2", "3") - queryToDocIds[DocLevelQuery("id_random1", "", listOf(), "", mutableListOf("sev1"))] = mutableSetOf("2", "3", "4") - queryToDocIds[DocLevelQuery("", "sigma-789", listOf(), "", emptyList())] = mutableSetOf("11", "12", "13") - queryToDocIds[DocLevelQuery("id-2aw34", "", listOf(), "", emptyList())] = mutableSetOf("13", "14", "15") - - Assert.assertEquals( - "query[name=sigma-123] query[tag=sev1] && query[name=sigma-789] ! query[id=id-2aw34] || ! || ", - equation.toString() - ) - - Assert.assertEquals(mutableSetOf("2", "3", "11", "12"), equation.evaluate(queryToDocIds)) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/util/AggregationQueryRewriterTests.kt b/alerting/bin/test/org/opensearch/alerting/util/AggregationQueryRewriterTests.kt deleted file mode 100644 index f596f1b92..000000000 --- a/alerting/bin/test/org/opensearch/alerting/util/AggregationQueryRewriterTests.kt +++ /dev/null @@ -1,335 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util - -import org.junit.Assert -import org.opensearch.action.search.SearchResponse -import org.opensearch.alerting.model.InputRunResults -import org.opensearch.alerting.model.TriggerAfterKey -import org.opensearch.alerting.randomBucketLevelTrigger -import org.opensearch.alerting.randomBucketSelectorExtAggregationBuilder -import org.opensearch.alerting.randomQueryLevelTrigger -import org.opensearch.cluster.ClusterModule -import org.opensearch.common.CheckedFunction -import org.opensearch.common.xcontent.json.JsonXContent -import org.opensearch.commons.alerting.model.Trigger -import org.opensearch.core.ParseField -import org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.search.aggregations.Aggregation -import org.opensearch.search.aggregations.AggregationBuilder -import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder -import org.opensearch.search.aggregations.bucket.composite.ParsedComposite -import org.opensearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder -import org.opensearch.search.aggregations.bucket.terms.TermsAggregationBuilder -import org.opensearch.search.builder.SearchSourceBuilder -import 
org.opensearch.test.OpenSearchTestCase -import java.io.IOException - -class AggregationQueryRewriterTests : OpenSearchTestCase() { - - fun `test RewriteQuery empty previous result`() { - val triggers: MutableList<Trigger> = mutableListOf() - for (i in 0 until 10) { - triggers.add(randomBucketLevelTrigger()) - } - val queryBuilder = SearchSourceBuilder() - val termAgg: AggregationBuilder = TermsAggregationBuilder("testPath").field("sports") - queryBuilder.aggregation(termAgg) - val prevResult = null - AggregationQueryRewriter.rewriteQuery(queryBuilder, prevResult, triggers) - Assert.assertEquals(queryBuilder.aggregations().pipelineAggregatorFactories.size, 10) - } - - fun `skip test RewriteQuery with non-empty previous result`() { - val triggers: MutableList<Trigger> = mutableListOf() - for (i in 0 until 10) { - triggers.add(randomBucketLevelTrigger()) - } - val queryBuilder = SearchSourceBuilder() - val termAgg: AggregationBuilder = CompositeAggregationBuilder( - "testPath", - listOf(TermsValuesSourceBuilder("k1"), TermsValuesSourceBuilder("k2")) - ) - queryBuilder.aggregation(termAgg) - val aggTriggersAfterKey = mutableMapOf<String, TriggerAfterKey>() - for (trigger in triggers) { - aggTriggersAfterKey[trigger.id] = TriggerAfterKey(hashMapOf(Pair("k1", "v1"), Pair("k2", "v2")), false) - } - val prevResult = InputRunResults(emptyList(), null, aggTriggersAfterKey) - AggregationQueryRewriter.rewriteQuery(queryBuilder, prevResult, triggers) - Assert.assertEquals(queryBuilder.aggregations().pipelineAggregatorFactories.size, 10) - queryBuilder.aggregations().aggregatorFactories.forEach { - if (it.name.equals("testPath")) { -// val compAgg = it as CompositeAggregationBuilder - // TODO: This is calling forbidden API and causing build failures, need to find an alternative - // instead of trying to access private member variables -// val afterField = CompositeAggregationBuilder::class.java.getDeclaredField("after") -// afterField.isAccessible = true -// Assert.assertEquals(afterField.get(compAgg), hashMapOf(Pair("k1", "v1"), Pair("k2", "v2"))) - } - } - } - - fun `test RewriteQuery with non aggregation trigger`() { - val triggers: MutableList<Trigger> = mutableListOf() - for (i in 0 until 10) { - triggers.add(randomQueryLevelTrigger()) - } - val queryBuilder = SearchSourceBuilder() - val termAgg: AggregationBuilder = TermsAggregationBuilder("testPath").field("sports") - queryBuilder.aggregation(termAgg) - val prevResult = null - AggregationQueryRewriter.rewriteQuery(queryBuilder, prevResult, triggers) - Assert.assertEquals(queryBuilder.aggregations().pipelineAggregatorFactories.size, 0) - } - - fun `test after keys from search response`() { - val responseContent = """ - { - "took" : 97, - "timed_out" : false, - "_shards" : { - "total" : 3, - "successful" : 3, - "skipped" : 0, - "failed" : 0 - }, - "hits" : { - "total" : { - "value" : 20, - "relation" : "eq" - }, - "max_score" : null, - "hits" : [ ] - }, - "aggregations" : { - "composite#testPath" : { - "after_key" : { - "sport" : "Basketball" - }, - "buckets" : [ - { - "key" : { - "sport" : "Basketball" - }, - "doc_count" : 5 - } - ] - } - } - } - """.trimIndent() - - val aggTriggers: MutableList<Trigger> = mutableListOf(randomBucketLevelTrigger()) - val tradTriggers: MutableList<Trigger> = mutableListOf(randomQueryLevelTrigger()) - - val searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, responseContent)) - val afterKeys = AggregationQueryRewriter.getAfterKeysFromSearchResponse(searchResponse, aggTriggers, null) - Assert.assertEquals(afterKeys[aggTriggers[0].id]?.afterKey,
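/* the after_key echoed back by the composite aggregation response above */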
hashMapOf(Pair("sport", "Basketball"))) - - val afterKeys2 = AggregationQueryRewriter.getAfterKeysFromSearchResponse(searchResponse, tradTriggers, null) - Assert.assertEquals(afterKeys2.size, 0) - } - - fun `test after keys from search responses for multiple bucket paths and different page counts`() { - val firstResponseContent = """ - { - "took" : 0, - "timed_out" : false, - "_shards" : { - "total" : 1, - "successful" : 1, - "skipped" : 0, - "failed" : 0 - }, - "hits" : { - "total" : { - "value" : 4675, - "relation" : "eq" - }, - "max_score" : null, - "hits" : [ ] - }, - "aggregations" : { - "composite2#smallerResults" : { - "after_key" : { - "category" : "Women's Shoes" - }, - "buckets" : [ - { - "key" : { - "category" : "Women's Shoes" - }, - "doc_count" : 1136 - } - ] - }, - "composite3#largerResults" : { - "after_key" : { - "user" : "abigail" - }, - "buckets" : [ - { - "key" : { - "user" : "abd" - }, - "doc_count" : 188 - }, - { - "key" : { - "user" : "abigail" - }, - "doc_count" : 128 - } - ] - } - } - } - """.trimIndent() - - val secondResponseContent = """ - { - "took" : 0, - "timed_out" : false, - "_shards" : { - "total" : 1, - "successful" : 1, - "skipped" : 0, - "failed" : 0 - }, - "hits" : { - "total" : { - "value" : 4675, - "relation" : "eq" - }, - "max_score" : null, - "hits" : [ ] - }, - "aggregations" : { - "composite2#smallerResults" : { - "buckets" : [ ] - }, - "composite3#largerResults" : { - "after_key" : { - "user" : "boris" - }, - "buckets" : [ - { - "key" : { - "user" : "betty" - }, - "doc_count" : 148 - }, - { - "key" : { - "user" : "boris" - }, - "doc_count" : 74 - } - ] - } - } - } - """.trimIndent() - - val thirdResponseContent = """ - { - "took" : 0, - "timed_out" : false, - "_shards" : { - "total" : 1, - "successful" : 1, - "skipped" : 0, - "failed" : 0 - }, - "hits" : { - "total" : { - "value" : 4675, - "relation" : "eq" - }, - "max_score" : null, - "hits" : [ ] - }, - "aggregations" : { - "composite2#smallerResults" : { - "buckets" : [ ] - }, - "composite3#largerResults" : { - "buckets" : [ ] - } - } - } - """.trimIndent() - - val bucketLevelTriggers: MutableList = mutableListOf( - randomBucketLevelTrigger(bucketSelector = randomBucketSelectorExtAggregationBuilder(parentBucketPath = "smallerResults")), - randomBucketLevelTrigger(bucketSelector = randomBucketSelectorExtAggregationBuilder(parentBucketPath = "largerResults")) - ) - - var searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, firstResponseContent)) - val afterKeys = AggregationQueryRewriter.getAfterKeysFromSearchResponse(searchResponse, bucketLevelTriggers, null) - assertEquals(hashMapOf(Pair("category", "Women's Shoes")), afterKeys[bucketLevelTriggers[0].id]?.afterKey) - assertEquals(false, afterKeys[bucketLevelTriggers[0].id]?.lastPage) - assertEquals(hashMapOf(Pair("user", "abigail")), afterKeys[bucketLevelTriggers[1].id]?.afterKey) - assertEquals(false, afterKeys[bucketLevelTriggers[1].id]?.lastPage) - - searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, secondResponseContent)) - val afterKeys2 = AggregationQueryRewriter.getAfterKeysFromSearchResponse(searchResponse, bucketLevelTriggers, afterKeys) - assertEquals(hashMapOf(Pair("category", "Women's Shoes")), afterKeys2[bucketLevelTriggers[0].id]?.afterKey) - assertEquals(true, afterKeys2[bucketLevelTriggers[0].id]?.lastPage) - assertEquals(hashMapOf(Pair("user", "boris")), afterKeys2[bucketLevelTriggers[1].id]?.afterKey) - assertEquals(false, 
afterKeys2[bucketLevelTriggers[1].id]?.lastPage) - - searchResponse = SearchResponse.fromXContent(createParser(JsonXContent.jsonXContent, thirdResponseContent)) - val afterKeys3 = AggregationQueryRewriter.getAfterKeysFromSearchResponse(searchResponse, bucketLevelTriggers, afterKeys2) - assertEquals(hashMapOf(Pair("category", "Women's Shoes")), afterKeys3[bucketLevelTriggers[0].id]?.afterKey) - assertEquals(true, afterKeys3[bucketLevelTriggers[0].id]?.lastPage) - assertEquals(hashMapOf(Pair("user", "boris")), afterKeys3[bucketLevelTriggers[1].id]?.afterKey) - assertEquals(true, afterKeys3[bucketLevelTriggers[1].id]?.lastPage) - } - - override fun xContentRegistry(): NamedXContentRegistry { - val entries = ClusterModule.getNamedXWriteables() - entries.add( - NamedXContentRegistry.Entry( - Aggregation::class.java, - ParseField(CompositeAggregationBuilder.NAME), - CheckedFunction<XContentParser, ParsedComposite, IOException> { parser: XContentParser? -> - ParsedComposite.fromXContent( - parser, - "testPath" - ) - } - ) - ) - entries.add( - NamedXContentRegistry.Entry( - Aggregation::class.java, - ParseField(CompositeAggregationBuilder.NAME + "2"), - CheckedFunction<XContentParser, ParsedComposite, IOException> { parser: XContentParser? -> - ParsedComposite.fromXContent( - parser, - "smallerResults" - ) - } - ) - ) - entries.add( - NamedXContentRegistry.Entry( - Aggregation::class.java, - ParseField(CompositeAggregationBuilder.NAME + "3"), - CheckedFunction<XContentParser, ParsedComposite, IOException> { parser: XContentParser? -> - ParsedComposite.fromXContent( - parser, - "largerResults" - ) - } - ) - ) - return NamedXContentRegistry(entries) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/util/AnomalyDetectionUtilsTests.kt b/alerting/bin/test/org/opensearch/alerting/util/AnomalyDetectionUtilsTests.kt deleted file mode 100644 index 3555a5c38..000000000 --- a/alerting/bin/test/org/opensearch/alerting/util/AnomalyDetectionUtilsTests.kt +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util - -import org.opensearch.alerting.ANOMALY_RESULT_INDEX -import org.opensearch.alerting.randomQueryLevelMonitor -import org.opensearch.commons.alerting.model.Input -import org.opensearch.commons.alerting.model.SearchInput -import org.opensearch.commons.authuser.User -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.XContentBuilder -import org.opensearch.index.query.QueryBuilders -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.test.OpenSearchTestCase - -class AnomalyDetectionUtilsTests : OpenSearchTestCase() { - - fun `test is ad monitor`() { - val monitor = randomQueryLevelMonitor( - inputs = listOf( - SearchInput( - listOf(ANOMALY_RESULT_INDEX), - SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) - ) - ) - ) - assertTrue(isADMonitor(monitor)) - } - - fun `test not ad monitor if monitor have no inputs`() { - val monitor = randomQueryLevelMonitor( - inputs = listOf() - ) - assertFalse(isADMonitor(monitor)) - } - - fun `test not ad monitor if monitor input is not search input`() { - val monitor = randomQueryLevelMonitor( - inputs = listOf(object : Input { - override fun name(): String { - TODO("Not yet implemented") - } - - override fun writeTo(out: StreamOutput?)
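/* serialization is never exercised by this stub input */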
{ - TODO("Not yet implemented") - } - - override fun toXContent(builder: XContentBuilder?, params: ToXContent.Params?): XContentBuilder { - TODO("Not yet implemented") - } - }) - ) - assertFalse(isADMonitor(monitor)) - } - - fun `test not ad monitor if monitor input has more than 1 indices`() { - val monitor = randomQueryLevelMonitor( - inputs = listOf( - SearchInput( - listOf(randomAlphaOfLength(5), randomAlphaOfLength(5)), - SearchSourceBuilder().query(QueryBuilders.matchAllQuery()) - ) - ) - ) - assertFalse(isADMonitor(monitor)) - } - - fun `test not ad monitor if monitor input's index name is not AD result index`() { - val monitor = randomQueryLevelMonitor( - inputs = listOf(SearchInput(listOf(randomAlphaOfLength(5)), SearchSourceBuilder().query(QueryBuilders.matchAllQuery()))) - ) - assertFalse(isADMonitor(monitor)) - } - - fun `test add user role filter with null user`() { - val searchSourceBuilder = SearchSourceBuilder() - addUserBackendRolesFilter(null, searchSourceBuilder) - assertEquals( - "{\"query\":{\"bool\":{\"must_not\":[{\"nested\":{\"query\":{\"exists\":{\"field\":\"user\",\"boost\":1.0}}," + - "\"path\":\"user\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"adjust_pure_negative\":true," + - "\"boost\":1.0}}}", - searchSourceBuilder.toString() - ) - } - - fun `test add user role filter with user with empty name`() { - val searchSourceBuilder = SearchSourceBuilder() - addUserBackendRolesFilter(User("", mutableListOf(), mutableListOf(), mutableListOf()), searchSourceBuilder) - assertEquals( - "{\"query\":{\"bool\":{\"must_not\":[{\"nested\":{\"query\":{\"exists\":{\"field\":\"user\",\"boost\":1.0}}," + - "\"path\":\"user\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"adjust_pure_negative\":true," + - "\"boost\":1.0}}}", - searchSourceBuilder.toString() - ) - } - - fun `test add user role filter with null user backend role`() { - val searchSourceBuilder = SearchSourceBuilder() - addUserBackendRolesFilter( - User( - randomAlphaOfLength(5), - null, - listOf(randomAlphaOfLength(5)), - listOf(randomAlphaOfLength(5)) - ), - searchSourceBuilder - ) - assertEquals( - "{\"query\":{\"bool\":{\"must\":[{\"nested\":{\"query\":{\"exists\":{\"field\":\"user\",\"boost\":1.0}}," + - "\"path\":\"user\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"must_not\":[{\"nested\":" + - "{\"query\":{\"exists\":{\"field\":\"user.backend_roles.keyword\",\"boost\":1.0}},\"path\":\"user\",\"ignore_unmapped\"" + - ":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"adjust_pure_negative\":true,\"boost\":1.0}}}", - searchSourceBuilder.toString() - ) - } - - fun `test add user role filter with empty user backend role`() { - val searchSourceBuilder = SearchSourceBuilder() - addUserBackendRolesFilter( - User( - randomAlphaOfLength(5), - listOf(), - listOf(randomAlphaOfLength(5)), - listOf(randomAlphaOfLength(5)) - ), - searchSourceBuilder - ) - assertEquals( - "{\"query\":{\"bool\":{\"must\":[{\"nested\":{\"query\":{\"exists\":{\"field\":\"user\",\"boost\":1.0}}," + - "\"path\":\"user\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"must_not\":[{\"nested\":" + - "{\"query\":{\"exists\":{\"field\":\"user.backend_roles.keyword\",\"boost\":1.0}},\"path\":\"user\",\"ignore_unmapped\"" + - ":false,\"score_mode\":\"none\",\"boost\":1.0}}],\"adjust_pure_negative\":true,\"boost\":1.0}}}", - searchSourceBuilder.toString() - ) - } - - fun `test add user role filter with normal user backend role`() { - val searchSourceBuilder = 
SearchSourceBuilder() - val backendRole1 = randomAlphaOfLength(5) - val backendRole2 = randomAlphaOfLength(5) - addUserBackendRolesFilter( - User( - randomAlphaOfLength(5), - listOf(backendRole1, backendRole2), - listOf(randomAlphaOfLength(5)), - listOf(randomAlphaOfLength(5)) - ), - searchSourceBuilder - ) - assertEquals( - "{\"query\":{\"bool\":{\"must\":[{\"nested\":{\"query\":{\"terms\":{\"user.backend_roles.keyword\":" + - "[\"$backendRole1\",\"$backendRole2\"]," + - "\"boost\":1.0}},\"path\":\"user\",\"ignore_unmapped\":false,\"score_mode\":\"none\",\"boost\":1.0}}]," + - "\"adjust_pure_negative\":true,\"boost\":1.0}}}", - searchSourceBuilder.toString() - ) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/util/IndexUtilsTests.kt b/alerting/bin/test/org/opensearch/alerting/util/IndexUtilsTests.kt deleted file mode 100644 index e4db20639..000000000 --- a/alerting/bin/test/org/opensearch/alerting/util/IndexUtilsTests.kt +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util - -import org.opensearch.alerting.parser -import org.opensearch.cluster.metadata.IndexMetadata -import org.opensearch.test.OpenSearchTestCase -import java.lang.NumberFormatException -import kotlin.test.assertFailsWith - -class IndexUtilsTests : OpenSearchTestCase() { - - fun `test get schema version`() { - val message = "{\"user\":{ \"name\":\"test\"},\"_meta\":{\"schema_version\": 1}}" - - val schemaVersion = IndexUtils.getSchemaVersion(message) - assertEquals(1, schemaVersion) - } - - fun `test get schema version without _meta`() { - val message = "{\"user\":{ \"name\":\"test\"}}" - - val schemaVersion = IndexUtils.getSchemaVersion(message) - assertEquals(0, schemaVersion) - } - - fun `test get schema version without schema_version`() { - val message = "{\"user\":{ \"name\":\"test\"},\"_meta\":{\"test\": 1}}" - - val schemaVersion = IndexUtils.getSchemaVersion(message) - assertEquals(0, schemaVersion) - } - - fun `test get schema version with negative schema_version`() { - val message = "{\"user\":{ \"name\":\"test\"},\"_meta\":{\"schema_version\": -1}}" - - assertFailsWith(IllegalArgumentException::class, "Expected IllegalArgumentException") { - IndexUtils.getSchemaVersion(message) - } - } - - fun `test get schema version with wrong schema_version`() { - val message = "{\"user\":{ \"name\":\"test\"},\"_meta\":{\"schema_version\": \"wrong\"}}" - - assertFailsWith(NumberFormatException::class, "Expected NumberFormatException") { - IndexUtils.getSchemaVersion(message) - } - } - - fun `test should update index without original version`() { - val indexContent = "{\"testIndex\":{\"settings\":{\"index\":{\"creation_date\":\"1558407515699\"," + - "\"number_of_shards\":\"1\",\"number_of_replicas\":\"1\",\"uuid\":\"t-VBBW6aR6KpJ3XP5iISOA\"," + - "\"version\":{\"created\":\"6040399\"},\"provided_name\":\"data_test\"}},\"mapping_version\":123," + - "\"settings_version\":123,\"mappings\":{\"_doc\":{\"properties\":{\"name\":{\"type\":\"keyword\"}}}}}}" - val newMapping = "{\"_meta\":{\"schema_version\":10},\"properties\":{\"name\":{\"type\":\"keyword\"}}}" - val index: IndexMetadata = IndexMetadata.fromXContent(parser(indexContent)) - - val shouldUpdateIndex = IndexUtils.shouldUpdateIndex(index, newMapping) - assertTrue(shouldUpdateIndex) - } - - fun `test should update index with lagged version`() { - val indexContent = "{\"testIndex\":{\"settings\":{\"index\":{\"creation_date\":\"1558407515699\"," + - 
"\"number_of_shards\":\"1\",\"number_of_replicas\":\"1\",\"uuid\":\"t-VBBW6aR6KpJ3XP5iISOA\"," + - "\"version\":{\"created\":\"6040399\"},\"provided_name\":\"data_test\"}},\"mapping_version\":123," + - "\"settings_version\":123,\"mappings\":{\"_doc\":{\"_meta\":{\"schema_version\":1},\"properties\":" + - "{\"name\":{\"type\":\"keyword\"}}}}}}" - val newMapping = "{\"_meta\":{\"schema_version\":10},\"properties\":{\"name\":{\"type\":\"keyword\"}}}" - val index: IndexMetadata = IndexMetadata.fromXContent(parser(indexContent)) - - val shouldUpdateIndex = IndexUtils.shouldUpdateIndex(index, newMapping) - assertTrue(shouldUpdateIndex) - } - - fun `test should update index with same version`() { - val indexContent = "{\"testIndex\":{\"settings\":{\"index\":{\"creation_date\":\"1558407515699\"," + - "\"number_of_shards\":\"1\",\"number_of_replicas\":\"1\",\"uuid\":\"t-VBBW6aR6KpJ3XP5iISOA\"," + - "\"version\":{\"created\":\"6040399\"},\"provided_name\":\"data_test\"}},\"mapping_version\":\"1\"," + - "\"settings_version\":\"1\",\"aliases_version\":\"1\",\"mappings\":" + - "{\"_doc\":{\"_meta\":{\"schema_version\":1},\"properties\":{\"name\":{\"type\":\"keyword\"}}}}}}" - val newMapping = "{\"_meta\":{\"schema_version\":1},\"properties\":{\"name\":{\"type\":\"keyword\"}}}" - val xContentParser = parser(indexContent) - val index: IndexMetadata = IndexMetadata.fromXContent(xContentParser) - - val shouldUpdateIndex = IndexUtils.shouldUpdateIndex(index, newMapping) - assertFalse(shouldUpdateIndex) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesWrappersIT.kt b/alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesWrappersIT.kt deleted file mode 100644 index 9712b4213..000000000 --- a/alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatIndicesWrappersIT.kt +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util.clusterMetricsMonitorHelpers - -import org.opensearch.action.support.WriteRequest -import org.opensearch.alerting.randomClusterMetricsInput -import org.opensearch.alerting.util.clusterMetricsMonitorHelpers.CatIndicesResponseWrapper.Companion.WRAPPER_FIELD -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.ClusterMetricsInput -import org.opensearch.core.common.Strings -import org.opensearch.test.OpenSearchSingleNodeTestCase - -class CatIndicesWrappersIT : OpenSearchSingleNodeTestCase() { - private val path = ClusterMetricsInput.ClusterMetricType.CAT_INDICES.defaultPath - - fun `test CatIndicesRequestWrapper validate valid pathParams`() { - // GIVEN - val pathParams = "index1,index-name-2,index-3" - - // WHEN - val requestWrapper = CatIndicesRequestWrapper(pathParams = pathParams) - - // THEN - assertEquals(3, requestWrapper.clusterHealthRequest.indices().size) - assertEquals(3, requestWrapper.clusterStateRequest.indices().size) - assertEquals(3, requestWrapper.indexSettingsRequest.indices().size) - assertEquals(3, requestWrapper.indicesStatsRequest.indices().size) - } - - fun `test CatIndicesRequestWrapper validate without providing pathParams`() { - // GIVEN & WHEN - val requestWrapper = CatIndicesRequestWrapper() - - // THEN - assertNull(requestWrapper.clusterHealthRequest.indices()) - assertEquals(Strings.EMPTY_ARRAY, requestWrapper.clusterStateRequest.indices()) - assertEquals(Strings.EMPTY_ARRAY, 
requestWrapper.indexSettingsRequest.indices()) - assertNull(requestWrapper.indicesStatsRequest.indices()) - } - - fun `test CatIndicesRequestWrapper validate blank pathParams`() { - // GIVEN - val pathParams = " " - - // WHEN - val requestWrapper = CatIndicesRequestWrapper(pathParams = pathParams) - - // THEN - assertNull(requestWrapper.clusterHealthRequest.indices()) - assertEquals(Strings.EMPTY_ARRAY, requestWrapper.clusterStateRequest.indices()) - assertEquals(Strings.EMPTY_ARRAY, requestWrapper.indexSettingsRequest.indices()) - assertNull(requestWrapper.indicesStatsRequest.indices()) - } - - fun `test CatIndicesRequestWrapper validate empty pathParams`() { - // GIVEN - val pathParams = "" - - // WHEN - val requestWrapper = CatIndicesRequestWrapper(pathParams = pathParams) - - // THEN - assertNull(requestWrapper.clusterHealthRequest.indices()) - assertEquals(Strings.EMPTY_ARRAY, requestWrapper.clusterStateRequest.indices()) - assertEquals(Strings.EMPTY_ARRAY, requestWrapper.indexSettingsRequest.indices()) - assertNull(requestWrapper.indicesStatsRequest.indices()) - } - - fun `test CatIndicesRequestWrapper validate invalid pathParams`() { - // GIVEN - val pathParams = "_index1,index^2" - - // WHEN & THEN - assertThrows(IllegalArgumentException::class.java) { CatIndicesRequestWrapper(pathParams = pathParams) } - } - - suspend fun `test CatIndicesResponseWrapper returns with only indices in pathParams`() { - // GIVEN - val testIndices = (1..5).map { - "test-index${randomAlphaOfLength(10).lowercase()}" to randomIntBetween(1, 10) - }.toMap() - - testIndices.forEach { (indexName, docCount) -> - repeat(docCount) { - val docId = (it + 1).toString() - val docMessage = """ - { - "message": "$indexName doc num $docId" - } - """.trimIndent() - indexDoc(indexName, docId, docMessage) - } - } - - /* - Creating a subset of indices to use for the pathParams to test that all indices on the cluster ARE NOT returned. 
- */ - val pathParamsIndices = testIndices.keys.toList().subList(1, testIndices.size - 1) - val pathParams = pathParamsIndices.joinToString(",") - val input = randomClusterMetricsInput(path = path, pathParams = pathParams) - - // WHEN - val responseMap = (executeTransportAction(input, client())).toMap() - - // THEN - val shards = responseMap[WRAPPER_FIELD] as List<Map<String, Any>> - val returnedIndices = - shards.map { (it[CatIndicesResponseWrapper.IndexInfo.INDEX_FIELD] as String) to it }.toMap() - - assertEquals(pathParamsIndices.size, returnedIndices.keys.size) - testIndices.forEach { (indexName, docCount) -> - if (pathParamsIndices.contains(indexName)) { - assertEquals( - indexName, - returnedIndices[indexName]?.get(CatIndicesResponseWrapper.IndexInfo.INDEX_FIELD) as String - ) - assertEquals( - docCount.toString(), - returnedIndices[indexName]?.get(CatIndicesResponseWrapper.IndexInfo.DOCS_COUNT_FIELD) as String - ) - } - } - } - - suspend fun `test CatIndicesResponseWrapper returns with all indices when empty pathParams`() { - // GIVEN - val testIndices = (1..5).map { - "test-index${randomAlphaOfLength(10).lowercase()}" to randomIntBetween(1, 10) - }.toMap() - - testIndices.forEach { (indexName, docCount) -> - repeat(docCount) { - val docId = (it + 1).toString() - val docMessage = """ - { - "message": "$indexName doc num $docId" - } - """.trimIndent() - indexDoc(indexName, docId, docMessage) - } - } - - val input = randomClusterMetricsInput(path = path) - - // WHEN - val responseMap = (executeTransportAction(input, client())).toMap() - - // THEN - val shards = responseMap[WRAPPER_FIELD] as List<Map<String, Any>> - val returnedIndices = - shards.map { (it[CatIndicesResponseWrapper.IndexInfo.INDEX_FIELD] as String) to it }.toMap() - - assertEquals(testIndices.size, returnedIndices.keys.size) - testIndices.forEach { (indexName, docCount) -> - assertEquals( - indexName, - returnedIndices[indexName]?.get(CatIndicesResponseWrapper.IndexInfo.INDEX_FIELD) as String - ) - assertEquals( - docCount.toString(), - returnedIndices[indexName]?.get(CatIndicesResponseWrapper.IndexInfo.DOCS_COUNT_FIELD) as String - ) - } - } - - private fun indexDoc(index: String, id: String, doc: String) { - client().prepareIndex(index).setId(id) - .setSource(doc, XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get() - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsWrappersIT.kt b/alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsWrappersIT.kt deleted file mode 100644 index c8b5db561..000000000 --- a/alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/CatShardsWrappersIT.kt +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util.clusterMetricsMonitorHelpers - -import org.opensearch.action.support.WriteRequest -import org.opensearch.alerting.randomClusterMetricsInput -import org.opensearch.alerting.util.clusterMetricsMonitorHelpers.CatShardsResponseWrapper.Companion.WRAPPER_FIELD -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.ClusterMetricsInput -import org.opensearch.core.common.Strings -import org.opensearch.test.OpenSearchSingleNodeTestCase - -class CatShardsWrappersIT : OpenSearchSingleNodeTestCase() { - private val path = ClusterMetricsInput.ClusterMetricType.CAT_SHARDS.defaultPath - - fun `test CatShardsRequestWrapper validate valid pathParams`() { - //
GIVEN - val pathParams = "index1,index_2,index-3" - - // WHEN - val requestWrapper = CatShardsRequestWrapper(pathParams = pathParams) - - // THEN - assertEquals(3, requestWrapper.clusterStateRequest.indices().size) - assertEquals(3, requestWrapper.indicesStatsRequest.indices().size) - } - - fun `test CatShardsRequestWrapper validate without providing pathParams`() { - // GIVEN & WHEN - val requestWrapper = CatShardsRequestWrapper() - - // THEN - assertEquals(Strings.EMPTY_ARRAY, requestWrapper.clusterStateRequest.indices()) - assertNull(requestWrapper.indicesStatsRequest.indices()) - } - - fun `test CatShardsRequestWrapper validate blank pathParams`() { - // GIVEN - val pathParams = " " - - // WHEN - val requestWrapper = CatShardsRequestWrapper(pathParams = pathParams) - - // THEN - assertEquals(Strings.EMPTY_ARRAY, requestWrapper.clusterStateRequest.indices()) - assertNull(requestWrapper.indicesStatsRequest.indices()) - } - - fun `test CatShardsRequestWrapper validate empty pathParams`() { - // GIVEN - val pathParams = "" - - // WHEN - val requestWrapper = CatShardsRequestWrapper(pathParams = pathParams) - - // THEN - assertEquals(Strings.EMPTY_ARRAY, requestWrapper.clusterStateRequest.indices()) - assertNull(requestWrapper.indicesStatsRequest.indices()) - } - - fun `test CatShardsRequestWrapper validate invalid pathParams`() { - // GIVEN - val pathParams = "_index1,index^2" - - // WHEN & THEN - assertThrows(IllegalArgumentException::class.java) { CatShardsRequestWrapper(pathParams = pathParams) } - } - - suspend fun `test CatShardsResponseWrapper returns with only indices in pathParams`() { - // GIVEN - val testIndices = (1..5).map { - "test-index${randomAlphaOfLength(10).lowercase()}" to randomIntBetween(1, 10) - }.toMap() - - testIndices.forEach { (indexName, docCount) -> - repeat(docCount) { - val docId = (it + 1).toString() - val docMessage = """ - { - "message": "$indexName doc num $docId" - } - """.trimIndent() - indexDoc(indexName, docId, docMessage) - } - } - - /* - Creating a subset of indices to use for the pathParams to test that all indices on the cluster ARE NOT returned. 
- */ - val pathParamsIndices = testIndices.keys.toList().subList(1, testIndices.size - 1) - val pathParams = pathParamsIndices.joinToString(",") - val input = randomClusterMetricsInput(path = path, pathParams = pathParams) - - // WHEN - val responseMap = (executeTransportAction(input, client())).toMap() - - // THEN - val shards = responseMap[WRAPPER_FIELD] as List<Map<String, Any>> - val returnedIndices = - shards.map { (it[CatShardsResponseWrapper.ShardInfo.INDEX_FIELD] as String) to it }.toMap() - - assertEquals(pathParamsIndices.size, returnedIndices.keys.size) - testIndices.forEach { (indexName, docCount) -> - if (pathParamsIndices.contains(indexName)) { - assertEquals( - indexName, - returnedIndices[indexName]?.get(CatShardsResponseWrapper.ShardInfo.INDEX_FIELD) as String - ) - assertEquals( - docCount.toString(), - returnedIndices[indexName]?.get(CatShardsResponseWrapper.ShardInfo.DOCS_FIELD) as String - ) - } - } - } - - suspend fun `test CatShardsResponseWrapper returns with all indices when empty pathParams`() { - // GIVEN - val testIndices = (1..5).map { - "test-index${randomAlphaOfLength(10).lowercase()}" to randomIntBetween(1, 10) - }.toMap() - - testIndices.forEach { (indexName, docCount) -> - repeat(docCount) { - val docId = (it + 1).toString() - val docMessage = """ - { - "message": "$indexName doc num $docId" - } - """.trimIndent() - indexDoc(indexName, docId, docMessage) - } - } - - val input = randomClusterMetricsInput(path = path) - - // WHEN - val responseMap = (executeTransportAction(input, client())).toMap() - - // THEN - val shards = responseMap[WRAPPER_FIELD] as List<Map<String, Any>> - val returnedIndices = - shards.map { (it[CatShardsResponseWrapper.ShardInfo.INDEX_FIELD] as String) to it }.toMap() - - assertEquals(testIndices.size, returnedIndices.keys.size) - testIndices.forEach { (indexName, docCount) -> - assertEquals( - indexName, - returnedIndices[indexName]?.get(CatShardsResponseWrapper.ShardInfo.INDEX_FIELD) as String - ) - assertEquals( - docCount.toString(), - returnedIndices[indexName]?.get(CatShardsResponseWrapper.ShardInfo.DOCS_FIELD) as String - ) - } - } - - private fun indexDoc(index: String, id: String, doc: String) { - client().prepareIndex(index).setId(id) - .setSource(doc, XContentType.JSON).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get() - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensionsTests.kt b/alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensionsTests.kt deleted file mode 100644 index bfe5b8dce..000000000 --- a/alerting/bin/test/org/opensearch/alerting/util/clusterMetricsMonitorHelpers/SupportedClusterMetricsSettingsExtensionsTests.kt +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util.clusterMetricsMonitorHelpers - -import org.opensearch.test.OpenSearchTestCase - -class SupportedClusterMetricsSettingsExtensionsTests : OpenSearchTestCase() { - private var expectedResponse = hashMapOf<String, Any>() - private var mappedResponse = hashMapOf<String, Any>() - private var supportedJsonPayload = hashMapOf<String, ArrayList<String>>() - - fun `test redactFieldsFromResponse with non-empty supportedJsonPayload`() { - // GIVEN - mappedResponse = hashMapOf( - ( - "pathRoot1" to hashMapOf( - ("pathRoot1_subPath1" to 11), - ( - "pathRoot1_subPath2" to hashMapOf( - ("pathRoot1_subPath2_subPath1" to 121), - ( - "pathRoot1_subPath2_subPath2" to hashMapOf( - ("pathRoot1_subPath2_subPath2_subPath1" to 1221) - ) - ) - ) - ) - ) - ), - ("pathRoot2" to hashMapOf(("pathRoot2_subPath1" to 21), ("pathRoot2_subPath2" to setOf(221, 222, "223string")))), - ("pathRoot3" to hashMapOf(("pathRoot3_subPath1" to 31), ("pathRoot3_subPath2" to setOf(321, 322, "323string")))) - ) - - supportedJsonPayload = hashMapOf( - ( - "pathRoot1" to arrayListOf( - "pathRoot1_subPath1", - "pathRoot1_subPath2.pathRoot1_subPath2_subPath2.pathRoot1_subPath2_subPath2_subPath1" - ) - ), - ("pathRoot2" to arrayListOf("pathRoot2_subPath2")), - ("pathRoot3" to arrayListOf()) - ) - - expectedResponse = hashMapOf( - ( - "pathRoot1" to hashMapOf( - ("pathRoot1_subPath1" to 11), - ( - "pathRoot1_subPath2" to hashMapOf( - ( - "pathRoot1_subPath2_subPath2" to hashMapOf( - ("pathRoot1_subPath2_subPath2_subPath1" to 1221) - ) - ) - ) - ) - ) - ), - ("pathRoot2" to hashMapOf(("pathRoot2_subPath2" to setOf(221, 222, "223string")))), - ("pathRoot3" to hashMapOf(("pathRoot3_subPath1" to 31), ("pathRoot3_subPath2" to setOf(321, 322, "323string")))) - ) - - // WHEN - val result = redactFieldsFromResponse(mappedResponse, supportedJsonPayload) - - // THEN - assertEquals(expectedResponse, result) - } - - fun `test redactFieldsFromResponse with empty supportedJsonPayload`() { - // GIVEN - mappedResponse = hashMapOf( - ( - "pathRoot1" to hashMapOf( - ("pathRoot1_subPath1" to 11), - ( - "pathRoot1_subPath2" to hashMapOf( - ("pathRoot1_subPath2_subPath1" to 121), - ( - "pathRoot1_subPath2_subPath2" to hashMapOf( - ("pathRoot1_subPath2_subPath2_subPath1" to 1221) - ) - ) - ) - ) - ) - ), - ("pathRoot2" to hashMapOf(("pathRoot2_subPath1" to 21), ("pathRoot2_subPath2" to setOf(221, 222, "223string")))), - ("pathRoot3" to 3) - ) - - expectedResponse = hashMapOf( - ( - "pathRoot1" to hashMapOf( - ("pathRoot1_subPath1" to 11), - ( - "pathRoot1_subPath2" to hashMapOf( - ("pathRoot1_subPath2_subPath1" to 121), - ( - "pathRoot1_subPath2_subPath2" to hashMapOf( - ("pathRoot1_subPath2_subPath2_subPath1" to 1221) - ) - ) - ) - ) - ) - ), - ("pathRoot2" to hashMapOf(("pathRoot2_subPath1" to 21), ("pathRoot2_subPath2" to setOf(221, 222, "223string")))), - ("pathRoot3" to 3) - ) - - // WHEN - val result = redactFieldsFromResponse(mappedResponse, supportedJsonPayload) - - // THEN - assertEquals(expectedResponse, result) - } -} diff --git a/alerting/bin/test/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilServiceIT.kt b/alerting/bin/test/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilServiceIT.kt deleted file mode 100644 index f9c40e465..000000000 --- a/alerting/bin/test/org/opensearch/alerting/util/destinationmigration/DestinationMigrationUtilServiceIT.kt +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.util.destinationmigration - -import org.opensearch.alerting.AlertingRestTestCase -import org.opensearch.alerting.makeRequest -import org.opensearch.alerting.model.destination.Destination -import org.opensearch.alerting.model.destination.email.Email -import org.opensearch.alerting.model.destination.email.EmailAccount -import org.opensearch.alerting.model.destination.email.EmailEntry -import org.opensearch.alerting.model.destination.email.EmailGroup -import org.opensearch.alerting.model.destination.email.Recipient -import org.opensearch.alerting.randomUser -import org.opensearch.alerting.util.DestinationType -import org.opensearch.client.ResponseException -import
org.opensearch.commons.alerting.model.ScheduledJob.Companion.SCHEDULED_JOBS_INDEX -import org.opensearch.core.rest.RestStatus -import java.time.Instant -import java.util.UUID - -class DestinationMigrationUtilServiceIT : AlertingRestTestCase() { - - fun `test migrateData`() { - if (isNotificationPluginInstalled()) { - // Create alerting config index - createRandomMonitor() - - val emailAccount = EmailAccount( - name = "test", - email = "test@email.com", - host = "smtp.com", - port = 25, - method = EmailAccount.MethodType.NONE, - username = null, - password = null - ) - val emailAccountDoc = "{\"email_account\" : ${emailAccount.toJsonString()}}" - val emailGroup = EmailGroup( - name = "test", - emails = listOf(EmailEntry("test@email.com")) - ) - val emailGroupDoc = "{\"email_group\" : ${emailGroup.toJsonString()}}" - val emailAccountId = UUID.randomUUID().toString() - val emailGroupId = UUID.randomUUID().toString() - indexDocWithAdminClient(SCHEDULED_JOBS_INDEX, emailAccountId, emailAccountDoc) - indexDocWithAdminClient(SCHEDULED_JOBS_INDEX, emailGroupId, emailGroupDoc) - - val recipient = Recipient(Recipient.RecipientType.EMAIL, null, "test@email.com") - val email = Email(emailAccountId, listOf(recipient)) - val emailDest = Destination( - id = UUID.randomUUID().toString(), - type = DestinationType.EMAIL, - name = "test", - user = randomUser(), - lastUpdateTime = Instant.now(), - chime = null, - slack = null, - customWebhook = null, - email = email - ) - val slackDestination = getSlackDestination().copy(id = UUID.randomUUID().toString()) - val chimeDestination = getChimeDestination().copy(id = UUID.randomUUID().toString()) - val customWebhookDestination = getCustomWebhookDestination().copy(id = UUID.randomUUID().toString()) - - val destinations = listOf(emailDest, slackDestination, chimeDestination, customWebhookDestination) - - val ids = mutableListOf(emailAccountId, emailGroupId) - for (destination in destinations) { - val dest = """ - { - "destination" : ${destination.toJsonString()} - } - """.trimIndent() - indexDocWithAdminClient(SCHEDULED_JOBS_INDEX, destination.id, dest) - ids.add(destination.id) - } - - // Create cluster change event and wait for migration service to complete migrating data over - client().updateSettings("indices.recovery.max_bytes_per_sec", "40mb") - Thread.sleep(120000) - - for (id in ids) { - val response = client().makeRequest( - "GET", - "_plugins/_notifications/configs/$id" - ) - assertEquals(RestStatus.OK, response.restStatus()) - - try { - client().makeRequest( - "GET", - ".opendistro-alerting-config/_doc/$id" - ) - fail("Expecting ResponseException") - } catch (e: ResponseException) { - assertEquals(RestStatus.NOT_FOUND, e.response.restStatus()) - } - } - } - } -} diff --git a/alerting/bin/test/plugin-security.policy b/alerting/bin/test/plugin-security.policy deleted file mode 100644 index bcee5e9e6..000000000 --- a/alerting/bin/test/plugin-security.policy +++ /dev/null @@ -1,8 +0,0 @@ -grant { - // needed to find the classloader to load whitelisted classes. 
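- // An illustrative note on how grants like these are exercised (names assumed, not part of the
- // grant syntax below): plugin code usually wraps the guarded call in
- // java.security.AccessController.doPrivileged so the permission is checked against this
- // plugin's codebase rather than against every caller on the stack, e.g. in Kotlin:
- //   val loader: ClassLoader = AccessController.doPrivileged(PrivilegedAction { javaClass.classLoader })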
- permission java.lang.RuntimePermission "createClassLoader"; - permission java.lang.RuntimePermission "getClassLoader"; - - permission java.net.SocketPermission "*", "connect,resolve"; - permission java.net.NetPermission "getProxySelector"; -}; diff --git a/alerting/bin/test/root-ca.pem b/alerting/bin/test/root-ca.pem deleted file mode 100644 index 4015d866e..000000000 --- a/alerting/bin/test/root-ca.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID/jCCAuagAwIBAgIBATANBgkqhkiG9w0BAQsFADCBjzETMBEGCgmSJomT8ixk -ARkWA2NvbTEXMBUGCgmSJomT8ixkARkWB2V4YW1wbGUxGTAXBgNVBAoMEEV4YW1w -bGUgQ29tIEluYy4xITAfBgNVBAsMGEV4YW1wbGUgQ29tIEluYy4gUm9vdCBDQTEh -MB8GA1UEAwwYRXhhbXBsZSBDb20gSW5jLiBSb290IENBMB4XDTE4MDQyMjAzNDM0 -NloXDTI4MDQxOTAzNDM0NlowgY8xEzARBgoJkiaJk/IsZAEZFgNjb20xFzAVBgoJ -kiaJk/IsZAEZFgdleGFtcGxlMRkwFwYDVQQKDBBFeGFtcGxlIENvbSBJbmMuMSEw -HwYDVQQLDBhFeGFtcGxlIENvbSBJbmMuIFJvb3QgQ0ExITAfBgNVBAMMGEV4YW1w -bGUgQ29tIEluYy4gUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC -ggEBAK/u+GARP5innhpXK0c0q7s1Su1VTEaIgmZr8VWI6S8amf5cU3ktV7WT9SuV -TsAm2i2A5P+Ctw7iZkfnHWlsC3HhPUcd6mvzGZ4moxnamM7r+a9otRp3owYoGStX -ylVTQusAjbq9do8CMV4hcBTepCd+0w0v4h6UlXU8xjhj1xeUIz4DKbRgf36q0rv4 -VIX46X72rMJSETKOSxuwLkov1ZOVbfSlPaygXIxqsHVlj1iMkYRbQmaTib6XWHKf -MibDaqDejOhukkCjzpptGZOPFQ8002UtTTNv1TiaKxkjMQJNwz6jfZ53ws3fh1I0 -RWT6WfM4oeFRFnyFRmc4uYTUgAkCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAf -BgNVHSMEGDAWgBSSNQzgDx4rRfZNOfN7X6LmEpdAczAdBgNVHQ4EFgQUkjUM4A8e -K0X2TTnze1+i5hKXQHMwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4IB -AQBoQHvwsR34hGO2m8qVR9nQ5Klo5HYPyd6ySKNcT36OZ4AQfaCGsk+SecTi35QF -RHL3g2qffED4tKR0RBNGQSgiLavmHGCh3YpDupKq2xhhEeS9oBmQzxanFwWFod4T -nnsG2cCejyR9WXoRzHisw0KJWeuNlwjUdJY0xnn16srm1zL/M/f0PvCyh9HU1mF1 -ivnOSqbDD2Z7JSGyckgKad1Omsg/rr5XYtCeyJeXUPcmpeX6erWJJNTUh6yWC/hY -G/dFC4xrJhfXwz6Z0ytUygJO32bJG4Np2iGAwvvgI9EfxzEv/KP+FGrJOvQJAq4/ -BU36ZAa80W/8TBnqZTkNnqZV ------END CERTIFICATE----- diff --git a/alerting/bin/test/sample.pem b/alerting/bin/test/sample.pem deleted file mode 100644 index a1fc20a77..000000000 --- a/alerting/bin/test/sample.pem +++ /dev/null @@ -1,25 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEPDCCAySgAwIBAgIUZjrlDPP8azRDPZchA/XEsx0X2iIwDQYJKoZIhvcNAQEL -BQAwgY8xEzARBgoJkiaJk/IsZAEZFgNjb20xFzAVBgoJkiaJk/IsZAEZFgdleGFt -cGxlMRkwFwYDVQQKDBBFeGFtcGxlIENvbSBJbmMuMSEwHwYDVQQLDBhFeGFtcGxl -IENvbSBJbmMuIFJvb3QgQ0ExITAfBgNVBAMMGEV4YW1wbGUgQ29tIEluYy4gUm9v -dCBDQTAeFw0yMzA4MjkwNDIzMTJaFw0zMzA4MjYwNDIzMTJaMFcxCzAJBgNVBAYT -AmRlMQ0wCwYDVQQHDAR0ZXN0MQ0wCwYDVQQKDARub2RlMQ0wCwYDVQQLDARub2Rl -MRswGQYDVQQDDBJub2RlLTAuZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA -A4IBDwAwggEKAoIBAQCm93kXteDQHMAvbUPNPW5pyRHKDD42XGWSgq0k1D29C/Ud -yL21HLzTJa49ZU2ldIkSKs9JqbkHdyK0o8MO6L8dotLoYbxDWbJFW8bp1w6tDTU0 -HGkn47XVu3EwbfrTENg3jFu+Oem6a/501SzITzJWtS0cn2dIFOBimTVpT/4Zv5qr -XA6Cp4biOmoTYWhi/qQl8d0IaADiqoZ1MvZbZ6x76qTrRAbg+UWkpTEXoH1xTc8n -dibR7+HP6OTqCKvo1NhE8uP4pY+fWd6b6l+KLo3IKpfTbAIJXIO+M67FLtWKtttD -ao94B069skzKk6FPgW/OZh6PRCD0oxOavV+ld2SjAgMBAAGjgcYwgcMwRwYDVR0R -BEAwPogFKgMEBQWCEm5vZGUtMC5leGFtcGxlLmNvbYIJbG9jYWxob3N0hxAAAAAA -AAAAAAAAAAAAAAABhwR/AAABMAsGA1UdDwQEAwIF4DAdBgNVHSUEFjAUBggrBgEF -BQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU0/qDQaY10jIo -wCjLUpz/HfQXyt8wHwYDVR0jBBgwFoAUF4ffoFrrZhKn1dD4uhJFPLcrAJwwDQYJ -KoZIhvcNAQELBQADggEBAD2hkndVih6TWxoe/oOW0i2Bq7ScNO/n7/yHWL04HJmR -MaHv/Xjc8zLFLgHuHaRvC02ikWIJyQf5xJt0Oqu2GVbqXH9PBGKuEP2kCsRRyU27 -zTclAzfQhqmKBTYQ/3lJ3GhRQvXIdYTe+t4aq78TCawp1nSN+vdH/1geG6QjMn5N -1FU8tovDd4x8Ib/0dv8RJx+n9gytI8n/giIaDCEbfLLpe4EkV5e5UNpOnRgJjjuy 
-vtZutc81TQnzBtkS9XuulovDE0qI+jQrKkKu8xgGLhgH0zxnPkKtUg2I3Aq6zl1L -zYkEOUF8Y25J6WeY88Yfnc0iigI+Pnz5NK8R9GL7TYo= ------END CERTIFICATE----- diff --git a/alerting/bin/test/test-kirk.jks b/alerting/bin/test/test-kirk.jks deleted file mode 100644 index 6dbc51e714784fa58a4209c75deab8b9ed1698ff..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4504 zcma)AXEYp+vt7GZ$?DyT=tPUf>Rt32Rtcg+B4PQKLo)5nT`xBt(f8 zz4zYx{`1az=l47B(|aH0%$a-V&c}OZ28N+d1QLK?7-~f#Qh{)-@KbUEVuBnDwFn`G zTJSH-2g86X{uc$#Cd7a<{=zALBY_C=KPs|Y1i%~&Sotp~4}12H0!$9GfJy&blEDNC z=>%hA9@l)1y-8vD6#cH^U}=KBI0FdeqXH7J!^nt8{(B;j6byi|5|P@4YY{kr2nhrT zsl1TD93_M516EPM#9d4EG(rsFKtBW4^r*(5KwKbTLB){+^0E(}Q+A7HoW0lrA)@i+ zydGtY^95cAh7C?*2qIcESObb&7%#|($|(-eXIiQ#0>bYpj@=?*4?U=5@-ISTdSa4x zOtEjIWb0hr)D^1HVpX7-CjwnsDG8#WM@AVZvyufeW?}`^GtGW7WcGsVl)G*$?lP3S z^GYelg04B!ZBp4GnwCzq@uOLfB4xY#hE;StB61*Yd8?%(Nl9NW{s3+HODy#ik72s%Hj($a8 zhF0>hs}=106=eHlR<&9zT@LuHAUIZWLFWrKQ#$R3^=pv*&-7e6{O_Ji`|s`^^4v@-Hr>`?(V#!ktZ-$-0?Jt1G-G? zE9HvN@-0iPpKSDRsLacPB>#JY4d$KM!zs7xPBvUu4HQ}!Bz$qc)A`=Ver4EBC?!g7b zuW7GvE*puJA=;!bv2_S?8ZQx_n`M?F&kkb{-h zKwO=OA_@auvAUmAsQW~NjYK|}m{>`{*n^45MJ^ph*%K9}8GnxA%-;D^^-}ih8oWP* zXJ#vzJY3e4?&oSey+_=qv19lq zeLI>%Gjx=y!qVzf%Y&c7dgkjEw?^rl8^KxGs^%{Fd_(b51&l(wYCO&Rc~ZUl5^~y> zc}BJ!4+n2KaS|<{vd#M44my1W|M0Y-gfk9<&l%IBje@31-Sr1Mt!fvT(Pe+Gt$Bz? z_up@HJf$b!)YfI|4{%l^JDxgWvp75|nMzg7E)(qZ%=alvt zXMfZg7Z=_eanGP?tBXFKyvFRu$?uMAzg|k-(32orZccxnHGr$(gM%4Hgc&3blJCi; z6j@^Y3XVg*doBz7pms~Jn7 z9>1&oI7bPBOnn7vyV1x>YahPMDy_bySw!71ij);ebzBEUSZK&o1y43I-AuJKXJ~C3 z{ScF0neCZB8?5r>Px#3V%} zq$OY&i2FZH#6&q5i2Yy421o$-o6P@Z2>vgd4p$sB)+@I7CAQvk>m=OVG#EC`^#8Hx zXo}&oS5+Eg(sw4>QN4_Cy_0U!W9o!pxS@}|4s+L{ow)59*P>fYuDV~JqCwTL5s{)3(v zzbM`$E?)E;`zu*Kjpah> zgQl1ucOJOd1|%MDBk_Lsu64*-#r>9orWT19xT!DnCoNv_AnWczl?5a3@Sd4mtPrx@ z;QPqXK#%ve%3=_Sa$)(zJ)mvCYW0$Uim6bQ!S}#H@uPFY+qvmT_x`cr%&q*~6sufG zKKVZ8ebd?WhVYT)or=?jzV*~PLH&t?CH^KO=IX%=oHNr75%vVz=nN9ipHOrX*7{h! zNkaI3@a@JfTINcbD<@;DNwqa&=S5v4pM=tBEMN8HU3}euq?(dEFWfNC>H+2C+1dBA zFs|s&27315cK^vG`LRKX~{Ugw!|2K~TP_VAqXtzNY6)j={rQ zv73v$!psb1ph9o6`kKlGjC8GEdFX9+@{I}q{33}%?v>$a-cw6HGOOLVnv3ITN_D~k zo^QL%)6K#_{j)b&>8Qy@Eweq=Ne8rKsjJTe)mfDw?scqlc&US2dxU0@o5$(Zu(GB4 zujr5^yZdwlP>E{wrkq=NiW~PQZm5`fJz5m&9I}B^zPVNSSa9vWcXu^m%+bU|aOg5q zK%|a72J^vxGy)&3GlNod=Wt|FBG=mgP)o%{(2PCL$9s$dMvIcv^FdM?hbNYQrX%I| z{binoW_?J27M3L2H_Y4n0!3PGL#b*UxRbpd3l$RLC#I})-32((m#4}vP%kHB3Q7PGLpvuro4~7i2u6z$3ar+YSP2?_%+^%f* zR}5Rl@nUnDVdT&uE_ZP%NU-(Zn*^k2*4S;xubW_f3f-cK+=>uy-sK;&F{mRdpgwIgSHfJSw=22paH-mu>R=3Kf9cR*A_Sjg7q#MM< zqobyHu#q_oM3;REOf&nTGa=n6MK4QZ{pey;iGwX&bnAUCVq`=c0{gykLm{VZo%ulF z*n_LEk%}KbmVW1)L+Ab3sSZPR+Fe*5p$^HC|Oyb{_is> zsuD42;l;BT-a#X6fP(~C+`TP&(``5KD7dp9)GD&EVfNN4Bf@5N63j4c_IOZZ`^gF1 zphj9>;b1JVOWrk`HhO{mmk*Lp>wXpL*r|VQth!^2ajO2-Q$=;E0ZcMzj9V;D}3k7ej?g$MEOSvfr*p<&b z6B?7p3F^a78y9pEd$#q2Pm1b zU#?c^Op~TXSZ`3z2a{A=UzcS`zB%Z|XG2xth@1`h=wY$wyp|u2)s&QN#af+k>`vF! z&{oB;K{Wblwtcc`JH%E!TwV2q%vd}p>iZ9d@C(kwR>Dm)p? 
zV-i0tv8PP66)jD1#I*Qm*`@U`^o)}|58+bGD1y(EEM_dJh-O9xP^xdF-_Z#qZ&m{c zbC6W;iNU!24Cvnj14>>_V8a{IB$GXu&z39rEKNX_07*3xp*W3rJo!}pp2M0Hwe$#* zi#HgV_>>SSD;YT=uK8*Lu|$a+IIXPF$${!eaPU%X#jh@y96VcWEFGqB#<_hE8QPmQ zO_C$p_nXzGgQtqVrC1t-5`*juoj0Q%VLnw`@Yt&eCg!x)84Pq&N%`@t**O@LYz3OR(@+})Hu&$>gJ;6oxdO{ z&KR3!hDx52>YBb*JE@4B`8}j*yOg=37>&zbSN}#T@GA6n9+dFcA*9q_l2eI%Xh*7~ ziU87?k{%5!@e5oasj8xTY|ysPyOMR3W;w?vvG}prD%~$8wf$j!6&K4LI%aD1$6B&8 zG|Bq_{em<75I~pVeMNJ6Dv9e{<=x@Es?2r|L;d(lJhNv+5~$`ps7`1lAq>B{Ot5Ga z6qD6CeNHKADuYBeC(!$C>E5yJ7O5IFfdN*2lPV*LTj(fX$`T*h6!l7_BFQ%HhbJFp zKUVk@Dl`5ZH)LoQ^{7N6?HyY_;Jo?*Uu#dn_XW`49o!xdK!+JJN_3KD7k@2J((0h0 z?0!++a*3VkR_Y8-s+o<1M(>PCz=|sJMqa z0+r0sNH_$gvD_@AC}TCb8}m~2v}_leWOtWdheZwxJl0i{OGIRcO0iVJ-B>5CgP^O-M7OYVJ*8(0|euX~UGp`sq@@gaEw*bHD4*Dj8_ zPO4*=dce-k-f;9Xl`P>A2U6SzIPhFWQT>2(PjqTMlBf}zL3<&dS*!E0mM}&jbXhc- zAb9}5!V(`=H1zl4fM|8TdAE{XwAuTJ>dTw3o}wzSb&xhxCijhe4Q#{|l(FXGy+A)j zH>IZrWy4|#?wJ-1?zBm;cKLHK*H5ngXeiJE?k?6Lz1i+02rcMG7kNDQlDJ_??0D#; z(Bju>vbV@>IGl97vC?TD(|fa!E?NjDA;*m&#_ZiX>Vgi+wr`atYOngkRp_w%?M~sv zUVImV4>dX4Ih+MO4LU`Ui=K%20a~JOwq1$6)KUw@81y#uUGKMV4>O0ioDGDvtZ{Jl zmay)x!zLD>Hl1jqnzX9b_da}w9xr9S`kQwUZPAei4I5Ao#$N}f9I10=!}MXIF!F!C z6+i+ofRKI2Rvlk8erCmgYu2%A6S_nSX7!cGJQ6pQ{xw*Iw(KXQGft90Ft(YQ<7nw! ROz*Khv5A{`^It3We*oUlR=)rM From 0448ebdd1ddddd945a61c1d7c7e7294e80f7ce84 Mon Sep 17 00:00:00 2001 From: Tyler Ohlsen Date: Mon, 22 Jan 2024 17:33:08 +0000 Subject: [PATCH 3/3] remove core bin Signed-off-by: Tyler Ohlsen --- core/bin/main/mappings/doc-level-queries.json | 16 - core/bin/main/mappings/scheduled-jobs.json | 694 ------------------ .../org/opensearch/alerting/core/JobRunner.kt | 17 - .../opensearch/alerting/core/JobSweeper.kt | 512 ------------- .../alerting/core/JobSweeperMetrics.kt | 29 - .../alerting/core/ScheduledJobIndices.kt | 65 -- .../core/action/node/ScheduledJobStats.kt | 88 --- .../action/node/ScheduledJobsStatsAction.kt | 25 - .../action/node/ScheduledJobsStatsRequest.kt | 45 -- .../action/node/ScheduledJobsStatsResponse.kt | 78 -- .../node/ScheduledJobsStatsTransportAction.kt | 139 ---- .../RestScheduledJobStatsHandler.kt | 121 --- .../alerting/core/schedule/JobScheduler.kt | 228 ------ .../core/schedule/JobSchedulerMetrics.kt | 48 -- .../LegacyOpenDistroScheduledJobSettings.kt | 49 -- .../core/settings/ScheduledJobSettings.kt | 51 -- .../opensearchapi/OpenSearchExtensions.kt | 207 ------ core/bin/main/settings/doc-level-queries.json | 10 - .../alerting/core/WriteableTests.kt | 26 - .../alerting/core/model/MockScheduledJob.kt | 33 - .../core/schedule/JobSchedulerTest.kt | 190 ----- .../alerting/core/schedule/MockJobRunner.kt | 31 - 22 files changed, 2702 deletions(-) delete mode 100644 core/bin/main/mappings/doc-level-queries.json delete mode 100644 core/bin/main/mappings/scheduled-jobs.json delete mode 100644 core/bin/main/org/opensearch/alerting/core/JobRunner.kt delete mode 100644 core/bin/main/org/opensearch/alerting/core/JobSweeper.kt delete mode 100644 core/bin/main/org/opensearch/alerting/core/JobSweeperMetrics.kt delete mode 100644 core/bin/main/org/opensearch/alerting/core/ScheduledJobIndices.kt delete mode 100644 core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobStats.kt delete mode 100644 core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsAction.kt delete mode 100644 core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsRequest.kt delete mode 100644 core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsResponse.kt delete mode 100644 
core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsTransportAction.kt delete mode 100644 core/bin/main/org/opensearch/alerting/core/resthandler/RestScheduledJobStatsHandler.kt delete mode 100644 core/bin/main/org/opensearch/alerting/core/schedule/JobScheduler.kt delete mode 100644 core/bin/main/org/opensearch/alerting/core/schedule/JobSchedulerMetrics.kt delete mode 100644 core/bin/main/org/opensearch/alerting/core/settings/LegacyOpenDistroScheduledJobSettings.kt delete mode 100644 core/bin/main/org/opensearch/alerting/core/settings/ScheduledJobSettings.kt delete mode 100644 core/bin/main/org/opensearch/alerting/opensearchapi/OpenSearchExtensions.kt delete mode 100644 core/bin/main/settings/doc-level-queries.json delete mode 100644 core/bin/test/org/opensearch/alerting/core/WriteableTests.kt delete mode 100644 core/bin/test/org/opensearch/alerting/core/model/MockScheduledJob.kt delete mode 100644 core/bin/test/org/opensearch/alerting/core/schedule/JobSchedulerTest.kt delete mode 100644 core/bin/test/org/opensearch/alerting/core/schedule/MockJobRunner.kt diff --git a/core/bin/main/mappings/doc-level-queries.json b/core/bin/main/mappings/doc-level-queries.json deleted file mode 100644 index 7f0602df7..000000000 --- a/core/bin/main/mappings/doc-level-queries.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "_meta": { - "schema_version": 1 - }, - "properties": { - "query": { - "type": "percolator_ext" - }, - "monitor_id": { - "type": "text" - }, - "index": { - "type": "text" - } - } -} \ No newline at end of file diff --git a/core/bin/main/mappings/scheduled-jobs.json b/core/bin/main/mappings/scheduled-jobs.json deleted file mode 100644 index 2651c862e..000000000 --- a/core/bin/main/mappings/scheduled-jobs.json +++ /dev/null @@ -1,694 +0,0 @@ -{ - "_meta" : { - "schema_version": 8 - }, - "properties": { - "monitor": { - "dynamic": "false", - "properties": { - "schema_version": { - "type": "integer" - }, - "name": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "owner": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "monitor_type": { - "type": "keyword" - }, - "user": { - "properties": { - "name": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "backend_roles": { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "roles": { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "custom_attribute_names": { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - } - } - }, - "type": { - "type": "keyword" - }, - "enabled": { - "type": "boolean" - }, - "enabled_time": { - "type": "date", - "format": "strict_date_time||epoch_millis" - }, - "last_update_time": { - "type": "date", - "format": "strict_date_time||epoch_millis" - }, - "schedule": { - "properties": { - "period": { - "properties": { - "interval": { - "type": "integer" - }, - "unit": { - "type": "keyword" - } - } - }, - "cron": { - "properties": { - "expression": { - "type": "text" - }, - "timezone": { - "type": "keyword" - } - } - } - } - }, - "inputs": { - "type": "nested", - "properties": { - "search": { - "properties": { - "indices": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "query": { - "type": "object", - "enabled": false - } - } - } - } - }, - "data_sources": { - "properties": { - 
"alerts_index": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "findings_index": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "query_index": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "query_index_mapping": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - } - } - }, - "group_by_fields": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "triggers": { - "type": "nested", - "properties": { - "name": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "min_time_between_executions": { - "type": "integer" - }, - "condition": { - "type": "object", - "enabled": false - }, - "actions": { - "type": "nested", - "properties": { - "name": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "destination_id": { - "type": "keyword" - }, - "subject_template": { - "type": "object", - "enabled": false - }, - "message_template": { - "type": "object", - "enabled": false - }, - "throttle_enabled": { - "type": "boolean" - }, - "throttle": { - "properties": { - "value": { - "type": "integer" - }, - "unit": { - "type": "keyword" - } - } - } - } - }, - "query_level_trigger": { - "properties": { - "name": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "min_time_between_executions": { - "type": "integer" - }, - "condition": { - "type": "object", - "enabled": false - }, - "actions": { - "type": "nested", - "properties": { - "name": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "destination_id": { - "type": "keyword" - }, - "subject_template": { - "type": "object", - "enabled": false - }, - "message_template": { - "type": "object", - "enabled": false - }, - "throttle_enabled": { - "type": "boolean" - }, - "throttle": { - "properties": { - "value": { - "type": "integer" - }, - "unit": { - "type": "keyword" - } - } - } - } - } - } - } - } - }, - "ui_metadata": { - "type": "object", - "enabled": false - } - } - }, - "workflow": { - "dynamic": "false", - "properties": { - "schema_version": { - "type": "integer" - }, - "name": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "owner": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "workflow_type": { - "type": "keyword" - }, - "user": { - "properties": { - "name": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "backend_roles": { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "roles": { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "custom_attribute_names": { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - } - } - }, - "type": { - "type": "keyword" - }, - "enabled": { - "type": "boolean" - }, - "audit_delegate_monitor_alerts": { - "type": "boolean" - }, - "enabled_time": { - "type": "date", - "format": "strict_date_time||epoch_millis" - }, - "last_update_time": { - "type": "date", - "format": "strict_date_time||epoch_millis" - }, - "schedule": { - "properties": { - "period": 
{ - "properties": { - "interval": { - "type": "integer" - }, - "unit": { - "type": "keyword" - } - } - }, - "cron": { - "properties": { - "expression": { - "type": "text" - }, - "timezone": { - "type": "keyword" - } - } - } - } - }, - "inputs": { - "type": "nested", - "properties": { - "composite_input": { - "type": "nested", - "properties": { - "sequence": { - "properties": { - "delegates": { - "type": "nested", - "properties": { - "order": { - "type": "integer" - }, - "monitor_id": { - "type": "keyword" - }, - "chained_monitor_findings": { - "properties": { - "monitor_id": { - "type": "keyword" - } - } - } - } - } - } - } - } - } - } - }, - "group_by_fields": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - } - } - }, - "destination": { - "dynamic": "false", - "properties": { - "schema_version": { - "type": "integer" - }, - "name": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "user": { - "properties": { - "name": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "backend_roles": { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "roles": { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "custom_attribute_names": { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - } - } - }, - "type": { - "type": "keyword" - }, - "last_update_time": { - "type": "date", - "format": "strict_date_time||epoch_millis" - }, - "chime": { - "properties": { - "url": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - } - } - }, - "slack": { - "properties": { - "url": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - } - } - }, - "custom_webhook": { - "properties": { - "url": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "scheme": { - "type": "keyword" - }, - "host": { - "type": "text" - }, - "port": { - "type": "integer" - }, - "path": { - "type": "keyword" - }, - "query_params": { - "type": "object", - "enabled": false - }, - "header_params": { - "type": "object", - "enabled": false - }, - "username": { - "type": "text" - }, - "password": { - "type": "text" - } - } - }, - "email": { - "properties": { - "email_account_id": { - "type": "keyword" - }, - "recipients": { - "type": "nested", - "properties": { - "type": { - "type": "keyword" - }, - "email_group_id": { - "type": "keyword" - }, - "email": { - "type": "text" - } - } - } - } - } - } - }, - "email_account": { - "properties": { - "name": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "host": { - "type": "text" - }, - "port": { - "type": "integer" - }, - "method": { - "type": "text" - }, - "from": { - "type": "text" - } - } - }, - "email_group": { - "properties": { - "name": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } - } - }, - "emails": { - "type": "nested", - "properties": { - "email": { - "type": "text" - } - } - } - } - }, - "metadata" : { - "properties": { - "monitor_id": { - "type": "keyword" - }, - "last_action_execution_times": { - "type": "nested", - "properties": { - "action_id": { - "type": "keyword" - }, - "execution_time": { - "type": "date", - "format": "strict_date_time||epoch_millis" - } - } - }, - 
"last_run_context": { - "type": "object", - "enabled": false - }, - "source_to_query_index_mapping": { - "type": "object", - "enabled": false - } - } - }, - "workflow_metadata" : { - "properties": { - "workflow_id": { - "type": "keyword" - }, - "monitor_ids": { - "type": "text", - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 1000 - } - } - }, - "latest_run_time": { - "type": "date", - "format": "strict_date_time||epoch_millis" - }, - "latest_execution_id": { - "type": "keyword" - } - } - } - } -} diff --git a/core/bin/main/org/opensearch/alerting/core/JobRunner.kt b/core/bin/main/org/opensearch/alerting/core/JobRunner.kt deleted file mode 100644 index c251c8c6a..000000000 --- a/core/bin/main/org/opensearch/alerting/core/JobRunner.kt +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.core - -import org.opensearch.commons.alerting.model.ScheduledJob -import java.time.Instant - -interface JobRunner { - fun postDelete(jobId: String) - - fun postIndex(job: ScheduledJob) - - fun runJob(job: ScheduledJob, periodStart: Instant, periodEnd: Instant) -} diff --git a/core/bin/main/org/opensearch/alerting/core/JobSweeper.kt b/core/bin/main/org/opensearch/alerting/core/JobSweeper.kt deleted file mode 100644 index 6ba910707..000000000 --- a/core/bin/main/org/opensearch/alerting/core/JobSweeper.kt +++ /dev/null @@ -1,512 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.core - -import org.apache.logging.log4j.LogManager -import org.opensearch.action.bulk.BackoffPolicy -import org.opensearch.action.search.SearchRequest -import org.opensearch.alerting.core.schedule.JobScheduler -import org.opensearch.alerting.core.settings.ScheduledJobSettings.Companion.REQUEST_TIMEOUT -import org.opensearch.alerting.core.settings.ScheduledJobSettings.Companion.SWEEPER_ENABLED -import org.opensearch.alerting.core.settings.ScheduledJobSettings.Companion.SWEEP_BACKOFF_MILLIS -import org.opensearch.alerting.core.settings.ScheduledJobSettings.Companion.SWEEP_BACKOFF_RETRY_COUNT -import org.opensearch.alerting.core.settings.ScheduledJobSettings.Companion.SWEEP_PAGE_SIZE -import org.opensearch.alerting.core.settings.ScheduledJobSettings.Companion.SWEEP_PERIOD -import org.opensearch.alerting.opensearchapi.firstFailureOrNull -import org.opensearch.alerting.opensearchapi.retry -import org.opensearch.client.Client -import org.opensearch.cluster.ClusterChangedEvent -import org.opensearch.cluster.ClusterStateListener -import org.opensearch.cluster.routing.IndexShardRoutingTable -import org.opensearch.cluster.routing.Murmur3HashFunction -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.lifecycle.LifecycleListener -import org.opensearch.common.logging.Loggers -import org.opensearch.common.lucene.uid.Versions -import org.opensearch.common.settings.Settings -import org.opensearch.common.unit.TimeValue -import org.opensearch.common.util.concurrent.OpenSearchExecutors -import org.opensearch.common.xcontent.LoggingDeprecationHandler -import org.opensearch.common.xcontent.XContentHelper -import org.opensearch.common.xcontent.XContentType -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.core.common.Strings -import org.opensearch.core.common.bytes.BytesReference -import org.opensearch.core.index.shard.ShardId -import org.opensearch.core.rest.RestStatus -import 
org.opensearch.core.xcontent.NamedXContentRegistry -import org.opensearch.core.xcontent.XContentParser -import org.opensearch.core.xcontent.XContentParserUtils -import org.opensearch.index.engine.Engine -import org.opensearch.index.query.BoolQueryBuilder -import org.opensearch.index.query.QueryBuilders -import org.opensearch.index.shard.IndexingOperationListener -import org.opensearch.search.builder.SearchSourceBuilder -import org.opensearch.search.sort.FieldSortBuilder -import org.opensearch.threadpool.Scheduler -import org.opensearch.threadpool.ThreadPool -import java.util.TreeMap -import java.util.concurrent.ConcurrentHashMap -import java.util.concurrent.Executors - -typealias JobId = String -typealias JobVersion = Long - -/** - * 'Sweeping' is the process of listening for new and updated [ScheduledJob]s and deciding if they should be scheduled for - * execution on this node. The [JobSweeper] runs on every node, sweeping all local active shards that are present on the node. - * - * A [consistent hash][ShardNodes] is used to distribute jobs across all nodes that contain an active instance of the same shard. - * This minimizes any interruptions in job execution when the cluster configuration changes. - * - * There are two types of sweeps: - * - *Full sweeps* occur when the [routing table][IndexShardRoutingTable] for the shard changes (e.g. a replica has been - * added or removed). The full sweep re-reads all jobs in the shard, deciding which ones to run locally. All full sweeps - * happen asynchronously in the background in a serial manner. See the [sweepAllShards] method. - * - *Single job sweeps* occur when a new version of the job is indexed or deleted. An [IndexingOperationListener] listens - * for index changes and synchronously schedules or removes the job from the scheduler. - */ -class JobSweeper( - private val settings: Settings, - private val client: Client, - private val clusterService: ClusterService, - private val threadPool: ThreadPool, - private val xContentRegistry: NamedXContentRegistry, - private val scheduler: JobScheduler, - private val sweepableJobTypes: List<String> -) : ClusterStateListener, IndexingOperationListener, LifecycleListener() { - private val logger = LogManager.getLogger(javaClass) - - private val fullSweepExecutor = Executors.newSingleThreadExecutor(OpenSearchExecutors.daemonThreadFactory("opendistro_job_sweeper")) - - private val sweptJobs = ConcurrentHashMap<ShardId, ConcurrentHashMap<JobId, JobVersion>>() - - private var scheduledFullSweep: Scheduler.Cancellable?
= null - - @Volatile private var lastFullSweepTimeNano = System.nanoTime() - - @Volatile private var requestTimeout = REQUEST_TIMEOUT.get(settings) - - @Volatile private var sweepPeriod = SWEEP_PERIOD.get(settings) - - @Volatile private var sweeperEnabled = SWEEPER_ENABLED.get(settings) - - @Volatile private var sweepPageSize = SWEEP_PAGE_SIZE.get(settings) - - @Volatile private var sweepBackoffMillis = SWEEP_BACKOFF_MILLIS.get(settings) - - @Volatile private var sweepBackoffRetryCount = SWEEP_BACKOFF_RETRY_COUNT.get(settings) - - @Volatile private var sweepSearchBackoff = BackoffPolicy.exponentialBackoff(sweepBackoffMillis, sweepBackoffRetryCount) - - init { - clusterService.addListener(this) - clusterService.addLifecycleListener(this) - clusterService.clusterSettings.addSettingsUpdateConsumer(SWEEP_PERIOD) { - // if sweep period change, restart background sweep with new sweep period - logger.debug("Reinitializing background full sweep with period: ${sweepPeriod.minutes()}") - sweepPeriod = it - initBackgroundSweep() - } - clusterService.clusterSettings.addSettingsUpdateConsumer(SWEEPER_ENABLED) { - sweeperEnabled = it - if (!sweeperEnabled) disable() else enable() - } - clusterService.clusterSettings.addSettingsUpdateConsumer(SWEEP_BACKOFF_MILLIS) { - sweepBackoffMillis = it - sweepSearchBackoff = BackoffPolicy.exponentialBackoff(sweepBackoffMillis, sweepBackoffRetryCount) - } - clusterService.clusterSettings.addSettingsUpdateConsumer(SWEEP_BACKOFF_RETRY_COUNT) { - sweepBackoffRetryCount = it - sweepSearchBackoff = BackoffPolicy.exponentialBackoff(sweepBackoffMillis, sweepBackoffRetryCount) - } - clusterService.clusterSettings.addSettingsUpdateConsumer(SWEEP_PAGE_SIZE) { sweepPageSize = it } - clusterService.clusterSettings.addSettingsUpdateConsumer(REQUEST_TIMEOUT) { requestTimeout = it } - } - - override fun afterStart() { - initBackgroundSweep() - } - - override fun beforeStop() { - scheduledFullSweep?.cancel() - } - - override fun beforeClose() { - fullSweepExecutor.shutdown() - } - - /** - * Initiates a full sweep of all local shards when the index routing table is changed (for e.g. when the node joins - * the cluster, a replica is added, removed or promoted to primary). - * - * This callback won't be invoked concurrently since cluster state changes are applied serially to the node - * in the order they occur on the cluster manager. However we can't block this callback for the duration of a full sweep so - * we perform the sweep in the background in a single threaded executor [fullSweepExecutor]. - */ - override fun clusterChanged(event: ClusterChangedEvent) { - if (!isSweepingEnabled()) return - - if (!event.indexRoutingTableChanged(ScheduledJob.SCHEDULED_JOBS_INDEX)) return - - logger.debug("Scheduled Jobs routing table changed. Running full sweep...") - fullSweepExecutor.submit { - sweepAllShards() - } - } - - /** - * This callback is invoked when a new job (or new version of a job) is indexed. If the job is assigned to the node - * it is scheduled. Relies on all indexing operations using optimistic concurrency control to ensure that stale versions - * of jobs are not scheduled. 
It schedules job only if it is one of the [sweepableJobTypes] - * - */ - override fun postIndex(shardId: ShardId, index: Engine.Index, result: Engine.IndexResult) { - if (!isSweepingEnabled()) return - - if (result.resultType != Engine.Result.Type.SUCCESS) { - val shardJobs = sweptJobs[shardId] ?: emptyMap() - val currentVersion = shardJobs[index.id()] ?: Versions.NOT_FOUND - logger.debug("Indexing failed for ScheduledJob: ${index.id()}. Continuing with current version $currentVersion") - return - } - - if (isOwningNode(shardId, index.id())) { - val xcp = XContentHelper.createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, index.source(), XContentType.JSON) - if (isSweepableJobType(xcp)) { - val job = parseAndSweepJob(xcp, shardId, index.id(), result.version, index.source(), true) - if (job != null) scheduler.postIndex(job) - } else { - logger.debug("Not a valid job type in document ${index.id()} to sweep.") - } - } - } - - /** - * This callback is invoked when a job is deleted from a shard. The job is descheduled. Relies on all delete operations - * using optimistic concurrency control to ensure that stale versions of jobs are not scheduled. - */ - override fun postDelete(shardId: ShardId, delete: Engine.Delete, result: Engine.DeleteResult) { - if (!isSweepingEnabled()) return - - if (result.resultType != Engine.Result.Type.SUCCESS) { - val shardJobs = sweptJobs[shardId] ?: emptyMap() - val currentVersion = shardJobs[delete.id()] ?: Versions.NOT_FOUND - logger.debug("Deletion failed for ScheduledJob: ${delete.id()}. Continuing with current version $currentVersion") - return - } - - if (isOwningNode(shardId, delete.id())) { - if (scheduler.scheduledJobs().contains(delete.id())) { - sweep(shardId, delete.id(), result.version, null) - } - scheduler.postDelete(delete.id()) - } - } - - fun enable() { - // initialize background sweep - initBackgroundSweep() - // set sweeperEnabled flag to true to make the listeners aware of this setting - sweeperEnabled = true - } - - fun disable() { - // cancel background sweep - scheduledFullSweep?.cancel() - // deschedule existing jobs on this node - logger.info("Descheduling all jobs as sweeping is disabled") - scheduler.deschedule(scheduler.scheduledJobs()) - // set sweeperEnabled flag to false to make the listeners aware of this setting - sweeperEnabled = false - } - - public fun isSweepingEnabled(): Boolean { - // Although it is a single link check, keeping it as a separate function, so we - // can abstract out logic of finding out whether to proceed or not - return sweeperEnabled == true - } - - private fun initBackgroundSweep() { - // if sweeping disabled, background sweep should not be triggered - if (!isSweepingEnabled()) return - - // cancel existing background thread if present - scheduledFullSweep?.cancel() - - // Manually sweep all shards before scheduling the background sweep so it picks up any changes immediately - // since the first run of a task submitted with scheduleWithFixedDelay() happens after the interval has passed. - logger.debug("Performing sweep of scheduled jobs.") - fullSweepExecutor.submit { - sweepAllShards() - } - - // Setup an anti-entropy/self-healing background sweep, in case a sweep that was triggered by an event fails. - val scheduledSweep = Runnable { - val elapsedTime = getFullSweepElapsedTime() - - // Rate limit to at most one full sweep per sweep period - // The schedule runs may wake up a few milliseconds early. - // Delta will be giving some buffer on the schedule to allow waking up slightly earlier. 
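- // Worked example (illustrative): with a 5 minute sweep period, a wake-up at
- // elapsedTime = 299,990 ms gives delta = 300,000 - 299,990 = 10 ms, under the 20 ms buffer,
- // so the full sweep runs; a wake-up at elapsedTime = 150,000 ms gives delta = 150,000 ms,
- // so that run is skipped and the sweep waits for a later wake-up.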
- val delta = sweepPeriod.millis - elapsedTime.millis - if (delta < 20L) { // give 20ms buffer. - fullSweepExecutor.submit { - logger.debug("Performing background sweep of scheduled jobs.") - sweepAllShards() - } - } - } - scheduledFullSweep = threadPool.scheduleWithFixedDelay(scheduledSweep, sweepPeriod, ThreadPool.Names.SAME) - } - - private fun sweepAllShards() { - val clusterState = clusterService.state() - if (!clusterState.routingTable.hasIndex(ScheduledJob.SCHEDULED_JOBS_INDEX)) { - scheduler.deschedule(scheduler.scheduledJobs()) - sweptJobs.clear() - lastFullSweepTimeNano = System.nanoTime() - return - } - - // Find all shards that are currently assigned to this node. - val localNodeId = clusterState.nodes.localNodeId - val localShards = clusterState.routingTable.allShards(ScheduledJob.SCHEDULED_JOBS_INDEX) - // Find all active shards - .filter { it.active() } - // group by shardId - .groupBy { it.shardId() } - // assigned to local node - .filter { (_, shards) -> shards.any { it.currentNodeId() == localNodeId } } - - // Remove all jobs on shards that are no longer assigned to this node. - val removedShards = sweptJobs.keys - localShards.keys - removedShards.forEach { shardId -> - val shardJobs = sweptJobs.remove(shardId) ?: emptyMap() - scheduler.deschedule(shardJobs.keys) - } - - // resweep all shards that are assigned to this node. - localShards.forEach { (shardId, shards) -> - try { - sweepShard(shardId, ShardNodes(localNodeId, shards.map { it.currentNodeId() })) - } catch (e: Exception) { - val shardLogger = Loggers.getLogger(javaClass, shardId) - shardLogger.error("Error while sweeping shard $shardId", e) - } - } - lastFullSweepTimeNano = System.nanoTime() - } - - private fun sweepShard(shardId: ShardId, shardNodes: ShardNodes, startAfter: String = "") { - val logger = Loggers.getLogger(javaClass, shardId) - logger.debug("Sweeping shard $shardId") - - // Remove any jobs that are currently scheduled that are no longer owned by this node - val currentJobs = sweptJobs.getOrPut(shardId) { ConcurrentHashMap() } - currentJobs.keys.filterNot { shardNodes.isOwningNode(it) }.forEach { - scheduler.deschedule(it) - currentJobs.remove(it) - } - - // sweep the shard for new and updated jobs. Uses a search after query to paginate, assuming that any concurrent - // updates and deletes are handled by the index operation listener. - var searchAfter: String? 
= startAfter - while (searchAfter != null) { - val boolQueryBuilder = BoolQueryBuilder() - sweepableJobTypes.forEach { boolQueryBuilder.should(QueryBuilders.existsQuery(it)) } - val jobSearchRequest = SearchRequest() - .indices(ScheduledJob.SCHEDULED_JOBS_INDEX) - .preference("_shards:${shardId.id}|_only_local") - .source( - SearchSourceBuilder.searchSource() - .version(true) - .sort( - FieldSortBuilder("_id") - .unmappedType("keyword") - .missing("_last") - ) - .searchAfter(arrayOf(searchAfter)) - .size(sweepPageSize) - .query(boolQueryBuilder) - ) - - val response = sweepSearchBackoff.retry { - client.search(jobSearchRequest).actionGet(requestTimeout) - } - if (response.status() != RestStatus.OK) { - logger.error("Error sweeping shard $shardId.", response.firstFailureOrNull()) - return - } - for (hit in response.hits) { - if (shardNodes.isOwningNode(hit.id)) { - val xcp = XContentHelper.createParser( - xContentRegistry, - LoggingDeprecationHandler.INSTANCE, - hit.sourceRef, - XContentType.JSON - ) - parseAndSweepJob(xcp, shardId, hit.id, hit.version, hit.sourceRef) - } - } - searchAfter = response.hits.lastOrNull()?.id - } - } - - private fun sweep( - shardId: ShardId, - jobId: JobId, - newVersion: JobVersion, - job: ScheduledJob?, - failedToParse: Boolean = false - ) { - sweptJobs.getOrPut(shardId) { ConcurrentHashMap() } - // Use [compute] to update atomically in case another thread concurrently indexes/deletes the same job - .compute(jobId) { _, currentVersion -> - val jobCurrentlyScheduled = scheduler.scheduledJobs().contains(jobId) - - if (newVersion <= (currentVersion ?: Versions.NOT_FOUND)) { - if (unchangedJobToBeRescheduled(newVersion, currentVersion, jobCurrentlyScheduled, job)) { - logger.debug("Not skipping job $jobId since it is an unchanged job slated to be rescheduled") - } else { - logger.debug("Skipping job $jobId, $newVersion <= $currentVersion") - return@compute currentVersion - } - } - - // deschedule the currently scheduled version - if (jobCurrentlyScheduled) { - scheduler.deschedule(jobId) - } - - if (failedToParse) { - return@compute currentVersion - } - if (job != null) { - if (job.enabled) { - scheduler.schedule(job) - } - return@compute newVersion - } else { - return@compute null - } - } - } - - /* - * During the job sweep, normally jobs where the currentVersion is equal to the newVersion are skipped since - * there was no change. - * - * However, there exists an edge-case where a job could have been de-scheduled by flipping [SWEEPER_ENABLED] - * to false and then not have undergone any changes when the sweeper is re-enabled. In this case, the job should - * not be skipped so it can be re-scheduled. This utility method checks for this condition so the sweep() method - * can account for it. - */ - private fun unchangedJobToBeRescheduled( - newVersion: JobVersion, - currentVersion: JobVersion?, - jobCurrentlyScheduled: Boolean, - job: ScheduledJob? - ): Boolean { - // newVersion should not be [Versions.NOT_FOUND] here since it's passed in from existing search hits - // or successful doc delete operations - val versionWasUnchanged = newVersion == (currentVersion ?: Versions.NOT_FOUND) - val jobEnabled = job?.enabled ?: false - - return versionWasUnchanged && !jobCurrentlyScheduled && jobEnabled - } - - private fun parseAndSweepJob( - xcp: XContentParser, - shardId: ShardId, - jobId: JobId, - jobVersion: JobVersion, - jobSource: BytesReference, - typeIsParsed: Boolean = false - ): ScheduledJob? 
{ - return try { - val job = parseScheduledJob(xcp, jobId, jobVersion, typeIsParsed) - sweep(shardId, jobId, jobVersion, job) - job - } catch (e: Exception) { - logger.warn( - "Unable to parse ScheduledJob source: {}", - Strings.cleanTruncate(jobSource.utf8ToString(), 1000) - ) - sweep(shardId, jobId, jobVersion, null, true) - null - } - } - - private fun parseScheduledJob(xcp: XContentParser, jobId: JobId, jobVersion: JobVersion, typeIsParsed: Boolean): ScheduledJob { - return if (typeIsParsed) { - ScheduledJob.parse(xcp, xcp.currentName(), jobId, jobVersion) - } else { - ScheduledJob.parse(xcp, jobId, jobVersion) - } - } - - private fun getFullSweepElapsedTime(): TimeValue { - return TimeValue.timeValueNanos(System.nanoTime() - lastFullSweepTimeNano) - } - - fun getJobSweeperMetrics(): JobSweeperMetrics { - if (!isSweepingEnabled()) { - return JobSweeperMetrics(-1, true) - } - val elapsedTime = getFullSweepElapsedTime() - return JobSweeperMetrics(elapsedTime.millis, elapsedTime.millis <= sweepPeriod.millis) - } - - private fun isSweepableJobType(xcp: XContentParser): Boolean { - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, xcp.nextToken(), xcp) - XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, xcp.nextToken(), xcp) - val jobType = xcp.currentName() - return sweepableJobTypes.contains(jobType) - } - - private fun isOwningNode(shardId: ShardId, jobId: JobId): Boolean { - val localNodeId = clusterService.localNode().id - val shardNodeIds = clusterService.state().routingTable.shardRoutingTable(shardId) - .filter { it.active() } - .map { it.currentNodeId() } - val shardNodes = ShardNodes(localNodeId, shardNodeIds) - return shardNodes.isOwningNode(jobId) - } -} - -/** - * A group of nodes in the cluster that contain active instances of a single OpenSearch shard. This uses a consistent hash to divide - * the jobs indexed in that shard amongst the nodes such that each job is "owned" by exactly one of the nodes. - * The local node must have an active instance of the shard. - * - * Implementation notes: This class is not thread safe. It uses the same [hash function][Murmur3HashFunction] that OpenSearch uses - * for routing. For each real node `100` virtual nodes are added to provide a good distribution. 
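- *
- * A minimal sketch of the same virtual-node lookup (simplified: plain String.hashCode() stands in
- * for the Murmur3HashFunction used here, but the ring logic is identical):
- *
- *     val circle = TreeMap<Int, String>()
- *     for (node in listOf("node-a", "node-b", "node-c")) {
- *         for (i in 0 until 100) circle[(node + i).hashCode()] = node
- *     }
- *     fun ownerOf(jobId: String): String =
- *         (circle.higherEntry(jobId.hashCode()) ?: circle.firstEntry()).value
- *
- * Each job id hashes to exactly one owning node, and adding or removing a node only remaps the
- * jobs whose hashes fall in that node's segments of the ring.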
- */ -private class ShardNodes(val localNodeId: String, activeShardNodeIds: Collection<String>) { - - private val circle = TreeMap<Int, String>() - - companion object { - private const val VIRTUAL_NODE_COUNT = 100 - } - - init { - for (node in activeShardNodeIds) { - for (i in 0 until VIRTUAL_NODE_COUNT) { - circle[Murmur3HashFunction.hash(node + i)] = node - } - } - } - - fun isOwningNode(id: JobId): Boolean { - if (circle.isEmpty()) { - return false - } - val hash = Murmur3HashFunction.hash(id) - val nodeId = (circle.higherEntry(hash) ?: circle.firstEntry()).value - return (localNodeId == nodeId) - } -} diff --git a/core/bin/main/org/opensearch/alerting/core/JobSweeperMetrics.kt b/core/bin/main/org/opensearch/alerting/core/JobSweeperMetrics.kt deleted file mode 100644 index 9a10586d1..000000000 --- a/core/bin/main/org/opensearch/alerting/core/JobSweeperMetrics.kt +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.core - -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.common.io.stream.Writeable -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.ToXContentFragment -import org.opensearch.core.xcontent.XContentBuilder - -data class JobSweeperMetrics(val lastFullSweepTimeMillis: Long, val fullSweepOnTime: Boolean) : ToXContentFragment, Writeable { - - constructor(si: StreamInput) : this(si.readLong(), si.readBoolean()) - - override fun writeTo(out: StreamOutput) { - out.writeLong(lastFullSweepTimeMillis) - out.writeBoolean(fullSweepOnTime) - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.field("last_full_sweep_time_millis", lastFullSweepTimeMillis) - builder.field("full_sweep_on_time", fullSweepOnTime) - return builder - } -} diff --git a/core/bin/main/org/opensearch/alerting/core/ScheduledJobIndices.kt b/core/bin/main/org/opensearch/alerting/core/ScheduledJobIndices.kt deleted file mode 100644 index a71a7e64f..000000000 --- a/core/bin/main/org/opensearch/alerting/core/ScheduledJobIndices.kt +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.core - -import org.opensearch.action.admin.indices.create.CreateIndexRequest -import org.opensearch.action.admin.indices.create.CreateIndexResponse -import org.opensearch.client.AdminClient -import org.opensearch.cluster.health.ClusterIndexHealth -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.settings.Settings -import org.opensearch.commons.alerting.model.ScheduledJob -import org.opensearch.core.action.ActionListener - -/** - * Initialize the OpenSearch components required to run [ScheduledJobs]. - * - * [initScheduledJobIndex] is called before indexing a new scheduled job. It verifies that the index exists before - * allowing the indexing operation to go through. This is to ensure the correct mappings exist for [ScheduledJob]. - */ -class ScheduledJobIndices(private val client: AdminClient, private val clusterService: ClusterService) { - - companion object { - @JvmStatic - fun scheduledJobMappings(): String { - return ScheduledJobIndices::class.java.classLoader.getResource("mappings/scheduled-jobs.json").readText() - } - } - /** - * Initialize the indices required for scheduled jobs.
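- * (Usage sketch with assumed names: a ScheduledJobIndices instance `jobIndices` and an
- * ActionListener<CreateIndexResponse> named `listener` would be wired as
- * `jobIndices.initScheduledJobIndex(listener)` before the first scheduled job is indexed.)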
- * First check if the index exists, and if not create the index with the provided callback listeners. - * - * @param actionListener A callback listener for the index creation call. Generally in the form of onSuccess, onFailure - */ - fun initScheduledJobIndex(actionListener: ActionListener<CreateIndexResponse>) { - if (!scheduledJobIndexExists()) { - val indexRequest = CreateIndexRequest(ScheduledJob.SCHEDULED_JOBS_INDEX) - .mapping(scheduledJobMappings()) - .settings(Settings.builder().put("index.hidden", true).build()) - client.indices().create(indexRequest, actionListener) - } - } - - fun scheduledJobIndexExists(): Boolean { - val clusterState = clusterService.state() - return clusterState.routingTable.hasIndex(ScheduledJob.SCHEDULED_JOBS_INDEX) - } - - /** - * Get the health of the scheduled jobs index. If the index does not exist, return null. - */ - fun scheduledJobIndexHealth(): ClusterIndexHealth? { - var indexHealth: ClusterIndexHealth? = null - - if (scheduledJobIndexExists()) { - val indexRoutingTable = clusterService.state().routingTable.index(ScheduledJob.SCHEDULED_JOBS_INDEX) - val indexMetaData = clusterService.state().metadata().index(ScheduledJob.SCHEDULED_JOBS_INDEX) - - indexHealth = ClusterIndexHealth(indexMetaData, indexRoutingTable) - } - return indexHealth - } -} diff --git a/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobStats.kt b/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobStats.kt deleted file mode 100644 index 07792d553..000000000 --- a/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobStats.kt +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.core.action.node - -import org.opensearch.action.support.nodes.BaseNodeResponse -import org.opensearch.alerting.core.JobSweeperMetrics -import org.opensearch.alerting.core.resthandler.RestScheduledJobStatsHandler -import org.opensearch.alerting.core.schedule.JobSchedulerMetrics -import org.opensearch.cluster.node.DiscoveryNode -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.ToXContentFragment -import org.opensearch.core.xcontent.XContentBuilder -import java.util.Locale - -/** - * Scheduled job stats generated by each node. - */ -class ScheduledJobStats : BaseNodeResponse, ToXContentFragment { - - enum class ScheduleStatus(val status: String) { - RED("red"), - GREEN("green"); - - override fun toString(): String { - return status - } - } - - var status: ScheduleStatus - var jobSweeperMetrics: JobSweeperMetrics? = null - var jobInfos: Array<JobSchedulerMetrics>? = null - - constructor(si: StreamInput) : super(si) { - this.status = si.readEnum(ScheduleStatus::class.java) - this.jobSweeperMetrics = si.readOptionalWriteable { JobSweeperMetrics(it) } - this.jobInfos = si.readOptionalArray({ sti: StreamInput -> JobSchedulerMetrics(sti) }, { size -> arrayOfNulls<JobSchedulerMetrics>(size) }) - } - - constructor( - node: DiscoveryNode, - status: ScheduleStatus, - jobSweeperMetrics: JobSweeperMetrics?, - jobsInfo: Array<JobSchedulerMetrics>? - ) : super(node) { - this.status = status - this.jobSweeperMetrics = jobSweeperMetrics - this.jobInfos = jobsInfo - } - - companion object { - @JvmStatic - fun readScheduledJobStatus(si: StreamInput) = ScheduledJobStats(si) - } - - override fun writeTo(out: StreamOutput) { - super.writeTo(out) - out.writeEnum(status) - out.writeOptionalWriteable(jobSweeperMetrics) - out.writeOptionalArray(jobInfos) - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.field("name", node.name) - builder.field("schedule_status", status) - builder.field("roles", node.roles.map { it.roleName().uppercase(Locale.getDefault()) }) - if (jobSweeperMetrics != null) { - builder.startObject(RestScheduledJobStatsHandler.JOB_SCHEDULING_METRICS) - jobSweeperMetrics!!.toXContent(builder, params) - builder.endObject() - } - - if (jobInfos != null) { - builder.startObject(RestScheduledJobStatsHandler.JOBS_INFO) - for (job in jobInfos!!) { - builder.startObject(job.scheduledJobId) - job.toXContent(builder, params) - builder.endObject() - } - builder.endObject() - } - return builder - } -} diff --git a/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsAction.kt b/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsAction.kt deleted file mode 100644 index 698c6c44e..000000000 --- a/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsAction.kt +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.core.action.node - -import org.opensearch.action.ActionType -import org.opensearch.core.common.io.stream.Writeable - -class ScheduledJobsStatsAction : ActionType<ScheduledJobsStatsResponse>(NAME, reader) { - companion object { - val INSTANCE = ScheduledJobsStatsAction() - const val NAME = "cluster:admin/opendistro/_scheduled_jobs/stats" - - val reader = Writeable.Reader { - val response = ScheduledJobsStatsResponse(it) - response - } - } - - override fun getResponseReader(): Writeable.Reader<ScheduledJobsStatsResponse> { - return reader - } -} diff --git a/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsRequest.kt b/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsRequest.kt deleted file mode 100644 index 6a82e8204..000000000 --- a/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsRequest.kt +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.core.action.node - -import org.opensearch.action.support.nodes.BaseNodesRequest -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import java.io.IOException - -/** - * A request to get node (cluster) level ScheduledJobsStatus. - * By default, all parameters are set to true.
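- *
- * Usage sketch (illustrative; a NodeClient named `client` and a response listener named
- * `listener` are assumed to be in scope):
- *
- *     val request = ScheduledJobsStatsRequest(arrayOf("node-1", "node-2")).clear()
- *     request.jobsInfo = true // ask only for per-job scheduling info
- *     client.execute(ScheduledJobsStatsAction.INSTANCE, request, listener)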
- */ -class ScheduledJobsStatsRequest : BaseNodesRequest<ScheduledJobsStatsRequest> { - var jobSchedulingMetrics: Boolean = true - var jobsInfo: Boolean = true - - constructor(si: StreamInput) : super(si) { - jobSchedulingMetrics = si.readBoolean() - jobsInfo = si.readBoolean() - } - constructor(nodeIds: Array<String>) : super(*nodeIds) - - @Throws(IOException::class) - override fun writeTo(out: StreamOutput) { - super.writeTo(out) - out.writeBoolean(jobSchedulingMetrics) - out.writeBoolean(jobsInfo) - } - - fun all(): ScheduledJobsStatsRequest { - jobSchedulingMetrics = true - jobsInfo = true - return this - } - - fun clear(): ScheduledJobsStatsRequest { - jobSchedulingMetrics = false - jobsInfo = false - return this - } -} diff --git a/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsResponse.kt b/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsResponse.kt deleted file mode 100644 index edfcc0cce..000000000 --- a/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsResponse.kt +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.core.action.node - -import org.opensearch.action.FailedNodeException -import org.opensearch.action.support.nodes.BaseNodesResponse -import org.opensearch.alerting.core.settings.LegacyOpenDistroScheduledJobSettings -import org.opensearch.alerting.core.settings.ScheduledJobSettings -import org.opensearch.cluster.ClusterName -import org.opensearch.cluster.health.ClusterIndexHealth -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.core.xcontent.ToXContent -import org.opensearch.core.xcontent.ToXContentFragment -import org.opensearch.core.xcontent.XContentBuilder - -/** - * ScheduledJobsStatsResponse aggregates the responses from all nodes. - */ -class ScheduledJobsStatsResponse : BaseNodesResponse<ScheduledJobStats>, ToXContentFragment { - - private var scheduledJobEnabled: Boolean = false - private var indexExists: Boolean? = null - private var indexHealth: ClusterIndexHealth? = null - - constructor(si: StreamInput) : super(si) { - this.scheduledJobEnabled = si.readBoolean() - this.indexExists = si.readBoolean() - this.indexHealth = si.readOptionalWriteable { ClusterIndexHealth(si) } - } - - constructor( - clusterName: ClusterName, - nodeResponses: List<ScheduledJobStats>, - failures: List<FailedNodeException>, - scheduledJobEnabled: Boolean, - indexExists: Boolean, - indexHealth: ClusterIndexHealth?
- ) : super(clusterName, nodeResponses, failures) { - this.scheduledJobEnabled = scheduledJobEnabled - this.indexExists = indexExists - this.indexHealth = indexHealth - } - - override fun writeNodesTo( - out: StreamOutput, - nodes: MutableList - ) { - out.writeList(nodes) - } - - override fun readNodesFrom(si: StreamInput): MutableList { - return si.readList { ScheduledJobStats.readScheduledJobStatus(it) } - } - - override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder { - builder.field(LegacyOpenDistroScheduledJobSettings.SWEEPER_ENABLED.key, scheduledJobEnabled) - builder.field(ScheduledJobSettings.SWEEPER_ENABLED.key, scheduledJobEnabled) - builder.field("scheduled_job_index_exists", indexExists) - builder.field("scheduled_job_index_status", indexHealth?.status?.name?.lowercase()) - val nodesOnSchedule = nodes.count { it.status == ScheduledJobStats.ScheduleStatus.GREEN } - val nodesNotOnSchedule = nodes.count { it.status == ScheduledJobStats.ScheduleStatus.RED } - builder.field("nodes_on_schedule", nodesOnSchedule) - builder.field("nodes_not_on_schedule", nodesNotOnSchedule) - builder.startObject("nodes") - for (scheduledJobStatus in nodes) { - builder.startObject(scheduledJobStatus.node.id) - scheduledJobStatus.toXContent(builder, params) - builder.endObject() - } - builder.endObject() - - return builder - } -} diff --git a/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsTransportAction.kt b/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsTransportAction.kt deleted file mode 100644 index ac6f8f3a1..000000000 --- a/core/bin/main/org/opensearch/alerting/core/action/node/ScheduledJobsStatsTransportAction.kt +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.alerting.core.action.node - -import org.apache.logging.log4j.LogManager -import org.opensearch.action.FailedNodeException -import org.opensearch.action.support.ActionFilters -import org.opensearch.action.support.nodes.BaseNodeRequest -import org.opensearch.action.support.nodes.TransportNodesAction -import org.opensearch.alerting.core.JobSweeper -import org.opensearch.alerting.core.JobSweeperMetrics -import org.opensearch.alerting.core.ScheduledJobIndices -import org.opensearch.alerting.core.schedule.JobScheduler -import org.opensearch.alerting.core.schedule.JobSchedulerMetrics -import org.opensearch.cluster.health.ClusterIndexHealth -import org.opensearch.cluster.service.ClusterService -import org.opensearch.common.inject.Inject -import org.opensearch.core.common.io.stream.StreamInput -import org.opensearch.core.common.io.stream.StreamOutput -import org.opensearch.threadpool.ThreadPool -import org.opensearch.transport.TransportService -import java.io.IOException - -private val log = LogManager.getLogger(ScheduledJobsStatsTransportAction::class.java) - -class ScheduledJobsStatsTransportAction : TransportNodesAction { - - private val jobSweeper: JobSweeper - private val jobScheduler: JobScheduler - private val scheduledJobIndices: ScheduledJobIndices - - @Inject - constructor( - threadPool: ThreadPool, - clusterService: ClusterService, - transportService: TransportService, - actionFilters: ActionFilters, - jobSweeper: JobSweeper, - jobScheduler: JobScheduler, - scheduledJobIndices: ScheduledJobIndices - ) : super( - ScheduledJobsStatsAction.NAME, - threadPool, - clusterService, - transportService, - actionFilters, - { ScheduledJobsStatsRequest(it) }, - { 
-        { ScheduledJobStatusRequest(it) },
-        ThreadPool.Names.MANAGEMENT,
-        ScheduledJobStats::class.java
-    ) {
-        this.jobSweeper = jobSweeper
-        this.jobScheduler = jobScheduler
-        this.scheduledJobIndices = scheduledJobIndices
-    }
-
-    override fun newNodeRequest(request: ScheduledJobsStatsRequest): ScheduledJobStatusRequest {
-        return ScheduledJobStatusRequest(request)
-    }
-
-    override fun newNodeResponse(si: StreamInput): ScheduledJobStats {
-        return ScheduledJobStats(si)
-    }
-
-    override fun newResponse(
-        request: ScheduledJobsStatsRequest,
-        responses: MutableList<ScheduledJobStats>,
-        failures: MutableList<FailedNodeException>
-    ): ScheduledJobsStatsResponse {
-        val scheduledJobEnabled = jobSweeper.isSweepingEnabled()
-        val scheduledJobIndexExist = scheduledJobIndices.scheduledJobIndexExists()
-        val indexHealth: ClusterIndexHealth? = if (scheduledJobIndexExist) scheduledJobIndices.scheduledJobIndexHealth() else null
-
-        return ScheduledJobsStatsResponse(
-            clusterService.clusterName,
-            responses,
-            failures,
-            scheduledJobEnabled,
-            scheduledJobIndexExist,
-            indexHealth
-        )
-    }
-
-    override fun nodeOperation(request: ScheduledJobStatusRequest): ScheduledJobStats {
-        return createScheduledJobStatus(request.request)
-    }
-
-    private fun createScheduledJobStatus(
-        scheduledJobsStatusRequest: ScheduledJobsStatsRequest
-    ): ScheduledJobStats {
-        val jobSweeperMetrics = jobSweeper.getJobSweeperMetrics()
-        val jobSchedulerMetrics = jobScheduler.getJobSchedulerMetric()
-
-        val status: ScheduledJobStats.ScheduleStatus = evaluateStatus(jobSchedulerMetrics, jobSweeperMetrics)
-        return ScheduledJobStats(
-            this.transportService.localNode,
-            status,
-            if (scheduledJobsStatusRequest.jobSchedulingMetrics) jobSweeperMetrics else null,
-            if (scheduledJobsStatusRequest.jobsInfo) jobSchedulerMetrics.toTypedArray() else null
-        )
-    }
-
-    private fun evaluateStatus(
-        jobsInfo: List<JobSchedulerMetrics>,
-        jobSweeperMetrics: JobSweeperMetrics
-    ): ScheduledJobStats.ScheduleStatus {
-        val allJobsRunningOnTime = jobsInfo.all { it.runningOnTime }
-        if (allJobsRunningOnTime && jobSweeperMetrics.fullSweepOnTime) {
-            return ScheduledJobStats.ScheduleStatus.GREEN
-        }
-        log.info("Jobs Running on time: $allJobsRunningOnTime, Sweeper on time: ${jobSweeperMetrics.fullSweepOnTime}")
-        return ScheduledJobStats.ScheduleStatus.RED
-    }
-
-    class ScheduledJobStatusRequest : BaseNodeRequest {
-
-        lateinit var request: ScheduledJobsStatsRequest
-
-        constructor() : super()
-
-        constructor(si: StreamInput) : super(si) {
-            request = ScheduledJobsStatsRequest(si)
-        }
-
-        constructor(request: ScheduledJobsStatsRequest) : super() {
-            this.request = request
-        }
-
-        @Throws(IOException::class)
-        override fun writeTo(out: StreamOutput) {
-            super.writeTo(out)
-            request.writeTo(out)
-        }
-    }
-}
diff --git a/core/bin/main/org/opensearch/alerting/core/resthandler/RestScheduledJobStatsHandler.kt b/core/bin/main/org/opensearch/alerting/core/resthandler/RestScheduledJobStatsHandler.kt
deleted file mode 100644
index c4f800ab3..000000000
--- a/core/bin/main/org/opensearch/alerting/core/resthandler/RestScheduledJobStatsHandler.kt
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.core.resthandler
-
-import org.opensearch.alerting.core.action.node.ScheduledJobsStatsAction
-import org.opensearch.alerting.core.action.node.ScheduledJobsStatsRequest
-import org.opensearch.client.node.NodeClient
-import org.opensearch.core.common.Strings
-import org.opensearch.rest.BaseRestHandler
-import org.opensearch.rest.RestHandler
-import org.opensearch.rest.RestHandler.Route
-import org.opensearch.rest.RestRequest
-import org.opensearch.rest.RestRequest.Method.GET
-import org.opensearch.rest.action.RestActions
-import java.util.Locale
-import java.util.TreeSet
-
-/**
- * RestScheduledJobStatsHandler is the handler for getting scheduled job stats.
- */
-class RestScheduledJobStatsHandler(private val path: String) : BaseRestHandler() {
-
-    companion object {
-        const val JOB_SCHEDULING_METRICS: String = "job_scheduling_metrics"
-        const val JOBS_INFO: String = "jobs_info"
-        private val METRICS = mapOf<String, (ScheduledJobsStatsRequest) -> Unit>(
-            JOB_SCHEDULING_METRICS to { it -> it.jobSchedulingMetrics = true },
-            JOBS_INFO to { it -> it.jobsInfo = true }
-        )
-    }
-
-    override fun getName(): String {
-        return "${path}_jobs_stats"
-    }
-
-    override fun routes(): List<Route> {
-        return listOf()
-    }
-
-    override fun replacedRoutes(): MutableList<RestHandler.ReplacedRoute> {
-        return mutableListOf(
-            RestHandler.ReplacedRoute(
-                GET,
-                "/_plugins/$path/{nodeId}/stats/",
-                GET,
-                "/_opendistro/$path/{nodeId}/stats/"
-            ),
-            RestHandler.ReplacedRoute(
-                GET,
-                "/_plugins/$path/{nodeId}/stats/{metric}",
-                GET,
-                "/_opendistro/$path/{nodeId}/stats/{metric}"
-            ),
-            RestHandler.ReplacedRoute(
-                GET,
-                "/_plugins/$path/stats/",
-                GET,
-                "/_opendistro/$path/stats/"
-            ),
-            RestHandler.ReplacedRoute(
-                GET,
-                "/_plugins/$path/stats/{metric}",
-                GET,
-                "/_opendistro/$path/stats/{metric}"
-            )
-        )
-    }
-
-    override fun prepareRequest(request: RestRequest, client: NodeClient): RestChannelConsumer {
-        val scheduledJobNodesStatsRequest = getRequest(request)
-        return RestChannelConsumer { channel ->
-            client.execute(
-                ScheduledJobsStatsAction.INSTANCE,
-                scheduledJobNodesStatsRequest,
-                RestActions.NodesResponseRestListener(channel)
-            )
-        }
-    }
-
-    private fun getRequest(request: RestRequest): ScheduledJobsStatsRequest {
-        val nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId"))
-        val metrics = Strings.tokenizeByCommaToSet(request.param("metric"))
-        val scheduledJobsStatsRequest = ScheduledJobsStatsRequest(nodesIds)
-        scheduledJobsStatsRequest.timeout(request.param("timeout"))
-
-        if (metrics.isEmpty()) {
-            return scheduledJobsStatsRequest
-        } else if (metrics.size == 1 && metrics.contains("_all")) {
-            scheduledJobsStatsRequest.all()
-        } else if (metrics.contains("_all")) {
-            throw IllegalArgumentException(
-                String.format(
-                    Locale.ROOT,
-                    "request [%s] contains _all and individual metrics [%s]",
-                    request.path(),
-                    request.param("metric")
-                )
-            )
-        } else {
-            // use a sorted set so the unrecognized parameters appear in a reliable sorted order
-            scheduledJobsStatsRequest.clear()
-            val invalidMetrics = TreeSet<String>()
-            for (metric in metrics) {
-                val handler = METRICS[metric]
-                if (handler != null) {
-                    handler.invoke(scheduledJobsStatsRequest)
-                } else {
-                    invalidMetrics.add(metric)
-                }
-            }
-
-            if (!invalidMetrics.isEmpty()) {
-                throw IllegalArgumentException(unrecognized(request, invalidMetrics, METRICS.keys, "metric"))
-            }
-        }
-        return scheduledJobsStatsRequest
-    }
-}
diff --git a/core/bin/main/org/opensearch/alerting/core/schedule/JobScheduler.kt b/core/bin/main/org/opensearch/alerting/core/schedule/JobScheduler.kt
deleted file mode 100644
index a4a729121..000000000
--- a/core/bin/main/org/opensearch/alerting/core/schedule/JobScheduler.kt
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.core.schedule
-
-import org.apache.logging.log4j.LogManager
-import org.opensearch.alerting.core.JobRunner
-import org.opensearch.common.unit.TimeValue
-import org.opensearch.commons.alerting.model.ScheduledJob
-import org.opensearch.threadpool.Scheduler
-import org.opensearch.threadpool.ThreadPool
-import java.time.Duration
-import java.time.Instant
-import java.util.concurrent.ConcurrentHashMap
-import java.util.concurrent.TimeUnit
-import java.util.stream.Collectors
-
-/**
- * JobScheduler schedules and deschedules [ScheduledJob]s. This class keeps the set of ScheduledJob ids that are currently scheduled.
- *
- * JobScheduler is unaware of the ScheduledJob version; it is up to callers to deschedule the older version of a ScheduledJob and then schedule the new version.
- */
-class JobScheduler(private val threadPool: ThreadPool, private val jobRunner: JobRunner) {
-    private val logger = LogManager.getLogger(JobScheduler::class.java)
-
-    /**
-     * Map of ScheduledJob id to info about that ScheduledJob.
-     */
-    private val scheduledJobIdToInfo = ConcurrentHashMap<String, ScheduledJobInfo>()
-
-    /**
-     * Schedules the jobs in [jobsToSchedule] for execution.
-     *
-     * @return List of jobs that could not be scheduled
-     */
-    fun schedule(vararg jobsToSchedule: ScheduledJob): List<ScheduledJob> {
-        return jobsToSchedule.filter {
-            !this.schedule(it)
-        }
-    }
-
-    /**
-     * Schedules a single [scheduledJob].
-     *
-     * [schedule] does not check for a newer version of the ScheduledJob.
-     * The caller should be aware of updates to the [ScheduledJob] and, if the job version changed, must first call [deschedule] and then [schedule].
-     *
-     * [schedule] is considered successful when
-     * 1. the cron expression has no remaining executions (e.g. it refers to a date in the past, such as the year 2016), since there is nothing to schedule;
-     * 2. the schedule already exists, which keeps the function idempotent;
-     * 3. the job is successfully submitted via [ThreadPool.schedule].
-     *
-     * [schedule] is considered unsuccessful when
-     * 1. the schedule is disabled;
-     * 2. in the rare race condition where the scheduledJob is already marked [ScheduledJobInfo.descheduled] true at the time of calling [ThreadPool.schedule];
-     * 3. any unexpected failure occurs.
-     *
-     * @return true if the ScheduledJob is scheduled successfully;
-     *         false otherwise.
-     */
-    fun schedule(scheduledJob: ScheduledJob): Boolean {
-        logger.info("Scheduling jobId : ${scheduledJob.id}, name: ${scheduledJob.name}")
-
-        if (!scheduledJob.enabled) {
-            // Do not schedule a disabled ScheduledJob. The caller should also be checking this before calling this function.
-            return false
-        }
-
-        val scheduledJobInfo = scheduledJobIdToInfo.getOrPut(scheduledJob.id) {
-            ScheduledJobInfo(scheduledJob.id, scheduledJob)
-        }
-        if (scheduledJobInfo.scheduledCancellable != null) {
-            // The given ScheduledJob already has a schedule running. We should not schedule it again.
-            return true
-        }
-
-        // Start the first schedule.
-        return this.reschedule(scheduledJob, scheduledJobInfo)
-    }
-
-    /**
-     * Deschedules the jobs with the given ScheduledJob [ids].
-     *
-     * The caller should retry any [deschedule] that failed.
-     *
-     * @return List of job ids that failed to deschedule.
-     */
-    fun deschedule(ids: Collection<String>): List<String> {
-        return ids.filter {
-            !this.deschedule(it)
-        }.also {
-            if (it.isNotEmpty()) {
-                logger.error("Unable to deschedule jobs $it")
-            }
-        }
-    }
-
-    /**
-     * Marks the scheduledJob as descheduled and tries to cancel any future schedule for the given scheduledJob id.
-     *
-     * [deschedule] is considered successful when
-     * 1. the ScheduledJob id does not exist;
-     * 2. the ScheduledJob is complete;
-     * 3. the ScheduledJob is not complete and is successfully cancelled.
-     *
-     * The caller should retry if a ScheduledJob [deschedule] fails.
-     *
-     * @return true if the job is successfully descheduled;
-     *         false otherwise.
-     */
-    fun deschedule(id: String): Boolean {
-        val scheduledJobInfo = scheduledJobIdToInfo[id]
-        if (scheduledJobInfo == null) {
-            logger.info("JobId $id does not exist.")
-            return true
-        } else {
-            logger.info("Descheduling jobId : $id")
-            scheduledJobInfo.descheduled = true
-            scheduledJobInfo.actualPreviousExecutionTime = null
-            scheduledJobInfo.expectedNextExecutionTime = null
-            var result = true
-            val scheduledFuture = scheduledJobInfo.scheduledCancellable
-
-            if (scheduledFuture != null && !scheduledFuture.isCancelled) {
-                result = scheduledFuture.cancel()
-            }
-
-            if (result) {
-                // If we have successfully descheduled the job, remove it from the info map.
-                scheduledJobIdToInfo.remove(scheduledJobInfo.scheduledJobId, scheduledJobInfo)
-            }
-            return result
-        }
-    }
-
-    /**
-     * @return the set of job ids that are currently scheduled.
-     */
-    fun scheduledJobs(): Set<String> {
-        return scheduledJobIdToInfo.keys
-    }
-
-    private fun reschedule(scheduleJob: ScheduledJob, scheduledJobInfo: ScheduledJobInfo): Boolean {
-        if (scheduleJob.enabledTime == null) {
-            logger.info("${scheduleJob.name} there is no enabled time. This job should never have been scheduled.")
-            return false
-        }
-        scheduledJobInfo.expectedNextExecutionTime = scheduleJob.schedule.getExpectedNextExecutionTime(
-            scheduleJob.enabledTime!!, scheduledJobInfo.expectedNextExecutionTime
-        )
-
-        // Validate that there is a next execution that needs to happen,
-        // e.g. a cron job expected to run on Feb 30th (which doesn't exist): "0/5 * 30 2 *"
-        if (scheduledJobInfo.expectedNextExecutionTime == null) {
-            logger.info("${scheduleJob.name} there is no next execution time.")
-            return true
-        }
-
-        val duration = Duration.between(Instant.now(), scheduledJobInfo.expectedNextExecutionTime)
-
-        // Create an anonymous runnable.
-        val runnable = Runnable {
-            // Check again if the scheduled job is marked descheduled.
-            if (scheduledJobInfo.descheduled) {
-                return@Runnable // Skip running the job if it is marked descheduled.
-            }
-
-            // Order of operations inside here matters: we specifically call getPeriodEndingAt before reschedule because
-            // reschedule will update expectedNextExecutionTime to the next one, which would throw off the startTime/endTime.
-            val (startTime, endTime) = scheduleJob.schedule.getPeriodEndingAt(scheduledJobInfo.expectedNextExecutionTime)
-            scheduledJobInfo.actualPreviousExecutionTime = Instant.now()
-
-            this.reschedule(scheduleJob, scheduledJobInfo)
-
-            jobRunner.runJob(scheduleJob, startTime, endTime)
-        }
-
-        // Check the descheduled flag as close as possible to the moment we actually schedule the job.
-        // This way we can minimize race conditions.
-        if (scheduledJobInfo.descheduled) {
-            // Do not reschedule if the schedule has been marked descheduled.
-            return false
-        }
-
-        // Finally schedule the job in the ThreadPool with the next time to execute.
-        val scheduledCancellable = threadPool.schedule(runnable, TimeValue(duration.toNanos(), TimeUnit.NANOSECONDS), ThreadPool.Names.SAME)
-        scheduledJobInfo.scheduledCancellable = scheduledCancellable
-
-        return true
-    }
-
-    fun getJobSchedulerMetric(): List<JobSchedulerMetrics> {
-        return scheduledJobIdToInfo.entries.stream()
-            .map { entry ->
-                JobSchedulerMetrics(
-                    entry.value.scheduledJobId,
-                    entry.value.actualPreviousExecutionTime?.toEpochMilli(),
-                    entry.value.scheduledJob.schedule.runningOnTime(entry.value.actualPreviousExecutionTime)
-                )
-            }
-            .collect(Collectors.toList())
-    }
-
-    fun postIndex(job: ScheduledJob) {
-        jobRunner.postIndex(job)
-    }
-
-    fun postDelete(jobId: String) {
-        jobRunner.postDelete(jobId)
-    }
-
-    /**
-     * ScheduledJobInfo is used to check whether the job should be descheduled.
-     * Some ideas for further use of this class:
-     * 1. Total number of runs.
-     * 2. Tracking the number of failed runs (helps to control error handling).
-     */
-    private data class ScheduledJobInfo(
-        val scheduledJobId: String,
-        val scheduledJob: ScheduledJob,
-        var descheduled: Boolean = false,
-        var actualPreviousExecutionTime: Instant? = null,
-        var expectedNextExecutionTime: Instant? = null,
-        var scheduledCancellable: Scheduler.ScheduledCancellable? = null
-    )
-}
diff --git a/core/bin/main/org/opensearch/alerting/core/schedule/JobSchedulerMetrics.kt b/core/bin/main/org/opensearch/alerting/core/schedule/JobSchedulerMetrics.kt
deleted file mode 100644
index dff1ecd52..000000000
--- a/core/bin/main/org/opensearch/alerting/core/schedule/JobSchedulerMetrics.kt
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.core.schedule
-
-import org.opensearch.core.common.io.stream.StreamInput
-import org.opensearch.core.common.io.stream.StreamOutput
-import org.opensearch.core.common.io.stream.Writeable
-import org.opensearch.core.xcontent.ToXContent
-import org.opensearch.core.xcontent.ToXContentFragment
-import org.opensearch.core.xcontent.XContentBuilder
-import java.time.Instant
-
-class JobSchedulerMetrics : ToXContentFragment, Writeable {
-    val scheduledJobId: String
-    val lastExecutionTime: Long?
-    val runningOnTime: Boolean
-
-    constructor(scheduledJobId: String, lastExecutionTime: Long?, runningOnTime: Boolean) {
-        this.scheduledJobId = scheduledJobId
-        this.lastExecutionTime = lastExecutionTime
-        this.runningOnTime = runningOnTime
-    }
-
-    constructor(si: StreamInput) {
-        scheduledJobId = si.readString()
-        lastExecutionTime = si.readOptionalLong()
-        runningOnTime = si.readBoolean()
-    }
-
-    override fun writeTo(out: StreamOutput) {
-        out.writeString(scheduledJobId)
-        out.writeOptionalLong(lastExecutionTime)
-        out.writeBoolean(runningOnTime)
-    }
-
-    override fun toXContent(builder: XContentBuilder, params: ToXContent.Params): XContentBuilder {
-        if (lastExecutionTime != null)
-            builder.timeField(
-                "last_execution_time", "last_execution_time_in_millis",
-                Instant.ofEpochMilli(lastExecutionTime).toEpochMilli()
-            )
-        builder.field("running_on_time", runningOnTime)
-        return builder
-    }
-}
diff --git a/core/bin/main/org/opensearch/alerting/core/settings/LegacyOpenDistroScheduledJobSettings.kt b/core/bin/main/org/opensearch/alerting/core/settings/LegacyOpenDistroScheduledJobSettings.kt
deleted file mode 100644
index 3a37ff97f..000000000
--- a/core/bin/main/org/opensearch/alerting/core/settings/LegacyOpenDistroScheduledJobSettings.kt
+++ /dev/null
@@ -1,49 +0,0 @@
-package org.opensearch.alerting.core.settings
-
-import org.opensearch.common.settings.Setting
-import org.opensearch.common.unit.TimeValue
-
-/**
- * Legacy OpenDistro settings used for [ScheduledJob]s. These include backoff settings, retry counts, timeouts, etc.
- */
-class LegacyOpenDistroScheduledJobSettings {
-
-    companion object {
-        val SWEEPER_ENABLED = Setting.boolSetting(
-            "opendistro.scheduled_jobs.enabled",
-            true,
-            Setting.Property.NodeScope, Setting.Property.Dynamic, Setting.Property.Deprecated
-        )
-
-        val REQUEST_TIMEOUT = Setting.positiveTimeSetting(
-            "opendistro.scheduled_jobs.request_timeout",
-            TimeValue.timeValueSeconds(10),
-            Setting.Property.NodeScope, Setting.Property.Dynamic, Setting.Property.Deprecated
-        )
-
-        val SWEEP_BACKOFF_MILLIS = Setting.positiveTimeSetting(
-            "opendistro.scheduled_jobs.sweeper.backoff_millis",
-            TimeValue.timeValueMillis(50),
-            Setting.Property.NodeScope, Setting.Property.Dynamic, Setting.Property.Deprecated
-        )
-
-        val SWEEP_BACKOFF_RETRY_COUNT = Setting.intSetting(
-            "opendistro.scheduled_jobs.retry_count",
-            3,
-            Setting.Property.NodeScope, Setting.Property.Dynamic, Setting.Property.Deprecated
-        )
-
-        val SWEEP_PERIOD = Setting.positiveTimeSetting(
-            "opendistro.scheduled_jobs.sweeper.period",
-            TimeValue.timeValueMinutes(5),
-            Setting.Property.NodeScope, Setting.Property.Dynamic, Setting.Property.Deprecated
-        )
-
-        val SWEEP_PAGE_SIZE = Setting.intSetting(
-            "opendistro.scheduled_jobs.sweeper.page_size",
-            100,
-            Setting.Property.NodeScope, Setting.Property.Dynamic, Setting.Property.Deprecated
-        )
-    }
-}
diff --git a/core/bin/main/org/opensearch/alerting/core/settings/ScheduledJobSettings.kt b/core/bin/main/org/opensearch/alerting/core/settings/ScheduledJobSettings.kt
deleted file mode 100644
index 6bdb18bec..000000000
--- a/core/bin/main/org/opensearch/alerting/core/settings/ScheduledJobSettings.kt
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.core.settings
-
-import org.opensearch.common.settings.Setting
-
-/**
- * Settings used for [ScheduledJob]s. These include backoff settings, retry counts, timeouts, etc.
- */
-class ScheduledJobSettings {
-
-    companion object {
-        val SWEEPER_ENABLED = Setting.boolSetting(
-            "plugins.scheduled_jobs.enabled",
-            LegacyOpenDistroScheduledJobSettings.SWEEPER_ENABLED,
-            Setting.Property.NodeScope, Setting.Property.Dynamic
-        )
-        val REQUEST_TIMEOUT = Setting.positiveTimeSetting(
-            "plugins.scheduled_jobs.request_timeout",
-            LegacyOpenDistroScheduledJobSettings.REQUEST_TIMEOUT,
-            Setting.Property.NodeScope, Setting.Property.Dynamic
-        )
-
-        val SWEEP_BACKOFF_MILLIS = Setting.positiveTimeSetting(
-            "plugins.scheduled_jobs.sweeper.backoff_millis",
-            LegacyOpenDistroScheduledJobSettings.SWEEP_BACKOFF_MILLIS,
-            Setting.Property.NodeScope, Setting.Property.Dynamic
-        )
-
-        val SWEEP_BACKOFF_RETRY_COUNT = Setting.intSetting(
-            "plugins.scheduled_jobs.retry_count",
-            LegacyOpenDistroScheduledJobSettings.SWEEP_BACKOFF_RETRY_COUNT,
-            Setting.Property.NodeScope, Setting.Property.Dynamic
-        )
-
-        val SWEEP_PERIOD = Setting.positiveTimeSetting(
-            "plugins.scheduled_jobs.sweeper.period",
-            LegacyOpenDistroScheduledJobSettings.SWEEP_PERIOD,
-            Setting.Property.NodeScope, Setting.Property.Dynamic
-        )
-
-        val SWEEP_PAGE_SIZE = Setting.intSetting(
-            "plugins.scheduled_jobs.sweeper.page_size",
-            LegacyOpenDistroScheduledJobSettings.SWEEP_PAGE_SIZE,
-            Setting.Property.NodeScope, Setting.Property.Dynamic
-        )
-    }
-}
diff --git a/core/bin/main/org/opensearch/alerting/opensearchapi/OpenSearchExtensions.kt b/core/bin/main/org/opensearch/alerting/opensearchapi/OpenSearchExtensions.kt
deleted file mode 100644
index 3e87f207f..000000000
--- a/core/bin/main/org/opensearch/alerting/opensearchapi/OpenSearchExtensions.kt
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.opensearchapi
-
-import kotlinx.coroutines.CoroutineScope
-import kotlinx.coroutines.ThreadContextElement
-import kotlinx.coroutines.delay
-import kotlinx.coroutines.withContext
-import org.apache.logging.log4j.Logger
-import org.opensearch.OpenSearchException
-import org.opensearch.action.bulk.BackoffPolicy
-import org.opensearch.action.search.SearchResponse
-import org.opensearch.action.search.ShardSearchFailure
-import org.opensearch.client.OpenSearchClient
-import org.opensearch.common.settings.Settings
-import org.opensearch.common.util.concurrent.ThreadContext
-import org.opensearch.common.xcontent.XContentHelper
-import org.opensearch.common.xcontent.XContentType
-import org.opensearch.commons.InjectSecurity
-import org.opensearch.commons.authuser.User
-import org.opensearch.commons.notifications.NotificationsPluginInterface
-import org.opensearch.core.action.ActionListener
-import org.opensearch.core.rest.RestStatus
-import org.opensearch.core.rest.RestStatus.BAD_GATEWAY
-import org.opensearch.core.rest.RestStatus.GATEWAY_TIMEOUT
-import org.opensearch.core.rest.RestStatus.SERVICE_UNAVAILABLE
-import org.opensearch.core.xcontent.ToXContent
-import org.opensearch.index.query.BoolQueryBuilder
-import org.opensearch.index.query.QueryBuilders
-import org.opensearch.search.builder.SearchSourceBuilder
-import kotlin.coroutines.CoroutineContext
-import kotlin.coroutines.resume
-import kotlin.coroutines.resumeWithException
-import kotlin.coroutines.suspendCoroutine
-
-/** Convert an object to a maps-and-lists representation */
-fun ToXContent.convertToMap(): Map<String, Any> {
-    val bytesReference = XContentHelper.toXContent(this, XContentType.JSON, false)
-    return XContentHelper.convertToMap(bytesReference, false, XContentType.JSON).v2()
-}
-
-/**
- * Backs off and retries a lambda that makes a request. This should not be called on any of the [standard][ThreadPool]
- * executors since those executors are not meant to be blocked by sleeping.
- */
-fun <T> BackoffPolicy.retry(block: () -> T): T {
-    val iter = iterator()
-    do {
-        try {
-            return block()
-        } catch (e: OpenSearchException) {
-            if (iter.hasNext() && e.isRetriable()) {
-                Thread.sleep(iter.next().millis)
-            } else {
-                throw e
-            }
-        }
-    } while (true)
-}
-
-/**
- * Backs off and retries a lambda that makes a request. This retries on any Exception unless it detects that the
- * Notification plugin is not installed.
- *
- * @param logger - logger used to log intermediate failures
- * @param block - the block of code to retry. This should be a suspend function.
- */
-suspend fun <T> BackoffPolicy.retryForNotification(
-    logger: Logger,
-    block: suspend () -> T
-): T {
-    val iter = iterator()
-    do {
-        try {
-            return block()
-        } catch (e: java.lang.Exception) {
-            val isMissingNotificationPlugin = e.message?.contains("failed to find action") ?: false
-            if (isMissingNotificationPlugin) {
-                throw OpenSearchException("Notification plugin is not installed. Please install the Notification plugin.", e)
-            } else if (iter.hasNext()) {
-                val backoff = iter.next()
-                logger.warn("Notification operation failed. Retrying in $backoff.", e)
-                delay(backoff.millis)
-            } else {
-                throw e
-            }
-        }
-    } while (true)
-}
-
-/**
- * Retries the given [block] of code as specified by the receiver [BackoffPolicy], if [block] throws an [OpenSearchException]
- * that is retriable (502, 503, 504).
- *
- * If all retries fail the final exception will be rethrown. Exceptions caught during intermediate retries are
- * logged as warnings to [logger]. Similar to [org.opensearch.action.bulk.Retry], except this retries on
- * 502, 503, 504 error codes as well as 429.
- *
- * @param logger - logger used to log intermediate failures
- * @param retryOn - any additional [RestStatus] values that should be retried
- * @param block - the block of code to retry. This should be a suspend function.
- */
-suspend fun <T> BackoffPolicy.retry(
-    logger: Logger,
-    retryOn: List<RestStatus> = emptyList(),
-    block: suspend () -> T
-): T {
-    val iter = iterator()
-    do {
-        try {
-            return block()
-        } catch (e: OpenSearchException) {
-            if (iter.hasNext() && (e.isRetriable() || retryOn.contains(e.status()))) {
-                val backoff = iter.next()
-                logger.warn("Operation failed. Retrying in $backoff.", e)
-                delay(backoff.millis)
-            } else {
-                throw e
-            }
-        }
-    } while (true)
}
-
-/**
- * Retries on 502, 503 and 504 per the elastic client's behavior: https://github.com/elastic/elasticsearch-net/issues/2061
- * 429 must be retried manually as it's not clear if it's ok to retry for requests other than Bulk requests.
- */
-fun OpenSearchException.isRetriable(): Boolean {
-    return (status() in listOf(BAD_GATEWAY, SERVICE_UNAVAILABLE, GATEWAY_TIMEOUT))
-}
-
-fun SearchResponse.firstFailureOrNull(): ShardSearchFailure? {
-    return shardFailures?.getOrNull(0)
-}
-
-fun addFilter(user: User, searchSourceBuilder: SearchSourceBuilder, fieldName: String) {
-    val filterBackendRoles = QueryBuilders.termsQuery(fieldName, user.backendRoles)
-    val queryBuilder = searchSourceBuilder.query() as BoolQueryBuilder
-    searchSourceBuilder.query(queryBuilder.filter(filterBackendRoles))
-}
-
-/**
- * Converts [OpenSearchClient] methods that take a callback into a kotlin suspending function.
- *
- * @param block - a block of code that is passed an [ActionListener] that should be passed to the OpenSearch client API.
- */
-suspend fun <C : OpenSearchClient, T> C.suspendUntil(block: C.(ActionListener<T>) -> Unit): T =
-    suspendCoroutine { cont ->
-        block(object : ActionListener<T> {
-            override fun onResponse(response: T) = cont.resume(response)
-
-            override fun onFailure(e: Exception) = cont.resumeWithException(e)
-        })
-    }
-
-/**
- * Converts [NotificationsPluginInterface] methods that take a callback into a kotlin suspending function.
- *
- * @param block - a block of code that is passed an [ActionListener] that should be passed to the NotificationsPluginInterface API.
- */
-suspend fun <T> NotificationsPluginInterface.suspendUntil(block: NotificationsPluginInterface.(ActionListener<T>) -> Unit): T =
-    suspendCoroutine { cont ->
-        block(object : ActionListener<T> {
-            override fun onResponse(response: T) = cont.resume(response)
-
-            override fun onFailure(e: Exception) = cont.resumeWithException(e)
-        })
-    }
-
-class InjectorContextElement(
-    id: String,
-    settings: Settings,
-    threadContext: ThreadContext,
-    private val roles: List<String>?,
-    private val user: User? = null
-) : ThreadContextElement<Unit> {
-
-    companion object Key : CoroutineContext.Key<InjectorContextElement>
-    override val key: CoroutineContext.Key<*>
-        get() = Key
-
-    var rolesInjectorHelper = InjectSecurity(id, settings, threadContext)
-
-    override fun updateThreadContext(context: CoroutineContext) {
-        rolesInjectorHelper.injectRoles(roles)
-        // This is where plugins extract backend roles from. It should be passed when calling APIs of other plugins.
-        rolesInjectorHelper.injectUserInfo(user)
-    }
-
-    override fun restoreThreadContext(context: CoroutineContext, oldState: Unit) {
-        rolesInjectorHelper.close()
-    }
-}
-
-suspend fun <T> withClosableContext(
-    context: InjectorContextElement,
-    block: suspend CoroutineScope.() -> T
-): T {
-    try {
-        return withContext(context) { block() }
-    } finally {
-        context.rolesInjectorHelper.close()
-    }
-}
diff --git a/core/bin/main/settings/doc-level-queries.json b/core/bin/main/settings/doc-level-queries.json
deleted file mode 100644
index c5cbfa445..000000000
--- a/core/bin/main/settings/doc-level-queries.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "index": {
-    "mapping": {
-      "total_fields": {
-        "limit": 10000
-      }
-    },
-    "hidden": true
-  }
-}
\ No newline at end of file
diff --git a/core/bin/test/org/opensearch/alerting/core/WriteableTests.kt b/core/bin/test/org/opensearch/alerting/core/WriteableTests.kt
deleted file mode 100644
index f48ffa370..000000000
--- a/core/bin/test/org/opensearch/alerting/core/WriteableTests.kt
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.core
-
-import org.joda.time.DateTime
-import org.junit.Test
-import org.opensearch.alerting.core.schedule.JobSchedulerMetrics
-import org.opensearch.common.io.stream.BytesStreamOutput
-import org.opensearch.core.common.io.stream.StreamInput
-import org.opensearch.test.OpenSearchTestCase.assertEquals
-
-class WriteableTests {
-
-    @Test
-    fun `test jobschedule metrics as stream`() {
-        val metrics = JobSchedulerMetrics("test", DateTime.now().millis, false)
-        val out = BytesStreamOutput()
-        metrics.writeTo(out)
-        val sin = StreamInput.wrap(out.bytes().toBytesRef().bytes)
-        val newMetrics = JobSchedulerMetrics(sin)
-        assertEquals("Round tripping metrics doesn't work", metrics.scheduledJobId, newMetrics.scheduledJobId)
-    }
-}
diff --git a/core/bin/test/org/opensearch/alerting/core/model/MockScheduledJob.kt b/core/bin/test/org/opensearch/alerting/core/model/MockScheduledJob.kt
deleted file mode 100644
index 08e3fb8c4..000000000
--- a/core/bin/test/org/opensearch/alerting/core/model/MockScheduledJob.kt
+++ /dev/null
@@ -1,33 +0,0 @@
-package org.opensearch.alerting.core.model
-
-import org.opensearch.commons.alerting.model.Schedule
-import org.opensearch.commons.alerting.model.ScheduledJob
-import org.opensearch.core.common.io.stream.StreamOutput
-import org.opensearch.core.xcontent.ToXContent
-import org.opensearch.core.xcontent.XContentBuilder
-import java.io.IOException
-import java.time.Instant
-
-class MockScheduledJob(
-    override val id: String,
-    override val version: Long,
-    override val name: String,
-    override val type: String,
-    override val enabled: Boolean,
-    override val schedule: Schedule,
-    override var lastUpdateTime: Instant,
-    override val enabledTime: Instant?
-) : ScheduledJob {
-    override fun fromDocument(id: String, version: Long): ScheduledJob {
-        TODO("not implemented")
-    }
-
-    override fun toXContent(builder: XContentBuilder?, params: ToXContent.Params?): XContentBuilder {
-        TODO("not implemented")
-    }
-
-    @Throws(IOException::class)
-    override fun writeTo(out: StreamOutput) {
-        TODO("not implemented")
-    }
-}
diff --git a/core/bin/test/org/opensearch/alerting/core/schedule/JobSchedulerTest.kt b/core/bin/test/org/opensearch/alerting/core/schedule/JobSchedulerTest.kt
deleted file mode 100644
index a0453e935..000000000
--- a/core/bin/test/org/opensearch/alerting/core/schedule/JobSchedulerTest.kt
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.core.schedule
-
-import org.junit.Before
-import org.opensearch.alerting.core.model.MockScheduledJob
-import org.opensearch.common.settings.Settings
-import org.opensearch.commons.alerting.model.CronSchedule
-import org.opensearch.commons.alerting.model.IntervalSchedule
-import org.opensearch.threadpool.ThreadPool
-import java.time.Instant
-import java.time.ZoneId
-import java.time.temporal.ChronoUnit
-import kotlin.test.Test
-import kotlin.test.assertEquals
-import kotlin.test.assertFalse
-import kotlin.test.assertTrue
-
-class JobSchedulerTest {
-
-    private var testSettings: Settings = Settings.builder().put("node.name", "node-0").build()
-    private val testThreadPool = ThreadPool(testSettings)
-    private var jobRunner: MockJobRunner = MockJobRunner()
-    private var jobScheduler: JobScheduler = JobScheduler(ThreadPool(testSettings), jobRunner)
-
-    @Before
-    fun `setup`() {
-        jobRunner = MockJobRunner()
-        jobScheduler = JobScheduler(ThreadPool(testSettings), jobRunner)
-    }
-
-    @Test
-    fun `schedule and deschedule`() {
-        val mockScheduledJob = MockScheduledJob(
-            "mockScheduledJob-id",
-            1L,
-            "mockScheduledJob-name",
-            "MockScheduledJob",
-            true,
-            IntervalSchedule(1, ChronoUnit.MINUTES),
-            Instant.now(),
-            Instant.now()
-        )
-
-        assertTrue(jobScheduler.schedule(mockScheduledJob))
-
-        assertEquals(setOf("mockScheduledJob-id"), jobScheduler.scheduledJobs(), "List of ScheduledJobs are not the same.")
-        assertEquals(0, jobRunner.numberOfRun, "Number of JobRunner ran is wrong.")
-        assertTrue(jobScheduler.deschedule("mockScheduledJob-id"), "Descheduling should be true.")
-    }
-
-    @Test
-    fun `schedule cron past year`() {
-        // This cron targets Feb 30th, which never occurs, so the job should never run.
-        val cronExpression = "0/5 * 30 2 *"
-        val jobRunner = MockJobRunner()
-        val jobScheduler = JobScheduler(testThreadPool, jobRunner)
-        val mockScheduledJob = MockScheduledJob(
-            "mockScheduledJob-id",
-            1L,
-            "mockScheduledJob-name",
-            "MockScheduledJob",
-            true,
-            CronSchedule(cronExpression, ZoneId.of("UTC")),
-            Instant.now(),
-            Instant.now()
-        )
-
-        assertTrue(jobScheduler.schedule(mockScheduledJob))
-        assertEquals(setOf("mockScheduledJob-id"), jobScheduler.scheduledJobs(), "List of ScheduledJobs are not the same.")
-
-        assertEquals(0, jobRunner.numberOfRun, "Number of JobRunner ran is wrong.")
-
-        assertTrue(jobScheduler.deschedule("mockScheduledJob-id"), "Descheduling should be true.")
-    }
-
-    @Test
-    fun `schedule disabled`() {
-        val cronExpression = "0/5 * * * *"
-        val jobRunner = MockJobRunner()
-        val jobScheduler = JobScheduler(testThreadPool, jobRunner)
-        val mockScheduledJob = MockScheduledJob(
-            "mockScheduledJob-id",
-            1L,
-            "mockScheduledJob-name",
-            "MockScheduledJob",
-            false,
-            CronSchedule(cronExpression, ZoneId.of("UTC")),
-            Instant.now(),
-            Instant.now()
-        )
-
-        assertFalse(jobScheduler.schedule(mockScheduledJob), "We should return false if we try to schedule disabled schedule.")
-        assertEquals(setOf(), jobScheduler.scheduledJobs(), "List of ScheduledJobs are not the same.")
-    }
-
-    @Test
-    fun `deschedule non existing schedule`() {
-        val cronExpression = "0/5 * * * *"
-        val jobRunner = MockJobRunner()
-        val jobScheduler = JobScheduler(testThreadPool, jobRunner)
-        val mockScheduledJob = MockScheduledJob(
-            "mockScheduledJob-id",
-            1L,
-            "mockScheduledJob-name",
-            "MockScheduledJob",
-            true,
-            CronSchedule(cronExpression, ZoneId.of("UTC")),
-            Instant.now(),
-            Instant.now()
-        )
-
-        assertTrue(jobScheduler.schedule(mockScheduledJob))
-        assertEquals(setOf("mockScheduledJob-id"), jobScheduler.scheduledJobs(), "List of ScheduledJobs are not the same.")
-
-        assertEquals(0, jobRunner.numberOfRun, "Number of JobRunner ran is wrong.")
-
-        assertTrue(jobScheduler.deschedule("mockScheduledJob-invalid"), "Descheduling should be true.")
-        assertTrue(jobScheduler.deschedule("mockScheduledJob-id"), "Descheduling should be true.")
-    }
-
-    @Test
-    fun `schedule multiple jobs`() {
-        val cronExpression = "0/5 * * * *"
-        val mockScheduledJob1 = MockScheduledJob(
-            "mockScheduledJob-1",
-            1L,
-            "mockScheduledJob-name",
-            "MockScheduledJob",
-            true,
-            CronSchedule(cronExpression, ZoneId.of("UTC")),
-            Instant.now(),
-            Instant.now()
-        )
-        val mockScheduledJob2 = MockScheduledJob(
-            "mockScheduledJob-2",
-            1L,
-            "mockScheduledJob-name",
-            "MockScheduledJob",
-            true,
-            CronSchedule(cronExpression, ZoneId.of("UTC")),
-            Instant.now(),
-            Instant.now()
-        )
-
-        assertTrue(jobScheduler.schedule(mockScheduledJob1, mockScheduledJob2).isEmpty())
-    }
-
-    @Test
-    fun `schedule null enabled time job`() {
-        val cronExpression = "0/5 * * * *"
-        val mockScheduledJob2 = MockScheduledJob(
-            "mockScheduledJob-2", 1L, "mockScheduledJob-name", "MockScheduledJob", true,
-            CronSchedule(cronExpression, ZoneId.of("UTC")), Instant.now(), null
-        )
-
-        assertFalse(jobScheduler.schedule(mockScheduledJob2))
-    }
-
-    @Test
-    fun `schedule disabled job`() {
-        val cronExpression = "0/5 * * * *"
-        val mockScheduledJob1 = MockScheduledJob(
-            "mockScheduledJob-1", 1L, "mockScheduledJob-name", "MockScheduledJob", false,
-            CronSchedule(cronExpression, ZoneId.of("UTC")), Instant.now(), Instant.now()
-        )
-
-        assertFalse(jobScheduler.schedule(mockScheduledJob1))
-    }
-
-    @Test
-    fun `run Job`() {
-        val cronExpression = "0/5 * * * *"
-        val mockScheduledJob = MockScheduledJob(
-            "mockScheduledJob-id",
-            1L,
-            "mockScheduledJob-name",
-            "MockScheduledJob",
-            true,
-            CronSchedule(cronExpression, ZoneId.of("UTC")),
-            Instant.now(),
-            Instant.now()
-        )
-
-        jobRunner.runJob(mockScheduledJob, Instant.now(), Instant.now())
-    }
-}
diff --git a/core/bin/test/org/opensearch/alerting/core/schedule/MockJobRunner.kt b/core/bin/test/org/opensearch/alerting/core/schedule/MockJobRunner.kt
deleted file mode 100644
index 15fe770b9..000000000
--- a/core/bin/test/org/opensearch/alerting/core/schedule/MockJobRunner.kt
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.alerting.core.schedule
-
-import org.opensearch.alerting.core.JobRunner
-import org.opensearch.commons.alerting.model.ScheduledJob
-import java.time.Instant
-
-class MockJobRunner : JobRunner {
-    var numberOfRun: Int = 0
-        private set
-    var numberOfIndex: Int = 0
-        private set
-    var numberOfDelete: Int = 0
-        private set
-
-    override fun postDelete(jobId: String) {
-        numberOfDelete++
-    }
-
-    override fun postIndex(job: ScheduledJob) {
-        numberOfIndex++
-    }
-
-    override fun runJob(job: ScheduledJob, periodStart: Instant, periodEnd: Instant) {
-        numberOfRun++
-    }
-}
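Note: the `suspendUntil` extension deleted above (this hunk only removes the stale copy under `core/bin`) is the bridge between OpenSearch's callback-style client APIs and Kotlin coroutines. A minimal usage sketch, not part of this patch; the `searchOnce` wrapper, the `NodeClient`, and the `SearchRequest` are illustrative assumptions:

import org.opensearch.action.search.SearchRequest
import org.opensearch.action.search.SearchResponse
import org.opensearch.alerting.opensearchapi.suspendUntil
import org.opensearch.client.node.NodeClient

// Illustrative only. `it` is the ActionListener<SearchResponse> created by
// suspendUntil: the coroutine resumes with the response in onResponse and
// rethrows the exception from onFailure.
suspend fun searchOnce(client: NodeClient, request: SearchRequest): SearchResponse =
    client.suspendUntil { search(request, it) }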