From 03b7cd5dae29d808925b7df0f790d2dbe539d1c5 Mon Sep 17 00:00:00 2001 From: Pierre Gayvallet Date: Mon, 11 Sep 2023 16:34:40 +0200 Subject: [PATCH] Add integration test coverage for SO migrations against serverless ES (#164959) ## Summary ~~Blocked by https://github.com/elastic/kibana/pull/162673~~ Add some initial integration test coverage for SO migrations when running against serverless Elasticsearch: - our migration actions test suite - some of the zdt algo migration suites The actions test suite was adapted to skip, when run against serverless, the tests that are not supposed to be run (or passing) in that environment --- .../src/saved_objects_config.ts | 2 +- .../src/actions/clone_index.test.ts | 27 +- .../src/actions/clone_index.ts | 57 +- .../src/actions/es_errors.test.ts | 9 + .../src/actions/es_errors.ts | 4 +- .../src/actions/index.ts | 6 + .../src/model/model.ts | 6 + .../src/create_serverless_root.ts | 23 +- .../capabilities_serverless.test.ts | 4 +- .../migrations/group3/actions/actions.test.ts | 2054 +--------------- .../group3/actions/actions_test_suite.ts | 2095 +++++++++++++++++ .../migrations/kibana_migrator_test_kit.ts | 7 +- .../zdt/basic_document_migration.ts | 247 ++ .../shared_suites/zdt/standard_workflow.ts | 118 + .../saved_objects/migrations/test_types.ts | 13 + .../zdt_1/basic_document_migration.test.ts | 235 +- .../zdt_1/standard_workflow.test.ts | 132 +- .../serverless/migrations/actions.test.ts | 28 + .../basic_document_migration.test.ts | 25 + .../serverless/migrations/smoke.test.ts | 7 +- .../migrations/standard_workflow.test.ts | 25 + 21 files changed, 2664 insertions(+), 2460 deletions(-) create mode 100644 src/core/server/integration_tests/saved_objects/migrations/group3/actions/actions_test_suite.ts create mode 100644 src/core/server/integration_tests/saved_objects/migrations/shared_suites/zdt/basic_document_migration.ts create mode 100644 
src/core/server/integration_tests/saved_objects/migrations/shared_suites/zdt/standard_workflow.ts create mode 100644 src/core/server/integration_tests/saved_objects/migrations/test_types.ts create mode 100644 src/core/server/integration_tests/saved_objects/serverless/migrations/actions.test.ts create mode 100644 src/core/server/integration_tests/saved_objects/serverless/migrations/basic_document_migration.test.ts create mode 100644 src/core/server/integration_tests/saved_objects/serverless/migrations/standard_workflow.test.ts diff --git a/packages/core/saved-objects/core-saved-objects-base-server-internal/src/saved_objects_config.ts b/packages/core/saved-objects/core-saved-objects-base-server-internal/src/saved_objects_config.ts index 747b184c1a2f..f09746325ce9 100644 --- a/packages/core/saved-objects/core-saved-objects-base-server-internal/src/saved_objects_config.ts +++ b/packages/core/saved-objects/core-saved-objects-base-server-internal/src/saved_objects_config.ts @@ -45,7 +45,7 @@ const migrationSchema = schema.object({ * The delay that the migrator will wait for, in seconds, when updating the * index mapping's meta to let the other nodes pickup the changes. */ - metaPickupSyncDelaySec: schema.number({ min: 1, defaultValue: 120 }), + metaPickupSyncDelaySec: schema.number({ min: 1, defaultValue: 5 }), /** * The document migration phase will be run from instances with any of the specified roles. 
* diff --git a/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/clone_index.test.ts b/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/clone_index.test.ts index 73cba7294aa6..1fcf77860245 100644 --- a/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/clone_index.test.ts +++ b/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/clone_index.test.ts @@ -73,7 +73,7 @@ describe('cloneIndex', () => { `); }); - it('calls client.indices.clone with the correct parameter for serverless ES', async () => { + it('resolve left with operation_not_supported for serverless ES', async () => { const statelessCapabilities = elasticsearchServiceMock.createCapabilities({ serverless: true }); const task = cloneIndex({ client, @@ -81,27 +81,14 @@ describe('cloneIndex', () => { target: 'my_target_index', esCapabilities: statelessCapabilities, }); - try { - await task(); - } catch (e) { - /** ignore */ - } - expect(client.indices.clone.mock.calls[0][0]).toMatchInlineSnapshot(` + const result = await task(); + expect(result).toMatchInlineSnapshot(` Object { - "index": "my_source_index", - "settings": Object { - "index": Object { - "blocks.write": false, - "mapping": Object { - "total_fields": Object { - "limit": 1500, - }, - }, - }, + "_tag": "Left", + "left": Object { + "operationName": "clone", + "type": "operation_not_supported", }, - "target": "my_target_index", - "timeout": "60s", - "wait_for_active_shards": "all", } `); }); diff --git a/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/clone_index.ts b/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/clone_index.ts index 9bce341d242b..7334f17191df 100644 --- a/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/clone_index.ts +++ 
b/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/clone_index.ts @@ -18,7 +18,7 @@ import { catchRetryableEsClientErrors, type RetryableEsClientError, } from './catch_retryable_es_client_errors'; -import type { IndexNotFound, AcknowledgeResponse } from '.'; +import type { IndexNotFound, AcknowledgeResponse, OperationNotSupported } from '.'; import { type IndexNotGreenTimeout, waitForIndexStatus } from './wait_for_index_status'; import { DEFAULT_TIMEOUT, @@ -58,25 +58,41 @@ export const cloneIndex = ({ target, timeout = DEFAULT_TIMEOUT, }: CloneIndexParams): TaskEither.TaskEither< - RetryableEsClientError | IndexNotFound | IndexNotGreenTimeout | ClusterShardLimitExceeded, + | RetryableEsClientError + | IndexNotFound + | IndexNotGreenTimeout + | ClusterShardLimitExceeded + | OperationNotSupported, CloneIndexResponse > => { const cloneTask: TaskEither.TaskEither< - RetryableEsClientError | IndexNotFound | ClusterShardLimitExceeded, + RetryableEsClientError | IndexNotFound | ClusterShardLimitExceeded | OperationNotSupported, AcknowledgeResponse > = () => { - const indexSettings = { - // The source we're cloning from will have a write block set, so - // we need to remove it to allow writes to our newly cloned index - 'blocks.write': false, - // Increase the fields limit beyond the default of 1000 - mapping: { - total_fields: { limit: 1500 }, - }, - // settings not being supported on serverless ES - ...(esCapabilities.serverless - ? 
{} - : { + // clone is not supported on serverless + if (esCapabilities.serverless) { + return Promise.resolve( + Either.left({ + type: 'operation_not_supported' as const, + operationName: 'clone', + }) + ); + } + + return client.indices + .clone({ + index: source, + target, + wait_for_active_shards: WAIT_FOR_ALL_SHARDS_TO_BE_ACTIVE, + settings: { + index: { + // The source we're cloning from will have a write block set, so + // we need to remove it to allow writes to our newly cloned index + 'blocks.write': false, + // Increase the fields limit beyond the default of 1000 + mapping: { + total_fields: { limit: 1500 }, + }, // The rest of the index settings should have already been applied // to the source index and will be copied to the clone target. But // we repeat it here for explicitness. @@ -88,16 +104,7 @@ export const cloneIndex = ({ refresh_interval: '1s', // Bump priority so that recovery happens before newer indices priority: 10, - }), - }; - - return client.indices - .clone({ - index: source, - target, - wait_for_active_shards: WAIT_FOR_ALL_SHARDS_TO_BE_ACTIVE, - settings: { - index: indexSettings, + }, }, timeout, }) diff --git a/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/es_errors.test.ts b/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/es_errors.test.ts index a68cc62e76c5..ae53e993c411 100644 --- a/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/es_errors.test.ts +++ b/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/es_errors.test.ts @@ -106,6 +106,15 @@ describe('isClusterShardLimitExceeded', () => { }) ).toEqual(true); }); + it('returns true with illegal_argument_exception and reason is maximum normal shards open', () => { + expect( + isClusterShardLimitExceeded({ + type: 'illegal_argument_exception', + reason: + 'Validation Failed: 1: this action would add [2] shards, but this cluster currently has 
[3]/[1] maximum normal shards open;', + }) + ).toEqual(true); + }); it('returns false for validation_exception with another reason', () => { expect( isClusterShardLimitExceeded({ diff --git a/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/es_errors.ts b/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/es_errors.ts index c4eeebd7df21..735bdabf789c 100644 --- a/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/es_errors.ts +++ b/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/es_errors.ts @@ -27,8 +27,10 @@ export const isIndexNotFoundException = (errorCause?: estypes.ErrorCause): boole }; export const isClusterShardLimitExceeded = (errorCause?: estypes.ErrorCause): boolean => { + // traditional ES: validation_exception. serverless ES: illegal_argument_exception return ( - errorCause?.type === 'validation_exception' && + (errorCause?.type === 'validation_exception' || + errorCause?.type === 'illegal_argument_exception') && errorCause?.reason?.match( /this action would add .* shards, but this cluster currently has .* maximum normal shards open/ ) !== null diff --git a/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/index.ts b/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/index.ts index 270926a10cba..c06cd5f05c13 100644 --- a/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/index.ts +++ b/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/actions/index.ts @@ -136,6 +136,11 @@ export interface IndexNotFound { index: string; } +export interface OperationNotSupported { + type: 'operation_not_supported'; + operationName: string; +} + export interface WaitForReindexTaskFailure { readonly cause: { type: string; reason: string }; } @@ -179,6 +184,7 @@ export interface ActionErrorTypeMap { 
synchronization_failed: SynchronizationFailed; actual_mappings_incomplete: ActualMappingsIncomplete; compared_mappings_changed: ComparedMappingsChanged; + operation_not_supported: OperationNotSupported; } /** diff --git a/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/model/model.ts b/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/model/model.ts index 2264ca388c97..563b138fdb45 100644 --- a/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/model/model.ts +++ b/packages/core/saved-objects/core-saved-objects-migration-server-internal/src/model/model.ts @@ -1167,6 +1167,12 @@ export const model = (currentState: State, resW: ResponseType): controlState: 'FATAL', reason: `${CLUSTER_SHARD_LIMIT_EXCEEDED_REASON} See ${stateP.migrationDocLinks.clusterShardLimitExceeded}`, }; + } else if (isTypeof(left, 'operation_not_supported')) { + return { + ...stateP, + controlState: 'FATAL', + reason: `Action failed due to unsupported operation: ${left.operationName}`, + }; } else { throwBadResponse(stateP, left); } diff --git a/packages/core/test-helpers/core-test-helpers-kbn-server/src/create_serverless_root.ts b/packages/core/test-helpers/core-test-helpers-kbn-server/src/create_serverless_root.ts index 3e12b1683271..e20657b90427 100644 --- a/packages/core/test-helpers/core-test-helpers-kbn-server/src/create_serverless_root.ts +++ b/packages/core/test-helpers/core-test-helpers-kbn-server/src/create_serverless_root.ts @@ -12,6 +12,7 @@ import { Client, HttpConnection } from '@elastic/elasticsearch'; import { Cluster } from '@kbn/es'; import { REPO_ROOT } from '@kbn/repo-info'; import { ToolingLog } from '@kbn/tooling-log'; +import { esTestConfig } from '@kbn/test'; import { CliArgs } from '@kbn/config'; import { createRoot, type TestElasticsearchUtils, type TestKibanaUtils } from './create_root'; @@ -25,6 +26,8 @@ export interface TestServerlessUtils { startKibana: (abortSignal?: AbortSignal) => 
Promise; } +const ES_BASE_PATH_DIR = Path.join(REPO_ROOT, '.es/es_test_serverless'); + /** * See docs in {@link TestUtils}. This function provides the same utilities but * configured for serverless. @@ -36,9 +39,11 @@ export function createTestServerlessInstances({ }: { adjustTimeout: (timeout: number) => void; }): TestServerlessUtils { + adjustTimeout?.(150_000); + const esUtils = createServerlessES(); const kbUtils = createServerlessKibana(); - adjustTimeout?.(120_000); + return { startES: async () => { const { stop, getClient } = await esUtils.start(); @@ -63,26 +68,29 @@ export function createTestServerlessInstances({ }; } -const ES_BASE_PATH_DIR = Path.join(REPO_ROOT, '.es/es_test_serverless'); function createServerlessES() { const log = new ToolingLog({ level: 'info', writeTo: process.stdout, }); const es = new Cluster({ log }); + const esPort = esTestConfig.getPort(); return { es, start: async () => { await es.runServerless({ basePath: ES_BASE_PATH_DIR, + port: esPort, teardown: true, background: true, clean: true, kill: true, waitForReady: true, }); + const client = getServerlessESClient({ port: esPort }); + return { - getClient: getServerlessESClient, + getClient: () => client, stop: async () => { await es.stop(); }, @@ -91,10 +99,9 @@ function createServerlessES() { }; } -const getServerlessESClient = () => { +const getServerlessESClient = ({ port }: { port: number }) => { return new Client({ - // node ports not configurable from - node: 'http://localhost:9200', + node: `http://localhost:${port}`, Connection: HttpConnection, }); }; @@ -108,6 +115,9 @@ const getServerlessDefault = () => { strictClientVersionCheck: false, }, }, + elasticsearch: { + hosts: [`http://localhost:${esTestConfig.getPort()}`], + }, migrations: { algorithm: 'zdt', zdt: { @@ -134,6 +144,7 @@ const getServerlessDefault = () => { }, }; }; + function createServerlessKibana(settings = {}, cliArgs: Partial = {}) { return createRoot(defaultsDeep(settings, getServerlessDefault()), { 
...cliArgs, diff --git a/src/core/server/integration_tests/elasticsearch/capabilities_serverless.test.ts b/src/core/server/integration_tests/elasticsearch/capabilities_serverless.test.ts index 56754a12daed..314fa311cf70 100644 --- a/src/core/server/integration_tests/elasticsearch/capabilities_serverless.test.ts +++ b/src/core/server/integration_tests/elasticsearch/capabilities_serverless.test.ts @@ -13,9 +13,7 @@ import { import type { ElasticsearchClient } from '@kbn/core-elasticsearch-server'; import { getCapabilitiesFromClient } from '@kbn/core-elasticsearch-server-internal'; -// skipped because test serverless ES nodes are currently using static ports -// causing parallel jest runners to fail for obvious port conflicts reasons. -describe.skip('ES capabilities for serverless ES', () => { +describe('ES capabilities for serverless ES', () => { let serverlessES: TestServerlessESUtils; let client: ElasticsearchClient; diff --git a/src/core/server/integration_tests/saved_objects/migrations/group3/actions/actions.test.ts b/src/core/server/integration_tests/saved_objects/migrations/group3/actions/actions.test.ts index dc583d97190a..a951ecc37d1f 100644 --- a/src/core/server/integration_tests/saved_objects/migrations/group3/actions/actions.test.ts +++ b/src/core/server/integration_tests/saved_objects/migrations/group3/actions/actions.test.ts @@ -6,2061 +6,27 @@ * Side Public License, v 1. 
*/ -import Path from 'path'; -import * as Either from 'fp-ts/lib/Either'; -import * as Option from 'fp-ts/lib/Option'; -import { errors } from '@elastic/elasticsearch'; -import type { TaskEither } from 'fp-ts/lib/TaskEither'; -import type { SavedObjectsRawDoc } from '@kbn/core-saved-objects-server'; -import type { ElasticsearchClient } from '@kbn/core-elasticsearch-server'; -import { elasticsearchServiceMock } from '@kbn/core-elasticsearch-server-mocks'; -import { createTestServers, type TestElasticsearchUtils } from '@kbn/core-test-helpers-kbn-server'; -import { - bulkOverwriteTransformedDocuments, - closePit, - createIndex, - openPit, - type OpenPitResponse, - reindex, - readWithPit, - type EsResponseTooLargeError, - type ReadWithPit, - setWriteBlock, - updateAliases, - waitForReindexTask, - type ReindexResponse, - waitForPickupUpdatedMappingsTask, - pickupUpdatedMappings, - type UpdateByQueryResponse, - updateAndPickupMappings, - type UpdateAndPickupMappingsResponse, - updateMappings, - removeWriteBlock, - transformDocs, - waitForIndexStatus, - initAction, - cloneIndex, - type DocumentsTransformFailed, - type DocumentsTransformSuccess, - MIGRATION_CLIENT_OPTIONS, - createBulkIndexOperationTuple, -} from '@kbn/core-saved-objects-migration-server-internal'; +import { createTestServers } from '@kbn/core-test-helpers-kbn-server'; +import { MIGRATION_CLIENT_OPTIONS } from '@kbn/core-saved-objects-migration-server-internal'; +import { runActionTestSuite } from './actions_test_suite'; const { startES } = createTestServers({ adjustTimeout: (t: number) => jest.setTimeout(t), settings: { es: { license: 'basic', - dataArchive: Path.resolve(__dirname, '../../archives/7.7.2_xpack_100k_obj.zip'), esArgs: ['http.max_content_length=10Kb'], }, }, }); -let esServer: TestElasticsearchUtils; describe('migration actions', () => { - let client: ElasticsearchClient; - let esCapabilities: ReturnType; - - beforeAll(async () => { - esServer = await startES(); - client = 
esServer.es.getClient().child(MIGRATION_CLIENT_OPTIONS); - esCapabilities = elasticsearchServiceMock.createCapabilities(); - - // Create test fixture data: - await createIndex({ - client, - indexName: 'existing_index_with_docs', - aliases: ['existing_index_with_docs_alias'], - esCapabilities, - mappings: { - dynamic: true, - properties: { - someProperty: { - type: 'integer', - }, - }, - _meta: { - migrationMappingPropertyHashes: { - references: '7997cf5a56cc02bdc9c93361bde732b0', - }, - }, - }, - })(); - const docs = [ - { _source: { title: 'doc 1' } }, - { _source: { title: 'doc 2' } }, - { _source: { title: 'doc 3' } }, - { _source: { title: 'saved object 4', type: 'another_unused_type' } }, - { _source: { title: 'f-agent-event 5', type: 'f_agent_event' } }, - { _source: { title: new Array(1000).fill('a').join(), type: 'large' } }, // "large" saved object - ] as unknown as SavedObjectsRawDoc[]; - await bulkOverwriteTransformedDocuments({ - client, - index: 'existing_index_with_docs', - operations: docs.map((doc) => createBulkIndexOperationTuple(doc)), - refresh: 'wait_for', - })(); - - await createIndex({ - client, - indexName: 'existing_index_2', - mappings: { properties: {} }, - esCapabilities, - })(); - await createIndex({ - client, - indexName: 'existing_index_with_write_block', - mappings: { properties: {} }, - esCapabilities, - })(); - await bulkOverwriteTransformedDocuments({ - client, - index: 'existing_index_with_write_block', - operations: docs.map((doc) => createBulkIndexOperationTuple(doc)), - refresh: 'wait_for', - })(); - await setWriteBlock({ client, index: 'existing_index_with_write_block' })(); - await updateAliases({ - client, - aliasActions: [{ add: { index: 'existing_index_2', alias: 'existing_index_2_alias' } }], - })(); - }); - - afterAll(async () => { - await esServer.stop(); - }); - - describe('initAction', () => { - afterAll(async () => { - await client.cluster.putSettings({ - body: { - persistent: { - // Reset persistent test settings - 
cluster: { routing: { allocation: { enable: null } } }, - }, - }, - }); - }); - it('resolves right empty record if no indices were found', async () => { - expect.assertions(1); - const task = initAction({ client, indices: ['no_such_index'] }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Right", - "right": Object {}, - } - `); - }); - it('resolves right record with found indices', async () => { - expect.assertions(1); - const res = (await initAction({ - client, - indices: ['no_such_index', 'existing_index_with_docs'], - })()) as Either.Right; - - expect(res.right).toEqual( - expect.objectContaining({ - existing_index_with_docs: { - aliases: { - existing_index_with_docs_alias: {}, - }, - mappings: expect.anything(), - settings: expect.anything(), - }, - }) - ); - }); - it('includes the _meta data of the indices in the response', async () => { - expect.assertions(1); - const res = (await initAction({ - client, - indices: ['existing_index_with_docs'], - })()) as Either.Right; - - expect(res.right).toEqual( - expect.objectContaining({ - existing_index_with_docs: { - aliases: { - existing_index_with_docs_alias: {}, - }, - mappings: { - // FIXME https://github.com/elastic/elasticsearch-js/issues/1796 - dynamic: 'true', - properties: expect.anything(), - _meta: { - migrationMappingPropertyHashes: { - references: '7997cf5a56cc02bdc9c93361bde732b0', - }, - }, - }, - settings: expect.anything(), - }, - }) - ); - }); - it('resolves left when cluster.routing.allocation.enabled is incompatible', async () => { - expect.assertions(3); - await client.cluster.putSettings({ - body: { - persistent: { - // Disable all routing allocation - cluster: { routing: { allocation: { enable: 'none' } } }, - }, - }, - }); - const task = initAction({ - client, - indices: ['existing_index_with_docs'], - }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "type": "incompatible_cluster_routing_allocation", - 
}, - } - `); - await client.cluster.putSettings({ - body: { - persistent: { - // Allow routing to existing primaries only - cluster: { routing: { allocation: { enable: 'primaries' } } }, - }, - }, - }); - const task2 = initAction({ - client, - indices: ['existing_index_with_docs'], - }); - await expect(task2()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "type": "incompatible_cluster_routing_allocation", - }, - } - `); - await client.cluster.putSettings({ - body: { - persistent: { - // Allow routing to new primaries only - cluster: { routing: { allocation: { enable: 'new_primaries' } } }, - }, - }, - }); - const task3 = initAction({ - client, - indices: ['existing_index_with_docs'], - }); - await expect(task3()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "type": "incompatible_cluster_routing_allocation", - }, - } - `); - }); - it('resolves right when cluster.routing.allocation.enabled=all', async () => { - expect.assertions(1); - await client.cluster.putSettings({ - body: { - persistent: { - cluster: { routing: { allocation: { enable: 'all' } } }, - }, - }, - }); - const task = initAction({ - client, - indices: ['existing_index_with_docs'], - }); - const result = await task(); - expect(Either.isRight(result)).toBe(true); - }); - }); - - describe('setWriteBlock', () => { - beforeAll(async () => { - await createIndex({ - client, - indexName: 'new_index_without_write_block', - mappings: { properties: {} }, - esCapabilities, - })(); - }); - it('resolves right when setting the write block succeeds', async () => { - expect.assertions(1); - const task = setWriteBlock({ client, index: 'new_index_without_write_block' }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Right", - "right": "set_write_block_succeeded", - } - `); - }); - it('resolves right when setting a write block on an index that already has one', async () => { - expect.assertions(1); - const task = 
setWriteBlock({ client, index: 'existing_index_with_write_block' }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Right", - "right": "set_write_block_succeeded", - } - `); - }); - it('once resolved, prevents further writes to the index', async () => { - expect.assertions(1); - const task = setWriteBlock({ client, index: 'new_index_without_write_block' }); - await task(); - const sourceDocs = [ - { _source: { title: 'doc 1' } }, - { _source: { title: 'doc 2' } }, - { _source: { title: 'doc 3' } }, - { _source: { title: 'doc 4' } }, - ] as unknown as SavedObjectsRawDoc[]; - - const res = (await bulkOverwriteTransformedDocuments({ - client, - index: 'new_index_without_write_block', - operations: sourceDocs.map((doc) => createBulkIndexOperationTuple(doc)), - refresh: 'wait_for', - })()) as Either.Left; - - expect(res.left).toEqual({ - type: 'target_index_had_write_block', - }); - }); - it('resolves left index_not_found_exception when the index does not exist', async () => { - expect.assertions(1); - const task = setWriteBlock({ client, index: 'no_such_index' }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "index": "no_such_index", - "type": "index_not_found_exception", - }, - } - `); - }); - }); - - describe('removeWriteBlock', () => { - beforeAll(async () => { - await createIndex({ - client, - indexName: 'existing_index_without_write_block_2', - mappings: { properties: {} }, - esCapabilities, - })(); - await createIndex({ - client, - indexName: 'existing_index_with_write_block_2', - mappings: { properties: {} }, - esCapabilities, - })(); - await setWriteBlock({ client, index: 'existing_index_with_write_block_2' })(); - }); - it('resolves right if successful when an index already has a write block', async () => { - expect.assertions(1); - const task = removeWriteBlock({ client, index: 'existing_index_with_write_block_2' }); - await 
expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Right", - "right": "remove_write_block_succeeded", - } - `); - }); - it('resolves right if successful when an index does not have a write block', async () => { - expect.assertions(1); - const task = removeWriteBlock({ client, index: 'existing_index_without_write_block_2' }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Right", - "right": "remove_write_block_succeeded", - } - `); - }); - it('rejects if there is a non-retryable error', async () => { - expect.assertions(1); - const task = removeWriteBlock({ client, index: 'no_such_index' }); - await expect(task()).rejects.toThrow('index_not_found_exception'); - }); - }); - - describe('waitForIndexStatus', () => { - afterEach(async () => { - try { - await client.indices.delete({ index: 'red_then_yellow_index' }); - await client.indices.delete({ index: 'red_index' }); - } catch (e) { - /** ignore */ - } - }); - it('resolves right after waiting for an index status to be yellow if the index already existed', async () => { - // Create a red index - await client.indices.create( - { - index: 'red_then_yellow_index', - timeout: '5s', - body: { - mappings: { properties: {} }, - settings: { - // Allocate 1 replica so that this index stays yellow - number_of_replicas: '1', - // Disable all shard allocation so that the index status is red - routing: { allocation: { enable: 'none' } }, - }, - }, - }, - { maxRetries: 0 /** handle retry ourselves for now */ } - ); - - // Start tracking the index status - const indexStatusPromise = waitForIndexStatus({ - client, - index: 'red_then_yellow_index', - status: 'yellow', - })(); - - const redStatusResponse = await client.cluster.health({ index: 'red_then_yellow_index' }); - expect(redStatusResponse.status).toBe('red'); - - client.indices.putSettings({ - index: 'red_then_yellow_index', - body: { - // Enable all shard allocation so that the index status turns yellow - routing: { allocation: 
{ enable: 'all' } }, - }, - }); - - await indexStatusPromise; - // Assert that the promise didn't resolve before the index became yellow - - const yellowStatusResponse = await client.cluster.health({ index: 'red_then_yellow_index' }); - expect(yellowStatusResponse.status).toBe('yellow'); - }); - it('resolves left with "index_not_yellow_timeout" after waiting for an index status to be yellow timeout', async () => { - // Create a red index - await client.indices - .create({ - index: 'red_index', - timeout: '5s', - body: { - mappings: { properties: {} }, - settings: { - // Allocate no replicas so that this index stays red - number_of_replicas: '0', - // Disable all shard allocation so that the index status is red - index: { routing: { allocation: { enable: 'none' } } }, - }, - }, - }) - .catch((e) => {}); - // try to wait for index status yellow: - const task = waitForIndexStatus({ - client, - index: 'red_index', - timeout: '1s', - status: 'yellow', - }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "message": "[index_not_yellow_timeout] Timeout waiting for the status of the [red_index] index to become 'yellow'", - "type": "index_not_yellow_timeout", - }, - } - `); - }); - - it('resolves left with "index_not_green_timeout" after waiting for an index status to be green timeout', async () => { - // Create a yellow index - await client.indices - .create({ - index: 'yellow_index', - timeout: '5s', - body: { - mappings: { properties: {} }, - settings: { - // Allocate no replicas so that this index stays yellow - number_of_replicas: '0', - }, - }, - }) - .catch((e) => {}); - // try to wait for index status yellow: - const task = waitForIndexStatus({ - client, - index: 'red_index', - timeout: '1s', - status: 'green', - }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "message": "[index_not_green_timeout] Timeout waiting for the status of the [red_index] 
index to become 'green'", - "type": "index_not_green_timeout", - }, - } - `); - }); - }); - - describe('cloneIndex', () => { - afterAll(async () => { - try { - // Restore the default setting of 1000 shards per node - await client.cluster.putSettings({ - persistent: { cluster: { max_shards_per_node: null } }, - }); - await client.indices.delete({ index: 'clone_*' }); - } catch (e) { - /** ignore */ - } - }); - it('resolves right if cloning into a new target index', async () => { - const task = cloneIndex({ - client, - source: 'existing_index_with_write_block', - target: 'clone_target_1', - esCapabilities, - }); - expect.assertions(3); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Right", - "right": Object { - "acknowledged": true, - "shardsAcknowledged": true, - }, - } - `); - const { clone_target_1: cloneTarget1 } = await client.indices.getSettings({ - index: 'clone_target_1', - }); - // @ts-expect-error https://github.com/elastic/elasticsearch/issues/89381 - expect(cloneTarget1.settings?.index.mapping?.total_fields.limit).toBe('1500'); - expect(cloneTarget1.settings?.blocks?.write).toBeUndefined(); - }); - it('resolves right if clone target already existed after waiting for index status to be green ', async () => { - expect.assertions(2); - - // Create a red index that we later turn into green - await client.indices - .create({ - index: 'clone_red_then_green_index', - timeout: '5s', - body: { - mappings: { properties: {} }, - settings: { - // Allocate 1 replica so that this index can go to green - number_of_replicas: '0', - // Disable all shard allocation so that the index status is red - index: { routing: { allocation: { enable: 'none' } } }, - }, - }, - }) - .catch((e) => {}); - - // Call clone even though the index already exists - const cloneIndexPromise = cloneIndex({ - client, - source: 'existing_index_with_write_block', - target: 'clone_red_then_green_index', - esCapabilities, - })(); - - let indexGreen = false; - 
setTimeout(() => { - client.indices.putSettings({ - index: 'clone_red_then_green_index', - body: { - // Enable all shard allocation so that the index status goes green - routing: { allocation: { enable: 'all' } }, - }, - }); - indexGreen = true; - }, 10); - - await cloneIndexPromise.then((res) => { - // Assert that the promise didn't resolve before the index became green - expect(indexGreen).toBe(true); - expect(res).toMatchInlineSnapshot(` - Object { - "_tag": "Right", - "right": Object { - "acknowledged": true, - "shardsAcknowledged": true, - }, - } - `); - }); - }); - it('resolves left with a index_not_green_timeout if clone target already exists but takes longer than the specified timeout before turning green', async () => { - // Create a red index - await client.indices - .create({ - index: 'clone_red_index', - timeout: '5s', - body: { - mappings: { properties: {} }, - settings: { - // Allocate 1 replica so that this index stays yellow - number_of_replicas: '1', - // Disable all shard allocation so that the index status is red - index: { routing: { allocation: { enable: 'none' } } }, - }, - }, - }) - .catch((e) => {}); - - // Call clone even though the index already exists - let cloneIndexPromise = cloneIndex({ - client, - source: 'existing_index_with_write_block', - target: 'clone_red_index', - timeout: '1s', - esCapabilities, - })(); - - await expect(cloneIndexPromise).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "message": "[index_not_green_timeout] Timeout waiting for the status of the [clone_red_index] index to become 'green'", - "type": "index_not_green_timeout", - }, - } - `); - - // Now make the index yellow and repeat - - await client.indices.putSettings({ - index: 'clone_red_index', - body: { - // Enable all shard allocation so that the index status goes yellow - routing: { allocation: { enable: 'all' } }, - }, - }); - - // Call clone even though the index already exists - cloneIndexPromise = cloneIndex({ - 
client, - source: 'existing_index_with_write_block', - target: 'clone_red_index', - timeout: '1s', - esCapabilities, - })(); - - await expect(cloneIndexPromise).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "message": "[index_not_green_timeout] Timeout waiting for the status of the [clone_red_index] index to become 'green'", - "type": "index_not_green_timeout", - }, - } - `); - - // Now make the index green and it should succeed - - await client.indices.putSettings({ - index: 'clone_red_index', - body: { - // Set zero replicas so status goes green - number_of_replicas: 0, - }, - }); - - // Call clone even though the index already exists - cloneIndexPromise = cloneIndex({ - client, - source: 'existing_index_with_write_block', - target: 'clone_red_index', - timeout: '30s', - esCapabilities, - })(); - - await expect(cloneIndexPromise).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Right", - "right": Object { - "acknowledged": true, - "shardsAcknowledged": true, - }, - } - `); - }); - it('resolves left index_not_found_exception if the source index does not exist', async () => { - expect.assertions(1); - const task = cloneIndex({ - client, - source: 'no_such_index', - target: 'clone_target_3', - esCapabilities, - }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "index": "no_such_index", - "type": "index_not_found_exception", - }, - } - `); - }); - it('resolves left cluster_shard_limit_exceeded when the action would exceed the maximum normal open shards', async () => { - // Set the max shards per node really low so that any new index that's created would exceed the maximum open shards for this cluster - await client.cluster.putSettings({ persistent: { cluster: { max_shards_per_node: 1 } } }); - const cloneIndexPromise = cloneIndex({ - client, - source: 'existing_index_with_write_block', - target: 'clone_target_4', - esCapabilities, - })(); - await 
expect(cloneIndexPromise).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "type": "cluster_shard_limit_exceeded", - }, - } - `); - }); - }); - - // Reindex doesn't return any errors on it's own, so we have to test - // together with waitForReindexTask - describe('reindex & waitForReindexTask', () => { - it('resolves right when reindex succeeds without reindex script', async () => { - const res = (await reindex({ - client, - sourceIndex: 'existing_index_with_docs', - targetIndex: 'reindex_target', - reindexScript: Option.none, - requireAlias: false, - excludeOnUpgradeQuery: { match_all: {} }, - batchSize: 1000, - })()) as Either.Right; - const task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '10s' }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Right", - "right": "reindex_succeeded", - } - `); - - const results = await client.search({ index: 'reindex_target', size: 1000 }); - expect((results.hits?.hits as SavedObjectsRawDoc[]).map((doc) => doc._source.title).sort()) - .toMatchInlineSnapshot(` - Array [ - 
"a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a
", - "doc 1", - "doc 2", - "doc 3", - "f-agent-event 5", - "saved object 4", - ] - `); - }); - it('resolves right and excludes all documents not matching the excludeOnUpgradeQuery', async () => { - const res = (await reindex({ - client, - sourceIndex: 'existing_index_with_docs', - targetIndex: 'reindex_target_excluded_docs', - reindexScript: Option.none, - requireAlias: false, - excludeOnUpgradeQuery: { - bool: { - must_not: ['f_agent_event', 'another_unused_type'].map((type) => ({ - term: { type }, - })), - }, - }, - batchSize: 1000, - })()) as Either.Right; - const task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '10s' }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Right", - "right": "reindex_succeeded", - } - `); - - const results = await client.search({ index: 'reindex_target_excluded_docs', size: 1000 }); - expect((results.hits?.hits as SavedObjectsRawDoc[]).map((doc) => doc._source.title).sort()) - .toMatchInlineSnapshot(` - Array [ - 
"a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a
", - "doc 1", - "doc 2", - "doc 3", - ] - `); - }); - it('resolves right when reindex succeeds with reindex script', async () => { - expect.assertions(2); - const res = (await reindex({ - client, - sourceIndex: 'existing_index_with_docs', - targetIndex: 'reindex_target_2', - reindexScript: Option.some(`ctx._source.title = ctx._source.title + '_updated'`), - requireAlias: false, - excludeOnUpgradeQuery: { match_all: {} }, - batchSize: 1000, - })()) as Either.Right; - const task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '10s' }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Right", - "right": "reindex_succeeded", - } - `); - - const results = await client.search({ index: 'reindex_target_2', size: 1000 }); - expect((results.hits?.hits as SavedObjectsRawDoc[]).map((doc) => doc._source.title).sort()) - .toMatchInlineSnapshot(` - Array [ - "a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,
a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a_updated", - "doc 1_updated", - "doc 2_updated", - "doc 3_updated", - "f-agent-event 5_updated", - "saved object 4_updated", - ] - `); - }); - it('resolves right, ignores version conflicts and does not update existing docs when reindex multiple times', async () => { - expect.assertions(3); - // Reindex with a script - let res = (await reindex({ - client, - sourceIndex: 'existing_index_with_docs', - targetIndex: 'reindex_target_3', - reindexScript: Option.some(`ctx._source.title = ctx._source.title + '_updated'`), - requireAlias: false, - excludeOnUpgradeQuery: { match_all: {} }, - batchSize: 1000, - })()) as Either.Right; - let task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '10s' }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Right", - "right": "reindex_succeeded", - } - `); - - // reindex without a script - res = (await reindex({ - client, - sourceIndex: 'existing_index_with_docs', - targetIndex: 'reindex_target_3', - reindexScript: Option.none, - requireAlias: false, - excludeOnUpgradeQuery: { match_all: {} }, - 
batchSize: 1000, - })()) as Either.Right; - task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '10s' }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Right", - "right": "reindex_succeeded", - } - `); - - // Assert that documents weren't overridden by the second, unscripted reindex - const results = await client.search({ index: 'reindex_target_3', size: 1000 }); - expect((results.hits?.hits as SavedObjectsRawDoc[]).map((doc) => doc._source.title).sort()) - .toMatchInlineSnapshot(` - Array [ - "a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a
,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a_updated", - "doc 1_updated", - "doc 2_updated", - "doc 3_updated", - "f-agent-event 5_updated", - "saved object 4_updated", - ] - `); - }); - it('resolves right and proceeds to add missing documents if there are some existing docs conflicts', async () => { - expect.assertions(2); - // Simulate a reindex that only adds some of the documents from the - // source index into the target index - await createIndex({ - client, - indexName: 'reindex_target_4', - mappings: { properties: {} }, - esCapabilities, - })(); - const response = await client.search({ index: 'existing_index_with_docs', size: 1000 }); - const sourceDocs = (response.hits?.hits as SavedObjectsRawDoc[]) - .slice(0, 2) - .map(({ _id, _source }) => ({ - _id, - _source, - })); - await bulkOverwriteTransformedDocuments({ - client, - index: 'reindex_target_4', - operations: sourceDocs.map((doc) => createBulkIndexOperationTuple(doc)), - refresh: 'wait_for', - })(); - - // Now do a real reindex - const res = (await reindex({ - client, - sourceIndex: 'existing_index_with_docs', - targetIndex: 'reindex_target_4', - reindexScript: Option.some(`ctx._source.title = ctx._source.title + '_updated'`), - requireAlias: false, - excludeOnUpgradeQuery: { match_all: {} }, - batchSize: 1000, - })()) as Either.Right; - const task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '10s' }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": 
"Right", - "right": "reindex_succeeded", - } - `); - // Assert that existing documents weren't overridden, but that missing - // documents were added by the reindex - const results = await client.search({ index: 'reindex_target_4', size: 1000 }); - expect((results.hits?.hits as SavedObjectsRawDoc[]).map((doc) => doc._source.title).sort()) - .toMatchInlineSnapshot(` - Array [ - "a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a
,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a_updated", - "doc 1", - "doc 2", - "doc 3_updated", - "f-agent-event 5_updated", - "saved object 4_updated", - ] - `); - }); - it('resolves left incompatible_mapping_exception if all reindex failures are due to a strict_dynamic_mapping_exception', async () => { - expect.assertions(1); - // Simulates one instance having completed the UPDATE_TARGET_MAPPINGS - // step which makes the mappings incompatible with outdated documents. - // If another instance then tries a reindex it will get a - // strict_dynamic_mapping_exception even if the documents already exist - // and should ignore this error. - - // Create an index with incompatible mappings - await createIndex({ - client, - indexName: 'reindex_target_5', - mappings: { - dynamic: 'strict', - properties: { - /** no title field */ - }, - }, - esCapabilities, - })(); - - const { - right: { taskId: reindexTaskId }, - } = (await reindex({ - client, - sourceIndex: 'existing_index_with_docs', - targetIndex: 'reindex_target_5', - reindexScript: Option.none, - requireAlias: false, - excludeOnUpgradeQuery: { match_all: {} }, - batchSize: 1000, - })()) as Either.Right; - const task = waitForReindexTask({ client, taskId: reindexTaskId, timeout: '10s' }); - - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "type": "incompatible_mapping_exception", - }, - } - `); - }); - it('resolves left incompatible_mapping_exception if all reindex failures are due to a mapper_parsing_exception', async () => { - expect.assertions(1); - // Simulates one instance having completed the UPDATE_TARGET_MAPPINGS - // step 
which makes the mappings incompatible with outdated documents. - // If another instance then tries a reindex it will get a - // strict_dynamic_mapping_exception even if the documents already exist - // and should ignore this error. - - // Create an index with incompatible mappings - await createIndex({ - client, - indexName: 'reindex_target_6', - mappings: { - dynamic: false, - properties: { title: { type: 'integer' } }, // integer is incompatible with string title - }, - esCapabilities, - })(); - - const { - right: { taskId: reindexTaskId }, - } = (await reindex({ - client, - sourceIndex: 'existing_index_with_docs', - targetIndex: 'reindex_target_6', - reindexScript: Option.none, - requireAlias: false, - excludeOnUpgradeQuery: { match_all: {} }, - batchSize: 1000, - })()) as Either.Right; - const task = waitForReindexTask({ client, taskId: reindexTaskId, timeout: '10s' }); - - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "type": "incompatible_mapping_exception", - }, - } - `); - }); - it('resolves left index_not_found_exception if source index does not exist', async () => { - expect.assertions(1); - const res = (await reindex({ - client, - sourceIndex: 'no_such_index', - targetIndex: 'reindex_target', - reindexScript: Option.none, - requireAlias: false, - excludeOnUpgradeQuery: { - match_all: {}, - }, - batchSize: 1000, - })()) as Either.Right; - const task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '10s' }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "index": "no_such_index", - "type": "index_not_found_exception", - }, - } - `); - }); - it('resolves left target_index_had_write_block if all failures are due to a write block', async () => { - expect.assertions(1); - const res = (await reindex({ - client, - sourceIndex: 'existing_index_with_docs', - targetIndex: 'existing_index_with_write_block', - reindexScript: 
Option.none, - requireAlias: false, - excludeOnUpgradeQuery: { match_all: {} }, - batchSize: 1000, - })()) as Either.Right; - - const task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '10s' }); - - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "type": "target_index_had_write_block", - }, - } - `); - }); - it('resolves left if requireAlias=true and the target is not an alias', async () => { - expect.assertions(1); - const res = (await reindex({ - client, - sourceIndex: 'existing_index_with_docs', - targetIndex: 'existing_index_with_write_block', - reindexScript: Option.none, - requireAlias: true, - excludeOnUpgradeQuery: { match_all: {} }, - batchSize: 1000, - })()) as Either.Right; - - const task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '10s' }); - - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "index": "existing_index_with_write_block", - "type": "index_not_found_exception", - }, - } - `); - }); - it('resolves left wait_for_task_completion_timeout when the task does not finish within the timeout', async () => { - await waitForIndexStatus({ - client, - index: '.kibana_1', - status: 'yellow', - })(); - - const res = (await reindex({ - client, - sourceIndex: '.kibana_1', - targetIndex: 'reindex_target', - reindexScript: Option.none, - requireAlias: false, - excludeOnUpgradeQuery: { match_all: {} }, - batchSize: 1000, - })()) as Either.Right; - - const task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '0s' }); - - await expect(task()).resolves.toMatchObject({ - _tag: 'Left', - left: { - error: expect.any(errors.ResponseError), - message: expect.stringContaining('[timeout_exception]'), - type: 'wait_for_task_completion_timeout', - }, - }); - }); - }); - - describe('openPit', () => { - it('opens PointInTime for an index', async () => { - const openPitTask = openPit({ client, index: 
'existing_index_with_docs' }); - const pitResponse = (await openPitTask()) as Either.Right; - - expect(pitResponse.right.pitId).toEqual(expect.any(String)); - - const searchResponse = await client.search({ - body: { - pit: { id: pitResponse.right.pitId }, - }, - }); - - await expect(searchResponse.hits.hits.length).toBeGreaterThan(0); - }); - it('rejects if index does not exist', async () => { - const openPitTask = openPit({ client, index: 'no_such_index' }); - await expect(openPitTask()).rejects.toThrow('index_not_found_exception'); - }); - }); - - describe('readWithPit', () => { - it('requests documents from an index using given PIT', async () => { - const openPitTask = openPit({ client, index: 'existing_index_with_docs' }); - const pitResponse = (await openPitTask()) as Either.Right; - - const readWithPitTask = readWithPit({ - client, - pitId: pitResponse.right.pitId, - query: { match_all: {} }, - batchSize: 1000, - searchAfter: undefined, - }); - const docsResponse = (await readWithPitTask()) as Either.Right; - - await expect(docsResponse.right.outdatedDocuments.length).toBe(6); - }); - - it('requests the batchSize of documents from an index', async () => { - const openPitTask = openPit({ client, index: 'existing_index_with_docs' }); - const pitResponse = (await openPitTask()) as Either.Right; - - const readWithPitTask = readWithPit({ - client, - pitId: pitResponse.right.pitId, - query: { match_all: {} }, - batchSize: 3, - searchAfter: undefined, - }); - const docsResponse = (await readWithPitTask()) as Either.Right; - - await expect(docsResponse.right.outdatedDocuments.length).toBe(3); - }); - - it('it excludes documents not matching the provided "query"', async () => { - const openPitTask = openPit({ client, index: 'existing_index_with_docs' }); - const pitResponse = (await openPitTask()) as Either.Right; - - const readWithPitTask = readWithPit({ - client, - pitId: pitResponse.right.pitId, - query: { - bool: { - must_not: [ - { - term: { - type: 
'f_agent_event', - }, - }, - { - term: { - type: 'another_unused_type', - }, - }, - ], - }, - }, - batchSize: 1000, - searchAfter: undefined, - }); - - const docsResponse = (await readWithPitTask()) as Either.Right; - - expect(docsResponse.right.outdatedDocuments.map((doc) => doc._source.title).sort()) - .toMatchInlineSnapshot(` - Array [ - "a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,
a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a", - "doc 1", - "doc 2", - "doc 3", - ] - `); - }); - - it('only returns documents that match the provided "query"', async () => { - const openPitTask = openPit({ client, index: 'existing_index_with_docs' }); - const pitResponse = (await openPitTask()) as Either.Right; - - const readWithPitTask = readWithPit({ - client, - pitId: pitResponse.right.pitId, - query: { - match: { title: { query: 'doc' } }, - }, - batchSize: 1000, - searchAfter: undefined, - }); - - const docsResponse = (await readWithPitTask()) as Either.Right; - - expect(docsResponse.right.outdatedDocuments.map((doc) => doc._source.title).sort()) - .toMatchInlineSnapshot(` - Array [ - "doc 1", - "doc 2", - "doc 3", - ] - `); - }); - - it('returns docs with _seq_no and _primary_term when specified', async () => { - const openPitTask = openPit({ client, index: 'existing_index_with_docs' }); - const pitResponse = (await openPitTask()) as Either.Right; - - const readWithPitTask = readWithPit({ - client, - pitId: pitResponse.right.pitId, - query: { - match: { title: { query: 'doc' } }, - }, - batchSize: 1000, - searchAfter: undefined, - seqNoPrimaryTerm: true, - }); - - const docsResponse = (await readWithPitTask()) as Either.Right; - - expect(docsResponse.right.outdatedDocuments).toEqual( - expect.arrayContaining([ - expect.objectContaining({ - _seq_no: expect.any(Number), - _primary_term: expect.any(Number), - }), - ]) - ); - }); - - it('does not return docs with _seq_no and _primary_term if not specified', async () => { - const openPitTask = openPit({ client, index: 'existing_index_with_docs' }); - const pitResponse = (await openPitTask()) as Either.Right; - - const 
readWithPitTask = readWithPit({ - client, - pitId: pitResponse.right.pitId, - query: { - match: { title: { query: 'doc' } }, - }, - batchSize: 1000, - searchAfter: undefined, - }); - - const docsResponse = (await readWithPitTask()) as Either.Right; - - expect(docsResponse.right.outdatedDocuments).toEqual( - expect.arrayContaining([ - expect.not.objectContaining({ - _seq_no: expect.any(Number), - _primary_term: expect.any(Number), - }), - ]) - ); - }); - - it('returns a left es_response_too_large error when a read batch exceeds the maxResponseSize', async () => { - const openPitTask = openPit({ client, index: 'existing_index_with_docs' }); - const pitResponse = (await openPitTask()) as Either.Right; - - let readWithPitTask = readWithPit({ - client, - pitId: pitResponse.right.pitId, - query: { match_all: {} }, - batchSize: 1, // small batch size so we don't exceed the maxResponseSize - searchAfter: undefined, - maxResponseSizeBytes: 500, // set a small size to force the error - }); - const rightResponse = (await readWithPitTask()) as Either.Right; - - await expect(Either.isRight(rightResponse)).toBe(true); - - readWithPitTask = readWithPit({ - client, - pitId: pitResponse.right.pitId, - query: { match_all: {} }, - batchSize: 10, // a bigger batch will exceed the maxResponseSize - searchAfter: undefined, - maxResponseSizeBytes: 500, // set a small size to force the error - }); - const leftResponse = (await readWithPitTask()) as Either.Left; - - expect(leftResponse.left.type).toBe('es_response_too_large'); - // ES response contains a field that indicates how long it took ES to get the response, e.g.: "took": 7 - // if ES takes more than 9ms, the payload will be 1 byte bigger. 
- // see https://github.com/elastic/kibana/issues/160994 - // Thus, the statements below account for response times up to 99ms - expect(leftResponse.left.contentLength).toBeGreaterThanOrEqual(3184); - expect(leftResponse.left.contentLength).toBeLessThanOrEqual(3185); - }); - - it('rejects if PIT does not exist', async () => { - const readWithPitTask = readWithPit({ - client, - pitId: 'no_such_pit', - query: { match_all: {} }, - batchSize: 1000, - searchAfter: undefined, - }); - await expect(readWithPitTask()).rejects.toThrow('illegal_argument_exception'); - }); - }); - - describe('closePit', () => { - it('closes PointInTime', async () => { - const openPitTask = openPit({ client, index: 'existing_index_with_docs' }); - const pitResponse = (await openPitTask()) as Either.Right; - - const pitId = pitResponse.right.pitId; - await closePit({ client, pitId })(); - - const searchTask = client.search({ - body: { - pit: { id: pitId }, - }, - }); - - await expect(searchTask).rejects.toThrow('search_phase_execution_exception'); - }); - - it('rejects if PIT does not exist', async () => { - const closePitTask = closePit({ client, pitId: 'no_such_pit' }); - await expect(closePitTask()).rejects.toThrow('illegal_argument_exception'); - }); - }); - - describe('transformDocs', () => { - it('applies "transformRawDocs" and returns the transformed documents', async () => { - const originalDocs = [ - { _id: 'foo:1', _source: { type: 'dashboard', value: 1 } }, - { _id: 'foo:2', _source: { type: 'dashboard', value: 2 } }, - ]; - - function innerTransformRawDocs( - docs: SavedObjectsRawDoc[] - ): TaskEither { - return async () => { - const processedDocs: SavedObjectsRawDoc[] = []; - for (const doc of docs) { - doc._source.value += 1; - processedDocs.push(doc); - } - return Either.right({ processedDocs }); - }; - } - - const transformTask = transformDocs({ - transformRawDocs: innerTransformRawDocs, - outdatedDocuments: originalDocs, - }); - - const resultsWithProcessDocs = ( - (await 
transformTask()) as Either.Right - ).right.processedDocs; - expect(resultsWithProcessDocs.length).toEqual(2); - const foo2 = resultsWithProcessDocs.find((h) => h._id === 'foo:2'); - expect(foo2?._source?.value).toBe(3); - }); - }); - - describe('waitForPickupUpdatedMappingsTask', () => { - it('rejects if there are failures', async () => { - const res = (await pickupUpdatedMappings( - client, - 'existing_index_with_write_block', - 1000 - )()) as Either.Right; - - const task = waitForPickupUpdatedMappingsTask({ - client, - taskId: res.right.taskId, - timeout: '10s', - }); - - // We can't do a snapshot match because the response includes an index - // id which ES assigns dynamically - await expect(task()).rejects.toMatchObject({ - message: - /pickupUpdatedMappings task failed with the following failures:\n\[\{\"index\":\"existing_index_with_write_block\"/, - }); - }); - it('rejects if there is an error', async () => { - const res = (await pickupUpdatedMappings( - client, - 'no_such_index', - 1000 - )()) as Either.Right; - - const task = waitForPickupUpdatedMappingsTask({ - client, - taskId: res.right.taskId, - timeout: '10s', - }); - - await expect(task()).rejects.toThrow('index_not_found_exception'); - }); - - it('resolves left wait_for_task_completion_timeout when the task does not complete within the timeout', async () => { - const res = (await pickupUpdatedMappings( - client, - '.kibana_1', - 1000 - )()) as Either.Right; - - const task = waitForPickupUpdatedMappingsTask({ - client, - taskId: res.right.taskId, - timeout: '0s', - }); - - await expect(task()).resolves.toMatchObject({ - _tag: 'Left', - left: { - error: expect.any(errors.ResponseError), - message: expect.stringContaining('[timeout_exception]'), - type: 'wait_for_task_completion_timeout', - }, - }); - }); - it('resolves right when successful', async () => { - const res = (await pickupUpdatedMappings( - client, - 'existing_index_with_docs', - 1000 - )()) as Either.Right; - - const task = 
waitForPickupUpdatedMappingsTask({ - client, - taskId: res.right.taskId, - timeout: '10s', - }); - - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Right", - "right": "pickup_updated_mappings_succeeded", - } - `); - }); - }); - - describe('updateAndPickupMappings', () => { - it('resolves right when mappings were updated and picked up', async () => { - // Create an index without any mappings and insert documents into it - await createIndex({ - client, - indexName: 'existing_index_without_mappings', - mappings: { - dynamic: false, - properties: {}, - }, - esCapabilities, - })(); - const sourceDocs = [ - { _source: { title: 'doc 1' } }, - { _source: { title: 'doc 2' } }, - { _source: { title: 'doc 3' } }, - { _source: { title: 'doc 4' } }, - ] as unknown as SavedObjectsRawDoc[]; - await bulkOverwriteTransformedDocuments({ - client, - index: 'existing_index_without_mappings', - operations: sourceDocs.map((doc) => createBulkIndexOperationTuple(doc)), - refresh: 'wait_for', - })(); - - // Assert that we can't search over the unmapped fields of the document - - const originalSearchResults = await client.search({ - index: 'existing_index_without_mappings', - size: 1000, - query: { - match: { title: { query: 'doc' } }, - }, - }); - expect(originalSearchResults.hits?.hits.length).toBe(0); - - // Update and pickup mappings so that the title field is searchable - const res = await updateAndPickupMappings({ - client, - index: 'existing_index_without_mappings', - mappings: { - properties: { - title: { type: 'text' }, - }, - }, - batchSize: 1000, - })(); - expect(Either.isRight(res)).toBe(true); - const taskId = (res as Either.Right).right.taskId; - await waitForPickupUpdatedMappingsTask({ client, taskId, timeout: '60s' })(); - - // Repeat the search expecting to be able to find the existing documents - const pickedUpSearchResults = await client.search({ - index: 'existing_index_without_mappings', - size: 1000, - query: { - match: { title: { query: 
'doc' } }, - }, - }); - expect(pickedUpSearchResults.hits?.hits.length).toBe(4); - }); - }); - - describe('updateMappings', () => { - it('rejects if ES throws an error', async () => { - const task = updateMappings({ - client, - index: 'no_such_index', - mappings: { - properties: { - created_at: { - type: 'date', - }, - }, - _meta: { - migrationMappingPropertyHashes: { - references: 'updateda56cc02bdc9c93361bupdated', - newReferences: 'fooBarHashMd509387420934879300d9', - }, - }, - }, - })(); - - await expect(task).rejects.toThrow('index_not_found_exception'); - }); - - it('resolves left when the mappings are incompatible', async () => { - const res = await updateMappings({ - client, - index: 'existing_index_with_docs', - mappings: { - properties: { - someProperty: { - type: 'date', // attempt to change an existing field's type in an incompatible fashion - }, - }, - _meta: { - migrationMappingPropertyHashes: { - references: 'updateda56cc02bdc9c93361bupdated', - newReferences: 'fooBarHashMd509387420934879300d9', - }, - }, - }, - })(); - - expect(Either.isLeft(res)).toBe(true); - expect(res).toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "type": "incompatible_mapping_exception", - }, - } - `); - }); - - it('resolves right when mappings are correctly updated', async () => { - const res = await updateMappings({ - client, - index: 'existing_index_with_docs', - mappings: { - properties: { - created_at: { - type: 'date', - }, - }, - _meta: { - migrationMappingPropertyHashes: { - references: 'updateda56cc02bdc9c93361bupdated', - newReferences: 'fooBarHashMd509387420934879300d9', - }, - }, - }, - })(); - - expect(Either.isRight(res)).toBe(true); - - const indices = await client.indices.get({ - index: ['existing_index_with_docs'], - }); - - expect(indices.existing_index_with_docs.mappings?.properties).toEqual( - expect.objectContaining({ - created_at: { - type: 'date', - }, - }) - ); - - 
expect(indices.existing_index_with_docs.mappings?._meta).toEqual({ - migrationMappingPropertyHashes: { - references: 'updateda56cc02bdc9c93361bupdated', - newReferences: 'fooBarHashMd509387420934879300d9', - }, - }); - }); - }); - - describe('updateAliases', () => { - describe('remove', () => { - it('resolves left index_not_found_exception when the index does not exist', async () => { - const task = updateAliases({ - client, - aliasActions: [ - { - remove: { - alias: 'no_such_alias', - index: 'no_such_index', - must_exist: false, - }, - }, - ], - }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "index": "no_such_index", - "type": "index_not_found_exception", - }, - } - `); - }); - describe('with must_exist=false', () => { - it('resolves left alias_not_found_exception when alias does not exist', async () => { - const task = updateAliases({ - client, - aliasActions: [ - { - remove: { - alias: 'no_such_alias', - index: 'existing_index_with_docs', - must_exist: false, - }, - }, - ], - }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "type": "alias_not_found_exception", - }, - } - `); - }); - }); - describe('with must_exist=true', () => { - it('resolves left alias_not_found_exception when alias does not exist on specified index', async () => { - const task = updateAliases({ - client, - aliasActions: [ - { - remove: { - alias: 'existing_index_2_alias', - index: 'existing_index_with_docs', - must_exist: true, - }, - }, - ], - }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "type": "alias_not_found_exception", - }, - } - `); - }); - it('resolves left alias_not_found_exception when alias does not exist', async () => { - const task = updateAliases({ - client, - aliasActions: [ - { - remove: { - alias: 'no_such_alias', - index: 'existing_index_with_docs', - must_exist: true, - }, - }, - ], - }); 
- await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "type": "alias_not_found_exception", - }, - } - `); - }); - }); - }); - describe('remove_index', () => { - it('left index_not_found_exception if index does not exist', async () => { - const task = updateAliases({ - client, - aliasActions: [ - { - remove_index: { - index: 'no_such_index', - }, - }, - ], - }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "index": "no_such_index", - "type": "index_not_found_exception", - }, - } - `); - }); - it('left remove_index_not_a_concrete_index when remove_index targets an alias', async () => { - const task = updateAliases({ - client, - aliasActions: [ - { - remove_index: { - index: 'existing_index_2_alias', - }, - }, - ], - }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "type": "remove_index_not_a_concrete_index", - }, - } - `); - }); - }); - }); - - describe('createIndex', () => { - afterEach(async () => { - // Restore the default setting of 1000 shards per node - await client.cluster.putSettings({ persistent: { cluster: { max_shards_per_node: null } } }); - }); - afterAll(async () => { - await client.indices.delete({ index: 'red_then_yellow_index' }).catch(); - await client.indices.delete({ index: 'yellow_then_green_index' }).catch(); - await client.indices.delete({ index: 'create_new_index' }).catch(); - }); - it('resolves right after waiting for an index status to become green when cluster state is not propagated within the timeout', async () => { - // By specifying a very short timeout Elasticsearch will respond before the shard is allocated - const createIndexPromise = createIndex({ - client, - indexName: 'create_new_index', - mappings: undefined as any, - timeout: '1nanos', - esCapabilities, - })(); - await expect(createIndexPromise).resolves.toEqual({ - _tag: 'Right', - right: 
'create_index_succeeded', - }); - const { create_new_index: createNewIndex } = await client.indices.getSettings({ - index: 'create_new_index', - }); - // @ts-expect-error https://github.com/elastic/elasticsearch/issues/89381 - expect(createNewIndex.settings?.index?.mapping.total_fields.limit).toBe('1500'); - }); - it('resolves left if an existing index status does not become green', async () => { - expect.assertions(2); - // Create a red index - await client.indices - .create( - { - index: 'red_then_yellow_index', - timeout: '5s', - body: { - mappings: { properties: {} }, - settings: { - // Allocate 1 replica so that this index stays yellow - number_of_replicas: '1', - // Disable all shard allocation so that the index status starts as red - index: { routing: { allocation: { enable: 'none' } } }, - }, - }, - }, - { maxRetries: 0 /** handle retry ourselves for now */ } - ) - .catch((e) => { - /** ignore */ - }); - - // Call createIndex even though the index already exists - const createIndexPromise = createIndex({ - client, - indexName: 'red_then_yellow_index', - mappings: undefined as any, - esCapabilities, - })(); - let indexYellow = false; - - setTimeout(() => { - client.indices.putSettings({ - index: 'red_then_yellow_index', - body: { - // Renable allocation so that the status becomes yellow - routing: { allocation: { enable: 'all' } }, - }, - }); - indexYellow = true; - }, 10); - - await createIndexPromise.then((err) => { - // Assert that the promise didn't resolve before the index became yellow - expect(indexYellow).toBe(true); - expect(err).toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "message": "[index_not_green_timeout] Timeout waiting for the status of the [red_then_yellow_index] index to become 'green'", - "type": "index_not_green_timeout", - }, - } - `); - }); - }); - it('resolves right after waiting for an existing index status to become green', async () => { - expect.assertions(2); - // Create a yellow index - await 
client.indices - .create({ - index: 'yellow_then_green_index', - timeout: '5s', - body: { - mappings: { properties: {} }, - settings: { - // Allocate 1 replica so that this index stays yellow - number_of_replicas: '1', - }, - }, - }) - .catch((e) => { - /** ignore */ - }); - - // Call createIndex even though the index already exists - const createIndexPromise = createIndex({ - client, - indexName: 'yellow_then_green_index', - mappings: undefined as any, - esCapabilities, - })(); - let indexGreen = false; - - setTimeout(() => { - client.indices.putSettings({ - index: 'yellow_then_green_index', - body: { - // Set 0 replican so that this index becomes green - number_of_replicas: '0', - }, - }); - indexGreen = true; - }, 10); - - await createIndexPromise.then((res) => { - // Assert that the promise didn't resolve before the index became green - expect(indexGreen).toBe(true); - expect(res).toMatchInlineSnapshot(` - Object { - "_tag": "Right", - "right": "index_already_exists", - } - `); - }); - }); - it('resolves left cluster_shard_limit_exceeded when the action would exceed the maximum normal open shards', async () => { - // Set the max shards per node really low so that any new index that's created would exceed the maximum open shards for this cluster - await client.cluster.putSettings({ persistent: { cluster: { max_shards_per_node: 1 } } }); - const createIndexPromise = createIndex({ - client, - indexName: 'create_index_1', - mappings: undefined as any, - esCapabilities, - })(); - await expect(createIndexPromise).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "type": "cluster_shard_limit_exceeded", - }, - } - `); - }); - it('rejects when there is an unexpected error creating the index', async () => { - // Creating an index with the same name as an existing alias to induce - // failure - await expect( - createIndex({ - client, - indexName: 'existing_index_2_alias', - mappings: undefined as any, - esCapabilities, - })() - 
).rejects.toThrow('invalid_index_name_exception'); - }); - }); - - describe('bulkOverwriteTransformedDocuments', () => { - it('resolves right when documents do not yet exist in the index', async () => { - const newDocs = [ - { _source: { title: 'doc 5' } }, - { _source: { title: 'doc 6' } }, - { _source: { title: 'doc 7' } }, - ] as unknown as SavedObjectsRawDoc[]; - const task = bulkOverwriteTransformedDocuments({ - client, - index: 'existing_index_with_docs', - operations: newDocs.map((doc) => createBulkIndexOperationTuple(doc)), - refresh: 'wait_for', - }); - - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Right", - "right": "bulk_index_succeeded", - } - `); - }); - it('resolves right even if there were some version_conflict_engine_exception', async () => { - const response = await client.search({ index: 'existing_index_with_docs', size: 1000 }); - const existingDocs = response.hits?.hits as SavedObjectsRawDoc[]; - - const task = bulkOverwriteTransformedDocuments({ - client, - index: 'existing_index_with_docs', - operations: [ - ...existingDocs, - { _source: { title: 'doc 8' } } as unknown as SavedObjectsRawDoc, - ].map((doc) => createBulkIndexOperationTuple(doc)), - refresh: 'wait_for', - }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Right", - "right": "bulk_index_succeeded", - } - `); - }); - it('resolves left index_not_found_exception if the index does not exist and useAliasToPreventAutoCreate=true', async () => { - const newDocs = [ - { _source: { title: 'doc 5' } }, - { _source: { title: 'doc 6' } }, - { _source: { title: 'doc 7' } }, - ] as unknown as SavedObjectsRawDoc[]; - await expect( - bulkOverwriteTransformedDocuments({ - client, - index: 'existing_index_with_docs_alias_that_does_not_exist', - useAliasToPreventAutoCreate: true, - operations: newDocs.map((doc) => createBulkIndexOperationTuple(doc)), - refresh: 'wait_for', - })() - ).resolves.toMatchInlineSnapshot(` - Object { - "_tag": 
"Left", - "left": Object { - "index": "existing_index_with_docs_alias_that_does_not_exist", - "type": "index_not_found_exception", - }, - } - `); - }); - it('resolves left target_index_had_write_block if there are write_block errors', async () => { - const newDocs = [ - { _source: { title: 'doc 5' } }, - { _source: { title: 'doc 6' } }, - { _source: { title: 'doc 7' } }, - ] as unknown as SavedObjectsRawDoc[]; - await expect( - bulkOverwriteTransformedDocuments({ - client, - index: 'existing_index_with_write_block', - operations: newDocs.map((doc) => createBulkIndexOperationTuple(doc)), - refresh: 'wait_for', - })() - ).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "type": "target_index_had_write_block", - }, - } - `); - }); - - it('resolves left request_entity_too_large_exception when the payload is too large', async () => { - const newDocs = new Array(10000).fill({ - _source: { - title: - 'how do I create a document thats large enoug to exceed the limits without typing long sentences', - }, - }) as SavedObjectsRawDoc[]; - const task = bulkOverwriteTransformedDocuments({ - client, - index: 'existing_index_with_docs', - operations: newDocs.map((doc) => createBulkIndexOperationTuple(doc)), - }); - await expect(task()).resolves.toMatchInlineSnapshot(` - Object { - "_tag": "Left", - "left": Object { - "type": "request_entity_too_large_exception", - }, - } - `); - }); + runActionTestSuite({ + startEs: async () => { + const esServer = await startES(); + const client = esServer.es.getClient().child(MIGRATION_CLIENT_OPTIONS); + return { esServer, client }; + }, + environment: 'traditional', }); }); diff --git a/src/core/server/integration_tests/saved_objects/migrations/group3/actions/actions_test_suite.ts b/src/core/server/integration_tests/saved_objects/migrations/group3/actions/actions_test_suite.ts new file mode 100644 index 000000000000..e8587ee82ee4 --- /dev/null +++ 
b/src/core/server/integration_tests/saved_objects/migrations/group3/actions/actions_test_suite.ts @@ -0,0 +1,2095 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +import * as Either from 'fp-ts/lib/Either'; +import * as Option from 'fp-ts/lib/Option'; +import { errors } from '@elastic/elasticsearch'; +import type { TaskEither } from 'fp-ts/lib/TaskEither'; +import type { SavedObjectsRawDoc } from '@kbn/core-saved-objects-server'; +import type { + ElasticsearchClient, + ElasticsearchCapabilities, +} from '@kbn/core-elasticsearch-server'; +import { getCapabilitiesFromClient } from '@kbn/core-elasticsearch-server-internal'; +import { + bulkOverwriteTransformedDocuments, + closePit, + createIndex, + openPit, + type OpenPitResponse, + reindex, + readWithPit, + type EsResponseTooLargeError, + type ReadWithPit, + setWriteBlock, + updateAliases, + waitForReindexTask, + type ReindexResponse, + waitForPickupUpdatedMappingsTask, + pickupUpdatedMappings, + type UpdateByQueryResponse, + updateAndPickupMappings, + type UpdateAndPickupMappingsResponse, + updateMappings, + removeWriteBlock, + transformDocs, + waitForIndexStatus, + initAction, + cloneIndex, + type DocumentsTransformFailed, + type DocumentsTransformSuccess, + createBulkIndexOperationTuple, +} from '@kbn/core-saved-objects-migration-server-internal'; + +interface EsServer { + stop: () => Promise; +} + +type StartEs = () => Promise<{ + esServer: EsServer; + client: ElasticsearchClient; +}>; + +export const runActionTestSuite = ({ + startEs, + environment, +}: { + startEs: StartEs; + environment: 'traditional' | 'serverless'; +}) => { + let esServer: EsServer; + let client: ElasticsearchClient; + let 
esCapabilities: ElasticsearchCapabilities; + + const runOnTraditionalOnly = (fn: Function) => { + if (environment === 'traditional') { + fn(); + } + }; + + beforeAll(async () => { + const { esServer: _esServer, client: _client } = await startEs(); + esServer = _esServer; + client = _client; + esCapabilities = await getCapabilitiesFromClient(client); + + // Create test fixture data: + await createIndex({ + client, + indexName: 'existing_index_with_docs', + aliases: ['existing_index_with_docs_alias'], + esCapabilities, + mappings: { + dynamic: true, + properties: { + someProperty: { + type: 'integer', + }, + }, + _meta: { + migrationMappingPropertyHashes: { + references: '7997cf5a56cc02bdc9c93361bde732b0', + }, + }, + }, + })(); + const docs = [ + { _source: { title: 'doc 1' } }, + { _source: { title: 'doc 2' } }, + { _source: { title: 'doc 3' } }, + { _source: { title: 'saved object 4', type: 'another_unused_type' } }, + { _source: { title: 'f-agent-event 5', type: 'f_agent_event' } }, + { _source: { title: new Array(1000).fill('a').join(), type: 'large' } }, // "large" saved object + ] as unknown as SavedObjectsRawDoc[]; + await bulkOverwriteTransformedDocuments({ + client, + index: 'existing_index_with_docs', + operations: docs.map((doc) => createBulkIndexOperationTuple(doc)), + refresh: 'wait_for', + })(); + + await createIndex({ + client, + indexName: 'existing_index_2', + mappings: { properties: {} }, + esCapabilities, + })(); + await createIndex({ + client, + indexName: 'existing_index_with_write_block', + mappings: { properties: {} }, + esCapabilities, + })(); + await bulkOverwriteTransformedDocuments({ + client, + index: 'existing_index_with_write_block', + operations: docs.map((doc) => createBulkIndexOperationTuple(doc)), + refresh: 'wait_for', + })(); + await setWriteBlock({ client, index: 'existing_index_with_write_block' })(); + await updateAliases({ + client, + aliasActions: [{ add: { index: 'existing_index_2', alias: 'existing_index_2_alias' } }], + 
})(); + }); + + afterAll(async () => { + await client.indices.delete({ index: 'existing_index_with_docs' }).catch(() => ({})); + await client.indices.delete({ index: 'existing_index_2' }).catch(() => ({})); + await client.indices.delete({ index: 'existing_index_with_write_block' }).catch(() => ({})); + + await esServer.stop(); + }); + + describe('initAction', () => { + afterAll(async () => { + await client.cluster.putSettings({ + body: { + persistent: { + // Reset persistent test settings + cluster: { routing: { allocation: { enable: null } } }, + }, + }, + }); + }); + it('resolves right empty record if no indices were found', async () => { + expect.assertions(1); + const task = initAction({ client, indices: ['no_such_index'] }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Right", + "right": Object {}, + } + `); + }); + it('resolves right record with found indices', async () => { + expect.assertions(1); + const res = (await initAction({ + client, + indices: ['no_such_index', 'existing_index_with_docs'], + })()) as Either.Right; + + expect(res.right).toEqual( + expect.objectContaining({ + existing_index_with_docs: expect.objectContaining({ + aliases: { + existing_index_with_docs_alias: {}, + }, + mappings: expect.anything(), + settings: expect.anything(), + }), + }) + ); + }); + it('includes the _meta data of the indices in the response', async () => { + expect.assertions(1); + const res = (await initAction({ + client, + indices: ['existing_index_with_docs'], + })()) as Either.Right; + + expect(res.right).toEqual( + expect.objectContaining({ + existing_index_with_docs: expect.objectContaining({ + aliases: { + existing_index_with_docs_alias: {}, + }, + mappings: { + // FIXME https://github.com/elastic/elasticsearch-js/issues/1796 + dynamic: 'true', + properties: expect.anything(), + _meta: { + migrationMappingPropertyHashes: { + references: '7997cf5a56cc02bdc9c93361bde732b0', + }, + }, + }, + settings: expect.anything(), + }), + }) + 
); + }); + it('resolves left when cluster.routing.allocation.enabled is incompatible', async () => { + expect.assertions(3); + await client.cluster.putSettings({ + body: { + persistent: { + // Disable all routing allocation + cluster: { routing: { allocation: { enable: 'none' } } }, + }, + }, + }); + const task = initAction({ + client, + indices: ['existing_index_with_docs'], + }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "type": "incompatible_cluster_routing_allocation", + }, + } + `); + await client.cluster.putSettings({ + body: { + persistent: { + // Allow routing to existing primaries only + cluster: { routing: { allocation: { enable: 'primaries' } } }, + }, + }, + }); + const task2 = initAction({ + client, + indices: ['existing_index_with_docs'], + }); + await expect(task2()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "type": "incompatible_cluster_routing_allocation", + }, + } + `); + await client.cluster.putSettings({ + body: { + persistent: { + // Allow routing to new primaries only + cluster: { routing: { allocation: { enable: 'new_primaries' } } }, + }, + }, + }); + const task3 = initAction({ + client, + indices: ['existing_index_with_docs'], + }); + await expect(task3()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "type": "incompatible_cluster_routing_allocation", + }, + } + `); + }); + it('resolves right when cluster.routing.allocation.enabled=all', async () => { + expect.assertions(1); + await client.cluster.putSettings({ + body: { + persistent: { + cluster: { routing: { allocation: { enable: 'all' } } }, + }, + }, + }); + const task = initAction({ + client, + indices: ['existing_index_with_docs'], + }); + const result = await task(); + expect(Either.isRight(result)).toBe(true); + }); + }); + + describe('setWriteBlock', () => { + beforeAll(async () => { + await createIndex({ + client, + indexName: 
'new_index_without_write_block', + mappings: { properties: {} }, + esCapabilities, + })(); + }); + it('resolves right when setting the write block succeeds', async () => { + expect.assertions(1); + const task = setWriteBlock({ client, index: 'new_index_without_write_block' }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Right", + "right": "set_write_block_succeeded", + } + `); + }); + it('resolves right when setting a write block on an index that already has one', async () => { + expect.assertions(1); + const task = setWriteBlock({ client, index: 'existing_index_with_write_block' }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Right", + "right": "set_write_block_succeeded", + } + `); + }); + it('once resolved, prevents further writes to the index', async () => { + expect.assertions(1); + const task = setWriteBlock({ client, index: 'new_index_without_write_block' }); + await task(); + const sourceDocs = [ + { _source: { title: 'doc 1' } }, + { _source: { title: 'doc 2' } }, + { _source: { title: 'doc 3' } }, + { _source: { title: 'doc 4' } }, + ] as unknown as SavedObjectsRawDoc[]; + + const res = (await bulkOverwriteTransformedDocuments({ + client, + index: 'new_index_without_write_block', + operations: sourceDocs.map((doc) => createBulkIndexOperationTuple(doc)), + refresh: 'wait_for', + })()) as Either.Left; + + expect(res.left).toEqual({ + type: 'target_index_had_write_block', + }); + }); + it('resolves left index_not_found_exception when the index does not exist', async () => { + expect.assertions(1); + const task = setWriteBlock({ client, index: 'no_such_index' }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "index": "no_such_index", + "type": "index_not_found_exception", + }, + } + `); + }); + }); + + describe('removeWriteBlock', () => { + beforeAll(async () => { + await createIndex({ + client, + indexName: 
'existing_index_without_write_block_2', + mappings: { properties: {} }, + esCapabilities, + })(); + await createIndex({ + client, + indexName: 'existing_index_with_write_block_2', + mappings: { properties: {} }, + esCapabilities, + })(); + await setWriteBlock({ client, index: 'existing_index_with_write_block_2' })(); + }); + it('resolves right if successful when an index already has a write block', async () => { + expect.assertions(1); + const task = removeWriteBlock({ client, index: 'existing_index_with_write_block_2' }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Right", + "right": "remove_write_block_succeeded", + } + `); + }); + it('resolves right if successful when an index does not have a write block', async () => { + expect.assertions(1); + const task = removeWriteBlock({ client, index: 'existing_index_without_write_block_2' }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Right", + "right": "remove_write_block_succeeded", + } + `); + }); + it('rejects if there is a non-retryable error', async () => { + expect.assertions(1); + const task = removeWriteBlock({ client, index: 'no_such_index' }); + await expect(task()).rejects.toThrow('index_not_found_exception'); + }); + }); + + describe('waitForIndexStatus', () => { + afterEach(async () => { + await client.indices.delete({ index: 'red_then_yellow_index' }).catch(() => ({})); + await client.indices.delete({ index: 'red_index' }).catch(() => ({})); + }); + + // routing allocation and number_of_replicas settings not supported on serverless + runOnTraditionalOnly(() => { + it('resolves right after waiting for an index status to be yellow if the index already existed', async () => { + // Create a red index + await client.indices.create( + { + index: 'red_then_yellow_index', + timeout: '5s', + body: { + mappings: { properties: {} }, + settings: { + // Allocate 1 replica so that this index stays yellow + number_of_replicas: '1', + // Disable all 
shard allocation so that the index status is red + routing: { allocation: { enable: 'none' } }, + }, + }, + }, + { maxRetries: 0 /** handle retry ourselves for now */ } + ); + + // Start tracking the index status + const indexStatusPromise = waitForIndexStatus({ + client, + index: 'red_then_yellow_index', + status: 'yellow', + })(); + + const redStatusResponse = await client.cluster.health({ index: 'red_then_yellow_index' }); + expect(redStatusResponse.status).toBe('red'); + + client.indices.putSettings({ + index: 'red_then_yellow_index', + body: { + // Enable all shard allocation so that the index status turns yellow + routing: { allocation: { enable: 'all' } }, + }, + }); + + await indexStatusPromise; + // Assert that the promise didn't resolve before the index became yellow + + const yellowStatusResponse = await client.cluster.health({ + index: 'red_then_yellow_index', + }); + expect(yellowStatusResponse.status).toBe('yellow'); + }); + }); + + it('resolves left with "index_not_yellow_timeout" after waiting for an index status to be yellow timeout', async () => { + // Create a red index + await client.indices + .create({ + index: 'red_index', + timeout: '5s', + body: { + mappings: { properties: {} }, + settings: { + // Allocate no replicas so that this index stays red + number_of_replicas: '0', + // Disable all shard allocation so that the index status is red + index: { routing: { allocation: { enable: 'none' } } }, + }, + }, + }) + .catch((e) => {}); + // try to wait for index status yellow: + const task = waitForIndexStatus({ + client, + index: 'red_index', + timeout: '1s', + status: 'yellow', + }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "message": "[index_not_yellow_timeout] Timeout waiting for the status of the [red_index] index to become 'yellow'", + "type": "index_not_yellow_timeout", + }, + } + `); + }); + + it('resolves left with "index_not_green_timeout" after waiting for an index status 
to be green timeout', async () => { + // Create a yellow index + await client.indices + .create({ + index: 'yellow_index', + timeout: '5s', + body: { + mappings: { properties: {} }, + settings: { + // Allocate no replicas; NOTE(review): a 0-replica index reports green once assigned, not yellow — confirm intent + number_of_replicas: '0', + }, + }, + }) + .catch((e) => {}); + // try to wait for index status green: + // NOTE(review): targets 'red_index', which this test does not create (it created 'yellow_index' above) — confirm the intended index + const task = waitForIndexStatus({ + client, + index: 'red_index', + timeout: '1s', + status: 'green', + }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "message": "[index_not_green_timeout] Timeout waiting for the status of the [red_index] index to become 'green'", + "type": "index_not_green_timeout", + }, + } + `); + }); + }); + + // _clone is blocked on serverless + runOnTraditionalOnly(() => { + describe('cloneIndex', () => { + afterAll(async () => { + try { + // Restore the default setting of 1000 shards per node + await client.cluster.putSettings({ + persistent: { cluster: { max_shards_per_node: null } }, + }); + await client.indices.delete({ index: 'clone_*' }); + } catch (e) { + /** ignore */ + } + }); + it('resolves right if cloning into a new target index', async () => { + const task = cloneIndex({ + client, + source: 'existing_index_with_write_block', + target: 'clone_target_1', + esCapabilities, + }); + expect.assertions(3); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Right", + "right": Object { + "acknowledged": true, + "shardsAcknowledged": true, + }, + } + `); + const { clone_target_1: cloneTarget1 } = await client.indices.getSettings({ + index: 'clone_target_1', + }); + // @ts-expect-error https://github.com/elastic/elasticsearch/issues/89381 + expect(cloneTarget1.settings?.index.mapping?.total_fields.limit).toBe('1500'); + expect(cloneTarget1.settings?.blocks?.write).toBeUndefined(); + }); + it('resolves right if clone target already existed after waiting for index status to be green ', async () => { 
+ expect.assertions(2); + + // Create a red index that we later turn into green + await client.indices + .create({ + index: 'clone_red_then_green_index', + timeout: '5s', + body: { + mappings: { properties: {} }, + settings: { + // Allocate no replicas so that this index can go green once allocation is re-enabled + number_of_replicas: '0', + // Disable all shard allocation so that the index status is red + index: { routing: { allocation: { enable: 'none' } } }, + }, + }, + }) + .catch((e) => {}); + + // Call clone even though the index already exists + const cloneIndexPromise = cloneIndex({ + client, + source: 'existing_index_with_write_block', + target: 'clone_red_then_green_index', + esCapabilities, + })(); + + let indexGreen = false; + setTimeout(() => { + client.indices.putSettings({ + index: 'clone_red_then_green_index', + body: { + // Enable all shard allocation so that the index status goes green + routing: { allocation: { enable: 'all' } }, + }, + }); + // NOTE(review): putSettings is not awaited — indexGreen flips before ES acknowledges the settings change + indexGreen = true; + }, 10); + + await cloneIndexPromise.then((res) => { + // Assert that the promise didn't resolve before the index became green + expect(indexGreen).toBe(true); + expect(res).toMatchInlineSnapshot(` + Object { + "_tag": "Right", + "right": Object { + "acknowledged": true, + "shardsAcknowledged": true, + }, + } + `); + }); + }); + it('resolves left with a index_not_green_timeout if clone target already exists but takes longer than the specified timeout before turning green', async () => { + // Create a red index + await client.indices + .create({ + index: 'clone_red_index', + timeout: '5s', + body: { + mappings: { properties: {} }, + settings: { + // Allocate 1 replica so that this index stays yellow + number_of_replicas: '1', + // Disable all shard allocation so that the index status is red + index: { routing: { allocation: { enable: 'none' } } }, + }, + }, + }) + .catch((e) => {}); + + // Call clone even though the index already exists + let cloneIndexPromise = cloneIndex({ + client, + source: 
'existing_index_with_write_block', + target: 'clone_red_index', + timeout: '1s', + esCapabilities, + })(); + + await expect(cloneIndexPromise).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "message": "[index_not_green_timeout] Timeout waiting for the status of the [clone_red_index] index to become 'green'", + "type": "index_not_green_timeout", + }, + } + `); + + // Now make the index yellow and repeat + + await client.indices.putSettings({ + index: 'clone_red_index', + body: { + // Enable all shard allocation so that the index status goes yellow + routing: { allocation: { enable: 'all' } }, + }, + }); + + // Call clone even though the index already exists + cloneIndexPromise = cloneIndex({ + client, + source: 'existing_index_with_write_block', + target: 'clone_red_index', + timeout: '1s', + esCapabilities, + })(); + + await expect(cloneIndexPromise).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "message": "[index_not_green_timeout] Timeout waiting for the status of the [clone_red_index] index to become 'green'", + "type": "index_not_green_timeout", + }, + } + `); + + // Now make the index green and it should succeed + + await client.indices.putSettings({ + index: 'clone_red_index', + body: { + // Set zero replicas so status goes green + number_of_replicas: 0, + }, + }); + + // Call clone even though the index already exists + cloneIndexPromise = cloneIndex({ + client, + source: 'existing_index_with_write_block', + target: 'clone_red_index', + timeout: '30s', + esCapabilities, + })(); + + await expect(cloneIndexPromise).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Right", + "right": Object { + "acknowledged": true, + "shardsAcknowledged": true, + }, + } + `); + }); + it('resolves left index_not_found_exception if the source index does not exist', async () => { + expect.assertions(1); + const task = cloneIndex({ + client, + source: 'no_such_index', + target: 'clone_target_3', + 
esCapabilities, + }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "index": "no_such_index", + "type": "index_not_found_exception", + }, + } + `); + }); + it('resolves left cluster_shard_limit_exceeded when the action would exceed the maximum normal open shards', async () => { + // Set the max shards per node really low so that any new index that's created would exceed the maximum open shards for this cluster + await client.cluster.putSettings({ persistent: { cluster: { max_shards_per_node: 1 } } }); + const cloneIndexPromise = cloneIndex({ + client, + source: 'existing_index_with_write_block', + target: 'clone_target_4', + esCapabilities, + })(); + await expect(cloneIndexPromise).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "type": "cluster_shard_limit_exceeded", + }, + } + `); + }); + }); + }); + + // Reindex doesn't return any errors on it's own, so we have to test + // together with waitForReindexTask + describe('reindex & waitForReindexTask', () => { + it('resolves right when reindex succeeds without reindex script', async () => { + const res = (await reindex({ + client, + sourceIndex: 'existing_index_with_docs', + targetIndex: 'reindex_target', + reindexScript: Option.none, + requireAlias: false, + excludeOnUpgradeQuery: { match_all: {} }, + batchSize: 1000, + })()) as Either.Right; + const task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '10s' }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Right", + "right": "reindex_succeeded", + } + `); + + const results = await client.search({ index: 'reindex_target', size: 1000 }); + expect((results.hits?.hits as SavedObjectsRawDoc[]).map((doc) => doc._source.title).sort()) + .toMatchInlineSnapshot(` + Array [ + 
"a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a
", + "doc 1", + "doc 2", + "doc 3", + "f-agent-event 5", + "saved object 4", + ] + `); + }); + it('resolves right and excludes all documents not matching the excludeOnUpgradeQuery', async () => { + const res = (await reindex({ + client, + sourceIndex: 'existing_index_with_docs', + targetIndex: 'reindex_target_excluded_docs', + reindexScript: Option.none, + requireAlias: false, + excludeOnUpgradeQuery: { + bool: { + must_not: ['f_agent_event', 'another_unused_type'].map((type) => ({ + term: { type }, + })), + }, + }, + batchSize: 1000, + })()) as Either.Right; + const task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '10s' }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Right", + "right": "reindex_succeeded", + } + `); + + const results = await client.search({ index: 'reindex_target_excluded_docs', size: 1000 }); + expect((results.hits?.hits as SavedObjectsRawDoc[]).map((doc) => doc._source.title).sort()) + .toMatchInlineSnapshot(` + Array [ + 
"a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a
", + "doc 1", + "doc 2", + "doc 3", + ] + `); + }); + it('resolves right when reindex succeeds with reindex script', async () => { + expect.assertions(2); + const res = (await reindex({ + client, + sourceIndex: 'existing_index_with_docs', + targetIndex: 'reindex_target_2', + reindexScript: Option.some(`ctx._source.title = ctx._source.title + '_updated'`), + requireAlias: false, + excludeOnUpgradeQuery: { match_all: {} }, + batchSize: 1000, + })()) as Either.Right; + const task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '10s' }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Right", + "right": "reindex_succeeded", + } + `); + + const results = await client.search({ index: 'reindex_target_2', size: 1000 }); + expect((results.hits?.hits as SavedObjectsRawDoc[]).map((doc) => doc._source.title).sort()) + .toMatchInlineSnapshot(` + Array [ + "a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,
a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a_updated", + "doc 1_updated", + "doc 2_updated", + "doc 3_updated", + "f-agent-event 5_updated", + "saved object 4_updated", + ] + `); + }); + it('resolves right, ignores version conflicts and does not update existing docs when reindex multiple times', async () => { + expect.assertions(3); + // Reindex with a script + let res = (await reindex({ + client, + sourceIndex: 'existing_index_with_docs', + targetIndex: 'reindex_target_3', + reindexScript: Option.some(`ctx._source.title = ctx._source.title + '_updated'`), + requireAlias: false, + excludeOnUpgradeQuery: { match_all: {} }, + batchSize: 1000, + })()) as Either.Right; + let task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '10s' }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Right", + "right": "reindex_succeeded", + } + `); + + // reindex without a script + res = (await reindex({ + client, + sourceIndex: 'existing_index_with_docs', + targetIndex: 'reindex_target_3', + reindexScript: Option.none, + requireAlias: false, + excludeOnUpgradeQuery: { match_all: {} }, + 
batchSize: 1000, + })()) as Either.Right; + task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '10s' }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Right", + "right": "reindex_succeeded", + } + `); + + // Assert that documents weren't overridden by the second, unscripted reindex + const results = await client.search({ index: 'reindex_target_3', size: 1000 }); + expect((results.hits?.hits as SavedObjectsRawDoc[]).map((doc) => doc._source.title).sort()) + .toMatchInlineSnapshot(` + Array [ + "a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a
,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a_updated", + "doc 1_updated", + "doc 2_updated", + "doc 3_updated", + "f-agent-event 5_updated", + "saved object 4_updated", + ] + `); + }); + it('resolves right and proceeds to add missing documents if there are some existing docs conflicts', async () => { + expect.assertions(2); + // Simulate a reindex that only adds some of the documents from the + // source index into the target index + await createIndex({ + client, + indexName: 'reindex_target_4', + mappings: { properties: {} }, + esCapabilities, + })(); + const response = await client.search({ index: 'existing_index_with_docs', size: 1000 }); + const sourceDocs = (response.hits?.hits as SavedObjectsRawDoc[]) + .slice(0, 2) + .map(({ _id, _source }) => ({ + _id, + _source, + })); + await bulkOverwriteTransformedDocuments({ + client, + index: 'reindex_target_4', + operations: sourceDocs.map((doc) => createBulkIndexOperationTuple(doc)), + refresh: 'wait_for', + })(); + + // Now do a real reindex + const res = (await reindex({ + client, + sourceIndex: 'existing_index_with_docs', + targetIndex: 'reindex_target_4', + reindexScript: Option.some(`ctx._source.title = ctx._source.title + '_updated'`), + requireAlias: false, + excludeOnUpgradeQuery: { match_all: {} }, + batchSize: 1000, + })()) as Either.Right; + const task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '10s' }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": 
"Right", + "right": "reindex_succeeded", + } + `); + // Assert that existing documents weren't overridden, but that missing + // documents were added by the reindex + const results = await client.search({ index: 'reindex_target_4', size: 1000 }); + expect((results.hits?.hits as SavedObjectsRawDoc[]).map((doc) => doc._source.title).sort()) + .toMatchInlineSnapshot(` + Array [ + "a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a
,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a_updated", + "doc 1", + "doc 2", + "doc 3_updated", + "f-agent-event 5_updated", + "saved object 4_updated", + ] + `); + }); + it('resolves left incompatible_mapping_exception if all reindex failures are due to a strict_dynamic_mapping_exception', async () => { + expect.assertions(1); + // Simulates one instance having completed the UPDATE_TARGET_MAPPINGS + // step which makes the mappings incompatible with outdated documents. + // If another instance then tries a reindex it will get a + // strict_dynamic_mapping_exception even if the documents already exist + // and should ignore this error. + + // Create an index with incompatible mappings + await createIndex({ + client, + indexName: 'reindex_target_5', + mappings: { + dynamic: 'strict', + properties: { + /** no title field */ + }, + }, + esCapabilities, + })(); + + const { + right: { taskId: reindexTaskId }, + } = (await reindex({ + client, + sourceIndex: 'existing_index_with_docs', + targetIndex: 'reindex_target_5', + reindexScript: Option.none, + requireAlias: false, + excludeOnUpgradeQuery: { match_all: {} }, + batchSize: 1000, + })()) as Either.Right; + const task = waitForReindexTask({ client, taskId: reindexTaskId, timeout: '10s' }); + + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "type": "incompatible_mapping_exception", + }, + } + `); + }); + it('resolves left incompatible_mapping_exception if all reindex failures are due to a mapper_parsing_exception', async () => { + expect.assertions(1); + // Simulates one instance having completed the UPDATE_TARGET_MAPPINGS + // step 
which makes the mappings incompatible with outdated documents. + // If another instance then tries a reindex it will get a + // strict_dynamic_mapping_exception even if the documents already exist + // and should ignore this error. + + // Create an index with incompatible mappings + await createIndex({ + client, + indexName: 'reindex_target_6', + mappings: { + dynamic: false, + properties: { title: { type: 'integer' } }, // integer is incompatible with string title + }, + esCapabilities, + })(); + + const { + right: { taskId: reindexTaskId }, + } = (await reindex({ + client, + sourceIndex: 'existing_index_with_docs', + targetIndex: 'reindex_target_6', + reindexScript: Option.none, + requireAlias: false, + excludeOnUpgradeQuery: { match_all: {} }, + batchSize: 1000, + })()) as Either.Right; + const task = waitForReindexTask({ client, taskId: reindexTaskId, timeout: '10s' }); + + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "type": "incompatible_mapping_exception", + }, + } + `); + }); + it('resolves left index_not_found_exception if source index does not exist', async () => { + expect.assertions(1); + const res = (await reindex({ + client, + sourceIndex: 'no_such_index', + targetIndex: 'reindex_target', + reindexScript: Option.none, + requireAlias: false, + excludeOnUpgradeQuery: { + match_all: {}, + }, + batchSize: 1000, + })()) as Either.Right; + const task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '10s' }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "index": "no_such_index", + "type": "index_not_found_exception", + }, + } + `); + }); + it('resolves left target_index_had_write_block if all failures are due to a write block', async () => { + expect.assertions(1); + const res = (await reindex({ + client, + sourceIndex: 'existing_index_with_docs', + targetIndex: 'existing_index_with_write_block', + reindexScript: 
Option.none, + requireAlias: false, + excludeOnUpgradeQuery: { match_all: {} }, + batchSize: 1000, + })()) as Either.Right; + + const task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '10s' }); + + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "type": "target_index_had_write_block", + }, + } + `); + }); + it('resolves left if requireAlias=true and the target is not an alias', async () => { + expect.assertions(1); + const res = (await reindex({ + client, + sourceIndex: 'existing_index_with_docs', + targetIndex: 'existing_index_with_write_block', + reindexScript: Option.none, + requireAlias: true, + excludeOnUpgradeQuery: { match_all: {} }, + batchSize: 1000, + })()) as Either.Right; + + const task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '10s' }); + + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "index": "existing_index_with_write_block", + "type": "index_not_found_exception", + }, + } + `); + }); + it('resolves left wait_for_task_completion_timeout when the task does not finish within the timeout', async () => { + await waitForIndexStatus({ + client, + index: '.kibana_1', + status: 'yellow', + })(); + + const res = (await reindex({ + client, + sourceIndex: '.kibana_1', + targetIndex: 'reindex_target', + reindexScript: Option.none, + requireAlias: false, + excludeOnUpgradeQuery: { match_all: {} }, + batchSize: 1000, + })()) as Either.Right; + + const task = waitForReindexTask({ client, taskId: res.right.taskId, timeout: '0s' }); + + await expect(task()).resolves.toMatchObject({ + _tag: 'Left', + left: { + error: expect.any(errors.ResponseError), + message: expect.stringContaining('[timeout_exception]'), + type: 'wait_for_task_completion_timeout', + }, + }); + }); + }); + + describe('openPit', () => { + it('opens PointInTime for an index', async () => { + const openPitTask = openPit({ client, index: 
'existing_index_with_docs' }); + const pitResponse = (await openPitTask()) as Either.Right; + + expect(pitResponse.right.pitId).toEqual(expect.any(String)); + + const searchResponse = await client.search({ + body: { + pit: { id: pitResponse.right.pitId }, + }, + }); + + await expect(searchResponse.hits.hits.length).toBeGreaterThan(0); + }); + it('rejects if index does not exist', async () => { + const openPitTask = openPit({ client, index: 'no_such_index' }); + await expect(openPitTask()).rejects.toThrow('index_not_found_exception'); + }); + }); + + describe('readWithPit', () => { + it('requests documents from an index using given PIT', async () => { + const openPitTask = openPit({ client, index: 'existing_index_with_docs' }); + const pitResponse = (await openPitTask()) as Either.Right; + + const readWithPitTask = readWithPit({ + client, + pitId: pitResponse.right.pitId, + query: { match_all: {} }, + batchSize: 1000, + searchAfter: undefined, + }); + const docsResponse = (await readWithPitTask()) as Either.Right; + + await expect(docsResponse.right.outdatedDocuments.length).toBe(6); + }); + + it('requests the batchSize of documents from an index', async () => { + const openPitTask = openPit({ client, index: 'existing_index_with_docs' }); + const pitResponse = (await openPitTask()) as Either.Right; + + const readWithPitTask = readWithPit({ + client, + pitId: pitResponse.right.pitId, + query: { match_all: {} }, + batchSize: 3, + searchAfter: undefined, + }); + const docsResponse = (await readWithPitTask()) as Either.Right; + + await expect(docsResponse.right.outdatedDocuments.length).toBe(3); + }); + + it('it excludes documents not matching the provided "query"', async () => { + const openPitTask = openPit({ client, index: 'existing_index_with_docs' }); + const pitResponse = (await openPitTask()) as Either.Right; + + const readWithPitTask = readWithPit({ + client, + pitId: pitResponse.right.pitId, + query: { + bool: { + must_not: [ + { + term: { + type: 
'f_agent_event', + }, + }, + { + term: { + type: 'another_unused_type', + }, + }, + ], + }, + }, + batchSize: 1000, + searchAfter: undefined, + }); + + const docsResponse = (await readWithPitTask()) as Either.Right; + + expect(docsResponse.right.outdatedDocuments.map((doc) => doc._source.title).sort()) + .toMatchInlineSnapshot(` + Array [ + "a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,
a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a,a", + "doc 1", + "doc 2", + "doc 3", + ] + `); + }); + + it('only returns documents that match the provided "query"', async () => { + const openPitTask = openPit({ client, index: 'existing_index_with_docs' }); + const pitResponse = (await openPitTask()) as Either.Right; + + const readWithPitTask = readWithPit({ + client, + pitId: pitResponse.right.pitId, + query: { + match: { title: { query: 'doc' } }, + }, + batchSize: 1000, + searchAfter: undefined, + }); + + const docsResponse = (await readWithPitTask()) as Either.Right; + + expect(docsResponse.right.outdatedDocuments.map((doc) => doc._source.title).sort()) + .toMatchInlineSnapshot(` + Array [ + "doc 1", + "doc 2", + "doc 3", + ] + `); + }); + + it('returns docs with _seq_no and _primary_term when specified', async () => { + const openPitTask = openPit({ client, index: 'existing_index_with_docs' }); + const pitResponse = (await openPitTask()) as Either.Right; + + const readWithPitTask = readWithPit({ + client, + pitId: pitResponse.right.pitId, + query: { + match: { title: { query: 'doc' } }, + }, + batchSize: 1000, + searchAfter: undefined, + seqNoPrimaryTerm: true, + }); + + const docsResponse = (await readWithPitTask()) as Either.Right; + + expect(docsResponse.right.outdatedDocuments).toEqual( + expect.arrayContaining([ + expect.objectContaining({ + _seq_no: expect.any(Number), + _primary_term: expect.any(Number), + }), + ]) + ); + }); + + it('does not return docs with _seq_no and _primary_term if not specified', async () => { + const openPitTask = openPit({ client, index: 'existing_index_with_docs' }); + const pitResponse = (await openPitTask()) as Either.Right; + + const 
readWithPitTask = readWithPit({ + client, + pitId: pitResponse.right.pitId, + query: { + match: { title: { query: 'doc' } }, + }, + batchSize: 1000, + searchAfter: undefined, + }); + + const docsResponse = (await readWithPitTask()) as Either.Right; + + expect(docsResponse.right.outdatedDocuments).toEqual( + expect.arrayContaining([ + expect.not.objectContaining({ + _seq_no: expect.any(Number), + _primary_term: expect.any(Number), + }), + ]) + ); + }); + + it('returns a left es_response_too_large error when a read batch exceeds the maxResponseSize', async () => { + const openPitTask = openPit({ client, index: 'existing_index_with_docs' }); + const pitResponse = (await openPitTask()) as Either.Right; + + let readWithPitTask = readWithPit({ + client, + pitId: pitResponse.right.pitId, + query: { match_all: {} }, + batchSize: 1, // small batch size so we don't exceed the maxResponseSize + searchAfter: undefined, + maxResponseSizeBytes: 500, // set a small size to force the error + }); + const rightResponse = (await readWithPitTask()) as Either.Right; + + await expect(Either.isRight(rightResponse)).toBe(true); + + readWithPitTask = readWithPit({ + client, + pitId: pitResponse.right.pitId, + query: { match_all: {} }, + batchSize: 10, // a bigger batch will exceed the maxResponseSize + searchAfter: undefined, + maxResponseSizeBytes: 500, // set a small size to force the error + }); + const leftResponse = (await readWithPitTask()) as Either.Left; + + expect(leftResponse.left.type).toBe('es_response_too_large'); + // ES response contains a field that indicates how long it took ES to get the response, e.g.: "took": 7 + // if ES takes more than 9ms, the payload will be 1 byte bigger. 
+ // see https://github.com/elastic/kibana/issues/160994 + // Thus, the statements below account for response times up to 99ms + expect(leftResponse.left.contentLength).toBeGreaterThanOrEqual(3184); + expect(leftResponse.left.contentLength).toBeLessThanOrEqual(3185); + }); + + it('rejects if PIT does not exist', async () => { + const readWithPitTask = readWithPit({ + client, + pitId: 'no_such_pit', + query: { match_all: {} }, + batchSize: 1000, + searchAfter: undefined, + }); + await expect(readWithPitTask()).rejects.toThrow('illegal_argument_exception'); + }); + }); + + describe('closePit', () => { + it('closes PointInTime', async () => { + const openPitTask = openPit({ client, index: 'existing_index_with_docs' }); + const pitResponse = (await openPitTask()) as Either.Right; + + const pitId = pitResponse.right.pitId; + await closePit({ client, pitId })(); + + const searchTask = client.search({ + body: { + pit: { id: pitId }, + }, + }); + + await expect(searchTask).rejects.toThrow('search_phase_execution_exception'); + }); + + it('rejects if PIT does not exist', async () => { + const closePitTask = closePit({ client, pitId: 'no_such_pit' }); + await expect(closePitTask()).rejects.toThrow('illegal_argument_exception'); + }); + }); + + describe('transformDocs', () => { + it('applies "transformRawDocs" and returns the transformed documents', async () => { + const originalDocs = [ + { _id: 'foo:1', _source: { type: 'dashboard', value: 1 } }, + { _id: 'foo:2', _source: { type: 'dashboard', value: 2 } }, + ]; + + function innerTransformRawDocs( + docs: SavedObjectsRawDoc[] + ): TaskEither { + return async () => { + const processedDocs: SavedObjectsRawDoc[] = []; + for (const doc of docs) { + doc._source.value += 1; + processedDocs.push(doc); + } + return Either.right({ processedDocs }); + }; + } + + const transformTask = transformDocs({ + transformRawDocs: innerTransformRawDocs, + outdatedDocuments: originalDocs, + }); + + const resultsWithProcessDocs = ( + (await 
transformTask()) as Either.Right + ).right.processedDocs; + expect(resultsWithProcessDocs.length).toEqual(2); + const foo2 = resultsWithProcessDocs.find((h) => h._id === 'foo:2'); + expect(foo2?._source?.value).toBe(3); + }); + }); + + describe('waitForPickupUpdatedMappingsTask', () => { + it('rejects if there are failures', async () => { + const res = (await pickupUpdatedMappings( + client, + 'existing_index_with_write_block', + 1000 + )()) as Either.Right; + + const task = waitForPickupUpdatedMappingsTask({ + client, + taskId: res.right.taskId, + timeout: '10s', + }); + + // We can't do a snapshot match because the response includes an index + // id which ES assigns dynamically + await expect(task()).rejects.toMatchObject({ + message: + /pickupUpdatedMappings task failed with the following failures:\n\[\{\"index\":\"existing_index_with_write_block\"/, + }); + }); + it('rejects if there is an error', async () => { + const res = (await pickupUpdatedMappings( + client, + 'no_such_index', + 1000 + )()) as Either.Right; + + const task = waitForPickupUpdatedMappingsTask({ + client, + taskId: res.right.taskId, + timeout: '10s', + }); + + await expect(task()).rejects.toThrow('index_not_found_exception'); + }); + + it('resolves left wait_for_task_completion_timeout when the task does not complete within the timeout', async () => { + const res = (await pickupUpdatedMappings( + client, + '.kibana_1', + 1000 + )()) as Either.Right; + + const task = waitForPickupUpdatedMappingsTask({ + client, + taskId: res.right.taskId, + timeout: '0s', + }); + + await expect(task()).resolves.toMatchObject({ + _tag: 'Left', + left: { + error: expect.any(errors.ResponseError), + message: expect.stringContaining('[timeout_exception]'), + type: 'wait_for_task_completion_timeout', + }, + }); + }); + it('resolves right when successful', async () => { + const res = (await pickupUpdatedMappings( + client, + 'existing_index_with_docs', + 1000 + )()) as Either.Right; + + const task = 
waitForPickupUpdatedMappingsTask({ + client, + taskId: res.right.taskId, + timeout: '10s', + }); + + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Right", + "right": "pickup_updated_mappings_succeeded", + } + `); + }); + }); + + describe('updateAndPickupMappings', () => { + it('resolves right when mappings were updated and picked up', async () => { + // Create an index without any mappings and insert documents into it + await createIndex({ + client, + indexName: 'existing_index_without_mappings', + mappings: { + dynamic: false, + properties: {}, + }, + esCapabilities, + })(); + const sourceDocs = [ + { _source: { title: 'doc 1' } }, + { _source: { title: 'doc 2' } }, + { _source: { title: 'doc 3' } }, + { _source: { title: 'doc 4' } }, + ] as unknown as SavedObjectsRawDoc[]; + await bulkOverwriteTransformedDocuments({ + client, + index: 'existing_index_without_mappings', + operations: sourceDocs.map((doc) => createBulkIndexOperationTuple(doc)), + refresh: 'wait_for', + })(); + + // Assert that we can't search over the unmapped fields of the document + + const originalSearchResults = await client.search({ + index: 'existing_index_without_mappings', + size: 1000, + query: { + match: { title: { query: 'doc' } }, + }, + }); + expect(originalSearchResults.hits?.hits.length).toBe(0); + + // Update and pickup mappings so that the title field is searchable + const res = await updateAndPickupMappings({ + client, + index: 'existing_index_without_mappings', + mappings: { + properties: { + title: { type: 'text' }, + }, + }, + batchSize: 1000, + })(); + expect(Either.isRight(res)).toBe(true); + const taskId = (res as Either.Right).right.taskId; + await waitForPickupUpdatedMappingsTask({ client, taskId, timeout: '60s' })(); + + // Repeat the search expecting to be able to find the existing documents + const pickedUpSearchResults = await client.search({ + index: 'existing_index_without_mappings', + size: 1000, + query: { + match: { title: { query: 
'doc' } }, + }, + }); + expect(pickedUpSearchResults.hits?.hits.length).toBe(4); + }); + }); + + describe('updateMappings', () => { + it('rejects if ES throws an error', async () => { + const task = updateMappings({ + client, + index: 'no_such_index', + mappings: { + properties: { + created_at: { + type: 'date', + }, + }, + _meta: { + migrationMappingPropertyHashes: { + references: 'updateda56cc02bdc9c93361bupdated', + newReferences: 'fooBarHashMd509387420934879300d9', + }, + }, + }, + })(); + + await expect(task).rejects.toThrow('index_not_found_exception'); + }); + + it('resolves left when the mappings are incompatible', async () => { + const res = await updateMappings({ + client, + index: 'existing_index_with_docs', + mappings: { + properties: { + someProperty: { + type: 'date', // attempt to change an existing field's type in an incompatible fashion + }, + }, + _meta: { + migrationMappingPropertyHashes: { + references: 'updateda56cc02bdc9c93361bupdated', + newReferences: 'fooBarHashMd509387420934879300d9', + }, + }, + }, + })(); + + expect(Either.isLeft(res)).toBe(true); + expect(res).toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "type": "incompatible_mapping_exception", + }, + } + `); + }); + + it('resolves right when mappings are correctly updated', async () => { + const res = await updateMappings({ + client, + index: 'existing_index_with_docs', + mappings: { + properties: { + created_at: { + type: 'date', + }, + }, + _meta: { + migrationMappingPropertyHashes: { + references: 'updateda56cc02bdc9c93361bupdated', + newReferences: 'fooBarHashMd509387420934879300d9', + }, + }, + }, + })(); + + expect(Either.isRight(res)).toBe(true); + + const indices = await client.indices.get({ + index: ['existing_index_with_docs'], + }); + + expect(indices.existing_index_with_docs.mappings?.properties).toEqual( + expect.objectContaining({ + created_at: { + type: 'date', + }, + }) + ); + + 
expect(indices.existing_index_with_docs.mappings?._meta).toEqual({ + migrationMappingPropertyHashes: { + references: 'updateda56cc02bdc9c93361bupdated', + newReferences: 'fooBarHashMd509387420934879300d9', + }, + }); + }); + }); + + describe('updateAliases', () => { + describe('remove', () => { + it('resolves left index_not_found_exception when the index does not exist', async () => { + const task = updateAliases({ + client, + aliasActions: [ + { + remove: { + alias: 'no_such_alias', + index: 'no_such_index', + must_exist: false, + }, + }, + ], + }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "index": "no_such_index", + "type": "index_not_found_exception", + }, + } + `); + }); + describe('with must_exist=false', () => { + it('resolves left alias_not_found_exception when alias does not exist', async () => { + const task = updateAliases({ + client, + aliasActions: [ + { + remove: { + alias: 'no_such_alias', + index: 'existing_index_with_docs', + must_exist: false, + }, + }, + ], + }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "type": "alias_not_found_exception", + }, + } + `); + }); + }); + describe('with must_exist=true', () => { + it('resolves left alias_not_found_exception when alias does not exist on specified index', async () => { + const task = updateAliases({ + client, + aliasActions: [ + { + remove: { + alias: 'existing_index_2_alias', + index: 'existing_index_with_docs', + must_exist: true, + }, + }, + ], + }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "type": "alias_not_found_exception", + }, + } + `); + }); + it('resolves left alias_not_found_exception when alias does not exist', async () => { + const task = updateAliases({ + client, + aliasActions: [ + { + remove: { + alias: 'no_such_alias', + index: 'existing_index_with_docs', + must_exist: true, + }, + }, + ], + }); 
+ await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "type": "alias_not_found_exception", + }, + } + `); + }); + }); + }); + describe('remove_index', () => { + it('left index_not_found_exception if index does not exist', async () => { + const task = updateAliases({ + client, + aliasActions: [ + { + remove_index: { + index: 'no_such_index', + }, + }, + ], + }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "index": "no_such_index", + "type": "index_not_found_exception", + }, + } + `); + }); + it('left remove_index_not_a_concrete_index when remove_index targets an alias', async () => { + const task = updateAliases({ + client, + aliasActions: [ + { + remove_index: { + index: 'existing_index_2_alias', + }, + }, + ], + }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "type": "remove_index_not_a_concrete_index", + }, + } + `); + }); + }); + }); + + describe('createIndex', () => { + afterEach(async () => { + // Restore the default setting of 1000 shards per node + await client.cluster.putSettings({ persistent: { cluster: { max_shards_per_node: null } } }); + }); + afterAll(async () => { + await client.indices.delete({ index: 'red_then_yellow_index' }).catch(() => ({})); + await client.indices.delete({ index: 'yellow_then_green_index' }).catch(() => ({})); + await client.indices.delete({ index: 'create_new_index' }).catch(() => ({})); + }); + it('resolves right after waiting for an index status to become green when cluster state is not propagated within the timeout', async () => { + // By specifying a very short timeout Elasticsearch will respond before the shard is allocated + const createIndexPromise = createIndex({ + client, + indexName: 'create_new_index', + mappings: undefined as any, + timeout: '1nanos', + esCapabilities, + })(); + await expect(createIndexPromise).resolves.toEqual({ + _tag: 
'Right', + right: 'create_index_succeeded', + }); + const { create_new_index: createNewIndex } = await client.indices.getSettings({ + index: 'create_new_index', + }); + // @ts-expect-error https://github.com/elastic/elasticsearch/issues/89381 + expect(createNewIndex.settings?.index?.mapping.total_fields.limit).toBe('1500'); + }); + + // number_of_replicas and routing allocation not available on serverless + runOnTraditionalOnly(() => { + it('resolves left if an existing index status does not become green', async () => { + expect.assertions(2); + // Create a red index + await client.indices + .create( + { + index: 'red_then_yellow_index', + timeout: '5s', + body: { + mappings: { properties: {} }, + settings: { + // Allocate 1 replica so that this index stays yellow + number_of_replicas: '1', + // Disable all shard allocation so that the index status starts as red + index: { routing: { allocation: { enable: 'none' } } }, + }, + }, + }, + { maxRetries: 0 /** handle retry ourselves for now */ } + ) + .catch((e) => { + /** ignore */ + }); + + // Call createIndex even though the index already exists + const createIndexPromise = createIndex({ + client, + indexName: 'red_then_yellow_index', + mappings: undefined as any, + esCapabilities, + })(); + let indexYellow = false; + + setTimeout(() => { + client.indices.putSettings({ + index: 'red_then_yellow_index', + body: { + // Renable allocation so that the status becomes yellow + routing: { allocation: { enable: 'all' } }, + }, + }); + indexYellow = true; + }, 10); + + await createIndexPromise.then((err) => { + // Assert that the promise didn't resolve before the index became yellow + expect(indexYellow).toBe(true); + expect(err).toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "message": "[index_not_green_timeout] Timeout waiting for the status of the [red_then_yellow_index] index to become 'green'", + "type": "index_not_green_timeout", + }, + } + `); + }); + }); + it('resolves right after waiting 
for an existing index status to become green', async () => { + expect.assertions(2); + // Create a yellow index + await client.indices + .create({ + index: 'yellow_then_green_index', + timeout: '5s', + body: { + mappings: { properties: {} }, + settings: { + // Allocate 1 replica so that this index stays yellow + number_of_replicas: '1', + }, + }, + }) + .catch((e) => { + /** ignore */ + }); + + // Call createIndex even though the index already exists + const createIndexPromise = createIndex({ + client, + indexName: 'yellow_then_green_index', + mappings: undefined as any, + esCapabilities, + })(); + let indexGreen = false; + + setTimeout(() => { + client.indices.putSettings({ + index: 'yellow_then_green_index', + body: { + // Set 0 replican so that this index becomes green + number_of_replicas: '0', + }, + }); + indexGreen = true; + }, 10); + + await createIndexPromise.then((res) => { + // Assert that the promise didn't resolve before the index became green + expect(indexGreen).toBe(true); + expect(res).toMatchInlineSnapshot(` + Object { + "_tag": "Right", + "right": "index_already_exists", + } + `); + }); + }); + }); + + it('resolves left cluster_shard_limit_exceeded when the action would exceed the maximum normal open shards', async () => { + // Set the max shards per node really low so that any new index that's created would exceed the maximum open shards for this cluster + await client.cluster.putSettings({ persistent: { cluster: { max_shards_per_node: 1 } } }); + const createIndexPromise = createIndex({ + client, + indexName: 'create_index_1', + mappings: undefined as any, + esCapabilities, + })(); + await expect(createIndexPromise).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "type": "cluster_shard_limit_exceeded", + }, + } + `); + }); + it('rejects when there is an unexpected error creating the index', async () => { + // Creating an index with the same name as an existing alias to induce + // failure + await expect( + 
createIndex({ + client, + indexName: 'existing_index_2_alias', + mappings: undefined as any, + esCapabilities, + })() + ).rejects.toThrow('invalid_index_name_exception'); + }); + }); + + describe('bulkOverwriteTransformedDocuments', () => { + it('resolves right when documents do not yet exist in the index', async () => { + const newDocs = [ + { _source: { title: 'doc 5' } }, + { _source: { title: 'doc 6' } }, + { _source: { title: 'doc 7' } }, + ] as unknown as SavedObjectsRawDoc[]; + const task = bulkOverwriteTransformedDocuments({ + client, + index: 'existing_index_with_docs', + operations: newDocs.map((doc) => createBulkIndexOperationTuple(doc)), + refresh: 'wait_for', + }); + + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Right", + "right": "bulk_index_succeeded", + } + `); + }); + it('resolves right even if there were some version_conflict_engine_exception', async () => { + const response = await client.search({ index: 'existing_index_with_docs', size: 1000 }); + const existingDocs = response.hits?.hits as SavedObjectsRawDoc[]; + + const task = bulkOverwriteTransformedDocuments({ + client, + index: 'existing_index_with_docs', + operations: [ + ...existingDocs, + { _source: { title: 'doc 8' } } as unknown as SavedObjectsRawDoc, + ].map((doc) => createBulkIndexOperationTuple(doc)), + refresh: 'wait_for', + }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Right", + "right": "bulk_index_succeeded", + } + `); + }); + it('resolves left index_not_found_exception if the index does not exist and useAliasToPreventAutoCreate=true', async () => { + const newDocs = [ + { _source: { title: 'doc 5' } }, + { _source: { title: 'doc 6' } }, + { _source: { title: 'doc 7' } }, + ] as unknown as SavedObjectsRawDoc[]; + await expect( + bulkOverwriteTransformedDocuments({ + client, + index: 'existing_index_with_docs_alias_that_does_not_exist', + useAliasToPreventAutoCreate: true, + operations: newDocs.map((doc) => 
createBulkIndexOperationTuple(doc)), + refresh: 'wait_for', + })() + ).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "index": "existing_index_with_docs_alias_that_does_not_exist", + "type": "index_not_found_exception", + }, + } + `); + }); + it('resolves left target_index_had_write_block if there are write_block errors', async () => { + const newDocs = [ + { _source: { title: 'doc 5' } }, + { _source: { title: 'doc 6' } }, + { _source: { title: 'doc 7' } }, + ] as unknown as SavedObjectsRawDoc[]; + await expect( + bulkOverwriteTransformedDocuments({ + client, + index: 'existing_index_with_write_block', + operations: newDocs.map((doc) => createBulkIndexOperationTuple(doc)), + refresh: 'wait_for', + })() + ).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "type": "target_index_had_write_block", + }, + } + `); + }); + + // no way to configure http.max_content_length on the serverless instance for now. + runOnTraditionalOnly(() => { + it('resolves left request_entity_too_large_exception when the payload is too large', async () => { + const newDocs = new Array(10000).fill({ + _source: { + title: + 'how do I create a document thats large enoug to exceed the limits without typing long sentences', + }, + }) as SavedObjectsRawDoc[]; + const task = bulkOverwriteTransformedDocuments({ + client, + index: 'existing_index_with_docs', + operations: newDocs.map((doc) => createBulkIndexOperationTuple(doc)), + }); + await expect(task()).resolves.toMatchInlineSnapshot(` + Object { + "_tag": "Left", + "left": Object { + "type": "request_entity_too_large_exception", + }, + } + `); + }); + }); + }); +}; diff --git a/src/core/server/integration_tests/saved_objects/migrations/kibana_migrator_test_kit.ts b/src/core/server/integration_tests/saved_objects/migrations/kibana_migrator_test_kit.ts index 6249137d8e7b..a911fcdbdead 100644 --- 
a/src/core/server/integration_tests/saved_objects/migrations/kibana_migrator_test_kit.ts +++ b/src/core/server/integration_tests/saved_objects/migrations/kibana_migrator_test_kit.ts @@ -16,7 +16,6 @@ import { ConfigService, Env } from '@kbn/config'; import { getEnvOptions } from '@kbn/config-mocks'; import { REPO_ROOT } from '@kbn/repo-info'; import { KibanaMigrator } from '@kbn/core-saved-objects-migration-server-internal'; -import { elasticsearchServiceMock } from '@kbn/core-elasticsearch-server-mocks'; import { SavedObjectConfig, type SavedObjectsConfigType, @@ -30,6 +29,7 @@ import { SavedObjectsRepository } from '@kbn/core-saved-objects-api-server-inter import { ElasticsearchConfig, type ElasticsearchConfigType, + getCapabilitiesFromClient, } from '@kbn/core-elasticsearch-server-internal'; import { AgentManager, configureClient } from '@kbn/core-elasticsearch-client-server-internal'; import { type LoggingConfigType, LoggingSystem } from '@kbn/core-logging-server-internal'; @@ -276,6 +276,7 @@ interface GetMigratorParams { kibanaBranch: string; nodeRoles: NodeRoles; } + const getMigrator = async ({ configService, client, @@ -300,6 +301,8 @@ const getMigrator = async ({ links: getDocLinks({ kibanaBranch }), }; + const esCapabilities = await getCapabilitiesFromClient(client); + return new KibanaMigrator({ client, kibanaIndex, @@ -311,7 +314,7 @@ const getMigrator = async ({ docLinks, waitForMigrationCompletion: false, // ensure we have an active role in the migration nodeRoles, - esCapabilities: elasticsearchServiceMock.createCapabilities(), + esCapabilities, }); }; diff --git a/src/core/server/integration_tests/saved_objects/migrations/shared_suites/zdt/basic_document_migration.ts b/src/core/server/integration_tests/saved_objects/migrations/shared_suites/zdt/basic_document_migration.ts new file mode 100644 index 000000000000..0c43a40478d0 --- /dev/null +++ b/src/core/server/integration_tests/saved_objects/migrations/shared_suites/zdt/basic_document_migration.ts 
@@ -0,0 +1,247 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +import fs from 'fs/promises'; +import { range, sortBy } from 'lodash'; +import { SavedObjectsBulkCreateObject } from '@kbn/core-saved-objects-api-server'; +import '../../jest_matchers'; +import { getKibanaMigratorTestKit } from '../../kibana_migrator_test_kit'; +import { delay, parseLogFile } from '../../test_utils'; +import { EsRunner, EsServer } from '../../test_types'; +import { + getBaseMigratorParams, + getSampleAType, + getSampleBType, +} from '../../fixtures/zdt_base.fixtures'; + +export function createBasicDocumentsMigrationTest({ + startES, + logFilePath, +}: { + startES: EsRunner; + logFilePath: string; +}) { + let esServer: EsServer; + + beforeAll(async () => { + await fs.unlink(logFilePath).catch(() => {}); + esServer = await startES(); + }); + + afterAll(async () => { + await esServer?.stop(); + await delay(10); + }); + + const createBaseline = async () => { + const { runMigrations, savedObjectsRepository } = await getKibanaMigratorTestKit({ + ...getBaseMigratorParams(), + types: [getSampleAType(), getSampleBType()], + }); + await runMigrations(); + + const sampleAObjs = range(5).map((number) => ({ + id: `a-${number}`, + type: 'sample_a', + attributes: { + keyword: `a_${number}`, + boolean: true, + }, + })); + + await savedObjectsRepository.bulkCreate(sampleAObjs); + + const sampleBObjs = range(5).map((number) => ({ + id: `b-${number}`, + type: 'sample_b', + attributes: { + text: `i am number ${number}`, + text2: `some static text`, + }, + })); + + await savedObjectsRepository.bulkCreate(sampleBObjs); + }; + + it('migrates the documents', async () => { + await createBaseline(); 
+ + const typeA = getSampleAType(); + const typeB = getSampleBType(); + + // typeA -> we add a new field and bump the model version by one with a migration + + typeA.mappings.properties = { + ...typeA.mappings.properties, + someAddedField: { type: 'keyword' }, + }; + + typeA.modelVersions = { + ...typeA.modelVersions, + '2': { + changes: [ + { + type: 'data_backfill', + backfillFn: (doc) => { + return { + attributes: { + someAddedField: `${doc.attributes.keyword}-mig`, + }, + }; + }, + }, + { + type: 'mappings_addition', + addedMappings: { + someAddedField: { type: 'keyword' }, + }, + }, + ], + }, + }; + + // typeB -> we add two new model version with migrations + + typeB.modelVersions = { + ...typeB.modelVersions, + '2': { + changes: [ + { + type: 'data_backfill', + backfillFn: (doc) => { + return { + attributes: { + text2: `${doc.attributes.text2} - mig2`, + }, + }; + }, + }, + ], + }, + '3': { + changes: [ + { + type: 'data_backfill', + backfillFn: (doc) => { + return { + attributes: { + text2: `${doc.attributes.text2} - mig3`, + }, + }; + }, + }, + ], + }, + }; + + const { runMigrations, client, savedObjectsRepository } = await getKibanaMigratorTestKit({ + ...getBaseMigratorParams(), + logFilePath, + types: [typeA, typeB], + }); + + await runMigrations(); + + const indices = await client.indices.get({ index: '.kibana*' }); + expect(Object.keys(indices)).toEqual(['.kibana_1']); + + const index = indices['.kibana_1']; + const mappings = index.mappings ?? {}; + const mappingMeta = mappings._meta ?? 
{}; + + expect(mappings.properties).toEqual( + expect.objectContaining({ + sample_a: typeA.mappings, + sample_b: typeB.mappings, + }) + ); + + expect(mappingMeta.docVersions).toEqual({ + sample_a: '10.2.0', + sample_b: '10.3.0', + }); + + const { saved_objects: sampleADocs } = await savedObjectsRepository.find({ type: 'sample_a' }); + const { saved_objects: sampleBDocs } = await savedObjectsRepository.find({ type: 'sample_b' }); + + expect(sampleADocs).toHaveLength(5); + expect(sampleBDocs).toHaveLength(5); + + const sampleAData = sortBy(sampleADocs, 'id').map((object) => ({ + id: object.id, + type: object.type, + attributes: object.attributes, + })); + + expect(sampleAData).toEqual([ + { + id: 'a-0', + type: 'sample_a', + attributes: { boolean: true, keyword: 'a_0', someAddedField: 'a_0-mig' }, + }, + { + id: 'a-1', + type: 'sample_a', + attributes: { boolean: true, keyword: 'a_1', someAddedField: 'a_1-mig' }, + }, + { + id: 'a-2', + type: 'sample_a', + attributes: { boolean: true, keyword: 'a_2', someAddedField: 'a_2-mig' }, + }, + { + id: 'a-3', + type: 'sample_a', + attributes: { boolean: true, keyword: 'a_3', someAddedField: 'a_3-mig' }, + }, + { + id: 'a-4', + type: 'sample_a', + attributes: { boolean: true, keyword: 'a_4', someAddedField: 'a_4-mig' }, + }, + ]); + + const sampleBData = sortBy(sampleBDocs, 'id').map((object) => ({ + id: object.id, + type: object.type, + attributes: object.attributes, + })); + + expect(sampleBData).toEqual([ + { + id: 'b-0', + type: 'sample_b', + attributes: { text: 'i am number 0', text2: 'some static text - mig2 - mig3' }, + }, + { + id: 'b-1', + type: 'sample_b', + attributes: { text: 'i am number 1', text2: 'some static text - mig2 - mig3' }, + }, + { + id: 'b-2', + type: 'sample_b', + attributes: { text: 'i am number 2', text2: 'some static text - mig2 - mig3' }, + }, + { + id: 'b-3', + type: 'sample_b', + attributes: { text: 'i am number 3', text2: 'some static text - mig2 - mig3' }, + }, + { + id: 'b-4', + type: 
'sample_b', + attributes: { text: 'i am number 4', text2: 'some static text - mig2 - mig3' }, + }, + ]); + + const records = await parseLogFile(logFilePath); + expect(records).toContainLogEntry('Starting to process 10 documents'); + expect(records).toContainLogEntry('Migration completed'); + }); +} diff --git a/src/core/server/integration_tests/saved_objects/migrations/shared_suites/zdt/standard_workflow.ts b/src/core/server/integration_tests/saved_objects/migrations/shared_suites/zdt/standard_workflow.ts new file mode 100644 index 000000000000..b22e522d1d2c --- /dev/null +++ b/src/core/server/integration_tests/saved_objects/migrations/shared_suites/zdt/standard_workflow.ts @@ -0,0 +1,118 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +import fs from 'fs/promises'; +import { range } from 'lodash'; +import { SavedObjectsBulkCreateObject } from '@kbn/core-saved-objects-api-server'; +import '../../jest_matchers'; +import { getKibanaMigratorTestKit } from '../../kibana_migrator_test_kit'; +import { delay, parseLogFile } from '../../test_utils'; +import { EsRunner, EsServer } from '../../test_types'; +import { + getBaseMigratorParams, + getSampleAType, + getSampleBType, + dummyModelVersion, +} from '../../fixtures/zdt_base.fixtures'; + +export function createStandardWorkflowTest({ + startES, + logFilePath, +}: { + startES: EsRunner; + logFilePath: string; +}) { + let esServer: EsServer; + + beforeAll(async () => { + await fs.unlink(logFilePath).catch(() => {}); + esServer = await startES(); + }); + + afterAll(async () => { + await esServer?.stop(); + await delay(10); + }); + + const createBaseline = async () => { + const { runMigrations, savedObjectsRepository } = await getKibanaMigratorTestKit({ + ...getBaseMigratorParams(), + types: [getSampleAType(), getSampleBType()], + }); + await runMigrations(); + + const sampleAObjs = range(5).map((number) => ({ + id: `a-${number}`, + type: 'sample_a', + attributes: { keyword: `a_${number}`, boolean: true }, + })); + + await savedObjectsRepository.bulkCreate(sampleAObjs); + + const sampleBObjs = range(5).map((number) => ({ + id: `b-${number}`, + type: 'sample_b', + attributes: { text: `i am number ${number}`, text2: `some static text` }, + })); + + await savedObjectsRepository.bulkCreate(sampleBObjs); + }; + + it('follows the expected stages and transitions', async () => { + await createBaseline(); + + const typeA = getSampleAType(); + const typeB = getSampleBType(); + + typeA.modelVersions = { + ...typeA.modelVersions, + '2': dummyModelVersion, + }; + + typeB.modelVersions = { + ...typeB.modelVersions, + '2': dummyModelVersion, + }; + + const { runMigrations } = await getKibanaMigratorTestKit({ + ...getBaseMigratorParams(), + logFilePath, + types: 
[typeA, typeB], + }); + + await runMigrations(); + + const records = await parseLogFile(logFilePath); + + expect(records).toContainLogEntries( + [ + 'INIT -> UPDATE_INDEX_MAPPINGS', + 'UPDATE_INDEX_MAPPINGS -> UPDATE_INDEX_MAPPINGS_WAIT_FOR_TASK', + 'UPDATE_INDEX_MAPPINGS_WAIT_FOR_TASK -> UPDATE_MAPPING_MODEL_VERSIONS', + 'UPDATE_MAPPING_MODEL_VERSIONS -> INDEX_STATE_UPDATE_DONE', + 'INDEX_STATE_UPDATE_DONE -> DOCUMENTS_UPDATE_INIT', + 'DOCUMENTS_UPDATE_INIT -> SET_DOC_MIGRATION_STARTED', + 'SET_DOC_MIGRATION_STARTED -> SET_DOC_MIGRATION_STARTED_WAIT_FOR_INSTANCES', + 'SET_DOC_MIGRATION_STARTED_WAIT_FOR_INSTANCES -> CLEANUP_UNKNOWN_AND_EXCLUDED_DOCS', + 'CLEANUP_UNKNOWN_AND_EXCLUDED_DOCS -> CLEANUP_UNKNOWN_AND_EXCLUDED_DOCS_WAIT_FOR_TASK', + 'CLEANUP_UNKNOWN_AND_EXCLUDED_DOCS_WAIT_FOR_TASK -> OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT', + 'OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT -> OUTDATED_DOCUMENTS_SEARCH_READ', + 'OUTDATED_DOCUMENTS_SEARCH_READ -> OUTDATED_DOCUMENTS_SEARCH_TRANSFORM', + 'OUTDATED_DOCUMENTS_SEARCH_TRANSFORM -> OUTDATED_DOCUMENTS_SEARCH_BULK_INDEX', + 'OUTDATED_DOCUMENTS_SEARCH_BULK_INDEX -> OUTDATED_DOCUMENTS_SEARCH_READ', + 'OUTDATED_DOCUMENTS_SEARCH_READ -> OUTDATED_DOCUMENTS_SEARCH_CLOSE_PIT', + 'OUTDATED_DOCUMENTS_SEARCH_CLOSE_PIT -> OUTDATED_DOCUMENTS_SEARCH_REFRESH', + 'OUTDATED_DOCUMENTS_SEARCH_REFRESH -> UPDATE_DOCUMENT_MODEL_VERSIONS', + 'UPDATE_DOCUMENT_MODEL_VERSIONS -> UPDATE_DOCUMENT_MODEL_VERSIONS_WAIT_FOR_INSTANCES', + 'UPDATE_DOCUMENT_MODEL_VERSIONS_WAIT_FOR_INSTANCES -> DONE', + 'Migration completed', + ], + { ordered: true } + ); + }); +} diff --git a/src/core/server/integration_tests/saved_objects/migrations/test_types.ts b/src/core/server/integration_tests/saved_objects/migrations/test_types.ts new file mode 100644 index 000000000000..ed6990dd2b68 --- /dev/null +++ b/src/core/server/integration_tests/saved_objects/migrations/test_types.ts @@ -0,0 +1,13 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+export interface EsServer {
+  stop: () => Promise<void>;
+}
+
+export type EsRunner = () => Promise<EsServer>;
diff --git a/src/core/server/integration_tests/saved_objects/migrations/zdt_1/basic_document_migration.test.ts b/src/core/server/integration_tests/saved_objects/migrations/zdt_1/basic_document_migration.test.ts
index f6234c770eaf..e38207a4ffeb 100644
--- a/src/core/server/integration_tests/saved_objects/migrations/zdt_1/basic_document_migration.test.ts
+++ b/src/core/server/integration_tests/saved_objects/migrations/zdt_1/basic_document_migration.test.ts
@@ -7,238 +7,13 @@
  */
 
 import Path from 'path';
-import fs from 'fs/promises';
-import { range, sortBy } from 'lodash';
-import { type TestElasticsearchUtils } from '@kbn/core-test-helpers-kbn-server';
-import { SavedObjectsBulkCreateObject } from '@kbn/core-saved-objects-api-server';
 import '../jest_matchers';
-import { getKibanaMigratorTestKit, startElasticsearch } from '../kibana_migrator_test_kit';
-import { delay, parseLogFile } from '../test_utils';
-import {
-  getBaseMigratorParams,
-  getSampleAType,
-  getSampleBType,
-} from '../fixtures/zdt_base.fixtures';
-
-export const logFilePath = Path.join(__dirname, 'basic_document_migration.test.log');
+import { startElasticsearch } from '../kibana_migrator_test_kit';
+import { createBasicDocumentsMigrationTest } from '../shared_suites/zdt/basic_document_migration';
 
 describe('ZDT upgrades - basic document migration', () => {
-  let esServer: TestElasticsearchUtils['es'];
-
-  beforeAll(async () => {
-    await fs.unlink(logFilePath).catch(() => {});
-    esServer = await startElasticsearch();
-  });
-
-  afterAll(async () => {
-    await esServer?.stop();
-    await delay(10);
-  });
-
-  const 
createBaseline = async () => { - const { runMigrations, savedObjectsRepository } = await getKibanaMigratorTestKit({ - ...getBaseMigratorParams(), - types: [getSampleAType(), getSampleBType()], - }); - await runMigrations(); - - const sampleAObjs = range(5).map((number) => ({ - id: `a-${number}`, - type: 'sample_a', - attributes: { - keyword: `a_${number}`, - boolean: true, - }, - })); - - await savedObjectsRepository.bulkCreate(sampleAObjs); - - const sampleBObjs = range(5).map((number) => ({ - id: `b-${number}`, - type: 'sample_b', - attributes: { - text: `i am number ${number}`, - text2: `some static text`, - }, - })); - - await savedObjectsRepository.bulkCreate(sampleBObjs); - }; - - it('migrates the documents', async () => { - await createBaseline(); - - const typeA = getSampleAType(); - const typeB = getSampleBType(); - - // typeA -> we add a new field and bump the model version by one with a migration - - typeA.mappings.properties = { - ...typeA.mappings.properties, - someAddedField: { type: 'keyword' }, - }; - - typeA.modelVersions = { - ...typeA.modelVersions, - '2': { - changes: [ - { - type: 'data_backfill', - backfillFn: (doc) => { - return { - attributes: { - someAddedField: `${doc.attributes.keyword}-mig`, - }, - }; - }, - }, - { - type: 'mappings_addition', - addedMappings: { - someAddedField: { type: 'keyword' }, - }, - }, - ], - }, - }; - - // typeB -> we add two new model version with migrations - - typeB.modelVersions = { - ...typeB.modelVersions, - '2': { - changes: [ - { - type: 'data_backfill', - backfillFn: (doc) => { - return { - attributes: { - text2: `${doc.attributes.text2} - mig2`, - }, - }; - }, - }, - ], - }, - '3': { - changes: [ - { - type: 'data_backfill', - backfillFn: (doc) => { - return { - attributes: { - text2: `${doc.attributes.text2} - mig3`, - }, - }; - }, - }, - ], - }, - }; - - const { runMigrations, client, savedObjectsRepository } = await getKibanaMigratorTestKit({ - ...getBaseMigratorParams(), - logFilePath, - types: 
[typeA, typeB], - }); - - await runMigrations(); - - const indices = await client.indices.get({ index: '.kibana*' }); - expect(Object.keys(indices)).toEqual(['.kibana_1']); - - const index = indices['.kibana_1']; - const mappings = index.mappings ?? {}; - const mappingMeta = mappings._meta ?? {}; - - expect(mappings.properties).toEqual( - expect.objectContaining({ - sample_a: typeA.mappings, - sample_b: typeB.mappings, - }) - ); - - expect(mappingMeta.docVersions).toEqual({ - sample_a: '10.2.0', - sample_b: '10.3.0', - }); - - const { saved_objects: sampleADocs } = await savedObjectsRepository.find({ type: 'sample_a' }); - const { saved_objects: sampleBDocs } = await savedObjectsRepository.find({ type: 'sample_b' }); - - expect(sampleADocs).toHaveLength(5); - expect(sampleBDocs).toHaveLength(5); - - const sampleAData = sortBy(sampleADocs, 'id').map((object) => ({ - id: object.id, - type: object.type, - attributes: object.attributes, - })); - - expect(sampleAData).toEqual([ - { - id: 'a-0', - type: 'sample_a', - attributes: { boolean: true, keyword: 'a_0', someAddedField: 'a_0-mig' }, - }, - { - id: 'a-1', - type: 'sample_a', - attributes: { boolean: true, keyword: 'a_1', someAddedField: 'a_1-mig' }, - }, - { - id: 'a-2', - type: 'sample_a', - attributes: { boolean: true, keyword: 'a_2', someAddedField: 'a_2-mig' }, - }, - { - id: 'a-3', - type: 'sample_a', - attributes: { boolean: true, keyword: 'a_3', someAddedField: 'a_3-mig' }, - }, - { - id: 'a-4', - type: 'sample_a', - attributes: { boolean: true, keyword: 'a_4', someAddedField: 'a_4-mig' }, - }, - ]); - - const sampleBData = sortBy(sampleBDocs, 'id').map((object) => ({ - id: object.id, - type: object.type, - attributes: object.attributes, - })); - - expect(sampleBData).toEqual([ - { - id: 'b-0', - type: 'sample_b', - attributes: { text: 'i am number 0', text2: 'some static text - mig2 - mig3' }, - }, - { - id: 'b-1', - type: 'sample_b', - attributes: { text: 'i am number 1', text2: 'some static text - mig2 - 
mig3' }, - }, - { - id: 'b-2', - type: 'sample_b', - attributes: { text: 'i am number 2', text2: 'some static text - mig2 - mig3' }, - }, - { - id: 'b-3', - type: 'sample_b', - attributes: { text: 'i am number 3', text2: 'some static text - mig2 - mig3' }, - }, - { - id: 'b-4', - type: 'sample_b', - attributes: { text: 'i am number 4', text2: 'some static text - mig2 - mig3' }, - }, - ]); - - const records = await parseLogFile(logFilePath); - expect(records).toContainLogEntry('Starting to process 10 documents'); - expect(records).toContainLogEntry('Migration completed'); + createBasicDocumentsMigrationTest({ + startES: startElasticsearch, + logFilePath: Path.join(__dirname, 'basic_document_migration.test.log'), }); }); diff --git a/src/core/server/integration_tests/saved_objects/migrations/zdt_1/standard_workflow.test.ts b/src/core/server/integration_tests/saved_objects/migrations/zdt_1/standard_workflow.test.ts index f8b0cfe78e2e..8fb678e49dcc 100644 --- a/src/core/server/integration_tests/saved_objects/migrations/zdt_1/standard_workflow.test.ts +++ b/src/core/server/integration_tests/saved_objects/migrations/zdt_1/standard_workflow.test.ts @@ -7,133 +7,13 @@ */ import Path from 'path'; -import fs from 'fs/promises'; -import { range } from 'lodash'; -import { type TestElasticsearchUtils } from '@kbn/core-test-helpers-kbn-server'; -import { SavedObjectsBulkCreateObject } from '@kbn/core-saved-objects-api-server'; import '../jest_matchers'; -import { getKibanaMigratorTestKit, startElasticsearch } from '../kibana_migrator_test_kit'; -import { delay, parseLogFile } from '../test_utils'; -import { - getBaseMigratorParams, - getSampleAType, - getSampleBType, - dummyModelVersion, -} from '../fixtures/zdt_base.fixtures'; +import { startElasticsearch } from '../kibana_migrator_test_kit'; +import { createStandardWorkflowTest } from '../shared_suites/zdt/standard_workflow'; -export const logFilePath = Path.join(__dirname, 'standard_workflow.test.log'); - -describe('ZDT 
upgrades - basic document migration', () => { - let esServer: TestElasticsearchUtils['es']; - - beforeAll(async () => { - await fs.unlink(logFilePath).catch(() => {}); - esServer = await startElasticsearch(); - }); - - afterAll(async () => { - await esServer?.stop(); - await delay(10); - }); - - const createBaseline = async () => { - const { runMigrations, savedObjectsRepository } = await getKibanaMigratorTestKit({ - ...getBaseMigratorParams(), - types: [getSampleAType(), getSampleBType()], - }); - await runMigrations(); - - const sampleAObjs = range(5).map((number) => ({ - id: `a-${number}`, - type: 'sample_a', - attributes: { keyword: `a_${number}`, boolean: true }, - })); - - await savedObjectsRepository.bulkCreate(sampleAObjs); - - const sampleBObjs = range(5).map((number) => ({ - id: `b-${number}`, - type: 'sample_b', - attributes: { text: `i am number ${number}`, text2: `some static text` }, - })); - - await savedObjectsRepository.bulkCreate(sampleBObjs); - }; - - it('follows the expected stages and transitions', async () => { - await createBaseline(); - - const typeA = getSampleAType(); - const typeB = getSampleBType(); - - typeA.modelVersions = { - ...typeA.modelVersions, - '2': dummyModelVersion, - }; - - typeB.modelVersions = { - ...typeB.modelVersions, - '2': dummyModelVersion, - }; - - const { runMigrations } = await getKibanaMigratorTestKit({ - ...getBaseMigratorParams(), - logFilePath, - types: [typeA, typeB], - }); - - await runMigrations(); - - const records = await parseLogFile(logFilePath); - - expect(records).toContainLogEntry('INIT -> UPDATE_INDEX_MAPPINGS'); - expect(records).toContainLogEntry( - 'UPDATE_INDEX_MAPPINGS -> UPDATE_INDEX_MAPPINGS_WAIT_FOR_TASK' - ); - expect(records).toContainLogEntry( - 'UPDATE_INDEX_MAPPINGS_WAIT_FOR_TASK -> UPDATE_MAPPING_MODEL_VERSIONS' - ); - expect(records).toContainLogEntry('UPDATE_MAPPING_MODEL_VERSIONS -> INDEX_STATE_UPDATE_DONE'); - expect(records).toContainLogEntry('INDEX_STATE_UPDATE_DONE -> 
DOCUMENTS_UPDATE_INIT'); - expect(records).toContainLogEntry('DOCUMENTS_UPDATE_INIT -> SET_DOC_MIGRATION_STARTED'); - expect(records).toContainLogEntry( - 'SET_DOC_MIGRATION_STARTED -> SET_DOC_MIGRATION_STARTED_WAIT_FOR_INSTANCES' - ); - expect(records).toContainLogEntry( - 'SET_DOC_MIGRATION_STARTED_WAIT_FOR_INSTANCES -> CLEANUP_UNKNOWN_AND_EXCLUDED_DOCS' - ); - expect(records).toContainLogEntry( - 'CLEANUP_UNKNOWN_AND_EXCLUDED_DOCS -> CLEANUP_UNKNOWN_AND_EXCLUDED_DOCS_WAIT_FOR_TASK' - ); - expect(records).toContainLogEntry( - 'CLEANUP_UNKNOWN_AND_EXCLUDED_DOCS_WAIT_FOR_TASK -> OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT' - ); - expect(records).toContainLogEntry( - 'OUTDATED_DOCUMENTS_SEARCH_OPEN_PIT -> OUTDATED_DOCUMENTS_SEARCH_READ' - ); - expect(records).toContainLogEntry( - 'OUTDATED_DOCUMENTS_SEARCH_READ -> OUTDATED_DOCUMENTS_SEARCH_TRANSFORM' - ); - expect(records).toContainLogEntry( - 'OUTDATED_DOCUMENTS_SEARCH_TRANSFORM -> OUTDATED_DOCUMENTS_SEARCH_BULK_INDEX' - ); - expect(records).toContainLogEntry( - 'OUTDATED_DOCUMENTS_SEARCH_BULK_INDEX -> OUTDATED_DOCUMENTS_SEARCH_READ' - ); - expect(records).toContainLogEntry( - 'OUTDATED_DOCUMENTS_SEARCH_READ -> OUTDATED_DOCUMENTS_SEARCH_CLOSE_PIT' - ); - expect(records).toContainLogEntry( - 'OUTDATED_DOCUMENTS_SEARCH_CLOSE_PIT -> OUTDATED_DOCUMENTS_SEARCH_REFRESH' - ); - expect(records).toContainLogEntry( - 'OUTDATED_DOCUMENTS_SEARCH_REFRESH -> UPDATE_DOCUMENT_MODEL_VERSIONS' - ); - expect(records).toContainLogEntry( - 'UPDATE_DOCUMENT_MODEL_VERSIONS -> UPDATE_DOCUMENT_MODEL_VERSIONS_WAIT_FOR_INSTANCES' - ); - expect(records).toContainLogEntry('UPDATE_DOCUMENT_MODEL_VERSIONS_WAIT_FOR_INSTANCES -> DONE'); - - expect(records).toContainLogEntry('Migration completed'); +describe('ZDT upgrades - standard workflow', () => { + createStandardWorkflowTest({ + startES: startElasticsearch, + logFilePath: Path.join(__dirname, 'standard_workflow.test.log'), }); }); diff --git 
a/src/core/server/integration_tests/saved_objects/serverless/migrations/actions.test.ts b/src/core/server/integration_tests/saved_objects/serverless/migrations/actions.test.ts new file mode 100644 index 000000000000..bbd4984eae0b --- /dev/null +++ b/src/core/server/integration_tests/saved_objects/serverless/migrations/actions.test.ts @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +import { createTestServerlessInstances } from '@kbn/core-test-helpers-kbn-server'; +import { runActionTestSuite } from '../../migrations/group3/actions/actions_test_suite'; + +const { startES } = createTestServerlessInstances({ + adjustTimeout: jest.setTimeout, +}); + +describe('Migration actions - serverless environment', () => { + runActionTestSuite({ + startEs: async () => { + const serverlessEs = await startES(); + const client = serverlessEs.getClient(); + return { + esServer: serverlessEs, + client, + }; + }, + environment: 'serverless', + }); +}); diff --git a/src/core/server/integration_tests/saved_objects/serverless/migrations/basic_document_migration.test.ts b/src/core/server/integration_tests/saved_objects/serverless/migrations/basic_document_migration.test.ts new file mode 100644 index 000000000000..cfc30c5bd226 --- /dev/null +++ b/src/core/server/integration_tests/saved_objects/serverless/migrations/basic_document_migration.test.ts @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +import Path from 'path'; +import { createTestServerlessInstances } from '@kbn/core-test-helpers-kbn-server'; +import { createBasicDocumentsMigrationTest } from '../../migrations/shared_suites/zdt/basic_document_migration'; + +describe('serverless - ZDT upgrades - basic document migration', () => { + const startElasticsearch = async () => { + const { startES } = createTestServerlessInstances({ + adjustTimeout: jest.setTimeout, + }); + return await startES(); + }; + + createBasicDocumentsMigrationTest({ + startES: startElasticsearch, + logFilePath: Path.join(__dirname, 'basic_document_migration.test.log'), + }); +}); diff --git a/src/core/server/integration_tests/saved_objects/serverless/migrations/smoke.test.ts b/src/core/server/integration_tests/saved_objects/serverless/migrations/smoke.test.ts index 1d884706fc8b..01be93e7a296 100644 --- a/src/core/server/integration_tests/saved_objects/serverless/migrations/smoke.test.ts +++ b/src/core/server/integration_tests/saved_objects/serverless/migrations/smoke.test.ts @@ -13,10 +13,11 @@ import { createTestServerlessInstances, } from '@kbn/core-test-helpers-kbn-server'; -describe('smoke', () => { +describe('Basic smoke test', () => { let serverlessES: TestServerlessESUtils; let serverlessKibana: TestServerlessKibanaUtils; let root: TestServerlessKibanaUtils['root']; + beforeEach(async () => { const { startES, startKibana } = createTestServerlessInstances({ adjustTimeout: jest.setTimeout, @@ -25,11 +26,13 @@ describe('smoke', () => { serverlessKibana = await startKibana(); root = serverlessKibana.root; }); + afterEach(async () => { await serverlessES?.stop(); await serverlessKibana?.stop(); }); - test('it can start Kibana and ES serverless', async () => { + + test('it can start Kibana running 
against serverless ES', async () => { const { body } = await request.get(root, '/api/status').expect(200); expect(body).toMatchObject({ status: { overall: { level: 'available' } } }); }); diff --git a/src/core/server/integration_tests/saved_objects/serverless/migrations/standard_workflow.test.ts b/src/core/server/integration_tests/saved_objects/serverless/migrations/standard_workflow.test.ts new file mode 100644 index 000000000000..77368869e6e5 --- /dev/null +++ b/src/core/server/integration_tests/saved_objects/serverless/migrations/standard_workflow.test.ts @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +import Path from 'path'; +import { createTestServerlessInstances } from '@kbn/core-test-helpers-kbn-server'; +import { createStandardWorkflowTest } from '../../migrations/shared_suites/zdt/standard_workflow'; + +describe('serverless - ZDT upgrades - standard workflow', () => { + const startElasticsearch = async () => { + const { startES } = createTestServerlessInstances({ + adjustTimeout: jest.setTimeout, + }); + return await startES(); + }; + + createStandardWorkflowTest({ + startES: startElasticsearch, + logFilePath: Path.join(__dirname, 'standard_workflow.test.log'), + }); +});