From 7a00df04cdfb221293cf4c93e0fea1853f68deb4 Mon Sep 17 00:00:00 2001 From: Aaron van Meerten Date: Tue, 16 Apr 2024 15:47:16 -0500 Subject: [PATCH 1/8] feature: manage OCI instance pools --- src/cloud_instance_manager_selector.ts | 12 ++ src/oracle_instance_pool_manager.ts | 182 +++++++++++++++++++++++ src/test/oracle_instance_pool_manager.ts | 51 +++++++ 3 files changed, 245 insertions(+) create mode 100644 src/oracle_instance_pool_manager.ts create mode 100644 src/test/oracle_instance_pool_manager.ts diff --git a/src/cloud_instance_manager_selector.ts b/src/cloud_instance_manager_selector.ts index c9b7c73..086ffa0 100644 --- a/src/cloud_instance_manager_selector.ts +++ b/src/cloud_instance_manager_selector.ts @@ -3,6 +3,7 @@ import CustomInstanceManager from './custom_instance_manager'; import NomadInstanceManager from './nomad_instance_manager'; import DigitalOceanInstanceManager from './digital_ocean_instance_manager'; import { CloudInstanceManager } from './cloud_instance_manager'; +import OracleInstancePoolManager from './oracle_instance_pool_manager'; export interface CloudInstanceManagerSelectorOptions { cloudProviders: string[]; @@ -19,6 +20,7 @@ export interface CloudInstanceManagerSelectorOptions { export class CloudInstanceManagerSelector { private oracleInstanceManager: OracleInstanceManager; + private oracleInstancePoolManager: OracleInstancePoolManager; private digitalOceanInstanceManager: DigitalOceanInstanceManager; private customInstanceManager: CustomInstanceManager; private nomadInstanceManager: NomadInstanceManager; @@ -32,6 +34,14 @@ export class CloudInstanceManagerSelector { }); } + if (options.cloudProviders.includes('oraclepool')) { + this.oracleInstancePoolManager = new OracleInstancePoolManager({ + isDryRun: options.isDryRun, + ociConfigurationFilePath: options.ociConfigurationFilePath, + ociConfigurationProfile: options.ociConfigurationProfile, + }); + } + if (options.cloudProviders.includes('custom')) { this.customInstanceManager = 
new CustomInstanceManager({ isDryRun: options.isDryRun, @@ -57,6 +67,8 @@ export class CloudInstanceManagerSelector { switch (cloud) { case 'oracle': return this.oracleInstanceManager; + case 'oraclepool': + return this.oracleInstancePoolManager; case 'digitalocean': return this.digitalOceanInstanceManager; case 'nomad': diff --git a/src/oracle_instance_pool_manager.ts b/src/oracle_instance_pool_manager.ts new file mode 100644 index 0000000..fe70dcd --- /dev/null +++ b/src/oracle_instance_pool_manager.ts @@ -0,0 +1,182 @@ +import core = require('oci-core'); +import common = require('oci-common'); +import { InstanceGroup } from './instance_group'; +import { Context } from './context'; +import { CloudRetryStrategy } from './cloud_manager'; +import { CloudInstanceManager, CloudInstance } from './cloud_instance_manager'; +import { workrequests } from 'oci-sdk'; + +const maxTimeInSeconds = 60 * 60; // The duration for waiter configuration before failing. Currently set to 1 hour. +const maxDelayInSeconds = 30; // The max delay for the waiter configuration. 
Currently set to 30 seconds + +const waiterConfiguration: common.WaiterConfiguration = { + terminationStrategy: new common.MaxTimeTerminationStrategy(maxTimeInSeconds), + delayStrategy: new common.ExponentialBackoffDelayStrategy(maxDelayInSeconds), +}; + +export interface OracleInstancePoolManagerOptions { + isDryRun: boolean; + ociConfigurationFilePath: string; + ociConfigurationProfile: string; +} + +export default class OracleInstancePoolManager implements CloudInstanceManager { + private isDryRun: boolean; + private provider: common.ConfigFileAuthenticationDetailsProvider; + private computeManagementClient: core.ComputeManagementClient; + private workRequestClient: workrequests.WorkRequestClient; + + constructor(options: OracleInstancePoolManagerOptions) { + this.isDryRun = options.isDryRun; + this.provider = new common.ConfigFileAuthenticationDetailsProvider( + options.ociConfigurationFilePath, + options.ociConfigurationProfile, + ); + this.computeManagementClient = new core.ComputeManagementClient({ + authenticationDetailsProvider: this.provider, + }); + this.workRequestClient = new workrequests.WorkRequestClient({ + authenticationDetailsProvider: this.provider, + }); + + this.launchInstances = this.launchInstances.bind(this); + } + + async detachInstance(ctx: Context, group: InstanceGroup, instance: string): Promise { + ctx.logger.info(`[oraclepool] Detaching instance ${instance}`); + this.computeManagementClient.regionId = group.region; + + const cwaiter = this.computeManagementClient.createWaiters(this.workRequestClient, waiterConfiguration); + const response = await cwaiter.forDetachInstancePoolInstance({ + instancePoolId: group.instanceConfigurationId, + detachInstancePoolInstanceDetails: { instanceId: instance }, + }); + ctx.logger.info(`[oraclepool] Finished detaching instance ${instance}`, { response }); + } + + async launchInstances( + ctx: Context, + group: InstanceGroup, + groupCurrentCount: number, + quantity: number, + ): Promise> { + 
ctx.logger.info(`[oraclepool] Launching a batch of ${quantity} instances in group ${group.name}`); + + this.computeManagementClient.regionId = group.region; + const poolDetails = await this.computeManagementClient.getInstancePool({ + instancePoolId: group.instanceConfigurationId, + }); + + const poolInstances = await this.computeManagementClient.listInstancePoolInstances({ + compartmentId: group.compartmentId, + instancePoolId: group.instanceConfigurationId, + }); + + const existingInstanceIds = poolInstances.items.map((instance) => { + return instance.id; + }); + + ctx.logger.debug(`[oraclepool] Instance pool ${group.name} instances`, { instances: poolInstances.items }); + + const newSize = quantity + groupCurrentCount; + if (groupCurrentCount == poolDetails.instancePool.size) { + ctx.logger.debug(`[oraclepool] Instance pool ${group.name} size matches current count`, { + current: groupCurrentCount, + size: poolDetails.instancePool.size, + newSize, + }); + } else { + ctx.logger.error(`[oraclepool] Instance pool ${group.name} size DOES NOT matches current count`, { + current: groupCurrentCount, + size: poolDetails.instancePool.size, + newSize, + }); + } + const updateResult = await this.computeManagementClient.updateInstancePool({ + instancePoolId: group.instanceConfigurationId, + updateInstancePoolDetails: { + size: newSize, + }, + }); + + ctx.logger.info(`[oraclepool] Updated instance pool size for group ${group.name}`, { updateResult }); + + this.workRequestClient.regionId = group.region; + const cwaiter = this.computeManagementClient.createWaiters(this.workRequestClient, waiterConfiguration); + const runningPool = await cwaiter.forInstancePool( + { + instancePoolId: group.instanceConfigurationId, + }, + core.models.InstancePool.LifecycleState.Running, + ); + + ctx.logger.info(`[oraclepool] Instance pool for ${group.name} back in running state`, { runningPool }); + + if (runningPool.instancePool.size == newSize) { + ctx.logger.debug(`[oraclepool] Instance pool 
${group.name} size matches new size`, { + newSize, + }); + } else { + ctx.logger.error(`[oraclepool] Instance pool ${group.name} size DOES NOT matches new size`, { + newSize, + }); + } + + const newPoolInstances = await this.computeManagementClient.listInstancePoolInstances({ + compartmentId: group.compartmentId, + instancePoolId: group.instanceConfigurationId, + }); + + const result = newPoolInstances.items + .map((instance) => { + return instance.id; + }) + .filter((instanceId) => { + return !existingInstanceIds.includes(instanceId); + }); + + ctx.logger.info(`[oraclepool] Finished launching all the instances in group ${group.name}`, { result }); + + return result; + } + + async getInstances( + ctx: Context, + group: InstanceGroup, + cloudRetryStrategy: CloudRetryStrategy, + ): Promise> { + const computeManagementClient = new core.ComputeManagementClient( + { + authenticationDetailsProvider: this.provider, + }, + { + retryConfiguration: { + terminationStrategy: new common.MaxTimeTerminationStrategy(cloudRetryStrategy.maxTimeInSeconds), + delayStrategy: new common.ExponentialBackoffDelayStrategy(cloudRetryStrategy.maxDelayInSeconds), + retryCondition: (response) => { + return ( + cloudRetryStrategy.retryableStatusCodes.filter((retryableStatusCode) => { + return response.statusCode === retryableStatusCode; + }).length > 0 + ); + }, + }, + }, + ); + computeManagementClient.regionId = group.region; + + const poolInstances = await computeManagementClient.listInstancePoolInstances({ + compartmentId: group.compartmentId, + instancePoolId: group.instanceConfigurationId, + }); + + return poolInstances.items.map((instance) => { + ctx.logger.debug('Found instance in oracle pool', { instance }); + return { + instanceId: instance.id, + displayName: instance.displayName, + cloudStatus: instance.state, + }; + }); + } +} diff --git a/src/test/oracle_instance_pool_manager.ts b/src/test/oracle_instance_pool_manager.ts new file mode 100644 index 0000000..94fd129 --- /dev/null +++ 
b/src/test/oracle_instance_pool_manager.ts @@ -0,0 +1,51 @@ +/* eslint-disable @typescript-eslint/ban-ts-comment */ +// @ts-nocheck + +import assert from 'node:assert'; +import test, { afterEach, describe, mock } from 'node:test'; + +import OracleInstancePoolManager from '../oracle_instance_pool_manager'; + +function log(level, message, data) { + console.log(`${Date.now()} ${level}: ${message}`); + console.log(data); +} + +describe('InstancePoolManager', () => { + const manager = new OracleInstancePoolManager({ + isDryRun: true, + ociConfigurationFilePath: process.env.OCI_CONFIGURATION_FILE_PATH, + ociConfigurationProfile: process.env.OCI_CONFIGURATION_PROFILE, + }); + const context = { + logger: { + debug: mock.fn(log.bind('debug')), + info: mock.fn(log.bind('info')), + error: mock.fn(log.bind('error')), + }, + }; + + afterEach(() => { + mock.restoreAll(); + }); + + describe('getInstances', () => { + // This is a test for the getInstances method + test('will call the correct endpoint', async () => { + console.log('Starting getInstances test'); + const instances = await manager.getInstances( + context, + { + name: 'group', + region: process.env.REGION, + compartmentId: process.env.COMPARTMENT_OCID, + instanceConfigurationId: process.env.INSTANCE_POOL_ID, + }, + { maxAttempts: 1, maxTimeInSeconds: 60, maxDelayInSeconds: 30, retryableStatusCodes: [404, 429] }, + ); + console.log('ended getInstances test'); + assert.ok(instances); + console.log(instances); + }); + }); +}); From 53a6745579eb9225b3308d724e41d5d36f9999ad Mon Sep 17 00:00:00 2001 From: Aaron van Meerten Date: Wed, 17 Apr 2024 15:37:59 -0500 Subject: [PATCH 2/8] instance pool tests --- src/oracle_instance_pool_manager.ts | 72 ++++++----- src/test/oracle_instance_pool_manager.ts | 145 ++++++++++++++++++++--- 2 files changed, 168 insertions(+), 49 deletions(-) diff --git a/src/oracle_instance_pool_manager.ts b/src/oracle_instance_pool_manager.ts index fe70dcd..97dbed2 100644 --- 
a/src/oracle_instance_pool_manager.ts +++ b/src/oracle_instance_pool_manager.ts @@ -42,6 +42,14 @@ export default class OracleInstancePoolManager implements CloudInstanceManager { this.launchInstances = this.launchInstances.bind(this); } + setComputeManagementClient(client: core.ComputeManagementClient) { + this.computeManagementClient = client; + } + + getComputeManagementClient() { + return this.computeManagementClient; + } + async detachInstance(ctx: Context, group: InstanceGroup, instance: string): Promise { ctx.logger.info(`[oraclepool] Detaching instance ${instance}`); this.computeManagementClient.regionId = group.region; @@ -67,6 +75,8 @@ export default class OracleInstancePoolManager implements CloudInstanceManager { instancePoolId: group.instanceConfigurationId, }); + ctx.logger.debug(`[oraclepool] Instance Pool Details for group ${group.name}`, { poolDetails }); + const poolInstances = await this.computeManagementClient.listInstancePoolInstances({ compartmentId: group.compartmentId, instancePoolId: group.instanceConfigurationId, @@ -92,14 +102,19 @@ export default class OracleInstancePoolManager implements CloudInstanceManager { newSize, }); } - const updateResult = await this.computeManagementClient.updateInstancePool({ - instancePoolId: group.instanceConfigurationId, - updateInstancePoolDetails: { - size: newSize, - }, - }); - ctx.logger.info(`[oraclepool] Updated instance pool size for group ${group.name}`, { updateResult }); + if (this.isDryRun) { + ctx.logger.info(`[oracle] Dry run enabled, instance pool size change skipped`, { newSize }); + } else { + const updateResult = await this.computeManagementClient.updateInstancePool({ + instancePoolId: group.instanceConfigurationId, + updateInstancePoolDetails: { + size: newSize, + }, + }); + + ctx.logger.info(`[oraclepool] Updated instance pool size for group ${group.name}`, { updateResult }); + } this.workRequestClient.regionId = group.region; const cwaiter = 
this.computeManagementClient.createWaiters(this.workRequestClient, waiterConfiguration); @@ -140,29 +155,26 @@ export default class OracleInstancePoolManager implements CloudInstanceManager { return result; } - async getInstances( - ctx: Context, - group: InstanceGroup, - cloudRetryStrategy: CloudRetryStrategy, - ): Promise> { - const computeManagementClient = new core.ComputeManagementClient( - { - authenticationDetailsProvider: this.provider, - }, - { - retryConfiguration: { - terminationStrategy: new common.MaxTimeTerminationStrategy(cloudRetryStrategy.maxTimeInSeconds), - delayStrategy: new common.ExponentialBackoffDelayStrategy(cloudRetryStrategy.maxDelayInSeconds), - retryCondition: (response) => { - return ( - cloudRetryStrategy.retryableStatusCodes.filter((retryableStatusCode) => { - return response.statusCode === retryableStatusCode; - }).length > 0 - ); - }, - }, - }, - ); + async getInstances(ctx: Context, group: InstanceGroup, _: CloudRetryStrategy): Promise> { + // const computeManagementClient = new core.ComputeManagementClient( + // { + // authenticationDetailsProvider: this.provider, + // }, + // { + // retryConfiguration: { + // terminationStrategy: new common.MaxTimeTerminationStrategy(cloudRetryStrategy.maxTimeInSeconds), + // delayStrategy: new common.ExponentialBackoffDelayStrategy(cloudRetryStrategy.maxDelayInSeconds), + // retryCondition: (response) => { + // return ( + // cloudRetryStrategy.retryableStatusCodes.filter((retryableStatusCode) => { + // return response.statusCode === retryableStatusCode; + // }).length > 0 + // ); + // }, + // }, + // }, + // ); + const computeManagementClient = this.computeManagementClient; computeManagementClient.regionId = group.region; const poolInstances = await computeManagementClient.listInstancePoolInstances({ diff --git a/src/test/oracle_instance_pool_manager.ts b/src/test/oracle_instance_pool_manager.ts index 94fd129..87533fa 100644 --- a/src/test/oracle_instance_pool_manager.ts +++ 
b/src/test/oracle_instance_pool_manager.ts @@ -1,5 +1,6 @@ /* eslint-disable @typescript-eslint/ban-ts-comment */ // @ts-nocheck +import core = require('oci-core'); import assert from 'node:assert'; import test, { afterEach, describe, mock } from 'node:test'; @@ -7,45 +8,151 @@ import test, { afterEach, describe, mock } from 'node:test'; import OracleInstancePoolManager from '../oracle_instance_pool_manager'; function log(level, message, data) { - console.log(`${Date.now()} ${level}: ${message}`); - console.log(data); + console.log(`${new Date().toISOString()} ${level}: ${message}`); + if (data) { + console.log(JSON.stringify(data, null, 2)); + } } +const group = { + name: 'group', + provider: 'oraclepool', + region: 'testregion', + compartmentId: 'testcpt', + instanceConfigurationId: 'testpoolid', +}; + describe('InstancePoolManager', () => { const manager = new OracleInstancePoolManager({ - isDryRun: true, + isDryRun: false, ociConfigurationFilePath: process.env.OCI_CONFIGURATION_FILE_PATH, ociConfigurationProfile: process.env.OCI_CONFIGURATION_PROFILE, }); + + const mockWaiters = { + forInstancePool: mock.fn((request, _) => { + return { + instancePool: { + id: request.instancePoolId, + name: group.name, + compartmentId: group.compartmentId, + instanceConfigurationId: 'testid', + size: 2, + }, + }; + }), + forDetachInstancePoolInstance: mock.fn((request) => { + return { + instancePoolInstance: request.detachInstancePoolInstanceDetails, + workRequest: { id: 'testworkrequestid' }, + }; + }), + }; + + const mockComputeManagementClient = { + createWaiters: mock.fn(() => { + return mockWaiters; + }), + getInstancePool: mock.fn((request) => { + return { + instancePool: { + id: request.instancePoolId, + name: group.name, + compartmentId: group.compartmentId, + instanceConfigurationId: 'testid', + size: 2, + }, + }; + }), + listInstancePoolInstances: mock.fn(() => { + return { items: [{ id: 'testinstanceid-1' }, { id: 'testinstanceid-2' }] }; + }), + updateInstancePool: 
mock.fn((request) => { + return { + id: request.instancePoolId, + name: group.name, + compartmentId: group.compartmentId, + instanceConfigurationId: 'testid', + size: request.size, + }; + }), + }; + + manager.setComputeManagementClient(mockComputeManagementClient); + const context = { logger: { - debug: mock.fn(log.bind('debug')), - info: mock.fn(log.bind('info')), - error: mock.fn(log.bind('error')), + debug: mock.fn((message, data) => { + log('DEBUG', message, data); + }), + info: mock.fn((message, data) => { + log('INFO', message, data); + }), + warn: mock.fn((message, data) => { + log('WARN', message, data); + }), + error: mock.fn((message, data) => { + log('ERROR', message, data); + }), }, }; + if (!process.env.OCI_CONFIGURATION_FILE_PATH || !process.env.OCI_CONFIGURATION_PROFILE) { + console.error('Please set OCI_CONFIGURATION_FILE_PATH and OCI_CONFIGURATION_PROFILE env variables'); + process.exit(1); + } + + if (!process.env.COMPARTMENT_OCID || !process.env.INSTANCE_POOL_ID || !process.env.REGION) { + console.error('Please set COMPARTMENT_OCID, INSTANCE_POOL_ID and REGION env variables'); + process.exit(1); + } + afterEach(() => { mock.restoreAll(); }); describe('getInstances', () => { // This is a test for the getInstances method - test('will call the correct endpoint', async () => { + test('will list instances in a group', async () => { console.log('Starting getInstances test'); - const instances = await manager.getInstances( - context, - { - name: 'group', - region: process.env.REGION, - compartmentId: process.env.COMPARTMENT_OCID, - instanceConfigurationId: process.env.INSTANCE_POOL_ID, - }, - { maxAttempts: 1, maxTimeInSeconds: 60, maxDelayInSeconds: 30, retryableStatusCodes: [404, 429] }, - ); - console.log('ended getInstances test'); + const instances = await manager.getInstances(context, group, { + maxAttempts: 1, + maxTimeInSeconds: 60, + maxDelayInSeconds: 30, + retryableStatusCodes: [404, 429], + }); + log('TEST', 'ended getInstances test'); + 
assert.ok(instances); + log('TEST', 'found instances', instances); + }); + }); + + describe('launchInstances', () => { + // This is a test for the launchInstances method + test('will launch instances in a group', async () => { + console.log('Starting launchInstances test'); + mockWaiters.forInstancePool.mock.mockImplementationOnce((_) => { + return { + instancePool: { + id: group.instanceConfigurationId, + name: group.name, + compartmentId: group.compartmentId, + instanceConfigurationId: 'testid', + size: 3, + }, + }; + }); + mockComputeManagementClient.listInstancePoolInstances.mock.mockImplementationOnce((_) => { + return { items: [{ id: 'testinstanceid-1' }, { id: 'testinstanceid-2' }, { id: 'new-instance-id' }] }; + }, 2); + const instances = await manager.launchInstances(context, group, 2, 1); + console.log(mockComputeManagementClient.updateInstancePool.mock.callCount()); + assert.equal(mockComputeManagementClient.updateInstancePool.mock.callCount(), 1); assert.ok(instances); - console.log(instances); + assert.equal(instances[0], 'new-instance-id'); + assert.equal(instances.length, 1); + log('TEST', 'ended launchInstances test'); + log('TEST', 'launched instances', instances); }); }); }); From 0040e993c2ba01b4b48abfe21628e2ecb4239124 Mon Sep 17 00:00:00 2001 From: Aaron van Meerten Date: Thu, 18 Apr 2024 10:01:21 -0500 Subject: [PATCH 3/8] dummy API key for OCI SDK testing --- src/test/oci_api_key.pem | 28 ++++++++++++++++++++++++ src/test/oracle_instance_pool_manager.ts | 14 ++---------- src/test/test_oracle_config | 6 +++++ 3 files changed, 36 insertions(+), 12 deletions(-) create mode 100644 src/test/oci_api_key.pem create mode 100644 src/test/test_oracle_config diff --git a/src/test/oci_api_key.pem b/src/test/oci_api_key.pem new file mode 100644 index 0000000..56ba78b --- /dev/null +++ b/src/test/oci_api_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC2II2TQtTjNvhF 
+MgQwsrDcBD4QY56tlxpLfUUM6nNQVl30ABOG1HKKxvMOwMD9hAtffCX2Wzr9h13J +sWwOxxtJlf5vK+9ghJ2u3pD3ZtNxTi0Owmjy6nGne5kvVwphIUp/XyHodIu68udy +JCrUeq/Z2YNbrFS3+coSjshRxgfDV0S+n3HkEjxcCkw9dxDloIF2XrBCNk8A3nU6 +05DVFnChxksZBJub8DBH0YY20iLpBVppiT8mevaYgL6ft0TcJ6i7e2t3mHiYnNsS +Rd0lQdIyMDcNDC1Ccq8IWHMz2ewtd24uzYzkNn0PYsxh7ay3VOeM4AXI2V4pzxr/ +YHxVdVYdAgMBAAECggEAViYf0cCTIscluNVn1gEwV2JKWUZGEKag8YRX3TwpP58a +4ggHNIfZhnuFDT1wDHshRhFbg6Szyqj1XCpoirTz22Lv73ZAzM7uJ4Fv4RbqfXYV +Kh4btovfJvH4C8SITFmK+6TsTSCT1+J+SCeDeu5tsAaLdBmYGU42n4CakNOzaFpa +AAg7yuKnRbImx8h9hQxTHz6wEzoOvAqCZOoCG8OgMdOYTLCmmSPYWvPGuG0ZLHqj +CJjkWmOcNK+cc2D3c9ud8Rr1JSbtgaVTUlzZdAuB1nUWFGnOW0oTfIXr+Hqlp5oI +cyG1SNyhBxqK6KERxZXh1Q7wt6uGNpmiqNXuksOpoQKBgQDu4/ARFnxDGT25VxTP +R11J1mACktTJg9Fl1aEospSym647IGFs2g+J08m/+QAVIrnv+Yme7t0avl+YhPdd +OLlCUby9kojOavXQjw8bREbwVJeCHkpQN69gMsw4GjYG80Pvk0flummEO8IER8xM +WJLgI35/5gGpydOUIHe+5iiqowKBgQDDK9wjZ0PFh49+RZa7258LqaAUZkvFH9Le +xs3McGpjiVU4d2hKwP+fQxvHMp/D963sB4Sjjzfuz2VAgW9qNUa/GUVVTSUuCdJ4 +ckzKl4qXEHmedVTmcnsLIS4VuIlrQPE7Pr/rH1i/t1KtQPGGofLKFFl7Qt2JJrSx +/Y7lqYzIPwKBgQDHEFOUuar0cs0Xa3QgwXQAOk4o9tev0FMmS50Of7kfGFFYXcQx +Y4g9LqlYehJSimxshwDh2mOOjIK2Yl6KZOEjXV8sjTXI5CJQF1XcP8npLGRP/hOo +wq42OD4qpdTNd9rtgUoRKYQRt4YV8QpRuCEz+v/Hbg+Gv3K6ZU/DWKEchwKBgG9U +rZZkaXxb/7Z/66Pte6QjdnUshTxFEvyRWwOrfISa6knzk0JRJX+rwES40/ntZwCk +qTzDjZxkIdXR/8QN7/VCbRJjlznT0IN0b+mA2EWj2DxTocJyu01199vcEEdmHDU3 +bjKpBu8Vm8vIp/2sk9pCEWkGk2iQ87I0eTb4qJ3xAoGAFu6pHept9i7gFU/XQoIE +IAnHyhibRgbDcD7FrFNKuj4AATQ0jEtuBnm50lTdOXXqkdvC/z4hIHoJNn4L9ay/ +3rGPAR63TmW2+FiVfj/KELliqUJB3XutTWoMgme8hp3uVXxchGNVMVYEo0qjMLOS +SsZ+1SC2x16mEkpTvm6fZaA= +-----END PRIVATE KEY----- diff --git a/src/test/oracle_instance_pool_manager.ts b/src/test/oracle_instance_pool_manager.ts index 87533fa..efdcd5b 100644 --- a/src/test/oracle_instance_pool_manager.ts +++ b/src/test/oracle_instance_pool_manager.ts @@ -25,8 +25,8 @@ const group = { describe('InstancePoolManager', () => { const manager = new OracleInstancePoolManager({ isDryRun: false, - 
ociConfigurationFilePath: process.env.OCI_CONFIGURATION_FILE_PATH, - ociConfigurationProfile: process.env.OCI_CONFIGURATION_PROFILE, + ociConfigurationFilePath: __dirname + '/test_oracle_config', + ociConfigurationProfile: 'TEST', }); const mockWaiters = { @@ -97,16 +97,6 @@ describe('InstancePoolManager', () => { }, }; - if (!process.env.OCI_CONFIGURATION_FILE_PATH || !process.env.OCI_CONFIGURATION_PROFILE) { - console.error('Please set OCI_CONFIGURATION_FILE_PATH and OCI_CONFIGURATION_PROFILE env variables'); - process.exit(1); - } - - if (!process.env.COMPARTMENT_OCID || !process.env.INSTANCE_POOL_ID || !process.env.REGION) { - console.error('Please set COMPARTMENT_OCID, INSTANCE_POOL_ID and REGION env variables'); - process.exit(1); - } - afterEach(() => { mock.restoreAll(); }); diff --git a/src/test/test_oracle_config b/src/test/test_oracle_config new file mode 100644 index 0000000..985e1d4 --- /dev/null +++ b/src/test/test_oracle_config @@ -0,0 +1,6 @@ +[TEST] +user=ocid1.user.oc1..aaaaaaaabbbbbbbbbbbbccccccccddddddddeeeeeeeeffffff +fingerprint=4d:c5:f3:d4:91:47:40:37:e0:ce:a1:8d:2d:74:31:42 +key_file=src/test/oci_api_key.pem +tenancy=ocid1.tenancy.oc1..aaaaaaaabbbbbbbbbbbbccccccccddddddddeeeeeeeeffffff +region=eu-frankfurt-1 \ No newline at end of file From d472ada57b54903da0d7f17253c098f9a1f2341c Mon Sep 17 00:00:00 2001 From: Aaron van Meerten Date: Thu, 18 Apr 2024 11:48:18 -0500 Subject: [PATCH 4/8] pass current inventory instead of count to cloud-specific implementations --- src/cloud_instance_manager.ts | 5 +- src/cloud_manager.ts | 4 +- src/custom_instance_manager.ts | 3 +- src/instance_launcher.ts | 2 +- src/oracle_instance_manager.ts | 3 +- src/oracle_instance_pool_manager.ts | 56 ++++--- src/test/oracle_instance_pool_manager.ts | 183 ++++++++++++++++++----- 7 files changed, 190 insertions(+), 66 deletions(-) diff --git a/src/cloud_instance_manager.ts b/src/cloud_instance_manager.ts index 2e548f2..171097e 100644 --- a/src/cloud_instance_manager.ts 
+++ b/src/cloud_instance_manager.ts @@ -1,6 +1,7 @@ import { InstanceGroup } from './instance_group'; import { Context } from './context'; import { CloudRetryStrategy } from './cloud_manager'; +import { InstanceState } from './instance_tracker'; export interface CloudInstance { instanceId: string; @@ -12,7 +13,7 @@ export interface CloudInstanceManager { launchInstances( ctx: Context, group: InstanceGroup, - groupCurrentCount: number, + currentInventory: InstanceState[], quantity: number, ): Promise>; @@ -33,7 +34,7 @@ export abstract class AbstractCloudInstanceManager implements CloudInstanceManag async launchInstances( ctx: Context, group: InstanceGroup, - groupCurrentCount: number, + currentInventory: InstanceState[], quantity: number, ): Promise> { ctx.logger.info(`[CloudInstanceManager] Launching a batch of ${quantity} instances in group ${group.name}`); diff --git a/src/cloud_manager.ts b/src/cloud_manager.ts index eb5862c..926732c 100644 --- a/src/cloud_manager.ts +++ b/src/cloud_manager.ts @@ -78,7 +78,7 @@ export default class CloudManager { async scaleUp( ctx: Context, group: InstanceGroup, - groupCurrentCount: number, + currentInventory: InstanceState[], quantity: number, isScaleDownProtected: boolean, ): Promise { @@ -91,7 +91,7 @@ export default class CloudManager { return 0; } - const scaleUpResult = await instanceManager.launchInstances(ctx, group, groupCurrentCount, quantity); + const scaleUpResult = await instanceManager.launchInstances(ctx, group, currentInventory, quantity); let scaleUpCount = 0; await Promise.all( diff --git a/src/custom_instance_manager.ts b/src/custom_instance_manager.ts index f10108c..71dbb2b 100644 --- a/src/custom_instance_manager.ts +++ b/src/custom_instance_manager.ts @@ -2,6 +2,7 @@ import { execFile } from 'child_process'; import { InstanceGroup } from './instance_group'; import { Context } from './context'; import { AbstractCloudInstanceManager } from './cloud_instance_manager'; +import { InstanceState } from 
'./instance_tracker'; export interface CustomInstanceManagerOptions { isDryRun: boolean; @@ -27,7 +28,7 @@ export default class CustomInstanceManager extends AbstractCloudInstanceManager async launchInstances( ctx: Context, group: InstanceGroup, - groupCurrentCount: number, + currentInventory: InstanceState[], quantity: number, ): Promise> { ctx.logger.info(`[custom] Launching a batch of ${quantity} instances in group ${group.name}`); diff --git a/src/instance_launcher.ts b/src/instance_launcher.ts index bd291b4..d894b8b 100644 --- a/src/instance_launcher.ts +++ b/src/instance_launcher.ts @@ -126,7 +126,7 @@ export default class InstanceLauncher { const scaleUpCount = await this.cloudManager.scaleUp( ctx, group, - count, + currentInventory, actualScaleUpQuantity, scaleDownProtected, ); diff --git a/src/oracle_instance_manager.ts b/src/oracle_instance_manager.ts index 619799c..5e98a3d 100644 --- a/src/oracle_instance_manager.ts +++ b/src/oracle_instance_manager.ts @@ -7,6 +7,7 @@ import { ResourceSearchClient } from 'oci-resourcesearch'; import * as resourceSearch from 'oci-resourcesearch'; import { CloudRetryStrategy } from './cloud_manager'; import { AbstractCloudInstanceManager, CloudInstanceManager, CloudInstance } from './cloud_instance_manager'; +import { InstanceState } from './instance_tracker'; interface FaultDomainMap { [key: string]: string[]; @@ -43,7 +44,7 @@ export default class OracleInstanceManager implements CloudInstanceManager { async launchInstances( ctx: Context, group: InstanceGroup, - groupCurrentCount: number, + currentInventory: InstanceState[], quantity: number, ): Promise> { ctx.logger.info(`[oracle] Launching a batch of ${quantity} instances in group ${group.name}`); diff --git a/src/oracle_instance_pool_manager.ts b/src/oracle_instance_pool_manager.ts index 97dbed2..33638b6 100644 --- a/src/oracle_instance_pool_manager.ts +++ b/src/oracle_instance_pool_manager.ts @@ -5,6 +5,7 @@ import { Context } from './context'; import { 
CloudRetryStrategy } from './cloud_manager'; import { CloudInstanceManager, CloudInstance } from './cloud_instance_manager'; import { workrequests } from 'oci-sdk'; +import { InstanceState } from './instance_tracker'; const maxTimeInSeconds = 60 * 60; // The duration for waiter configuration before failing. Currently set to 1 hour. const maxDelayInSeconds = 30; // The max delay for the waiter configuration. Currently set to 30 seconds @@ -65,11 +66,13 @@ export default class OracleInstancePoolManager implements CloudInstanceManager { async launchInstances( ctx: Context, group: InstanceGroup, - groupCurrentCount: number, + currentInventory: InstanceState[], quantity: number, ): Promise> { ctx.logger.info(`[oraclepool] Launching a batch of ${quantity} instances in group ${group.name}`); + const result = []; + this.computeManagementClient.regionId = group.region; const poolDetails = await this.computeManagementClient.getInstancePool({ instancePoolId: group.instanceConfigurationId, @@ -86,25 +89,36 @@ export default class OracleInstancePoolManager implements CloudInstanceManager { return instance.id; }); - ctx.logger.debug(`[oraclepool] Instance pool ${group.name} instances`, { instances: poolInstances.items }); + const currentInstanceIds = currentInventory.map((instance) => { + return instance.instanceId; + }); - const newSize = quantity + groupCurrentCount; - if (groupCurrentCount == poolDetails.instancePool.size) { - ctx.logger.debug(`[oraclepool] Instance pool ${group.name} size matches current count`, { - current: groupCurrentCount, - size: poolDetails.instancePool.size, - newSize, + // mark any instances not previously seen as being launched now + result.push( + ...existingInstanceIds.filter((instanceId) => { + return !currentInstanceIds.includes(instanceId); + }), + ); + + ctx.logger.debug(`[oraclepool] Instance pool ${group.name} instances`, { instances: poolInstances.items }); + if (result.length > 0) { + ctx.logger.warn(`[oraclepool] Found instances in pool 
not in inventory, marking as launched now`, { + result, }); - } else { - ctx.logger.error(`[oraclepool] Instance pool ${group.name} size DOES NOT matches current count`, { - current: groupCurrentCount, - size: poolDetails.instancePool.size, + } + + // always use the group desired count for instance pools + const newSize = group.scalingOptions.desiredCount; + if (newSize == poolDetails.instancePool.size) { + // underlying pool size matches the desired count, so no need to update group + ctx.logger.info(`[oraclepool] Instance pool ${group.name} size matches desired count, no changes needed`, { newSize, }); + return result; } if (this.isDryRun) { - ctx.logger.info(`[oracle] Dry run enabled, instance pool size change skipped`, { newSize }); + ctx.logger.info(`[oraclepool] Dry run enabled, instance pool size change skipped`, { newSize }); } else { const updateResult = await this.computeManagementClient.updateInstancePool({ instancePoolId: group.instanceConfigurationId, @@ -142,13 +156,15 @@ export default class OracleInstancePoolManager implements CloudInstanceManager { instancePoolId: group.instanceConfigurationId, }); - const result = newPoolInstances.items - .map((instance) => { - return instance.id; - }) - .filter((instanceId) => { - return !existingInstanceIds.includes(instanceId); - }); + result.push( + ...newPoolInstances.items + .map((instance) => { + return instance.id; + }) + .filter((instanceId) => { + return !existingInstanceIds.includes(instanceId); + }), + ); ctx.logger.info(`[oraclepool] Finished launching all the instances in group ${group.name}`, { result }); diff --git a/src/test/oracle_instance_pool_manager.ts b/src/test/oracle_instance_pool_manager.ts index efdcd5b..ba9ac90 100644 --- a/src/test/oracle_instance_pool_manager.ts +++ b/src/test/oracle_instance_pool_manager.ts @@ -20,8 +20,35 @@ const group = { region: 'testregion', compartmentId: 'testcpt', instanceConfigurationId: 'testpoolid', + enableAutoScale: true, + enableLaunch: true, + 
scalingOptions: { + minDesired: 1, + maxDesired: 3, + desiredCount: 2, + scaleUpQuantity: 1, + scaleDownQuantity: 1, + scaleUpThreshold: 0.8, + scaleDownThreshold: 0.3, + scalePeriod: 60, + scaleUpPeriodsCount: 2, + scaleDownPeriodsCount: 2, + }, }; +const instancePool = { + id: group.instanceConfigurationId, + name: group.name, + compartmentId: group.compartmentId, + instanceConfigurationId: 'testid', + size: 2, +}; + +const instancePoolInstances = [{ id: 'testinstanceid-1' }, { id: 'testinstanceid-2' }]; +const currentInventoryInstances = instancePoolInstances.map((instance) => { + return { instanceId: instance.id }; +}); + describe('InstancePoolManager', () => { const manager = new OracleInstancePoolManager({ isDryRun: false, @@ -30,15 +57,9 @@ describe('InstancePoolManager', () => { }); const mockWaiters = { - forInstancePool: mock.fn((request, _) => { + forInstancePool: mock.fn(() => { return { - instancePool: { - id: request.instancePoolId, - name: group.name, - compartmentId: group.compartmentId, - instanceConfigurationId: 'testid', - size: 2, - }, + instancePool, }; }), forDetachInstancePoolInstance: mock.fn((request) => { @@ -53,27 +74,20 @@ describe('InstancePoolManager', () => { createWaiters: mock.fn(() => { return mockWaiters; }), - getInstancePool: mock.fn((request) => { + getInstancePool: mock.fn((_) => { return { - instancePool: { - id: request.instancePoolId, - name: group.name, - compartmentId: group.compartmentId, - instanceConfigurationId: 'testid', - size: 2, - }, + instancePool, }; }), listInstancePoolInstances: mock.fn(() => { - return { items: [{ id: 'testinstanceid-1' }, { id: 'testinstanceid-2' }] }; + return { items: instancePoolInstances }; }), updateInstancePool: mock.fn((request) => { - return { - id: request.instancePoolId, - name: group.name, - compartmentId: group.compartmentId, - instanceConfigurationId: 'testid', - size: request.size, + return { + instancePool: { + ...instancePool, + size: request.updateInstancePoolDetails.size, + 
}, }; }), }; @@ -98,6 +112,10 @@ describe('InstancePoolManager', () => { }; afterEach(() => { + mockComputeManagementClient.createWaiters.mock.resetCalls(); + mockComputeManagementClient.getInstancePool.mock.resetCalls(); + mockComputeManagementClient.listInstancePoolInstances.mock.resetCalls(); + mockComputeManagementClient.updateInstancePool.mock.resetCalls(); mock.restoreAll(); }); @@ -112,37 +130,124 @@ describe('InstancePoolManager', () => { retryableStatusCodes: [404, 429], }); log('TEST', 'ended getInstances test'); - assert.ok(instances); + assert.ok(instances, 'some instances should be returned'); log('TEST', 'found instances', instances); + assert.equal(instances.length, 2, 'two instances should be returned'); }); }); describe('launchInstances', () => { + console.log('Starting launchInstances test'); // This is a test for the launchInstances method test('will launch instances in a group', async () => { - console.log('Starting launchInstances test'); + console.log('Starting single launch test'); + const desiredCount = 3; mockWaiters.forInstancePool.mock.mockImplementationOnce((_) => { return { instancePool: { - id: group.instanceConfigurationId, - name: group.name, - compartmentId: group.compartmentId, - instanceConfigurationId: 'testid', - size: 3, + ...instancePool, + size: desiredCount, // this is the critical bit for showing that the instance pool has been updated }, }; }); - mockComputeManagementClient.listInstancePoolInstances.mock.mockImplementationOnce((_) => { - return { items: [{ id: 'testinstanceid-1' }, { id: 'testinstanceid-2' }, { id: 'new-instance-id' }] }; - }, 2); - const instances = await manager.launchInstances(context, group, 2, 1); - console.log(mockComputeManagementClient.updateInstancePool.mock.callCount()); - assert.equal(mockComputeManagementClient.updateInstancePool.mock.callCount(), 1); - assert.ok(instances); - assert.equal(instances[0], 'new-instance-id'); - assert.equal(instances.length, 1); + + // the second time 
listInstancePoolInstances is called, return a new instance with id 'new-instance-id' + mockComputeManagementClient.listInstancePoolInstances.mock.mockImplementationOnce( + (_) => { + return { + // list now includes new-instance-id + items: [...instancePoolInstances, { id: 'new-instance-id' }], + }; + }, + 1, // this is the critical count for mocking the second call instead of the first + ); + + // override group.scalingOptions.desiredCount to control size of instance pool + const lgroup = { ...group, scalingOptions: { ...group.scalingOptions, desiredCount: desiredCount } }; + const instances = await manager.launchInstances(context, lgroup, currentInventoryInstances, 1); + assert.equal( + mockComputeManagementClient.updateInstancePool.mock.callCount(), + 1, + 'updateInstancePool should be called', + ); + assert.ok(instances, 'some instances should be returned'); + assert.equal(instances[0], 'new-instance-id', 'new instance id should be returned'); + assert.equal(instances.length, 1, 'only one instance should be returned'); log('TEST', 'ended launchInstances test'); log('TEST', 'launched instances', instances); }); + test('will not launch instances in a group if desiredCount is already reached', async () => { + console.log('Starting skip launch test'); + const desiredCount = 3; + // return pool already in desired state + mockComputeManagementClient.getInstancePool.mock.mockImplementationOnce((_) => { + return { + instancePool: { + ...instancePool, + size: desiredCount, + }, + }; + }); + // when listInstancePoolInstances is called, return 3 instances including newest with id 'new-instance-id' + mockComputeManagementClient.listInstancePoolInstances.mock.mockImplementationOnce((_) => { + return { items: [...instancePoolInstances, { id: 'new-instance-id' }] }; + }); + + // override group.scalingOptions.desiredCount to control size of instance pool + const lgroup = { ...group, scalingOptions: { ...group.scalingOptions, desiredCount: desiredCount } }; + const instances = 
await manager.launchInstances( + context, + lgroup, + [...currentInventoryInstances, { instanceId: 'new-instance-id' }], + 1, + ); + assert.equal( + mockComputeManagementClient.updateInstancePool.mock.callCount(), + 0, + 'updateInstancePool should not be called', + ); + assert.equal(instances.length, 0, 'no instances should be returned'); + log('TEST', 'ended skip launch test'); + log('TEST', 'launched instances', instances); + }); + + test('will see previously launched instances in a group if missed the first time', async () => { + console.log('Starting find missing instances test'); + const desiredCount = 3; + // return pool already in desired state + mockComputeManagementClient.getInstancePool.mock.mockImplementationOnce((_) => { + return { + instancePool: { + ...instancePool, + size: desiredCount, + }, + }; + }); + // when listInstancePoolInstances is called, return 3 instances including newest with id 'new-instance-id' + mockComputeManagementClient.listInstancePoolInstances.mock.mockImplementationOnce((_) => { + return { items: [...instancePoolInstances, { id: 'new-instance-id' }] }; + }); + + // override group.scalingOptions.desiredCount to control size of instance pool + const lgroup = { ...group, scalingOptions: { ...group.scalingOptions, desiredCount: desiredCount } }; + + const instances = await manager.launchInstances( + context, + lgroup, + currentInventoryInstances, // we pass in currentInventoryInstances (with 2 entries) and expect to see the new instance as launched + 1, + ); + // still expect no pool updates + assert.equal( + mockComputeManagementClient.updateInstancePool.mock.callCount(), + 0, + 'updateInstancePool should not be called', + ); + assert.ok(instances, 'some instances should be returned'); + assert.equal(instances[0], 'new-instance-id', 'new instance id should be returned'); + assert.equal(instances.length, 1, 'only one instance should be returned'); + log('TEST', 'ended find missing instances test'); + log('TEST', 'launched 
instances', instances); + }); }); }); From a7dc6b3d1c7baec05f3bbf0ec31dae557d701619 Mon Sep 17 00:00:00 2001 From: Aaron van Meerten Date: Thu, 18 Apr 2024 11:50:12 -0500 Subject: [PATCH 5/8] fix current count in oracle instance manager --- src/oracle_instance_manager.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/src/oracle_instance_manager.ts b/src/oracle_instance_manager.ts index 5e98a3d..037935d 100644 --- a/src/oracle_instance_manager.ts +++ b/src/oracle_instance_manager.ts @@ -58,6 +58,7 @@ export default class OracleInstanceManager implements CloudInstanceManager { for (let i = 0; i < quantity; i++) { indexes.push(i); } + const groupCurrentCount = currentInventory.length; const result = await Promise.all( indexes.map(async (index) => { From c323a9a4d557f92802c21bfe64fdac9fe8fb1a7e Mon Sep 17 00:00:00 2001 From: Aaron van Meerten Date: Thu, 18 Apr 2024 12:34:26 -0500 Subject: [PATCH 6/8] better delay and wait strategies --- src/oracle_instance_pool_manager.ts | 83 ++++++++++++++--------------- 1 file changed, 40 insertions(+), 43 deletions(-) diff --git a/src/oracle_instance_pool_manager.ts b/src/oracle_instance_pool_manager.ts index 33638b6..9e4236c 100644 --- a/src/oracle_instance_pool_manager.ts +++ b/src/oracle_instance_pool_manager.ts @@ -7,12 +7,20 @@ import { CloudInstanceManager, CloudInstance } from './cloud_instance_manager'; import { workrequests } from 'oci-sdk'; import { InstanceState } from './instance_tracker'; -const maxTimeInSeconds = 60 * 60; // The duration for waiter configuration before failing. Currently set to 1 hour. -const maxDelayInSeconds = 30; // The max delay for the waiter configuration. Currently set to 30 seconds +const maxLaunchTimeInSeconds = 30; // The duration for waiter configuration before failing. Currently set to 30 seconds +const launchDelayInSeconds = 5; // The max delay for the waiter configuration. 
Currently set to 5 seconds -const waiterConfiguration: common.WaiterConfiguration = { - terminationStrategy: new common.MaxTimeTerminationStrategy(maxTimeInSeconds), - delayStrategy: new common.ExponentialBackoffDelayStrategy(maxDelayInSeconds), +const maxDetachTimeInSeconds = 180; // The duration for waiter configuration before failing. Currently set to 180 seconds +const maxDetachDelayInSeconds = 30; // The max delay for the waiter configuration. Currently set to 30 seconds + +const launchWaiterConfiguration: common.WaiterConfiguration = { + terminationStrategy: new common.MaxTimeTerminationStrategy(maxLaunchTimeInSeconds), + delayStrategy: new common.FixedTimeDelayStrategy(launchDelayInSeconds), +}; + +const detachWaiterConfiguration: common.WaiterConfiguration = { + terminationStrategy: new common.MaxTimeTerminationStrategy(maxDetachTimeInSeconds), + delayStrategy: new common.ExponentialBackoffDelayStrategy(maxDetachDelayInSeconds), }; export interface OracleInstancePoolManagerOptions { @@ -55,7 +63,7 @@ export default class OracleInstancePoolManager implements CloudInstanceManager { ctx.logger.info(`[oraclepool] Detaching instance ${instance}`); this.computeManagementClient.regionId = group.region; - const cwaiter = this.computeManagementClient.createWaiters(this.workRequestClient, waiterConfiguration); + const cwaiter = this.computeManagementClient.createWaiters(this.workRequestClient, detachWaiterConfiguration); const response = await cwaiter.forDetachInstancePoolInstance({ instancePoolId: group.instanceConfigurationId, detachInstancePoolInstanceDetails: { instanceId: instance }, @@ -131,26 +139,33 @@ export default class OracleInstancePoolManager implements CloudInstanceManager { } this.workRequestClient.regionId = group.region; - const cwaiter = this.computeManagementClient.createWaiters(this.workRequestClient, waiterConfiguration); - const runningPool = await cwaiter.forInstancePool( - { - instancePoolId: group.instanceConfigurationId, - }, - 
core.models.InstancePool.LifecycleState.Running, - ); - - ctx.logger.info(`[oraclepool] Instance pool for ${group.name} back in running state`, { runningPool }); - - if (runningPool.instancePool.size == newSize) { - ctx.logger.debug(`[oraclepool] Instance pool ${group.name} size matches new size`, { - newSize, - }); - } else { - ctx.logger.error(`[oraclepool] Instance pool ${group.name} size DOES NOT matches new size`, { - newSize, - }); + const cwaiter = this.computeManagementClient.createWaiters(this.workRequestClient, launchWaiterConfiguration); + try { + const runningPool = await cwaiter.forInstancePool( + { + instancePoolId: group.instanceConfigurationId, + }, + core.models.InstancePool.LifecycleState.Running, + ); + + ctx.logger.info(`[oraclepool] Instance pool for ${group.name} back in running state`, { runningPool }); + + if (runningPool.instancePool.size == newSize) { + ctx.logger.debug(`[oraclepool] Instance pool ${group.name} size matches new size`, { + newSize, + }); + } else { + ctx.logger.error(`[oraclepool] Instance pool ${group.name} size DOES NOT matches new size`, { + newSize, + }); + } + } catch (err) { + ctx.logger.error(`[oraclepool] Instance pool for ${group.name} failed to return to running state`, { err }); + // the next launch job will eventually see the new instances and return them } + ctx.logger.debug(`[oraclepool] Instance pool ${group.name} listing pool instances`); + const newPoolInstances = await this.computeManagementClient.listInstancePoolInstances({ compartmentId: group.compartmentId, instancePoolId: group.instanceConfigurationId, @@ -172,24 +187,6 @@ } async getInstances(ctx: Context, group: InstanceGroup, _: CloudRetryStrategy): Promise<Array<CloudInstance>> { - // const computeManagementClient = new core.ComputeManagementClient( - // { - // authenticationDetailsProvider: this.provider, - // }, - // { - // retryConfiguration: { - // terminationStrategy: new 
common.MaxTimeTerminationStrategy(cloudRetryStrategy.maxTimeInSeconds), - // delayStrategy: new common.ExponentialBackoffDelayStrategy(cloudRetryStrategy.maxDelayInSeconds), - // retryCondition: (response) => { - // return ( - // cloudRetryStrategy.retryableStatusCodes.filter((retryableStatusCode) => { - // return response.statusCode === retryableStatusCode; - // }).length > 0 - // ); - // }, - // }, - // }, - // ); const computeManagementClient = this.computeManagementClient; computeManagementClient.regionId = group.region; @@ -199,7 +196,7 @@ export default class OracleInstancePoolManager implements CloudInstanceManager { }); return poolInstances.items.map((instance) => { - ctx.logger.debug('Found instance in oracle pool', { instance }); + ctx.logger.debug('[oraclepool] Found instance in oracle pool', { instance }); return { instanceId: instance.id, displayName: instance.displayName, From b76b53264dbc7c2dd218e31f6f1e8684ed58114e Mon Sep 17 00:00:00 2001 From: Aaron van Meerten Date: Tue, 23 Apr 2024 12:21:37 -0500 Subject: [PATCH 7/8] draining instances support --- src/cloud_instance_manager_selector.ts | 3 ++ src/oracle_instance_pool_manager.ts | 31 +++++++++++++++--- src/test/oracle_instance_pool_manager.ts | 40 ++++++++++++++++++++++++ 3 files changed, 70 insertions(+), 4 deletions(-) diff --git a/src/cloud_instance_manager_selector.ts b/src/cloud_instance_manager_selector.ts index 086ffa0..aab695a 100644 --- a/src/cloud_instance_manager_selector.ts +++ b/src/cloud_instance_manager_selector.ts @@ -4,9 +4,11 @@ import NomadInstanceManager from './nomad_instance_manager'; import DigitalOceanInstanceManager from './digital_ocean_instance_manager'; import { CloudInstanceManager } from './cloud_instance_manager'; import OracleInstancePoolManager from './oracle_instance_pool_manager'; +import { InstanceTracker } from './instance_tracker'; export interface CloudInstanceManagerSelectorOptions { cloudProviders: string[]; + instanceTracker: InstanceTracker; isDryRun: 
boolean; ociConfigurationFilePath: string; ociConfigurationProfile: string; @@ -37,6 +39,7 @@ export class CloudInstanceManagerSelector { if (options.cloudProviders.includes('oraclepool')) { this.oracleInstancePoolManager = new OracleInstancePoolManager({ isDryRun: options.isDryRun, + instanceTracker: options.instanceTracker, ociConfigurationFilePath: options.ociConfigurationFilePath, ociConfigurationProfile: options.ociConfigurationProfile, }); diff --git a/src/oracle_instance_pool_manager.ts b/src/oracle_instance_pool_manager.ts index 9e4236c..492690a 100644 --- a/src/oracle_instance_pool_manager.ts +++ b/src/oracle_instance_pool_manager.ts @@ -5,7 +5,7 @@ import { Context } from './context'; import { CloudRetryStrategy } from './cloud_manager'; import { CloudInstanceManager, CloudInstance } from './cloud_instance_manager'; import { workrequests } from 'oci-sdk'; -import { InstanceState } from './instance_tracker'; +import { InstanceState, InstanceTracker } from './instance_tracker'; const maxLaunchTimeInSeconds = 30; // The duration for waiter configuration before failing. Currently set to 30 seconds const launchDelayInSeconds = 5; // The max delay for the waiter configuration. 
Currently set to 5 seconds @@ -25,11 +25,13 @@ const detachWaiterConfiguration: common.WaiterConfiguration = { export interface OracleInstancePoolManagerOptions { isDryRun: boolean; + instanceTracker: InstanceTracker; ociConfigurationFilePath: string; ociConfigurationProfile: string; } export default class OracleInstancePoolManager implements CloudInstanceManager { + private instanceTracker: InstanceTracker; private isDryRun: boolean; private provider: common.ConfigFileAuthenticationDetailsProvider; private computeManagementClient: core.ComputeManagementClient; @@ -37,6 +39,7 @@ export default class OracleInstancePoolManager implements CloudInstanceManager { constructor(options: OracleInstancePoolManagerOptions) { this.isDryRun = options.isDryRun; + this.instanceTracker = options.instanceTracker; this.provider = new common.ConfigFileAuthenticationDetailsProvider( options.ociConfigurationFilePath, options.ociConfigurationProfile, @@ -97,14 +100,24 @@ export default class OracleInstancePoolManager implements CloudInstanceManager { return instance.id; }); + const fullInventory = await this.instanceTracker.trimCurrent(ctx, group.name, false); + const currentInstanceIds = currentInventory.map((instance) => { return instance.instanceId; }); + const shuttingDownInstances = fullInventory + .filter((instance) => { + return !currentInstanceIds.includes(instance.instanceId); + }) + .map((instance) => { + return instance.instanceId; + }); + // mark any instances not previously seen as being launched now result.push( ...existingInstanceIds.filter((instanceId) => { - return !currentInstanceIds.includes(instanceId); + return !shuttingDownInstances.includes(instanceId) && !currentInstanceIds.includes(instanceId); }), ); @@ -115,8 +128,8 @@ export default class OracleInstancePoolManager implements CloudInstanceManager { }); } - // always use the group desired count for instance pools - const newSize = group.scalingOptions.desiredCount; + // always use the group desired count + 
shutting down count for instance pools + const newSize = group.scalingOptions.desiredCount + shuttingDownInstances.length; if (newSize == poolDetails.instancePool.size) { // underlying pool size matches the desired count, so no need to update group ctx.logger.info(`[oraclepool] Instance pool ${group.name} size matches desired count, no changes needed`, { @@ -125,6 +138,16 @@ export default class OracleInstancePoolManager implements CloudInstanceManager { return result; } + // never scale down via size, always do so by detaching instances on shutdown confirmation + if (newSize < poolDetails.instancePool.size) { + // underlying pool size would shrink with new size, so waiting for instances to be detached after confirming shutdown + ctx.logger.warn(`[oraclepool] Instance pool ${group.name} size would shrink, no changes applied`, { + size: poolDetails.instancePool.size, + newSize, + }); + return result; + } + if (this.isDryRun) { ctx.logger.info(`[oraclepool] Dry run enabled, instance pool size change skipped`, { newSize }); } else { diff --git a/src/test/oracle_instance_pool_manager.ts b/src/test/oracle_instance_pool_manager.ts index ba9ac90..28b5213 100644 --- a/src/test/oracle_instance_pool_manager.ts +++ b/src/test/oracle_instance_pool_manager.ts @@ -50,8 +50,13 @@ const currentInventoryInstances = instancePoolInstances.map((instance) => { }); describe('InstancePoolManager', () => { + const mockInstanceTracker = { + trimCurrent: mock.fn(() => Promise.resolve(currentInventoryInstances)), + }; + const manager = new OracleInstancePoolManager({ isDryRun: false, + instanceTracker: mockInstanceTracker, ociConfigurationFilePath: __dirname + '/test_oracle_config', ociConfigurationProfile: 'TEST', }); @@ -211,6 +216,41 @@ describe('InstancePoolManager', () => { log('TEST', 'launched instances', instances); }); + test('will not launch instances in a group if size would go down', async () => { + console.log('Starting skip scale down test'); + const desiredCount = 2; + // 
return pool with 1 more than desired + mockComputeManagementClient.getInstancePool.mock.mockImplementationOnce((_) => { + return { + instancePool: { + ...instancePool, + size: desiredCount + 1, + }, + }; + }); + // when listInstancePoolInstances is called, return 3 instances including newest with id 'new-instance-id' + mockComputeManagementClient.listInstancePoolInstances.mock.mockImplementationOnce((_) => { + return { items: [...instancePoolInstances, { id: 'new-instance-id' }] }; + }); + + // override group.scalingOptions.desiredCount to control size of instance pool + const lgroup = { ...group, scalingOptions: { ...group.scalingOptions, desiredCount: desiredCount } }; + const instances = await manager.launchInstances( + context, + lgroup, + [...currentInventoryInstances, { instanceId: 'new-instance-id' }], + 1, + ); + assert.equal( + mockComputeManagementClient.updateInstancePool.mock.callCount(), + 0, + 'updateInstancePool should not be called', + ); + assert.equal(instances.length, 0, 'no instances should be returned'); + log('TEST', 'ended skip scale down test'); + log('TEST', 'launched instances', instances); + }); + test('will see previously launched instances in a group if missed the first time', async () => { console.log('Starting find missing instances test'); const desiredCount = 3; From 2511da5b161b806f50e279224caca49fa683b014 Mon Sep 17 00:00:00 2001 From: Aaron van Meerten Date: Wed, 15 May 2024 09:28:20 -0500 Subject: [PATCH 8/8] wip --- src/test/oracle_instance_pool_manager.ts | 41 ++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/src/test/oracle_instance_pool_manager.ts b/src/test/oracle_instance_pool_manager.ts index 28b5213..ad91919 100644 --- a/src/test/oracle_instance_pool_manager.ts +++ b/src/test/oracle_instance_pool_manager.ts @@ -181,6 +181,7 @@ describe('InstancePoolManager', () => { log('TEST', 'ended launchInstances test'); log('TEST', 'launched instances', instances); }); + test('will not launch instances in a 
group if desiredCount is already reached', async () => { console.log('Starting skip launch test'); const desiredCount = 3; @@ -216,6 +217,46 @@ describe('InstancePoolManager', () => { log('TEST', 'launched instances', instances); }); + test('will not launch instances in a group if desiredCount plus shutdown count matches size', async () => { + console.log('Starting skip launch with shutdown test'); + const desiredCount = 2; + // return pool already has desired plus shutdown + mockComputeManagementClient.getInstancePool.mock.mockImplementationOnce((_) => { + return { + instancePool: { + ...instancePool, + size: desiredCount + 1, + }, + }; + }); + // when listInstancePoolInstances is called, return 3 instances including newest with id 'new-instance-id' + mockComputeManagementClient.listInstancePoolInstances.mock.mockImplementationOnce((_) => { + return { items: [...instancePoolInstances, { id: 'shutting-down-instance-id' }] }; + }); + + // override group.scalingOptions.desiredCount to control size of instance pool + const lgroup = { ...group, scalingOptions: { ...group.scalingOptions, desiredCount: desiredCount } }; + + // override the second trimCurrent call to include a shutting down instance + mockInstanceTracker.trimCurrent.mock.mockImplementationOnce(() => { + return Promise.resolve([...currentInventoryInstances, { instanceId: 'shutting-down-instance-id' }]); + }, 1); + + // we pass in currentInventoryInstances (with 2 entries) and expect to have the shutdown instance found via the mock above + const instances = await manager.launchInstances(context, lgroup, currentInventoryInstances, 1); + + console.log(mockInstanceTracker.trimCurrent.calls); + + assert.equal( + mockComputeManagementClient.updateInstancePool.mock.callCount(), + 0, + 'updateInstancePool should not be called', + ); + assert.equal(instances.length, 0, 'no instances should be returned'); + log('TEST', 'ended skip launch with shutdown test'); + log('TEST', 'launched instances', instances); + }); + 
test('will not launch instances in a group if size would go down', async () => { console.log('Starting skip scale down test'); const desiredCount = 2;