Skip to content

Commit

Permalink
[8.12] [Enterprise Search] Fix Continue button not activating when de…
Browse files Browse the repository at this point in the history
…ploying model (elastic#173878) (elastic#173885)

# Backport

This will backport the following commits from `main` to `8.12`:
- [[Enterprise Search] Fix Continue button not activating when deploying
model (elastic#173878)](elastic#173878)

<!--- Backport version: 8.9.7 -->

### Questions?
Please refer to the [Backport tool
documentation](https://github.com/sqren/backport)

<!--BACKPORT [{"author":{"name":"Adam
Demjen","email":"[email protected]"},"sourceCommit":{"committedDate":"2023-12-21T23:42:46Z","message":"[Enterprise
Search] Fix Continue button not activating when deploying model
(elastic#173878)\n\n## Summary\r\n\r\nFix for a minor usability issue the ML
inference pipeline creation flow.\r\nWhen selecting an undeployed model,
the Continue button is disabled,\r\nbecause the model needs to be at
least in the deploying state to be\r\neligible for selection. However
after clicking Deploy and seeing the\r\nmodel transition to deploying,
the button is still disabled. This PR\r\nfixes this
issue.\r\n\r\nBefore\r\n\r\n\r\nhttps://github.com/elastic/kibana/assets/14224983/f0633b42-6c3c-4aaa-8ffb-f516c3fe8376\r\n\r\nAfter\r\n\r\n\r\nhttps://github.com/elastic/kibana/assets/14224983/5ac37471-86fd-4fee-beb2-d6e89af17902\r\n\r\n###
Checklist\r\n- [x] [Unit or
functional\r\ntests](https://www.elastic.co/guide/en/kibana/master/development-tests.html)\r\nwere
updated or added to match the most common
scenarios","sha":"3b04e80c4b97afcaa302c81e391e24f0b3200d68","branchLabelMapping":{"^v8.13.0$":"main","^v(\\d+).(\\d+).\\d+$":"$1.$2"}},"sourcePullRequest":{"labels":["bug","release_note:skip","Team:EnterpriseSearch","v8.12.0","v8.12.1","v8.13.0"],"number":173878,"url":"https://github.com/elastic/kibana/pull/173878","mergeCommit":{"message":"[Enterprise
Search] Fix Continue button not activating when deploying model
(elastic#173878)\n\n## Summary\r\n\r\nFix for a minor usability issue the ML
inference pipeline creation flow.\r\nWhen selecting an undeployed model,
the Continue button is disabled,\r\nbecause the model needs to be at
least in the deploying state to be\r\neligible for selection. However
after clicking Deploy and seeing the\r\nmodel transition to deploying,
the button is still disabled. This PR\r\nfixes this
issue.\r\n\r\nBefore\r\n\r\n\r\nhttps://github.com/elastic/kibana/assets/14224983/f0633b42-6c3c-4aaa-8ffb-f516c3fe8376\r\n\r\nAfter\r\n\r\n\r\nhttps://github.com/elastic/kibana/assets/14224983/5ac37471-86fd-4fee-beb2-d6e89af17902\r\n\r\n###
Checklist\r\n- [x] [Unit or
functional\r\ntests](https://www.elastic.co/guide/en/kibana/master/development-tests.html)\r\nwere
updated or added to match the most common
scenarios","sha":"3b04e80c4b97afcaa302c81e391e24f0b3200d68"}},"sourceBranch":"main","suggestedTargetBranches":["8.12"],"targetPullRequestStates":[{"branch":"8.12","label":"v8.12.0","labelRegex":"^v(\\d+).(\\d+).\\d+$","isSourceBranch":false,"state":"NOT_CREATED"},{"branch":"main","label":"v8.13.0","labelRegex":"^v8.13.0$","isSourceBranch":true,"state":"MERGED","url":"https://github.com/elastic/kibana/pull/173878","number":173878,"mergeCommit":{"message":"[Enterprise
Search] Fix Continue button not activating when deploying model
(elastic#173878)\n\n## Summary\r\n\r\nFix for a minor usability issue the ML
inference pipeline creation flow.\r\nWhen selecting an undeployed model,
the Continue button is disabled,\r\nbecause the model needs to be at
least in the deploying state to be\r\neligible for selection. However
after clicking Deploy and seeing the\r\nmodel transition to deploying,
the button is still disabled. This PR\r\nfixes this
issue.\r\n\r\nBefore\r\n\r\n\r\nhttps://github.com/elastic/kibana/assets/14224983/f0633b42-6c3c-4aaa-8ffb-f516c3fe8376\r\n\r\nAfter\r\n\r\n\r\nhttps://github.com/elastic/kibana/assets/14224983/5ac37471-86fd-4fee-beb2-d6e89af17902\r\n\r\n###
Checklist\r\n- [x] [Unit or
functional\r\ntests](https://www.elastic.co/guide/en/kibana/master/development-tests.html)\r\nwere
updated or added to match the most common
scenarios","sha":"3b04e80c4b97afcaa302c81e391e24f0b3200d68"}}]}]
BACKPORT-->

Co-authored-by: Adam Demjen <[email protected]>
  • Loading branch information
kibanamachine and demjened authored Dec 22, 2023
1 parent 609ad99 commit 8794c84
Show file tree
Hide file tree
Showing 4 changed files with 61 additions and 11 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -573,6 +573,32 @@ describe('MlInferenceLogic', () => {
});

describe('listeners', () => {
describe('clearModelPlaceholderFlag', () => {
  // Seed the modal configuration with the given model selected as a placeholder.
  const selectPlaceholderModel = (modelID: string) => {
    MLInferenceLogic.actions.setInferencePipelineConfiguration({
      ...MLInferenceLogic.values.addInferencePipelineModal.configuration,
      modelID,
      isModelPlaceholderSelected: true,
    });
  };
  // Read back the placeholder flag from the modal configuration.
  const placeholderFlag = () =>
    MLInferenceLogic.values.addInferencePipelineModal.configuration.isModelPlaceholderSelected;

  it('sets placeholder flag false for selected model', () => {
    selectPlaceholderModel('unit-test-model');

    MLInferenceLogic.actions.clearModelPlaceholderFlag('unit-test-model');

    expect(placeholderFlag()).toBe(false);
  });
  it('leaves placeholder flag unmodified if another model was selected', () => {
    selectPlaceholderModel('unit-test-model');

    MLInferenceLogic.actions.clearModelPlaceholderFlag('some-other-model-id');

    expect(placeholderFlag()).toBe(true);
  });
});
describe('createPipeline', () => {
const mockModelConfiguration = {
...DEFAULT_VALUES.addInferencePipelineModal,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -133,6 +133,7 @@ export interface MLInferenceProcessorsActions {
>['apiSuccess'];
attachPipeline: () => void;
clearFetchedPipeline: FetchPipelineApiLogicActions['apiReset'];
clearModelPlaceholderFlag: (modelId: string) => { modelId: string };
createApiError: Actions<
CreateMlInferencePipelineApiLogicArgs,
CreateMlInferencePipelineResponse
Expand Down Expand Up @@ -222,13 +223,13 @@ export const MLInferenceLogic = kea<
}),
attachPipeline: true,
clearFormErrors: true,
clearModelPlaceholderFlag: (modelId: string) => ({ modelId }),
createPipeline: true,
onAddInferencePipelineStepChange: (step: AddInferencePipelineSteps) => ({ step }),
removeFieldFromMapping: (fieldName: string) => ({ fieldName }),
selectExistingPipeline: (pipelineName: string) => ({ pipelineName }),
selectFields: (fieldNames: string[]) => ({ fieldNames }),
setAddInferencePipelineStep: (step: AddInferencePipelineSteps) => ({ step }),
setFormErrors: (inputErrors: AddInferencePipelineFormErrors) => ({ inputErrors }),
setIndexName: (indexName: string) => ({ indexName }),
setInferencePipelineConfiguration: (configuration: InferencePipelineConfiguration) => ({
configuration,
Expand Down Expand Up @@ -299,6 +300,19 @@ export const MLInferenceLogic = kea<
pipelineName,
});
},
clearModelPlaceholderFlag: ({ modelId }) => {
  // Configuration currently held by the "add inference pipeline" modal.
  const configuration = values.addInferencePipelineModal.configuration;

  // Only act when the cleared model is still the selected one; if the user
  // clicked away to a different model, leave the flag untouched.
  if (configuration.modelID === modelId) {
    actions.setInferencePipelineConfiguration({
      ...configuration,
      isModelPlaceholderSelected: false,
    });
  }
},
createPipeline: () => {
const {
addInferencePipelineModal: { configuration, indexName },
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,15 @@ describe('ModelSelectLogic', () => {

expect(ModelSelectLogic.actions.startPollingModels).toHaveBeenCalled();
});
it('sets selected model as non-placeholder', () => {
  // jest.spyOn returns the mock installed on the action; assert through it.
  const clearFlagSpy = jest.spyOn(
    ModelSelectLogic.actions,
    'clearModelPlaceholderFlagFromMLInferenceLogic'
  );

  ModelSelectLogic.actions.createModelSuccess(CREATE_MODEL_API_RESPONSE);

  expect(clearFlagSpy).toHaveBeenCalledWith(CREATE_MODEL_API_RESPONSE.modelId);
});
});

describe('fetchModels', () => {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,24 +32,22 @@ import {
} from './ml_inference_logic';

/**
 * Actions available on ModelSelectLogic: its own actions plus actions
 * connected from MLInferenceLogic, CreateModelApiLogic, StartModelApiLogic
 * and the cached fetch-models logic. Members are kept alphabetized.
 */
export interface ModelSelectActions {
  clearModelPlaceholderFlagFromMLInferenceLogic: MLInferenceProcessorsActions['clearModelPlaceholderFlag'];
  createModel: (modelId: string) => { modelId: string };
  createModelError: CreateModelApiLogicActions['apiError'];
  createModelMakeRequest: CreateModelApiLogicActions['makeRequest'];
  createModelSuccess: CreateModelApiLogicActions['apiSuccess'];
  fetchModels: () => void;
  fetchModelsError: CachedFetchModlesApiLogicActions['apiError'];
  fetchModelsMakeRequest: CachedFetchModlesApiLogicActions['makeRequest'];
  fetchModelsSuccess: CachedFetchModlesApiLogicActions['apiSuccess'];
  setInferencePipelineConfiguration: MLInferenceProcessorsActions['setInferencePipelineConfiguration'];
  setInferencePipelineConfigurationFromMLInferenceLogic: MLInferenceProcessorsActions['setInferencePipelineConfiguration'];
  startModel: (modelId: string) => { modelId: string };
  // NOTE(review): typed from CreateModelApiLogicActions — confirm whether
  // StartModelApiLogicActions['apiError'] was intended here.
  startModelError: CreateModelApiLogicActions['apiError'];
  startModelMakeRequest: StartModelApiLogicActions['makeRequest'];
  startModelSuccess: StartModelApiLogicActions['apiSuccess'];
  startPollingModels: CachedFetchModlesApiLogicActions['startPolling'];
}

export interface ModelSelectValues {
Expand Down Expand Up @@ -96,6 +94,7 @@ export const ModelSelectLogic = kea<MakeLogicType<ModelSelectValues, ModelSelect
MLInferenceLogic,
[
'setInferencePipelineConfiguration as setInferencePipelineConfigurationFromMLInferenceLogic',
'clearModelPlaceholderFlag as clearModelPlaceholderFlagFromMLInferenceLogic',
],
StartModelApiLogic,
[
Expand Down Expand Up @@ -126,18 +125,20 @@ export const ModelSelectLogic = kea<MakeLogicType<ModelSelectValues, ModelSelect
// Forward a model-creation request for the given model ID to the API logic.
createModel: ({ modelId }) => {
actions.createModelMakeRequest({ modelId });
},
createModelSuccess: () => {
// Model creation succeeded: refresh model state and clear the placeholder
// flag so the created model becomes selectable (enables the Continue button).
createModelSuccess: (response) => {
// Poll so the UI picks up the model's state transition after creation.
actions.startPollingModels();
// The create action succeeded, so the model is no longer a placeholder
actions.clearModelPlaceholderFlagFromMLInferenceLogic(response.modelId);
},
// Trigger a (possibly cached) fetch of the available models.
fetchModels: () => {
actions.fetchModelsMakeRequest({});
},
startModel: ({ modelId }) => {
actions.startModelMakeRequest({ modelId });
},
// Relay configuration changes to MLInferenceLogic's connected action.
setInferencePipelineConfiguration: ({ configuration }) => {
actions.setInferencePipelineConfigurationFromMLInferenceLogic(configuration);
},
// Forward a model-deployment (start) request for the given model ID.
startModel: ({ modelId }) => {
actions.startModelMakeRequest({ modelId });
},
// Deployment started: poll so the UI reflects the model's state transition.
startModelSuccess: () => {
actions.startPollingModels();
},
Expand Down

0 comments on commit 8794c84

Please sign in to comment.