# [Obs AI Assistant] Fix alerts function (elastic#203695)
## Summary

### Problem
With the merge of PR elastic#183756, the alerts function stopped working in the Obs AI Assistant because the query used to find alerts was changed.

### Solution
Revert the change made to the query, so that the KQL filter and the active-status filter are spread into the bool `filter` array again rather than into the enclosing filter object. A sketch of the intended query shape follows.
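For illustration, here is a minimal sketch of the corrected query shape in TypeScript. The constant values are stand-ins so the example is self-contained; in the plugin they come from the function arguments and from `@kbn/rule-data-utils`:

```ts
// Stand-in values for illustration (assumptions, not the plugin's actual wiring).
const start = 'now-100h';
const end = 'now';
const includeRecovered = false;
const ALERT_STATUS = 'kibana.alert.status';
const ALERT_STATUS_ACTIVE = 'active';
const kqlQuery: object[] = []; // filters parsed from the user-supplied KQL

// Fixed shape: every filter is a sibling element of the bool `filter` array.
const query = {
  bool: {
    filter: [
      { range: { '@timestamp': { gte: start, lte: end } } },
      ...kqlQuery,
      ...(!includeRecovered
        ? [{ term: { [ALERT_STATUS]: ALERT_STATUS_ACTIVE } }]
        : []),
    ],
  },
};
```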

### Checklist

- [x] The PR description includes the appropriate Release Notes section,
and the correct `release_note:*` label is applied per the
[guidelines](https://www.elastic.co/guide/en/kibana/master/contributing.html#kibana-release-notes-process)

(cherry picked from commit d9c1cd3)
viduni94 committed Dec 11, 2024
1 parent e410895 commit 4351013
Showing 3 changed files with 84 additions and 61 deletions.
```diff
@@ -196,17 +196,17 @@ export function registerAlertsFunction({
                     lte: end,
                   },
                 },
-                ...kqlQuery,
-                ...(!includeRecovered
-                  ? [
-                      {
-                        term: {
-                          [ALERT_STATUS]: ALERT_STATUS_ACTIVE,
-                        },
-                      },
-                    ]
-                  : []),
               },
+              ...kqlQuery,
+              ...(!includeRecovered
+                ? [
+                    {
+                      term: {
+                        [ALERT_STATUS]: ALERT_STATUS_ACTIVE,
+                      },
+                    },
+                  ]
+                : []),
             ],
           },
         },
```
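The bug this hunk reverts is subtle: the spreads sat inside the object that wraps the `range` filter instead of in the `filter` array itself. Spreading an array into an object literal copies its indices as string keys rather than contributing array elements, so the KQL and alert-status filters were silently mangled. A standalone sketch (the field names are illustrative, not from the source):

```ts
const kqlQuery = [{ term: { 'service.name': 'web' } }];

// Broken shape (pre-fix): an array spread inside an object literal.
const broken = { range: { '@timestamp': { gte: 'now-1h' } }, ...kqlQuery };
console.log(JSON.stringify(broken));
// {"0":{"term":{"service.name":"web"}},"range":{"@timestamp":{"gte":"now-1h"}}}
// The clause becomes an index-keyed property, which is not a valid
// query DSL clause inside a bool filter.

// Fixed shape: the spreads are siblings in the `filter` array.
const fixed = [{ range: { '@timestamp': { gte: 'now-1h' } } }, ...kqlQuery];
console.log(fixed.length); // 2 — both clauses are genuine filter elements
```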
```diff
@@ -137,57 +137,6 @@ export default function ApiTest({ getService }: FtrProviderContext) {
       ]);
     });
 
-    it.skip('returns a useful error if the request fails', async () => {
-      const interceptor = proxy.intercept('conversation', () => true);
-
-      const passThrough = new PassThrough();
-
-      supertest
-        .post(CHAT_API_URL)
-        .set('kbn-xsrf', 'foo')
-        .send({
-          name: 'my_api_call',
-          messages,
-          connectorId,
-          functions: [],
-          scopes: ['all'],
-        })
-        .expect(200)
-        .pipe(passThrough);
-
-      let data: string = '';
-
-      passThrough.on('data', (chunk) => {
-        data += chunk.toString('utf-8');
-      });
-
-      const simulator = await interceptor.waitForIntercept();
-
-      await simulator.status(400);
-
-      await simulator.rawWrite(
-        JSON.stringify({
-          error: {
-            code: 'context_length_exceeded',
-            message:
-              "This model's maximum context length is 8192 tokens. However, your messages resulted in 11036 tokens. Please reduce the length of the messages.",
-            param: 'messages',
-            type: 'invalid_request_error',
-          },
-        })
-      );
-
-      await simulator.rawEnd();
-
-      await new Promise<void>((resolve) => passThrough.on('end', () => resolve()));
-
-      const response = JSON.parse(data.trim());
-
-      expect(response.error.message).to.be(
-        `Token limit reached. Token limit is 8192, but the current conversation has 11036 tokens.`
-      );
-    });
-
     describe('security roles and access privileges', () => {
       it('should deny access for users without the ai_assistant privilege', async () => {
         try {
```
```diff
@@ -0,0 +1,74 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { MessageRole, MessageAddEvent } from '@kbn/observability-ai-assistant-plugin/common';
+import expect from '@kbn/expect';
+import { LlmProxy, createLlmProxy } from '../../../common/create_llm_proxy';
+import { FtrProviderContext } from '../../../common/ftr_provider_context';
+import { getMessageAddedEvents, invokeChatCompleteWithFunctionRequest } from './helpers';
+import {
+  createProxyActionConnector,
+  deleteActionConnector,
+} from '../../../common/action_connectors';
+
+export default function ApiTest({ getService }: FtrProviderContext) {
+  const supertest = getService('supertest');
+  const log = getService('log');
+  const observabilityAIAssistantAPIClient = getService('observabilityAIAssistantAPIClient');
+
+  describe('when calling the alerts function', () => {
+    let proxy: LlmProxy;
+    let connectorId: string;
+    let alertsEvents: MessageAddEvent[];
+
+    const start = 'now-100h';
+    const end = 'now';
+
+    before(async () => {
+      proxy = await createLlmProxy(log);
+      connectorId = await createProxyActionConnector({ supertest, log, port: proxy.getPort() });
+
+      void proxy
+        .intercept('conversation', () => true, 'Hello from LLM Proxy')
+        .completeAfterIntercept();
+
+      const alertsResponseBody = await invokeChatCompleteWithFunctionRequest({
+        connectorId,
+        observabilityAIAssistantAPIClient,
+        functionCall: {
+          name: 'alerts',
+          trigger: MessageRole.Assistant,
+          arguments: JSON.stringify({ start, end }),
+        },
+      });
+
+      await proxy.waitForAllInterceptorsSettled();
+
+      alertsEvents = getMessageAddedEvents(alertsResponseBody);
+    });
+
+    after(async () => {
+      proxy.close();
+      await deleteActionConnector({ supertest, connectorId, log });
+    });
+
+    // This test ensures that invoking the alerts function does not result in an error.
+    it('should execute the function without any errors', async () => {
+      const alertsFunctionResponse = alertsEvents[0];
+      expect(alertsFunctionResponse.message.message.name).to.be('alerts');
+
+      const parsedAlertsResponse = JSON.parse(alertsFunctionResponse.message.message.content!);
+
+      expect(parsedAlertsResponse).not.to.have.property('error');
+      expect(parsedAlertsResponse).to.have.property('total');
+      expect(parsedAlertsResponse).to.have.property('alerts');
+      expect(parsedAlertsResponse.alerts).to.be.an('array');
+      expect(parsedAlertsResponse.total).to.be(0);
+      expect(parsedAlertsResponse.alerts.length).to.be(0);
+    });
+  });
+}
```
