
Commit

finish merge
pmarsh-scottlogic committed Feb 15, 2024
1 parent 4874b0d commit 3a65b9d
Showing 6 changed files with 12 additions and 72 deletions.
5 changes: 2 additions & 3 deletions backend/src/document.ts
@@ -103,12 +103,11 @@ async function initDocumentVectors() {
 	);
 
 	// embed and store the splits - will use env variable for API key
-	const embeddings = new OpenAIEmbeddings();
 	const docVector = await MemoryVectorStore.fromDocuments(
 		commonAndLevelDocuments,
-		embeddings
+		new OpenAIEmbeddings()
 	);
-	// store the document vectors for the level
+
 	docVectors.push({
 		level,
 		docVector,
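For context on the inlined new OpenAIEmbeddings() call, here is a minimal sketch (not part of this commit) of how a MemoryVectorStore built this way can be queried. It assumes the LangChain JS import paths used elsewhere in this diff; the documents and query below are illustrative only.

import { Document } from 'langchain/document';
import { OpenAIEmbeddings } from 'langchain/embeddings/openai';
import { MemoryVectorStore } from 'langchain/vectorstores/memory';

async function demoVectorStore() {
	// Stand-ins for commonAndLevelDocuments; the real splits come from the level documents.
	const docs = [
		new Document({ pageContent: 'The CEO is Bill.' }),
		new Document({ pageContent: 'The company builds software.' }),
	];

	// With no constructor arguments, OpenAIEmbeddings reads OPENAI_API_KEY from the
	// environment, which is what the "will use env variable for API key" comment refers to.
	const docVector = await MemoryVectorStore.fromDocuments(docs, new OpenAIEmbeddings());

	// Retrieve the single closest split to a question.
	const [closest] = await docVector.similaritySearch('Who is the CEO?', 1);
	console.log(closest.pageContent);
}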
9 changes: 3 additions & 6 deletions backend/test/integration/openai.test.ts
@@ -19,17 +19,14 @@ jest.mock('openai', () => ({
 	})),
 }));
 
-// mock the queryPromptEvaluationModel function
+// mock the evaluatePrompt function
 jest.mock('@src/langchain', () => {
 	const originalModule =
 		jest.requireActual<typeof import('@src/langchain')>('@src/langchain');
 	return {
 		...originalModule,
-		queryPromptEvaluationModel: () => {
-			return {
-				isMalicious: false,
-				reason: '',
-			};
+		evaluatePrompt: () => {
+			return false;
 		},
 	};
 });
1 change: 0 additions & 1 deletion backend/test/unit/controller/chatController.test.ts
@@ -709,7 +709,6 @@ describe('handleChatToGPT unit tests', () => {
 			[...existingHistory, ...newTransformationChatMessages],
 			[],
 			mockChatModel,
-			'[pre message] hello bot [post message]',
 			LEVEL_NAMES.SANDBOX
 		);
 
53 changes: 0 additions & 53 deletions backend/test/unit/langchain.ts/formatPromptEvaluation.test.ts

This file was deleted.

14 changes: 6 additions & 8 deletions
@@ -2,7 +2,7 @@ import { afterEach, test, jest, expect } from '@jest/globals';
 import { OpenAI } from 'langchain/llms/openai';
 import { PromptTemplate } from 'langchain/prompts';
 
-import { queryPromptEvaluationModel } from '@src/langchain';
+import { evaluatePrompt } from '@src/langchain';
 import {
 	promptEvalContextTemplate,
 	promptEvalPrompt,
@@ -43,7 +43,7 @@ afterEach(() => {
 });
 
 test('WHEN we query the prompt evaluation model THEN it is initialised', async () => {
-	await queryPromptEvaluationModel('some input', promptEvalPrompt);
+	await evaluatePrompt('some input', promptEvalPrompt);
 	expect(mockFromTemplate).toHaveBeenCalledTimes(1);
 	expect(mockFromTemplate).toHaveBeenCalledWith(
 		`${promptEvalPrompt}\n${promptEvalContextTemplate}`
@@ -53,19 +53,17 @@ test('WHEN we query the prompt evaluation model THEN it is initialised', async (
 test('GIVEN the prompt evaluation model is not initialised WHEN it is asked to evaluate an input it returns not malicious', async () => {
 	mockPromptEvalChain.call.mockResolvedValueOnce({ promptEvalOutput: '' });
 
-	const result = await queryPromptEvaluationModel('message', 'Prompt');
+	const result = await evaluatePrompt('message', 'Prompt');
 
-	expect(result).toEqual({
-		isMalicious: false,
-	});
+	expect(result).toEqual(false);
 });
 
 test('GIVEN the users api key supports GPT-4 WHEN the prompt evaluation model is initialised THEN it is initialised with GPT-4', async () => {
 	mockValidModels = ['gpt-4', 'gpt-3.5-turbo', 'gpt-3'];
 
 	const prompt = 'this is a test prompt. ';
 
-	await queryPromptEvaluationModel('some input', prompt);
+	await evaluatePrompt('some input', prompt);
 
 	expect(OpenAI).toHaveBeenCalledWith({
 		modelName: 'gpt-4',
Expand All @@ -79,7 +77,7 @@ test('GIVEN the users api key does not support GPT-4 WHEN the prompt evaluation

const prompt = 'this is a test prompt. ';

await queryPromptEvaluationModel('some input', prompt);
await evaluatePrompt('some input', prompt);

expect(OpenAI).toHaveBeenCalledWith({
modelName: 'gpt-3.5-turbo',
Expand Down
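These assertions capture the new contract: evaluatePrompt resolves to a plain boolean (false meaning not malicious) rather than the old { isMalicious, reason } object. A hypothetical call site, shown only for illustration (the guard function below is an assumption, not code from this repository), would branch on the boolean directly:

import { evaluatePrompt } from '@src/langchain';

// Hypothetical guard (assumption, not the repository's implementation):
// allow a message to reach the chat model only when the evaluation model
// does not flag it as malicious.
async function isMessageAllowed(
	message: string,
	promptEvalPrompt: string
): Promise<boolean> {
	const isMalicious = await evaluatePrompt(message, promptEvalPrompt);
	return !isMalicious;
}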
2 changes: 1 addition & 1 deletion backend/test/unit/langchain.ts/initialiseQAModel.test.ts
@@ -117,7 +117,7 @@ test('GIVEN the QA LLM WHEN a question is asked THEN it is initialised AND it an
 
 	expect(mockFromLLM).toHaveBeenCalledTimes(1);
 	expect(mockRetrievalQAChain.call).toHaveBeenCalledTimes(1);
-	expect(answer.reply).toEqual('The CEO is Bill.');
+	expect(answer).toEqual('The CEO is Bill.');
 });
 
 test('GIVEN the users api key supports GPT-4 WHEN the QA model is initialised THEN it is initialised with GPT-4', async () => {
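The changed assertion suggests the QA helper now resolves to the answer string itself rather than an object with a reply field. A minimal sketch of that pattern using LangChain's RetrievalQAChain, as mocked in this test file (the askDocuments function and its document content are illustrative assumptions, not the repository's code):

import { OpenAI } from 'langchain/llms/openai';
import { RetrievalQAChain } from 'langchain/chains';
import { Document } from 'langchain/document';
import { OpenAIEmbeddings } from 'langchain/embeddings/openai';
import { MemoryVectorStore } from 'langchain/vectorstores/memory';

// Illustrative QA helper: answer a question over an in-memory store and
// return the answer text directly, matching the string asserted above.
async function askDocuments(question: string): Promise<string> {
	const store = await MemoryVectorStore.fromDocuments(
		[new Document({ pageContent: 'The CEO is Bill.' })],
		new OpenAIEmbeddings()
	);
	const qaChain = RetrievalQAChain.fromLLM(new OpenAI({ temperature: 0 }), store.asRetriever());
	const response = await qaChain.call({ query: question });
	// RetrievalQAChain returns its answer under the `text` output key.
	return response.text as string;
}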
