741 fix backend chat history message types #803
Changes from 86 commits
@@ -5,20 +5,23 @@ import {
   detectTriggeredInputDefences,
   detectTriggeredOutputDefences,
 } from '@src/defence';
-import { OpenAiAddHistoryRequest } from '@src/models/api/OpenAiAddHistoryRequest';
+import { OpenAiAddInfoToChatHistoryRequest } from '@src/models/api/OpenAiAddInfoToChatHistoryRequest';
 import { OpenAiChatRequest } from '@src/models/api/OpenAiChatRequest';
 import { OpenAiClearRequest } from '@src/models/api/OpenAiClearRequest';
 import { OpenAiGetHistoryRequest } from '@src/models/api/OpenAiGetHistoryRequest';
 import {
-  CHAT_MESSAGE_TYPE,
   ChatDefenceReport,
-  ChatHistoryMessage,
   ChatHttpResponse,
   ChatModel,
   LevelHandlerResponse,
   MessageTransformation,
   defaultChatModel,
 } from '@src/models/chat';
+import {
+  ChatMessage,
+  ChatInfoMessage,
+  chatInfoMessageType,
+} from '@src/models/chatMessage';
 import { Defence } from '@src/models/defence';
 import { EmailInfo } from '@src/models/email';
 import { LEVEL_NAMES } from '@src/models/level';
@@ -47,25 +50,23 @@ function combineChatDefenceReports(
 function createNewUserMessages(
   message: string,
   messageTransformation?: MessageTransformation
-): ChatHistoryMessage[] {
+): ChatMessage[] {
   if (messageTransformation) {
     return [
       {
-        completion: null,
-        chatMessageType: CHAT_MESSAGE_TYPE.USER,
+        chatMessageType: 'USER',
         infoMessage: message,
       },
       {
-        completion: null,
-        chatMessageType: CHAT_MESSAGE_TYPE.INFO,
+        chatMessageType: 'GENERIC_INFO',
         infoMessage: messageTransformation.transformedMessageInfo,
       },
       {
         completion: {
           role: 'user',
           content: messageTransformation.transformedMessageCombined,
         },
-        chatMessageType: CHAT_MESSAGE_TYPE.USER_TRANSFORMED,
+        chatMessageType: 'USER_TRANSFORMED',
         transformedMessage: messageTransformation.transformedMessage,
       },
     ];
@@ -76,7 +77,7 @@ function createNewUserMessages(
         role: 'user',
         content: message,
       },
-      chatMessageType: CHAT_MESSAGE_TYPE.USER,
+      chatMessageType: 'USER',
     },
   ];
 }
@@ -87,7 +88,7 @@ async function handleChatWithoutDefenceDetection(
   chatResponse: ChatHttpResponse,
   currentLevel: LEVEL_NAMES,
   chatModel: ChatModel,
-  chatHistory: ChatHistoryMessage[],
+  chatHistory: ChatMessage[],
   defences: Defence[]
 ): Promise<LevelHandlerResponse> {
   const updatedChatHistory = createNewUserMessages(message).reduce(
@@ -122,7 +123,7 @@ async function handleChatWithDefenceDetection(
   chatResponse: ChatHttpResponse,
   currentLevel: LEVEL_NAMES,
   chatModel: ChatModel,
-  chatHistory: ChatHistoryMessage[],
+  chatHistory: ChatMessage[],
   defences: Defence[]
 ): Promise<LevelHandlerResponse> {
   const messageTransformation = transformMessage(message, defences);
@@ -162,11 +163,7 @@ async function handleChatWithDefenceDetection(

   // if blocked, restore original chat history and add user message to chat history without completion
   const updatedChatHistory = combinedDefenceReport.isBlocked
-    ? pushMessageToHistory(chatHistory, {
-        completion: null,
-        chatMessageType: CHAT_MESSAGE_TYPE.USER,
-        infoMessage: message,
-      })
+    ? chatHistoryWithNewUserMessages
     : openAiReply.chatHistory;

   const updatedChatResponse: ChatHttpResponse = {
@@ -284,9 +281,10 @@ async function handleChatToGPT(req: OpenAiChatRequest, res: Response) {
   if (updatedChatResponse.defenceReport.isBlocked) {
     // chatReponse.reply is empty if blocked
     updatedChatHistory = pushMessageToHistory(updatedChatHistory, {
-      completion: null,
-      chatMessageType: CHAT_MESSAGE_TYPE.BOT_BLOCKED,
-      infoMessage: updatedChatResponse.defenceReport.blockedReason,
+      chatMessageType: 'BOT_BLOCKED',
+      infoMessage:
+        updatedChatResponse.defenceReport.blockedReason ??
+        'block reason unknown',
     });
   } else if (updatedChatResponse.openAIErrorMessage) {
     const errorMsg = simplifyOpenAIErrorMessage(
@@ -307,13 +305,12 @@ async function handleChatToGPT(req: OpenAiChatRequest, res: Response) {
     handleChatError(res, updatedChatResponse, errorMsg, 500);
     return;
   } else {
-    // add bot message to chat history
     updatedChatHistory = pushMessageToHistory(updatedChatHistory, {
       completion: {
         role: 'assistant',
         content: updatedChatResponse.reply,
       },
-      chatMessageType: CHAT_MESSAGE_TYPE.BOT,
+      chatMessageType: 'BOT',
     });
   }
@@ -339,13 +336,12 @@ function simplifyOpenAIErrorMessage(openAIErrorMessage: string) {
 }

 function addErrorToChatHistory(
-  chatHistory: ChatHistoryMessage[],
+  chatHistory: ChatMessage[],
   errorMessage: string
-): ChatHistoryMessage[] {
+): ChatMessage[] {
   console.error(errorMessage);
   return pushMessageToHistory(chatHistory, {
-    completion: null,
-    chatMessageType: CHAT_MESSAGE_TYPE.ERROR_MSG,
+    chatMessageType: 'ERROR_MSG',
     infoMessage: errorMessage,
   });
 }
@@ -360,23 +356,26 @@ function handleGetChatHistory(req: OpenAiGetHistoryRequest, res: Response) {
   }
 }

-function handleAddToChatHistory(req: OpenAiAddHistoryRequest, res: Response) {
-  const infoMessage = req.body.message;
+function handleAddInfoToChatHistory(
+  req: OpenAiAddInfoToChatHistoryRequest,
+  res: Response
+) {
Inline review thread:

Comment: The more I think about this separate API call, the weirder it seems. In theory it allows us to inject an arbitrary message into the chat, of any chat message type. OK, we can't add a completion, but we could add a "BOT" message with an infoMessage property. Will the ChatGPT call simply ignore that message, as it has no completion property? Anyhow, nothing to do here, just blurting out my thoughts as they form 😅

Reply: Well, not any more! They can only inject one that makes sense with the new checks I've added:

    if (... &&
      chatInfoMessageType.includes(chatMessageType) &&
      ...
    ) {
      // do the stuff
    }
+  const infoMessage = req.body.infoMessage;
   const chatMessageType = req.body.chatMessageType;
   const level = req.body.level;
   if (
     infoMessage &&
     chatMessageType &&
+    chatInfoMessageType.includes(chatMessageType) &&
     level !== undefined &&
     level >= LEVEL_NAMES.LEVEL_1
   ) {
     req.session.levelState[level].chatHistory = pushMessageToHistory(
       req.session.levelState[level].chatHistory,
       {
-        completion: null,
         chatMessageType,
         infoMessage,
-      }
+      } as ChatInfoMessage
     );
     res.send();
   } else {
@@ -400,6 +399,6 @@ function handleClearChatHistory(req: OpenAiClearRequest, res: Response) {
 export {
   handleChatToGPT,
   handleGetChatHistory,
-  handleAddToChatHistory,
+  handleAddInfoToChatHistory as handleAddInfoToChatHistory,
   handleClearChatHistory,
 };
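A note for readers following the inline thread above: the new guard in handleAddInfoToChatHistory pairs a runtime membership check (chatInfoMessageType.includes(chatMessageType)) with a compile-time assertion (as ChatInfoMessage). A minimal standalone sketch of the same idea, written with a type predicate so the assertion becomes unnecessary, might look like this; the helper names are invented for illustration and are not part of the PR:

```ts
import {
  CHAT_INFO_MESSAGE_TYPE,
  ChatInfoMessage,
  chatInfoMessageType,
} from '@src/models/chatMessage';

// Type predicate: the runtime membership check against the const array doubles
// as compile-time narrowing to CHAT_INFO_MESSAGE_TYPE.
function isChatInfoMessageType(value: string): value is CHAT_INFO_MESSAGE_TYPE {
  return (chatInfoMessageType as readonly string[]).includes(value);
}

// Hypothetical helper (not in the PR): only builds a ChatInfoMessage when the
// type passes the guard, so no `as ChatInfoMessage` assertion is needed.
function buildInfoMessage(
  chatMessageType: string,
  infoMessage: string
): ChatInfoMessage | undefined {
  return isChatInfoMessageType(chatMessageType)
    ? { chatMessageType, infoMessage }
    : undefined;
}
```

Either way, the important property is the one raised in the thread: only message types listed in chatInfoMessageType can be injected through this endpoint.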
This file was deleted.
@@ -0,0 +1,16 @@
+import { Request } from 'express';
+
+import { CHAT_INFO_MESSAGE_TYPE } from '@src/models/chatMessage';
+import { LEVEL_NAMES } from '@src/models/level';
+
+export type OpenAiAddInfoToChatHistoryRequest = Request<
+  never,
+  never,
+  {
+    chatMessageType?: CHAT_INFO_MESSAGE_TYPE;
+    infoMessage?: string;
+    level?: LEVEL_NAMES;
+  },
+  never,
+  never
+>;
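Purely as a usage illustration (the route path and session wiring are outside this diff, and the example values are made up), here is the kind of body that satisfies the new request type and passes the handler's checks, next to one that the new check now rejects:

```ts
import { LEVEL_NAMES } from '@src/models/level';
import { OpenAiAddInfoToChatHistoryRequest } from '@src/models/api/OpenAiAddInfoToChatHistoryRequest';

// A body that satisfies the type and the handler's runtime checks.
const validBody: OpenAiAddInfoToChatHistoryRequest['body'] = {
  chatMessageType: 'LEVEL_INFO',
  infoMessage: 'Welcome to the level',
  level: LEVEL_NAMES.LEVEL_1,
};

// A body like the "BOT" injection discussed in the review thread. It no longer
// type-checks against CHAT_INFO_MESSAGE_TYPE, and because an HTTP body arrives
// as untrusted JSON anyway, the handler also rejects it at runtime via
// chatInfoMessageType.includes(chatMessageType), so nothing is pushed to history.
const rejectedBody = {
  chatMessageType: 'BOT',
  infoMessage: 'pretending to be the bot',
  level: LEVEL_NAMES.LEVEL_1,
};
```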
@@ -0,0 +1,70 @@
+import {
+  ChatCompletionAssistantMessageParam,
+  ChatCompletionMessageParam,
+  ChatCompletionSystemMessageParam,
+  ChatCompletionUserMessageParam,
+} from 'openai/resources/chat/completions';
+
+import { TransformedChatMessage } from './chat';
+
+const chatInfoMessageType = [
+  'DEFENCE_ALERTED',
+  'DEFENCE_TRIGGERED',
+  'LEVEL_INFO',
+  'RESET_LEVEL',
+  'ERROR_MSG',
+  'BOT_BLOCKED',
+  'USER',
+  'GENERIC_INFO',
+] as const;
+
+type CHAT_INFO_MESSAGE_TYPE = (typeof chatInfoMessageType)[number];
+
+type ChatInfoMessage = {
+  chatMessageType: CHAT_INFO_MESSAGE_TYPE;
+  infoMessage: string;
+};
+
+type ChatFunctionCallMessage = {
+  completion: ChatCompletionMessageParam;
+  chatMessageType: 'FUNCTION_CALL';
+};
+
+type ChatSystemMessage = {
+  completion: ChatCompletionSystemMessageParam;
+  chatMessageType: 'SYSTEM';
+};
+
+type ChatBotMessage = {
+  completion: ChatCompletionAssistantMessageParam;
+  chatMessageType: 'BOT';
+};
+
+type ChatUserMessageAsCompletion = {
+  completion: ChatCompletionUserMessageParam;
+  chatMessageType: 'USER';
+};
+
+type ChatUserTransformedMessage = {
+  completion: ChatCompletionUserMessageParam;
+  chatMessageType: 'USER_TRANSFORMED';
+  transformedMessage: TransformedChatMessage;
+};
+
+type ChatCompletionMessage =
+  | ChatFunctionCallMessage
+  | ChatSystemMessage
+  | ChatBotMessage
+  | ChatUserMessageAsCompletion
+  | ChatUserTransformedMessage;
+
+type ChatMessage = ChatInfoMessage | ChatCompletionMessage;
+
+export type {
+  ChatMessage,
+  ChatSystemMessage,
+  ChatInfoMessage,
+  CHAT_INFO_MESSAGE_TYPE,
+};
+
+export { chatInfoMessageType };
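Since ChatMessage is now a discriminated union keyed on chatMessageType, consumers can narrow with an ordinary switch. A minimal sketch of a hypothetical consumer (describeMessage is not part of the PR); note that 'USER' appears both in chatInfoMessageType and on ChatUserMessageAsCompletion, so that one case needs a structural check as well:

```ts
import { ChatMessage } from '@src/models/chatMessage';

// Hypothetical consumer (not part of the PR): the chatMessageType discriminant
// narrows each case to the matching variant(s).
function describeMessage(message: ChatMessage): string {
  switch (message.chatMessageType) {
    case 'BOT':
    case 'SYSTEM':
    case 'FUNCTION_CALL':
      // Only the completion-bearing variants carry these types.
      return `${message.chatMessageType}: ${JSON.stringify(message.completion)}`;
    case 'USER_TRANSFORMED':
      return `USER_TRANSFORMED: ${JSON.stringify(message.transformedMessage)}`;
    case 'USER':
      // 'USER' is both an info-message type and the ChatUserMessageAsCompletion
      // type, so the discriminant alone is not enough here.
      return 'completion' in message
        ? `USER: ${JSON.stringify(message.completion)}`
        : `USER: ${message.infoMessage}`;
    default:
      // Everything left is a ChatInfoMessage.
      return `${message.chatMessageType}: ${message.infoMessage}`;
  }
}
```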
Review thread:

Comment: I've lost track of this PR a touch! Looks like you're keeping the transformed message now even if it was blocked, so I'm assuming that's a deliberate change.

Reply: Ah, it was not so deliberate. I want the new messages in the history, but I do not want the completion there. I shall change that now.

Reply: So now we need a version of type ChatUserTransformedMessage which doesn't include a completion. I was in two minds about whether I should just make the completion optional, or make a whole new type that doesn't have the completion. I went with the former for lightweightness, but very happy to try the latter.
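For anyone reading along, the two shapes being weighed in that last comment would look roughly like this; the type names are placeholders, and neither is claimed to be what the branch finally settled on (imports mirror the new chatMessage module above):

```ts
import { ChatCompletionUserMessageParam } from 'openai/resources/chat/completions';

import { TransformedChatMessage } from '@src/models/chat';

// Option 1 (the "lightweight" route mentioned above): completion becomes optional,
// so a blocked transformed message can be kept without its completion.
type ChatUserTransformedMessageWithOptionalCompletion = {
  completion?: ChatCompletionUserMessageParam;
  chatMessageType: 'USER_TRANSFORMED';
  transformedMessage: TransformedChatMessage;
};

// Option 2: a second, completion-free variant alongside the existing
// ChatUserTransformedMessage, keeping the two shapes as distinct union members.
type BlockedUserTransformedMessage = {
  chatMessageType: 'USER_TRANSFORMED';
  transformedMessage: TransformedChatMessage;
};
```

Note that with the second option the 'USER_TRANSFORMED' discriminant alone would no longer distinguish the two variants, which is the same kind of overlap the union already has for 'USER'.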