From e048a84dc084e2601f8e4f4f2ba85dc22da09585 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=BB=84=E5=AE=87=E6=89=AC?=
Date: Fri, 7 Jun 2024 08:54:01 +0800
Subject: [PATCH] fix glm4 template

---
 src/model.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/model.cpp b/src/model.cpp
index 1f9e0723..9e497ff4 100644
--- a/src/model.cpp
+++ b/src/model.cpp
@@ -448,7 +448,7 @@ namespace fastllm {
             // ChatGLM采用拼接token的方法，需要强行指定分割词的TokenID
             model->pre_prompt = "";
             model->user_role = ("<FLM_FIX_TOKEN_" + std::to_string(model->weight.tokenizer.GetTokenId("<|user|>")) + ">\n");
-            model->bot_role = ("<FLM_FIX_TOKEN_" + std::to_string(model->weight.tokenizer.GetTokenId("<|assistant|>")) + ">");
+            model->bot_role = ("<FLM_FIX_TOKEN_" + std::to_string(model->weight.tokenizer.GetTokenId("<|assistant|>")) + ">\n");
             model->history_sep = "";
             model->weight.tokenizer.type = Tokenizer::TokenizerType::QWEN;
         } else {