
Commit

Fix prompt output issue
ProbablyFaiz committed Sep 22, 2024
1 parent 7cee793 commit 2bb0a02
Showing 1 changed file with 4 additions and 1 deletion.
5 changes: 4 additions & 1 deletion rl/llm/engines/client.py
@@ -1,3 +1,4 @@
+import copy
 import json
 import re
 import tempfile
@@ -249,6 +250,8 @@ def generate(self, prompt: ChatInput) -> InferenceOutput:
                 "ClientEngine requires a list of dicts, in the OpenAI API style."
             )
 
+        original_prompt = copy.deepcopy(prompt)
+
         system_prompt = None
         if prompt[0]["role"] == "system":
             system_prompt = prompt[0]["content"]
@@ -261,7 +264,7 @@ def generate(self, prompt: ChatInput) -> InferenceOutput:
             max_tokens=self.llm_config.max_new_tokens,
         )
         return InferenceOutput(
-            prompt=prompt,  # type: ignore
+            prompt=original_prompt,
             text=message.content[0].text,
             metadata={
                 "model": self.llm_config.model_name_or_path,
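For context, a minimal standalone sketch of the bug this commit appears to fix. It assumes the elided code between the hunks removes the system message from `prompt` in place before the request is sent, so the returned InferenceOutput would previously echo the mutated list rather than what the caller passed in. The helper names below are illustrative only and are not part of client.py.

import copy

# Hypothetical reproduction (not part of client.py); assumes generate()
# pops the system message off `prompt` in place before building the output.

def echo_prompt_buggy(prompt: list[dict]) -> list[dict]:
    if prompt and prompt[0]["role"] == "system":
        prompt.pop(0)  # in-place mutation leaks into the returned value
    return prompt  # previously: InferenceOutput(prompt=prompt, ...)

def echo_prompt_fixed(prompt: list[dict]) -> list[dict]:
    original_prompt = copy.deepcopy(prompt)  # snapshot before any mutation
    if prompt and prompt[0]["role"] == "system":
        prompt.pop(0)
    return original_prompt  # now: InferenceOutput(prompt=original_prompt, ...)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello"},
]
assert len(echo_prompt_buggy(copy.deepcopy(messages))) == 1    # system message lost
assert echo_prompt_fixed(copy.deepcopy(messages)) == messages  # original preserved

Using deepcopy rather than a shallow copy also guards the echoed prompt against later in-place edits to the individual message dicts, though whether that case arises here is an assumption.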

0 comments on commit 2bb0a02
