diff --git a/lightrag/CHANGELOG.md b/lightrag/CHANGELOG.md
index 9136b8b0..d9f8a191 100644
--- a/lightrag/CHANGELOG.md
+++ b/lightrag/CHANGELOG.md
@@ -1,3 +1,9 @@
+## [0.0.0-alpha.16] - 2024-07-08
+
+### Fixed
+- The Anthropic client no longer sends the prompt via the system role. For now, we send the whole prompt as the first user message.
+- Updated `DEFAULT_LIGHTRAG_SYSTEM_PROMPT` to fall back to 'You are a helpful assistant.' when no task description is provided.
+
 ## [0.0.0-alpha.15] - 2024-07-07
 
 ### Fixed
diff --git a/lightrag/lightrag/components/model_client/anthropic_client.py b/lightrag/lightrag/components/model_client/anthropic_client.py
index 53e51c5e..c53049ef 100644
--- a/lightrag/lightrag/components/model_client/anthropic_client.py
+++ b/lightrag/lightrag/components/model_client/anthropic_client.py
@@ -72,19 +72,26 @@ def parse_chat_completion(self, completion: Message) -> str:
         log.debug(f"completion: {completion}")
         return completion.content[0].text
 
+    # TODO: potentially use <SYS></SYS> to separate the system and user messages. This requires the user to follow the convention; if the tags are not found, we only use the user message.
     def convert_inputs_to_api_kwargs(
         self,
         input: Optional[Any] = None,
         model_kwargs: Dict = {},
         model_type: ModelType = ModelType.UNDEFINED,
     ) -> dict:
+        r"""The Anthropic messages API separates the system prompt from the user messages.
+
+        As we render everything into a single prompt, we pass it as the user message.
+
+        api: https://docs.anthropic.com/en/api/messages
+        """
         api_kwargs = model_kwargs.copy()
         if model_type == ModelType.LLM:
-            # api_kwargs["messages"] = [
-            #     {"role": "user", "content": input},
-            # ]
-            if input and input != "":
-                api_kwargs["system"] = input
+            api_kwargs["messages"] = [
+                {"role": "user", "content": input},
+            ]
+            # if input and input != "":
+            #     api_kwargs["system"] = input
         else:
             raise ValueError(f"Model type {model_type} not supported")
         return api_kwargs
diff --git a/lightrag/lightrag/core/default_prompt_template.py b/lightrag/lightrag/core/default_prompt_template.py
index 3355a8cf..6c8084b6 100644
--- a/lightrag/lightrag/core/default_prompt_template.py
+++ b/lightrag/lightrag/core/default_prompt_template.py
@@ -25,12 +25,12 @@
 User: {{input_str}}
 You:"""
 
-DEFAULT_LIGHTRAG_SYSTEM_PROMPT = r"""{% if task_desc_str or output_format_str or tools_str or examples_str or chat_history_str or context_str or steps_str %}
-<SYS>
-{% endif %}
+DEFAULT_LIGHTRAG_SYSTEM_PROMPT = r"""<SYS>
 {# task desc #}
 {% if task_desc_str %}
 {{task_desc_str}}
+{% else %}
+You are a helpful assistant.
 {% endif %}
 {# output format #}
 {% if output_format_str %}
@@ -68,9 +68,7 @@
 {{steps_str}}
 </STEPS>
 {% endif %}
-{% if task_desc_str or output_format_str or tools_str or examples_str or chat_history_str or context_str or steps_str %}
 </SYS>
-{% endif %}
 {% if input_str %}
 <User>
 {{input_str}}
diff --git a/lightrag/lightrag/core/generator.py b/lightrag/lightrag/core/generator.py
index 4c671cd3..0e078ea3 100644
--- a/lightrag/lightrag/core/generator.py
+++ b/lightrag/lightrag/core/generator.py
@@ -232,10 +232,11 @@ def call(
         }
         prompt_kwargs.update(trained_prompt_kwargs)
 
-        log.info(f"prompt_kwargs: {prompt_kwargs}")
-        log.info(f"model_kwargs: {model_kwargs}")
+        log.debug(f"prompt_kwargs: {prompt_kwargs}")
+        log.debug(f"model_kwargs: {model_kwargs}")
 
         api_kwargs = self._pre_call(prompt_kwargs, model_kwargs)
+        log.debug(f"api_kwargs: {api_kwargs}")
         output: GeneratorOutputType = None
         # call the model client
         try:
diff --git a/lightrag/pyproject.toml b/lightrag/pyproject.toml
index 9f8c7c81..bc8c91d7 100644
--- a/lightrag/pyproject.toml
+++ b/lightrag/pyproject.toml
@@ -1,7 +1,7 @@
 [tool.poetry]
 name = "lightrag"
-version = "0.0.0-alpha.15"
+version = "0.0.0-alpha.16"
 description = "The 'PyTorch' library for LLM applications. RAG=Retriever-Agent-Generator."
 authors = ["Li Yin "]
 readme = "README.md"
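For context, a minimal sketch of the request the new `convert_inputs_to_api_kwargs` path builds, sent through the official `anthropic` SDK. The model name, `max_tokens`, and prompt text are illustrative, not values taken from this diff:

```python
from anthropic import Anthropic

client = Anthropic()  # reads ANTHROPIC_API_KEY from the environment

# The whole rendered prompt (system instructions included) travels as one
# user message; the top-level "system" field stays unset, matching the
# changed convert_inputs_to_api_kwargs behavior above.
prompt = "You are a helpful assistant.\nUser: What is the capital of France?\nYou:"
api_kwargs = {
    "model": "claude-3-haiku-20240307",  # illustrative model choice
    "max_tokens": 512,                   # illustrative limit
    "messages": [{"role": "user", "content": prompt}],
}
completion = client.messages.create(**api_kwargs)
print(completion.content[0].text)  # same access pattern as parse_chat_completion
```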
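The template fallback can be checked by rendering without a `task_desc_str`. A small sketch using `jinja2` directly; LightRAG normally renders this template through its `Prompt` component, so this only demonstrates the new `{% else %}` branch:

```python
from jinja2 import Template

from lightrag.core.default_prompt_template import DEFAULT_LIGHTRAG_SYSTEM_PROMPT

# No task_desc_str supplied, so the new else-branch supplies the default.
rendered = Template(DEFAULT_LIGHTRAG_SYSTEM_PROMPT).render(input_str="Hello")
assert "You are a helpful assistant." in rendered
print(rendered)
```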
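Since `prompt_kwargs`, `model_kwargs`, and the new `api_kwargs` line now log at DEBUG, they are hidden by default. Assuming the module uses the conventional `logging.getLogger(__name__)` pattern, they can be surfaced like this:

```python
import logging

# Show all debug output, or narrow it to the generator module only.
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("lightrag.core.generator").setLevel(logging.DEBUG)
```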