Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

LangGraph example issue with 1.1 api: type error #1987

Open
luca-git opened this issue Oct 26, 2024 · 1 comment
Open

LangGraph example issue with 1.1 api: type error #1987

luca-git opened this issue Oct 26, 2024 · 1 comment
Labels
bug Something isn't working

Comments

@luca-git
Copy link

luca-git commented Oct 26, 2024

🐛 Describe the bug

Running the LangGraph example with api 1.1 set in config i get a type error, this does not happen with api 1.0 but i get all sorts of annoying deprecation messages.

from typing import List, Dict
from langchain_openai import ChatOpenAI
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from mem0 import Memory

import os
from dotenv import load_dotenv
from openai import OpenAI


config = {
"llm": {
"provider": "openai",
"config": {
"model": "gpt-4o-mini",
# "temperature": 0.2,
# "max_tokens": 1500,
}
},
"version": "v1.1"
}
# Load environment variables from .env2 file
load_dotenv(".env2")

openai_client = OpenAI(
    api_key=os.getenv("OPENAI_API_KEY"),
    # You can set other parameters here if needed
)

# Initialize LangChain and Mem0
llm = ChatOpenAI(model="gpt-4o-mini")
mem0 = Memory.from_config(config)


prompt = ChatPromptTemplate.from_messages([
    SystemMessage(content="""You are a helpful travel agent AI. Use the provided context to personalize your responses and remember user preferences and past interactions. 
    Provide travel recommendations, itinerary suggestions, and answer questions about destinations. 
    If you don't have specific information, you can make general suggestions based on common travel knowledge."""),
    MessagesPlaceholder(variable_name="context"),
    HumanMessage(content="{input}")
])
def retrieve_context(query: str, user_id: str) -> List[Dict]:
    """Retrieve relevant context from Mem0"""
    memories = mem0.search(query, user_id=user_id)
    seralized_memories = ' '.join([mem["memory"] for mem in memories])
    context = [
        {
            "role": "system", 
            "content": f"Relevant information: {seralized_memories}"
        },
        {
            "role": "user",
            "content": query
        }
    ]
    return context

def generate_response(input: str, context: List[Dict]) -> str:
    """Generate a response using the language model"""
    chain = prompt | llm
    response = chain.invoke({
        "context": context,
        "input": input
    })
    return response.content

def save_interaction(user_id: str, user_input: str, assistant_response: str):
    """Save the interaction to Mem0"""
    interaction = [
        {
          "role": "user",
          "content": user_input
        },
        {
            "role": "assistant",
            "content": assistant_response
        }
    ]
    mem0.add(interaction, user_id=user_id)


def generate_response(input: str, context: List[Dict]) -> str:
    """Generate a response using the language model"""
    chain = prompt | llm
    response = chain.invoke({
        "context": context,
        "input": input
    })
    return response.content

def save_interaction(user_id: str, user_input: str, assistant_response: str):
    """Save the interaction to Mem0"""
    interaction = [
        {
          "role": "user",
          "content": user_input
        },
        {
            "role": "assistant",
            "content": assistant_response
        }
    ]
    mem0.add(interaction, user_id=user_id)
def chat_turn(user_input: str, user_id: str) -> str:
    # Retrieve context
    context = retrieve_context(user_input, user_id)
    
    # Generate response
    response = generate_response(user_input, context)
    
    # Save interaction
    save_interaction(user_id, user_input, response)
    
    return response
if __name__ == "__main__":
    print("Welcome to your personal Travel Agent Planner! How can I assist you with your travel plans today?")
    user_id = "john1"
    
    while True:
        user_input = input("You: ")
        if user_input.lower() in ['quit', 'exit', 'bye']:
            print("Travel Agent: Thank you for using our travel planning service. Have a great trip!")
            break
        
        response = chat_turn(user_input, user_id)
        print(f"Travel Agent: {response}")

error:

Welcome to your personal Travel Agent Planner! How can I assist you with your travel plans today?
You: milan
Traceback (most recent call last):
  File "c:\...\memo_base.py", line 129, in <module>
    response = chat_turn(user_input, user_id)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "c:\...\memo_base.py", line 110, in chat_turn
    context = retrieve_context(user_input, user_id)
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "c:\...\memo_base.py", line 49, in retrieve_context
    seralized_memories = ' '.join([mem["memory"] for mem in memories])
                                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "c:\...\memo_base.py", line 49, in <listcomp>
    seralized_memories = ' '.join([mem["memory"] for mem in memories])
                                   ~~~^^^^^^^^^^
TypeError: string indices must be integers, not 'str'```
@spike-spiegel-21
Copy link
Collaborator

Hi @luca-git

For v1.1, the output of the search method will be a dictionary:

{"results": original_memories, "relations": graph_entities}

Please replace your line:
seralized_memories = ' '.join([mem["memory"] for mem in memories]) with
seralized_memories = ' '.join([mem['memory'] for mem in res['results']])

@Dev-Khant Dev-Khant added the bug Something isn't working label Nov 8, 2024
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
bug Something isn't working
Projects
None yet
Development

No branches or pull requests

3 participants