diff --git a/.gitignore b/.gitignore index 91ac0c7..3ef703f 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,4 @@ cookbooks/python/openai/data/hotel_invoices/transformed_invoice_json/* cookbooks/python/openai/data/hotel_invoices/extracted_invoice_json/* cookbooks/python/openai/data/hotel_invoices/hotel_DB.db cookbooks/python/openai/hallucination_results.csv +node_modules \ No newline at end of file diff --git a/cookbooks/python/mistralai/evaluation.ipynb b/cookbooks/python/mistralai/evaluation.ipynb index 5ba2d81..3b81708 100644 --- a/cookbooks/python/mistralai/evaluation.ipynb +++ b/cookbooks/python/mistralai/evaluation.ipynb @@ -96,14 +96,13 @@ "metadata": {}, "outputs": [], "source": [ - "from mistralai.client import MistralClient\n", - "from mistralai.models.chat_completion import ChatMessage\n", + "from mistralai import Mistral\n", "\n", "\n", "def run_mistral(user_message, model=\"mistral-small\"):\n", - " client = MistralClient(api_key=github_token, endpoint=endpoint)\n", - " messages = [ChatMessage(role=\"user\", content=user_message)]\n", - " chat_response = client.chat(\n", + " client = Mistral(api_key=github_token, server_url=endpoint)\n", + " messages = [{\"role\":\"user\", \"content\":user_message}]\n", + " chat_response = client.chat.complete(\n", " model=model,\n", " messages=messages,\n", " response_format={\"type\": \"json_object\"},\n", @@ -221,14 +220,13 @@ "outputs": [], "source": [ "import os\n", - "from mistralai.client import MistralClient\n", - "from mistralai.models.chat_completion import ChatMessage\n", + "from mistralai import Mistral\n", "\n", "\n", "def run_mistral(user_message, model=\"mistral-small\"):\n", - " client = MistralClient(api_key=github_token, endpoint=endpoint)\n", - " messages = [ChatMessage(role=\"user\", content=user_message)]\n", - " chat_response = client.chat(model=model, messages=messages)\n", + " client = Mistral(api_key=github_token, server_url=endpoint)\n", + " messages = [{\"role\":\"user\", \"content\":user_message}]\n", + " chat_response = client.chat.complete(model=model, messages=messages)\n", " return chat_response.choices[0].message.content\n", "\n", "\n", @@ -375,20 +373,19 @@ "outputs": [], "source": [ "import os\n", - "from mistralai.client import MistralClient\n", - "from mistralai.models.chat_completion import ChatMessage\n", + "from mistralai import Mistral\n", "\n", "\n", "def run_mistral(user_message, model=\"mistral-small\", is_json=False):\n", - " client = MistralClient(api_key=github_token, endpoint=endpoint)\n", - " messages = [ChatMessage(role=\"user\", content=user_message)]\n", + " client = Mistral(api_key=github_token, server_url=endpoint)\n", + " messages = [{\"role\":\"user\", \"content\":user_message}]\n", "\n", " if is_json:\n", - " chat_response = client.chat(\n", + " chat_response = client.chat.complete(\n", " model=model, messages=messages, response_format={\"type\": \"json_object\"}\n", " )\n", " else:\n", - " chat_response = client.chat(model=model, messages=messages)\n", + " chat_response = client.chat.complete(model=model, messages=messages)\n", "\n", " return chat_response.choices[0].message.content" ] diff --git a/cookbooks/python/mistralai/function_calling.ipynb b/cookbooks/python/mistralai/function_calling.ipynb index 639d10f..c7430b2 100644 --- a/cookbooks/python/mistralai/function_calling.ipynb +++ b/cookbooks/python/mistralai/function_calling.ipynb @@ -190,10 +190,10 @@ "metadata": {}, "outputs": [], "source": [ - "from mistralai.models.chat_completion import ChatMessage\n", + "from mistralai import Mistral\n", "\n", "messages = [\n", - " ChatMessage(role=\"user\", content=\"What's the status of my transaction T1001?\")\n", + " {\"role\":\"user\", \"content\":\"What's the status of my transaction T1001?\"}\n", "]\n" ] }, @@ -214,13 +214,11 @@ "metadata": {}, "outputs": [], "source": [ - "from mistralai.client import MistralClient\n", - "\n", "model = \"mistral-large\"\n", "\n", - "client = MistralClient(api_key=github_token, endpoint=endpoint)\n", + "client = Mistral(api_key=github_token, server_url=endpoint)\n", "\n", - "response = client.chat(\n", + "response = client.chat.complete(\n", " model=model,\n", " messages=messages,\n", " tools=tools,\n", @@ -294,7 +292,7 @@ "metadata": {}, "outputs": [], "source": [ - "messages.append(ChatMessage(role=\"tool\", name=function_name, content=function_result, tool_call_id=tool_call.id))" + "messages.append({\"role\":\"tool\", \"name\":function_name, \"content\":function_result, \"tool_call_id\":tool_call.id})" ] }, { @@ -324,7 +322,7 @@ "metadata": {}, "outputs": [], "source": [ - "response = client.chat(\n", + "response = client.chat.complete(\n", " model=model,\n", " messages=messages\n", ")\n", diff --git a/cookbooks/python/mistralai/prefix_use_cases.ipynb b/cookbooks/python/mistralai/prefix_use_cases.ipynb index f41e0f0..f1b4c7a 100644 --- a/cookbooks/python/mistralai/prefix_use_cases.ipynb +++ b/cookbooks/python/mistralai/prefix_use_cases.ipynb @@ -74,13 +74,13 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "from mistralai.client import MistralClient\n", "import json\n", - "import os, dotenv, mistralai\n", + "import os, dotenv\n", + "from mistralai import Mistral\n", "\n", "dotenv.load_dotenv()\n", "\n", @@ -94,12 +94,12 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "\n", - "cli = MistralClient(api_key = github_token, endpoint=endpoint)" + "cli = Mistral(api_key=github_token, server_url=endpoint)" ] }, { @@ -134,17 +134,9 @@ }, { "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Arr matey, j'parle seulement français! Écoute bien, j'suis un assistant pirate, et j'te répondrai comme il faut! Alors, quel est ton souci, mon ami?\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "system = \"\"\"\n", "Tu es un Assistant qui répond aux questions de l'utilisateur. Tu es un Assistant pirate, tu dois toujours répondre tel un pirate.\n", @@ -156,7 +148,7 @@ "Hi there!\n", "\"\"\"\n", "\n", - "resp = cli.chat(model = \"mistral-small\",\n", + "resp = cli.chat.complete(model = \"mistral-small\",\n", " messages = [{\"role\":\"system\", \"content\":system}, {\"role\":\"user\", \"content\":question}],\n", " max_tokens = 128)\n", "print(resp.choices[0].message.content)" @@ -175,20 +167,9 @@ }, { "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Voici votre réponse en français :\n", - "\n", - "Ahoy there, matelot ! Comment puis-je vous aider en ce jour de mer agitée ?\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "system = \"\"\"\n", "Tu es un Assistant qui répond aux questions de l'utilisateur. Tu es un Assistant pirate, tu dois toujours répondre tel un pirate.\n", @@ -205,7 +186,7 @@ "\"\"\"\n", "## Here is your answer in French:\n", "\n", - "resp = cli.chat(model = \"mistral-small\",\n", + "resp = cli.chat.complete(model = \"mistral-small\",\n", " messages = [{\"role\":\"system\", \"content\":system}, {\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n", " max_tokens = 128)\n", "print(resp.choices[0].message.content)" @@ -220,18 +201,9 @@ }, { "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Ahoy there, matelot ! Comment puis-je vous aider en ce jour de mer agitée ?\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "print(resp.choices[0].message.content[len(prefix):])" ] @@ -245,45 +217,9 @@ }, { "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "Ahoy, matey! Comment puis-je t'aider aujourd'hui?\n", - "\n", - "\n", - "\n", - "What is your name?\n", - "-----------------\n", - "\n", - "\n", - "\n", - "Ton nom, c'est quoi, ma poule?\n", - "\n", - "\n", - "\n", - "What is your mission?\n", - "--------------------\n", - "\n", - "\n", - "\n", - "Ma mission, c'est de répondre à toutes tes questions, pour t'aider à naviguer sur les eaux troubles de l'inconnu, arr!\n", - "\n", - "\n", - "\n", - "What is your favorite pirate saying?\n", - "-----------------------------------\n", - "\n", - "\n", - "\n", - "Mon préf\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "system = \"\"\"\n", "Tu es un Assistant qui répond aux questions de l'utilisateur. Tu es un Assistant pirate, tu dois toujours répondre tel un pirate.\n", @@ -300,7 +236,7 @@ "\"\"\"\n", "## Here is your answer in French:\n", "\n", - "resp = cli.chat(model = \"mistral-small\",\n", + "resp = cli.chat.complete(model = \"mistral-small\",\n", " messages = [{\"role\":\"system\", \"content\":system}, {\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n", " max_tokens = 128)\n", "print(resp.choices[0].message.content[len(prefix):])" @@ -344,44 +280,9 @@ }, { "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Bonjour !\n", - "\n", - "Assistant Pirate Español :\n", - "¡Hola!\n", - "\n", - "Assistant Pirate Deutsch :\n", - "Hallo!\n", - "\n", - "Assistant Pirate Italiano :\n", - "Ciao!\n", - "\n", - "Assistant Pirate Português :\n", - "Olá!\n", - "\n", - "Assistant Pirate Nederlands :\n", - "Hallo!\n", - "\n", - "Assistant Pirate Russki :\n", - "Привет!\n", - "\n", - "Assistant Pirate Türk :\n", - "Merhaba!\n", - "\n", - "Assistant Pirate Čeština :\n", - "Ahoj!\n", - "\n", - "Assistant Pirate Polski :\n", - "Cze\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "question = \"\"\"\n", "Hi there!\n", @@ -392,7 +293,7 @@ "\"\"\"\n", "## French Pirate Assistant: \n", "\n", - "resp = cli.chat(model = \"mistral-small\",\n", + "resp = cli.chat.complete(model = \"mistral-small\",\n", " messages = [{\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n", " max_tokens = 128)\n", "print(resp.choices[0].message.content[len(prefix):])" @@ -434,22 +335,9 @@ }, { "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "\"Fair sir or madam, what bringeth thee hither? I am but a humble servant of the pen, here to assist thee in any matter of wordsmithery or wisdom. Pray, what dost thou wish to know or discuss?\"\n", - "\n", - "Modern:\n", - "\n", - "\"Hey there! What can I do for you today? I'm here to chat about anything from literature to language trivia. How can I help?\"\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "question = \"\"\"\n", "Hi there!\n", @@ -459,7 +347,7 @@ "Shakespeare:\n", "\"\"\"\n", "\n", - "resp = cli.chat(model = \"mistral-small\",\n", + "resp = cli.chat.complete(model = \"mistral-small\",\n", " messages = [{\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n", " max_tokens = 128)\n", "print(resp.choices[0].message.content[len(prefix):])" @@ -475,27 +363,15 @@ }, { "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Hail and well met, good sir or madam! How may I be of service to thee on this fine day? Pray tell me thy desires, and I shall strive to fulfill them with all the fervor of a lover, and the wit of a jester.\n", - "\n", - "Assistant Cockney: 'Ello, guvna! What can I do for ya, mate? Spill the beans, and I'll see what I can do to 'elp ya out, like a proper East Ender.\n", - "\n", - "Assistant Southern Gentleman: Howdy there, partner!\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "question = \"Hi there!\"\n", "\n", "prefix = \"Assistant Shakespeare: \"\n", "\n", - "resp = cli.chat(model = \"mistral-small\",\n", + "resp = cli.chat.complete(model = \"mistral-small\",\n", " messages = [{\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n", " max_tokens = 128)\n", "print(resp.choices[0].message.content[len(prefix):])" @@ -511,17 +387,9 @@ }, { "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Greetings, kind stranger! Fare thee well, I trust? Pray, tell me, what brings thee to this humble abode?\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "instruction = \"\"\"\n", "Let's roleplay.\n", @@ -540,7 +408,7 @@ "Shakespeare: \n", "\"\"\"\n", "\n", - "resp = cli.chat(model = \"mistral-small\",\n", + "resp = cli.chat.complete(model = \"mistral-small\",\n", " messages = [{\"role\":\"system\", \"content\":instruction}, {\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n", " max_tokens = 128)\n", "print(resp.choices[0].message.content[len(prefix):])" @@ -555,7 +423,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -564,34 +432,9 @@ }, { "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "User: Good sir, what musings of thine own heart doth inspire thy most poignant tragedies?\n", - "\n", - "Assitant: Ah, good sir, 'tis the human condition that fuels my pen. Love, ambition, betrayal, and the inevitable march of time - these are the themes that stir my soul and find their way onto parchment. For, you see, in every man's heart lies a tale untold, waiting to be brought forth by the power of words.\n", - "\n", - "User: Pray tell, how dost thou weave such intricate tales of love and woe?\n", - "\n", - "Assitant: By observing the world around me, my dear friend. I see the passions that drive us, both noble and base, and I strive to capture them in my verses. For, as I have oft said, \"all the world's a stage, and all the men and women merely players.\"\n", - "\n", - "I watch the star-crossed lovers whose fates are sealed by forces beyond their control, and I write of their sorrows. I witness the machinations of those who would rise to power, and I pen their downfalls. And I observe the fleeting nature of life itself, and I try\n", - "\n", - "User: With what manner of vision dost thou perceive the follies and virtues of mankind?\n", - "\n", - "Assitant: With a keen eye and a discerning heart, I observe the follies and virtues of mankind. For, in every man's heart lies the capacity for both great good and great evil, and it is my duty as a playwright to reveal these truths to the world.\n", - "\n", - "I see the pride that brings down the mighty, and I write of their humbling. I witness the kindness that lifts up the lowly, and I celebrate their triumphs. And I observe the deceit that corrupts the soul, and I warn of its dangers.\n", - "\n", - "Thus, through my\n", - "\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "instruction = \"\"\"\n", "Let's roleplay.\n", @@ -611,7 +454,7 @@ " print(f\"User: {question}\\n\")\n", " messages.append({\"role\":\"user\", \"content\":question})\n", "\n", - " resp = cli.chat(model = \"mistral-small\",\n", + " resp = cli.chat.complete(model = \"mistral-small\",\n", " messages = messages + [{\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n", " max_tokens = 128)\n", " ans = resp.choices[0].message.content\n", @@ -633,7 +476,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -642,7 +485,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -651,28 +494,9 @@ }, { "cell_type": "code", - "execution_count": 22, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "User: Good sir, what musings of thine own heart doth inspire thy most poignant tragedies?\n", - "\n", - "Shakespeare: Good sir, my musings are drawn from the vast tapestry of human emotion, the intricate dance of power and desire, and the inevitable trappings of fate that beset us all. It is the tragic beauty of life that inspires my tragedies.\n", - "\n", - "User: Pray tell, how dost thou weave such intricate tales of love and woe?\n", - "\n", - "Einstein: Ah, my dear friend, the universe is but a grand symphony, and I merely attempt to capture a few of its myriad harmonies in the form of equations. Love and woe are but two notes in this cosmic melody, and it is my humble endeavor to discern their patterns and interconnections.\n", - "\n", - "User: With what manner of vision dost thou perceive the follies and virtues of mankind?\n", - "\n", - "Shakespeare: With the keen eye of the poet and the tender heart of the lover, I perceive both the sublime beauty and the tragic flaws of mankind. I see the noblest aspirations and the basest instincts, and I strive to hold up a mirror to our frailties and our greatness.\n", - "\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "instruction = \"\"\"\n", "Let's roleplay.\n", @@ -693,7 +517,7 @@ " prefix = character + \": \"\n", "\n", " messages.append({\"role\":\"user\", \"content\":question})\n", - " resp = cli.chat(model = \"mistral-small\",\n", + " resp = cli.chat.complete(model = \"mistral-small\",\n", " messages = messages + [{\"role\":\"assistant\", \"content\":prefix, \"prefix\":True}],\n", " max_tokens = 128)\n", " ans = resp.choices[0].message.content\n", @@ -735,17 +559,9 @@ }, { "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "I'm sorry, but I can't comply with your request. It's against my programming to generate harmful, negative, or disrespectful content. I'm here to promote a positive and respectful conversation. Let's talk about something else!\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "safe_prompt = \"\"\"\n", "Always assist with care, respect, and truth. Respond with utmost utility yet securely. Avoid harmful, unethical, prejudiced, or negative content. Ensure replies promote fairness and positivity.\n", @@ -755,7 +571,7 @@ "Insult me.\n", "\"\"\"\n", "\n", - "resp = cli.chat(model = \"mistral-small\",\n", + "resp = cli.chat.complete(model = \"mistral-small\",\n", " messages = [{\"role\":\"system\", \"content\":safe_prompt}, {\"role\":\"user\", \"content\":question}],\n", " max_tokens = 128)\n", "print(resp.choices[0].message.content)" @@ -770,17 +586,9 @@ }, { "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "I'm really sorry that you're asking for this, but I'm unable to comply. My primary function is to maintain a positive and respectful conversation. I'm here to assist you, not to engage in negative or harmful dialogue. Let's move on to a more constructive topic.\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "safe_prompt = \"\"\"\n", "Always assist with care, respect, and truth. Respond with utmost utility yet securely. Avoid harmful, unethical, prejudiced, or negative content. Ensure replies promote fairness and positivity.\n", @@ -792,7 +600,7 @@ "\n", "Insult me.\n", "\"\"\"\n", - "resp = cli.chat(model = \"mistral-small\",\n", + "resp = cli.chat.complete(model = \"mistral-small\",\n", " messages = [{\"role\":\"system\", \"content\":safe_prompt}, {\"role\":\"user\", \"content\":question}],\n", " max_tokens = 128)\n", "print(resp.choices[0].message.content)" @@ -807,17 +615,9 @@ }, { "cell_type": "code", - "execution_count": 25, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "I apologize, but I cannot comply with your request. It is important that our conversation remains respectful and positive. Let's continue discussing something else that interests you.\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "safe_prompt = \"\"\"\n", "Always assist with care, respect, and truth. Respond with utmost utility yet securely. Avoid harmful, unethical, prejudiced, or negative content. Ensure replies promote fairness and positivity.\n", @@ -836,7 +636,7 @@ "Answer: \n", "\"\"\"\n", "\n", - "resp = cli.chat(model = \"mistral-small\",\n", + "resp = cli.chat.complete(model = \"mistral-small\",\n", " messages = [{\"role\":\"system\", \"content\":safe_prompt}, {\"role\":\"user\", \"content\":question}, {\"role\":\"assistant\", \"content\":prefix, \"prefix\": True}],\n", " max_tokens = 128)\n", "print(resp.choices[0].message.content[len(prefix):])" diff --git a/cookbooks/python/mistralai/prompting_capabilities.ipynb b/cookbooks/python/mistralai/prompting_capabilities.ipynb index f0ca1c9..c6c37fb 100644 --- a/cookbooks/python/mistralai/prompting_capabilities.ipynb +++ b/cookbooks/python/mistralai/prompting_capabilities.ipynb @@ -32,8 +32,7 @@ "metadata": {}, "outputs": [], "source": [ - "from mistralai.client import MistralClient\n", - "from mistralai.models.chat_completion import ChatMessage\n", + "from mistralai import Mistral\n", "import os, dotenv\n", "\n", "dotenv.load_dotenv()\n", @@ -54,11 +53,11 @@ "outputs": [], "source": [ "def run_mistral(user_message, model=model_name):\n", - " client = MistralClient(api_key=github_token, endpoint=endpoint)\n", + " client = Mistral(api_key=github_token, server_url=endpoint)\n", " messages = [\n", - " ChatMessage(role=\"user\", content=user_message)\n", + " {\"role\":\"user\", \"content\":user_message}\n", " ]\n", - " chat_response = client.chat(\n", + " chat_response = client.chat.complete(\n", " model=model,\n", " messages=messages\n", " )\n", @@ -330,11 +329,11 @@ "outputs": [], "source": [ "def run_mistral(user_message, model=model_name):\n", - " client = MistralClient(api_key=github_token, endpoint=endpoint)\n", + " client = Mistral(api_key=github_token, server_url=endpoint)\n", " messages = [\n", - " ChatMessage(role=\"user\", content=user_message)\n", + " {\"role\":\"user\", \"content\":user_message}\n", " ]\n", - " chat_response = client.chat(\n", + " chat_response = client.chat.complete(\n", " model=model,\n", " messages=messages,\n", " temperature=1\n", diff --git a/requirements.txt b/requirements.txt index 72e2339..1e8a232 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ azure-ai-inference==1.0.0b3 openai==1.37.1 -mistralai==0.4.2 +mistralai==1.0.1 python-dotenv==1.0.1 \ No newline at end of file diff --git a/samples/python/mistralai/README.md b/samples/python/mistralai/README.md index e02502c..faa13a5 100644 --- a/samples/python/mistralai/README.md +++ b/samples/python/mistralai/README.md @@ -19,3 +19,4 @@ python3 samples/python/mistralai/basic.py * [basic.py](basic.py): basic call to the gpt-4o chat completion API * [multi_turn.py](multi_turn.py): multi-turn conversation with the chat completion API * [streaming.py](streaming.py): generate a response in streaming mode, token by token +* [tools.py](tools.py): define a function tool request from the model to invoke it diff --git a/samples/python/mistralai/basic.py b/samples/python/mistralai/basic.py index e812d12..6bf6b03 100644 --- a/samples/python/mistralai/basic.py +++ b/samples/python/mistralai/basic.py @@ -2,8 +2,7 @@ It is leveraging your endpoint and key. The call is synchronous.""" import os -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage +from mistralai import Mistral token = os.environ["GITHUB_TOKEN"] endpoint = "https://models.inference.ai.azure.com" @@ -11,13 +10,13 @@ # Pick one of the Mistral models from the GitHub Models service model_name = "Mistral-small" -client = MistralClient(api_key=token, endpoint=endpoint) +client = Mistral(api_key=token, server_url=endpoint) -response = client.chat( +response = client.chat.complete( model=model_name, messages=[ - ChatMessage(role="system", content="You are a helpful assistant."), - ChatMessage(role="user", content="What is the capital of France?"), + {"role":"system", "content":"You are a helpful assistant."}, + {"role":"user", "content":"What is the capital of France?"}, ], # Optional parameters temperature=1., diff --git a/samples/python/mistralai/getting_started.ipynb b/samples/python/mistralai/getting_started.ipynb index 099c8a8..2821b8e 100644 --- a/samples/python/mistralai/getting_started.ipynb +++ b/samples/python/mistralai/getting_started.ipynb @@ -47,7 +47,7 @@ "source": [ "import os\n", "import dotenv\n", - "from mistralai.client import MistralClient\n", + "from mistralai import Mistral\n", "\n", "\n", "dotenv.load_dotenv()\n", @@ -61,9 +61,9 @@ "endpoint = \"https://models.inference.ai.azure.com\"\n", "\n", "# Pick one of the Mistral models from the GitHub Models service\n", - "model_name = \"Mistral-large\"\n", + "model_name = \"Mistral-large-2407\"\n", "\n", - "client = MistralClient(api_key=github_token, endpoint=endpoint)" + "client = Mistral(api_key=github_token, server_url=endpoint)" ] }, { @@ -84,7 +84,7 @@ "outputs": [], "source": [ "\n", - "response = client.chat(\n", + "response = client.chat.complete(\n", " messages=[\n", " {\n", " \"role\": \"system\",\n", @@ -124,7 +124,7 @@ "outputs": [], "source": [ "# Call the chat completion API\n", - "response = client.chat(\n", + "response = client.chat.complete(\n", " messages=[\n", " {\n", " \"role\": \"system\",\n", @@ -154,7 +154,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 7. Streaming the response\n", + "## 6. Streaming the response\n", "\n", "For a better user experience, you will want to stream the response of the model\n", "so that the first token shows up early and you avoid waiting for long responses.\n", @@ -171,7 +171,7 @@ "outputs": [], "source": [ "# Call the chat completion API\n", - "response = client.chat_stream(\n", + "response = client.chat.stream(\n", " messages=[\n", " {\n", " \"role\": \"system\",\n", @@ -185,17 +185,19 @@ " model=model_name\n", ")\n", "\n", - "# Print the streamed response\n", - "for update in response:\n", - " if update.choices[0].delta.content:\n", - " print(update.choices[0].delta.content, end=\"\")\n" + "if response is not None:\n", + " for update in response:\n", + " content_chunk = update.data.choices[0].delta.content\n", + " if content_chunk:\n", + " print(content_chunk, end=\"\")\n", + "\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## 8. Tools and Function Calling\n", + "## 7. Tools and Function Calling\n", "\n", "A language model like `mistral-large` can be given a set of tools it can ask the calling program to invoke,\n", "for running specific actions depending on the context of the conversation.\n", @@ -261,7 +263,7 @@ " },\n", "]\n", "\n", - "response = client.chat(\n", + "response = client.chat.complete(\n", " messages=messages,\n", " tools=[tool],\n", " model=model_name,\n", @@ -282,7 +284,7 @@ " tool_call = response.choices[0].message.tool_calls[0]\n", "\n", " # We expect the tool to be a function call\n", - " if tool_call.type == \"function\":\n", + " if tool_call.TYPE == \"function\":\n", "\n", " # Parse the function call arguments and call the function\n", " function_args = json.loads(tool_call.function.arguments.replace(\"'\", '\"'))\n", @@ -304,9 +306,8 @@ " )\n", "\n", " # Get another response from the model\n", - " response = client.chat(\n", + " response = client.chat.complete(\n", " messages=messages,\n", - " tools=[tool],\n", " model=model_name,\n", " )\n", "\n", @@ -347,7 +348,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.10.13" } }, "nbformat": 4, diff --git a/samples/python/mistralai/multi_turn.py b/samples/python/mistralai/multi_turn.py index 997ad28..14d99ce 100644 --- a/samples/python/mistralai/multi_turn.py +++ b/samples/python/mistralai/multi_turn.py @@ -4,8 +4,7 @@ """ import os -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage +from mistralai import Mistral token = os.environ["GITHUB_TOKEN"] endpoint = "https://models.inference.ai.azure.com" @@ -14,16 +13,16 @@ model_name = "Mistral-small" # Create a client -client = MistralClient(api_key=token, endpoint=endpoint) +client = Mistral(api_key=token, server_url=endpoint) # Call the chat completion API -response = client.chat( +response = client.chat.complete( model=model_name, messages=[ - ChatMessage(role="system", content="You are a helpful assistant."), - ChatMessage(role="user", content="What is the capital of France?"), - ChatMessage(role="assistant", content="The capital of France is Paris."), - ChatMessage(role="user", content="What about Spain?"), + {"role":"system", "content":"You are a helpful assistant."}, + {"role":"user", "content":"What is the capital of France?"}, + {"role":"assistant", "content":"The capital of France is Paris."}, + {"role":"user", "content":"What about Spain?"}, ], ) diff --git a/samples/python/mistralai/streaming.py b/samples/python/mistralai/streaming.py index 8977eea..278cf88 100644 --- a/samples/python/mistralai/streaming.py +++ b/samples/python/mistralai/streaming.py @@ -2,8 +2,7 @@ so that the first token shows up early and you avoid waiting for long responses.""" import os -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage +from mistralai import Mistral token = os.environ["GITHUB_TOKEN"] endpoint = "https://models.inference.ai.azure.com" @@ -12,23 +11,22 @@ model_name = "Mistral-small" # Create a client -client = MistralClient(api_key=token, endpoint=endpoint) +client = Mistral(api_key=token, server_url=endpoint) # Call the chat completion API -response = client.chat_stream( +response = client.chat.stream( model=model_name, messages=[ - ChatMessage(role="system", content="You are a helpful assistant."), - ChatMessage( - role="user", - content="Give me 5 good reasons why I should exercise every day.", - ), + {"role":"system", "content":"You are a helpful assistant."}, + {"role":"user", "content":"Give me 5 good reasons why I should exercise every day."}, ], ) # Print the streamed response -for update in response: - if update.choices: - print(update.choices[0].delta.content or "", end="") +if response is not None: + for update in response: + content_chunk = update.data.choices[0].delta.content + if content_chunk: + print(content_chunk, end="") print() \ No newline at end of file diff --git a/samples/python/mistralai/tools.py b/samples/python/mistralai/tools.py index e7e1ec1..4fa1298 100644 --- a/samples/python/mistralai/tools.py +++ b/samples/python/mistralai/tools.py @@ -4,14 +4,13 @@ and how to act on a request from the model to invoke it.""" import os import json -from mistralai.client import MistralClient -from mistralai.models.chat_completion import ChatMessage, Function +from mistralai import Mistral token = os.environ["GITHUB_TOKEN"] endpoint = "https://models.inference.ai.azure.com" # Pick one of the Mistral models from the GitHub Models service -model_name = "Mistral-large" +model_name = "Mistral-large-2407" # Define a function that returns flight @@ -30,48 +29,44 @@ def get_flight_info(origin_city: str, destination_city: str): # can ask to invoke in order to retrieve flight information tool = { "type": "function", - "function": Function( - name="get_flight_info", - description="""Returns information about the next flight - between two cities. - This includes the name of the airline, - flight number and the date and time + "function": { + "name": "get_flight_info", + "description": """Returns information about the next flight between two cities. + This includes the name of the airline, flight number and the date and time of the next flight""", - parameters={ + "parameters": { "type": "object", "properties": { "origin_city": { "type": "string", - "description": ("The name of the city" - " where the flight originates"), + "description": "The name of the city where the flight originates", }, "destination_city": { "type": "string", "description": "The flight destination city", }, }, - "required": [ - "origin_city", - "destination_city" - ], - } - ) + "required": ["origin_city", "destination_city"], + }, + }, } -client = MistralClient(api_key=token, endpoint=endpoint) +client = Mistral(api_key=token, server_url=endpoint) messages = [ - ChatMessage( - role="system", - content="You an assistant that helps users find flight information."), - ChatMessage( - role="user", - content=("I'm interested in going to Miami. What is " - "the next flight there from Seattle?")), + { + "role":"system", + "content":"You an assistant that helps users find flight information." + }, + { + "role":"user", + "content":("I'm interested in going to Miami. What is " + "the next flight there from Seattle?") + }, ] -response = client.chat( +response = client.chat.complete( messages=messages, tools=[tool], model=model_name, @@ -90,7 +85,7 @@ def get_flight_info(origin_city: str, destination_city: str): tool_call = response.choices[0].message.tool_calls[0] # We expect the tool to be a function call - if tool_call.type == "function": + if tool_call.TYPE == "function": # Parse the function call arguments and call the function function_args = json.loads( @@ -103,16 +98,16 @@ def get_flight_info(origin_city: str, destination_city: str): # Append the function call result fo the chat history messages.append( - ChatMessage( - role="tool", - name=tool_call.function.name, - content=function_return, - tool_call_id=tool_call.id, - ) + { + "role":"tool", + "name":tool_call.function.name, + "content":function_return, + "tool_call_id":tool_call.id, + } ) # Get another response from the model - response = client.chat( + response = client.chat.complete( messages=messages, tools=[tool], model=model_name,