From 5fa3045bfc39457f0b0c5ce5bed7290ad2b9171e Mon Sep 17 00:00:00 2001 From: AgentGenie Date: Thu, 2 Jan 2025 22:51:52 -0800 Subject: [PATCH 01/61] PDF RAG: Use unstructed io to parse a file --- .gitattributes | 6 +- .../agentchat_rag_workflow.ipynb | 248 ++++++++++++++++++ .../input_files/nvidia_10k_2024.pdf | Bin 0 -> 132 bytes .../agentchat_pdf_rag/parsed_elements.json | 3 + .../parsed_pdf_info/figure-1-1.jpg | 3 + .../parsed_pdf_info/figure-33-2.jpg | 3 + .../parsed_pdf_info/figure-92-3.jpg | 3 + .../parsed_pdf_info/figure-93-4.jpg | 3 + .../parsed_pdf_info/figure-94-5.jpg | 3 + .../parsed_pdf_info/figure-95-6.jpg | 3 + .../parsed_pdf_info/table-12-2.jpg | 3 + .../parsed_pdf_info/table-2-1.jpg | 3 + .../parsed_pdf_info/table-32-3.jpg | 3 + .../parsed_pdf_info/table-33-4.jpg | 3 + .../parsed_pdf_info/table-36-5.jpg | 3 + .../parsed_pdf_info/table-39-6.jpg | 3 + .../parsed_pdf_info/table-39-7.jpg | 3 + .../parsed_pdf_info/table-39-8.jpg | 3 + .../parsed_pdf_info/table-40-9.jpg | 3 + .../parsed_pdf_info/table-41-10.jpg | 3 + .../parsed_pdf_info/table-42-11.jpg | 3 + .../parsed_pdf_info/table-42-12.jpg | 3 + .../parsed_pdf_info/table-43-13.jpg | 3 + .../parsed_pdf_info/table-47-14.jpg | 3 + .../parsed_pdf_info/table-50-15.jpg | 3 + .../parsed_pdf_info/table-51-16.jpg | 3 + .../parsed_pdf_info/table-52-17.jpg | 3 + .../parsed_pdf_info/table-52-18.jpg | 3 + .../parsed_pdf_info/table-53-19.jpg | 3 + .../parsed_pdf_info/table-54-20.jpg | 3 + .../parsed_pdf_info/table-60-21.jpg | 3 + .../parsed_pdf_info/table-61-22.jpg | 3 + .../parsed_pdf_info/table-61-23.jpg | 3 + .../parsed_pdf_info/table-61-24.jpg | 3 + .../parsed_pdf_info/table-62-25.jpg | 3 + .../parsed_pdf_info/table-63-26.jpg | 3 + .../parsed_pdf_info/table-63-27.jpg | 3 + .../parsed_pdf_info/table-64-28.jpg | 3 + .../parsed_pdf_info/table-64-29.jpg | 3 + .../parsed_pdf_info/table-65-30.jpg | 3 + .../parsed_pdf_info/table-65-31.jpg | 3 + .../parsed_pdf_info/table-66-32.jpg | 3 + 
.../parsed_pdf_info/table-66-33.jpg | 3 + .../parsed_pdf_info/table-66-34.jpg | 3 + .../parsed_pdf_info/table-67-35.jpg | 3 + .../parsed_pdf_info/table-68-36.jpg | 3 + .../parsed_pdf_info/table-68-37.jpg | 3 + .../parsed_pdf_info/table-68-38.jpg | 3 + .../parsed_pdf_info/table-69-39.jpg | 3 + .../parsed_pdf_info/table-69-40.jpg | 3 + .../parsed_pdf_info/table-70-41.jpg | 3 + .../parsed_pdf_info/table-70-42.jpg | 3 + .../parsed_pdf_info/table-70-43.jpg | 3 + .../parsed_pdf_info/table-71-44.jpg | 3 + .../parsed_pdf_info/table-72-45.jpg | 3 + .../parsed_pdf_info/table-73-46.jpg | 3 + .../parsed_pdf_info/table-73-47.jpg | 3 + .../parsed_pdf_info/table-75-48.jpg | 3 + .../parsed_pdf_info/table-75-49.jpg | 3 + .../parsed_pdf_info/table-75-50.jpg | 3 + .../parsed_pdf_info/table-76-51.jpg | 3 + .../parsed_pdf_info/table-77-52.jpg | 3 + .../parsed_pdf_info/table-78-53.jpg | 3 + .../parsed_pdf_info/table-79-54.jpg | 3 + .../parsed_pdf_info/table-79-55.jpg | 3 + .../parsed_pdf_info/table-79-56.jpg | 3 + .../parsed_pdf_info/table-80-57.jpg | 3 + .../parsed_pdf_info/table-81-58.jpg | 3 + .../parsed_pdf_info/table-82-59.jpg | 3 + .../parsed_pdf_info/table-83-60.jpg | 3 + .../parsed_pdf_info/table-85-61.jpg | 3 + .../parsed_pdf_info/table-95-62.jpg | 3 + 72 files changed, 457 insertions(+), 4 deletions(-) create mode 100644 notebook/agentchat_pdf_rag/agentchat_rag_workflow.ipynb create mode 100644 notebook/agentchat_pdf_rag/input_files/nvidia_10k_2024.pdf create mode 100644 notebook/agentchat_pdf_rag/parsed_elements.json create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/figure-1-1.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/figure-33-2.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/figure-92-3.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/figure-93-4.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/figure-94-5.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/figure-95-6.jpg 
create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-12-2.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-2-1.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-32-3.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-33-4.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-36-5.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-39-6.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-39-7.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-39-8.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-40-9.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-41-10.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-42-11.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-42-12.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-43-13.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-47-14.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-50-15.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-51-16.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-52-17.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-52-18.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-53-19.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-54-20.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-60-21.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-61-22.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-61-23.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-61-24.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-62-25.jpg create mode 100644 
notebook/agentchat_pdf_rag/parsed_pdf_info/table-63-26.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-63-27.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-64-28.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-64-29.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-65-30.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-65-31.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-66-32.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-66-33.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-66-34.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-67-35.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-68-36.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-68-37.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-68-38.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-69-39.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-69-40.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-70-41.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-70-42.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-70-43.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-71-44.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-72-45.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-73-46.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-73-47.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-75-48.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-75-49.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-75-50.jpg create mode 100644 
notebook/agentchat_pdf_rag/parsed_pdf_info/table-76-51.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-77-52.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-78-53.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-79-54.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-79-55.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-79-56.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-80-57.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-81-58.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-82-59.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-83-60.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-85-61.jpg create mode 100644 notebook/agentchat_pdf_rag/parsed_pdf_info/table-95-62.jpg diff --git a/.gitattributes b/.gitattributes index 513c7ecbf0..b417b197fc 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,10 +33,8 @@ *.tsx text *.xml text *.xhtml text diff=html - # Docker Dockerfile text eol=lf - # Documentation *.ipynb text *.markdown text diff=markdown eol=lf @@ -62,7 +60,6 @@ NEWS text eol=lf readme text eol=lf *README* text eol=lf TODO text - # Configs *.cnf text eol=lf *.conf text eol=lf @@ -84,8 +81,9 @@ yarn.lock text -diff browserslist text Makefile text eol=lf makefile text eol=lf - # Images *.png filter=lfs diff=lfs merge=lfs -text *.jpg filter=lfs diff=lfs merge=lfs -text *.jpeg filter=lfs diff=lfs merge=lfs -text +notebook/agentchat_pdf_rag/parsed_elements.json filter=lfs diff=lfs merge=lfs -text +notebook/agentchat_pdf_rag/input_files/nvidia_10k_2024.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/notebook/agentchat_pdf_rag/agentchat_rag_workflow.ipynb b/notebook/agentchat_pdf_rag/agentchat_rag_workflow.ipynb new file mode 100644 index 0000000000..5a8c010d1c --- /dev/null +++ 
b/notebook/agentchat_pdf_rag/agentchat_rag_workflow.ipynb @@ -0,0 +1,248 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Defaulting to user installation because normal site-packages is not writeable\n", + "Requirement already satisfied: unstructured==0.16.11 in /home/autogen/.local/lib/python3.11/site-packages (0.16.11)\n", + "Requirement already satisfied: pi-heif==0.21.0 in /home/autogen/.local/lib/python3.11/site-packages (0.21.0)\n", + "Requirement already satisfied: unstructured_inference==0.8.1 in /home/autogen/.local/lib/python3.11/site-packages (0.8.1)\n", + "Requirement already satisfied: unstructured.pytesseract==0.3.13 in /home/autogen/.local/lib/python3.11/site-packages (0.3.13)\n", + "Collecting pytesseract==0.3.13\n", + " Downloading pytesseract-0.3.13-py3-none-any.whl.metadata (11 kB)\n", + "Requirement already satisfied: chardet in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (5.2.0)\n", + "Requirement already satisfied: filetype in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (1.2.0)\n", + "Requirement already satisfied: python-magic in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (0.4.27)\n", + "Requirement already satisfied: lxml in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (5.3.0)\n", + "Requirement already satisfied: nltk in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (3.9.1)\n", + "Requirement already satisfied: requests in /usr/local/lib/python3.11/site-packages (from unstructured==0.16.11) (2.32.3)\n", + "Requirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.11/site-packages (from unstructured==0.16.11) (4.12.3)\n", + "Requirement already satisfied: emoji in /home/autogen/.local/lib/python3.11/site-packages (from 
unstructured==0.16.11) (2.14.0)\n", + "Requirement already satisfied: dataclasses-json in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (0.6.7)\n", + "Requirement already satisfied: python-iso639 in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (2024.10.22)\n", + "Requirement already satisfied: langdetect in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (1.0.9)\n", + "Requirement already satisfied: numpy<2 in /usr/local/lib/python3.11/site-packages (from unstructured==0.16.11) (1.26.4)\n", + "Requirement already satisfied: rapidfuzz in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (3.11.0)\n", + "Requirement already satisfied: backoff in /usr/local/lib/python3.11/site-packages (from unstructured==0.16.11) (2.2.1)\n", + "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.11/site-packages (from unstructured==0.16.11) (4.12.2)\n", + "Requirement already satisfied: unstructured-client in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (0.28.1)\n", + "Requirement already satisfied: wrapt in /usr/local/lib/python3.11/site-packages (from unstructured==0.16.11) (1.17.0)\n", + "Requirement already satisfied: tqdm in /usr/local/lib/python3.11/site-packages (from unstructured==0.16.11) (4.67.1)\n", + "Requirement already satisfied: psutil in /usr/local/lib/python3.11/site-packages (from unstructured==0.16.11) (6.1.0)\n", + "Requirement already satisfied: python-oxmsg in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (0.0.1)\n", + "Requirement already satisfied: html5lib in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (1.1)\n", + "Requirement already satisfied: pillow>=10.1.0 in /home/autogen/.local/lib/python3.11/site-packages (from pi-heif==0.21.0) (10.4.0)\n", + "Requirement already satisfied: layoutparser in 
/home/autogen/.local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (0.3.4)\n", + "Requirement already satisfied: python-multipart in /home/autogen/.local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (0.0.20)\n", + "Requirement already satisfied: huggingface-hub in /usr/local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (0.26.2)\n", + "Requirement already satisfied: opencv-python!=4.7.0.68 in /home/autogen/.local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (4.10.0.84)\n", + "Requirement already satisfied: onnx in /home/autogen/.local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (1.17.0)\n", + "Requirement already satisfied: onnxruntime>=1.17.0 in /home/autogen/.local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (1.19.2)\n", + "Requirement already satisfied: matplotlib in /home/autogen/.local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (3.9.2)\n", + "Requirement already satisfied: torch in /usr/local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (2.5.1)\n", + "Requirement already satisfied: timm in /home/autogen/.local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (1.0.12)\n", + "Requirement already satisfied: transformers>=4.25.1 in /usr/local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (4.46.3)\n", + "Requirement already satisfied: packaging>=21.3 in /usr/local/lib/python3.11/site-packages (from unstructured.pytesseract==0.3.13) (24.2)\n", + "Requirement already satisfied: coloredlogs in /usr/local/lib/python3.11/site-packages (from onnxruntime>=1.17.0->unstructured_inference==0.8.1) (15.0.1)\n", + "Requirement already satisfied: flatbuffers in /usr/local/lib/python3.11/site-packages (from onnxruntime>=1.17.0->unstructured_inference==0.8.1) (24.3.25)\n", + "Requirement already satisfied: protobuf in /home/autogen/.local/lib/python3.11/site-packages (from 
onnxruntime>=1.17.0->unstructured_inference==0.8.1) (5.29.1)\n", + "Requirement already satisfied: sympy in /usr/local/lib/python3.11/site-packages (from onnxruntime>=1.17.0->unstructured_inference==0.8.1) (1.13.1)\n", + "Requirement already satisfied: filelock in /usr/local/lib/python3.11/site-packages (from transformers>=4.25.1->unstructured_inference==0.8.1) (3.16.1)\n", + "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.11/site-packages (from transformers>=4.25.1->unstructured_inference==0.8.1) (6.0.2)\n", + "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.11/site-packages (from transformers>=4.25.1->unstructured_inference==0.8.1) (2024.11.6)\n", + "Requirement already satisfied: tokenizers<0.21,>=0.20 in /usr/local/lib/python3.11/site-packages (from transformers>=4.25.1->unstructured_inference==0.8.1) (0.20.3)\n", + "Requirement already satisfied: safetensors>=0.4.1 in /usr/local/lib/python3.11/site-packages (from transformers>=4.25.1->unstructured_inference==0.8.1) (0.4.5)\n", + "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.11/site-packages (from huggingface-hub->unstructured_inference==0.8.1) (2024.10.0)\n", + "Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.11/site-packages (from beautifulsoup4->unstructured==0.16.11) (2.6)\n", + "Requirement already satisfied: marshmallow<4.0.0,>=3.18.0 in /home/autogen/.local/lib/python3.11/site-packages (from dataclasses-json->unstructured==0.16.11) (3.23.1)\n", + "Requirement already satisfied: typing-inspect<1,>=0.4.0 in /home/autogen/.local/lib/python3.11/site-packages (from dataclasses-json->unstructured==0.16.11) (0.9.0)\n", + "Requirement already satisfied: six>=1.9 in /usr/local/lib/python3.11/site-packages (from html5lib->unstructured==0.16.11) (1.16.0)\n", + "Requirement already satisfied: webencodings in /usr/local/lib/python3.11/site-packages (from html5lib->unstructured==0.16.11) (0.5.1)\n", + "Requirement 
already satisfied: scipy in /usr/local/lib/python3.11/site-packages (from layoutparser->unstructured_inference==0.8.1) (1.14.1)\n", + "Requirement already satisfied: pandas in /usr/local/lib/python3.11/site-packages (from layoutparser->unstructured_inference==0.8.1) (2.2.3)\n", + "Requirement already satisfied: iopath in /home/autogen/.local/lib/python3.11/site-packages (from layoutparser->unstructured_inference==0.8.1) (0.1.10)\n", + "Requirement already satisfied: pdfplumber in /home/autogen/.local/lib/python3.11/site-packages (from layoutparser->unstructured_inference==0.8.1) (0.11.5)\n", + "Requirement already satisfied: pdf2image in /home/autogen/.local/lib/python3.11/site-packages (from layoutparser->unstructured_inference==0.8.1) (1.17.0)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /home/autogen/.local/lib/python3.11/site-packages (from matplotlib->unstructured_inference==0.8.1) (1.3.1)\n", + "Requirement already satisfied: cycler>=0.10 in /home/autogen/.local/lib/python3.11/site-packages (from matplotlib->unstructured_inference==0.8.1) (0.12.1)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /home/autogen/.local/lib/python3.11/site-packages (from matplotlib->unstructured_inference==0.8.1) (4.55.0)\n", + "Requirement already satisfied: kiwisolver>=1.3.1 in /home/autogen/.local/lib/python3.11/site-packages (from matplotlib->unstructured_inference==0.8.1) (1.4.7)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /home/autogen/.local/lib/python3.11/site-packages (from matplotlib->unstructured_inference==0.8.1) (3.2.0)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.11/site-packages (from matplotlib->unstructured_inference==0.8.1) (2.9.0.post0)\n", + "Requirement already satisfied: click in /usr/local/lib/python3.11/site-packages (from nltk->unstructured==0.16.11) (8.1.7)\n", + "Requirement already satisfied: joblib in /usr/local/lib/python3.11/site-packages (from nltk->unstructured==0.16.11) 
(1.4.2)\n", + "Requirement already satisfied: olefile in /home/autogen/.local/lib/python3.11/site-packages (from python-oxmsg->unstructured==0.16.11) (0.47)\n", + "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.11/site-packages (from requests->unstructured==0.16.11) (3.4.0)\n", + "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.11/site-packages (from requests->unstructured==0.16.11) (3.10)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.11/site-packages (from requests->unstructured==0.16.11) (2.2.3)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.11/site-packages (from requests->unstructured==0.16.11) (2024.8.30)\n", + "Requirement already satisfied: torchvision in /home/autogen/.local/lib/python3.11/site-packages (from timm->unstructured_inference==0.8.1) (0.20.1)\n", + "Requirement already satisfied: networkx in /usr/local/lib/python3.11/site-packages (from torch->unstructured_inference==0.8.1) (3.4.2)\n", + "Requirement already satisfied: jinja2 in /usr/local/lib/python3.11/site-packages (from torch->unstructured_inference==0.8.1) (3.1.4)\n", + "Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.11/site-packages (from sympy->onnxruntime>=1.17.0->unstructured_inference==0.8.1) (1.3.0)\n", + "Requirement already satisfied: aiofiles>=24.1.0 in /home/autogen/.local/lib/python3.11/site-packages (from unstructured-client->unstructured==0.16.11) (24.1.0)\n", + "Requirement already satisfied: cryptography>=3.1 in /home/autogen/.local/lib/python3.11/site-packages (from unstructured-client->unstructured==0.16.11) (44.0.0)\n", + "Requirement already satisfied: eval-type-backport<0.3.0,>=0.2.0 in /home/autogen/.local/lib/python3.11/site-packages (from unstructured-client->unstructured==0.16.11) (0.2.0)\n", + "Requirement already satisfied: httpx>=0.27.0 in /usr/local/lib/python3.11/site-packages (from 
unstructured-client->unstructured==0.16.11) (0.27.2)\n", + "Requirement already satisfied: jsonpath-python<2.0.0,>=1.0.6 in /home/autogen/.local/lib/python3.11/site-packages (from unstructured-client->unstructured==0.16.11) (1.0.6)\n", + "Requirement already satisfied: nest-asyncio>=1.6.0 in /usr/local/lib/python3.11/site-packages (from unstructured-client->unstructured==0.16.11) (1.6.0)\n", + "Requirement already satisfied: pydantic<2.10.0,>=2.9.2 in /home/autogen/.local/lib/python3.11/site-packages (from unstructured-client->unstructured==0.16.11) (2.9.2)\n", + "Requirement already satisfied: pypdf>=4.0 in /home/autogen/.local/lib/python3.11/site-packages (from unstructured-client->unstructured==0.16.11) (4.3.1)\n", + "Requirement already satisfied: requests-toolbelt>=1.0.0 in /home/autogen/.local/lib/python3.11/site-packages (from unstructured-client->unstructured==0.16.11) (1.0.0)\n", + "Requirement already satisfied: cffi>=1.12 in /home/autogen/.local/lib/python3.11/site-packages (from cryptography>=3.1->unstructured-client->unstructured==0.16.11) (1.17.1)\n", + "Requirement already satisfied: anyio in /usr/local/lib/python3.11/site-packages (from httpx>=0.27.0->unstructured-client->unstructured==0.16.11) (4.6.2.post1)\n", + "Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.11/site-packages (from httpx>=0.27.0->unstructured-client->unstructured==0.16.11) (1.0.7)\n", + "Requirement already satisfied: sniffio in /usr/local/lib/python3.11/site-packages (from httpx>=0.27.0->unstructured-client->unstructured==0.16.11) (1.3.1)\n", + "Requirement already satisfied: h11<0.15,>=0.13 in /usr/local/lib/python3.11/site-packages (from httpcore==1.*->httpx>=0.27.0->unstructured-client->unstructured==0.16.11) (0.14.0)\n", + "Requirement already satisfied: annotated-types>=0.6.0 in /home/autogen/.local/lib/python3.11/site-packages (from pydantic<2.10.0,>=2.9.2->unstructured-client->unstructured==0.16.11) (0.7.0)\n", + "Requirement already satisfied: 
pydantic-core==2.23.4 in /home/autogen/.local/lib/python3.11/site-packages (from pydantic<2.10.0,>=2.9.2->unstructured-client->unstructured==0.16.11) (2.23.4)\n", + "Requirement already satisfied: mypy-extensions>=0.3.0 in /usr/local/lib/python3.11/site-packages (from typing-inspect<1,>=0.4.0->dataclasses-json->unstructured==0.16.11) (1.0.0)\n", + "Requirement already satisfied: humanfriendly>=9.1 in /usr/local/lib/python3.11/site-packages (from coloredlogs->onnxruntime>=1.17.0->unstructured_inference==0.8.1) (10.0)\n", + "Requirement already satisfied: portalocker in /home/autogen/.local/lib/python3.11/site-packages (from iopath->layoutparser->unstructured_inference==0.8.1) (2.10.1)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.11/site-packages (from jinja2->torch->unstructured_inference==0.8.1) (3.0.2)\n", + "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.11/site-packages (from pandas->layoutparser->unstructured_inference==0.8.1) (2024.2)\n", + "Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.11/site-packages (from pandas->layoutparser->unstructured_inference==0.8.1) (2024.2)\n", + "Requirement already satisfied: pdfminer.six==20231228 in /home/autogen/.local/lib/python3.11/site-packages (from pdfplumber->layoutparser->unstructured_inference==0.8.1) (20231228)\n", + "Requirement already satisfied: pypdfium2>=4.18.0 in /home/autogen/.local/lib/python3.11/site-packages (from pdfplumber->layoutparser->unstructured_inference==0.8.1) (4.30.1)\n", + "Requirement already satisfied: pycparser in /home/autogen/.local/lib/python3.11/site-packages (from cffi>=1.12->cryptography>=3.1->unstructured-client->unstructured==0.16.11) (2.22)\n", + "Downloading pytesseract-0.3.13-py3-none-any.whl (14 kB)\n", + "Installing collected packages: pytesseract\n", + "Successfully installed pytesseract-0.3.13\n", + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], + "source": [ 
+ "# need to install Poppler https://pdf2image.readthedocs.io/en/latest/installation.html\n", + "# need to install Tesseract https://tesseract-ocr.github.io/tessdoc/Installation.html\n", + "%pip install unstructured==0.16.11, pi-heif==0.21.0, unstructured_inference==0.8.1, unstructured.pytesseract==0.3.13, pytesseract==0.3.13" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "import autogen\n", + "\n", + "config_list = autogen.config_list_from_json(\"../OAI_CONFIG_LIST\")\n", + "os.environ[\"OPENAI_API_KEY\"] = config_list[0][\"api_key\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Use [Nvidia 2024 10-K](https://investor.nvidia.com/financial-info/sec-filings/sec-filings-details/default.aspx?FilingId=17293267) as an example ([file download link](https://d18rn0p25nwr6d.cloudfront.net/CIK-0001045810/1cbe8fe7-e08a-46e3-8dcc-b429fc06c1a4.pdf))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "584f040446bc45958bf2c6f7bf0b90e2", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "config.json: 0%| | 0.00/1.47k [00:00)+$6L2K57~`*^m%)-EdSe? 
zmONfYj+)Ko)aXS^iymo5ASNm+y9~iWOo=H#wbA-iY>bJJU~`VR5?Jq%fj2YwBGA8=~ literal 0 HcmV?d00001 diff --git a/notebook/agentchat_pdf_rag/parsed_elements.json b/notebook/agentchat_pdf_rag/parsed_elements.json new file mode 100644 index 0000000000..9d77d034c6 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_elements.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:439725d7c4cae3b10dc6aec6203e9c0b08407c38375abcbca60067301ab32d55 +size 2322533 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/figure-1-1.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/figure-1-1.jpg new file mode 100644 index 0000000000..5bfd432354 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/figure-1-1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b60a18030bb7a01079ad8dd3ae662c431f6ce686db7fbf1380031acebc93d0a +size 2145 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/figure-33-2.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/figure-33-2.jpg new file mode 100644 index 0000000000..6d89302884 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/figure-33-2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:442defe14cb733e85cf7a821cbec2d20f559b3c603cc3c8bec329c9fe4d8f6d9 +size 69750 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/figure-92-3.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/figure-92-3.jpg new file mode 100644 index 0000000000..9c8646db5a --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/figure-92-3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d137771b5d03ba4715a8e3c0d128988e0ad0a5cef5dcbe4d940b5b3c3a32a8d +size 5566 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/figure-93-4.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/figure-93-4.jpg new file mode 100644 index 0000000000..aa5e0f897a --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/figure-93-4.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:529984bfdfd9836b0142291207909d4cd01f7c97f201a6a3dfc88257e1c311db +size 5397 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/figure-94-5.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/figure-94-5.jpg new file mode 100644 index 0000000000..eacde13f01 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/figure-94-5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf16c57b061b039c8e9930efa11fdeb565110ce91fa1e9cb55e5b2e1996638ca +size 5200 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/figure-95-6.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/figure-95-6.jpg new file mode 100644 index 0000000000..1921a22507 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/figure-95-6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1c93fe1144bc0d163f8dcea0551892f114e2ff68ad2538ed6aa1cee8cce3a60 +size 5364 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-12-2.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-12-2.jpg new file mode 100644 index 0000000000..6eed50cf0b --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-12-2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74cd46f89df486b07553ca7eb3bef9a87fe431c96b1b11e0977fa815270735f0 +size 42660 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-2-1.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-2-1.jpg new file mode 100644 index 0000000000..05f34f1e52 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-2-1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de8520ec58bc6c472aa6f910e8ad0a72de01baedadaa43dfa4652bb059dcec9f +size 189286 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-32-3.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-32-3.jpg new file mode 100644 index 0000000000..948d68f0a8 --- /dev/null +++ 
b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-32-3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b634d9b4b4921f85e62f0473237192e65a241dd4df4305caf417da3b80a1e861 +size 62089 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-33-4.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-33-4.jpg new file mode 100644 index 0000000000..bb2d8eec9d --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-33-4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc27fe4b5af14fd610c6ec93156993f0f5330e19624fb1f81ecab99309518ce6 +size 32682 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-36-5.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-36-5.jpg new file mode 100644 index 0000000000..3697eee0ff --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-36-5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ced809bb969f7605e49ccdbdb3a79901bea5a9a201035251a1c39adf7cd4df8 +size 54461 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-39-6.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-39-6.jpg new file mode 100644 index 0000000000..f2797e1276 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-39-6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a13d2574a49df5d346e80b5066fecdb0c6378888a691204ef976f9d56397d0c +size 83482 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-39-7.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-39-7.jpg new file mode 100644 index 0000000000..08e35caaac --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-39-7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd739a1c862e65db4e5c375519184e3634f3fc12094649f296e3be0ac0079ec5 +size 40082 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-39-8.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-39-8.jpg new file mode 100644 index 
0000000000..6a6e4020a2 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-39-8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42845bdd91bac5198e80b84697a284d7dc7f507427b197bf47390e40731783a0 +size 46386 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-40-9.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-40-9.jpg new file mode 100644 index 0000000000..4c269157c8 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-40-9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9cc8f53b64555ca5eb51701e3fb3b6a60d6db589a463ed0a52ae5d9bf98e371 +size 68682 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-41-10.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-41-10.jpg new file mode 100644 index 0000000000..d8ae96f21d --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-41-10.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30992b3f47a4305e23ba46c7992a8c7620006a312ea724458284427150d2dae3 +size 39630 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-42-11.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-42-11.jpg new file mode 100644 index 0000000000..3345dceb56 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-42-11.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b59d455a2329f125ae170731b6847fe2b7a88f29e9032493ce0535c04cd85ca +size 28007 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-42-12.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-42-12.jpg new file mode 100644 index 0000000000..3b35ff1ff6 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-42-12.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c507d4e0df2605769f297c9e2fdd91ec2aafb9a8385297cedff48d3f4d45349a +size 35733 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-43-13.jpg 
b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-43-13.jpg new file mode 100644 index 0000000000..932a160da7 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-43-13.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c45ccc4af87c41dc9572729c2b5995d6540f651415f37d3bd62a0643cb32b0f +size 44445 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-47-14.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-47-14.jpg new file mode 100644 index 0000000000..94fb72d0ef --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-47-14.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:563a30606b8dd01ee22e0ea9ecd8d4bdf22913b7585320f339acbe290af4f7b9 +size 142237 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-50-15.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-50-15.jpg new file mode 100644 index 0000000000..62dff895a7 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-50-15.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e498696df863256c4c65783422d5476282375a7594e78675c8dc836b05677448 +size 139375 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-51-16.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-51-16.jpg new file mode 100644 index 0000000000..c2ea65f2a2 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-51-16.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c21dbe5bb978e846e0ecffc1dc9d76cbd805bb8da6b6525d49dce9868bf614a +size 102190 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-52-17.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-52-17.jpg new file mode 100644 index 0000000000..245cf166d8 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-52-17.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f39216a5c51643583d9a4f027ee7cd7b01829372aaec539e29441ab677994a55 +size 138826 diff --git 
a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-52-18.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-52-18.jpg new file mode 100644 index 0000000000..2940359e02 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-52-18.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8e4c906a1a925e1fdb14c06e0ac7ecb8246fa2a0bc981a47e3105cae2767385 +size 63739 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-53-19.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-53-19.jpg new file mode 100644 index 0000000000..36d3862996 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-53-19.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e56e1d862f3e84238df2ad0b4d45c0924128149eb88ce470ad53ed555259cd75 +size 183427 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-54-20.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-54-20.jpg new file mode 100644 index 0000000000..36fe781073 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-54-20.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ddde44818844e984ebd200e7c6fe09d045b2baa3819726010c19eb14cbdf2a5f +size 303686 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-60-21.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-60-21.jpg new file mode 100644 index 0000000000..084d6cd46d --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-60-21.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5f4bdfb7e9626f95019ec3ddd1f46450ae54d123c50d661d93e36f61c9c3c10 +size 46261 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-61-22.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-61-22.jpg new file mode 100644 index 0000000000..732d85f483 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-61-22.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:f7c426680e5fa4dd56d90eaf5d0b0545dc6036dd49b3391293cdb84cf8034e70 +size 38499 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-61-23.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-61-23.jpg new file mode 100644 index 0000000000..31b2294589 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-61-23.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6e18c0496cf3948b13ae5d910c49d30b5af1bd0987760cb3b9feedce8d8e713 +size 35416 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-61-24.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-61-24.jpg new file mode 100644 index 0000000000..c2e661176c --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-61-24.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:642ac19c86c63b9c31ffb04f8e416dcebce5b1ba79b628ae32d35e48b826f1ed +size 64583 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-62-25.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-62-25.jpg new file mode 100644 index 0000000000..3af3a5cabf --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-62-25.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e62ea4ba74a3e85135baefb2eced2b8b7e23dfd22c62ab156ee8c8423dfbe63 +size 41601 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-63-26.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-63-26.jpg new file mode 100644 index 0000000000..bc34c7a277 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-63-26.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f68b51a527740cecc7dfd4fbf9e9ba82405f7df361425aed7bee9f7f045cc00 +size 55318 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-63-27.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-63-27.jpg new file mode 100644 index 0000000000..952e53a326 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-63-27.jpg @@ -0,0 
+1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d45b21c0594d8f463e0e44aef25af7e744e95718991fb11f96506f029ff2dfe6 +size 78562 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-64-28.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-64-28.jpg new file mode 100644 index 0000000000..76d24798f7 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-64-28.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:906a491e9032a523892afae9e9f5fc69bff604f2fa801a97007c863c8ff5aae5 +size 64014 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-64-29.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-64-29.jpg new file mode 100644 index 0000000000..6fe15ffe7e --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-64-29.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a549eaf6b28d04e866c72ee053eda033978c26665f4ecf4f190e3665d3a7a0de +size 29749 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-65-30.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-65-30.jpg new file mode 100644 index 0000000000..fddc072f74 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-65-30.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:883ab7f4e489106c38b32c094fdf4ca31175fe2f918261d0ff6cec49bc947d29 +size 85531 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-65-31.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-65-31.jpg new file mode 100644 index 0000000000..6ffa0d0887 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-65-31.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a2f6ab861fc3a1995d513dbc13d98644f2c3406c36ab9a7ff336960a1551be4 +size 77384 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-66-32.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-66-32.jpg new file mode 100644 index 0000000000..ffdf160e64 --- /dev/null +++ 
b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-66-32.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:833d8b3b852d2b2d145916ebbbee5fa1e791eaff99ba52c9b90b9d69789a30f5 +size 74378 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-66-33.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-66-33.jpg new file mode 100644 index 0000000000..cbe4fcc428 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-66-33.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc04f5a0d4aae0f711a0b530d92af7d89adc69f517b3cd27fd73624f3720fca7 +size 73124 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-66-34.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-66-34.jpg new file mode 100644 index 0000000000..c1ff302f47 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-66-34.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be81bf660c87ee3cf6736797b82e475231dfd577bf405b490b8c618eb1bfe88d +size 43613 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-67-35.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-67-35.jpg new file mode 100644 index 0000000000..7e28565273 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-67-35.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbe703c4a52c8d717ffc5f49a10f221b9aba46ec53a82f06c20c1aabdc8de8aa +size 131663 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-68-36.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-68-36.jpg new file mode 100644 index 0000000000..b6aced9a5d --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-68-36.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81f4561d6f7da14a58df8ea7ec81af66c1d24a3c4b26d602af5a221f15664b82 +size 40822 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-68-37.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-68-37.jpg new file 
mode 100644 index 0000000000..0865736d75 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-68-37.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2192eaa49a0b9c9aeac180598a6137b723e06a9a87c890ae6af33d9c4cf0022 +size 18702 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-68-38.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-68-38.jpg new file mode 100644 index 0000000000..cd36ebb15b --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-68-38.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a3e7f97120b89ecf399e433a67dc2928706c89b05e0c1450381fbf81d4e5f96 +size 30398 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-69-39.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-69-39.jpg new file mode 100644 index 0000000000..3cc9f2f391 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-69-39.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73c69d7edf6614b28f5335a9156f63d4e4420edf536874039cf788426d33cbe0 +size 61561 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-69-40.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-69-40.jpg new file mode 100644 index 0000000000..1a9cf1fddd --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-69-40.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4a257651a15d3d7aa1dee120dbb3461210f49b0e2b5ea40b1b404223c5ec06f +size 35857 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-70-41.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-70-41.jpg new file mode 100644 index 0000000000..2de7c908f7 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-70-41.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dfab13b9565d292b821f35cd62a7dd0df1fcdae681c48d6aafaa265931f64338 +size 74040 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-70-42.jpg 
b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-70-42.jpg new file mode 100644 index 0000000000..1b23e53bab --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-70-42.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f676741f3c619861a8c7b37c6448c66ea9e3adcd61c0cd2125cc004ec2faae70 +size 38337 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-70-43.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-70-43.jpg new file mode 100644 index 0000000000..eb1a3ffb24 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-70-43.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d36bda4731a9927506fde1f5e1cff3d09bef4b5353b0b71e264705d2d64ee61f +size 35349 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-71-44.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-71-44.jpg new file mode 100644 index 0000000000..b25fdc8524 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-71-44.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4014e10cbec3bf345cd3a62198e07b35dc88bcac9a2808779ab13128f5d23c23 +size 20683 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-72-45.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-72-45.jpg new file mode 100644 index 0000000000..b459853e13 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-72-45.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:642be8df0f925dc484d8b3356720635230afaedaba7d07ae46170de27014d2c7 +size 94505 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-73-46.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-73-46.jpg new file mode 100644 index 0000000000..fe40d57c53 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-73-46.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89c40584884d7b3b72f0104279d2e06d5ba5198356daba85ed5ad8d2dc8c2409 +size 28198 diff --git 
a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-73-47.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-73-47.jpg new file mode 100644 index 0000000000..df1f009df1 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-73-47.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05caa76fd824ff956d5749dacfa635bbfc01758c47ac95477a1f9d1cffede277 +size 38362 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-75-48.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-75-48.jpg new file mode 100644 index 0000000000..24148cd78d --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-75-48.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4bc9e7b6f97fb9f05a670e73b2b69cb1785a7cc7beee008de3ff5cce43a46be6 +size 62731 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-75-49.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-75-49.jpg new file mode 100644 index 0000000000..c995ca766a --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-75-49.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a30d066ca7d6a67b3bed4f8a140db099d3f716d865293c96ad8daf0e0e0ba277 +size 28709 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-75-50.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-75-50.jpg new file mode 100644 index 0000000000..50e54ec7c6 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-75-50.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a4f14977f23284199170a7b3d3188bcd42110e1aa402b2df616985d76baf949 +size 107963 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-76-51.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-76-51.jpg new file mode 100644 index 0000000000..1a68cbd88c --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-76-51.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:3a27d0ad965c5564a283428340135a28393ee68cf986c1757aee566117982548 +size 118556 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-77-52.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-77-52.jpg new file mode 100644 index 0000000000..fbfc7ae9b4 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-77-52.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:735be3e47f6430963cc3098cbfe5bc6525def440b549ac49fe461f9570dbe0ac +size 54658 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-78-53.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-78-53.jpg new file mode 100644 index 0000000000..f4a252d042 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-78-53.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96348a72c7c14c5937cf43235554ae8efd98a3b6b0409e4ab851d8435c68ee07 +size 70330 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-79-54.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-79-54.jpg new file mode 100644 index 0000000000..5510a9056e --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-79-54.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:884ec5ee6effbb173e98921b1a23205a8f7b9d6808211e9f483fb1c363e95282 +size 70884 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-79-55.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-79-55.jpg new file mode 100644 index 0000000000..f5f62e7189 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-79-55.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76a050023989f88960ba98441333decd3c91a18450597daaaae4cfb27d52a407 +size 46317 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-79-56.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-79-56.jpg new file mode 100644 index 0000000000..a8a21cbb02 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-79-56.jpg @@ -0,0 
+1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1daca4ca5ffd3bddfb5a50ed4e1b822ed7f9369e18b3a4c9cdf391e80c6c6249 +size 47247 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-80-57.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-80-57.jpg new file mode 100644 index 0000000000..c77f552e4e --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-80-57.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1d69beefaf1c0117413fa53b7b9b15feb4efc12486d46f40776ac9975d2757f +size 31572 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-81-58.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-81-58.jpg new file mode 100644 index 0000000000..47dbb244bb --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-81-58.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9de4eee046ea5afca8d9cb5585c19e919e10b3e3e7ea2d5a53dc94b3b22057f5 +size 90702 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-82-59.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-82-59.jpg new file mode 100644 index 0000000000..725a1c145c --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-82-59.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:072d31d5dd81bb5f15a7e49b582da4f2a6b841869d6666da7781e09390a4b420 +size 354183 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-83-60.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-83-60.jpg new file mode 100644 index 0000000000..f2b6984458 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-83-60.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54a96220a68e08e4a61d6b8b15d85092c61bb95499ed963c7db3445508fd1e0d +size 102751 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-85-61.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-85-61.jpg new file mode 100644 index 0000000000..513648aae9 --- /dev/null +++ 
b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-85-61.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ceb71467ed58ab3ba9605d1445242ede96ba5f555d41cc35840bdf5323564116 +size 172564 diff --git a/notebook/agentchat_pdf_rag/parsed_pdf_info/table-95-62.jpg b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-95-62.jpg new file mode 100644 index 0000000000..01a7128677 --- /dev/null +++ b/notebook/agentchat_pdf_rag/parsed_pdf_info/table-95-62.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49c6b8d938863a47dad6c9fcb8d62465ab99644d6432f5a49221c459064e3894 +size 433728 From d96e52df2efcc24ffeec3e4717a805cd0915809b Mon Sep 17 00:00:00 2001 From: AgentGenie Date: Tue, 7 Jan 2025 16:49:10 -0800 Subject: [PATCH 02/61] Create tabular_data_rag_workflow with groupchat --- .gitattributes | 1 + .../agentchat_rag_workflow.ipynb | 248 -------- .../agentchat_pdf_rag/parsed_elements.json | 4 +- .../agentchat_pdf_rag/processed_elements.json | 3 + .../agentchat_pdf_rag/sample_elements.json | 17 + .../agentchat_tabular_data_rag_workflow.ipynb | 541 ++++++++++++++++++ setup.py | 1 + 7 files changed, 565 insertions(+), 250 deletions(-) delete mode 100644 notebook/agentchat_pdf_rag/agentchat_rag_workflow.ipynb create mode 100644 notebook/agentchat_pdf_rag/processed_elements.json create mode 100644 notebook/agentchat_pdf_rag/sample_elements.json create mode 100644 notebook/agentchat_tabular_data_rag_workflow.ipynb diff --git a/.gitattributes b/.gitattributes index b417b197fc..3adb203207 100644 --- a/.gitattributes +++ b/.gitattributes @@ -87,3 +87,4 @@ makefile text eol=lf *.jpeg filter=lfs diff=lfs merge=lfs -text notebook/agentchat_pdf_rag/parsed_elements.json filter=lfs diff=lfs merge=lfs -text notebook/agentchat_pdf_rag/input_files/nvidia_10k_2024.pdf filter=lfs diff=lfs merge=lfs -text +notebook/agentchat_pdf_rag/processed_elements.json filter=lfs diff=lfs merge=lfs -text diff --git 
a/notebook/agentchat_pdf_rag/agentchat_rag_workflow.ipynb b/notebook/agentchat_pdf_rag/agentchat_rag_workflow.ipynb deleted file mode 100644 index 5a8c010d1c..0000000000 --- a/notebook/agentchat_pdf_rag/agentchat_rag_workflow.ipynb +++ /dev/null @@ -1,248 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Defaulting to user installation because normal site-packages is not writeable\n", - "Requirement already satisfied: unstructured==0.16.11 in /home/autogen/.local/lib/python3.11/site-packages (0.16.11)\n", - "Requirement already satisfied: pi-heif==0.21.0 in /home/autogen/.local/lib/python3.11/site-packages (0.21.0)\n", - "Requirement already satisfied: unstructured_inference==0.8.1 in /home/autogen/.local/lib/python3.11/site-packages (0.8.1)\n", - "Requirement already satisfied: unstructured.pytesseract==0.3.13 in /home/autogen/.local/lib/python3.11/site-packages (0.3.13)\n", - "Collecting pytesseract==0.3.13\n", - " Downloading pytesseract-0.3.13-py3-none-any.whl.metadata (11 kB)\n", - "Requirement already satisfied: chardet in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (5.2.0)\n", - "Requirement already satisfied: filetype in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (1.2.0)\n", - "Requirement already satisfied: python-magic in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (0.4.27)\n", - "Requirement already satisfied: lxml in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (5.3.0)\n", - "Requirement already satisfied: nltk in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (3.9.1)\n", - "Requirement already satisfied: requests in /usr/local/lib/python3.11/site-packages (from unstructured==0.16.11) (2.32.3)\n", - "Requirement already satisfied: beautifulsoup4 in 
/usr/local/lib/python3.11/site-packages (from unstructured==0.16.11) (4.12.3)\n", - "Requirement already satisfied: emoji in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (2.14.0)\n", - "Requirement already satisfied: dataclasses-json in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (0.6.7)\n", - "Requirement already satisfied: python-iso639 in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (2024.10.22)\n", - "Requirement already satisfied: langdetect in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (1.0.9)\n", - "Requirement already satisfied: numpy<2 in /usr/local/lib/python3.11/site-packages (from unstructured==0.16.11) (1.26.4)\n", - "Requirement already satisfied: rapidfuzz in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (3.11.0)\n", - "Requirement already satisfied: backoff in /usr/local/lib/python3.11/site-packages (from unstructured==0.16.11) (2.2.1)\n", - "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.11/site-packages (from unstructured==0.16.11) (4.12.2)\n", - "Requirement already satisfied: unstructured-client in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (0.28.1)\n", - "Requirement already satisfied: wrapt in /usr/local/lib/python3.11/site-packages (from unstructured==0.16.11) (1.17.0)\n", - "Requirement already satisfied: tqdm in /usr/local/lib/python3.11/site-packages (from unstructured==0.16.11) (4.67.1)\n", - "Requirement already satisfied: psutil in /usr/local/lib/python3.11/site-packages (from unstructured==0.16.11) (6.1.0)\n", - "Requirement already satisfied: python-oxmsg in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (0.0.1)\n", - "Requirement already satisfied: html5lib in /home/autogen/.local/lib/python3.11/site-packages (from unstructured==0.16.11) (1.1)\n", - "Requirement already 
satisfied: pillow>=10.1.0 in /home/autogen/.local/lib/python3.11/site-packages (from pi-heif==0.21.0) (10.4.0)\n", - "Requirement already satisfied: layoutparser in /home/autogen/.local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (0.3.4)\n", - "Requirement already satisfied: python-multipart in /home/autogen/.local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (0.0.20)\n", - "Requirement already satisfied: huggingface-hub in /usr/local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (0.26.2)\n", - "Requirement already satisfied: opencv-python!=4.7.0.68 in /home/autogen/.local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (4.10.0.84)\n", - "Requirement already satisfied: onnx in /home/autogen/.local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (1.17.0)\n", - "Requirement already satisfied: onnxruntime>=1.17.0 in /home/autogen/.local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (1.19.2)\n", - "Requirement already satisfied: matplotlib in /home/autogen/.local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (3.9.2)\n", - "Requirement already satisfied: torch in /usr/local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (2.5.1)\n", - "Requirement already satisfied: timm in /home/autogen/.local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (1.0.12)\n", - "Requirement already satisfied: transformers>=4.25.1 in /usr/local/lib/python3.11/site-packages (from unstructured_inference==0.8.1) (4.46.3)\n", - "Requirement already satisfied: packaging>=21.3 in /usr/local/lib/python3.11/site-packages (from unstructured.pytesseract==0.3.13) (24.2)\n", - "Requirement already satisfied: coloredlogs in /usr/local/lib/python3.11/site-packages (from onnxruntime>=1.17.0->unstructured_inference==0.8.1) (15.0.1)\n", - "Requirement already satisfied: flatbuffers in /usr/local/lib/python3.11/site-packages (from 
onnxruntime>=1.17.0->unstructured_inference==0.8.1) (24.3.25)\n", - "Requirement already satisfied: protobuf in /home/autogen/.local/lib/python3.11/site-packages (from onnxruntime>=1.17.0->unstructured_inference==0.8.1) (5.29.1)\n", - "Requirement already satisfied: sympy in /usr/local/lib/python3.11/site-packages (from onnxruntime>=1.17.0->unstructured_inference==0.8.1) (1.13.1)\n", - "Requirement already satisfied: filelock in /usr/local/lib/python3.11/site-packages (from transformers>=4.25.1->unstructured_inference==0.8.1) (3.16.1)\n", - "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.11/site-packages (from transformers>=4.25.1->unstructured_inference==0.8.1) (6.0.2)\n", - "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.11/site-packages (from transformers>=4.25.1->unstructured_inference==0.8.1) (2024.11.6)\n", - "Requirement already satisfied: tokenizers<0.21,>=0.20 in /usr/local/lib/python3.11/site-packages (from transformers>=4.25.1->unstructured_inference==0.8.1) (0.20.3)\n", - "Requirement already satisfied: safetensors>=0.4.1 in /usr/local/lib/python3.11/site-packages (from transformers>=4.25.1->unstructured_inference==0.8.1) (0.4.5)\n", - "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.11/site-packages (from huggingface-hub->unstructured_inference==0.8.1) (2024.10.0)\n", - "Requirement already satisfied: soupsieve>1.2 in /usr/local/lib/python3.11/site-packages (from beautifulsoup4->unstructured==0.16.11) (2.6)\n", - "Requirement already satisfied: marshmallow<4.0.0,>=3.18.0 in /home/autogen/.local/lib/python3.11/site-packages (from dataclasses-json->unstructured==0.16.11) (3.23.1)\n", - "Requirement already satisfied: typing-inspect<1,>=0.4.0 in /home/autogen/.local/lib/python3.11/site-packages (from dataclasses-json->unstructured==0.16.11) (0.9.0)\n", - "Requirement already satisfied: six>=1.9 in /usr/local/lib/python3.11/site-packages (from html5lib->unstructured==0.16.11) 
(1.16.0)\n", - "Requirement already satisfied: webencodings in /usr/local/lib/python3.11/site-packages (from html5lib->unstructured==0.16.11) (0.5.1)\n", - "Requirement already satisfied: scipy in /usr/local/lib/python3.11/site-packages (from layoutparser->unstructured_inference==0.8.1) (1.14.1)\n", - "Requirement already satisfied: pandas in /usr/local/lib/python3.11/site-packages (from layoutparser->unstructured_inference==0.8.1) (2.2.3)\n", - "Requirement already satisfied: iopath in /home/autogen/.local/lib/python3.11/site-packages (from layoutparser->unstructured_inference==0.8.1) (0.1.10)\n", - "Requirement already satisfied: pdfplumber in /home/autogen/.local/lib/python3.11/site-packages (from layoutparser->unstructured_inference==0.8.1) (0.11.5)\n", - "Requirement already satisfied: pdf2image in /home/autogen/.local/lib/python3.11/site-packages (from layoutparser->unstructured_inference==0.8.1) (1.17.0)\n", - "Requirement already satisfied: contourpy>=1.0.1 in /home/autogen/.local/lib/python3.11/site-packages (from matplotlib->unstructured_inference==0.8.1) (1.3.1)\n", - "Requirement already satisfied: cycler>=0.10 in /home/autogen/.local/lib/python3.11/site-packages (from matplotlib->unstructured_inference==0.8.1) (0.12.1)\n", - "Requirement already satisfied: fonttools>=4.22.0 in /home/autogen/.local/lib/python3.11/site-packages (from matplotlib->unstructured_inference==0.8.1) (4.55.0)\n", - "Requirement already satisfied: kiwisolver>=1.3.1 in /home/autogen/.local/lib/python3.11/site-packages (from matplotlib->unstructured_inference==0.8.1) (1.4.7)\n", - "Requirement already satisfied: pyparsing>=2.3.1 in /home/autogen/.local/lib/python3.11/site-packages (from matplotlib->unstructured_inference==0.8.1) (3.2.0)\n", - "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.11/site-packages (from matplotlib->unstructured_inference==0.8.1) (2.9.0.post0)\n", - "Requirement already satisfied: click in 
/usr/local/lib/python3.11/site-packages (from nltk->unstructured==0.16.11) (8.1.7)\n", - "Requirement already satisfied: joblib in /usr/local/lib/python3.11/site-packages (from nltk->unstructured==0.16.11) (1.4.2)\n", - "Requirement already satisfied: olefile in /home/autogen/.local/lib/python3.11/site-packages (from python-oxmsg->unstructured==0.16.11) (0.47)\n", - "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.11/site-packages (from requests->unstructured==0.16.11) (3.4.0)\n", - "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.11/site-packages (from requests->unstructured==0.16.11) (3.10)\n", - "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.11/site-packages (from requests->unstructured==0.16.11) (2.2.3)\n", - "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.11/site-packages (from requests->unstructured==0.16.11) (2024.8.30)\n", - "Requirement already satisfied: torchvision in /home/autogen/.local/lib/python3.11/site-packages (from timm->unstructured_inference==0.8.1) (0.20.1)\n", - "Requirement already satisfied: networkx in /usr/local/lib/python3.11/site-packages (from torch->unstructured_inference==0.8.1) (3.4.2)\n", - "Requirement already satisfied: jinja2 in /usr/local/lib/python3.11/site-packages (from torch->unstructured_inference==0.8.1) (3.1.4)\n", - "Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.11/site-packages (from sympy->onnxruntime>=1.17.0->unstructured_inference==0.8.1) (1.3.0)\n", - "Requirement already satisfied: aiofiles>=24.1.0 in /home/autogen/.local/lib/python3.11/site-packages (from unstructured-client->unstructured==0.16.11) (24.1.0)\n", - "Requirement already satisfied: cryptography>=3.1 in /home/autogen/.local/lib/python3.11/site-packages (from unstructured-client->unstructured==0.16.11) (44.0.0)\n", - "Requirement already satisfied: eval-type-backport<0.3.0,>=0.2.0 in 
/home/autogen/.local/lib/python3.11/site-packages (from unstructured-client->unstructured==0.16.11) (0.2.0)\n", - "Requirement already satisfied: httpx>=0.27.0 in /usr/local/lib/python3.11/site-packages (from unstructured-client->unstructured==0.16.11) (0.27.2)\n", - "Requirement already satisfied: jsonpath-python<2.0.0,>=1.0.6 in /home/autogen/.local/lib/python3.11/site-packages (from unstructured-client->unstructured==0.16.11) (1.0.6)\n", - "Requirement already satisfied: nest-asyncio>=1.6.0 in /usr/local/lib/python3.11/site-packages (from unstructured-client->unstructured==0.16.11) (1.6.0)\n", - "Requirement already satisfied: pydantic<2.10.0,>=2.9.2 in /home/autogen/.local/lib/python3.11/site-packages (from unstructured-client->unstructured==0.16.11) (2.9.2)\n", - "Requirement already satisfied: pypdf>=4.0 in /home/autogen/.local/lib/python3.11/site-packages (from unstructured-client->unstructured==0.16.11) (4.3.1)\n", - "Requirement already satisfied: requests-toolbelt>=1.0.0 in /home/autogen/.local/lib/python3.11/site-packages (from unstructured-client->unstructured==0.16.11) (1.0.0)\n", - "Requirement already satisfied: cffi>=1.12 in /home/autogen/.local/lib/python3.11/site-packages (from cryptography>=3.1->unstructured-client->unstructured==0.16.11) (1.17.1)\n", - "Requirement already satisfied: anyio in /usr/local/lib/python3.11/site-packages (from httpx>=0.27.0->unstructured-client->unstructured==0.16.11) (4.6.2.post1)\n", - "Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.11/site-packages (from httpx>=0.27.0->unstructured-client->unstructured==0.16.11) (1.0.7)\n", - "Requirement already satisfied: sniffio in /usr/local/lib/python3.11/site-packages (from httpx>=0.27.0->unstructured-client->unstructured==0.16.11) (1.3.1)\n", - "Requirement already satisfied: h11<0.15,>=0.13 in /usr/local/lib/python3.11/site-packages (from httpcore==1.*->httpx>=0.27.0->unstructured-client->unstructured==0.16.11) (0.14.0)\n", - "Requirement already 
satisfied: annotated-types>=0.6.0 in /home/autogen/.local/lib/python3.11/site-packages (from pydantic<2.10.0,>=2.9.2->unstructured-client->unstructured==0.16.11) (0.7.0)\n", - "Requirement already satisfied: pydantic-core==2.23.4 in /home/autogen/.local/lib/python3.11/site-packages (from pydantic<2.10.0,>=2.9.2->unstructured-client->unstructured==0.16.11) (2.23.4)\n", - "Requirement already satisfied: mypy-extensions>=0.3.0 in /usr/local/lib/python3.11/site-packages (from typing-inspect<1,>=0.4.0->dataclasses-json->unstructured==0.16.11) (1.0.0)\n", - "Requirement already satisfied: humanfriendly>=9.1 in /usr/local/lib/python3.11/site-packages (from coloredlogs->onnxruntime>=1.17.0->unstructured_inference==0.8.1) (10.0)\n", - "Requirement already satisfied: portalocker in /home/autogen/.local/lib/python3.11/site-packages (from iopath->layoutparser->unstructured_inference==0.8.1) (2.10.1)\n", - "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.11/site-packages (from jinja2->torch->unstructured_inference==0.8.1) (3.0.2)\n", - "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.11/site-packages (from pandas->layoutparser->unstructured_inference==0.8.1) (2024.2)\n", - "Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.11/site-packages (from pandas->layoutparser->unstructured_inference==0.8.1) (2024.2)\n", - "Requirement already satisfied: pdfminer.six==20231228 in /home/autogen/.local/lib/python3.11/site-packages (from pdfplumber->layoutparser->unstructured_inference==0.8.1) (20231228)\n", - "Requirement already satisfied: pypdfium2>=4.18.0 in /home/autogen/.local/lib/python3.11/site-packages (from pdfplumber->layoutparser->unstructured_inference==0.8.1) (4.30.1)\n", - "Requirement already satisfied: pycparser in /home/autogen/.local/lib/python3.11/site-packages (from cffi>=1.12->cryptography>=3.1->unstructured-client->unstructured==0.16.11) (2.22)\n", - "Downloading pytesseract-0.3.13-py3-none-any.whl 
(14 kB)\n", - "Installing collected packages: pytesseract\n", - "Successfully installed pytesseract-0.3.13\n", - "Note: you may need to restart the kernel to use updated packages.\n" - ] - } - ], - "source": [ - "# need to install Poppler https://pdf2image.readthedocs.io/en/latest/installation.html\n", - "# need to install Tesseract https://tesseract-ocr.github.io/tessdoc/Installation.html\n", - "%pip install unstructured==0.16.11, pi-heif==0.21.0, unstructured_inference==0.8.1, unstructured.pytesseract==0.3.13, pytesseract==0.3.13" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "\n", - "import autogen\n", - "\n", - "config_list = autogen.config_list_from_json(\"../OAI_CONFIG_LIST\")\n", - "os.environ[\"OPENAI_API_KEY\"] = config_list[0][\"api_key\"]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Use [Nvidia 2024 10-K](https://investor.nvidia.com/financial-info/sec-filings/sec-filings-details/default.aspx?FilingId=17293267) as an example ([file download link](https://d18rn0p25nwr6d.cloudfront.net/CIK-0001045810/1cbe8fe7-e08a-46e3-8dcc-b429fc06c1a4.pdf))\n" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "584f040446bc45958bf2c6f7bf0b90e2", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "config.json: 0%| | 0.00/1.47k [00:00 0:\n", + " pre_data = file_elements[idx - 1].to_dict()\n", + " if pre_data[\"type\"] in text_types:\n", + " new_data[\"text\"] = pre_data[\"text\"] + new_data[\"text\"]\n", + " if idx < element_length - 1:\n", + " post_data = file_elements[idx + 1].to_dict()\n", + " if post_data[\"type\"] in text_types:\n", + " new_data[\"text\"] = new_data[\"text\"] + post_data[\"text\"]\n", + " output_elements.append(new_data)\n", + "\n", + "with open(\"proessed_elements.json\", \"w\", encoding=\"utf-8\") as 
file:\n", + " json.dump(output_elements, file, indent=4)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# IMPORTS\n", + "\n", + "# This is needed to allow nested asyncio calls for Neo4j in Jupyter\n", + "import nest_asyncio\n", + "\n", + "nest_asyncio.apply()\n", + "\n", + "from llama_index.embeddings.openai import OpenAIEmbedding\n", + "from llama_index.llms.openai import OpenAI\n", + "\n", + "from autogen import AssistantAgent, ConversableAgent, UserProxyAgent\n", + "\n", + "# load documents\n", + "from autogen.agentchat.contrib.graph_rag.document import Document, DocumentType\n", + "from autogen.agentchat.contrib.graph_rag.neo4j_graph_query_engine import Neo4jGraphQueryEngine\n", + "from autogen.agentchat.contrib.graph_rag.neo4j_graph_rag_capability import Neo4jGraphCapability\n", + "from autogen.agentchat.contrib.multimodal_conversable_agent import MultimodalConversableAgent" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create a knowledge graph with sample data\n", + "\n", + "To save time, we uses a small subset of the data for the notebook." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "22c02a975b784c5db13ea02163bd140a", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Parsing nodes: 0%| | 0/1 [00:00.\".\n", + " For example, when you got message \"The image path for the table titled XYZ is \"./parsed_pdf_info/abcde\".\",\n", + " you will reply \"Please extract table from the following image and convert it to Markdown.\n", + " .\"\n", + " \"\"\",\n", + " llm_config=llm_config,\n", + " human_input_mode=\"NEVER\",\n", + ")\n", + "\n", + "image2table_convertor = MultimodalConversableAgent(\n", + " name=\"image2table_convertor\",\n", + " system_message=\"\"\"\n", + " You are an image to table convertor. 
You will receive an image path of a table. The original table could be in csv, pdf or other format.\n", + " You need to the following step in sequence,\n", + " 1. extract the table content and structure.\n", + " 2. Make sure the structure is complete.\n", + " 3. Correct typos in the text fields.\n", + " 4. In the end, output the table in Markdown.\n", + " \"\"\",\n", + " llm_config={\"config_list\": config_list, \"max_tokens\": 300},\n", + " human_input_mode=\"NEVER\",\n", + " max_consecutive_auto_reply=1,\n", + ")\n", + "\n", + "conclusion = AssistantAgent(\n", + " name=\"conclusion\",\n", + " system_message=\"\"\"You are a helpful assistant.\n", + " Base on the history of the groupchat, answer the original question from User_proxy.\n", + " \"\"\",\n", + " llm_config=llm_config,\n", + " human_input_mode=\"NEVER\", # Never ask for human input.\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "What is goodwill asset (in millions) for 2024 in table NVIDIA Corporation and Subsidiaries Consolidated Balance Sheets?\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[32m\n", + "Next speaker: table_assistant\n", + "\u001b[0m\n", + "\u001b[33mtable_assistant\u001b[0m (to chat_manager):\n", + "\n", + "Find image_path for Table: NVIDIA Corporation and Subsidiaries Consolidated Balance Sheets\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[32m\n", + "Next speaker: nvidia_rag\n", + "\u001b[0m\n", + "\u001b[33mnvidia_rag\u001b[0m (to chat_manager):\n", + "\n", + "The image path for the table titled \"NVIDIA Corporation and Subsidiaries Consolidated Balance Sheets\" is \"./parsed_pdf_info/table-52-17.jpg\".\n", + "\n", + 
"--------------------------------------------------------------------------------\n", + "\u001b[32m\n", + "Next speaker: img_request_format\n", + "\u001b[0m\n", + "\u001b[33mimg_request_format\u001b[0m (to chat_manager):\n", + "\n", + "Please extract table from the following image and convert it to Markdown.\n", + ".\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[32m\n", + "Next speaker: image2table_convertor\n", + "\u001b[0m\n", + "\u001b[33mimage2table_convertor\u001b[0m (to chat_manager):\n", + "\n", + "Here is the table extracted in Markdown format:\n", + "\n", + "```markdown\n", + "| | Jan 28, 2024 | Jan 29, 2023 |\n", + "|--------------------------------------|--------------|--------------|\n", + "| **Assets** | | |\n", + "| Current assets: | | |\n", + "|     Cash and cash equivalents | $7,280 | $3,389 |\n", + "|     Marketable securities | $18,704 | $9,907 |\n", + "|     Accounts receivable, net | $9,999 | $3,827 |\n", + "|     Inventories | $5,282 | $5,159 |\n", + "|     Prepaid expenses and other current assets | $3,080 | $791 |\n", + "| **Total current assets** | $44,345 | $23,073 |\n", + "| Property and equipment, net | $3,914 | $3,807 |\n", + "| Operating lease assets | $1,346 | $1,038 |\n", + "| **Goodwill** | $4,430 | $4,372 |\n", + "| Intangible assets, net | $1,112 | $1,676 |\n", + "| Deferred income tax assets | $6,081 | $3,396 |\n", + "| Other\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[32m\n", + "Next speaker: conclusion\n", + "\u001b[0m\n", + "\u001b[33mconclusion\u001b[0m (to chat_manager):\n", + "\n", + "Based on the table \"NVIDIA Corporation and Subsidiaries Consolidated Balance Sheets,\" the goodwill asset for the fiscal year ending January 28, 2024, is $4,430 million.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[32m\n", + "Next speaker: 
User_proxy\n", + "\u001b[0m\n" + ] + } + ], + "source": [ + "groupchat = autogen.GroupChat(\n", + " agents=[\n", + " user_proxy,\n", + " table_assistant,\n", + " rag_agent,\n", + " img_request_format,\n", + " image2table_convertor,\n", + " conclusion,\n", + " ],\n", + " messages=[],\n", + " max_round=12,\n", + " speaker_selection_method=\"round_robin\",\n", + ")\n", + "manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)\n", + "chat_result = user_proxy.initiate_chat(\n", + " manager,\n", + " message=\"What is goodwill asset (in millions) for 2024 in table NVIDIA Corporation and Subsidiaries Consolidated Balance Sheets?\",\n", + ")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.10" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/setup.py b/setup.py index ae4aadb5af..e56d522e93 100644 --- a/setup.py +++ b/setup.py @@ -82,6 +82,7 @@ "llama-index==0.12.5", "llama-index-graph-stores-neo4j==0.4.2", "llama-index-core==0.12.5", + "llama-index-readers-web==0.3.3", ] # used for agentchat_realtime_swarm notebook and realtime agent twilio demo From e32c81ff523d9569a3481e939ad5a1558ae7f819 Mon Sep 17 00:00:00 2001 From: AgentGenie Date: Tue, 7 Jan 2025 17:19:34 -0800 Subject: [PATCH 03/61] Update website notebook for agentchat_tabular_data_rag_workflow.ipynb --- notebook/agentchat_tabular_data_rag_workflow.ipynb | 14 +++++++++++--- website/mint.json | 3 ++- website/snippets/data/NotebooksMetadata.mdx | 11 +++++++++++ 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/notebook/agentchat_tabular_data_rag_workflow.ipynb b/notebook/agentchat_tabular_data_rag_workflow.ipynb index a0831009dc..32090e8d4d 
100644 --- a/notebook/agentchat_tabular_data_rag_workflow.ipynb +++ b/notebook/agentchat_tabular_data_rag_workflow.ipynb @@ -22,9 +22,10 @@ "````{=mdx}\n", ":::info Requirements\n", "Unstructured-IO is a dependency for this notebook to parse the pdf. Please install the following dependencies\n", - "# Install Poppler https://pdf2image.readthedocs.io/en/latest/installation.html\n", - "# Install Tesseract https://tesseract-ocr.github.io/tessdoc/Installation.html\n", - "# %pip install unstructured==0.16.11, pi-heif==0.21.0, unstructured_inference==0.8.1, unstructured.pytesseract==0.3.13, pytesseract==0.3.13\n", + "\n", + "- Install Poppler https://pdf2image.readthedocs.io/en/latest/installation.html\n", + "- Install Tesseract https://tesseract-ocr.github.io/tessdoc/Installation.html\n", + "- pip install unstructured==0.16.11, pi-heif==0.21.0, unstructured_inference==0.8.1, unstructured.pytesseract==0.3.13, pytesseract==0.3.13\n", ":::\n", "````\n" ] @@ -518,6 +519,13 @@ } ], "metadata": { + "front_matter": { + "description": "Agentic RAG workflow on tabular data from a PDF file", + "tags": [ + "RAG", + "groupchat" + ] + }, "kernelspec": { "display_name": "Python 3", "language": "python", diff --git a/website/mint.json b/website/mint.json index 4415bd2d2a..acaf590e73 100644 --- a/website/mint.json +++ b/website/mint.json @@ -626,7 +626,8 @@ "notebooks/config_loader_utility_functions", "notebooks/gpt_assistant_agent_function_call", "notebooks/lats_search", - "notebooks/tools_interoperability" + "notebooks/tools_interoperability", + "notebooks/agentchat_tabular_data_rag_workflow" ] }, "notebooks/Gallery" diff --git a/website/snippets/data/NotebooksMetadata.mdx b/website/snippets/data/NotebooksMetadata.mdx index 7279592e2f..52c880a798 100644 --- a/website/snippets/data/NotebooksMetadata.mdx +++ b/website/snippets/data/NotebooksMetadata.mdx @@ -991,5 +991,16 @@ export const notebooksMetadata = [ "pydanticai" ], "source": "/notebook/tools_interoperability.ipynb" + }, + { + 
"title": "Agentic RAG workflow on tabular data from a PDF file", + "link": "/notebooks/agentchat_tabular_data_rag_workflow", + "description": "Agentic RAG workflow on tabular data from a PDF file", + "image": null, + "tags": [ + "RAG", + "groupchat" + ], + "source": "/notebook/agentchat_tabular_data_rag_workflow.ipynb" } ]; From 6c1ce541dfa7ccaa75e525e2079a742ebd5740a6 Mon Sep 17 00:00:00 2001 From: skzhang1 Date: Sat, 4 Jan 2025 18:27:33 -0500 Subject: [PATCH 04/61] update --- website/talks/2024-12-12/index.mdx | 13 +++++++++++ website/talks/2024-12-19/index.mdx | 13 +++++++++++ website/talks/future_talks/index.mdx | 34 ++++------------------------ 3 files changed, 31 insertions(+), 29 deletions(-) create mode 100644 website/talks/2024-12-12/index.mdx create mode 100644 website/talks/2024-12-19/index.mdx diff --git a/website/talks/2024-12-12/index.mdx b/website/talks/2024-12-12/index.mdx new file mode 100644 index 0000000000..4f607c3bdc --- /dev/null +++ b/website/talks/2024-12-12/index.mdx @@ -0,0 +1,13 @@ +--- +title: Make AI Agents Collaborate: Drag, Drop, and Orchestrate with Waldiez - Dec 12, 2024 +--- + +### Speakers: Panagiotis Kasnesis + +### Biography of the speakers: + +Panagiotis Kasnesis holds a Ph.D degree in computer science from the Department of Electrical and Computer Engineering at NTUA. He received his diploma degree in chemical engineering and his M.Sc. in techno-economic systems from NTUA, in 2008 and 2013 respectively. His research interests include Machine/Deep learning, Multi-Agent Systems and IoT, while he has published more than 50 scientific articles in international journals/conferences in these fields. He is founder and CEO of Waldiez (https://waldiez.io/), co-founder and CTO of ThinGenious and serves as a senior researcher at University of West Attica. 
Moreover, he is a lecturer at the MSc program “Artificial Intelligence and Deep Learning” and is certified as University Ambassador, by NVIDIA Deep Learning Institute (DLI), in the tasks of Building Transformer-Based NLP Applications, and Rapid Application Development Using LLMs. + +### Abstract: + +Current LLM-based orchestration tools often lack support for multi-agent interactions, are restricted to basic communication patterns, or only provide information after the entire workflow has completed. Waldiez is an open-source workflow tool that lets you orchestrate your LLM-agents using drag-and-drop and develop complex agentic applications. It is a low-code tool that assists you design and visualize your multi-agent workflow in jupyter lab as a plugin. Wadiez runs over AG2 supporting all the communication patterns (e.g., sequential, nested and group chat), supporting several LLM-based services offered by OpenAI, Anthropic, NVIDIA NIM, local hosted models and several others. In this talk, we’ll dive into the powerful features of Wadiez, demonstrating its capabilities through real-world use cases. Join us as we explore how Wadiez can streamline complex workflows and enhance multi-agent interactions, showcasing exactly what sets it apart from other LLM-based orchestration tools. diff --git a/website/talks/2024-12-19/index.mdx b/website/talks/2024-12-19/index.mdx new file mode 100644 index 0000000000..ba253b317d --- /dev/null +++ b/website/talks/2024-12-19/index.mdx @@ -0,0 +1,13 @@ +--- +title: Transforming CRM with Agents: The Journey to Ully.ai's Next-Gen ERP - Dec 19, 2024 +--- + +### Speakers: Bassil Khilo + +### Biography of the speakers: + +Bassil Khilo is the Founder & CEO of Ully.ai, an AI-powered CRM platform that automates lead research and outreach while offering full-cycle sales management tools. 
With a vision to redefine enterprise resource planning (ERP) systems, Bassil combines his experience in SaaS sales and entrepreneurship to build solutions that empower businesses to scale efficiently. Before founding Ully.ai, Bassil served as a Senior Account Executive at a globally renowned ERP company, where he gained hands-on experience in ERP and CRM solutions. His entrepreneurial journey includes founding ventures such as Maple Tyres, an e-commerce platform for tires in the UAE, and WarrenAI, a stock research platform that helps investors identify top-performing companies. + +### Abstract: + +In today's fast-paced digital landscape, businesses need tools that go beyond conventional CRM systems to manage their operations efficiently. At Ully.ai, we’ve built a powerful AI-driven CRM (soon evolving into a full ERP) that redefines how businesses research leads, engage with customers, and manage their sales cycle. Ully automates WhatsApp and email replies with AI Agents, enriches leads with deep insights, and personalizes outreach at scale. This talk explores the how Autogen’s Agents can streamline operations, improve customer engagement, and drive growth with RAG. diff --git a/website/talks/future_talks/index.mdx b/website/talks/future_talks/index.mdx index 65dad7c997..b047cb4d8d 100644 --- a/website/talks/future_talks/index.mdx +++ b/website/talks/future_talks/index.mdx @@ -2,37 +2,13 @@ title: Upcoming Talks --- -## Make AI Agents Collaborate: Drag, Drop, and Orchestrate with Waldiez - Dec 9, 2024 - -### Speakers: Panagiotis Kasnesis - -### Biography of the speakers: - -Panagiotis Kasnesis holds a Ph.D degree in computer science from the Department of Electrical and Computer Engineering at NTUA. He received his diploma degree in chemical engineering and his M.Sc. in techno-economic systems from NTUA, in 2008 and 2013 respectively. 
His research interests include Machine/Deep learning, Multi-Agent Systems and IoT, while he has published more than 50 scientific articles in international journals/conferences in these fields. He is founder and CEO of Waldiez (https://waldiez.io/), co-founder and CTO of ThinGenious and serves as a senior researcher at University of West Attica. Moreover, he is a lecturer at the MSc program “Artificial Intelligence and Deep Learning” (https://aidl.uniwa.gr/) and is certified as University Ambassador, by NVIDIA Deep Learning Institute (DLI), in the tasks of Building Transformer-Based NLP Applications, and Rapid Application Development Using LLMs. - -### Abstract: - -Current LLM-based orchestration tools often lack support for multi-agent interactions, are restricted to basic communication patterns, or only provide information after the entire workflow has completed. Waldiez is an open-source workflow tool that lets you orchestrate your LLM-agents using drag-and-drop and develop complex agentic applications. It is a low-code tool that assists you design and visualize your multi-agent workflow in jupyter lab as a plugin. Wadiez runs over AG2 supporting all the communication patterns (e.g., sequential, nested and group chat), supporting several LLM-based services offered by OpenAI, Anthropic, NVIDIA NIM, local hosted models and several others. In this talk, we’ll dive into the powerful features of Wadiez, demonstrating its capabilities through real-world use cases. Join us as we explore how Wadiez can streamline complex workflows and enhance multi-agent interactions, showcasing exactly what sets it apart from other LLM-based orchestration tools. 
- -### Sign Up: https://discord.gg/NrNP5ZAx?event=1308233315442098197 - -## Transforming CRM with Agents: The Journey to Ully.ai's Next-Gen ERP - Dec 16, 2024 - -### Speakers: Bassil Khilo - -### Biography of the speakers: - -Bassil Khilo is the Founder & CEO of Ully.ai, an AI-powered CRM platform that automates lead research and outreach while offering full-cycle sales management tools. With a vision to redefine enterprise resource planning (ERP) systems, Bassil combines his experience in SaaS sales and entrepreneurship to build solutions that empower businesses to scale efficiently. Before founding Ully.ai, Bassil served as a Senior Account Executive at a globally renowned ERP company, where he gained hands-on experience in ERP and CRM solutions. His entrepreneurial journey includes founding ventures such as Maple Tyres, an e-commerce platform for tires in the UAE, and WarrenAI, a stock research platform that helps investors identify top-performing companies. - -### Abstract: - -In today's fast-paced digital landscape, businesses need tools that go beyond conventional CRM systems to manage their operations efficiently. At Ully.ai, we’ve built a powerful AI-driven CRM (soon evolving into a full ERP) that redefines how businesses research leads, engage with customers, and manage their sales cycle. Ully automates WhatsApp and email replies with AI Agents, enriches leads with deep insights, and personalizes outreach at scale. This talk explores the how Autogen’s Agents can streamline operations, improve customer engagement, and drive growth with RAG. - -### Sign Up: https://discord.gg/NrNP5ZAx?event=1308497335768059974 - +Our community is also dedicated to fostering knowledge sharing and collaboration through regular events. We frequently host talks featuring both academic experts and industry professionals, offering valuable insights into topics related to agentic AI. 
+By joining our community, you will have access to these enriching opportunities and be able to engage with like-minded individuals. ## How to follow up with the latest talks? -Join our community Discord (https://discord.gg/sUkGceyd) to be the first to know about amazing upcoming talks! +1. Stay informed about our latest events and upcoming talks by subscribing to our [lu.ma homepage](https://lu.ma/ag2ai). + +2. Join our community Discord (https://discord.gg/sUkGceyd) to be the first to know about amazing upcoming talks! Connect: shaokunzhang529@gmail.com From 92d25a4d1222224b24735333158757260ccd4d7e Mon Sep 17 00:00:00 2001 From: HRUSHIKESH DOKALA <96101829+Hk669@users.noreply.github.com> Date: Sun, 5 Jan 2025 06:38:36 +0000 Subject: [PATCH 05/61] add method property to reasoningagen --- autogen/agentchat/contrib/reasoning_agent.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/autogen/agentchat/contrib/reasoning_agent.py b/autogen/agentchat/contrib/reasoning_agent.py index ac43fe3f48..2224d9f315 100644 --- a/autogen/agentchat/contrib/reasoning_agent.py +++ b/autogen/agentchat/contrib/reasoning_agent.py @@ -699,3 +699,7 @@ def _expand(self, node: ThinkNode) -> list: def _is_terminal(self, node): return node.depth >= self._max_depth or "TERMINATE" in node.content + + @property + def method(self): + return self._method From 9c1b0bb1e10aaba840b470fdeeffe472cec5f499 Mon Sep 17 00:00:00 2001 From: Mark Sze Date: Sun, 5 Jan 2025 22:52:10 +0000 Subject: [PATCH 06/61] Swarm: Restore lost auto return Signed-off-by: Mark Sze --- autogen/agentchat/contrib/swarm_agent.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/autogen/agentchat/contrib/swarm_agent.py b/autogen/agentchat/contrib/swarm_agent.py index b1c5ae5f8d..80c78a65f1 100644 --- a/autogen/agentchat/contrib/swarm_agent.py +++ b/autogen/agentchat/contrib/swarm_agent.py @@ -352,6 +352,8 @@ def _determine_next_agent( return None if user_agent is None else user_agent elif after_work_condition == 
AfterWorkOption.STAY: return last_speaker + elif after_work_condition == AfterWorkOption.SWARM_MANAGER: + return "auto" else: raise ValueError("Invalid After Work condition or return value from callable") From 4b17d18aa7b30641a34190de354ef4a84dea160d Mon Sep 17 00:00:00 2001 From: Harish Mohan Raj Date: Mon, 6 Jan 2025 05:52:54 +0000 Subject: [PATCH 07/61] Fix docs build and update navigation --- website/mint.json | 14 +++++++++++++- website/talks/2024-12-12/index.mdx | 2 +- website/talks/2024-12-19/index.mdx | 2 +- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/website/mint.json b/website/mint.json index acaf590e73..ae6d767d8f 100644 --- a/website/mint.json +++ b/website/mint.json @@ -303,13 +303,16 @@ { "group": "agentchat.realtime_agent", "pages": [ + "docs/reference/agentchat/realtime_agent/client", "docs/reference/agentchat/realtime_agent/function_observer", "docs/reference/agentchat/realtime_agent/oai_realtime_client", "docs/reference/agentchat/realtime_agent/realtime_agent", "docs/reference/agentchat/realtime_agent/realtime_client", "docs/reference/agentchat/realtime_agent/realtime_observer", "docs/reference/agentchat/realtime_agent/twilio_audio_adapter", - "docs/reference/agentchat/realtime_agent/websocket_audio_adapter" + "docs/reference/agentchat/realtime_agent/twilio_observer", + "docs/reference/agentchat/realtime_agent/websocket_audio_adapter", + "docs/reference/agentchat/realtime_agent/websocket_observer" ] }, "docs/reference/agentchat/agent", @@ -398,6 +401,12 @@ "docs/reference/logger/file_logger" ] }, + { + "group": "messages", + "pages": [ + "docs/reference/messages/agent_messages" + ] + }, { "group": "oai", "pages": [ @@ -419,6 +428,7 @@ { "group": "tools", "pages": [ + "docs/reference/tools/function_utils", "docs/reference/tools/tool" ] }, @@ -476,6 +486,8 @@ "group": "Talks", "pages": [ "talks/future_talks/index", + "talks/2024-12-19/index", + "talks/2024-12-12/index", "talks/2024-11-28/index", "talks/2024-11-25/index", 
"talks/2024-11-18/index", diff --git a/website/talks/2024-12-12/index.mdx b/website/talks/2024-12-12/index.mdx index 4f607c3bdc..a893beb595 100644 --- a/website/talks/2024-12-12/index.mdx +++ b/website/talks/2024-12-12/index.mdx @@ -1,5 +1,5 @@ --- -title: Make AI Agents Collaborate: Drag, Drop, and Orchestrate with Waldiez - Dec 12, 2024 +title: "Make AI Agents Collaborate: Drag, Drop, and Orchestrate with Waldiez - Dec 12, 2024" --- ### Speakers: Panagiotis Kasnesis diff --git a/website/talks/2024-12-19/index.mdx b/website/talks/2024-12-19/index.mdx index ba253b317d..ed75d8fc13 100644 --- a/website/talks/2024-12-19/index.mdx +++ b/website/talks/2024-12-19/index.mdx @@ -1,5 +1,5 @@ --- -title: Transforming CRM with Agents: The Journey to Ully.ai's Next-Gen ERP - Dec 19, 2024 +title: "Transforming CRM with Agents: The Journey to Ully.ai's Next-Gen ERP - Dec 19, 2024" --- ### Speakers: Bassil Khilo From 442840c4b3f0da1221f5bb05f3ed05a1ac74c5e4 Mon Sep 17 00:00:00 2001 From: Harish Mohan Raj Date: Mon, 6 Jan 2025 06:04:05 +0000 Subject: [PATCH 08/61] Fix broken links --- notebook/config_loader_utility_functions.ipynb | 10 +++++----- website/blog/2024-12-20-RealtimeAgent/index.mdx | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/notebook/config_loader_utility_functions.ipynb b/notebook/config_loader_utility_functions.ipynb index c896ce627e..dca6cd5831 100644 --- a/notebook/config_loader_utility_functions.ipynb +++ b/notebook/config_loader_utility_functions.ipynb @@ -22,11 +22,11 @@ "\n", "There are several utility functions for loading LLM config lists that may be useful depending on the situation.\n", "\n", - "- [`get_config_list`](#get_config_list): Generates configurations for API calls, primarily from provided API keys.\n", - "- [`config_list_openai_aoai`](#config_list_openai_aoai): Constructs a list of configurations using both Azure OpenAI and OpenAI endpoints, sourcing API keys from environment variables or local files.\n", - "- 
[`config_list_from_json`](#config_list_from_json): Loads configurations from a JSON structure, either from an environment variable or a local JSON file, with the flexibility of filtering configurations based on given criteria.\n", - "- [`config_list_from_models`](#config_list_from_models): Creates configurations based on a provided list of models, useful when targeting specific models without manually specifying each configuration.\n", - "- [`config_list_from_dotenv`](#config_list_from_dotenv): Constructs a configuration list from a `.env` file, offering a consolidated way to manage multiple API configurations and keys from a single file." + "- [`get_config_list`](#get-config-list): Generates configurations for API calls, primarily from provided API keys.\n", + "- [`config_list_openai_aoai`](#config-list-openai-aoai): Constructs a list of configurations using both Azure OpenAI and OpenAI endpoints, sourcing API keys from environment variables or local files.\n", + "- [`config_list_from_json`](#config-list-from-json): Loads configurations from a JSON structure, either from an environment variable or a local JSON file, with the flexibility of filtering configurations based on given criteria.\n", + "- [`config_list_from_models`](#config-list-from-models): Creates configurations based on a provided list of models, useful when targeting specific models without manually specifying each configuration.\n", + "- [`config_list_from_dotenv`](#config-list-from-dotenv): Constructs a configuration list from a `.env` file, offering a consolidated way to manage multiple API configurations and keys from a single file." ] }, { diff --git a/website/blog/2024-12-20-RealtimeAgent/index.mdx b/website/blog/2024-12-20-RealtimeAgent/index.mdx index 931fe0b24b..ffd390df1b 100644 --- a/website/blog/2024-12-20-RealtimeAgent/index.mdx +++ b/website/blog/2024-12-20-RealtimeAgent/index.mdx @@ -93,7 +93,7 @@ Traditionally, conversational AI tasks have focused on asynchronous interactions 2. 
RealtimeAgent swarm integration - Seamless integration of [**`RealtimeAgent`**](/docs/reference/agentchat/realtime_agent/realtime_agent) into Swarm -3. [**`TwilioAudioAdapter`**](docs/reference/agentchat/realtime_agent/twilio_audio_adapter#twilioaudioadapter) +3. [**`TwilioAudioAdapter`**](/docs/reference/agentchat/realtime_agent/twilio_audio_adapter#twilioaudioadapter) - Connects agents to Twilio for telephony support. - Simplifies the process of handling voice calls with clear API methods. From daf437008571e17a30391cb94c0172f15a6982af Mon Sep 17 00:00:00 2001 From: Harish Mohan Raj Date: Mon, 6 Jan 2025 14:57:07 +0000 Subject: [PATCH 09/61] Fix broken links --- notebook/config_loader_utility_functions.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notebook/config_loader_utility_functions.ipynb b/notebook/config_loader_utility_functions.ipynb index dca6cd5831..5cdf832e30 100644 --- a/notebook/config_loader_utility_functions.ipynb +++ b/notebook/config_loader_utility_functions.ipynb @@ -155,7 +155,7 @@ "source": [ "### config_list_from_models\n", "\n", - "This method creates configurations based on a provided list of models. It's useful when you have specific models in mind and don't want to manually specify each configuration. The [`config_list_from_models`](/docs/reference/oai/openai_utils#config_list_from_models) function tries to create a list of configurations using Azure OpenAI endpoints and OpenAI endpoints for the provided list of models. It assumes the api keys and api bases are stored in the corresponding environment variables or local txt files. It's okay to only have the OpenAI API key, OR only the Azure OpenAI API key + base. For Azure the model name refers to the OpenAI Studio deployment name.\n", + "This method creates configurations based on a provided list of models. It's useful when you have specific models in mind and don't want to manually specify each configuration. 
The [`config_list_from_models`](/docs/reference/oai/openai_utils#config-list-from-models) function tries to create a list of configurations using Azure OpenAI endpoints and OpenAI endpoints for the provided list of models. It assumes the api keys and api bases are stored in the corresponding environment variables or local txt files. It's okay to only have the OpenAI API key, OR only the Azure OpenAI API key + base. For Azure the model name refers to the OpenAI Studio deployment name.\n", "\n", "Steps:\n", "- Similar to method 1, store API keys and bases either in environment variables or `.txt` files." From 318b117504ece3662e096ffc3cfef0fc571f2d41 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Mon, 6 Jan 2025 14:26:46 +0100 Subject: [PATCH 10/61] Fix wrong import --- autogen/coding/jupyter/embedded_ipython_code_executor.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/autogen/coding/jupyter/embedded_ipython_code_executor.py b/autogen/coding/jupyter/embedded_ipython_code_executor.py index 4e0a8d828c..e5f4fc2301 100644 --- a/autogen/coding/jupyter/embedded_ipython_code_executor.py +++ b/autogen/coding/jupyter/embedded_ipython_code_executor.py @@ -11,15 +11,12 @@ import uuid from pathlib import Path from queue import Empty -from typing import Any, List +from typing import Any -# this is needed for CI to work. 
The import of this file should fail if jupyter-kernel-gateway is not installed -import jupyter_kernel_gateway from jupyter_client import KernelManager # type: ignore[attr-defined] from jupyter_client.kernelspec import KernelSpecManager from pydantic import BaseModel, Field, field_validator -from ...agentchat.agent import LLMAgent from ..base import CodeBlock, CodeExtractor, IPythonCodeResult from ..markdown_code_extractor import MarkdownCodeExtractor From 9db7bc3f8408b30019b382650d7c24fb80954e2a Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Mon, 6 Jan 2025 14:53:31 +0100 Subject: [PATCH 11/61] Fix imports --- test/coding/test_embedded_ipython_code_executor.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/coding/test_embedded_ipython_code_executor.py b/test/coding/test_embedded_ipython_code_executor.py index df0d315161..d0698ce99d 100644 --- a/test/coding/test_embedded_ipython_code_executor.py +++ b/test/coding/test_embedded_ipython_code_executor.py @@ -9,15 +9,15 @@ import tempfile import uuid from pathlib import Path -from typing import Dict, Type, Union +from typing import Union import pytest -from conftest import MOCK_OPEN_AI_API_KEY, skip_docker, skip_openai # noqa: E402 from autogen.agentchat.conversable_agent import ConversableAgent from autogen.coding.base import CodeBlock, CodeExecutor from autogen.coding.factory import CodeExecutorFactory -from autogen.oai.openai_utils import config_list_from_json + +from ..conftest import MOCK_OPEN_AI_API_KEY, skip_docker # noqa: E402 try: from autogen.coding.jupyter import ( From 3ac3fb0bf1690747ebd406bb3929ff79d6904826 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Mon, 6 Jan 2025 15:22:54 +0100 Subject: [PATCH 12/61] wip --- test/coding/test_embedded_ipython_code_executor.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/coding/test_embedded_ipython_code_executor.py b/test/coding/test_embedded_ipython_code_executor.py index d0698ce99d..37a3dcfa68 100644 --- 
a/test/coding/test_embedded_ipython_code_executor.py +++ b/test/coding/test_embedded_ipython_code_executor.py @@ -17,7 +17,9 @@ from autogen.coding.base import CodeBlock, CodeExecutor from autogen.coding.factory import CodeExecutorFactory -from ..conftest import MOCK_OPEN_AI_API_KEY, skip_docker # noqa: E402 +# add ../.. to sys.path to import conftest +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) +from conftest import MOCK_OPEN_AI_API_KEY, skip_docker # noqa: E402 try: from autogen.coding.jupyter import ( From bbd8c90f813f24587812e434cae8ecfefd6f399b Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Mon, 6 Jan 2025 15:49:43 +0100 Subject: [PATCH 13/61] wip --- autogen/coding/jupyter/__init__.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/autogen/coding/jupyter/__init__.py b/autogen/coding/jupyter/__init__.py index 36698b88f9..a0be01d20d 100644 --- a/autogen/coding/jupyter/__init__.py +++ b/autogen/coding/jupyter/__init__.py @@ -4,6 +4,14 @@ # # Original portions of this file are derived from https://github.com/microsoft/autogen under the MIT License. 
# SPDX-License-Identifier: MIT +from .helpers import is_jupyter_kernel_gateway_installed + +if not is_jupyter_kernel_gateway_installed(): + raise ImportError( + "jupyter-kernel-gateway is required for JupyterCodeExecutor, please install it with `pip install ag2[jupyter-executor]`" + ) + + from .base import JupyterConnectable, JupyterConnectionInfo from .docker_jupyter_server import DockerJupyterServer from .embedded_ipython_code_executor import EmbeddedIPythonCodeExecutor From 3c03443be9d5f082232eeaf0ab7a05c5d669f9e8 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Mon, 6 Jan 2025 16:05:14 +0100 Subject: [PATCH 14/61] wip --- .gitignore | 2 +- autogen/coding/jupyter/helpers.py | 20 +++++++++++++++++++ test/coding/__init__.py | 0 .../test_embedded_ipython_code_executor.py | 4 +--- 4 files changed, 22 insertions(+), 4 deletions(-) create mode 100644 autogen/coding/jupyter/helpers.py create mode 100644 test/coding/__init__.py diff --git a/.gitignore b/.gitignore index c60f4b2afa..dff3d44315 100644 --- a/.gitignore +++ b/.gitignore @@ -174,7 +174,7 @@ test/test_files/agenteval-in-out/out/ # local cache or coding foler local_cache/ -coding/ +# coding/ # Files created by tests *tmp_code_* diff --git a/autogen/coding/jupyter/helpers.py b/autogen/coding/jupyter/helpers.py new file mode 100644 index 0000000000..5db3563cac --- /dev/null +++ b/autogen/coding/jupyter/helpers.py @@ -0,0 +1,20 @@ +# Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai +# +# SPDX-License-Identifier: Apache-2.0 + +import subprocess + +__all__ = ["is_jupyter_kernel_gateway_installed"] + + +def is_jupyter_kernel_gateway_installed() -> bool: + try: + subprocess.run( + ["jupyter-kernel-gateway", "--version"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + check=True, + ) + return True + except (subprocess.CalledProcessError, FileNotFoundError): + return False diff --git a/test/coding/__init__.py b/test/coding/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git 
a/test/coding/test_embedded_ipython_code_executor.py b/test/coding/test_embedded_ipython_code_executor.py index 37a3dcfa68..d0698ce99d 100644 --- a/test/coding/test_embedded_ipython_code_executor.py +++ b/test/coding/test_embedded_ipython_code_executor.py @@ -17,9 +17,7 @@ from autogen.coding.base import CodeBlock, CodeExecutor from autogen.coding.factory import CodeExecutorFactory -# add ../.. to sys.path to import conftest -sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) -from conftest import MOCK_OPEN_AI_API_KEY, skip_docker # noqa: E402 +from ..conftest import MOCK_OPEN_AI_API_KEY, skip_docker # noqa: E402 try: from autogen.coding.jupyter import ( From 0905ed0938b4f52e7020f7ca20fb95739f048347 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Mon, 6 Jan 2025 16:18:18 +0100 Subject: [PATCH 15/61] wip --- autogen/coding/jupyter/helpers.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/autogen/coding/jupyter/helpers.py b/autogen/coding/jupyter/helpers.py index 5db3563cac..179e8bf997 100644 --- a/autogen/coding/jupyter/helpers.py +++ b/autogen/coding/jupyter/helpers.py @@ -3,6 +3,9 @@ # SPDX-License-Identifier: Apache-2.0 import subprocess +from logging import getLogger + +logger = getLogger(__name__) __all__ = ["is_jupyter_kernel_gateway_installed"] @@ -10,11 +13,16 @@ def is_jupyter_kernel_gateway_installed() -> bool: try: subprocess.run( - ["jupyter-kernel-gateway", "--version"], + ["jupyter", "kernelgateway", "--version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True, ) return True - except (subprocess.CalledProcessError, FileNotFoundError): + except (subprocess.CalledProcessError, FileNotFoundError) as e: + logger.warning( + "jupyter-kernel-gateway is required for JupyterCodeExecutor, please install it with `pip install ag2[jupyter-executor]`" + ) + logger.warning(e, exc_info=True) + print(e) return False From 82daa17ff11b384228b7593db1b950337bab060d Mon Sep 17 00:00:00 2001 From: 
Davor Runje Date: Mon, 6 Jan 2025 16:20:39 +0100 Subject: [PATCH 16/61] wip --- autogen/coding/jupyter/helpers.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/autogen/coding/jupyter/helpers.py b/autogen/coding/jupyter/helpers.py index 179e8bf997..3216e590ee 100644 --- a/autogen/coding/jupyter/helpers.py +++ b/autogen/coding/jupyter/helpers.py @@ -19,10 +19,8 @@ def is_jupyter_kernel_gateway_installed() -> bool: check=True, ) return True - except (subprocess.CalledProcessError, FileNotFoundError) as e: + except (subprocess.CalledProcessError, FileNotFoundError): logger.warning( "jupyter-kernel-gateway is required for JupyterCodeExecutor, please install it with `pip install ag2[jupyter-executor]`" ) - logger.warning(e, exc_info=True) - print(e) return False From 9d35eefc0317f10146eea24412b4519e081a2842 Mon Sep 17 00:00:00 2001 From: Harish Mohan Raj Date: Tue, 7 Jan 2025 04:18:45 +0000 Subject: [PATCH 17/61] Add correct URL in muffet exclusion list --- .muffet-excluded-links.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.muffet-excluded-links.txt b/.muffet-excluded-links.txt index 05c14e1e7e..d6bc275618 100644 --- a/.muffet-excluded-links.txt +++ b/.muffet-excluded-links.txt @@ -12,7 +12,7 @@ https://thesequence.substack.com/p/my-five-favorite-ai-papers-of-2023 https://www.llama.com/docs/how-to-guides/prompting/ https://azure.microsoft.com/en-us/get-started/azure-portal https://azure.microsoft.com/en-us/products/ai-services/openai-service -https://azure.microsoft.com/en-us/pricing/purchase-options/azure-account?icid=azurefreeaccount +https://azure.microsoft.com/en-us/pricing/purchase-options/azure-account https://github.com/pgvector/pgvector https://github.com/ag2ai/ag2/blob/b1adac515931bf236ac59224269eeec683a162ba/test/oai/test_client.py https://github.com/ag2ai/ag2/blob/main/notebook/contributing.md From c3e04f5892de8cc5eee064077ed27b74b6adfc49 Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Fri, 3 Jan 2025 
19:28:40 +0530 Subject: [PATCH 18/61] WIP: autogen iostream refactor --- autogen/agentchat/conversable_agent.py | 3 ++- autogen/io/base.py | 5 +++++ autogen/io/console.py | 15 ++++++++++++++- autogen/messages/agent_messages.py | 23 +++++++++++++++++++++++ 4 files changed, 44 insertions(+), 2 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index d6e4ac6cc3..6e6249e9de 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -860,7 +860,8 @@ def _print_received_message(self, message: Union[dict, str], sender: Agent, skip message = self._message_to_dict(message) message_model = create_received_message_model(message=message, sender=sender, recipient=self) iostream = IOStream.get_default() - message_model.print(iostream.print) + # message_model.print(iostream.print) + iostream.send(message_model) def _process_received_message(self, message: Union[dict, str], sender: Agent, silent: bool): # When the agent receives a message, the role of the message is "user". (If 'role' exists and is 'function', it will remain unchanged.) diff --git a/autogen/io/base.py b/autogen/io/base.py index 39b857f416..3012ba3667 100644 --- a/autogen/io/base.py +++ b/autogen/io/base.py @@ -10,6 +10,8 @@ from contextvars import ContextVar from typing import Any, Optional, Protocol, runtime_checkable +from autogen.messages.base_message import BaseMessage + __all__ = ("OutputStream", "InputStream", "IOStream") logger = logging.getLogger(__name__) @@ -28,6 +30,9 @@ def print(self, *objects: Any, sep: str = " ", end: str = "\n", flush: bool = Fa """ ... 
# pragma: no cover + def send(self, message: BaseMessage): + + @runtime_checkable class InputStream(Protocol): diff --git a/autogen/io/console.py b/autogen/io/console.py index 14504d8798..da124f07ac 100644 --- a/autogen/io/console.py +++ b/autogen/io/console.py @@ -7,6 +7,9 @@ import getpass from typing import Any +from autogen.messages.agent_messages import PrintMessage +from autogen.messages.base_message import BaseMessage + from .base import IOStream __all__ = ("IOConsole",) @@ -24,7 +27,17 @@ def print(self, *objects: Any, sep: str = " ", end: str = "\n", flush: bool = Fa end (str, optional): The end of the output. Defaults to "\n". flush (bool, optional): Whether to flush the output. Defaults to False. """ - print(*objects, sep=sep, end=end, flush=flush) + print_message = PrintMessage(*objects, sep=sep, end=end) + self.send(print_message) + # print(*objects, sep=sep, end=end, flush=flush) + + def send(self, message: BaseMessage): + """Send a message to the output stream. + + Args: + message (Any): The message to send. + """ + message.print() def input(self, prompt: str = "", *, password: bool = False) -> str: """Read a line from the input stream. 
diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index f9558984d0..59de5c9685 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -3,6 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 from copy import deepcopy +import json from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, Union from uuid import UUID @@ -662,3 +663,25 @@ def print(self, text: str, f: Optional[Callable[..., Any]] = None) -> None: f = f or print f(text) + + +class PrintMessage(BaseMessage): + def __init__(self, *objects: Any, sep: str = " ", end: str = "\n", flush: bool = False, uuid: Optional[UUID] = None): + self.objects = [self._to_json(x) for x in objects ] + self.sep = sep + self.end = end + + super().__init__(uuid=uuid) + + def _to_json(self, obj: Any): + if hasattr(obj, "model_dump_json"): + return obj.model_dump_json() + try: + return json.dumps(obj) + except Exception: + return repr(obj) + + def print(self, f: Optional[Callable[..., Any]] = None) -> None: + f = f or print + + f(*self.objects, sep=self.sep, end=self.end, flush=True) \ No newline at end of file From 4ab67899deea3cfee9ade557f4f2f1c9dae59da3 Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Mon, 6 Jan 2025 13:48:01 +0530 Subject: [PATCH 19/61] Fix pre-commit issues --- autogen/io/base.py | 9 +++++++-- autogen/io/console.py | 2 +- autogen/io/websockets.py | 10 ++++++++++ autogen/messages/agent_messages.py | 16 +++++++++------- autogen/messages/base_message.py | 10 +++++++++- test/io/test_base.py | 4 ++++ test/io/test_websockets.py | 2 +- test/messages/test_agent_messages.py | 2 +- 8 files changed, 42 insertions(+), 13 deletions(-) diff --git a/autogen/io/base.py b/autogen/io/base.py index 3012ba3667..30f403b520 100644 --- a/autogen/io/base.py +++ b/autogen/io/base.py @@ -30,8 +30,13 @@ def print(self, *objects: Any, sep: str = " ", end: str = "\n", flush: bool = Fa """ ... 
# pragma: no cover - def send(self, message: BaseMessage): - + def send(self, message: BaseMessage) -> None: + """Send data to the output stream. + + Args: + message (BaseMessage): BaseMessage from autogen.messages.base_message + """ + ... @runtime_checkable diff --git a/autogen/io/console.py b/autogen/io/console.py index da124f07ac..6370915be7 100644 --- a/autogen/io/console.py +++ b/autogen/io/console.py @@ -31,7 +31,7 @@ def print(self, *objects: Any, sep: str = " ", end: str = "\n", flush: bool = Fa self.send(print_message) # print(*objects, sep=sep, end=end, flush=flush) - def send(self, message: BaseMessage): + def send(self, message: BaseMessage) -> None: """Send a message to the output stream. Args: diff --git a/autogen/io/websockets.py b/autogen/io/websockets.py index 2135727c8c..08447f53a1 100644 --- a/autogen/io/websockets.py +++ b/autogen/io/websockets.py @@ -13,6 +13,8 @@ from time import sleep from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Protocol, Union +from autogen.messages.base_message import BaseMessage + from .base import IOStream # Check if the websockets module is available @@ -195,6 +197,14 @@ def print(self, *objects: Any, sep: str = " ", end: str = "\n", flush: bool = Fa xs = sep.join(map(str, objects)) + end self._websocket.send(xs) + def send(self, message: BaseMessage) -> None: + """Send a message to the output stream. + + Args: + message (Any): The message to send. + """ + raise NotImplementedError("send() method is not implemented for IOWebsockets") + def input(self, prompt: str = "", *, password: bool = False) -> str: """Read a line from the input stream. 
diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index 59de5c9685..90fb042ac1 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -2,8 +2,8 @@ # # SPDX-License-Identifier: Apache-2.0 -from copy import deepcopy import json +from copy import deepcopy from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, Union from uuid import UUID @@ -659,23 +659,25 @@ class TextMessage(BaseMessage): def __init__(self, *, uuid: Optional[UUID] = None): super().__init__(uuid=uuid) - def print(self, text: str, f: Optional[Callable[..., Any]] = None) -> None: + def print_text(self, text: str, f: Optional[Callable[..., Any]] = None) -> None: f = f or print f(text) class PrintMessage(BaseMessage): - def __init__(self, *objects: Any, sep: str = " ", end: str = "\n", flush: bool = False, uuid: Optional[UUID] = None): - self.objects = [self._to_json(x) for x in objects ] + def __init__( + self, *objects: Any, sep: str = " ", end: str = "\n", flush: bool = False, uuid: Optional[UUID] = None + ): + self.objects = [self._to_json(x) for x in objects] self.sep = sep self.end = end super().__init__(uuid=uuid) - def _to_json(self, obj: Any): + def _to_json(self, obj: Any) -> str: if hasattr(obj, "model_dump_json"): - return obj.model_dump_json() + return obj.model_dump_json() # type: ignore [no-any-return] try: return json.dumps(obj) except Exception: @@ -684,4 +686,4 @@ def _to_json(self, obj: Any): def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print - f(*self.objects, sep=self.sep, end=self.end, flush=True) \ No newline at end of file + f(*self.objects, sep=self.sep, end=self.end, flush=True) diff --git a/autogen/messages/base_message.py b/autogen/messages/base_message.py index bf06d8ea90..84141a65cb 100644 --- a/autogen/messages/base_message.py +++ b/autogen/messages/base_message.py @@ -3,7 +3,7 @@ # SPDX-License-Identifier: Apache-2.0 -from typing import Any, Optional +from 
typing import Any, Callable, Optional from uuid import UUID, uuid4 from pydantic import BaseModel @@ -17,3 +17,11 @@ class BaseMessage(BaseModel): def __init__(self, uuid: Optional[UUID] = None, **kwargs: Any) -> None: uuid = uuid or uuid4() super().__init__(uuid=uuid, **kwargs) + + def print(self, f: Optional[Callable[..., Any]] = None) -> None: + """Print message + + Args: + f (Optional[Callable[..., Any]], optional): Print function. If none, python's default print will be used. + """ + ... diff --git a/test/io/test_base.py b/test/io/test_base.py index c4c77d8f66..5c6bdcf20e 100644 --- a/test/io/test_base.py +++ b/test/io/test_base.py @@ -8,6 +8,7 @@ from typing import Any, List from autogen.io import IOConsole, IOStream, IOWebsockets +from autogen.messages.base_message import BaseMessage class TestIOStream: @@ -19,6 +20,9 @@ class MyIOStream(IOStream): def print(self, *objects: Any, sep: str = " ", end: str = "\n", flush: bool = False) -> None: pass + def send(self, message: BaseMessage) -> None: + pass + def input(self, prompt: str = "", *, password: bool = False) -> str: return "Hello, World!" 
diff --git a/test/io/test_websockets.py b/test/io/test_websockets.py index 14b7475d70..ee72e5930a 100644 --- a/test/io/test_websockets.py +++ b/test/io/test_websockets.py @@ -52,7 +52,7 @@ def on_connect(iostream: IOWebsockets) -> None: for msg in ["Hello, World!", "Over and out!"]: print(f" - on_connect(): Sending message '{msg}' to client.", flush=True) - text_message.print(msg, iostream.print) + text_message.print_text(msg, iostream.print) print(" - on_connect(): Receiving message from client.", flush=True) diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index 96ff7edab5..ae2cd21f5d 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -826,7 +826,7 @@ def test_TextMessage(text: str, expected: list[_Call], uuid: UUID) -> None: assert actual.model_dump() == expected_model_dump mock = MagicMock() - actual.print(text, f=mock) + actual.print_text(text, f=mock) # print(mock.call_args_list) From cfbbf3e412ff4f1cc47569fcbe0af835e151c822 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Mon, 6 Jan 2025 10:14:07 +0100 Subject: [PATCH 20/61] wip --- autogen/agentchat/conversable_agent.py | 17 +++++++++----- autogen/io/websockets.py | 12 +++++----- autogen/messages/agent_messages.py | 31 ++++++++++++++------------ 3 files changed, 35 insertions(+), 25 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 6e6249e9de..b3d588711e 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -43,6 +43,7 @@ from ..io.base import IOStream from ..messages.agent_messages import ( ClearConversableAgentHistory, + ClearConversableAgentHistoryWarning, ConversableAgentUsageSummary, ExecuteCodeBlock, ExecuteFunction, @@ -1392,10 +1393,8 @@ def clear_history(self, recipient: Optional[Agent] = None, nr_messages_to_preser nr_messages_to_preserve: the number of newest messages to preserve in the chat history. 
""" iostream = IOStream.get_default() - clear_conversable_agent_history = ClearConversableAgentHistory( - agent=self, nr_messages_to_preserve=nr_messages_to_preserve - ) if recipient is None: + no_messages_preserved = 0 if nr_messages_to_preserve: for key in self._oai_messages: nr_messages_to_preserve_internal = nr_messages_to_preserve @@ -1404,14 +1403,22 @@ def clear_history(self, recipient: Optional[Agent] = None, nr_messages_to_preser first_msg_to_save = self._oai_messages[key][-nr_messages_to_preserve_internal] if "tool_responses" in first_msg_to_save: nr_messages_to_preserve_internal += 1 - clear_conversable_agent_history.print_preserving_message(iostream.print) + # clear_conversable_agent_history.print_preserving_message(iostream.print) + no_messages_preserved += 1 # Remove messages from history except last `nr_messages_to_preserve` messages. self._oai_messages[key] = self._oai_messages[key][-nr_messages_to_preserve_internal:] + clear_conversable_agent_history = ClearConversableAgentHistory( + agent=self, no_messages_preserved=no_messages_preserved + ) + clear_conversable_agent_history.print(iostream.print) else: self._oai_messages.clear() else: self._oai_messages[recipient].clear() - clear_conversable_agent_history.print_warning(iostream.print) + # clear_conversable_agent_history.print_warning(iostream.print) + if nr_messages_to_preserve: + clear_conversable_agent_history_warning = ClearConversableAgentHistoryWarning(agent=self) + clear_conversable_agent_history_warning.print(iostream.print) def generate_oai_reply( self, diff --git a/autogen/io/websockets.py b/autogen/io/websockets.py index 08447f53a1..c73d4d2d66 100644 --- a/autogen/io/websockets.py +++ b/autogen/io/websockets.py @@ -11,10 +11,10 @@ from contextlib import contextmanager from functools import partial from time import sleep -from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Protocol, Union - -from autogen.messages.base_message import BaseMessage +from typing import Any, 
Callable, Optional, Protocol, Union +from ..messages.agent_messages import PrintMessage +from ..messages.base_message import BaseMessage from .base import IOStream # Check if the websockets module is available @@ -194,8 +194,8 @@ def print(self, *objects: Any, sep: str = " ", end: str = "\n", flush: bool = Fa end (str, optional): The end of the output. Defaults to "\n". flush (bool, optional): Whether to flush the output. Defaults to False. """ - xs = sep.join(map(str, objects)) + end - self._websocket.send(xs) + message = PrintMessage(*objects, sep=sep, end=end) + self.send(message) def send(self, message: BaseMessage) -> None: """Send a message to the output stream. @@ -203,7 +203,7 @@ def send(self, message: BaseMessage) -> None: Args: message (Any): The message to send. """ - raise NotImplementedError("send() method is not implemented for IOWebsockets") + self._websocket.send(message.model_dump_json()) def input(self, prompt: str = "", *, password: bool = False) -> str: """Read a line from the input stream. 
diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index 90fb042ac1..91eefbef0d 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -573,35 +573,38 @@ def print_invalid_input(self, f: Optional[Callable[..., Any]] = None) -> None: class ClearConversableAgentHistory(BaseMessage): agent_name: str - nr_messages_to_preserve: Optional[int] = None + no_messages_preserved: Optional[int] = None - def __init__(self, *, uuid: Optional[UUID] = None, agent: "Agent", nr_messages_to_preserve: Optional[int] = None): + def __init__(self, *, uuid: Optional[UUID] = None, agent: "Agent", no_messages_preserved: Optional[int] = None): super().__init__( uuid=uuid, agent_name=agent.name, - nr_messages_to_preserve=nr_messages_to_preserve, + no_messages_preserved=no_messages_preserved, ) - def print_preserving_message(self, f: Optional[Callable[..., Any]] = None) -> None: + def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print - if self.nr_messages_to_preserve: + for _ in range(self.no_messages_preserved): f( f"Preserving one more message for {self.agent_name} to not divide history between tool call and " f"tool response." 
) - def print_warning(self, f: Optional[Callable[..., Any]] = None) -> None: + +class ClearConversableAgentHistoryWarning(BaseMessage): + agent_name: str + + def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print - if self.nr_messages_to_preserve: - f( - colored( - "WARNING: `nr_preserved_messages` is ignored when clearing chat history with a specific agent.", - "yellow", - ), - flush=True, - ) + f( + colored( + "WARNING: `nr_preserved_messages` is ignored when clearing chat history with a specific agent.", + "yellow", + ), + flush=True, + ) class GenerateCodeExecutionReply(BaseMessage): From 448bab3268857280dd95ea2397ae52588c11fdab Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Mon, 6 Jan 2025 15:58:07 +0530 Subject: [PATCH 21/61] Move PrintMessage to separate file and add missing tests --- autogen/agentchat/conversable_agent.py | 2 +- autogen/io/console.py | 2 +- autogen/io/websockets.py | 2 +- autogen/messages/agent_messages.py | 37 +++++++----------------- autogen/messages/client_messages.py | 10 ++++--- autogen/messages/print_message.py | 40 ++++++++++++++++++++++++++ autogen/oai/client.py | 5 ++-- pyproject.toml | 2 ++ test/messages/test_agent_messages.py | 22 ++++++++++---- test/messages/test_client_messages.py | 7 +++-- test/messages/test_print_message.py | 24 ++++++++++++++++ 11 files changed, 107 insertions(+), 46 deletions(-) create mode 100644 autogen/messages/print_message.py create mode 100644 test/messages/test_print_message.py diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index b3d588711e..c59db11607 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -1417,7 +1417,7 @@ def clear_history(self, recipient: Optional[Agent] = None, nr_messages_to_preser self._oai_messages[recipient].clear() # clear_conversable_agent_history.print_warning(iostream.print) if nr_messages_to_preserve: - clear_conversable_agent_history_warning = 
ClearConversableAgentHistoryWarning(agent=self) + clear_conversable_agent_history_warning = ClearConversableAgentHistoryWarning(recipient=self) clear_conversable_agent_history_warning.print(iostream.print) def generate_oai_reply( diff --git a/autogen/io/console.py b/autogen/io/console.py index 6370915be7..62adab28cf 100644 --- a/autogen/io/console.py +++ b/autogen/io/console.py @@ -7,8 +7,8 @@ import getpass from typing import Any -from autogen.messages.agent_messages import PrintMessage from autogen.messages.base_message import BaseMessage +from autogen.messages.print_message import PrintMessage from .base import IOStream diff --git a/autogen/io/websockets.py b/autogen/io/websockets.py index c73d4d2d66..7f3422a455 100644 --- a/autogen/io/websockets.py +++ b/autogen/io/websockets.py @@ -13,8 +13,8 @@ from time import sleep from typing import Any, Callable, Optional, Protocol, Union -from ..messages.agent_messages import PrintMessage from ..messages.base_message import BaseMessage +from ..messages.print_message import PrintMessage from .base import IOStream # Check if the websockets module is available diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index 91eefbef0d..f65fb2c452 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -2,7 +2,6 @@ # # SPDX-License-Identifier: Apache-2.0 -import json from copy import deepcopy from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, Union from uuid import UUID @@ -573,12 +572,14 @@ def print_invalid_input(self, f: Optional[Callable[..., Any]] = None) -> None: class ClearConversableAgentHistory(BaseMessage): agent_name: str - no_messages_preserved: Optional[int] = None + recipient_name: str + no_messages_preserved: int def __init__(self, *, uuid: Optional[UUID] = None, agent: "Agent", no_messages_preserved: Optional[int] = None): super().__init__( uuid=uuid, agent_name=agent.name, + recipient_name=agent.name, 
no_messages_preserved=no_messages_preserved, ) @@ -593,7 +594,13 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: class ClearConversableAgentHistoryWarning(BaseMessage): - agent_name: str + recipient_name: str + + def __init__(self, *, uuid: Optional[UUID] = None, recipient: "Agent"): + super().__init__( + uuid=uuid, + recipient_name=recipient.name, + ) def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print @@ -666,27 +673,3 @@ def print_text(self, text: str, f: Optional[Callable[..., Any]] = None) -> None: f = f or print f(text) - - -class PrintMessage(BaseMessage): - def __init__( - self, *objects: Any, sep: str = " ", end: str = "\n", flush: bool = False, uuid: Optional[UUID] = None - ): - self.objects = [self._to_json(x) for x in objects] - self.sep = sep - self.end = end - - super().__init__(uuid=uuid) - - def _to_json(self, obj: Any) -> str: - if hasattr(obj, "model_dump_json"): - return obj.model_dump_json() # type: ignore [no-any-return] - try: - return json.dumps(obj) - except Exception: - return repr(obj) - - def print(self, f: Optional[Callable[..., Any]] = None) -> None: - f = f or print - - f(*self.objects, sep=self.sep, end=self.end, flush=True) diff --git a/autogen/messages/client_messages.py b/autogen/messages/client_messages.py index 133eb6f004..fa60d115e2 100644 --- a/autogen/messages/client_messages.py +++ b/autogen/messages/client_messages.py @@ -125,16 +125,18 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: class StreamMessage(BaseMessage): - def __init__(self, *, uuid: Optional[UUID] = None) -> None: - super().__init__(uuid=uuid) + content: str - def print_chunk_content(self, content: str, f: Optional[Callable[..., Any]] = None) -> None: + def __init__(self, *, uuid: Optional[UUID] = None, content: str) -> None: + super().__init__(uuid=uuid, content=content) + + def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print # Set the terminal text color to green 
f("\033[32m", end="") - f(content, end="", flush=True) + f(self.content, end="", flush=True) # Reset the terminal text color f("\033[0m\n") diff --git a/autogen/messages/print_message.py b/autogen/messages/print_message.py new file mode 100644 index 0000000000..a5442ef4b4 --- /dev/null +++ b/autogen/messages/print_message.py @@ -0,0 +1,40 @@ +# Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai +# +# SPDX-License-Identifier: Apache-2.0 + + +import json +from typing import Any, Callable, Optional +from uuid import UUID + +from .base_message import BaseMessage + + +class PrintMessage(BaseMessage): + objects: list[str] + sep: str + end: str + + def __init__( + self, *objects: Any, sep: str = " ", end: str = "\n", flush: bool = False, uuid: Optional[UUID] = None + ): + objects_as_string = [self._to_json(x) for x in objects] + + super().__init__(uuid=uuid, objects=objects_as_string, sep=sep, end=end) + + def _to_json(self, obj: Any) -> str: + if isinstance(obj, str): + return obj + + if hasattr(obj, "model_dump_json"): + return obj.model_dump_json() # type: ignore [no-any-return] + try: + return json.dumps(obj) + except Exception: + return str(obj) + # return repr(obj) + + def print(self, f: Optional[Callable[..., Any]] = None) -> None: + f = f or print + + f(*self.objects, sep=self.sep, end=self.end, flush=True) diff --git a/autogen/oai/client.py b/autogen/oai/client.py index deebfbf35b..b692214ae5 100644 --- a/autogen/oai/client.py +++ b/autogen/oai/client.py @@ -316,8 +316,6 @@ def _create_or_parse(*args, **kwargs): full_function_call: Optional[dict[str, Any]] = None full_tool_calls: Optional[list[Optional[dict[str, Any]]]] = None - stream_message = StreamMessage() - # Send the chat completion request to OpenAI's API and process the response in chunks for chunk in create_or_parse(**params): if chunk.choices: @@ -364,7 +362,8 @@ def _create_or_parse(*args, **kwargs): # If content is present, print it to the terminal and update response variables if content 
is not None: - stream_message.print_chunk_content(content, iostream.print) + stream_message = StreamMessage(content=content) + stream_message.print(iostream.print) response_contents[choice.index] += content completion_tokens += 1 else: diff --git a/pyproject.toml b/pyproject.toml index 79df918a06..9ced6c6460 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -66,6 +66,7 @@ files = [ "autogen/messages/agent_messages.py", "autogen/messages/base_message.py", "autogen/messages/client_messages.py", + "autogen/messages/print_message.py", "test/test_pydantic.py", "test/io", "test/tools", @@ -76,6 +77,7 @@ files = [ "test/messages/test_agent_messages.py", "test/messages/test_base_message.py", "test/messages/test_client_messages.py", + "test/messages/test_print_message.py", ] exclude = [ "autogen/math_utils\\.py", diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index ae2cd21f5d..7b672d5d89 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -14,6 +14,7 @@ from autogen.messages.agent_messages import ( ClearAgentsHistory, ClearConversableAgentHistory, + ClearConversableAgentHistoryWarning, ContentMessage, ConversableAgentUsageSummary, ExecuteCodeBlock, @@ -700,28 +701,37 @@ def test_SelectSpeaker(uuid: UUID) -> None: def test_ClearConversableAgentHistory(uuid: UUID, recipient: ConversableAgent) -> None: - nr_messages_to_preserve = 5 + no_messages_preserved = 5 - actual = ClearConversableAgentHistory(uuid=uuid, agent=recipient, nr_messages_to_preserve=nr_messages_to_preserve) + actual = ClearConversableAgentHistory(uuid=uuid, agent=recipient, no_messages_preserved=no_messages_preserved) assert isinstance(actual, ClearConversableAgentHistory) expected_model_dump = { "uuid": uuid, "agent_name": "recipient", - "nr_messages_to_preserve": nr_messages_to_preserve, + "recipient_name": "recipient", + "no_messages_preserved": no_messages_preserved, } assert actual.model_dump() == expected_model_dump 
mock = MagicMock() - actual.print_preserving_message(f=mock) + actual.print(f=mock) # print(mock.call_args_list) expected_call_args_list = [ - call("Preserving one more message for recipient to not divide history between tool call and tool response.") + call("Preserving one more message for recipient to not divide history between tool call and tool response."), + call("Preserving one more message for recipient to not divide history between tool call and tool response."), + call("Preserving one more message for recipient to not divide history between tool call and tool response."), + call("Preserving one more message for recipient to not divide history between tool call and tool response."), + call("Preserving one more message for recipient to not divide history between tool call and tool response."), ] assert mock.call_args_list == expected_call_args_list + +def test_ClearConversableAgentHistoryWarning(uuid: UUID, recipient: ConversableAgent) -> None: + actual = ClearConversableAgentHistoryWarning(uuid=uuid, recipient=recipient) + mock = MagicMock() - actual.print_warning(f=mock) + actual.print(f=mock) # print(mock.call_args_list) expected_call_args_list = [ call( diff --git a/test/messages/test_client_messages.py b/test/messages/test_client_messages.py index 46b17f2afa..bc890bce91 100644 --- a/test/messages/test_client_messages.py +++ b/test/messages/test_client_messages.py @@ -324,17 +324,18 @@ def test_usage_summary_print_none_actual_and_total( def test_StreamMessage(uuid: UUID) -> None: - stream_message = StreamMessage(uuid=uuid) + content = "random stream chunk content" + stream_message = StreamMessage(uuid=uuid, content=content) assert isinstance(stream_message, StreamMessage) expected_model_dump = { "uuid": uuid, + "content": content, } assert stream_message.model_dump() == expected_model_dump - content = "random stream chunk content" mock = MagicMock() - stream_message.print_chunk_content(content, f=mock) + stream_message.print(f=mock) # 
print(mock.call_args_list) diff --git a/test/messages/test_print_message.py b/test/messages/test_print_message.py new file mode 100644 index 0000000000..44ff431f5e --- /dev/null +++ b/test/messages/test_print_message.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai +# +# SPDX-License-Identifier: Apache-2.0 + +from unittest.mock import MagicMock, call +from uuid import uuid4 + +from autogen.messages.print_message import PrintMessage + + +def test_print_message() -> None: + uuid = uuid4() + print_message = PrintMessage("Hello, World!", "How are you", sep=" ", end="\n", flush=False, uuid=uuid) + + assert isinstance(print_message, PrintMessage) + + expected_model_dump = {"uuid": uuid, "objects": ["Hello, World!", "How are you"], "sep": " ", "end": "\n"} + assert print_message.model_dump() == expected_model_dump + + mock = MagicMock() + print_message.print(f=mock) + # print(mock.call_args_list) + expected_call_args_list = [call("Hello, World!", "How are you", sep=" ", end="\n", flush=True)] + assert mock.call_args_list == expected_call_args_list From 38fcb3970881b504240f1d66b23da8eefcc7a950 Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Mon, 6 Jan 2025 16:04:32 +0530 Subject: [PATCH 22/61] Rename print_* method to print --- autogen/messages/agent_messages.py | 10 ++++++---- test/io/test_websockets.py | 5 ++--- test/messages/test_agent_messages.py | 6 +++--- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index f65fb2c452..8b0d93f488 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -666,10 +666,12 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: class TextMessage(BaseMessage): - def __init__(self, *, uuid: Optional[UUID] = None): - super().__init__(uuid=uuid) + text: str - def print_text(self, text: str, f: Optional[Callable[..., Any]] = None) -> None: + def __init__(self, *, 
uuid: Optional[UUID] = None, text: str): + super().__init__(uuid=uuid, text=text) + + def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print - f(text) + f(self.text) diff --git a/test/io/test_websockets.py b/test/io/test_websockets.py index ee72e5930a..36e1ab85c5 100644 --- a/test/io/test_websockets.py +++ b/test/io/test_websockets.py @@ -41,8 +41,6 @@ def on_connect(iostream: IOWebsockets) -> None: print(" - on_connect(): Receiving message from client.", flush=True) - text_message = TextMessage() - msg = iostream.input() print(f" - on_connect(): Received message '{msg}' from client.", flush=True) @@ -52,7 +50,8 @@ def on_connect(iostream: IOWebsockets) -> None: for msg in ["Hello, World!", "Over and out!"]: print(f" - on_connect(): Sending message '{msg}' to client.", flush=True) - text_message.print_text(msg, iostream.print) + text_message = TextMessage(text=msg) + text_message.print(iostream.print) print(" - on_connect(): Receiving message from client.", flush=True) diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index 7b672d5d89..03e2a793c6 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -830,13 +830,13 @@ def test_ConversableAgentUsageSummary( ], ) def test_TextMessage(text: str, expected: list[_Call], uuid: UUID) -> None: - actual = TextMessage(uuid=uuid) - expected_model_dump = {"uuid": uuid} + actual = TextMessage(uuid=uuid, text=text) + expected_model_dump = {"uuid": uuid, "text": text} assert isinstance(actual, TextMessage) assert actual.model_dump() == expected_model_dump mock = MagicMock() - actual.print_text(text, f=mock) + actual.print(f=mock) # print(mock.call_args_list) From e64c04fd69ded7d7e893ef63347efcdf28ee433f Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Mon, 6 Jan 2025 16:18:10 +0530 Subject: [PATCH 23/61] Rename print_* method to print in ExecuteFunction --- autogen/agentchat/conversable_agent.py | 12 +++++--- 
autogen/messages/agent_messages.py | 42 +++++++++++++++++--------- test/messages/test_agent_messages.py | 27 ++++++++++++++--- 3 files changed, 58 insertions(+), 23 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index c59db11607..5b2ce241e5 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -47,6 +47,7 @@ ConversableAgentUsageSummary, ExecuteCodeBlock, ExecuteFunction, + ExecuteFunctionArgumentsContent, GenerateCodeExecutionReply, TerminationAndHumanReply, create_received_message_model, @@ -2313,8 +2314,6 @@ def execute_function(self, func_call, verbose: bool = False) -> tuple[bool, dict func_name = func_call.get("name", "") func = self._function_map.get(func_name, None) - execute_function = ExecuteFunction(func_name=func_name, recipient=self, verbose=verbose) - is_exec_success = False if func is not None: # Extract arguments from a json-like string and put it into a dict. @@ -2327,7 +2326,8 @@ def execute_function(self, func_call, verbose: bool = False) -> tuple[bool, dict # Try to execute the function if arguments is not None: - execute_function.print_executing_func(iostream.print) + execute_function = ExecuteFunction(func_name=func_name, recipient=self) + execute_function.print(iostream.print) try: content = func(**arguments) is_exec_success = True @@ -2337,7 +2337,11 @@ def execute_function(self, func_call, verbose: bool = False) -> tuple[bool, dict arguments = {} content = f"Error: Function {func_name} not found." 
- execute_function.print_arguments_and_content(arguments, content, iostream.print) + if verbose: + execute_function_arguments_content = ExecuteFunctionArgumentsContent( + func_name=func_name, arguments=arguments, content=content, recipient=self + ) + execute_function_arguments_content.print(arguments, content, iostream.print) return is_exec_success, { "name": func_name, diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index 8b0d93f488..91023d2997 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -517,14 +517,11 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: class ExecuteFunction(BaseMessage): func_name: str recipient_name: str - verbose: Optional[bool] = False - def __init__( - self, *, uuid: Optional[UUID] = None, func_name: str, recipient: "Agent", verbose: Optional[bool] = False - ): - super().__init__(uuid=uuid, func_name=func_name, recipient_name=recipient.name, verbose=verbose) + def __init__(self, *, uuid: Optional[UUID] = None, func_name: str, recipient: "Agent"): + super().__init__(uuid=uuid, func_name=func_name, recipient_name=recipient.name) - def print_executing_func(self, f: Optional[Callable[..., Any]] = None) -> None: + def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print f( @@ -532,16 +529,33 @@ def print_executing_func(self, f: Optional[Callable[..., Any]] = None) -> None: flush=True, ) - def print_arguments_and_content( - self, arguments: dict[str, Any], content: str, f: Optional[Callable[..., Any]] = None - ) -> None: + +class ExecuteFunctionArgumentsContent(BaseMessage): + func_name: str + arguments: dict[str, Any] + content: str + recipient_name: str + + def __init__( + self, + *, + uuid: Optional[UUID] = None, + func_name: str, + arguments: dict[str, Any], + content: str, + recipient: "Agent", + ): + super().__init__( + uuid=uuid, func_name=func_name, arguments=arguments, content=content, recipient_name=recipient.name + 
) + + def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print - if self.verbose: - f( - colored(f"\nInput arguments: {arguments}\nOutput:\n{content}", "magenta"), - flush=True, - ) + f( + colored(f"\nInput arguments: {self.arguments}\nOutput:\n{self.content}", "magenta"), + flush=True, + ) class SelectSpeaker(BaseMessage): diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index 03e2a793c6..f373ab3eb9 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -19,6 +19,7 @@ ConversableAgentUsageSummary, ExecuteCodeBlock, ExecuteFunction, + ExecuteFunctionArgumentsContent, FunctionCall, FunctionCallMessage, FunctionResponseMessage, @@ -632,29 +633,45 @@ def test_ExecuteCodeBlock(uuid: UUID, sender: ConversableAgent, recipient: Conve def test_ExecuteFunction(uuid: UUID, recipient: ConversableAgent) -> None: func_name = "add_num" - verbose = True - actual = ExecuteFunction(uuid=uuid, func_name=func_name, recipient=recipient, verbose=verbose) + actual = ExecuteFunction(uuid=uuid, func_name=func_name, recipient=recipient) assert isinstance(actual, ExecuteFunction) expected_model_dump = { "uuid": uuid, "func_name": func_name, "recipient_name": "recipient", - "verbose": verbose, } assert actual.model_dump() == expected_model_dump mock = MagicMock() - actual.print_executing_func(f=mock) + actual.print(f=mock) # print(mock.call_args_list) expected_call_args_list = [call("\x1b[35m\n>>>>>>>> EXECUTING FUNCTION add_num...\x1b[0m", flush=True)] assert mock.call_args_list == expected_call_args_list + +def test_ExecuteFunctionArgumentsContent(uuid: UUID, recipient: ConversableAgent) -> None: + func_name = "add_num" arguments = {"num_to_be_added": 5} content = "15" + + actual = ExecuteFunctionArgumentsContent( + uuid=uuid, func_name=func_name, arguments=arguments, content=content, recipient=recipient + ) + assert isinstance(actual, ExecuteFunctionArgumentsContent) + + 
expected_model_dump = { + "uuid": uuid, + "func_name": func_name, + "arguments": arguments, + "content": content, + "recipient_name": "recipient", + } + assert actual.model_dump() == expected_model_dump + mock = MagicMock() - actual.print_arguments_and_content(arguments, content, f=mock) + actual.print(f=mock) # print(mock.call_args_list) expected_call_args_list = [ call("\x1b[35m\nInput arguments: {'num_to_be_added': 5}\nOutput:\n15\x1b[0m", flush=True) From 3f1f0d3fc0e5a32c116d07018874aaf54f417719 Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Mon, 6 Jan 2025 16:35:00 +0530 Subject: [PATCH 24/61] Rename print_* method to print in TerminationAndHumanReply --- autogen/agentchat/conversable_agent.py | 31 +++++++++++++++-------- autogen/messages/agent_messages.py | 34 +++++++++++++++++++------- test/messages/test_agent_messages.py | 30 +++++++++++++++++++---- 3 files changed, 71 insertions(+), 24 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 5b2ce241e5..67c1ef3f4e 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -50,6 +50,7 @@ ExecuteFunctionArgumentsContent, GenerateCodeExecutionReply, TerminationAndHumanReply, + TerminationAndHumanReplyHumanInputMode, create_received_message_model, ) from ..oai.client import ModelClient, OpenAIWrapper @@ -1839,10 +1840,11 @@ def check_termination_and_human_reply( reply = reply or "exit" # print the no_human_input_msg - termination_and_human_reply = TerminationAndHumanReply( - no_human_input_msg=no_human_input_msg, human_input_mode=self.human_input_mode, sender=sender, recipient=self - ) - termination_and_human_reply.print_no_human_input_msg(iostream.print) + if no_human_input_msg: + termination_and_human_reply = TerminationAndHumanReply( + no_human_input_msg=no_human_input_msg, sender=sender, recipient=self + ) + termination_and_human_reply.print(iostream.print) # stop the conversation if reply == "exit": 
@@ -1881,7 +1883,11 @@ def check_termination_and_human_reply( # increment the consecutive_auto_reply_counter self._consecutive_auto_reply_counter[sender] += 1 - termination_and_human_reply.print_human_input_mode(iostream.print) + if self.human_input_mode != "NEVER": + termination_and_human_reply_human_input_mode = TerminationAndHumanReplyHumanInputMode( + human_input_mode=self.human_input_mode, sender=sender, recipient=self + ) + termination_and_human_reply_human_input_mode.print(iostream.print) return False, None @@ -1953,10 +1959,11 @@ async def a_check_termination_and_human_reply( reply = reply or "exit" # print the no_human_input_msg - termination_and_human_reply = TerminationAndHumanReply( - no_human_input_msg=no_human_input_msg, human_input_mode=self.human_input_mode, sender=sender, recipient=self - ) - termination_and_human_reply.print_no_human_input_msg(iostream.print) + if no_human_input_msg: + termination_and_human_reply = TerminationAndHumanReply( + no_human_input_msg=no_human_input_msg, sender=sender, recipient=self + ) + termination_and_human_reply.print(iostream.print) # stop the conversation if reply == "exit": @@ -1995,7 +2002,11 @@ async def a_check_termination_and_human_reply( # increment the consecutive_auto_reply_counter self._consecutive_auto_reply_counter[sender] += 1 - termination_and_human_reply.print_human_input_mode(iostream.print) + if self.human_input_mode != "NEVER": + termination_and_human_reply_human_input_mode = TerminationAndHumanReplyHumanInputMode( + human_input_mode=self.human_input_mode, sender=sender, recipient=self + ) + termination_and_human_reply_human_input_mode.print(iostream.print) return False, None diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index 91023d2997..431c2e57b0 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -455,7 +455,6 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: class 
TerminationAndHumanReply(BaseMessage): no_human_input_msg: str - human_input_mode: str sender_name: str recipient_name: str @@ -464,29 +463,46 @@ def __init__( *, uuid: Optional[UUID] = None, no_human_input_msg: str, - human_input_mode: str, sender: Optional["Agent"] = None, recipient: "Agent", ): super().__init__( uuid=uuid, no_human_input_msg=no_human_input_msg, - human_input_mode=human_input_mode, sender_name=sender.name if sender else "No sender", recipient_name=recipient.name, ) - def print_no_human_input_msg(self, f: Optional[Callable[..., Any]] = None) -> None: + def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print - if self.no_human_input_msg: - f(colored(f"\n>>>>>>>> {self.no_human_input_msg}", "red"), flush=True) + f(colored(f"\n>>>>>>>> {self.no_human_input_msg}", "red"), flush=True) - def print_human_input_mode(self, f: Optional[Callable[..., Any]] = None) -> None: + +class TerminationAndHumanReplyHumanInputMode(BaseMessage): + human_input_mode: str + sender_name: str + recipient_name: str + + def __init__( + self, + *, + uuid: Optional[UUID] = None, + human_input_mode: str, + sender: Optional["Agent"] = None, + recipient: "Agent", + ): + super().__init__( + uuid=uuid, + human_input_mode=human_input_mode, + sender_name=sender.name if sender else "No sender", + recipient_name=recipient.name, + ) + + def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print - if self.human_input_mode != "NEVER": - f(colored("\n>>>>>>>> USING AUTO REPLY...", "red"), flush=True) + f(colored("\n>>>>>>>> USING AUTO REPLY...", "red"), flush=True) class ExecuteCodeBlock(BaseMessage): diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index f373ab3eb9..76aae7c3ca 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -31,6 +31,7 @@ SelectSpeaker, SpeakerAttempt, TerminationAndHumanReply, + TerminationAndHumanReplyHumanInputMode, TextMessage, ToolCall, 
ToolCallMessage, @@ -567,12 +568,10 @@ def test_GroupChatRunChat(uuid: UUID) -> None: def test_TerminationAndHumanReply(uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: no_human_input_msg = "NO HUMAN INPUT RECEIVED." - human_input_mode = "ALWAYS" actual = TerminationAndHumanReply( uuid=uuid, no_human_input_msg=no_human_input_msg, - human_input_mode=human_input_mode, sender=sender, recipient=recipient, ) @@ -581,20 +580,41 @@ def test_TerminationAndHumanReply(uuid: UUID, sender: ConversableAgent, recipien expected_model_dump = { "uuid": uuid, "no_human_input_msg": no_human_input_msg, - "human_input_mode": human_input_mode, "sender_name": "sender", "recipient_name": "recipient", } assert actual.model_dump() == expected_model_dump mock = MagicMock() - actual.print_no_human_input_msg(f=mock) + actual.print(f=mock) # print(mock.call_args_list) expected_call_args_list = [call("\x1b[31m\n>>>>>>>> NO HUMAN INPUT RECEIVED.\x1b[0m", flush=True)] assert mock.call_args_list == expected_call_args_list + +def test_TerminationAndHumanReplyHumanInputMode( + uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent +) -> None: + human_input_mode = "ALWAYS" + + actual = TerminationAndHumanReplyHumanInputMode( + uuid=uuid, + human_input_mode=human_input_mode, + sender=sender, + recipient=recipient, + ) + assert isinstance(actual, TerminationAndHumanReplyHumanInputMode) + + expected_model_dump = { + "uuid": uuid, + "human_input_mode": human_input_mode, + "sender_name": "sender", + "recipient_name": "recipient", + } + assert actual.model_dump() == expected_model_dump + mock = MagicMock() - actual.print_human_input_mode(f=mock) + actual.print(f=mock) # print(mock.call_args_list) expected_call_args_list = [call("\x1b[31m\n>>>>>>>> USING AUTO REPLY...\x1b[0m", flush=True)] assert mock.call_args_list == expected_call_args_list From 7bcd76c5fe729f638fa72f7ebecd2e35227eadb3 Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Mon, 6 Jan 2025 16:42:43 
+0530 Subject: [PATCH 25/61] Rename print_* method to print in GenerateCodeExecutionReply --- autogen/agentchat/conversable_agent.py | 6 ++++-- autogen/messages/agent_messages.py | 23 ++++++++++++++++------- test/messages/test_agent_messages.py | 5 +++-- 3 files changed, 23 insertions(+), 11 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 67c1ef3f4e..3eb121994f 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -1541,7 +1541,6 @@ def _generate_code_execution_reply_using_executor( # iterate through the last n messages in reverse # if code blocks are found, execute the code blocks and return the output # if no code blocks are found, continue - generate_code_execution_reply = GenerateCodeExecutionReply(sender=sender, recipient=self) for message in reversed(messages_to_scan): if not message["content"]: continue @@ -1549,7 +1548,10 @@ def _generate_code_execution_reply_using_executor( if len(code_blocks) == 0: continue - generate_code_execution_reply.print_executing_code_block(code_blocks, iostream.print) + generate_code_execution_reply = GenerateCodeExecutionReply( + code_blocks=code_blocks, sender=sender, recipient=self + ) + generate_code_execution_reply.print(iostream.print) # found code blocks, execute code. 
code_result = self._code_executor.execute_code_blocks(code_blocks) diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index 431c2e57b0..7dd944bd78 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -645,26 +645,35 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: class GenerateCodeExecutionReply(BaseMessage): + code_block_languages: list[str] sender_name: Optional[str] = None recipient_name: str - def __init__(self, *, uuid: Optional[UUID] = None, sender: Optional["Agent"] = None, recipient: "Agent"): + def __init__( + self, + *, + uuid: Optional[UUID] = None, + code_blocks: list["CodeBlock"], + sender: Optional["Agent"] = None, + recipient: "Agent", + ): + code_block_languages = [code_block.language for code_block in code_blocks] + super().__init__( uuid=uuid, + code_block_languages=code_block_languages, sender_name=sender.name if sender else None, recipient_name=recipient.name, ) - def print_executing_code_block( - self, code_blocks: list["CodeBlock"], f: Optional[Callable[..., Any]] = None - ) -> None: + def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print - num_code_blocks = len(code_blocks) + num_code_blocks = len(self.code_block_languages) if num_code_blocks == 1: f( colored( - f"\n>>>>>>>> EXECUTING CODE BLOCK (inferred language is {code_blocks[0].language})...", + f"\n>>>>>>>> EXECUTING CODE BLOCK (inferred language is {self.code_block_languages[0]})...", "red", ), flush=True, @@ -672,7 +681,7 @@ def print_executing_code_block( else: f( colored( - f"\n>>>>>>>> EXECUTING {num_code_blocks} CODE BLOCKS (inferred languages are [{', '.join([x.language for x in code_blocks])}])...", + f"\n>>>>>>>> EXECUTING {num_code_blocks} CODE BLOCKS (inferred languages are [{', '.join([x for x in self.code_block_languages])}])...", "red", ), flush=True, diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index 
76aae7c3ca..ae55affb8b 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -809,18 +809,19 @@ def test_GenerateCodeExecutionReply( sender: ConversableAgent, recipient: ConversableAgent, ) -> None: - actual = GenerateCodeExecutionReply(uuid=uuid, sender=sender, recipient=recipient) + actual = GenerateCodeExecutionReply(uuid=uuid, code_blocks=code_blocks, sender=sender, recipient=recipient) assert isinstance(actual, GenerateCodeExecutionReply) expected_model_dump = { "uuid": uuid, + "code_block_languages": [x.language for x in code_blocks], "sender_name": "sender", "recipient_name": "recipient", } assert actual.model_dump() == expected_model_dump mock = MagicMock() - actual.print_executing_code_block(code_blocks=code_blocks, f=mock) + actual.print(f=mock) # print(mock.call_args_list) From c62db84e69cbdd968a0d0364f498e98a872687f2 Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Mon, 6 Jan 2025 16:54:22 +0530 Subject: [PATCH 26/61] Rename print_* method to print in SelectSpeaker --- autogen/agentchat/groupchat.py | 10 +++++--- autogen/messages/agent_messages.py | 25 ++++++++++++++++---- test/messages/test_agent_messages.py | 34 +++++++++++++++++++++++++--- 3 files changed, 59 insertions(+), 10 deletions(-) diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py index 02c1203d70..72a1cfa358 100644 --- a/autogen/agentchat/groupchat.py +++ b/autogen/agentchat/groupchat.py @@ -23,6 +23,8 @@ GroupChatResume, GroupChatRunChat, SelectSpeaker, + SelectSpeakerInvalidInput, + SelectSpeakerTryCountExceeded, SpeakerAttempt, ) from ..oai.client import ModelClient @@ -397,14 +399,15 @@ def manual_select_speaker(self, agents: Optional[list[Agent]] = None) -> Union[A agents = self.agents select_speaker = SelectSpeaker(agents=agents) - select_speaker.print_select_speaker(iostream.print) + select_speaker.print(iostream.print) try_count = 0 # Assume the user will enter a valid number within 3 tries, otherwise use 
auto selection to avoid blocking. while try_count <= 3: try_count += 1 if try_count >= 3: - select_speaker.print_try_count_exceeded(try_count, iostream.print) + select_speaker_try_count_exceeded = SelectSpeakerTryCountExceeded(try_count=try_count, agents=agents) + select_speaker_try_count_exceeded.print(try_count, iostream.print) break try: i = iostream.input( @@ -418,7 +421,8 @@ def manual_select_speaker(self, agents: Optional[list[Agent]] = None) -> Union[A else: raise ValueError except ValueError: - select_speaker.print_invalid_input(iostream.print) + select_speaker_invalid_input = SelectSpeakerInvalidInput(agents=agents) + select_speaker_invalid_input.print(iostream.print) return None def random_select_speaker(self, agents: Optional[list[Agent]] = None) -> Union[Agent, None]: diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index 7dd944bd78..53ade8dacc 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -581,7 +581,7 @@ def __init__(self, *, uuid: Optional[UUID] = None, agents: Optional[list["Agent" agent_names = [agent.name for agent in agents] if agents else None super().__init__(uuid=uuid, agent_names=agent_names) - def print_select_speaker(self, f: Optional[Callable[..., Any]] = None) -> None: + def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print f("Please select the next speaker from the following list:") @@ -589,12 +589,29 @@ def print_select_speaker(self, f: Optional[Callable[..., Any]] = None) -> None: for i, agent_name in enumerate(agent_names): f(f"{i+1}: {agent_name}") - def print_try_count_exceeded(self, try_count: int = 3, f: Optional[Callable[..., Any]] = None) -> None: + +class SelectSpeakerTryCountExceeded(BaseMessage): + try_count: int + agent_names: Optional[list[str]] = None + + def __init__(self, *, uuid: Optional[UUID] = None, try_count: int, agents: Optional[list["Agent"]] = None): + agent_names = [agent.name for agent in agents] if 
agents else None + super().__init__(uuid=uuid, try_count=try_count, agent_names=agent_names) + + def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print - f(f"You have tried {try_count} times. The next speaker will be selected automatically.") + f(f"You have tried {self.try_count} times. The next speaker will be selected automatically.") + - def print_invalid_input(self, f: Optional[Callable[..., Any]] = None) -> None: +class SelectSpeakerInvalidInput(BaseMessage): + agent_names: Optional[list[str]] = None + + def __init__(self, *, uuid: Optional[UUID] = None, agents: Optional[list["Agent"]] = None): + agent_names = [agent.name for agent in agents] if agents else None + super().__init__(uuid=uuid, agent_names=agent_names) + + def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print f(f"Invalid input. Please enter a number between 1 and {len(self.agent_names or [])}.") diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index ae55affb8b..e3eecd8334 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -29,6 +29,8 @@ MessageRole, PostCarryoverProcessing, SelectSpeaker, + SelectSpeakerInvalidInput, + SelectSpeakerTryCountExceeded, SpeakerAttempt, TerminationAndHumanReply, TerminationAndHumanReplyHumanInputMode, @@ -715,7 +717,7 @@ def test_SelectSpeaker(uuid: UUID) -> None: assert actual.model_dump() == expected_model_dump mock = MagicMock() - actual.print_select_speaker(f=mock) + actual.print(f=mock) # print(mock.call_args_list) expected_call_args_list = [ call("Please select the next speaker from the following list:"), @@ -724,14 +726,40 @@ def test_SelectSpeaker(uuid: UUID) -> None: ] assert mock.call_args_list == expected_call_args_list + +def test_SelectSpeakerTryCountExceeded(uuid: UUID) -> None: + agents = [ + ConversableAgent("bob", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), + ConversableAgent("charlie", 
max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), + ] + try_count = 3 + + actual = SelectSpeakerTryCountExceeded(uuid=uuid, try_count=try_count, agents=agents) # type: ignore [arg-type] + assert isinstance(actual, SelectSpeakerTryCountExceeded) + mock = MagicMock() - actual.print_try_count_exceeded(try_count=3, f=mock) + actual.print(f=mock) # print(mock.call_args_list) expected_call_args_list = [call("You have tried 3 times. The next speaker will be selected automatically.")] assert mock.call_args_list == expected_call_args_list + +def test_SelectSpeakerInvalidInput(uuid: UUID) -> None: + agents = [ + ConversableAgent("bob", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), + ConversableAgent("charlie", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), + ] + + actual = SelectSpeakerInvalidInput(uuid=uuid, agents=agents) # type: ignore [arg-type] + assert isinstance(actual, SelectSpeakerInvalidInput) + + expected_model_dump = { + "uuid": uuid, + "agent_names": ["bob", "charlie"], + } + assert actual.model_dump() == expected_model_dump mock = MagicMock() - actual.print_invalid_input(f=mock) + actual.print(f=mock) # print(mock.call_args_list) expected_call_args_list = [call("Invalid input. 
Please enter a number between 1 and 2.")] assert mock.call_args_list == expected_call_args_list From c601840b1b5e3da57f7d0f3e610b341637de6d28 Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Mon, 6 Jan 2025 17:07:51 +0530 Subject: [PATCH 27/61] Move verbose if statements out of print method --- autogen/messages/agent_messages.py | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index 53ade8dacc..fd1d816fad 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -382,9 +382,6 @@ def __init__( def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print - if not self.verbose: - return - if len(self.mentions) == 1: # Success on retry, we have just one name mentioned selected_agent_name = next(iter(self.mentions)) @@ -431,12 +428,11 @@ def __init__( def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print - if self.verbose: - f( - f"Prepared group chat with {len(self.messages)} messages, the last speaker is", - colored(self.last_speaker_name, "yellow"), - flush=True, - ) + f( + f"Prepared group chat with {len(self.messages)} messages, the last speaker is", + colored(self.last_speaker_name, "yellow"), + flush=True, + ) class GroupChatRunChat(BaseMessage): @@ -449,8 +445,7 @@ def __init__(self, *, uuid: Optional[UUID] = None, speaker: "Agent", silent: Opt def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print - if self.verbose: - f(colored(f"\nNext speaker: {self.speaker_name}\n", "green"), flush=True) + f(colored(f"\nNext speaker: {self.speaker_name}\n", "green"), flush=True) class TerminationAndHumanReply(BaseMessage): From 644a404aa0f2839501a4656bc2af81ec8d404f0a Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Mon, 6 Jan 2025 17:28:48 +0530 Subject: [PATCH 28/61] Fix misc rafactor issues --- autogen/agentchat/conversable_agent.py | 13 
++++++++----- autogen/agentchat/groupchat.py | 2 +- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 3eb121994f..08b58f5aa6 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -2362,7 +2362,7 @@ def execute_function(self, func_call, verbose: bool = False) -> tuple[bool, dict "content": content, } - async def a_execute_function(self, func_call): + async def a_execute_function(self, func_call, verbose: bool = False) -> tuple[bool, dict[str, Any]]: """Execute an async function call and return the result. Override this function to modify the way async functions and tools are executed. @@ -2383,8 +2383,6 @@ async def a_execute_function(self, func_call): func_name = func_call.get("name", "") func = self._function_map.get(func_name, None) - execute_function = ExecuteFunction(func_name=func_name, recipient=self) - is_exec_success = False if func is not None: # Extract arguments from a json-like string and put it into a dict. @@ -2397,7 +2395,8 @@ async def a_execute_function(self, func_call): # Try to execute the function if arguments is not None: - execute_function.print_executing_func(iostream.print) + execute_function = ExecuteFunction(func_name=func_name, recipient=self) + execute_function.print(iostream.print) try: if inspect.iscoroutinefunction(func): content = await func(**arguments) @@ -2411,7 +2410,11 @@ async def a_execute_function(self, func_call): arguments = {} content = f"Error: Function {func_name} not found." 
- execute_function.print_arguments_and_content(arguments, content, iostream.print) + if verbose: + execute_function_arguments_content = ExecuteFunctionArgumentsContent( + func_name=func_name, arguments=arguments, content=content, recipient=self + ) + execute_function_arguments_content.print(arguments, content, iostream.print) return is_exec_success, { "name": func_name, diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py index 72a1cfa358..83a5d8e04f 100644 --- a/autogen/agentchat/groupchat.py +++ b/autogen/agentchat/groupchat.py @@ -407,7 +407,7 @@ def manual_select_speaker(self, agents: Optional[list[Agent]] = None) -> Union[A try_count += 1 if try_count >= 3: select_speaker_try_count_exceeded = SelectSpeakerTryCountExceeded(try_count=try_count, agents=agents) - select_speaker_try_count_exceeded.print(try_count, iostream.print) + select_speaker_try_count_exceeded.print(iostream.print) break try: i = iostream.input( From 57bc4a32a031837cedae3c7e453d88aa171a6a6e Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Mon, 6 Jan 2025 18:08:29 +0530 Subject: [PATCH 29/61] Rename variable for consistency --- autogen/io/websockets.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/autogen/io/websockets.py b/autogen/io/websockets.py index 7f3422a455..e40081f97e 100644 --- a/autogen/io/websockets.py +++ b/autogen/io/websockets.py @@ -194,8 +194,8 @@ def print(self, *objects: Any, sep: str = " ", end: str = "\n", flush: bool = Fa end (str, optional): The end of the output. Defaults to "\n". flush (bool, optional): Whether to flush the output. Defaults to False. """ - message = PrintMessage(*objects, sep=sep, end=end) - self.send(message) + print_message = PrintMessage(*objects, sep=sep, end=end) + self.send(print_message) def send(self, message: BaseMessage) -> None: """Send a message to the output stream. 
From f900998afee84716e16dc7853b8d33c533a4f4d9 Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Mon, 6 Jan 2025 18:53:30 +0530 Subject: [PATCH 30/61] Replace model.print with iostream.send --- autogen/agentchat/chat.py | 2 +- autogen/agentchat/conversable_agent.py | 26 +++++++++++++------------- autogen/agentchat/groupchat.py | 16 ++++++++-------- autogen/oai/client.py | 4 ++-- test/io/test_websockets.py | 8 +++++++- 5 files changed, 31 insertions(+), 25 deletions(-) diff --git a/autogen/agentchat/chat.py b/autogen/agentchat/chat.py index 087cb112be..20a13f1da9 100644 --- a/autogen/agentchat/chat.py +++ b/autogen/agentchat/chat.py @@ -133,7 +133,7 @@ def __post_carryover_processing(chat_info: dict[str, Any]) -> None: ) post_carryover_processing = PostCarryoverProcessing(chat_info=chat_info) - post_carryover_processing.print(iostream.print) + iostream.send(post_carryover_processing) def initiate_chats(chat_queue: list[dict[str, Any]]) -> list[ChatResult]: diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 08b58f5aa6..266338184b 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -1412,7 +1412,7 @@ def clear_history(self, recipient: Optional[Agent] = None, nr_messages_to_preser clear_conversable_agent_history = ClearConversableAgentHistory( agent=self, no_messages_preserved=no_messages_preserved ) - clear_conversable_agent_history.print(iostream.print) + iostream.send(clear_conversable_agent_history) else: self._oai_messages.clear() else: @@ -1420,7 +1420,7 @@ def clear_history(self, recipient: Optional[Agent] = None, nr_messages_to_preser # clear_conversable_agent_history.print_warning(iostream.print) if nr_messages_to_preserve: clear_conversable_agent_history_warning = ClearConversableAgentHistoryWarning(recipient=self) - clear_conversable_agent_history_warning.print(iostream.print) + iostream.send(clear_conversable_agent_history_warning) def generate_oai_reply( 
self, @@ -1551,7 +1551,7 @@ def _generate_code_execution_reply_using_executor( generate_code_execution_reply = GenerateCodeExecutionReply( code_blocks=code_blocks, sender=sender, recipient=self ) - generate_code_execution_reply.print(iostream.print) + iostream.send(generate_code_execution_reply) # found code blocks, execute code. code_result = self._code_executor.execute_code_blocks(code_blocks) @@ -1846,7 +1846,7 @@ def check_termination_and_human_reply( termination_and_human_reply = TerminationAndHumanReply( no_human_input_msg=no_human_input_msg, sender=sender, recipient=self ) - termination_and_human_reply.print(iostream.print) + iostream.send(termination_and_human_reply) # stop the conversation if reply == "exit": @@ -1889,7 +1889,7 @@ def check_termination_and_human_reply( termination_and_human_reply_human_input_mode = TerminationAndHumanReplyHumanInputMode( human_input_mode=self.human_input_mode, sender=sender, recipient=self ) - termination_and_human_reply_human_input_mode.print(iostream.print) + iostream.send(termination_and_human_reply_human_input_mode) return False, None @@ -1965,7 +1965,7 @@ async def a_check_termination_and_human_reply( termination_and_human_reply = TerminationAndHumanReply( no_human_input_msg=no_human_input_msg, sender=sender, recipient=self ) - termination_and_human_reply.print(iostream.print) + iostream.send(termination_and_human_reply) # stop the conversation if reply == "exit": @@ -2008,7 +2008,7 @@ async def a_check_termination_and_human_reply( termination_and_human_reply_human_input_mode = TerminationAndHumanReplyHumanInputMode( human_input_mode=self.human_input_mode, sender=sender, recipient=self ) - termination_and_human_reply_human_input_mode.print(iostream.print) + iostream.send(termination_and_human_reply_human_input_mode) return False, None @@ -2246,7 +2246,7 @@ def execute_code_blocks(self, code_blocks): lang = infer_lang(code) execute_code_block = ExecuteCodeBlock(code=code, language=lang, code_block_count=i, 
recipient=self) - execute_code_block.print(iostream.print) + iostream.send(execute_code_block) if lang in ["bash", "shell", "sh"]: exitcode, logs, image = self.run_code(code, lang=lang, **self._code_execution_config) @@ -2340,7 +2340,7 @@ def execute_function(self, func_call, verbose: bool = False) -> tuple[bool, dict # Try to execute the function if arguments is not None: execute_function = ExecuteFunction(func_name=func_name, recipient=self) - execute_function.print(iostream.print) + iostream.send(execute_function) try: content = func(**arguments) is_exec_success = True @@ -2354,7 +2354,7 @@ def execute_function(self, func_call, verbose: bool = False) -> tuple[bool, dict execute_function_arguments_content = ExecuteFunctionArgumentsContent( func_name=func_name, arguments=arguments, content=content, recipient=self ) - execute_function_arguments_content.print(arguments, content, iostream.print) + iostream.send(execute_function_arguments_content) return is_exec_success, { "name": func_name, @@ -2396,7 +2396,7 @@ async def a_execute_function(self, func_call, verbose: bool = False) -> tuple[bo # Try to execute the function if arguments is not None: execute_function = ExecuteFunction(func_name=func_name, recipient=self) - execute_function.print(iostream.print) + iostream.send(execute_function) try: if inspect.iscoroutinefunction(func): content = await func(**arguments) @@ -2414,7 +2414,7 @@ async def a_execute_function(self, func_call, verbose: bool = False) -> tuple[bo execute_function_arguments_content = ExecuteFunctionArgumentsContent( func_name=func_name, arguments=arguments, content=content, recipient=self ) - execute_function_arguments_content.print(arguments, content, iostream.print) + iostream.send(execute_function_arguments_content) return is_exec_success, { "name": func_name, @@ -2879,7 +2879,7 @@ def print_usage_summary(self, mode: Union[str, list[str]] = ["actual", "total"]) iostream = IOStream.get_default() conversable_agent_usage_summary = 
ConversableAgentUsageSummary(recipient=self, client=self.client) - conversable_agent_usage_summary.print(iostream.print) + iostream.send(conversable_agent_usage_summary) if self.client is not None: self.client.print_usage_summary(mode) diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py index 83a5d8e04f..57092f8a52 100644 --- a/autogen/agentchat/groupchat.py +++ b/autogen/agentchat/groupchat.py @@ -399,7 +399,7 @@ def manual_select_speaker(self, agents: Optional[list[Agent]] = None) -> Union[A agents = self.agents select_speaker = SelectSpeaker(agents=agents) - select_speaker.print(iostream.print) + iostream.send(select_speaker) try_count = 0 # Assume the user will enter a valid number within 3 tries, otherwise use auto selection to avoid blocking. @@ -407,7 +407,7 @@ def manual_select_speaker(self, agents: Optional[list[Agent]] = None) -> Union[A try_count += 1 if try_count >= 3: select_speaker_try_count_exceeded = SelectSpeakerTryCountExceeded(try_count=try_count, agents=agents) - select_speaker_try_count_exceeded.print(iostream.print) + iostream.send(select_speaker_try_count_exceeded) break try: i = iostream.input( @@ -422,7 +422,7 @@ def manual_select_speaker(self, agents: Optional[list[Agent]] = None) -> Union[A raise ValueError except ValueError: select_speaker_invalid_input = SelectSpeakerInvalidInput(agents=agents) - select_speaker_invalid_input.print(iostream.print) + iostream.send(select_speaker_invalid_input) return None def random_select_speaker(self, agents: Optional[list[Agent]] = None) -> Union[Agent, None]: @@ -864,7 +864,7 @@ def _validate_speaker_name( attempts_left=attempts_left, select_speaker_auto_verbose=self.select_speaker_auto_verbose, ) - speaker_attempt.print(iostream.print) + iostream.send(speaker_attempt) if len(mentions) == 1: # Success on retry, we have just one name mentioned @@ -1159,7 +1159,7 @@ def run_chat( if not silent: iostream = IOStream.get_default() group_chat_run_chat = 
GroupChatRunChat(speaker=speaker, silent=silent) - group_chat_run_chat.print(iostream.print) + iostream.send(group_chat_run_chat) # let the speaker speak reply = speaker.generate_reply(sender=self) except KeyboardInterrupt: @@ -1365,7 +1365,7 @@ def resume( if not silent: iostream = IOStream.get_default() group_chat_resume = GroupChatResume(last_speaker_name=last_speaker_name, messages=messages, silent=silent) - group_chat_resume.print(iostream.print) + iostream.send(group_chat_resume) # Update group chat settings for resuming self._groupchat.send_introductions = False @@ -1470,7 +1470,7 @@ async def a_resume( if not silent: iostream = IOStream.get_default() group_chat_resume = GroupChatResume(last_speaker_name=last_speaker_name, messages=messages, silent=silent) - group_chat_resume.print(iostream.print) + iostream.send(group_chat_resume) # Update group chat settings for resuming self._groupchat.send_introductions = False @@ -1629,7 +1629,7 @@ def clear_agents_history(self, reply: dict, groupchat: GroupChat) -> str: clear_agents_history = ClearAgentsHistory( agent=agent_to_memory_clear, nr_messages_to_preserve=nr_messages_to_preserve ) - clear_agents_history.print(iostream.print) + iostream.send(clear_agents_history) if agent_to_memory_clear: agent_to_memory_clear.clear_history(nr_messages_to_preserve=nr_messages_to_preserve) else: diff --git a/autogen/oai/client.py b/autogen/oai/client.py index b692214ae5..f9789e0b1a 100644 --- a/autogen/oai/client.py +++ b/autogen/oai/client.py @@ -363,7 +363,7 @@ def _create_or_parse(*args, **kwargs): # If content is present, print it to the terminal and update response variables if content is not None: stream_message = StreamMessage(content=content) - stream_message.print(iostream.print) + iostream.send(stream_message) response_contents[choice.index] += content completion_tokens += 1 else: @@ -1135,7 +1135,7 @@ def print_usage_summary(self, mode: Union[str, list[str]] = ["actual", "total"]) usage_summary = UsageSummary( 
actual_usage_summary=self.actual_usage_summary, total_usage_summary=self.total_usage_summary, mode=mode ) - usage_summary.print(iostream.print) + iostream.send(usage_summary) def clear_usage_summary(self) -> None: """Clear the usage summary.""" diff --git a/test/io/test_websockets.py b/test/io/test_websockets.py index 36e1ab85c5..270912ea8c 100644 --- a/test/io/test_websockets.py +++ b/test/io/test_websockets.py @@ -4,6 +4,7 @@ # # Portions derived from https://github.com/microsoft/autogen are under the MIT License. # SPDX-License-Identifier: MIT +import json from tempfile import TemporaryDirectory from typing import Dict @@ -83,7 +84,12 @@ def on_connect(iostream: IOWebsockets) -> None: f" - Asserting received message '{message}' is the same as the expected message '{expected}'", flush=True, ) - assert message == expected + try: + message_dict = json.loads(message) + actual = message_dict["objects"][0] + except json.JSONDecodeError: + actual = message + assert actual == expected print(" - Sending message 'Yes' to server.", flush=True) websocket.send("Yes") From 159b2154688abded5a7f508863a5ce5fa25e73aa Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Mon, 6 Jan 2025 18:58:25 +0530 Subject: [PATCH 31/61] Add test for IOConsole.send --- test/io/test_console.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/test/io/test_console.py b/test/io/test_console.py index 653ccca0fc..ed3e38605c 100644 --- a/test/io/test_console.py +++ b/test/io/test_console.py @@ -9,6 +9,7 @@ import pytest from autogen.io import IOConsole +from autogen.messages.print_message import PrintMessage class TestConsoleIO: @@ -21,6 +22,13 @@ def test_print(self, mock_print: MagicMock) -> None: self.console_io.print("Hello, World!", flush=True) mock_print.assert_called_once_with("Hello, World!", end="\n", sep=" ", flush=True) + @patch("builtins.print") + def test_send(self, mock_print: MagicMock) -> None: + # calling the send method should call the print method + message = 
PrintMessage("Hello, World!", "How are you", sep=" ", end="\n", flush=False) + self.console_io.send(message) + mock_print.assert_called_once_with("Hello, World!", "How are you", sep=" ", end="\n", flush=True) + @patch("builtins.input") def test_input(self, mock_input: MagicMock) -> None: # calling the input method should call the mock of the builtin input function From 188540d750d0b2757284e0bb2ce617c77fd3e88c Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Mon, 6 Jan 2025 19:13:22 +0530 Subject: [PATCH 32/61] Refactor to remove single use variable --- autogen/agentchat/chat.py | 3 +- autogen/agentchat/conversable_agent.py | 64 +++++++++++--------------- autogen/agentchat/groupchat.py | 36 ++++++--------- autogen/oai/client.py | 10 ++-- 4 files changed, 47 insertions(+), 66 deletions(-) diff --git a/autogen/agentchat/chat.py b/autogen/agentchat/chat.py index 20a13f1da9..eab21dcece 100644 --- a/autogen/agentchat/chat.py +++ b/autogen/agentchat/chat.py @@ -132,8 +132,7 @@ def __post_carryover_processing(chat_info: dict[str, Any]) -> None: UserWarning, ) - post_carryover_processing = PostCarryoverProcessing(chat_info=chat_info) - iostream.send(post_carryover_processing) + iostream.send(PostCarryoverProcessing(chat_info=chat_info)) def initiate_chats(chat_queue: list[dict[str, Any]]) -> list[ChatResult]: diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 266338184b..78d553f9a2 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -1409,18 +1409,14 @@ def clear_history(self, recipient: Optional[Agent] = None, nr_messages_to_preser no_messages_preserved += 1 # Remove messages from history except last `nr_messages_to_preserve` messages. 
self._oai_messages[key] = self._oai_messages[key][-nr_messages_to_preserve_internal:] - clear_conversable_agent_history = ClearConversableAgentHistory( - agent=self, no_messages_preserved=no_messages_preserved - ) - iostream.send(clear_conversable_agent_history) + iostream.send(ClearConversableAgentHistory(agent=self, no_messages_preserved=no_messages_preserved)) else: self._oai_messages.clear() else: self._oai_messages[recipient].clear() # clear_conversable_agent_history.print_warning(iostream.print) if nr_messages_to_preserve: - clear_conversable_agent_history_warning = ClearConversableAgentHistoryWarning(recipient=self) - iostream.send(clear_conversable_agent_history_warning) + iostream.send(ClearConversableAgentHistoryWarning(recipient=self)) def generate_oai_reply( self, @@ -1548,10 +1544,7 @@ def _generate_code_execution_reply_using_executor( if len(code_blocks) == 0: continue - generate_code_execution_reply = GenerateCodeExecutionReply( - code_blocks=code_blocks, sender=sender, recipient=self - ) - iostream.send(generate_code_execution_reply) + iostream.send(GenerateCodeExecutionReply(code_blocks=code_blocks, sender=sender, recipient=self)) # found code blocks, execute code. 
code_result = self._code_executor.execute_code_blocks(code_blocks) @@ -1843,10 +1836,9 @@ def check_termination_and_human_reply( # print the no_human_input_msg if no_human_input_msg: - termination_and_human_reply = TerminationAndHumanReply( - no_human_input_msg=no_human_input_msg, sender=sender, recipient=self + iostream.send( + TerminationAndHumanReply(no_human_input_msg=no_human_input_msg, sender=sender, recipient=self) ) - iostream.send(termination_and_human_reply) # stop the conversation if reply == "exit": @@ -1886,10 +1878,11 @@ def check_termination_and_human_reply( # increment the consecutive_auto_reply_counter self._consecutive_auto_reply_counter[sender] += 1 if self.human_input_mode != "NEVER": - termination_and_human_reply_human_input_mode = TerminationAndHumanReplyHumanInputMode( - human_input_mode=self.human_input_mode, sender=sender, recipient=self + iostream.send( + TerminationAndHumanReplyHumanInputMode( + human_input_mode=self.human_input_mode, sender=sender, recipient=self + ) ) - iostream.send(termination_and_human_reply_human_input_mode) return False, None @@ -1962,10 +1955,9 @@ async def a_check_termination_and_human_reply( # print the no_human_input_msg if no_human_input_msg: - termination_and_human_reply = TerminationAndHumanReply( - no_human_input_msg=no_human_input_msg, sender=sender, recipient=self + iostream.send( + TerminationAndHumanReply(no_human_input_msg=no_human_input_msg, sender=sender, recipient=self) ) - iostream.send(termination_and_human_reply) # stop the conversation if reply == "exit": @@ -2005,10 +1997,11 @@ async def a_check_termination_and_human_reply( # increment the consecutive_auto_reply_counter self._consecutive_auto_reply_counter[sender] += 1 if self.human_input_mode != "NEVER": - termination_and_human_reply_human_input_mode = TerminationAndHumanReplyHumanInputMode( - human_input_mode=self.human_input_mode, sender=sender, recipient=self + iostream.send( + TerminationAndHumanReplyHumanInputMode( + 
human_input_mode=self.human_input_mode, sender=sender, recipient=self + ) ) - iostream.send(termination_and_human_reply_human_input_mode) return False, None @@ -2245,8 +2238,7 @@ def execute_code_blocks(self, code_blocks): if not lang: lang = infer_lang(code) - execute_code_block = ExecuteCodeBlock(code=code, language=lang, code_block_count=i, recipient=self) - iostream.send(execute_code_block) + iostream.send(ExecuteCodeBlock(code=code, language=lang, code_block_count=i, recipient=self)) if lang in ["bash", "shell", "sh"]: exitcode, logs, image = self.run_code(code, lang=lang, **self._code_execution_config) @@ -2339,8 +2331,7 @@ def execute_function(self, func_call, verbose: bool = False) -> tuple[bool, dict # Try to execute the function if arguments is not None: - execute_function = ExecuteFunction(func_name=func_name, recipient=self) - iostream.send(execute_function) + iostream.send(ExecuteFunction(func_name=func_name, recipient=self)) try: content = func(**arguments) is_exec_success = True @@ -2351,10 +2342,11 @@ def execute_function(self, func_call, verbose: bool = False) -> tuple[bool, dict content = f"Error: Function {func_name} not found." 
if verbose: - execute_function_arguments_content = ExecuteFunctionArgumentsContent( - func_name=func_name, arguments=arguments, content=content, recipient=self + iostream.send( + ExecuteFunctionArgumentsContent( + func_name=func_name, arguments=arguments, content=content, recipient=self + ) ) - iostream.send(execute_function_arguments_content) return is_exec_success, { "name": func_name, @@ -2395,8 +2387,7 @@ async def a_execute_function(self, func_call, verbose: bool = False) -> tuple[bo # Try to execute the function if arguments is not None: - execute_function = ExecuteFunction(func_name=func_name, recipient=self) - iostream.send(execute_function) + iostream.send(ExecuteFunction(func_name=func_name, recipient=self)) try: if inspect.iscoroutinefunction(func): content = await func(**arguments) @@ -2411,10 +2402,11 @@ async def a_execute_function(self, func_call, verbose: bool = False) -> tuple[bo content = f"Error: Function {func_name} not found." if verbose: - execute_function_arguments_content = ExecuteFunctionArgumentsContent( - func_name=func_name, arguments=arguments, content=content, recipient=self + iostream.send( + ExecuteFunctionArgumentsContent( + func_name=func_name, arguments=arguments, content=content, recipient=self + ) ) - iostream.send(execute_function_arguments_content) return is_exec_success, { "name": func_name, @@ -2877,9 +2869,7 @@ def process_last_received_message(self, messages: list[dict]) -> list[dict]: def print_usage_summary(self, mode: Union[str, list[str]] = ["actual", "total"]) -> None: """Print the usage summary.""" iostream = IOStream.get_default() - conversable_agent_usage_summary = ConversableAgentUsageSummary(recipient=self, client=self.client) - - iostream.send(conversable_agent_usage_summary) + iostream.send(ConversableAgentUsageSummary(recipient=self, client=self.client)) if self.client is not None: self.client.print_usage_summary(mode) diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py index 
57092f8a52..ac79a89c1e 100644 --- a/autogen/agentchat/groupchat.py +++ b/autogen/agentchat/groupchat.py @@ -398,16 +398,14 @@ def manual_select_speaker(self, agents: Optional[list[Agent]] = None) -> Union[A if agents is None: agents = self.agents - select_speaker = SelectSpeaker(agents=agents) - iostream.send(select_speaker) + iostream.send(SelectSpeaker(agents=agents)) try_count = 0 # Assume the user will enter a valid number within 3 tries, otherwise use auto selection to avoid blocking. while try_count <= 3: try_count += 1 if try_count >= 3: - select_speaker_try_count_exceeded = SelectSpeakerTryCountExceeded(try_count=try_count, agents=agents) - iostream.send(select_speaker_try_count_exceeded) + iostream.send(SelectSpeakerTryCountExceeded(try_count=try_count, agents=agents)) break try: i = iostream.input( @@ -421,8 +419,7 @@ def manual_select_speaker(self, agents: Optional[list[Agent]] = None) -> Union[A else: raise ValueError except ValueError: - select_speaker_invalid_input = SelectSpeakerInvalidInput(agents=agents) - iostream.send(select_speaker_invalid_input) + iostream.send(SelectSpeakerInvalidInput(agents=agents)) return None def random_select_speaker(self, agents: Optional[list[Agent]] = None) -> Union[Agent, None]: @@ -858,13 +855,14 @@ def _validate_speaker_name( # Output the query and requery results if self.select_speaker_auto_verbose: iostream = IOStream.get_default() - speaker_attempt = SpeakerAttempt( - mentions=mentions, - attempt=attempt, - attempts_left=attempts_left, - select_speaker_auto_verbose=self.select_speaker_auto_verbose, + iostream.send( + SpeakerAttempt( + mentions=mentions, + attempt=attempt, + attempts_left=attempts_left, + select_speaker_auto_verbose=self.select_speaker_auto_verbose, + ) ) - iostream.send(speaker_attempt) if len(mentions) == 1: # Success on retry, we have just one name mentioned @@ -1158,8 +1156,7 @@ def run_chat( speaker = groupchat.select_speaker(speaker, self) if not silent: iostream = IOStream.get_default() - 
group_chat_run_chat = GroupChatRunChat(speaker=speaker, silent=silent) - iostream.send(group_chat_run_chat) + iostream.send(GroupChatRunChat(speaker=speaker, silent=silent)) # let the speaker speak reply = speaker.generate_reply(sender=self) except KeyboardInterrupt: @@ -1364,8 +1361,7 @@ def resume( if not silent: iostream = IOStream.get_default() - group_chat_resume = GroupChatResume(last_speaker_name=last_speaker_name, messages=messages, silent=silent) - iostream.send(group_chat_resume) + iostream.send(GroupChatResume(last_speaker_name=last_speaker_name, messages=messages, silent=silent)) # Update group chat settings for resuming self._groupchat.send_introductions = False @@ -1469,8 +1465,7 @@ async def a_resume( if not silent: iostream = IOStream.get_default() - group_chat_resume = GroupChatResume(last_speaker_name=last_speaker_name, messages=messages, silent=silent) - iostream.send(group_chat_resume) + iostream.send(GroupChatResume(last_speaker_name=last_speaker_name, messages=messages, silent=silent)) # Update group chat settings for resuming self._groupchat.send_introductions = False @@ -1626,10 +1621,7 @@ def clear_agents_history(self, reply: dict, groupchat: GroupChat) -> str: "The last tool call message will be saved to prevent errors caused by tool response without tool call." 
) # clear history - clear_agents_history = ClearAgentsHistory( - agent=agent_to_memory_clear, nr_messages_to_preserve=nr_messages_to_preserve - ) - iostream.send(clear_agents_history) + iostream.send(ClearAgentsHistory(agent=agent_to_memory_clear, nr_messages_to_preserve=nr_messages_to_preserve)) if agent_to_memory_clear: agent_to_memory_clear.clear_history(nr_messages_to_preserve=nr_messages_to_preserve) else: diff --git a/autogen/oai/client.py b/autogen/oai/client.py index f9789e0b1a..9a3fd38e67 100644 --- a/autogen/oai/client.py +++ b/autogen/oai/client.py @@ -362,8 +362,7 @@ def _create_or_parse(*args, **kwargs): # If content is present, print it to the terminal and update response variables if content is not None: - stream_message = StreamMessage(content=content) - iostream.send(stream_message) + iostream.send(StreamMessage(content=content)) response_contents[choice.index] += content completion_tokens += 1 else: @@ -1132,10 +1131,11 @@ def print_usage_summary(self, mode: Union[str, list[str]] = ["actual", "total"]) elif "total" in mode: mode = "total" - usage_summary = UsageSummary( - actual_usage_summary=self.actual_usage_summary, total_usage_summary=self.total_usage_summary, mode=mode + iostream.send( + UsageSummary( + actual_usage_summary=self.actual_usage_summary, total_usage_summary=self.total_usage_summary, mode=mode + ) ) - iostream.send(usage_summary) def clear_usage_summary(self) -> None: """Clear the usage summary.""" From e496291e09dbc17aa5f3c9843780f47c61eb2913 Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Mon, 6 Jan 2025 19:37:34 +0530 Subject: [PATCH 33/61] Rename to UsingAutoReply --- autogen/agentchat/conversable_agent.py | 14 +++----------- autogen/messages/agent_messages.py | 2 +- test/messages/test_agent_messages.py | 10 ++++------ 3 files changed, 8 insertions(+), 18 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 78d553f9a2..0b5c9c54ca 100644 --- 
a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -50,7 +50,7 @@ ExecuteFunctionArgumentsContent, GenerateCodeExecutionReply, TerminationAndHumanReply, - TerminationAndHumanReplyHumanInputMode, + UsingAutoReply, create_received_message_model, ) from ..oai.client import ModelClient, OpenAIWrapper @@ -1878,11 +1878,7 @@ def check_termination_and_human_reply( # increment the consecutive_auto_reply_counter self._consecutive_auto_reply_counter[sender] += 1 if self.human_input_mode != "NEVER": - iostream.send( - TerminationAndHumanReplyHumanInputMode( - human_input_mode=self.human_input_mode, sender=sender, recipient=self - ) - ) + iostream.send(UsingAutoReply(human_input_mode=self.human_input_mode, sender=sender, recipient=self)) return False, None @@ -1997,11 +1993,7 @@ async def a_check_termination_and_human_reply( # increment the consecutive_auto_reply_counter self._consecutive_auto_reply_counter[sender] += 1 if self.human_input_mode != "NEVER": - iostream.send( - TerminationAndHumanReplyHumanInputMode( - human_input_mode=self.human_input_mode, sender=sender, recipient=self - ) - ) + iostream.send(UsingAutoReply(human_input_mode=self.human_input_mode, sender=sender, recipient=self)) return False, None diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index fd1d816fad..259767e8c3 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -474,7 +474,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f(colored(f"\n>>>>>>>> {self.no_human_input_msg}", "red"), flush=True) -class TerminationAndHumanReplyHumanInputMode(BaseMessage): +class UsingAutoReply(BaseMessage): human_input_mode: str sender_name: str recipient_name: str diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index e3eecd8334..e225c402cb 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -33,12 +33,12 
@@ SelectSpeakerTryCountExceeded, SpeakerAttempt, TerminationAndHumanReply, - TerminationAndHumanReplyHumanInputMode, TextMessage, ToolCall, ToolCallMessage, ToolResponse, ToolResponseMessage, + UsingAutoReply, create_received_message_model, ) from autogen.oai.client import OpenAIWrapper @@ -594,18 +594,16 @@ def test_TerminationAndHumanReply(uuid: UUID, sender: ConversableAgent, recipien assert mock.call_args_list == expected_call_args_list -def test_TerminationAndHumanReplyHumanInputMode( - uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent -) -> None: +def test_UsingAutoReply(uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: human_input_mode = "ALWAYS" - actual = TerminationAndHumanReplyHumanInputMode( + actual = UsingAutoReply( uuid=uuid, human_input_mode=human_input_mode, sender=sender, recipient=recipient, ) - assert isinstance(actual, TerminationAndHumanReplyHumanInputMode) + assert isinstance(actual, UsingAutoReply) expected_model_dump = { "uuid": uuid, From 3b4f1b6368e413177918d772af221c6e11e6a8f3 Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Mon, 6 Jan 2025 20:01:40 +0530 Subject: [PATCH 34/61] Refactor ExecuteFunction and ExecutedFunction --- autogen/agentchat/conversable_agent.py | 18 +++++------------- autogen/messages/agent_messages.py | 14 +++++++++----- test/messages/test_agent_messages.py | 26 +++++++++++++++++--------- 3 files changed, 31 insertions(+), 27 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 0b5c9c54ca..ad6fd461a4 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -46,8 +46,8 @@ ClearConversableAgentHistoryWarning, ConversableAgentUsageSummary, ExecuteCodeBlock, + ExecutedFunction, ExecuteFunction, - ExecuteFunctionArgumentsContent, GenerateCodeExecutionReply, TerminationAndHumanReply, UsingAutoReply, @@ -2323,7 +2323,7 @@ def execute_function(self, func_call, verbose: bool 
= False) -> tuple[bool, dict # Try to execute the function if arguments is not None: - iostream.send(ExecuteFunction(func_name=func_name, recipient=self)) + iostream.send(ExecuteFunction(func_name=func_name, arguments=arguments, recipient=self)) try: content = func(**arguments) is_exec_success = True @@ -2334,11 +2334,7 @@ def execute_function(self, func_call, verbose: bool = False) -> tuple[bool, dict content = f"Error: Function {func_name} not found." if verbose: - iostream.send( - ExecuteFunctionArgumentsContent( - func_name=func_name, arguments=arguments, content=content, recipient=self - ) - ) + iostream.send(ExecutedFunction(func_name=func_name, arguments=arguments, content=content, recipient=self)) return is_exec_success, { "name": func_name, @@ -2379,7 +2375,7 @@ async def a_execute_function(self, func_call, verbose: bool = False) -> tuple[bo # Try to execute the function if arguments is not None: - iostream.send(ExecuteFunction(func_name=func_name, recipient=self)) + iostream.send(ExecuteFunction(func_name=func_name, arguments=arguments, recipient=self)) try: if inspect.iscoroutinefunction(func): content = await func(**arguments) @@ -2394,11 +2390,7 @@ async def a_execute_function(self, func_call, verbose: bool = False) -> tuple[bo content = f"Error: Function {func_name} not found." 
if verbose: - iostream.send( - ExecuteFunctionArgumentsContent( - func_name=func_name, arguments=arguments, content=content, recipient=self - ) - ) + iostream.send(ExecutedFunction(func_name=func_name, arguments=arguments, content=content, recipient=self)) return is_exec_success, { "name": func_name, diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index 259767e8c3..f199b41cbd 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -527,21 +527,22 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: class ExecuteFunction(BaseMessage): func_name: str + arguments: dict[str, Any] recipient_name: str - def __init__(self, *, uuid: Optional[UUID] = None, func_name: str, recipient: "Agent"): - super().__init__(uuid=uuid, func_name=func_name, recipient_name=recipient.name) + def __init__(self, *, uuid: Optional[UUID] = None, func_name: str, arguments: dict[str, Any], recipient: "Agent"): + super().__init__(uuid=uuid, func_name=func_name, arguments=arguments, recipient_name=recipient.name) def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print f( - colored(f"\n>>>>>>>> EXECUTING FUNCTION {self.func_name}...", "magenta"), + colored(f"\n>>>>>>>> EXECUTING FUNCTION {self.func_name}...\nInput arguments: {self.arguments}", "magenta"), flush=True, ) -class ExecuteFunctionArgumentsContent(BaseMessage): +class ExecutedFunction(BaseMessage): func_name: str arguments: dict[str, Any] content: str @@ -564,7 +565,10 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print f( - colored(f"\nInput arguments: {self.arguments}\nOutput:\n{self.content}", "magenta"), + colored( + f"\n>>>>>>>> EXECUTED FUNCTION {self.func_name}...\nInput arguments: {self.arguments}\nOutput:\n{self.content}", + "magenta", + ), flush=True, ) diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index e225c402cb..8ef28e51d1 100644 --- 
a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -18,8 +18,8 @@ ContentMessage, ConversableAgentUsageSummary, ExecuteCodeBlock, + ExecutedFunction, ExecuteFunction, - ExecuteFunctionArgumentsContent, FunctionCall, FunctionCallMessage, FunctionResponseMessage, @@ -653,13 +653,15 @@ def test_ExecuteCodeBlock(uuid: UUID, sender: ConversableAgent, recipient: Conve def test_ExecuteFunction(uuid: UUID, recipient: ConversableAgent) -> None: func_name = "add_num" + arguments = {"num_to_be_added": 5} - actual = ExecuteFunction(uuid=uuid, func_name=func_name, recipient=recipient) + actual = ExecuteFunction(uuid=uuid, func_name=func_name, arguments=arguments, recipient=recipient) assert isinstance(actual, ExecuteFunction) expected_model_dump = { "uuid": uuid, "func_name": func_name, + "arguments": arguments, "recipient_name": "recipient", } assert actual.model_dump() == expected_model_dump @@ -667,19 +669,22 @@ def test_ExecuteFunction(uuid: UUID, recipient: ConversableAgent) -> None: mock = MagicMock() actual.print(f=mock) # print(mock.call_args_list) - expected_call_args_list = [call("\x1b[35m\n>>>>>>>> EXECUTING FUNCTION add_num...\x1b[0m", flush=True)] + expected_call_args_list = [ + call( + "\x1b[35m\n>>>>>>>> EXECUTING FUNCTION add_num...\nInput arguments: {'num_to_be_added': 5}\x1b[0m", + flush=True, + ) + ] assert mock.call_args_list == expected_call_args_list -def test_ExecuteFunctionArgumentsContent(uuid: UUID, recipient: ConversableAgent) -> None: +def test_ExecutedFunction(uuid: UUID, recipient: ConversableAgent) -> None: func_name = "add_num" arguments = {"num_to_be_added": 5} content = "15" - actual = ExecuteFunctionArgumentsContent( - uuid=uuid, func_name=func_name, arguments=arguments, content=content, recipient=recipient - ) - assert isinstance(actual, ExecuteFunctionArgumentsContent) + actual = ExecutedFunction(uuid=uuid, func_name=func_name, arguments=arguments, content=content, recipient=recipient) + assert 
isinstance(actual, ExecutedFunction) expected_model_dump = { "uuid": uuid, @@ -694,7 +699,10 @@ def test_ExecuteFunctionArgumentsContent(uuid: UUID, recipient: ConversableAgent actual.print(f=mock) # print(mock.call_args_list) expected_call_args_list = [ - call("\x1b[35m\nInput arguments: {'num_to_be_added': 5}\nOutput:\n15\x1b[0m", flush=True) + call( + "\x1b[35m\n>>>>>>>> EXECUTED FUNCTION add_num...\nInput arguments: {'num_to_be_added': 5}\nOutput:\n15\x1b[0m", + flush=True, + ) ] assert mock.call_args_list == expected_call_args_list From d138b477a92299a4633c2e3ffe64352c8aa1d0ee Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Mon, 6 Jan 2025 20:42:22 +0530 Subject: [PATCH 35/61] Add call_id to ExecuteFunction and ExecutedFunction --- .../agentchat/contrib/gpt_assistant_agent.py | 5 +- autogen/agentchat/conversable_agent.py | 47 ++++++++++++++----- autogen/messages/agent_messages.py | 31 ++++++++++-- test/messages/test_agent_messages.py | 14 ++++-- 4 files changed, 74 insertions(+), 23 deletions(-) diff --git a/autogen/agentchat/contrib/gpt_assistant_agent.py b/autogen/agentchat/contrib/gpt_assistant_agent.py index 4c2ac731f7..3512e11abc 100644 --- a/autogen/agentchat/contrib/gpt_assistant_agent.py +++ b/autogen/agentchat/contrib/gpt_assistant_agent.py @@ -305,7 +305,10 @@ def _get_run_response(self, thread, run): actions = [] for tool_call in run.required_action.submit_tool_outputs.tool_calls: function = tool_call.function - is_exec_success, tool_response = self.execute_function(function.dict(), self._verbose) + tool_call_id = tool_call.id + is_exec_success, tool_response = self.execute_function( + function.dict(), call_id=tool_call_id, verbose=self._verbose + ) tool_response["metadata"] = { "tool_call_id": tool_call.id, "run_id": run.id, diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index ad6fd461a4..324b6871ad 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py 
@@ -1623,6 +1623,7 @@ def generate_function_call_reply( messages = self._oai_messages[sender] message = messages[-1] if "function_call" in message and message["function_call"]: + call_id = message.get("id", None) func_call = message["function_call"] func = self._function_map.get(func_call.get("name", None), None) if inspect.iscoroutinefunction(func): @@ -1635,11 +1636,11 @@ def generate_function_call_reply( loop = asyncio.new_event_loop() close_loop = True - _, func_return = loop.run_until_complete(self.a_execute_function(func_call)) + _, func_return = loop.run_until_complete(self.a_execute_function(func_call, call_id=call_id)) if close_loop: loop.close() else: - _, func_return = self.execute_function(message["function_call"]) + _, func_return = self.execute_function(message["function_call"], call_id=call_id) return True, func_return return False, None @@ -1661,13 +1662,14 @@ async def a_generate_function_call_reply( messages = self._oai_messages[sender] message = messages[-1] if "function_call" in message: + call_id = message.get("id", None) func_call = message["function_call"] func_name = func_call.get("name", "") func = self._function_map.get(func_name, None) if func and inspect.iscoroutinefunction(func): - _, func_return = await self.a_execute_function(func_call) + _, func_return = await self.a_execute_function(func_call, call_id=call_id) else: - _, func_return = self.execute_function(func_call) + _, func_return = self.execute_function(func_call, call_id=call_id) return True, func_return return False, None @@ -1690,6 +1692,7 @@ def generate_tool_calls_reply( tool_returns = [] for tool_call in message.get("tool_calls", []): function_call = tool_call.get("function", {}) + tool_call_id = tool_call.get("id", None) func = self._function_map.get(function_call.get("name", None), None) if inspect.iscoroutinefunction(func): try: @@ -1701,15 +1704,15 @@ def generate_tool_calls_reply( loop = asyncio.new_event_loop() close_loop = True - _, func_return = 
loop.run_until_complete(self.a_execute_function(function_call)) + _, func_return = loop.run_until_complete(self.a_execute_function(function_call, call_id=tool_call_id)) if close_loop: loop.close() else: - _, func_return = self.execute_function(function_call) + _, func_return = self.execute_function(function_call, call_id=tool_call_id) content = func_return.get("content", "") if content is None: content = "" - tool_call_id = tool_call.get("id", None) + if tool_call_id is not None: tool_call_response = { "tool_call_id": tool_call_id, @@ -2290,13 +2293,16 @@ def _format_json_str(jstr): result.append(char) return "".join(result) - def execute_function(self, func_call, verbose: bool = False) -> tuple[bool, dict[str, Any]]: + def execute_function( + self, func_call, call_id: Optional[str] = None, verbose: bool = False + ) -> tuple[bool, dict[str, Any]]: """Execute a function call and return the result. Override this function to modify the way to execute function and tool calls. Args: func_call: a dictionary extracted from openai message at "function_call" or "tool_calls" with keys "name" and "arguments". + call_id: a string to identify the tool call. Returns: A tuple of (is_exec_success, result_dict). @@ -2323,7 +2329,9 @@ def execute_function(self, func_call, verbose: bool = False) -> tuple[bool, dict # Try to execute the function if arguments is not None: - iostream.send(ExecuteFunction(func_name=func_name, arguments=arguments, recipient=self)) + iostream.send( + ExecuteFunction(func_name=func_name, call_id=call_id, arguments=arguments, recipient=self) + ) try: content = func(**arguments) is_exec_success = True @@ -2334,7 +2342,11 @@ def execute_function(self, func_call, verbose: bool = False) -> tuple[bool, dict content = f"Error: Function {func_name} not found." 
if verbose: - iostream.send(ExecutedFunction(func_name=func_name, arguments=arguments, content=content, recipient=self)) + iostream.send( + ExecutedFunction( + func_name=func_name, call_id=call_id, arguments=arguments, content=content, recipient=self + ) + ) return is_exec_success, { "name": func_name, @@ -2342,13 +2354,16 @@ def execute_function(self, func_call, verbose: bool = False) -> tuple[bool, dict "content": content, } - async def a_execute_function(self, func_call, verbose: bool = False) -> tuple[bool, dict[str, Any]]: + async def a_execute_function( + self, func_call, call_id: Optional[str] = None, verbose: bool = False + ) -> tuple[bool, dict[str, Any]]: """Execute an async function call and return the result. Override this function to modify the way async functions and tools are executed. Args: func_call: a dictionary extracted from openai message at key "function_call" or "tool_calls" with keys "name" and "arguments". + call_id: a string to identify the tool call. Returns: A tuple of (is_exec_success, result_dict). @@ -2375,7 +2390,9 @@ async def a_execute_function(self, func_call, verbose: bool = False) -> tuple[bo # Try to execute the function if arguments is not None: - iostream.send(ExecuteFunction(func_name=func_name, arguments=arguments, recipient=self)) + iostream.send( + ExecuteFunction(func_name=func_name, call_id=call_id, arguments=arguments, recipient=self) + ) try: if inspect.iscoroutinefunction(func): content = await func(**arguments) @@ -2390,7 +2407,11 @@ async def a_execute_function(self, func_call, verbose: bool = False) -> tuple[bo content = f"Error: Function {func_name} not found." 
if verbose: - iostream.send(ExecutedFunction(func_name=func_name, arguments=arguments, content=content, recipient=self)) + iostream.send( + ExecutedFunction( + func_name=func_name, call_id=call_id, arguments=arguments, content=content, recipient=self + ) + ) return is_exec_success, { "name": func_name, diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index f199b41cbd..04395f6001 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -527,23 +527,38 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: class ExecuteFunction(BaseMessage): func_name: str + call_id: Optional[str] = None arguments: dict[str, Any] recipient_name: str - def __init__(self, *, uuid: Optional[UUID] = None, func_name: str, arguments: dict[str, Any], recipient: "Agent"): - super().__init__(uuid=uuid, func_name=func_name, arguments=arguments, recipient_name=recipient.name) + def __init__( + self, + *, + uuid: Optional[UUID] = None, + func_name: str, + call_id: Optional[str] = None, + arguments: dict[str, Any], + recipient: "Agent", + ): + super().__init__( + uuid=uuid, func_name=func_name, call_id=call_id, arguments=arguments, recipient_name=recipient.name + ) def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print f( - colored(f"\n>>>>>>>> EXECUTING FUNCTION {self.func_name}...\nInput arguments: {self.arguments}", "magenta"), + colored( + f"\n>>>>>>>> EXECUTING FUNCTION {self.func_name}...\nCall ID: {self.call_id}\nInput arguments: {self.arguments}", + "magenta", + ), flush=True, ) class ExecutedFunction(BaseMessage): func_name: str + call_id: Optional[str] = None arguments: dict[str, Any] content: str recipient_name: str @@ -553,12 +568,18 @@ def __init__( *, uuid: Optional[UUID] = None, func_name: str, + call_id: Optional[str] = None, arguments: dict[str, Any], content: str, recipient: "Agent", ): super().__init__( - uuid=uuid, func_name=func_name, arguments=arguments, 
content=content, recipient_name=recipient.name + uuid=uuid, + func_name=func_name, + call_id=call_id, + arguments=arguments, + content=content, + recipient_name=recipient.name, ) def print(self, f: Optional[Callable[..., Any]] = None) -> None: @@ -566,7 +587,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f( colored( - f"\n>>>>>>>> EXECUTED FUNCTION {self.func_name}...\nInput arguments: {self.arguments}\nOutput:\n{self.content}", + f"\n>>>>>>>> EXECUTED FUNCTION {self.func_name}...\nCall ID: {self.call_id}\nInput arguments: {self.arguments}\nOutput:\n{self.content}", "magenta", ), flush=True, diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index 8ef28e51d1..3a31f867a6 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -653,14 +653,16 @@ def test_ExecuteCodeBlock(uuid: UUID, sender: ConversableAgent, recipient: Conve def test_ExecuteFunction(uuid: UUID, recipient: ConversableAgent) -> None: func_name = "add_num" + call_id = "call_12345xyz" arguments = {"num_to_be_added": 5} - actual = ExecuteFunction(uuid=uuid, func_name=func_name, arguments=arguments, recipient=recipient) + actual = ExecuteFunction(uuid=uuid, func_name=func_name, call_id=call_id, arguments=arguments, recipient=recipient) assert isinstance(actual, ExecuteFunction) expected_model_dump = { "uuid": uuid, "func_name": func_name, + "call_id": call_id, "arguments": arguments, "recipient_name": "recipient", } @@ -671,7 +673,7 @@ def test_ExecuteFunction(uuid: UUID, recipient: ConversableAgent) -> None: # print(mock.call_args_list) expected_call_args_list = [ call( - "\x1b[35m\n>>>>>>>> EXECUTING FUNCTION add_num...\nInput arguments: {'num_to_be_added': 5}\x1b[0m", + "\x1b[35m\n>>>>>>>> EXECUTING FUNCTION add_num...\nCall ID: call_12345xyz\nInput arguments: {'num_to_be_added': 5}\x1b[0m", flush=True, ) ] @@ -680,15 +682,19 @@ def test_ExecuteFunction(uuid: UUID, recipient: ConversableAgent) -> None: 
def test_ExecutedFunction(uuid: UUID, recipient: ConversableAgent) -> None: func_name = "add_num" + call_id = "call_12345xyz" arguments = {"num_to_be_added": 5} content = "15" - actual = ExecutedFunction(uuid=uuid, func_name=func_name, arguments=arguments, content=content, recipient=recipient) + actual = ExecutedFunction( + uuid=uuid, func_name=func_name, call_id=call_id, arguments=arguments, content=content, recipient=recipient + ) assert isinstance(actual, ExecutedFunction) expected_model_dump = { "uuid": uuid, "func_name": func_name, + "call_id": call_id, "arguments": arguments, "content": content, "recipient_name": "recipient", @@ -700,7 +706,7 @@ def test_ExecutedFunction(uuid: UUID, recipient: ConversableAgent) -> None: # print(mock.call_args_list) expected_call_args_list = [ call( - "\x1b[35m\n>>>>>>>> EXECUTED FUNCTION add_num...\nInput arguments: {'num_to_be_added': 5}\nOutput:\n15\x1b[0m", + "\x1b[35m\n>>>>>>>> EXECUTED FUNCTION add_num...\nCall ID: call_12345xyz\nInput arguments: {'num_to_be_added': 5}\nOutput:\n15\x1b[0m", flush=True, ) ] From 40c1455fb1a268ad330a30b8fab856a2e5e5054d Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Mon, 6 Jan 2025 20:46:40 +0530 Subject: [PATCH 36/61] Add missed tool_call_id --- autogen/agentchat/conversable_agent.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 324b6871ad..cf75623ecf 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -1736,11 +1736,11 @@ def generate_tool_calls_reply( return False, None async def _a_execute_tool_call(self, tool_call): - id = tool_call["id"] + tool_call_id = tool_call["id"] function_call = tool_call.get("function", {}) - _, func_return = await self.a_execute_function(function_call) + _, func_return = await self.a_execute_function(function_call, call_id=tool_call_id) return { - "tool_call_id": id, + "tool_call_id": 
tool_call_id, "role": "tool", "content": func_return.get("content", ""), } From 43697b18fae005debe3c839e75d1be6e6ad23ade Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Tue, 7 Jan 2025 01:47:20 +0100 Subject: [PATCH 37/61] Message wrapping added --- autogen/messages/__init__.py | 4 + autogen/messages/agent_messages.py | 31 ++++- autogen/messages/base_message.py | 84 +++++++++++- notebook/agentchat_websockets.ipynb | 191 ++-------------------------- pyproject.toml | 13 +- test/messages/test_base_message.py | 57 ++++++++- 6 files changed, 177 insertions(+), 203 deletions(-) diff --git a/autogen/messages/__init__.py b/autogen/messages/__init__.py index bcd5401d54..b21bb1c2a4 100644 --- a/autogen/messages/__init__.py +++ b/autogen/messages/__init__.py @@ -1,3 +1,7 @@ # Copyright (c) 2023 - 2024, Owners of https://github.com/ag2ai # # SPDX-License-Identifier: Apache-2.0 + +from .base_message import BaseMessage, get_annotated_type_for_message_classes, wrap_message + +__all__ = ["BaseMessage", "wrap_message", "get_annotated_type_for_message_classes"] diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index 04395f6001..47d59d319c 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -2,6 +2,7 @@ # # SPDX-License-Identifier: Apache-2.0 +from abc import ABC from copy import deepcopy from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, Union from uuid import UUID @@ -12,7 +13,7 @@ # ToDo: once you move the code below, we can just delete this import from ..oai.client import OpenAIWrapper -from .base_message import BaseMessage +from .base_message import BaseMessage, wrap_message if TYPE_CHECKING: from ..agentchat.agent import Agent @@ -43,7 +44,7 @@ MessageRole = Literal["assistant", "function", "tool"] -class BasePrintReceivedMessage(BaseMessage): +class BasePrintReceivedMessage(BaseMessage, ABC): content: Union[str, int, float, bool] sender_name: str recipient_name: str @@ -53,6 +54,7 @@ 
def print(self, f: Optional[Callable[..., Any]] = None) -> None: f(f"{colored(self.sender_name, 'yellow')} (to {self.recipient_name}):\n", flush=True) +@wrap_message class FunctionResponseMessage(BasePrintReceivedMessage): name: Optional[str] = None role: MessageRole = "function" @@ -71,6 +73,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f("\n", "-" * 80, flush=True, sep="") +@wrap_message class ToolResponse(BaseMessage): tool_call_id: Optional[str] = None role: MessageRole = "tool" @@ -85,6 +88,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f(colored("*" * len(tool_print), "green"), flush=True) +@wrap_message class ToolResponseMessage(BasePrintReceivedMessage): role: MessageRole = "tool" tool_responses: list[ToolResponse] @@ -99,6 +103,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f("\n", "-" * 80, flush=True, sep="") +@wrap_message class FunctionCall(BaseMessage): name: Optional[str] = None arguments: Optional[str] = None @@ -120,6 +125,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f(colored("*" * len(func_print), "green"), flush=True) +@wrap_message class FunctionCallMessage(BasePrintReceivedMessage): content: Optional[Union[str, int, float, bool]] = None # type: ignore [assignment] function_call: FunctionCall @@ -136,6 +142,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f("\n", "-" * 80, flush=True, sep="") +@wrap_message class ToolCall(BaseMessage): id: Optional[str] = None function: FunctionCall @@ -160,6 +167,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f(colored("*" * len(func_print), "green"), flush=True) +@wrap_message class ToolCallMessage(BasePrintReceivedMessage): content: Optional[Union[str, int, float, bool]] = None # type: ignore [assignment] refusal: Optional[str] = None @@ -181,6 +189,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f("\n", "-" * 80, flush=True, sep="") +@wrap_message 
class ContentMessage(BasePrintReceivedMessage): content: Optional[Union[str, int, float, bool]] = None # type: ignore [assignment] @@ -244,6 +253,7 @@ def create_received_message_model( ) +@wrap_message class PostCarryoverProcessing(BaseMessage): carryover: Union[str, list[Union[str, dict[str, Any], Any]]] message: str @@ -326,6 +336,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f(colored("\n" + "*" * 80, "blue"), flush=True, sep="") +@wrap_message class ClearAgentsHistory(BaseMessage): agent_name: Optional[str] = None nr_messages_to_preserve: Optional[int] = None @@ -356,6 +367,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f("Clearing history for all agents.") +@wrap_message class SpeakerAttempt(BaseMessage): mentions: dict[str, int] attempt: int @@ -410,6 +422,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: ) +@wrap_message class GroupChatResume(BaseMessage): last_speaker_name: str messages: list[dict[str, Any]] @@ -435,6 +448,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: ) +@wrap_message class GroupChatRunChat(BaseMessage): speaker_name: str verbose: Optional[bool] = False @@ -448,6 +462,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f(colored(f"\nNext speaker: {self.speaker_name}\n", "green"), flush=True) +@wrap_message class TerminationAndHumanReply(BaseMessage): no_human_input_msg: str sender_name: str @@ -474,6 +489,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f(colored(f"\n>>>>>>>> {self.no_human_input_msg}", "red"), flush=True) +@wrap_message class UsingAutoReply(BaseMessage): human_input_mode: str sender_name: str @@ -500,6 +516,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f(colored("\n>>>>>>>> USING AUTO REPLY...", "red"), flush=True) +@wrap_message class ExecuteCodeBlock(BaseMessage): code: str language: str @@ -525,6 +542,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> 
None: ) +@wrap_message class ExecuteFunction(BaseMessage): func_name: str call_id: Optional[str] = None @@ -556,6 +574,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: ) +@wrap_message class ExecutedFunction(BaseMessage): func_name: str call_id: Optional[str] = None @@ -594,6 +613,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: ) +@wrap_message class SelectSpeaker(BaseMessage): agent_names: Optional[list[str]] = None @@ -610,6 +630,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f(f"{i+1}: {agent_name}") +@wrap_message class SelectSpeakerTryCountExceeded(BaseMessage): try_count: int agent_names: Optional[list[str]] = None @@ -624,6 +645,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f(f"You have tried {self.try_count} times. The next speaker will be selected automatically.") +@wrap_message class SelectSpeakerInvalidInput(BaseMessage): agent_names: Optional[list[str]] = None @@ -637,6 +659,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f(f"Invalid input. 
Please enter a number between 1 and {len(self.agent_names or [])}.") +@wrap_message class ClearConversableAgentHistory(BaseMessage): agent_name: str recipient_name: str @@ -660,6 +683,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: ) +@wrap_message class ClearConversableAgentHistoryWarning(BaseMessage): recipient_name: str @@ -681,6 +705,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: ) +@wrap_message class GenerateCodeExecutionReply(BaseMessage): code_block_languages: list[str] sender_name: Optional[str] = None @@ -725,6 +750,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: ) +@wrap_message class ConversableAgentUsageSummary(BaseMessage): recipient_name: str is_client_empty: bool @@ -741,6 +767,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f(f"Agent '{self.recipient_name}':") +@wrap_message class TextMessage(BaseMessage): text: str diff --git a/autogen/messages/base_message.py b/autogen/messages/base_message.py index 84141a65cb..65b59bac48 100644 --- a/autogen/messages/base_message.py +++ b/autogen/messages/base_message.py @@ -3,15 +3,18 @@ # SPDX-License-Identifier: Apache-2.0 -from typing import Any, Callable, Optional +from abc import ABC +from typing import Annotated, Any, Callable, Literal, Optional, Type, TypeVar, Union from uuid import UUID, uuid4 -from pydantic import BaseModel +from pydantic import BaseModel, Field, create_model -__all__ = ["BaseMessage"] +PetType = TypeVar("PetType", bound=Literal["cat", "dog"]) +__all__ = ["BaseMessage", "wrap_message", "get_annotated_type_for_message_classes"] -class BaseMessage(BaseModel): + +class BaseMessage(BaseModel, ABC): uuid: UUID def __init__(self, uuid: Optional[UUID] = None, **kwargs: Any) -> None: @@ -25,3 +28,76 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f (Optional[Callable[..., Any]], optional): Print function. If none, python's default print will be used. """ ... 
+ + # def model_dump( + # self, + # *, + # mode="python", + # include=None, + # exclude=None, + # context=None, + # by_alias=False, + # exclude_unset=False, + # exclude_defaults=False, + # exclude_none=False, + # round_trip=False, + # warnings=True, + # serialize_as_any=False + # ): + # return super().model_dump( + # mode=mode, + # include=include, + # exclude=exclude, + # context=context, + # by_alias=by_alias, + # exclude_unset=exclude_unset, + # exclude_defaults=exclude_defaults, + # exclude_none=exclude_none, + # round_trip=round_trip, + # warnings=warnings, + # serialize_as_any=serialize_as_any, + # ) + + +def camel2snake(name: str) -> str: + return "".join(["_" + i.lower() if i.isupper() else i for i in name]).lstrip("_") + + +_message_classes: dict[str, Type[BaseModel]] = {} + + +def wrap_message(message_cls: Type[BaseMessage]) -> Type[BaseModel]: + global _message_classes + + type_name = camel2snake(message_cls.__name__) + + class WrapperBase(BaseModel): + # these types are generated dynamically so we need to disable the type checker + type: Literal[type_name] = type_name # type: ignore[valid-type] + content: message_cls # type: ignore[valid-type] + + def __init__(self, **data: Any): + if set(data.keys()) <= {"type", "content"} and "content" in data: + super().__init__(**data) + else: + if "content" in data: + content = data.pop("content") + super().__init__(content=message_cls(**data, content=content), **data) + else: + super().__init__(content=message_cls(**data), **data) + + Wrapper = create_model(message_cls.__name__, __base__=WrapperBase) + + _message_classes[type_name] = Wrapper + + return Wrapper + + +def get_annotated_type_for_message_classes() -> Type[Any]: + # this is a dynamic type so we need to disable the type checker + union_type = Union[tuple(_message_classes.values())] # type: ignore[valid-type] + return Annotated[union_type, Field(discriminator="type")] # type: ignore[return-value] + + +def get_message_classes() -> dict[str, 
Type[BaseModel]]: + return _message_classes diff --git a/notebook/agentchat_websockets.ipynb b/notebook/agentchat_websockets.ipynb index cf67a65ea7..0f6a6cd75e 100644 --- a/notebook/agentchat_websockets.ipynb +++ b/notebook/agentchat_websockets.ipynb @@ -47,7 +47,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "id": "dca301a4", "metadata": {}, "outputs": [], @@ -97,7 +97,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 4, "id": "9fb85afb", "metadata": {}, "outputs": [], @@ -118,7 +118,7 @@ " \"config_list\": autogen.config_list_from_json(\n", " env_or_file=\"OAI_CONFIG_LIST\",\n", " filter_dict={\n", - " \"model\": [\"gpt-4\", \"gpt-3.5-turbo\", \"gpt-3.5-turbo-16k\"],\n", + " \"model\": [\"gpt-4o\"],\n", " },\n", " ),\n", " \"stream\": True,\n", @@ -196,91 +196,10 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "4fbe004d", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " - test_setup() with websocket server running on ws://127.0.0.1:8765.\n", - " - on_connect(): Connected to client using IOWebsockets \n", - " - on_connect(): Receiving message from client.\n", - " - Connected to server on ws://127.0.0.1:8765\n", - " - Sending message to server.\n", - " - on_connect(): Initiating chat with agent using message 'Check out the weather in Paris and write a poem about it.'\n", - "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", - "\n", - "Check out the weather in Paris and write a poem about it.\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[31m\n", - ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", - "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", - "\n", - "\n", - "\u001b[32m***** Suggested tool call (call_xFFWe52vwdpgZ8xTRV6adBdy): weather_forecast *****\u001b[0m\n", - "Arguments: \n", - "{\n", - " \"city\": \"Paris\"\n", - "}\n", - 
"\u001b[32m*********************************************************************************\u001b[0m\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[35m\n", - ">>>>>>>> EXECUTING FUNCTION weather_forecast...\u001b[0m\n", - "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", - "\n", - "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", - "\n", - "\u001b[32m***** Response from calling tool (call_xFFWe52vwdpgZ8xTRV6adBdy) *****\u001b[0m\n", - "The weather forecast for Paris at 2024-04-05 12:00:06.206125 is sunny.\n", - "\u001b[32m**********************************************************************\u001b[0m\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[31m\n", - ">>>>>>>> USING AUTO REPLY...\u001b[0m\n", - "\u001b[32m\u001b[32mIn the heart of France, beneath the sun's warm glow,\n", - "Lies the city of Paris, where the Seine waters flow.\n", - "Bathed in sunlight, every street and spire,\n", - "Illuminated each detail, just like a docile fire.\n", - "\n", - "Once monochromatic cityscape, kissed by the sun's bright light,\n", - "Now a kaleidoscope of colors, from morning till the night.\n", - "This sun-swept city sparkles, under the azure dome,\n", - "Her inhabitants find comfort, for they call this city home.\n", - "\n", - "One can wander in her sunshine, on this perfect weather day,\n", - "And feel the warmth it brings, to chase your blues away.\n", - "For the weather in Paris, is more than just a forecast,\n", - "It is a stage setting for dwellers and tourists amassed.\n", - "\n", - "TERMINATE\u001b[0m\n", - "\n", - "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", - "\n", - "In the heart of France, beneath the sun's warm glow,\n", - "Lies the city of Paris, where the Seine waters flow.\n", - "Bathed in sunlight, every street and spire,\n", - "Illuminated each detail, just like a docile fire.\n", - "\n", - "Once monochromatic cityscape, kissed by 
the sun's bright light,\n", - "Now a kaleidoscope of colors, from morning till the night.\n", - "This sun-swept city sparkles, under the azure dome,\n", - "Her inhabitants find comfort, for they call this city home.\n", - "\n", - "One can wander in her sunshine, on this perfect weather day,\n", - "And feel the warmth it brings, to chase your blues away.\n", - "For the weather in Paris, is more than just a forecast,\n", - "It is a stage setting for dwellers and tourists amassed.\n", - "\n", - "TERMINATE\n", - "\n", - " - Received TERMINATE message. Exiting.\n" - ] - } - ], + "outputs": [], "source": [ "with IOWebsockets.run_server_in_thread(on_connect=on_connect, port=8765) as uri:\n", " print(f\" - test_setup() with websocket server running on {uri}.\", flush=True)\n", @@ -394,55 +313,10 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "id": "d92e50b5", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO: Started server process [5227]\n", - "INFO: Waiting for application startup.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Websocket server started at ws://127.0.0.1:8080.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO: Application startup complete.\n", - "INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "INFO: 127.0.0.1:42548 - \"GET / HTTP/1.1\" 200 OK\n", - "INFO: 127.0.0.1:42548 - \"GET /favicon.ico HTTP/1.1\" 404 Not Found\n", - " - on_connect(): Connected to client using IOWebsockets \n", - " - on_connect(): Receiving message from client.\n", - " - on_connect(): Initiating chat with agent using message 'write a poem about lundon'\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO: Shutting down\n", - "INFO: Waiting for application shutdown.\n", - "INFO: Application shutdown 
complete.\n", - "INFO: Finished server process [5227]\n" - ] - } - ], + "outputs": [], "source": [ "import uvicorn # noqa: E402\n", "\n", @@ -497,53 +371,10 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "id": "708a98de", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Websocket server started at ws://127.0.0.1:8080.\n", - "HTTP server started at http://localhost:8000\n", - " - on_connect(): Connected to client using IOWebsockets \n", - " - on_connect(): Receiving message from client.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "127.0.0.1 - - [05/Apr/2024 12:01:51] \"GET / HTTP/1.1\" 200 -\n", - "127.0.0.1 - - [05/Apr/2024 12:01:51] \"GET / HTTP/1.1\" 200 -\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " - on_connect(): Initiating chat with agent using message 'write a poem about new york'\n", - " - on_connect(): Connected to client using IOWebsockets \n", - " - on_connect(): Receiving message from client.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "127.0.0.1 - - [05/Apr/2024 12:02:27] \"GET / HTTP/1.1\" 304 -\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " - on_connect(): Initiating chat with agent using message 'check the weather in london and write a poem about it'\n", - " - HTTP server stopped.\n" - ] - } - ], + "outputs": [], "source": [ "from http.server import HTTPServer, SimpleHTTPRequestHandler # noqa: E402\n", "\n", @@ -630,7 +461,7 @@ ] }, "kernelspec": { - "display_name": "flaml_dev", + "display_name": ".venv-3.9", "language": "python", "name": "python3" }, @@ -644,7 +475,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.9.20" } }, "nbformat": 4, diff --git a/pyproject.toml b/pyproject.toml index 9ced6c6460..c9a6b572c6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ 
-62,22 +62,13 @@ files = [ "autogen/tools", "autogen/interop", "autogen/agentchat/realtime_agent", - "autogen/messages/__init__.py", - "autogen/messages/agent_messages.py", - "autogen/messages/base_message.py", - "autogen/messages/client_messages.py", - "autogen/messages/print_message.py", + "autogen/messages", "test/test_pydantic.py", "test/io", "test/tools", "test/interop", "test/agentchat/realtime_agent", - "test/messages/__init__.py", - "test/messages/conftest.py", - "test/messages/test_agent_messages.py", - "test/messages/test_base_message.py", - "test/messages/test_client_messages.py", - "test/messages/test_print_message.py", + "test/messages", ] exclude = [ "autogen/math_utils\\.py", diff --git a/test/messages/test_base_message.py b/test/messages/test_base_message.py index b3e2ed1d2e..de51c4d607 100644 --- a/test/messages/test_base_message.py +++ b/test/messages/test_base_message.py @@ -2,15 +2,60 @@ # # SPDX-License-Identifier: Apache-2.0 +from contextlib import contextmanager +from typing import Generator, Type from uuid import uuid4 -from autogen.messages.base_message import BaseMessage +import pytest +from pydantic import BaseModel +from autogen.messages.base_message import ( + BaseMessage, + _message_classes, + get_annotated_type_for_message_classes, + wrap_message, +) -def test_BaseMessage() -> None: - uuid = uuid4() - actual = BaseMessage(uuid=uuid) - expected_model_dump = {"uuid": uuid} +@pytest.fixture() +def TestMessage() -> Generator[Type[BaseMessage], None, None]: + org_message_classes = _message_classes.copy() + try: - assert actual.model_dump() == expected_model_dump + @wrap_message + class TestMessage(BaseMessage): + sender: str + receiver: str + content: str + + yield TestMessage + finally: + _message_classes.clear() + _message_classes.update(org_message_classes) + + +class TestBaseMessage: + def test_model_dump_validate(self, TestMessage: Type[BaseModel]) -> None: + uuid = uuid4() + + print(f"{TestMessage=}") + + message = 
TestMessage(uuid=uuid, sender="sender", receiver="receiver", content="Hello, World!") + + expected = { + "type": "test_message", + "content": { + "uuid": uuid, + "sender": "sender", + "receiver": "receiver", + "content": "Hello, World!", + }, + } + actual = message.model_dump() + assert actual == expected + + model = TestMessage.model_validate(expected) + assert model.model_dump() == expected + + model = TestMessage(**expected) + assert model.model_dump() == expected From 75afaa349172b6bcb50095e785f21e44ddd2e1d9 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Tue, 7 Jan 2025 08:29:59 +0100 Subject: [PATCH 38/61] Refactoring --- autogen/agentchat/chat.py | 4 +- autogen/agentchat/groupchat.py | 16 ++--- autogen/messages/agent_messages.py | 21 +++---- autogen/messages/base_message.py | 43 +++++--------- test/messages/test_agent_messages.py | 88 ++++++++++++++++------------ 5 files changed, 86 insertions(+), 86 deletions(-) diff --git a/autogen/agentchat/chat.py b/autogen/agentchat/chat.py index eab21dcece..9745e67ef6 100644 --- a/autogen/agentchat/chat.py +++ b/autogen/agentchat/chat.py @@ -15,7 +15,7 @@ from ..formatting_utils import colored from ..io.base import IOStream -from ..messages.agent_messages import PostCarryoverProcessing +from ..messages.agent_messages import PostCarryoverProcessingMessage from .utils import consolidate_chat_info logger = logging.getLogger(__name__) @@ -132,7 +132,7 @@ def __post_carryover_processing(chat_info: dict[str, Any]) -> None: UserWarning, ) - iostream.send(PostCarryoverProcessing(chat_info=chat_info)) + iostream.send(PostCarryoverProcessingMessage(chat_info=chat_info)) def initiate_chats(chat_queue: list[dict[str, Any]]) -> list[ChatResult]: diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py index ac79a89c1e..d065c62aa0 100644 --- a/autogen/agentchat/groupchat.py +++ b/autogen/agentchat/groupchat.py @@ -19,13 +19,13 @@ from ..graph_utils import check_graph_validity, invert_disallowed_to_allowed from 
..io.base import IOStream from ..messages.agent_messages import ( - ClearAgentsHistory, - GroupChatResume, + ClearAgentsHistoryMessage, + GroupChatResumeMessage, GroupChatRunChat, SelectSpeaker, SelectSpeakerInvalidInput, SelectSpeakerTryCountExceeded, - SpeakerAttempt, + SpeakerAttemptMessage, ) from ..oai.client import ModelClient from ..runtime_logging import log_new_agent, logging_enabled @@ -856,7 +856,7 @@ def _validate_speaker_name( if self.select_speaker_auto_verbose: iostream = IOStream.get_default() iostream.send( - SpeakerAttempt( + SpeakerAttemptMessage( mentions=mentions, attempt=attempt, attempts_left=attempts_left, @@ -1361,7 +1361,7 @@ def resume( if not silent: iostream = IOStream.get_default() - iostream.send(GroupChatResume(last_speaker_name=last_speaker_name, messages=messages, silent=silent)) + iostream.send(GroupChatResumeMessage(last_speaker_name=last_speaker_name, messages=messages, silent=silent)) # Update group chat settings for resuming self._groupchat.send_introductions = False @@ -1465,7 +1465,7 @@ async def a_resume( if not silent: iostream = IOStream.get_default() - iostream.send(GroupChatResume(last_speaker_name=last_speaker_name, messages=messages, silent=silent)) + iostream.send(GroupChatResumeMessage(last_speaker_name=last_speaker_name, messages=messages, silent=silent)) # Update group chat settings for resuming self._groupchat.send_introductions = False @@ -1621,7 +1621,9 @@ def clear_agents_history(self, reply: dict, groupchat: GroupChat) -> str: "The last tool call message will be saved to prevent errors caused by tool response without tool call." 
) # clear history - iostream.send(ClearAgentsHistory(agent=agent_to_memory_clear, nr_messages_to_preserve=nr_messages_to_preserve)) + iostream.send( + ClearAgentsHistoryMessage(agent=agent_to_memory_clear, nr_messages_to_preserve=nr_messages_to_preserve) + ) if agent_to_memory_clear: agent_to_memory_clear.clear_history(nr_messages_to_preserve=nr_messages_to_preserve) else: diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index 47d59d319c..40bb986884 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -7,6 +7,7 @@ from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, Union from uuid import UUID +from pydantic import BaseModel from termcolor import colored from ..code_utils import content_str @@ -26,10 +27,10 @@ "FunctionCallMessage", "ToolCallMessage", "ContentMessage", - "PostCarryoverProcessing", - "ClearAgentsHistory", - "SpeakerAttempt", - "GroupChatResume", + "PostCarryoverProcessingMessage", + "ClearAgentsHistoryMessage", + "SpeakerAttemptMessage", + "GroupChatResumeMessage", "GroupChatRunChat", "TerminationAndHumanReply", "ExecuteCodeBlock", @@ -103,8 +104,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f("\n", "-" * 80, flush=True, sep="") -@wrap_message -class FunctionCall(BaseMessage): +class FunctionCall(BaseModel): name: Optional[str] = None arguments: Optional[str] = None @@ -254,7 +254,7 @@ def create_received_message_model( @wrap_message -class PostCarryoverProcessing(BaseMessage): +class PostCarryoverProcessingMessage(BaseMessage): carryover: Union[str, list[Union[str, dict[str, Any], Any]]] message: str verbose: bool = False @@ -337,7 +337,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: @wrap_message -class ClearAgentsHistory(BaseMessage): +class ClearAgentsHistoryMessage(BaseMessage): agent_name: Optional[str] = None nr_messages_to_preserve: Optional[int] = None @@ -367,8 +367,9 @@ def print(self, f: 
Optional[Callable[..., Any]] = None) -> None: f("Clearing history for all agents.") +# todo: break into multiple messages @wrap_message -class SpeakerAttempt(BaseMessage): +class SpeakerAttemptMessage(BaseMessage): mentions: dict[str, int] attempt: int attempts_left: int @@ -423,7 +424,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: @wrap_message -class GroupChatResume(BaseMessage): +class GroupChatResumeMessage(BaseMessage): last_speaker_name: str messages: list[dict[str, Any]] verbose: Optional[bool] = False diff --git a/autogen/messages/base_message.py b/autogen/messages/base_message.py index 65b59bac48..4be41447b4 100644 --- a/autogen/messages/base_message.py +++ b/autogen/messages/base_message.py @@ -29,35 +29,6 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: """ ... - # def model_dump( - # self, - # *, - # mode="python", - # include=None, - # exclude=None, - # context=None, - # by_alias=False, - # exclude_unset=False, - # exclude_defaults=False, - # exclude_none=False, - # round_trip=False, - # warnings=True, - # serialize_as_any=False - # ): - # return super().model_dump( - # mode=mode, - # include=include, - # exclude=exclude, - # context=context, - # by_alias=by_alias, - # exclude_unset=exclude_unset, - # exclude_defaults=exclude_defaults, - # exclude_none=exclude_none, - # round_trip=round_trip, - # warnings=warnings, - # serialize_as_any=serialize_as_any, - # ) - def camel2snake(name: str) -> str: return "".join(["_" + i.lower() if i.isupper() else i for i in name]).lstrip("_") @@ -67,9 +38,20 @@ def camel2snake(name: str) -> str: def wrap_message(message_cls: Type[BaseMessage]) -> Type[BaseModel]: + """Wrap a message class with a type field to be used in a union type + + This is needed for proper serialization and deserialization of messages in a union type. 
+ + Args: + message_cls (Type[BaseMessage]): Message class to wrap + """ global _message_classes + if not message_cls.__name__.endswith("Message"): + raise ValueError("Message class name must end with 'Message'") + type_name = camel2snake(message_cls.__name__) + type_name = type_name[: -len("_message")] class WrapperBase(BaseModel): # these types are generated dynamically so we need to disable the type checker @@ -86,6 +68,9 @@ def __init__(self, **data: Any): else: super().__init__(content=message_cls(**data), **data) + def print(self, f: Optional[Callable[..., Any]] = None) -> None: + self.content.print(f) # type: ignore[attr-defined] + Wrapper = create_model(message_cls.__name__, __base__=WrapperBase) _message_classes[type_name] = Wrapper diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index 3a31f867a6..445016cf4a 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -12,7 +12,7 @@ from autogen.agentchat.conversable_agent import ConversableAgent from autogen.coding.base import CodeBlock from autogen.messages.agent_messages import ( - ClearAgentsHistory, + ClearAgentsHistoryMessage, ClearConversableAgentHistory, ClearConversableAgentHistoryWarning, ContentMessage, @@ -24,14 +24,14 @@ FunctionCallMessage, FunctionResponseMessage, GenerateCodeExecutionReply, - GroupChatResume, + GroupChatResumeMessage, GroupChatRunChat, MessageRole, - PostCarryoverProcessing, + PostCarryoverProcessingMessage, SelectSpeaker, SelectSpeakerInvalidInput, SelectSpeakerTryCountExceeded, - SpeakerAttempt, + SpeakerAttemptMessage, TerminationAndHumanReply, TextMessage, ToolCall, @@ -152,38 +152,50 @@ def test_function_response( assert mock.call_args_list == expected_call_args_list -def test_function_call(uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: - message = {"content": "Let's play a game.", "function_call": {"name": "get_random_number", "arguments": "{}"}} +class 
TestFunctionCallMessage: + def test_print(self, uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: + fc_message = { + "content": "Let's play a game.", + "function_call": {"name": "get_random_number", "arguments": "{}"}, + } - actual = create_received_message_model(uuid=uuid, message=message, sender=sender, recipient=recipient) + message = create_received_message_model(uuid=uuid, message=fc_message, sender=sender, recipient=recipient) - assert isinstance(actual, FunctionCallMessage) + assert isinstance(message, FunctionCallMessage) - assert actual.content == "Let's play a game." - assert actual.sender_name == "sender" - assert actual.recipient_name == "recipient" + actual = message.model_dump() + expected = { + "type": "function_call", + "content": { + "content": "Let's play a game.", + "sender_name": "sender", + "recipient_name": "recipient", + "uuid": uuid, + "function_call": {"name": "get_random_number", "arguments": "{}"}, + }, + } + assert actual == expected, actual - assert isinstance(actual.function_call, FunctionCall) - assert actual.function_call.name == "get_random_number" - assert actual.function_call.arguments == "{}" + mock = MagicMock() + message.print(f=mock) - mock = MagicMock() - actual.print(f=mock) + # print(mock.call_args_list) - # print(mock.call_args_list) + expected_call_args_list = [ + call("\x1b[33msender\x1b[0m (to recipient):\n", flush=True), + call("Let's play a game.", flush=True), + call("\x1b[32m***** Suggested function call: get_random_number *****\x1b[0m", flush=True), + call("Arguments: \n", "{}", flush=True, sep=""), + call("\x1b[32m******************************************************\x1b[0m", flush=True), + call( + "\n", + "--------------------------------------------------------------------------------", + flush=True, + sep="", + ), + ] - expected_call_args_list = [ - call("\x1b[33msender\x1b[0m (to recipient):\n", flush=True), - call("Let's play a game.", flush=True), - call("\x1b[32m***** Suggested 
function call: get_random_number *****\x1b[0m", flush=True), - call("Arguments: \n", "{}", flush=True, sep=""), - call("\x1b[32m******************************************************\x1b[0m", flush=True), - call( - "\n", "--------------------------------------------------------------------------------", flush=True, sep="" - ), - ] - - assert mock.call_args_list == expected_call_args_list + assert mock.call_args_list == expected_call_args_list @pytest.mark.parametrize( @@ -336,8 +348,8 @@ def test_PostCarryoverProcessing(uuid: UUID, sender: ConversableAgent, recipient "max_turns": 5, } - actual = PostCarryoverProcessing(uuid=uuid, chat_info=chat_info) - assert isinstance(actual, PostCarryoverProcessing) + actual = PostCarryoverProcessingMessage(uuid=uuid, chat_info=chat_info) + assert isinstance(actual, PostCarryoverProcessingMessage) expected = { "uuid": uuid, @@ -411,7 +423,7 @@ def test__process_carryover( "max_turns": 5, } - post_carryover_processing = PostCarryoverProcessing(uuid=uuid, chat_info=chat_info) + post_carryover_processing = PostCarryoverProcessingMessage(uuid=uuid, chat_info=chat_info) expected_model_dump = { "uuid": uuid, "carryover": carryover, @@ -449,8 +461,8 @@ def test__process_carryover( def test_ClearAgentsHistory( agent: Optional[ConversableAgent], nr_messages_to_preserve: Optional[int], expected: str, uuid: UUID ) -> None: - actual = ClearAgentsHistory(uuid=uuid, agent=agent, nr_messages_to_preserve=nr_messages_to_preserve) - assert isinstance(actual, ClearAgentsHistory) + actual = ClearAgentsHistoryMessage(uuid=uuid, agent=agent, nr_messages_to_preserve=nr_messages_to_preserve) + assert isinstance(actual, ClearAgentsHistoryMessage) expected_model_dump = { "uuid": uuid, @@ -484,10 +496,10 @@ def test_SpeakerAttempt(mentions: dict[str, int], expected: str, uuid: UUID) -> attempts_left = 2 verbose = True - actual = SpeakerAttempt( + actual = SpeakerAttemptMessage( uuid=uuid, mentions=mentions, attempt=attempt, attempts_left=attempts_left, 
select_speaker_auto_verbose=verbose ) - assert isinstance(actual, SpeakerAttempt) + assert isinstance(actual, SpeakerAttemptMessage) expected_model_dump = { "uuid": uuid, @@ -516,8 +528,8 @@ def test_GroupChatResume(uuid: UUID) -> None: ] silent = False - actual = GroupChatResume(uuid=uuid, last_speaker_name=last_speaker_name, messages=messages, silent=silent) - assert isinstance(actual, GroupChatResume) + actual = GroupChatResumeMessage(uuid=uuid, last_speaker_name=last_speaker_name, messages=messages, silent=silent) + assert isinstance(actual, GroupChatResumeMessage) expected_model_dump = { "uuid": uuid, From 3188685cfadd1df866c061b2b71514f7cfb9f979 Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Tue, 7 Jan 2025 13:07:39 +0530 Subject: [PATCH 39/61] Rename classes to have Message suffix --- autogen/agentchat/conversable_agent.py | 46 ++++++++------- autogen/agentchat/groupchat.py | 16 +++--- autogen/messages/agent_messages.py | 43 +++++++------- test/messages/test_agent_messages.py | 80 ++++++++++++++------------ 4 files changed, 95 insertions(+), 90 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index cf75623ecf..65ae67ff1e 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -42,15 +42,15 @@ from ..formatting_utils import colored from ..io.base import IOStream from ..messages.agent_messages import ( - ClearConversableAgentHistory, - ClearConversableAgentHistoryWarning, - ConversableAgentUsageSummary, - ExecuteCodeBlock, - ExecutedFunction, - ExecuteFunction, - GenerateCodeExecutionReply, - TerminationAndHumanReply, - UsingAutoReply, + ClearConversableAgentHistoryMessage, + ClearConversableAgentHistoryWarningMessage, + ConversableAgentUsageSummaryMessage, + ExecuteCodeBlockMessage, + ExecutedFunctionMessage, + ExecuteFunctionMessage, + GenerateCodeExecutionReplyMessage, + TerminationAndHumanReplyMessage, + UsingAutoReplyMessage, 
create_received_message_model, ) from ..oai.client import ModelClient, OpenAIWrapper @@ -1409,14 +1409,16 @@ def clear_history(self, recipient: Optional[Agent] = None, nr_messages_to_preser no_messages_preserved += 1 # Remove messages from history except last `nr_messages_to_preserve` messages. self._oai_messages[key] = self._oai_messages[key][-nr_messages_to_preserve_internal:] - iostream.send(ClearConversableAgentHistory(agent=self, no_messages_preserved=no_messages_preserved)) + iostream.send( + ClearConversableAgentHistoryMessage(agent=self, no_messages_preserved=no_messages_preserved) + ) else: self._oai_messages.clear() else: self._oai_messages[recipient].clear() # clear_conversable_agent_history.print_warning(iostream.print) if nr_messages_to_preserve: - iostream.send(ClearConversableAgentHistoryWarning(recipient=self)) + iostream.send(ClearConversableAgentHistoryWarningMessage(recipient=self)) def generate_oai_reply( self, @@ -1544,7 +1546,7 @@ def _generate_code_execution_reply_using_executor( if len(code_blocks) == 0: continue - iostream.send(GenerateCodeExecutionReply(code_blocks=code_blocks, sender=sender, recipient=self)) + iostream.send(GenerateCodeExecutionReplyMessage(code_blocks=code_blocks, sender=sender, recipient=self)) # found code blocks, execute code. 
code_result = self._code_executor.execute_code_blocks(code_blocks) @@ -1840,7 +1842,7 @@ def check_termination_and_human_reply( # print the no_human_input_msg if no_human_input_msg: iostream.send( - TerminationAndHumanReply(no_human_input_msg=no_human_input_msg, sender=sender, recipient=self) + TerminationAndHumanReplyMessage(no_human_input_msg=no_human_input_msg, sender=sender, recipient=self) ) # stop the conversation @@ -1881,7 +1883,7 @@ def check_termination_and_human_reply( # increment the consecutive_auto_reply_counter self._consecutive_auto_reply_counter[sender] += 1 if self.human_input_mode != "NEVER": - iostream.send(UsingAutoReply(human_input_mode=self.human_input_mode, sender=sender, recipient=self)) + iostream.send(UsingAutoReplyMessage(human_input_mode=self.human_input_mode, sender=sender, recipient=self)) return False, None @@ -1955,7 +1957,7 @@ async def a_check_termination_and_human_reply( # print the no_human_input_msg if no_human_input_msg: iostream.send( - TerminationAndHumanReply(no_human_input_msg=no_human_input_msg, sender=sender, recipient=self) + TerminationAndHumanReplyMessage(no_human_input_msg=no_human_input_msg, sender=sender, recipient=self) ) # stop the conversation @@ -1996,7 +1998,7 @@ async def a_check_termination_and_human_reply( # increment the consecutive_auto_reply_counter self._consecutive_auto_reply_counter[sender] += 1 if self.human_input_mode != "NEVER": - iostream.send(UsingAutoReply(human_input_mode=self.human_input_mode, sender=sender, recipient=self)) + iostream.send(UsingAutoReplyMessage(human_input_mode=self.human_input_mode, sender=sender, recipient=self)) return False, None @@ -2233,7 +2235,7 @@ def execute_code_blocks(self, code_blocks): if not lang: lang = infer_lang(code) - iostream.send(ExecuteCodeBlock(code=code, language=lang, code_block_count=i, recipient=self)) + iostream.send(ExecuteCodeBlockMessage(code=code, language=lang, code_block_count=i, recipient=self)) if lang in ["bash", "shell", "sh"]: exitcode, 
logs, image = self.run_code(code, lang=lang, **self._code_execution_config) @@ -2330,7 +2332,7 @@ def execute_function( # Try to execute the function if arguments is not None: iostream.send( - ExecuteFunction(func_name=func_name, call_id=call_id, arguments=arguments, recipient=self) + ExecuteFunctionMessage(func_name=func_name, call_id=call_id, arguments=arguments, recipient=self) ) try: content = func(**arguments) @@ -2343,7 +2345,7 @@ def execute_function( if verbose: iostream.send( - ExecutedFunction( + ExecutedFunctionMessage( func_name=func_name, call_id=call_id, arguments=arguments, content=content, recipient=self ) ) @@ -2391,7 +2393,7 @@ async def a_execute_function( # Try to execute the function if arguments is not None: iostream.send( - ExecuteFunction(func_name=func_name, call_id=call_id, arguments=arguments, recipient=self) + ExecuteFunctionMessage(func_name=func_name, call_id=call_id, arguments=arguments, recipient=self) ) try: if inspect.iscoroutinefunction(func): @@ -2408,7 +2410,7 @@ async def a_execute_function( if verbose: iostream.send( - ExecutedFunction( + ExecutedFunctionMessage( func_name=func_name, call_id=call_id, arguments=arguments, content=content, recipient=self ) ) @@ -2874,7 +2876,7 @@ def process_last_received_message(self, messages: list[dict]) -> list[dict]: def print_usage_summary(self, mode: Union[str, list[str]] = ["actual", "total"]) -> None: """Print the usage summary.""" iostream = IOStream.get_default() - iostream.send(ConversableAgentUsageSummary(recipient=self, client=self.client)) + iostream.send(ConversableAgentUsageSummaryMessage(recipient=self, client=self.client)) if self.client is not None: self.client.print_usage_summary(mode) diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py index d065c62aa0..1541034a70 100644 --- a/autogen/agentchat/groupchat.py +++ b/autogen/agentchat/groupchat.py @@ -21,10 +21,10 @@ from ..messages.agent_messages import ( ClearAgentsHistoryMessage, 
GroupChatResumeMessage, - GroupChatRunChat, - SelectSpeaker, - SelectSpeakerInvalidInput, - SelectSpeakerTryCountExceeded, + GroupChatRunChatMessage, + SelectSpeakerInvalidInputMessage, + SelectSpeakerMessage, + SelectSpeakerTryCountExceededMessage, SpeakerAttemptMessage, ) from ..oai.client import ModelClient @@ -398,14 +398,14 @@ def manual_select_speaker(self, agents: Optional[list[Agent]] = None) -> Union[A if agents is None: agents = self.agents - iostream.send(SelectSpeaker(agents=agents)) + iostream.send(SelectSpeakerMessage(agents=agents)) try_count = 0 # Assume the user will enter a valid number within 3 tries, otherwise use auto selection to avoid blocking. while try_count <= 3: try_count += 1 if try_count >= 3: - iostream.send(SelectSpeakerTryCountExceeded(try_count=try_count, agents=agents)) + iostream.send(SelectSpeakerTryCountExceededMessage(try_count=try_count, agents=agents)) break try: i = iostream.input( @@ -419,7 +419,7 @@ def manual_select_speaker(self, agents: Optional[list[Agent]] = None) -> Union[A else: raise ValueError except ValueError: - iostream.send(SelectSpeakerInvalidInput(agents=agents)) + iostream.send(SelectSpeakerInvalidInputMessage(agents=agents)) return None def random_select_speaker(self, agents: Optional[list[Agent]] = None) -> Union[Agent, None]: @@ -1156,7 +1156,7 @@ def run_chat( speaker = groupchat.select_speaker(speaker, self) if not silent: iostream = IOStream.get_default() - iostream.send(GroupChatRunChat(speaker=speaker, silent=silent)) + iostream.send(GroupChatRunChatMessage(speaker=speaker, silent=silent)) # let the speaker speak reply = speaker.generate_reply(sender=self) except KeyboardInterrupt: diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index 40bb986884..e85491bfa7 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -31,14 +31,14 @@ "ClearAgentsHistoryMessage", "SpeakerAttemptMessage", "GroupChatResumeMessage", - "GroupChatRunChat", 
- "TerminationAndHumanReply", - "ExecuteCodeBlock", - "ExecuteFunction", - "SelectSpeaker", - "ClearConversableAgentHistory", - "GenerateCodeExecutionReply", - "ConversableAgentUsageSummary", + "GroupChatRunChatMessage", + "TerminationAndHumanReplyMessage", + "ExecuteCodeBlockMessage", + "ExecuteFunctionMessage", + "SelectSpeakerMessage", + "ClearConversableAgentHistoryMessage", + "GenerateCodeExecutionReplyMessage", + "ConversableAgentUsageSummaryMessage", "TextMessage", ] @@ -74,7 +74,6 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f("\n", "-" * 80, flush=True, sep="") -@wrap_message class ToolResponse(BaseMessage): tool_call_id: Optional[str] = None role: MessageRole = "tool" @@ -450,7 +449,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: @wrap_message -class GroupChatRunChat(BaseMessage): +class GroupChatRunChatMessage(BaseMessage): speaker_name: str verbose: Optional[bool] = False @@ -464,7 +463,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: @wrap_message -class TerminationAndHumanReply(BaseMessage): +class TerminationAndHumanReplyMessage(BaseMessage): no_human_input_msg: str sender_name: str recipient_name: str @@ -491,7 +490,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: @wrap_message -class UsingAutoReply(BaseMessage): +class UsingAutoReplyMessage(BaseMessage): human_input_mode: str sender_name: str recipient_name: str @@ -518,7 +517,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: @wrap_message -class ExecuteCodeBlock(BaseMessage): +class ExecuteCodeBlockMessage(BaseMessage): code: str language: str code_block_count: int @@ -544,7 +543,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: @wrap_message -class ExecuteFunction(BaseMessage): +class ExecuteFunctionMessage(BaseMessage): func_name: str call_id: Optional[str] = None arguments: dict[str, Any] @@ -576,7 +575,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> 
None: @wrap_message -class ExecutedFunction(BaseMessage): +class ExecutedFunctionMessage(BaseMessage): func_name: str call_id: Optional[str] = None arguments: dict[str, Any] @@ -615,7 +614,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: @wrap_message -class SelectSpeaker(BaseMessage): +class SelectSpeakerMessage(BaseMessage): agent_names: Optional[list[str]] = None def __init__(self, *, uuid: Optional[UUID] = None, agents: Optional[list["Agent"]] = None): @@ -632,7 +631,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: @wrap_message -class SelectSpeakerTryCountExceeded(BaseMessage): +class SelectSpeakerTryCountExceededMessage(BaseMessage): try_count: int agent_names: Optional[list[str]] = None @@ -647,7 +646,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: @wrap_message -class SelectSpeakerInvalidInput(BaseMessage): +class SelectSpeakerInvalidInputMessage(BaseMessage): agent_names: Optional[list[str]] = None def __init__(self, *, uuid: Optional[UUID] = None, agents: Optional[list["Agent"]] = None): @@ -661,7 +660,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: @wrap_message -class ClearConversableAgentHistory(BaseMessage): +class ClearConversableAgentHistoryMessage(BaseMessage): agent_name: str recipient_name: str no_messages_preserved: int @@ -685,7 +684,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: @wrap_message -class ClearConversableAgentHistoryWarning(BaseMessage): +class ClearConversableAgentHistoryWarningMessage(BaseMessage): recipient_name: str def __init__(self, *, uuid: Optional[UUID] = None, recipient: "Agent"): @@ -707,7 +706,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: @wrap_message -class GenerateCodeExecutionReply(BaseMessage): +class GenerateCodeExecutionReplyMessage(BaseMessage): code_block_languages: list[str] sender_name: Optional[str] = None recipient_name: str @@ -752,7 +751,7 @@ def print(self, f: 
Optional[Callable[..., Any]] = None) -> None: @wrap_message -class ConversableAgentUsageSummary(BaseMessage): +class ConversableAgentUsageSummaryMessage(BaseMessage): recipient_name: str is_client_empty: bool diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index 445016cf4a..e1332cbee5 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -13,32 +13,32 @@ from autogen.coding.base import CodeBlock from autogen.messages.agent_messages import ( ClearAgentsHistoryMessage, - ClearConversableAgentHistory, - ClearConversableAgentHistoryWarning, + ClearConversableAgentHistoryMessage, + ClearConversableAgentHistoryWarningMessage, ContentMessage, - ConversableAgentUsageSummary, - ExecuteCodeBlock, - ExecutedFunction, - ExecuteFunction, + ConversableAgentUsageSummaryMessage, + ExecuteCodeBlockMessage, + ExecutedFunctionMessage, + ExecuteFunctionMessage, FunctionCall, FunctionCallMessage, FunctionResponseMessage, - GenerateCodeExecutionReply, + GenerateCodeExecutionReplyMessage, GroupChatResumeMessage, - GroupChatRunChat, + GroupChatRunChatMessage, MessageRole, PostCarryoverProcessingMessage, - SelectSpeaker, - SelectSpeakerInvalidInput, - SelectSpeakerTryCountExceeded, + SelectSpeakerInvalidInputMessage, + SelectSpeakerMessage, + SelectSpeakerTryCountExceededMessage, SpeakerAttemptMessage, - TerminationAndHumanReply, + TerminationAndHumanReplyMessage, TextMessage, ToolCall, ToolCallMessage, ToolResponse, ToolResponseMessage, - UsingAutoReply, + UsingAutoReplyMessage, create_received_message_model, ) from autogen.oai.client import OpenAIWrapper @@ -557,8 +557,8 @@ def test_GroupChatRunChat(uuid: UUID) -> None: ) silent = False - actual = GroupChatRunChat(uuid=uuid, speaker=speaker, silent=silent) - assert isinstance(actual, GroupChatRunChat) + actual = GroupChatRunChatMessage(uuid=uuid, speaker=speaker, silent=silent) + assert isinstance(actual, GroupChatRunChatMessage) expected_model_dump = { 
"uuid": uuid, @@ -583,13 +583,13 @@ def test_GroupChatRunChat(uuid: UUID) -> None: def test_TerminationAndHumanReply(uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: no_human_input_msg = "NO HUMAN INPUT RECEIVED." - actual = TerminationAndHumanReply( + actual = TerminationAndHumanReplyMessage( uuid=uuid, no_human_input_msg=no_human_input_msg, sender=sender, recipient=recipient, ) - assert isinstance(actual, TerminationAndHumanReply) + assert isinstance(actual, TerminationAndHumanReplyMessage) expected_model_dump = { "uuid": uuid, @@ -609,13 +609,13 @@ def test_TerminationAndHumanReply(uuid: UUID, sender: ConversableAgent, recipien def test_UsingAutoReply(uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: human_input_mode = "ALWAYS" - actual = UsingAutoReply( + actual = UsingAutoReplyMessage( uuid=uuid, human_input_mode=human_input_mode, sender=sender, recipient=recipient, ) - assert isinstance(actual, UsingAutoReply) + assert isinstance(actual, UsingAutoReplyMessage) expected_model_dump = { "uuid": uuid, @@ -637,10 +637,10 @@ def test_ExecuteCodeBlock(uuid: UUID, sender: ConversableAgent, recipient: Conve language = "python" code_block_count = 0 - actual = ExecuteCodeBlock( + actual = ExecuteCodeBlockMessage( uuid=uuid, code=code, language=language, code_block_count=code_block_count, recipient=recipient ) - assert isinstance(actual, ExecuteCodeBlock) + assert isinstance(actual, ExecuteCodeBlockMessage) expected_model_dump = { "uuid": uuid, @@ -668,8 +668,10 @@ def test_ExecuteFunction(uuid: UUID, recipient: ConversableAgent) -> None: call_id = "call_12345xyz" arguments = {"num_to_be_added": 5} - actual = ExecuteFunction(uuid=uuid, func_name=func_name, call_id=call_id, arguments=arguments, recipient=recipient) - assert isinstance(actual, ExecuteFunction) + actual = ExecuteFunctionMessage( + uuid=uuid, func_name=func_name, call_id=call_id, arguments=arguments, recipient=recipient + ) + assert isinstance(actual, 
ExecuteFunctionMessage) expected_model_dump = { "uuid": uuid, @@ -698,10 +700,10 @@ def test_ExecutedFunction(uuid: UUID, recipient: ConversableAgent) -> None: arguments = {"num_to_be_added": 5} content = "15" - actual = ExecutedFunction( + actual = ExecutedFunctionMessage( uuid=uuid, func_name=func_name, call_id=call_id, arguments=arguments, content=content, recipient=recipient ) - assert isinstance(actual, ExecutedFunction) + assert isinstance(actual, ExecutedFunctionMessage) expected_model_dump = { "uuid": uuid, @@ -731,8 +733,8 @@ def test_SelectSpeaker(uuid: UUID) -> None: ConversableAgent("charlie", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), ] - actual = SelectSpeaker(uuid=uuid, agents=agents) # type: ignore [arg-type] - assert isinstance(actual, SelectSpeaker) + actual = SelectSpeakerMessage(uuid=uuid, agents=agents) # type: ignore [arg-type] + assert isinstance(actual, SelectSpeakerMessage) expected_model_dump = { "uuid": uuid, @@ -758,8 +760,8 @@ def test_SelectSpeakerTryCountExceeded(uuid: UUID) -> None: ] try_count = 3 - actual = SelectSpeakerTryCountExceeded(uuid=uuid, try_count=try_count, agents=agents) # type: ignore [arg-type] - assert isinstance(actual, SelectSpeakerTryCountExceeded) + actual = SelectSpeakerTryCountExceededMessage(uuid=uuid, try_count=try_count, agents=agents) # type: ignore [arg-type] + assert isinstance(actual, SelectSpeakerTryCountExceededMessage) mock = MagicMock() actual.print(f=mock) @@ -774,8 +776,8 @@ def test_SelectSpeakerInvalidInput(uuid: UUID) -> None: ConversableAgent("charlie", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), ] - actual = SelectSpeakerInvalidInput(uuid=uuid, agents=agents) # type: ignore [arg-type] - assert isinstance(actual, SelectSpeakerInvalidInput) + actual = SelectSpeakerInvalidInputMessage(uuid=uuid, agents=agents) # type: ignore [arg-type] + assert isinstance(actual, SelectSpeakerInvalidInputMessage) expected_model_dump = { "uuid": uuid, 
@@ -792,8 +794,10 @@ def test_SelectSpeakerInvalidInput(uuid: UUID) -> None: def test_ClearConversableAgentHistory(uuid: UUID, recipient: ConversableAgent) -> None: no_messages_preserved = 5 - actual = ClearConversableAgentHistory(uuid=uuid, agent=recipient, no_messages_preserved=no_messages_preserved) - assert isinstance(actual, ClearConversableAgentHistory) + actual = ClearConversableAgentHistoryMessage( + uuid=uuid, agent=recipient, no_messages_preserved=no_messages_preserved + ) + assert isinstance(actual, ClearConversableAgentHistoryMessage) expected_model_dump = { "uuid": uuid, @@ -817,7 +821,7 @@ def test_ClearConversableAgentHistory(uuid: UUID, recipient: ConversableAgent) - def test_ClearConversableAgentHistoryWarning(uuid: UUID, recipient: ConversableAgent) -> None: - actual = ClearConversableAgentHistoryWarning(uuid=uuid, recipient=recipient) + actual = ClearConversableAgentHistoryWarningMessage(uuid=uuid, recipient=recipient) mock = MagicMock() actual.print(f=mock) @@ -861,8 +865,8 @@ def test_GenerateCodeExecutionReply( sender: ConversableAgent, recipient: ConversableAgent, ) -> None: - actual = GenerateCodeExecutionReply(uuid=uuid, code_blocks=code_blocks, sender=sender, recipient=recipient) - assert isinstance(actual, GenerateCodeExecutionReply) + actual = GenerateCodeExecutionReplyMessage(uuid=uuid, code_blocks=code_blocks, sender=sender, recipient=recipient) + assert isinstance(actual, GenerateCodeExecutionReplyMessage) expected_model_dump = { "uuid": uuid, @@ -894,8 +898,8 @@ def test_ConversableAgentUsageSummary( uuid: UUID, recipient: ConversableAgent, ) -> None: - actual = ConversableAgentUsageSummary(uuid=uuid, recipient=recipient, client=client) - assert isinstance(actual, ConversableAgentUsageSummary) + actual = ConversableAgentUsageSummaryMessage(uuid=uuid, recipient=recipient, client=client) + assert isinstance(actual, ConversableAgentUsageSummaryMessage) expected_model_dump = { "uuid": uuid, From e93eccd6fdc3cb6f2c9b549d08af04a8a8ac6335 
Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Tue, 7 Jan 2025 13:13:34 +0530 Subject: [PATCH 40/61] Fix TestToolResponseMessage --- autogen/messages/agent_messages.py | 5 +- test/messages/test_agent_messages.py | 96 +++++++++++++++------------- 2 files changed, 54 insertions(+), 47 deletions(-) diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index e85491bfa7..b7ad711dfe 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -74,7 +74,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f("\n", "-" * 80, flush=True, sep="") -class ToolResponse(BaseMessage): +class ToolResponse(BaseModel): tool_call_id: Optional[str] = None role: MessageRole = "tool" content: Union[str, int, float, bool] @@ -141,8 +141,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f("\n", "-" * 80, flush=True, sep="") -@wrap_message -class ToolCall(BaseMessage): +class ToolCall(BaseModel): id: Optional[str] = None function: FunctionCall type: str diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index e1332cbee5..e38e126919 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -62,56 +62,64 @@ def recipient() -> ConversableAgent: return ConversableAgent("recipient", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER") -def test_tool_responses(uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: - message = { - "role": "tool", - "tool_responses": [ - {"tool_call_id": "call_rJfVpHU3MXuPRR2OAdssVqUV", "role": "tool", "content": "Timer is done!"}, - {"tool_call_id": "call_zFZVYovdsklFYgqxttcOHwlr", "role": "tool", "content": "Stopwatch is done!"}, - ], - "content": "Timer is done!\\n\\nStopwatch is done!", - } - actual = create_received_message_model(uuid=uuid, message=message, sender=sender, recipient=recipient) - - assert isinstance(actual, 
ToolResponseMessage) - assert actual.role == "tool" - assert actual.sender_name == "sender" - assert actual.recipient_name == "recipient" - assert actual.content == "Timer is done!\\n\\nStopwatch is done!" - assert len(actual.tool_responses) == 2 +class TestToolResponseMessage: + def test_print(self, uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: + message = { + "role": "tool", + "tool_responses": [ + {"tool_call_id": "call_rJfVpHU3MXuPRR2OAdssVqUV", "role": "tool", "content": "Timer is done!"}, + {"tool_call_id": "call_zFZVYovdsklFYgqxttcOHwlr", "role": "tool", "content": "Stopwatch is done!"}, + ], + "content": "Timer is done!\\n\\nStopwatch is done!", + } + actual = create_received_message_model(uuid=uuid, message=message, sender=sender, recipient=recipient) + assert isinstance(actual, ToolResponseMessage) - assert isinstance(actual.tool_responses[0], ToolResponse) - assert actual.tool_responses[0].tool_call_id == "call_rJfVpHU3MXuPRR2OAdssVqUV" - assert actual.tool_responses[0].role == "tool" - assert actual.tool_responses[0].content == "Timer is done!" + expected = { + "type": "tool_response", + "content": { + "role": "tool", + "sender_name": "sender", + "recipient_name": "recipient", + "uuid": uuid, + "content": "Timer is done!\\n\\nStopwatch is done!", + "tool_responses": [ + {"tool_call_id": "call_rJfVpHU3MXuPRR2OAdssVqUV", "role": "tool", "content": "Timer is done!"}, + {"tool_call_id": "call_zFZVYovdsklFYgqxttcOHwlr", "role": "tool", "content": "Stopwatch is done!"}, + ], + }, + } - assert isinstance(actual.tool_responses[1], ToolResponse) - assert actual.tool_responses[1].tool_call_id == "call_zFZVYovdsklFYgqxttcOHwlr" - assert actual.tool_responses[1].role == "tool" - assert actual.tool_responses[1].content == "Stopwatch is done!" 
+ assert actual.model_dump() == expected - mock = MagicMock() - actual.print(f=mock) + mock = MagicMock() + actual.print(f=mock) - # print(mock.call_args_list) + # print(mock.call_args_list) - expected_call_args_list = [ - call("\x1b[33msender\x1b[0m (to recipient):\n", flush=True), - call("\x1b[32m***** Response from calling tool (call_rJfVpHU3MXuPRR2OAdssVqUV) *****\x1b[0m", flush=True), - call("Timer is done!", flush=True), - call("\x1b[32m**********************************************************************\x1b[0m", flush=True), - call( - "\n", "--------------------------------------------------------------------------------", flush=True, sep="" - ), - call("\x1b[32m***** Response from calling tool (call_zFZVYovdsklFYgqxttcOHwlr) *****\x1b[0m", flush=True), - call("Stopwatch is done!", flush=True), - call("\x1b[32m**********************************************************************\x1b[0m", flush=True), - call( - "\n", "--------------------------------------------------------------------------------", flush=True, sep="" - ), - ] + expected_call_args_list = [ + call("\x1b[33msender\x1b[0m (to recipient):\n", flush=True), + call("\x1b[32m***** Response from calling tool (call_rJfVpHU3MXuPRR2OAdssVqUV) *****\x1b[0m", flush=True), + call("Timer is done!", flush=True), + call("\x1b[32m**********************************************************************\x1b[0m", flush=True), + call( + "\n", + "--------------------------------------------------------------------------------", + flush=True, + sep="", + ), + call("\x1b[32m***** Response from calling tool (call_zFZVYovdsklFYgqxttcOHwlr) *****\x1b[0m", flush=True), + call("Stopwatch is done!", flush=True), + call("\x1b[32m**********************************************************************\x1b[0m", flush=True), + call( + "\n", + "--------------------------------------------------------------------------------", + flush=True, + sep="", + ), + ] - assert mock.call_args_list == expected_call_args_list + assert 
mock.call_args_list == expected_call_args_list @pytest.mark.parametrize( From fe483fbb9d0a0345380be166b4ddd5bd56883fe5 Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Tue, 7 Jan 2025 13:17:29 +0530 Subject: [PATCH 41/61] Fix TestToolCallMessage --- test/messages/test_agent_messages.py | 144 +++++++++++++++------------ 1 file changed, 78 insertions(+), 66 deletions(-) diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index e38e126919..40d3321c83 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -206,78 +206,90 @@ def test_print(self, uuid: UUID, sender: ConversableAgent, recipient: Conversabl assert mock.call_args_list == expected_call_args_list -@pytest.mark.parametrize( - "role", - ["assistant", None], -) -def test_tool_calls( - uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent, role: Optional[MessageRole] -) -> None: - message = { - "content": None, - "refusal": None, - "role": role, - "audio": None, - "function_call": None, - "tool_calls": [ - { - "id": "call_rJfVpHU3MXuPRR2OAdssVqUV", - "function": {"arguments": '{"num_seconds": "1"}', "name": "timer"}, - "type": "function", - }, - { - "id": "call_zFZVYovdsklFYgqxttcOHwlr", - "function": {"arguments": '{"num_seconds": "2"}', "name": "stopwatch"}, - "type": "function", - }, - ], - } - - actual = create_received_message_model(uuid=uuid, message=message, sender=sender, recipient=recipient) - - assert isinstance(actual, ToolCallMessage) - - assert actual.content is None - assert actual.refusal is None - assert actual.role == role - assert actual.audio is None - assert actual.function_call is None - assert actual.sender_name == "sender" - assert actual.recipient_name == "recipient" - - assert len(actual.tool_calls) == 2 +class TestToolCallMessage: + @pytest.mark.parametrize( + "role", + ["assistant", None], + ) + def test_print( + self, uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent, role: 
Optional[MessageRole] + ) -> None: + message = { + "content": None, + "refusal": None, + "role": role, + "audio": None, + "function_call": None, + "tool_calls": [ + { + "id": "call_rJfVpHU3MXuPRR2OAdssVqUV", + "function": {"arguments": '{"num_seconds": "1"}', "name": "timer"}, + "type": "function", + }, + { + "id": "call_zFZVYovdsklFYgqxttcOHwlr", + "function": {"arguments": '{"num_seconds": "2"}', "name": "stopwatch"}, + "type": "function", + }, + ], + } - assert isinstance(actual.tool_calls[0], ToolCall) - assert actual.tool_calls[0].id == "call_rJfVpHU3MXuPRR2OAdssVqUV" - assert actual.tool_calls[0].function.name == "timer" # type: ignore [union-attr] - assert actual.tool_calls[0].function.arguments == '{"num_seconds": "1"}' # type: ignore [union-attr] - assert actual.tool_calls[0].type == "function" + actual = create_received_message_model(uuid=uuid, message=message, sender=sender, recipient=recipient) + assert isinstance(actual, ToolCallMessage) - assert isinstance(actual.tool_calls[1], ToolCall) - assert actual.tool_calls[1].id == "call_zFZVYovdsklFYgqxttcOHwlr" - assert actual.tool_calls[1].function.name == "stopwatch" # type: ignore [union-attr] - assert actual.tool_calls[1].function.arguments == '{"num_seconds": "2"}' # type: ignore [union-attr] - assert actual.tool_calls[1].type == "function" + expected = { + "type": "tool_call", + "content": { + "content": None, + "refusal": None, + "role": role, + "audio": None, + "function_call": None, + "sender_name": "sender", + "recipient_name": "recipient", + "uuid": uuid, + "tool_calls": [ + { + "id": "call_rJfVpHU3MXuPRR2OAdssVqUV", + "function": {"arguments": '{"num_seconds": "1"}', "name": "timer"}, + "type": "function", + }, + { + "id": "call_zFZVYovdsklFYgqxttcOHwlr", + "function": {"arguments": '{"num_seconds": "2"}', "name": "stopwatch"}, + "type": "function", + }, + ], + }, + } + assert actual.model_dump() == expected - mock = MagicMock() - actual.print(f=mock) + mock = MagicMock() + actual.print(f=mock) - 
# print(mock.call_args_list) + # print(mock.call_args_list) - expected_call_args_list = [ - call("\x1b[33msender\x1b[0m (to recipient):\n", flush=True), - call("\x1b[32m***** Suggested tool call (call_rJfVpHU3MXuPRR2OAdssVqUV): timer *****\x1b[0m", flush=True), - call("Arguments: \n", '{"num_seconds": "1"}', flush=True, sep=""), - call("\x1b[32m**********************************************************************\x1b[0m", flush=True), - call("\x1b[32m***** Suggested tool call (call_zFZVYovdsklFYgqxttcOHwlr): stopwatch *****\x1b[0m", flush=True), - call("Arguments: \n", '{"num_seconds": "2"}', flush=True, sep=""), - call("\x1b[32m**************************************************************************\x1b[0m", flush=True), - call( - "\n", "--------------------------------------------------------------------------------", flush=True, sep="" - ), - ] + expected_call_args_list = [ + call("\x1b[33msender\x1b[0m (to recipient):\n", flush=True), + call("\x1b[32m***** Suggested tool call (call_rJfVpHU3MXuPRR2OAdssVqUV): timer *****\x1b[0m", flush=True), + call("Arguments: \n", '{"num_seconds": "1"}', flush=True, sep=""), + call("\x1b[32m**********************************************************************\x1b[0m", flush=True), + call( + "\x1b[32m***** Suggested tool call (call_zFZVYovdsklFYgqxttcOHwlr): stopwatch *****\x1b[0m", flush=True + ), + call("Arguments: \n", '{"num_seconds": "2"}', flush=True, sep=""), + call( + "\x1b[32m**************************************************************************\x1b[0m", flush=True + ), + call( + "\n", + "--------------------------------------------------------------------------------", + flush=True, + sep="", + ), + ] - assert mock.call_args_list == expected_call_args_list + assert mock.call_args_list == expected_call_args_list def test_context_message(uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: From 21cdc61a900acd5abe64748f7eb6a1a7de3cd93f Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran 
Date: Tue, 7 Jan 2025 13:18:00 +0530 Subject: [PATCH 42/61] Rename classes to have Message suffix --- autogen/messages/client_messages.py | 4 ++-- autogen/oai/client.py | 4 ++-- test/messages/test_client_messages.py | 14 +++++++------- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/autogen/messages/client_messages.py b/autogen/messages/client_messages.py index fa60d115e2..d7e4b02684 100644 --- a/autogen/messages/client_messages.py +++ b/autogen/messages/client_messages.py @@ -9,7 +9,7 @@ from .base_message import BaseMessage -__all__ = ["UsageSummary"] +__all__ = ["UsageSummaryMessage"] class ModelUsageSummary(BaseModel): @@ -56,7 +56,7 @@ def _change_usage_summary_format( return summary -class UsageSummary(BaseMessage): +class UsageSummaryMessage(BaseMessage): actual: ActualUsageSummary total: TotalUsageSummary mode: Mode diff --git a/autogen/oai/client.py b/autogen/oai/client.py index 9a3fd38e67..bfb914b115 100644 --- a/autogen/oai/client.py +++ b/autogen/oai/client.py @@ -22,7 +22,7 @@ from autogen.runtime_logging import log_chat_completion, log_new_client, log_new_wrapper, logging_enabled from autogen.token_count_utils import count_token -from ..messages.client_messages import StreamMessage, UsageSummary +from ..messages.client_messages import StreamMessage, UsageSummaryMessage TOOL_ENABLED = False try: @@ -1132,7 +1132,7 @@ def print_usage_summary(self, mode: Union[str, list[str]] = ["actual", "total"]) mode = "total" iostream.send( - UsageSummary( + UsageSummaryMessage( actual_usage_summary=self.actual_usage_summary, total_usage_summary=self.total_usage_summary, mode=mode ) ) diff --git a/test/messages/test_client_messages.py b/test/messages/test_client_messages.py index bc890bce91..d328cde7b2 100644 --- a/test/messages/test_client_messages.py +++ b/test/messages/test_client_messages.py @@ -13,7 +13,7 @@ ModelUsageSummary, StreamMessage, TotalUsageSummary, - UsageSummary, + UsageSummaryMessage, _change_usage_summary_format, ) @@ -109,11 
+109,11 @@ def test_usage_summary_print_same_actual_and_total( total_usage_summary: Optional[dict[str, Any]], uuid: UUID, ) -> None: - actual = UsageSummary( + actual = UsageSummaryMessage( uuid=uuid, actual_usage_summary=actual_usage_summary, total_usage_summary=total_usage_summary, mode="both" ) - assert isinstance(actual, UsageSummary) + assert isinstance(actual, UsageSummaryMessage) assert isinstance(actual.actual, ActualUsageSummary) assert isinstance(actual.total, TotalUsageSummary) assert actual.mode == "both" @@ -208,11 +208,11 @@ def test_usage_summary_print_different_actual_and_total( total_usage_summary: Optional[dict[str, Any]], uuid: UUID, ) -> None: - actual = UsageSummary( + actual = UsageSummaryMessage( uuid=uuid, actual_usage_summary=actual_usage_summary, total_usage_summary=total_usage_summary, mode="both" ) - assert isinstance(actual, UsageSummary) + assert isinstance(actual, UsageSummaryMessage) assert isinstance(actual.actual, ActualUsageSummary) assert isinstance(actual.total, TotalUsageSummary) assert actual.mode == "both" @@ -296,11 +296,11 @@ def test_usage_summary_print_none_actual_and_total( total_usage_summary: Optional[dict[str, Any]], uuid: UUID, ) -> None: - actual = UsageSummary( + actual = UsageSummaryMessage( uuid=uuid, actual_usage_summary=actual_usage_summary, total_usage_summary=total_usage_summary, mode="both" ) - assert isinstance(actual, UsageSummary) + assert isinstance(actual, UsageSummaryMessage) assert isinstance(actual.actual, ActualUsageSummary) assert isinstance(actual.total, TotalUsageSummary) assert actual.mode == "both" From a6dae98b9704fca0393d5a78f9f3de1de5bae7f9 Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Tue, 7 Jan 2025 13:30:28 +0530 Subject: [PATCH 43/61] Fix tests --- test/messages/test_agent_messages.py | 505 +++++++++++++++------------ 1 file changed, 274 insertions(+), 231 deletions(-) diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index 
40d3321c83..732f7b4f63 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -292,283 +292,326 @@ def test_print( assert mock.call_args_list == expected_call_args_list -def test_context_message(uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: - message = {"content": "hello {name}", "context": {"name": "there"}} +class TestTestToolCallMessage: + def test_print_context_message(self, uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: + message = {"content": "hello {name}", "context": {"name": "there"}} - actual = create_received_message_model(uuid=uuid, message=message, sender=sender, recipient=recipient) - - assert isinstance(actual, ContentMessage) - expected_model_dump = { - "uuid": uuid, - "content": "hello {name}", - "sender_name": "sender", - "recipient_name": "recipient", - } - assert actual.model_dump() == expected_model_dump - - mock = MagicMock() - actual.print(f=mock) - - # print(mock.call_args_list) + actual = create_received_message_model(uuid=uuid, message=message, sender=sender, recipient=recipient) - expected_call_args_list = [ - call("\x1b[33msender\x1b[0m (to recipient):\n", flush=True), - call("hello {name}", flush=True), - call( - "\n", "--------------------------------------------------------------------------------", flush=True, sep="" - ), - ] + assert isinstance(actual, ContentMessage) + expected_model_dump = { + "type": "content", + "content": { + "uuid": uuid, + "content": "hello {name}", + "sender_name": "sender", + "recipient_name": "recipient", + }, + } + assert actual.model_dump() == expected_model_dump - assert mock.call_args_list == expected_call_args_list + mock = MagicMock() + actual.print(f=mock) + # print(mock.call_args_list) -def test_context_lambda_message(uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: - message = { - "content": lambda context: f"hello {context['name']}", - "context": { - "name": "there", - }, 
- } + expected_call_args_list = [ + call("\x1b[33msender\x1b[0m (to recipient):\n", flush=True), + call("hello {name}", flush=True), + call( + "\n", + "--------------------------------------------------------------------------------", + flush=True, + sep="", + ), + ] - actual = create_received_message_model(uuid=uuid, message=message, sender=sender, recipient=recipient) + assert mock.call_args_list == expected_call_args_list - assert isinstance(actual, ContentMessage) - expected_model_dump = { - "uuid": uuid, - "content": "hello there", - "sender_name": "sender", - "recipient_name": "recipient", - } - assert actual.model_dump() == expected_model_dump + def test_print_context_lambda_message( + self, uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent + ) -> None: + message = { + "content": lambda context: f"hello {context['name']}", + "context": { + "name": "there", + }, + } - mock = MagicMock() - actual.print(f=mock) + actual = create_received_message_model(uuid=uuid, message=message, sender=sender, recipient=recipient) - # print(mock.call_args_list) + assert isinstance(actual, ContentMessage) + expected_model_dump = { + "type": "content", + "content": { + "uuid": uuid, + "content": "hello there", + "sender_name": "sender", + "recipient_name": "recipient", + }, + } + assert actual.model_dump() == expected_model_dump - expected_call_args_list = [ - call("\x1b[33msender\x1b[0m (to recipient):\n", flush=True), - call("hello there", flush=True), - call( - "\n", "--------------------------------------------------------------------------------", flush=True, sep="" - ), - ] + mock = MagicMock() + actual.print(f=mock) - assert mock.call_args_list == expected_call_args_list + # print(mock.call_args_list) + expected_call_args_list = [ + call("\x1b[33msender\x1b[0m (to recipient):\n", flush=True), + call("hello there", flush=True), + call( + "\n", + "--------------------------------------------------------------------------------", + flush=True, + sep="", + ), + 
] -def test_PostCarryoverProcessing(uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: - chat_info = { - "carryover": ["This is a test message 1", "This is a test message 2"], - "message": "Start chat", - "verbose": True, - "sender": sender, - "recipient": recipient, - "summary_method": "last_msg", - "max_turns": 5, - } + assert mock.call_args_list == expected_call_args_list - actual = PostCarryoverProcessingMessage(uuid=uuid, chat_info=chat_info) - assert isinstance(actual, PostCarryoverProcessingMessage) - expected = { - "uuid": uuid, - "carryover": ["This is a test message 1", "This is a test message 2"], - "message": "Start chat", - "verbose": True, - "sender_name": "sender", - "recipient_name": "recipient", - "summary_method": "last_msg", - "summary_args": None, - "max_turns": 5, - } - assert actual.model_dump() == expected, f"{actual.model_dump()} != {expected}" +class TestPostCarryoverProcessingMessage: + def test_print(self, uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: + chat_info = { + "carryover": ["This is a test message 1", "This is a test message 2"], + "message": "Start chat", + "verbose": True, + "sender": sender, + "recipient": recipient, + "summary_method": "last_msg", + "max_turns": 5, + } - mock = MagicMock() - actual.print(f=mock) + actual = PostCarryoverProcessingMessage(uuid=uuid, chat_info=chat_info) + assert isinstance(actual, PostCarryoverProcessingMessage) - # print(mock.call_args_list) + expected = { + "type": "post_carryover_processing", + "content": { + "uuid": uuid, + "carryover": ["This is a test message 1", "This is a test message 2"], + "message": "Start chat", + "verbose": True, + "sender_name": "sender", + "recipient_name": "recipient", + "summary_method": "last_msg", + "summary_args": None, + "max_turns": 5, + }, + } + assert actual.model_dump() == expected, f"{actual.model_dump()} != {expected}" - expected_call_args_list = [ - call( - 
"\x1b[34m\n********************************************************************************\x1b[0m", - flush=True, - sep="", - ), - call("\x1b[34mStarting a new chat....\x1b[0m", flush=True), - call("\x1b[34mMessage:\nStart chat\x1b[0m", flush=True), - call("\x1b[34mCarryover:\nThis is a test message 1\nThis is a test message 2\x1b[0m", flush=True), - call( - "\x1b[34m\n********************************************************************************\x1b[0m", - flush=True, - sep="", - ), - ] + mock = MagicMock() + actual.print(f=mock) - assert mock.call_args_list == expected_call_args_list + # print(mock.call_args_list) + expected_call_args_list = [ + call( + "\x1b[34m\n********************************************************************************\x1b[0m", + flush=True, + sep="", + ), + call("\x1b[34mStarting a new chat....\x1b[0m", flush=True), + call("\x1b[34mMessage:\nStart chat\x1b[0m", flush=True), + call("\x1b[34mCarryover:\nThis is a test message 1\nThis is a test message 2\x1b[0m", flush=True), + call( + "\x1b[34m\n********************************************************************************\x1b[0m", + flush=True, + sep="", + ), + ] -@pytest.mark.parametrize( - "carryover, expected", - [ - ("This is a test message 1", "This is a test message 1"), - ( - ["This is a test message 1", "This is a test message 2"], - "This is a test message 1\nThis is a test message 2", - ), - ( - [ - {"content": "This is a test message 1"}, - {"content": "This is a test message 2"}, - ], - "This is a test message 1\nThis is a test message 2", - ), - ([1, 2, 3], "1\n2\n3"), - ], -) -def test__process_carryover( - carryover: Union[str, list[Union[str, dict[str, Any], Any]]], - expected: str, - uuid: UUID, - sender: ConversableAgent, - recipient: ConversableAgent, -) -> None: - chat_info = { - "carryover": carryover, - "message": "Start chat", - "verbose": True, - "sender": sender, - "recipient": recipient, - "summary_method": "last_msg", - "max_turns": 5, - } + assert 
mock.call_args_list == expected_call_args_list - post_carryover_processing = PostCarryoverProcessingMessage(uuid=uuid, chat_info=chat_info) - expected_model_dump = { - "uuid": uuid, - "carryover": carryover, - "message": "Start chat", - "verbose": True, - "sender_name": "sender", - "recipient_name": "recipient", - "summary_method": "last_msg", - "summary_args": None, - "max_turns": 5, - } - assert post_carryover_processing.model_dump() == expected_model_dump + @pytest.mark.parametrize( + "carryover, expected", + [ + ("This is a test message 1", "This is a test message 1"), + ( + ["This is a test message 1", "This is a test message 2"], + "This is a test message 1\nThis is a test message 2", + ), + ( + [ + {"content": "This is a test message 1"}, + {"content": "This is a test message 2"}, + ], + "This is a test message 1\nThis is a test message 2", + ), + ([1, 2, 3], "1\n2\n3"), + ], + ) + def test__process_carryover( + self, + carryover: Union[str, list[Union[str, dict[str, Any], Any]]], + expected: str, + uuid: UUID, + sender: ConversableAgent, + recipient: ConversableAgent, + ) -> None: + chat_info = { + "carryover": carryover, + "message": "Start chat", + "verbose": True, + "sender": sender, + "recipient": recipient, + "summary_method": "last_msg", + "max_turns": 5, + } - actual = post_carryover_processing._process_carryover() - assert actual == expected + post_carryover_processing = PostCarryoverProcessingMessage(uuid=uuid, chat_info=chat_info) + expected_model_dump = { + "type": "post_carryover_processing", + "content": { + "uuid": uuid, + "carryover": carryover, + "message": "Start chat", + "verbose": True, + "sender_name": "sender", + "recipient_name": "recipient", + "summary_method": "last_msg", + "summary_args": None, + "max_turns": 5, + }, + } + assert post_carryover_processing.model_dump() == expected_model_dump + actual = post_carryover_processing.content._process_carryover() # type: ignore[attr-defined] + assert actual == expected 
-@pytest.mark.parametrize( - "agent, nr_messages_to_preserve, expected", - [ - (None, None, "Clearing history for all agents."), - (None, 5, "Clearing history for all agents except last 5 messages."), - ( - ConversableAgent("clear_agent", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), - None, - "Clearing history for clear_agent.", - ), - ( - ConversableAgent("clear_agent", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), - 5, - "Clearing history for clear_agent except last 5 messages.", - ), - ], -) -def test_ClearAgentsHistory( - agent: Optional[ConversableAgent], nr_messages_to_preserve: Optional[int], expected: str, uuid: UUID -) -> None: - actual = ClearAgentsHistoryMessage(uuid=uuid, agent=agent, nr_messages_to_preserve=nr_messages_to_preserve) - assert isinstance(actual, ClearAgentsHistoryMessage) - expected_model_dump = { - "uuid": uuid, - "agent_name": "clear_agent" if agent else None, - "nr_messages_to_preserve": nr_messages_to_preserve, - } - assert actual.model_dump() == expected_model_dump +class TestClearAgentsHistoryMessage: + @pytest.mark.parametrize( + "agent, nr_messages_to_preserve, expected", + [ + (None, None, "Clearing history for all agents."), + (None, 5, "Clearing history for all agents except last 5 messages."), + ( + ConversableAgent( + "clear_agent", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER" + ), + None, + "Clearing history for clear_agent.", + ), + ( + ConversableAgent( + "clear_agent", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER" + ), + 5, + "Clearing history for clear_agent except last 5 messages.", + ), + ], + ) + def test_print( + self, agent: Optional[ConversableAgent], nr_messages_to_preserve: Optional[int], expected: str, uuid: UUID + ) -> None: + actual = ClearAgentsHistoryMessage(uuid=uuid, agent=agent, nr_messages_to_preserve=nr_messages_to_preserve) + assert isinstance(actual, ClearAgentsHistoryMessage) - mock = 
MagicMock() - actual.print(f=mock) + expected_model_dump = { + "type": "clear_agents_history", + "content": { + "uuid": uuid, + "agent_name": "clear_agent" if agent else None, + "nr_messages_to_preserve": nr_messages_to_preserve, + }, + } + assert actual.model_dump() == expected_model_dump - # print(mock.call_args_list) + mock = MagicMock() + actual.print(f=mock) - expected_call_args_list = [call(expected)] - assert mock.call_args_list == expected_call_args_list + # print(mock.call_args_list) + expected_call_args_list = [call(expected)] + assert mock.call_args_list == expected_call_args_list -@pytest.mark.parametrize( - "mentions, expected", - [ - ({"agent_1": 1}, "\x1b[32m>>>>>>>> Select speaker attempt 1 of 3 successfully selected: agent_1\x1b[0m"), - ( - {"agent_1": 1, "agent_2": 2}, - "\x1b[31m>>>>>>>> Select speaker attempt 1 of 3 failed as it included multiple agent names.\x1b[0m", - ), - ({}, "\x1b[31m>>>>>>>> Select speaker attempt #1 failed as it did not include any agent names.\x1b[0m"), - ], -) -def test_SpeakerAttempt(mentions: dict[str, int], expected: str, uuid: UUID) -> None: - attempt = 1 - attempts_left = 2 - verbose = True - actual = SpeakerAttemptMessage( - uuid=uuid, mentions=mentions, attempt=attempt, attempts_left=attempts_left, select_speaker_auto_verbose=verbose +class TestSpeakerAttemptMessage: + @pytest.mark.parametrize( + "mentions, expected", + [ + ({"agent_1": 1}, "\x1b[32m>>>>>>>> Select speaker attempt 1 of 3 successfully selected: agent_1\x1b[0m"), + ( + {"agent_1": 1, "agent_2": 2}, + "\x1b[31m>>>>>>>> Select speaker attempt 1 of 3 failed as it included multiple agent names.\x1b[0m", + ), + ({}, "\x1b[31m>>>>>>>> Select speaker attempt #1 failed as it did not include any agent names.\x1b[0m"), + ], ) - assert isinstance(actual, SpeakerAttemptMessage) + def test_print(self, mentions: dict[str, int], expected: str, uuid: UUID) -> None: + attempt = 1 + attempts_left = 2 + verbose = True + + actual = SpeakerAttemptMessage( + uuid=uuid, 
+ mentions=mentions, + attempt=attempt, + attempts_left=attempts_left, + select_speaker_auto_verbose=verbose, + ) + assert isinstance(actual, SpeakerAttemptMessage) - expected_model_dump = { - "uuid": uuid, - "mentions": mentions, - "attempt": attempt, - "attempts_left": attempts_left, - "verbose": verbose, - } - assert actual.model_dump() == expected_model_dump + expected_model_dump = { + "type": "speaker_attempt", + "content": { + "uuid": uuid, + "mentions": mentions, + "attempt": attempt, + "attempts_left": attempts_left, + "verbose": verbose, + }, + } + assert actual.model_dump() == expected_model_dump - mock = MagicMock() - actual.print(f=mock) + mock = MagicMock() + actual.print(f=mock) - # print(mock.call_args_list) + # print(mock.call_args_list) - expected_call_args_list = [call(expected, flush=True)] + expected_call_args_list = [call(expected, flush=True)] - assert mock.call_args_list == expected_call_args_list + assert mock.call_args_list == expected_call_args_list -def test_GroupChatResume(uuid: UUID) -> None: - last_speaker_name = "Coder" - messages = [ - {"content": "You are an expert at coding.", "role": "system", "name": "chat_manager"}, - {"content": "Let's get coding, should I use Python?", "name": "Coder", "role": "assistant"}, - ] - silent = False +class TestGroupChatResumeMessage: + def test_print(self, uuid: UUID) -> None: + last_speaker_name = "Coder" + messages = [ + {"content": "You are an expert at coding.", "role": "system", "name": "chat_manager"}, + {"content": "Let's get coding, should I use Python?", "name": "Coder", "role": "assistant"}, + ] + silent = False - actual = GroupChatResumeMessage(uuid=uuid, last_speaker_name=last_speaker_name, messages=messages, silent=silent) - assert isinstance(actual, GroupChatResumeMessage) + actual = GroupChatResumeMessage( + uuid=uuid, last_speaker_name=last_speaker_name, messages=messages, silent=silent + ) + assert isinstance(actual, GroupChatResumeMessage) - expected_model_dump = { - "uuid": uuid, 
- "last_speaker_name": last_speaker_name, - "messages": messages, - "verbose": True, - } - assert actual.model_dump() == expected_model_dump + expected_model_dump = { + "type": "group_chat_resume", + "content": { + "uuid": uuid, + "last_speaker_name": last_speaker_name, + "messages": messages, + "verbose": True, + }, + } + assert actual.model_dump() == expected_model_dump - mock = MagicMock() - actual.print(f=mock) + mock = MagicMock() + actual.print(f=mock) - # print(mock.call_args_list) + # print(mock.call_args_list) - expected_call_args_list = [ - call("Prepared group chat with 2 messages, the last speaker is", "\x1b[33mCoder\x1b[0m", flush=True) - ] + expected_call_args_list = [ + call("Prepared group chat with 2 messages, the last speaker is", "\x1b[33mCoder\x1b[0m", flush=True) + ] - assert mock.call_args_list == expected_call_args_list + assert mock.call_args_list == expected_call_args_list def test_GroupChatRunChat(uuid: UUID) -> None: From ed80fc52363678d504c494144480c4f1b0ba5eea Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Tue, 7 Jan 2025 13:46:55 +0530 Subject: [PATCH 44/61] Fix tests in test_agent_messages.py --- test/messages/test_agent_messages.py | 701 +++++++++++++++------------ 1 file changed, 390 insertions(+), 311 deletions(-) diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index 732f7b4f63..829ff03c1d 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -614,387 +614,466 @@ def test_print(self, uuid: UUID) -> None: assert mock.call_args_list == expected_call_args_list -def test_GroupChatRunChat(uuid: UUID) -> None: - speaker = ConversableAgent( - "assistant uno", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER" - ) - silent = False +class TestGroupChatRunChatMessage: + def test_print(self, uuid: UUID) -> None: + speaker = ConversableAgent( + "assistant uno", max_consecutive_auto_reply=0, llm_config=False, 
human_input_mode="NEVER" + ) + silent = False - actual = GroupChatRunChatMessage(uuid=uuid, speaker=speaker, silent=silent) - assert isinstance(actual, GroupChatRunChatMessage) + actual = GroupChatRunChatMessage(uuid=uuid, speaker=speaker, silent=silent) + assert isinstance(actual, GroupChatRunChatMessage) - expected_model_dump = { - "uuid": uuid, - "speaker_name": "assistant uno", - "verbose": True, - } - assert actual.model_dump() == expected_model_dump + expected_model_dump = { + "type": "group_chat_run_chat", + "content": { + "uuid": uuid, + "speaker_name": "assistant uno", + "verbose": True, + }, + } + assert actual.model_dump() == expected_model_dump - assert actual.speaker_name == "assistant uno" - assert actual.verbose is True + mock = MagicMock() + actual.print(f=mock) - mock = MagicMock() - actual.print(f=mock) + # print(mock.call_args_list) - # print(mock.call_args_list) + expected_call_args_list = [call("\x1b[32m\nNext speaker: assistant uno\n\x1b[0m", flush=True)] - expected_call_args_list = [call("\x1b[32m\nNext speaker: assistant uno\n\x1b[0m", flush=True)] + assert mock.call_args_list == expected_call_args_list - assert mock.call_args_list == expected_call_args_list +class TestTerminationAndHumanReplyMessage: + def test_print(self, uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: + no_human_input_msg = "NO HUMAN INPUT RECEIVED." -def test_TerminationAndHumanReply(uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: - no_human_input_msg = "NO HUMAN INPUT RECEIVED." 
+ actual = TerminationAndHumanReplyMessage( + uuid=uuid, + no_human_input_msg=no_human_input_msg, + sender=sender, + recipient=recipient, + ) + assert isinstance(actual, TerminationAndHumanReplyMessage) - actual = TerminationAndHumanReplyMessage( - uuid=uuid, - no_human_input_msg=no_human_input_msg, - sender=sender, - recipient=recipient, - ) - assert isinstance(actual, TerminationAndHumanReplyMessage) + expected_model_dump = { + "type": "termination_and_human_reply", + "content": { + "uuid": uuid, + "no_human_input_msg": no_human_input_msg, + "sender_name": "sender", + "recipient_name": "recipient", + }, + } + assert actual.model_dump() == expected_model_dump - expected_model_dump = { - "uuid": uuid, - "no_human_input_msg": no_human_input_msg, - "sender_name": "sender", - "recipient_name": "recipient", - } - assert actual.model_dump() == expected_model_dump + mock = MagicMock() + actual.print(f=mock) + # print(mock.call_args_list) + expected_call_args_list = [call("\x1b[31m\n>>>>>>>> NO HUMAN INPUT RECEIVED.\x1b[0m", flush=True)] + assert mock.call_args_list == expected_call_args_list - mock = MagicMock() - actual.print(f=mock) - # print(mock.call_args_list) - expected_call_args_list = [call("\x1b[31m\n>>>>>>>> NO HUMAN INPUT RECEIVED.\x1b[0m", flush=True)] - assert mock.call_args_list == expected_call_args_list +class TestUsingAutoReplyMessage: + def test_print(self, uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: + human_input_mode = "ALWAYS" + + actual = UsingAutoReplyMessage( + uuid=uuid, + human_input_mode=human_input_mode, + sender=sender, + recipient=recipient, + ) + assert isinstance(actual, UsingAutoReplyMessage) -def test_UsingAutoReply(uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: - human_input_mode = "ALWAYS" + expected_model_dump = { + "type": "using_auto_reply", + "content": { + "uuid": uuid, + "human_input_mode": human_input_mode, + "sender_name": "sender", + "recipient_name": "recipient", + 
}, + } + assert actual.model_dump() == expected_model_dump - actual = UsingAutoReplyMessage( - uuid=uuid, - human_input_mode=human_input_mode, - sender=sender, - recipient=recipient, - ) - assert isinstance(actual, UsingAutoReplyMessage) + mock = MagicMock() + actual.print(f=mock) + # print(mock.call_args_list) + expected_call_args_list = [call("\x1b[31m\n>>>>>>>> USING AUTO REPLY...\x1b[0m", flush=True)] + assert mock.call_args_list == expected_call_args_list - expected_model_dump = { - "uuid": uuid, - "human_input_mode": human_input_mode, - "sender_name": "sender", - "recipient_name": "recipient", - } - assert actual.model_dump() == expected_model_dump - mock = MagicMock() - actual.print(f=mock) - # print(mock.call_args_list) - expected_call_args_list = [call("\x1b[31m\n>>>>>>>> USING AUTO REPLY...\x1b[0m", flush=True)] - assert mock.call_args_list == expected_call_args_list +class TestExecuteCodeBlockMessage: + def test_print(self, uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: + code = """print("hello world")""" + language = "python" + code_block_count = 0 + actual = ExecuteCodeBlockMessage( + uuid=uuid, code=code, language=language, code_block_count=code_block_count, recipient=recipient + ) + assert isinstance(actual, ExecuteCodeBlockMessage) -def test_ExecuteCodeBlock(uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: - code = """print("hello world")""" - language = "python" - code_block_count = 0 + expected_model_dump = { + "type": "execute_code_block", + "content": { + "uuid": uuid, + "code": code, + "language": language, + "code_block_count": code_block_count, + "recipient_name": "recipient", + }, + } + assert actual.model_dump() == expected_model_dump - actual = ExecuteCodeBlockMessage( - uuid=uuid, code=code, language=language, code_block_count=code_block_count, recipient=recipient - ) - assert isinstance(actual, ExecuteCodeBlockMessage) + mock = MagicMock() + actual.print(f=mock) - 
expected_model_dump = { - "uuid": uuid, - "code": code, - "language": language, - "code_block_count": code_block_count, - "recipient_name": "recipient", - } - assert actual.model_dump() == expected_model_dump + # print(mock.call_args_list) - mock = MagicMock() - actual.print(f=mock) + expected_call_args_list = [ + call("\x1b[31m\n>>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\x1b[0m", flush=True) + ] - # print(mock.call_args_list) + assert mock.call_args_list == expected_call_args_list - expected_call_args_list = [ - call("\x1b[31m\n>>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\x1b[0m", flush=True) - ] - assert mock.call_args_list == expected_call_args_list +class TestExecuteFunctionMessage: + def test_print(self, uuid: UUID, recipient: ConversableAgent) -> None: + func_name = "add_num" + call_id = "call_12345xyz" + arguments = {"num_to_be_added": 5} + actual = ExecuteFunctionMessage( + uuid=uuid, func_name=func_name, call_id=call_id, arguments=arguments, recipient=recipient + ) + assert isinstance(actual, ExecuteFunctionMessage) -def test_ExecuteFunction(uuid: UUID, recipient: ConversableAgent) -> None: - func_name = "add_num" - call_id = "call_12345xyz" - arguments = {"num_to_be_added": 5} + expected_model_dump = { + "type": "execute_function", + "content": { + "uuid": uuid, + "func_name": func_name, + "call_id": call_id, + "arguments": arguments, + "recipient_name": "recipient", + }, + } + assert actual.model_dump() == expected_model_dump - actual = ExecuteFunctionMessage( - uuid=uuid, func_name=func_name, call_id=call_id, arguments=arguments, recipient=recipient - ) - assert isinstance(actual, ExecuteFunctionMessage) + mock = MagicMock() + actual.print(f=mock) + # print(mock.call_args_list) + expected_call_args_list = [ + call( + "\x1b[35m\n>>>>>>>> EXECUTING FUNCTION add_num...\nCall ID: call_12345xyz\nInput arguments: {'num_to_be_added': 5}\x1b[0m", + flush=True, + ) + ] + assert mock.call_args_list == 
expected_call_args_list - expected_model_dump = { - "uuid": uuid, - "func_name": func_name, - "call_id": call_id, - "arguments": arguments, - "recipient_name": "recipient", - } - assert actual.model_dump() == expected_model_dump - mock = MagicMock() - actual.print(f=mock) - # print(mock.call_args_list) - expected_call_args_list = [ - call( - "\x1b[35m\n>>>>>>>> EXECUTING FUNCTION add_num...\nCall ID: call_12345xyz\nInput arguments: {'num_to_be_added': 5}\x1b[0m", - flush=True, +class TestExecutedFunctionMessage: + def test_print(self, uuid: UUID, recipient: ConversableAgent) -> None: + func_name = "add_num" + call_id = "call_12345xyz" + arguments = {"num_to_be_added": 5} + content = "15" + + actual = ExecutedFunctionMessage( + uuid=uuid, func_name=func_name, call_id=call_id, arguments=arguments, content=content, recipient=recipient ) - ] - assert mock.call_args_list == expected_call_args_list + assert isinstance(actual, ExecutedFunctionMessage) + expected_model_dump = { + "type": "executed_function", + "content": { + "uuid": uuid, + "func_name": func_name, + "call_id": call_id, + "arguments": arguments, + "content": content, + "recipient_name": "recipient", + }, + } + assert actual.model_dump() == expected_model_dump -def test_ExecutedFunction(uuid: UUID, recipient: ConversableAgent) -> None: - func_name = "add_num" - call_id = "call_12345xyz" - arguments = {"num_to_be_added": 5} - content = "15" + mock = MagicMock() + actual.print(f=mock) + # print(mock.call_args_list) + expected_call_args_list = [ + call( + "\x1b[35m\n>>>>>>>> EXECUTED FUNCTION add_num...\nCall ID: call_12345xyz\nInput arguments: {'num_to_be_added': 5}\nOutput:\n15\x1b[0m", + flush=True, + ) + ] + assert mock.call_args_list == expected_call_args_list - actual = ExecutedFunctionMessage( - uuid=uuid, func_name=func_name, call_id=call_id, arguments=arguments, content=content, recipient=recipient - ) - assert isinstance(actual, ExecutedFunctionMessage) - - expected_model_dump = { - "uuid": uuid, - 
"func_name": func_name, - "call_id": call_id, - "arguments": arguments, - "content": content, - "recipient_name": "recipient", - } - assert actual.model_dump() == expected_model_dump - mock = MagicMock() - actual.print(f=mock) - # print(mock.call_args_list) - expected_call_args_list = [ - call( - "\x1b[35m\n>>>>>>>> EXECUTED FUNCTION add_num...\nCall ID: call_12345xyz\nInput arguments: {'num_to_be_added': 5}\nOutput:\n15\x1b[0m", - flush=True, - ) - ] - assert mock.call_args_list == expected_call_args_list +class TestSelectSpeakerMessage: + def test_print(self, uuid: UUID) -> None: + agents = [ + ConversableAgent("bob", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), + ConversableAgent("charlie", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), + ] + actual = SelectSpeakerMessage(uuid=uuid, agents=agents) # type: ignore [arg-type] + assert isinstance(actual, SelectSpeakerMessage) -def test_SelectSpeaker(uuid: UUID) -> None: - agents = [ - ConversableAgent("bob", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), - ConversableAgent("charlie", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), - ] + expected_model_dump = { + "type": "select_speaker", + "content": { + "uuid": uuid, + "agent_names": ["bob", "charlie"], + }, + } + assert actual.model_dump() == expected_model_dump - actual = SelectSpeakerMessage(uuid=uuid, agents=agents) # type: ignore [arg-type] - assert isinstance(actual, SelectSpeakerMessage) + mock = MagicMock() + actual.print(f=mock) + # print(mock.call_args_list) + expected_call_args_list = [ + call("Please select the next speaker from the following list:"), + call("1: bob"), + call("2: charlie"), + ] + assert mock.call_args_list == expected_call_args_list - expected_model_dump = { - "uuid": uuid, - "agent_names": ["bob", "charlie"], - } - assert actual.model_dump() == expected_model_dump - mock = MagicMock() - actual.print(f=mock) - # 
print(mock.call_args_list) - expected_call_args_list = [ - call("Please select the next speaker from the following list:"), - call("1: bob"), - call("2: charlie"), - ] - assert mock.call_args_list == expected_call_args_list +class TestSelectSpeakerTryCountExceededMessage: + def test_print(self, uuid: UUID) -> None: + agents = [ + ConversableAgent("bob", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), + ConversableAgent("charlie", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), + ] + try_count = 3 + actual = SelectSpeakerTryCountExceededMessage(uuid=uuid, try_count=try_count, agents=agents) # type: ignore [arg-type] + assert isinstance(actual, SelectSpeakerTryCountExceededMessage) -def test_SelectSpeakerTryCountExceeded(uuid: UUID) -> None: - agents = [ - ConversableAgent("bob", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), - ConversableAgent("charlie", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), - ] - try_count = 3 + expected_model_dump = { + "type": "select_speaker_try_count_exceeded", + "content": { + "uuid": uuid, + "try_count": try_count, + "agent_names": ["bob", "charlie"], + }, + } + assert actual.model_dump() == expected_model_dump + + mock = MagicMock() + actual.print(f=mock) + # print(mock.call_args_list) + expected_call_args_list = [call("You have tried 3 times. The next speaker will be selected automatically.")] + assert mock.call_args_list == expected_call_args_list - actual = SelectSpeakerTryCountExceededMessage(uuid=uuid, try_count=try_count, agents=agents) # type: ignore [arg-type] - assert isinstance(actual, SelectSpeakerTryCountExceededMessage) - mock = MagicMock() - actual.print(f=mock) - # print(mock.call_args_list) - expected_call_args_list = [call("You have tried 3 times. 
The next speaker will be selected automatically.")] - assert mock.call_args_list == expected_call_args_list +class TestSelectSpeakerInvalidInputMessage: + def test_print(self, uuid: UUID) -> None: + agents = [ + ConversableAgent("bob", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), + ConversableAgent("charlie", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), + ] + actual = SelectSpeakerInvalidInputMessage(uuid=uuid, agents=agents) # type: ignore [arg-type] + assert isinstance(actual, SelectSpeakerInvalidInputMessage) -def test_SelectSpeakerInvalidInput(uuid: UUID) -> None: - agents = [ - ConversableAgent("bob", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), - ConversableAgent("charlie", max_consecutive_auto_reply=0, llm_config=False, human_input_mode="NEVER"), - ] + expected_model_dump = { + "type": "select_speaker_invalid_input", + "content": { + "uuid": uuid, + "agent_names": ["bob", "charlie"], + }, + } + assert actual.model_dump() == expected_model_dump + mock = MagicMock() + actual.print(f=mock) + # print(mock.call_args_list) + expected_call_args_list = [call("Invalid input. Please enter a number between 1 and 2.")] + assert mock.call_args_list == expected_call_args_list - actual = SelectSpeakerInvalidInputMessage(uuid=uuid, agents=agents) # type: ignore [arg-type] - assert isinstance(actual, SelectSpeakerInvalidInputMessage) - expected_model_dump = { - "uuid": uuid, - "agent_names": ["bob", "charlie"], - } - assert actual.model_dump() == expected_model_dump - mock = MagicMock() - actual.print(f=mock) - # print(mock.call_args_list) - expected_call_args_list = [call("Invalid input. 
Please enter a number between 1 and 2.")] - assert mock.call_args_list == expected_call_args_list +class TestClearConversableAgentHistoryMessage: + def test_print(self, uuid: UUID, recipient: ConversableAgent) -> None: + no_messages_preserved = 5 + actual = ClearConversableAgentHistoryMessage( + uuid=uuid, agent=recipient, no_messages_preserved=no_messages_preserved + ) + assert isinstance(actual, ClearConversableAgentHistoryMessage) -def test_ClearConversableAgentHistory(uuid: UUID, recipient: ConversableAgent) -> None: - no_messages_preserved = 5 + expected_model_dump = { + "type": "clear_conversable_agent_history", + "content": { + "uuid": uuid, + "agent_name": "recipient", + "recipient_name": "recipient", + "no_messages_preserved": no_messages_preserved, + }, + } + assert actual.model_dump() == expected_model_dump - actual = ClearConversableAgentHistoryMessage( - uuid=uuid, agent=recipient, no_messages_preserved=no_messages_preserved - ) - assert isinstance(actual, ClearConversableAgentHistoryMessage) + mock = MagicMock() + actual.print(f=mock) + # print(mock.call_args_list) + expected_call_args_list = [ + call( + "Preserving one more message for recipient to not divide history between tool call and tool response." + ), + call( + "Preserving one more message for recipient to not divide history between tool call and tool response." + ), + call( + "Preserving one more message for recipient to not divide history between tool call and tool response." + ), + call( + "Preserving one more message for recipient to not divide history between tool call and tool response." + ), + call( + "Preserving one more message for recipient to not divide history between tool call and tool response." 
+ ), + ] + assert mock.call_args_list == expected_call_args_list - expected_model_dump = { - "uuid": uuid, - "agent_name": "recipient", - "recipient_name": "recipient", - "no_messages_preserved": no_messages_preserved, - } - assert actual.model_dump() == expected_model_dump - mock = MagicMock() - actual.print(f=mock) - # print(mock.call_args_list) - expected_call_args_list = [ - call("Preserving one more message for recipient to not divide history between tool call and tool response."), - call("Preserving one more message for recipient to not divide history between tool call and tool response."), - call("Preserving one more message for recipient to not divide history between tool call and tool response."), - call("Preserving one more message for recipient to not divide history between tool call and tool response."), - call("Preserving one more message for recipient to not divide history between tool call and tool response."), - ] - assert mock.call_args_list == expected_call_args_list +class TestClearConversableAgentHistoryWarningMessage: + def test_print(self, uuid: UUID, recipient: ConversableAgent) -> None: + actual = ClearConversableAgentHistoryWarningMessage(uuid=uuid, recipient=recipient) + assert isinstance(actual, ClearConversableAgentHistoryWarningMessage) + expected_model_dump = { + "type": "clear_conversable_agent_history_warning", + "content": { + "uuid": uuid, + "recipient_name": "recipient", + }, + } + assert actual.model_dump() == expected_model_dump -def test_ClearConversableAgentHistoryWarning(uuid: UUID, recipient: ConversableAgent) -> None: - actual = ClearConversableAgentHistoryWarningMessage(uuid=uuid, recipient=recipient) + mock = MagicMock() + actual.print(f=mock) + # print(mock.call_args_list) + expected_call_args_list = [ + call( + "\x1b[33mWARNING: `nr_preserved_messages` is ignored when clearing chat history with a specific agent.\x1b[0m", + flush=True, + ) + ] + assert mock.call_args_list == expected_call_args_list - mock = MagicMock() - 
actual.print(f=mock) - # print(mock.call_args_list) - expected_call_args_list = [ - call( - "\x1b[33mWARNING: `nr_preserved_messages` is ignored when clearing chat history with a specific agent.\x1b[0m", - flush=True, - ) - ] - assert mock.call_args_list == expected_call_args_list +class TestGenerateCodeExecutionReplyMessage: + @pytest.mark.parametrize( + "code_blocks, expected", + [ + ( + [ + CodeBlock(code="print('hello world')", language="python"), + ], + [call("\x1b[31m\n>>>>>>>> EXECUTING CODE BLOCK (inferred language is python)...\x1b[0m", flush=True)], + ), + ( + [ + CodeBlock(code="print('hello world')", language="python"), + CodeBlock(code="print('goodbye world')", language="python"), + ], + [ + call( + "\x1b[31m\n>>>>>>>> EXECUTING 2 CODE BLOCKS (inferred languages are [python, python])...\x1b[0m", + flush=True, + ) + ], + ), + ], + ) + def test_print( + self, + code_blocks: list[CodeBlock], + expected: list[_Call], + uuid: UUID, + sender: ConversableAgent, + recipient: ConversableAgent, + ) -> None: + actual = GenerateCodeExecutionReplyMessage( + uuid=uuid, code_blocks=code_blocks, sender=sender, recipient=recipient + ) + assert isinstance(actual, GenerateCodeExecutionReplyMessage) -@pytest.mark.parametrize( - "code_blocks, expected", - [ - ( - [ - CodeBlock(code="print('hello world')", language="python"), - ], - [call("\x1b[31m\n>>>>>>>> EXECUTING CODE BLOCK (inferred language is python)...\x1b[0m", flush=True)], - ), - ( - [ - CodeBlock(code="print('hello world')", language="python"), - CodeBlock(code="print('goodbye world')", language="python"), - ], - [ - call( - "\x1b[31m\n>>>>>>>> EXECUTING 2 CODE BLOCKS (inferred languages are [python, python])...\x1b[0m", - flush=True, - ) - ], - ), - ], -) -def test_GenerateCodeExecutionReply( - code_blocks: list[CodeBlock], - expected: list[_Call], - uuid: UUID, - sender: ConversableAgent, - recipient: ConversableAgent, -) -> None: - actual = GenerateCodeExecutionReplyMessage(uuid=uuid, code_blocks=code_blocks, 
sender=sender, recipient=recipient) - assert isinstance(actual, GenerateCodeExecutionReplyMessage) + expected_model_dump = { + "type": "generate_code_execution_reply", + "content": { + "uuid": uuid, + "code_block_languages": [x.language for x in code_blocks], + "sender_name": "sender", + "recipient_name": "recipient", + }, + } + assert actual.model_dump() == expected_model_dump - expected_model_dump = { - "uuid": uuid, - "code_block_languages": [x.language for x in code_blocks], - "sender_name": "sender", - "recipient_name": "recipient", - } - assert actual.model_dump() == expected_model_dump + mock = MagicMock() + actual.print(f=mock) - mock = MagicMock() - actual.print(f=mock) + # print(mock.call_args_list) - # print(mock.call_args_list) + assert mock.call_args_list == expected - assert mock.call_args_list == expected +class TestConversableAgentUsageSummaryMessage: + @pytest.mark.parametrize( + "client, is_client_empty, expected", + [ + (OpenAIWrapper(api_key="dummy api key"), False, [call("Agent 'recipient':")]), + (None, True, [call("No cost incurred from agent 'recipient'.")]), + ], + ) + def test_print( + self, + client: Optional[OpenAIWrapper], + is_client_empty: bool, + expected: list[_Call], + uuid: UUID, + recipient: ConversableAgent, + ) -> None: + actual = ConversableAgentUsageSummaryMessage(uuid=uuid, recipient=recipient, client=client) + assert isinstance(actual, ConversableAgentUsageSummaryMessage) -@pytest.mark.parametrize( - "client, is_client_empty, expected", - [ - (OpenAIWrapper(api_key="dummy api key"), False, [call("Agent 'recipient':")]), - (None, True, [call("No cost incurred from agent 'recipient'.")]), - ], -) -def test_ConversableAgentUsageSummary( - client: Optional[OpenAIWrapper], - is_client_empty: bool, - expected: list[_Call], - uuid: UUID, - recipient: ConversableAgent, -) -> None: - actual = ConversableAgentUsageSummaryMessage(uuid=uuid, recipient=recipient, client=client) - assert isinstance(actual, 
ConversableAgentUsageSummaryMessage) + expected_model_dump = { + "type": "conversable_agent_usage_summary", + "content": { + "uuid": uuid, + "recipient_name": "recipient", + "is_client_empty": is_client_empty, + }, + } + assert actual.model_dump() == expected_model_dump - expected_model_dump = { - "uuid": uuid, - "recipient_name": "recipient", - "is_client_empty": is_client_empty, - } - assert actual.model_dump() == expected_model_dump + mock = MagicMock() + actual.print(f=mock) - mock = MagicMock() - actual.print(f=mock) + # print(mock.call_args_list) - # print(mock.call_args_list) + assert mock.call_args_list == expected - assert mock.call_args_list == expected +class TestTextMessage: + @pytest.mark.parametrize( + "text, expected", + [ + ("Hello, World!", [call("Hello, World!")]), + ("Over and out!", [call("Over and out!")]), + ], + ) + def test_print(self, text: str, expected: list[_Call], uuid: UUID) -> None: + actual = TextMessage(uuid=uuid, text=text) + assert isinstance(actual, TextMessage) -@pytest.mark.parametrize( - "text, expected", - [ - ("Hello, World!", [call("Hello, World!")]), - ("Over and out!", [call("Over and out!")]), - ], -) -def test_TextMessage(text: str, expected: list[_Call], uuid: UUID) -> None: - actual = TextMessage(uuid=uuid, text=text) - expected_model_dump = {"uuid": uuid, "text": text} - assert isinstance(actual, TextMessage) - assert actual.model_dump() == expected_model_dump + expected_model_dump = {"type": "text", "content": {"uuid": uuid, "text": text}} + assert actual.model_dump() == expected_model_dump - mock = MagicMock() - actual.print(f=mock) + mock = MagicMock() + actual.print(f=mock) - # print(mock.call_args_list) + # print(mock.call_args_list) - assert mock.call_args_list == expected + assert mock.call_args_list == expected From 1653a7264e53d35ca2d19e0462040084c5c47199 Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Tue, 7 Jan 2025 13:57:34 +0530 Subject: [PATCH 45/61] Add wrap_message decorator to 
client_messages and fix tests --- autogen/messages/client_messages.py | 4 +- test/messages/test_client_messages.py | 494 +++++++++++++------------- 2 files changed, 250 insertions(+), 248 deletions(-) diff --git a/autogen/messages/client_messages.py b/autogen/messages/client_messages.py index d7e4b02684..cdd1ecba7e 100644 --- a/autogen/messages/client_messages.py +++ b/autogen/messages/client_messages.py @@ -7,7 +7,7 @@ from pydantic import BaseModel -from .base_message import BaseMessage +from .base_message import BaseMessage, wrap_message __all__ = ["UsageSummaryMessage"] @@ -56,6 +56,7 @@ def _change_usage_summary_format( return summary +@wrap_message class UsageSummaryMessage(BaseMessage): actual: ActualUsageSummary total: TotalUsageSummary @@ -124,6 +125,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f("-" * 100, flush=True) +@wrap_message class StreamMessage(BaseMessage): content: str diff --git a/test/messages/test_client_messages.py b/test/messages/test_client_messages.py index d328cde7b2..961d5504b2 100644 --- a/test/messages/test_client_messages.py +++ b/test/messages/test_client_messages.py @@ -79,270 +79,270 @@ def test__change_usage_summary_format( assert summary_dict == expected -@pytest.mark.parametrize( - "actual_usage_summary, total_usage_summary", - [ - ( - { - "gpt-4o-mini-2024-07-18": { - "completion_tokens": 25, - "cost": 4.23e-05, - "prompt_tokens": 182, - "total_tokens": 207, +class TestUsageSummaryMessage: + @pytest.mark.parametrize( + "actual_usage_summary, total_usage_summary", + [ + ( + { + "gpt-4o-mini-2024-07-18": { + "completion_tokens": 25, + "cost": 4.23e-05, + "prompt_tokens": 182, + "total_tokens": 207, + }, + "total_cost": 4.23e-05, }, - "total_cost": 4.23e-05, - }, - { - "gpt-4o-mini-2024-07-18": { - "completion_tokens": 25, - "cost": 4.23e-05, - "prompt_tokens": 182, - "total_tokens": 207, + { + "gpt-4o-mini-2024-07-18": { + "completion_tokens": 25, + "cost": 4.23e-05, + "prompt_tokens": 182, + 
"total_tokens": 207, + }, + "total_cost": 4.23e-05, }, - "total_cost": 4.23e-05, - }, - ), - ], -) -def test_usage_summary_print_same_actual_and_total( - actual_usage_summary: Optional[dict[str, Any]], - total_usage_summary: Optional[dict[str, Any]], - uuid: UUID, -) -> None: - actual = UsageSummaryMessage( - uuid=uuid, actual_usage_summary=actual_usage_summary, total_usage_summary=total_usage_summary, mode="both" + ), + ], ) - - assert isinstance(actual, UsageSummaryMessage) - assert isinstance(actual.actual, ActualUsageSummary) - assert isinstance(actual.total, TotalUsageSummary) - assert actual.mode == "both" - - expected_model_dump = { - "uuid": uuid, - "actual": { - "usages": [ - { - "model": "gpt-4o-mini-2024-07-18", - "completion_tokens": 25, - "cost": 4.23e-05, - "prompt_tokens": 182, - "total_tokens": 207, - } - ], - "total_cost": 4.23e-05, - }, - "total": { - "usages": [ - { - "model": "gpt-4o-mini-2024-07-18", - "completion_tokens": 25, - "cost": 4.23e-05, - "prompt_tokens": 182, - "total_tokens": 207, - } - ], - "total_cost": 4.23e-05, - }, - "mode": "both", - } - assert actual.model_dump() == expected_model_dump - - mock = MagicMock() - actual.print(f=mock) - - # print(mock.call_args_list) - - expected_call_args_list = [ - call( - "----------------------------------------------------------------------------------------------------", - flush=True, - ), - call("Usage summary excluding cached usage: ", flush=True), - call("Total cost: 4e-05", flush=True), - call( - "* Model 'gpt-4o-mini-2024-07-18': cost: 4e-05, prompt_tokens: 182, completion_tokens: 25, total_tokens: 207", - flush=True, - ), - call(), - call( - "All completions are non-cached: the total cost with cached completions is the same as actual cost.", - flush=True, - ), - call( - "----------------------------------------------------------------------------------------------------", - flush=True, - ), - ] - - assert mock.call_args_list == expected_call_args_list - - -@pytest.mark.parametrize( - 
"actual_usage_summary, total_usage_summary", - [ - ( - { - "gpt-4o-mini-2024-07-18": { - "completion_tokens": 25, - "cost": 4.23e-05, - "prompt_tokens": 182, - "total_tokens": 207, + def test_usage_summary_print_same_actual_and_total( + self, + actual_usage_summary: Optional[dict[str, Any]], + total_usage_summary: Optional[dict[str, Any]], + uuid: UUID, + ) -> None: + actual = UsageSummaryMessage( + uuid=uuid, actual_usage_summary=actual_usage_summary, total_usage_summary=total_usage_summary, mode="both" + ) + assert isinstance(actual, UsageSummaryMessage) + + expected_model_dump = { + "type": "usage_summary", + "content": { + "uuid": uuid, + "actual": { + "usages": [ + { + "model": "gpt-4o-mini-2024-07-18", + "completion_tokens": 25, + "cost": 4.23e-05, + "prompt_tokens": 182, + "total_tokens": 207, + } + ], + "total_cost": 4.23e-05, }, - "total_cost": 4.23e-05, - }, - { - "gpt-4o-mini-2024-07-18": { - "completion_tokens": 25 * 40, - "cost": 4.23e-05 * 40, - "prompt_tokens": 182 * 40, - "total_tokens": 207 * 40, + "total": { + "usages": [ + { + "model": "gpt-4o-mini-2024-07-18", + "completion_tokens": 25, + "cost": 4.23e-05, + "prompt_tokens": 182, + "total_tokens": 207, + } + ], + "total_cost": 4.23e-05, }, - "total_cost": 4.23e-05 * 40, + "mode": "both", }, - ), - ], -) -def test_usage_summary_print_different_actual_and_total( - actual_usage_summary: Optional[dict[str, Any]], - total_usage_summary: Optional[dict[str, Any]], - uuid: UUID, -) -> None: - actual = UsageSummaryMessage( - uuid=uuid, actual_usage_summary=actual_usage_summary, total_usage_summary=total_usage_summary, mode="both" - ) - - assert isinstance(actual, UsageSummaryMessage) - assert isinstance(actual.actual, ActualUsageSummary) - assert isinstance(actual.total, TotalUsageSummary) - assert actual.mode == "both" - assert isinstance(actual.actual.usages, list) - assert len(actual.actual.usages) == 1 - assert isinstance(actual.actual.usages[0], ModelUsageSummary) - - expected_model_dump = { - 
"uuid": uuid, - "actual": { - "usages": [ + } + assert actual.model_dump() == expected_model_dump + + mock = MagicMock() + actual.print(f=mock) + + # print(mock.call_args_list) + + expected_call_args_list = [ + call( + "----------------------------------------------------------------------------------------------------", + flush=True, + ), + call("Usage summary excluding cached usage: ", flush=True), + call("Total cost: 4e-05", flush=True), + call( + "* Model 'gpt-4o-mini-2024-07-18': cost: 4e-05, prompt_tokens: 182, completion_tokens: 25, total_tokens: 207", + flush=True, + ), + call(), + call( + "All completions are non-cached: the total cost with cached completions is the same as actual cost.", + flush=True, + ), + call( + "----------------------------------------------------------------------------------------------------", + flush=True, + ), + ] + + assert mock.call_args_list == expected_call_args_list + + @pytest.mark.parametrize( + "actual_usage_summary, total_usage_summary", + [ + ( { - "model": "gpt-4o-mini-2024-07-18", - "completion_tokens": 25, - "cost": 4.23e-05, - "prompt_tokens": 182, - "total_tokens": 207, - } - ], - "total_cost": 4.23e-05, - }, - "total": { - "usages": [ + "gpt-4o-mini-2024-07-18": { + "completion_tokens": 25, + "cost": 4.23e-05, + "prompt_tokens": 182, + "total_tokens": 207, + }, + "total_cost": 4.23e-05, + }, { - "model": "gpt-4o-mini-2024-07-18", - "completion_tokens": 1000, - "cost": 0.001692, - "prompt_tokens": 7280, - "total_tokens": 8280, - } - ], - "total_cost": 0.001692, - }, - "mode": "both", - } - assert actual.model_dump() == expected_model_dump - - mock = MagicMock() - actual.print(f=mock) - - # print(mock.call_args_list) - - expected_call_args_list = [ - call( - "----------------------------------------------------------------------------------------------------", - flush=True, - ), - call("Usage summary excluding cached usage: ", flush=True), - call("Total cost: 4e-05", flush=True), - call( - "* Model 
'gpt-4o-mini-2024-07-18': cost: 4e-05, prompt_tokens: 182, completion_tokens: 25, total_tokens: 207", - flush=True, - ), - call(), - call("Usage summary including cached usage: ", flush=True), - call("Total cost: 0.00169", flush=True), - call( - "* Model 'gpt-4o-mini-2024-07-18': cost: 0.00169, prompt_tokens: 7280, completion_tokens: 1000, total_tokens: 8280", - flush=True, - ), - call( - "----------------------------------------------------------------------------------------------------", - flush=True, - ), - ] - - assert mock.call_args_list == expected_call_args_list - - -@pytest.mark.parametrize( - "actual_usage_summary, total_usage_summary", - [ - ( - None, - None, - ), - ], -) -def test_usage_summary_print_none_actual_and_total( - actual_usage_summary: Optional[dict[str, Any]], - total_usage_summary: Optional[dict[str, Any]], - uuid: UUID, -) -> None: - actual = UsageSummaryMessage( - uuid=uuid, actual_usage_summary=actual_usage_summary, total_usage_summary=total_usage_summary, mode="both" + "gpt-4o-mini-2024-07-18": { + "completion_tokens": 25 * 40, + "cost": 4.23e-05 * 40, + "prompt_tokens": 182 * 40, + "total_tokens": 207 * 40, + }, + "total_cost": 4.23e-05 * 40, + }, + ), + ], ) + def test_usage_summary_print_different_actual_and_total( + self, + actual_usage_summary: Optional[dict[str, Any]], + total_usage_summary: Optional[dict[str, Any]], + uuid: UUID, + ) -> None: + actual = UsageSummaryMessage( + uuid=uuid, actual_usage_summary=actual_usage_summary, total_usage_summary=total_usage_summary, mode="both" + ) + assert isinstance(actual, UsageSummaryMessage) + + expected_model_dump = { + "type": "usage_summary", + "content": { + "uuid": uuid, + "actual": { + "usages": [ + { + "model": "gpt-4o-mini-2024-07-18", + "completion_tokens": 25, + "cost": 4.23e-05, + "prompt_tokens": 182, + "total_tokens": 207, + } + ], + "total_cost": 4.23e-05, + }, + "total": { + "usages": [ + { + "model": "gpt-4o-mini-2024-07-18", + "completion_tokens": 1000, + "cost": 
0.001692, + "prompt_tokens": 7280, + "total_tokens": 8280, + } + ], + "total_cost": 0.001692, + }, + "mode": "both", + }, + } + assert actual.model_dump() == expected_model_dump + + mock = MagicMock() + actual.print(f=mock) + + # print(mock.call_args_list) + + expected_call_args_list = [ + call( + "----------------------------------------------------------------------------------------------------", + flush=True, + ), + call("Usage summary excluding cached usage: ", flush=True), + call("Total cost: 4e-05", flush=True), + call( + "* Model 'gpt-4o-mini-2024-07-18': cost: 4e-05, prompt_tokens: 182, completion_tokens: 25, total_tokens: 207", + flush=True, + ), + call(), + call("Usage summary including cached usage: ", flush=True), + call("Total cost: 0.00169", flush=True), + call( + "* Model 'gpt-4o-mini-2024-07-18': cost: 0.00169, prompt_tokens: 7280, completion_tokens: 1000, total_tokens: 8280", + flush=True, + ), + call( + "----------------------------------------------------------------------------------------------------", + flush=True, + ), + ] + + assert mock.call_args_list == expected_call_args_list + + @pytest.mark.parametrize( + "actual_usage_summary, total_usage_summary", + [ + ( + None, + None, + ), + ], + ) + def test_usage_summary_print_none_actual_and_total( + self, + actual_usage_summary: Optional[dict[str, Any]], + total_usage_summary: Optional[dict[str, Any]], + uuid: UUID, + ) -> None: + actual = UsageSummaryMessage( + uuid=uuid, actual_usage_summary=actual_usage_summary, total_usage_summary=total_usage_summary, mode="both" + ) + assert isinstance(actual, UsageSummaryMessage) + + expected_model_dump = { + "type": "usage_summary", + "content": { + "uuid": uuid, + "actual": {"usages": None, "total_cost": None}, + "total": {"usages": None, "total_cost": None}, + "mode": "both", + }, + } + assert actual.model_dump() == expected_model_dump - assert isinstance(actual, UsageSummaryMessage) - assert isinstance(actual.actual, ActualUsageSummary) - assert 
isinstance(actual.total, TotalUsageSummary) - assert actual.mode == "both" - - expected_model_dump = { - "uuid": uuid, - "actual": {"usages": None, "total_cost": None}, - "total": {"usages": None, "total_cost": None}, - "mode": "both", - } - assert actual.model_dump() == expected_model_dump - - mock = MagicMock() - actual.print(f=mock) + mock = MagicMock() + actual.print(f=mock) - # print(mock.call_args_list) + # print(mock.call_args_list) - expected_call_args_list = [call('No usage summary. Please call "create" first.', flush=True)] + expected_call_args_list = [call('No usage summary. Please call "create" first.', flush=True)] - assert mock.call_args_list == expected_call_args_list + assert mock.call_args_list == expected_call_args_list -def test_StreamMessage(uuid: UUID) -> None: - content = "random stream chunk content" - stream_message = StreamMessage(uuid=uuid, content=content) - assert isinstance(stream_message, StreamMessage) +class TestStreamMessage: + def test_print(self, uuid: UUID) -> None: + content = "random stream chunk content" + stream_message = StreamMessage(uuid=uuid, content=content) + assert isinstance(stream_message, StreamMessage) - expected_model_dump = { - "uuid": uuid, - "content": content, - } - assert stream_message.model_dump() == expected_model_dump + expected_model_dump = { + "type": "stream", + "content": { + "uuid": uuid, + "content": content, + }, + } + assert stream_message.model_dump() == expected_model_dump - mock = MagicMock() - stream_message.print(f=mock) + mock = MagicMock() + stream_message.print(f=mock) - # print(mock.call_args_list) + # print(mock.call_args_list) - expected_call_args_list = [ - call("\x1b[32m", end=""), - call("random stream chunk content", end="", flush=True), - call("\x1b[0m\n"), - ] + expected_call_args_list = [ + call("\x1b[32m", end=""), + call("random stream chunk content", end="", flush=True), + call("\x1b[0m\n"), + ] - assert mock.call_args_list == expected_call_args_list + assert 
mock.call_args_list == expected_call_args_list From 1ab3d1ae687f11938109ec6ae41ebadd7e4480f3 Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Tue, 7 Jan 2025 13:59:00 +0530 Subject: [PATCH 46/61] Fix TestFunctionResponseMessage --- test/messages/test_agent_messages.py | 82 ++++++++++++++++------------ 1 file changed, 46 insertions(+), 36 deletions(-) diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index 829ff03c1d..a5377a0233 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -122,42 +122,52 @@ def test_print(self, uuid: UUID, sender: ConversableAgent, recipient: Conversabl assert mock.call_args_list == expected_call_args_list -@pytest.mark.parametrize( - "message", - [ - {"name": "get_random_number", "role": "function", "content": "76"}, - {"name": "get_random_number", "role": "function", "content": 2}, - ], -) -def test_function_response( - uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent, message: dict[str, Any] -) -> None: - actual = create_received_message_model(uuid=uuid, message=message, sender=sender, recipient=recipient) - - assert isinstance(actual, FunctionResponseMessage) - - assert actual.name == "get_random_number" - assert actual.role == "function" - assert actual.content == message["content"] - assert actual.sender_name == "sender" - assert actual.recipient_name == "recipient" - - mock = MagicMock() - actual.print(f=mock) - - # print(mock.call_args_list) - - expected_call_args_list = [ - call("\x1b[33msender\x1b[0m (to recipient):\n", flush=True), - call("\x1b[32m***** Response from calling function (get_random_number) *****\x1b[0m", flush=True), - call(message["content"], flush=True), - call("\x1b[32m**************************************************************\x1b[0m", flush=True), - call( - "\n", "--------------------------------------------------------------------------------", flush=True, sep="" - ), - ] - - assert mock.call_args_list 
== expected_call_args_list +class TestFunctionResponseMessage: + @pytest.mark.parametrize( + "message", + [ + {"name": "get_random_number", "role": "function", "content": "76"}, + {"name": "get_random_number", "role": "function", "content": 2}, + ], + ) + def test_print( + self, uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent, message: dict[str, Any] + ) -> None: + actual = create_received_message_model(uuid=uuid, message=message, sender=sender, recipient=recipient) + assert isinstance(actual, FunctionResponseMessage) + + expected_model_dump = { + "type": "function_response", + "content": { + "name": "get_random_number", + "role": "function", + "content": message["content"], + "sender_name": "sender", + "recipient_name": "recipient", + "uuid": uuid, + }, + } + assert actual.model_dump() == expected_model_dump + + mock = MagicMock() + actual.print(f=mock) + + # print(mock.call_args_list) + + expected_call_args_list = [ + call("\x1b[33msender\x1b[0m (to recipient):\n", flush=True), + call("\x1b[32m***** Response from calling function (get_random_number) *****\x1b[0m", flush=True), + call(message["content"], flush=True), + call("\x1b[32m**************************************************************\x1b[0m", flush=True), + call( + "\n", + "--------------------------------------------------------------------------------", + flush=True, + sep="", + ), + ] + + assert mock.call_args_list == expected_call_args_list class TestFunctionCallMessage: From 13f7bc3ca4cfbbcb34b625d57e326fa2a3fcb95b Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Tue, 7 Jan 2025 14:19:44 +0530 Subject: [PATCH 47/61] Update tests for print_message --- autogen/messages/base_message.py | 8 ++++---- autogen/messages/print_message.py | 3 ++- test/messages/test_print_message.py | 27 +++++++++++++++------------ 3 files changed, 21 insertions(+), 17 deletions(-) diff --git a/autogen/messages/base_message.py b/autogen/messages/base_message.py index 4be41447b4..161c377939 100644 --- 
a/autogen/messages/base_message.py +++ b/autogen/messages/base_message.py @@ -58,15 +58,15 @@ class WrapperBase(BaseModel): type: Literal[type_name] = type_name # type: ignore[valid-type] content: message_cls # type: ignore[valid-type] - def __init__(self, **data: Any): + def __init__(self, *args: Any, **data: Any): if set(data.keys()) <= {"type", "content"} and "content" in data: - super().__init__(**data) + super().__init__(*args, **data) else: if "content" in data: content = data.pop("content") - super().__init__(content=message_cls(**data, content=content), **data) + super().__init__(*args, content=message_cls(*args, **data, content=content), **data) else: - super().__init__(content=message_cls(**data), **data) + super().__init__(content=message_cls(*args, **data), **data) def print(self, f: Optional[Callable[..., Any]] = None) -> None: self.content.print(f) # type: ignore[attr-defined] diff --git a/autogen/messages/print_message.py b/autogen/messages/print_message.py index a5442ef4b4..f3a577f146 100644 --- a/autogen/messages/print_message.py +++ b/autogen/messages/print_message.py @@ -7,9 +7,10 @@ from typing import Any, Callable, Optional from uuid import UUID -from .base_message import BaseMessage +from .base_message import BaseMessage, wrap_message +@wrap_message class PrintMessage(BaseMessage): objects: list[str] sep: str diff --git a/test/messages/test_print_message.py b/test/messages/test_print_message.py index 44ff431f5e..af3fce4d1b 100644 --- a/test/messages/test_print_message.py +++ b/test/messages/test_print_message.py @@ -8,17 +8,20 @@ from autogen.messages.print_message import PrintMessage -def test_print_message() -> None: - uuid = uuid4() - print_message = PrintMessage("Hello, World!", "How are you", sep=" ", end="\n", flush=False, uuid=uuid) +class TestPrintMessage: + def test_print(self) -> None: + uuid = uuid4() + print_message = PrintMessage("Hello, World!", "How are you", sep=" ", end="\n", flush=False, uuid=uuid) + assert 
isinstance(print_message, PrintMessage) - assert isinstance(print_message, PrintMessage) + expected_model_dump = { + "type": "print", + "content": {"uuid": uuid, "objects": ["Hello, World!", "How are you"], "sep": " ", "end": "\n"}, + } + assert print_message.model_dump() == expected_model_dump - expected_model_dump = {"uuid": uuid, "objects": ["Hello, World!", "How are you"], "sep": " ", "end": "\n"} - assert print_message.model_dump() == expected_model_dump - - mock = MagicMock() - print_message.print(f=mock) - # print(mock.call_args_list) - expected_call_args_list = [call("Hello, World!", "How are you", sep=" ", end="\n", flush=True)] - assert mock.call_args_list == expected_call_args_list + mock = MagicMock() + print_message.print(f=mock) + # print(mock.call_args_list) + expected_call_args_list = [call("Hello, World!", "How are you", sep=" ", end="\n", flush=True)] + assert mock.call_args_list == expected_call_args_list From 1565175e7127232c8735851637f262b09d93f87e Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Tue, 7 Jan 2025 14:40:07 +0530 Subject: [PATCH 48/61] Fix test_base_message --- test/messages/test_base_message.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/messages/test_base_message.py b/test/messages/test_base_message.py index de51c4d607..7e74a25065 100644 --- a/test/messages/test_base_message.py +++ b/test/messages/test_base_message.py @@ -43,7 +43,7 @@ def test_model_dump_validate(self, TestMessage: Type[BaseModel]) -> None: message = TestMessage(uuid=uuid, sender="sender", receiver="receiver", content="Hello, World!") expected = { - "type": "test_message", + "type": "test", "content": { "uuid": uuid, "sender": "sender", From b0b40ce0d2db49d8b3efd34f2780af86595c7256 Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Tue, 7 Jan 2025 14:43:02 +0530 Subject: [PATCH 49/61] Fix websocket test --- test/io/test_websockets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/test/io/test_websockets.py b/test/io/test_websockets.py index 270912ea8c..40f0bc4df5 100644 --- a/test/io/test_websockets.py +++ b/test/io/test_websockets.py @@ -86,7 +86,7 @@ def on_connect(iostream: IOWebsockets) -> None: ) try: message_dict = json.loads(message) - actual = message_dict["objects"][0] + actual = message_dict["content"]["objects"][0] except json.JSONDecodeError: actual = message assert actual == expected From 735fcba9b78a80761ca664507ca9ebf6e90a0f6f Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Tue, 7 Jan 2025 14:59:57 +0530 Subject: [PATCH 50/61] Break speaker attempt into 3 message classes --- autogen/agentchat/groupchat.py | 39 +++++++-- autogen/messages/agent_messages.py | 114 ++++++++++++++++++++------- test/messages/test_agent_messages.py | 96 ++++++++++++++++++++-- 3 files changed, 206 insertions(+), 43 deletions(-) diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py index 1541034a70..425b817f3d 100644 --- a/autogen/agentchat/groupchat.py +++ b/autogen/agentchat/groupchat.py @@ -25,7 +25,9 @@ SelectSpeakerInvalidInputMessage, SelectSpeakerMessage, SelectSpeakerTryCountExceededMessage, - SpeakerAttemptMessage, + SpeakerAttemptFailedMultipleAgentsMessage, + SpeakerAttemptFailedNoAgentsMessage, + SpeakerAttemptSuccessfullMessage, ) from ..oai.client import ModelClient from ..runtime_logging import log_new_agent, logging_enabled @@ -855,14 +857,35 @@ def _validate_speaker_name( # Output the query and requery results if self.select_speaker_auto_verbose: iostream = IOStream.get_default() - iostream.send( - SpeakerAttemptMessage( - mentions=mentions, - attempt=attempt, - attempts_left=attempts_left, - select_speaker_auto_verbose=self.select_speaker_auto_verbose, + no_of_mentions = len(mentions) + if no_of_mentions == 1: + # Success on retry, we have just one name mentioned + iostream.send( + SpeakerAttemptSuccessfullMessage( + mentions=mentions, + attempt=attempt, + attempts_left=attempts_left, + 
select_speaker_auto_verbose=self.select_speaker_auto_verbose, + ) + ) + elif no_of_mentions > 1: + iostream.send( + SpeakerAttemptFailedMultipleAgentsMessage( + mentions=mentions, + attempt=attempt, + attempts_left=attempts_left, + select_speaker_auto_verbose=self.select_speaker_auto_verbose, + ) + ) + else: + iostream.send( + SpeakerAttemptFailedNoAgentsMessage( + mentions=mentions, + attempt=attempt, + attempts_left=attempts_left, + select_speaker_auto_verbose=self.select_speaker_auto_verbose, + ) ) - ) if len(mentions) == 1: # Success on retry, we have just one name mentioned diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index b7ad711dfe..e20a956857 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -11,8 +11,6 @@ from termcolor import colored from ..code_utils import content_str - -# ToDo: once you move the code below, we can just delete this import from ..oai.client import OpenAIWrapper from .base_message import BaseMessage, wrap_message @@ -29,7 +27,9 @@ "ContentMessage", "PostCarryoverProcessingMessage", "ClearAgentsHistoryMessage", - "SpeakerAttemptMessage", + "SpeakerAttemptSuccessfullMessage", + "SpeakerAttemptFailedMultipleAgentsMessage", + "SpeakerAttemptFailedNoAgentsMessage", "GroupChatResumeMessage", "GroupChatRunChatMessage", "TerminationAndHumanReplyMessage", @@ -367,7 +367,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: # todo: break into multiple messages @wrap_message -class SpeakerAttemptMessage(BaseMessage): +class SpeakerAttemptSuccessfullMessage(BaseMessage): mentions: dict[str, int] attempt: int attempts_left: int @@ -393,32 +393,86 @@ def __init__( def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print - if len(self.mentions) == 1: - # Success on retry, we have just one name mentioned - selected_agent_name = next(iter(self.mentions)) - f( - colored( - f">>>>>>>> Select speaker attempt {self.attempt} of 
{self.attempt + self.attempts_left} successfully selected: {selected_agent_name}", - "green", - ), - flush=True, - ) - elif len(self.mentions) > 1: - f( - colored( - f">>>>>>>> Select speaker attempt {self.attempt} of {self.attempt + self.attempts_left} failed as it included multiple agent names.", - "red", - ), - flush=True, - ) - else: - f( - colored( - f">>>>>>>> Select speaker attempt #{self.attempt} failed as it did not include any agent names.", - "red", - ), - flush=True, - ) + selected_agent_name = next(iter(self.mentions)) + f( + colored( + f">>>>>>>> Select speaker attempt {self.attempt} of {self.attempt + self.attempts_left} successfully selected: {selected_agent_name}", + "green", + ), + flush=True, + ) + + +@wrap_message +class SpeakerAttemptFailedMultipleAgentsMessage(BaseMessage): + mentions: dict[str, int] + attempt: int + attempts_left: int + verbose: Optional[bool] = False + + def __init__( + self, + *, + uuid: Optional[UUID] = None, + mentions: dict[str, int], + attempt: int, + attempts_left: int, + select_speaker_auto_verbose: Optional[bool] = False, + ): + super().__init__( + uuid=uuid, + mentions=deepcopy(mentions), + attempt=attempt, + attempts_left=attempts_left, + verbose=select_speaker_auto_verbose, + ) + + def print(self, f: Optional[Callable[..., Any]] = None) -> None: + f = f or print + + f( + colored( + f">>>>>>>> Select speaker attempt {self.attempt} of {self.attempt + self.attempts_left} failed as it included multiple agent names.", + "red", + ), + flush=True, + ) + + +@wrap_message +class SpeakerAttemptFailedNoAgentsMessage(BaseMessage): + mentions: dict[str, int] + attempt: int + attempts_left: int + verbose: Optional[bool] = False + + def __init__( + self, + *, + uuid: Optional[UUID] = None, + mentions: dict[str, int], + attempt: int, + attempts_left: int, + select_speaker_auto_verbose: Optional[bool] = False, + ): + super().__init__( + uuid=uuid, + mentions=deepcopy(mentions), + attempt=attempt, + attempts_left=attempts_left, + 
verbose=select_speaker_auto_verbose, + ) + + def print(self, f: Optional[Callable[..., Any]] = None) -> None: + f = f or print + + f( + colored( + f">>>>>>>> Select speaker attempt #{self.attempt} failed as it did not include any agent names.", + "red", + ), + flush=True, + ) @wrap_message diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index a5377a0233..40377e8051 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -31,7 +31,9 @@ SelectSpeakerInvalidInputMessage, SelectSpeakerMessage, SelectSpeakerTryCountExceededMessage, - SpeakerAttemptMessage, + SpeakerAttemptFailedMultipleAgentsMessage, + SpeakerAttemptFailedNoAgentsMessage, + SpeakerAttemptSuccessfullMessage, TerminationAndHumanReplyMessage, TextMessage, ToolCall, @@ -539,15 +541,99 @@ def test_print( assert mock.call_args_list == expected_call_args_list -class TestSpeakerAttemptMessage: +class TestSpeakerAttemptSuccessfullMessage: @pytest.mark.parametrize( "mentions, expected", [ ({"agent_1": 1}, "\x1b[32m>>>>>>>> Select speaker attempt 1 of 3 successfully selected: agent_1\x1b[0m"), + ], + ) + def test_print(self, mentions: dict[str, int], expected: str, uuid: UUID) -> None: + attempt = 1 + attempts_left = 2 + verbose = True + + actual = SpeakerAttemptSuccessfullMessage( + uuid=uuid, + mentions=mentions, + attempt=attempt, + attempts_left=attempts_left, + select_speaker_auto_verbose=verbose, + ) + assert isinstance(actual, SpeakerAttemptSuccessfullMessage) + + expected_model_dump = { + "type": "speaker_attempt_successfull", + "content": { + "uuid": uuid, + "mentions": mentions, + "attempt": attempt, + "attempts_left": attempts_left, + "verbose": verbose, + }, + } + assert actual.model_dump() == expected_model_dump + + mock = MagicMock() + actual.print(f=mock) + + # print(mock.call_args_list) + + expected_call_args_list = [call(expected, flush=True)] + + assert mock.call_args_list == expected_call_args_list + + +class 
TestSpeakerAttemptFailedMultipleAgentsMessage: + @pytest.mark.parametrize( + "mentions, expected", + [ ( {"agent_1": 1, "agent_2": 2}, "\x1b[31m>>>>>>>> Select speaker attempt 1 of 3 failed as it included multiple agent names.\x1b[0m", ), + ], + ) + def test_print(self, mentions: dict[str, int], expected: str, uuid: UUID) -> None: + attempt = 1 + attempts_left = 2 + verbose = True + + actual = SpeakerAttemptFailedMultipleAgentsMessage( + uuid=uuid, + mentions=mentions, + attempt=attempt, + attempts_left=attempts_left, + select_speaker_auto_verbose=verbose, + ) + assert isinstance(actual, SpeakerAttemptFailedMultipleAgentsMessage) + + expected_model_dump = { + "type": "speaker_attempt_failed_multiple_agents", + "content": { + "uuid": uuid, + "mentions": mentions, + "attempt": attempt, + "attempts_left": attempts_left, + "verbose": verbose, + }, + } + assert actual.model_dump() == expected_model_dump + + mock = MagicMock() + actual.print(f=mock) + + # print(mock.call_args_list) + + expected_call_args_list = [call(expected, flush=True)] + + assert mock.call_args_list == expected_call_args_list + + +class TestSpeakerAttemptFailedNoAgentsMessage: + @pytest.mark.parametrize( + "mentions, expected", + [ ({}, "\x1b[31m>>>>>>>> Select speaker attempt #1 failed as it did not include any agent names.\x1b[0m"), ], ) @@ -556,17 +642,17 @@ def test_print(self, mentions: dict[str, int], expected: str, uuid: UUID) -> Non attempts_left = 2 verbose = True - actual = SpeakerAttemptMessage( + actual = SpeakerAttemptFailedNoAgentsMessage( uuid=uuid, mentions=mentions, attempt=attempt, attempts_left=attempts_left, select_speaker_auto_verbose=verbose, ) - assert isinstance(actual, SpeakerAttemptMessage) + assert isinstance(actual, SpeakerAttemptFailedNoAgentsMessage) expected_model_dump = { - "type": "speaker_attempt", + "type": "speaker_attempt_failed_no_agents", "content": { "uuid": uuid, "mentions": mentions, From 3b6aafdbf42e4bbd7a97f08e55e7b068baf1eee1 Mon Sep 17 00:00:00 2001 From: 
Kumaran Rajendhiran Date: Tue, 7 Jan 2025 15:09:14 +0530 Subject: [PATCH 51/61] Break ConversableAgentUsageSummaryMessage into 2 classes --- autogen/agentchat/conversable_agent.py | 6 +++- autogen/messages/agent_messages.py | 24 ++++++++++---- test/messages/test_agent_messages.py | 44 ++++++++++++++++++-------- 3 files changed, 52 insertions(+), 22 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 65ae67ff1e..758353d140 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -45,6 +45,7 @@ ClearConversableAgentHistoryMessage, ClearConversableAgentHistoryWarningMessage, ConversableAgentUsageSummaryMessage, + ConversableAgentUsageSummaryNoCostIncurredMessage, ExecuteCodeBlockMessage, ExecutedFunctionMessage, ExecuteFunctionMessage, @@ -2876,7 +2877,10 @@ def process_last_received_message(self, messages: list[dict]) -> list[dict]: def print_usage_summary(self, mode: Union[str, list[str]] = ["actual", "total"]) -> None: """Print the usage summary.""" iostream = IOStream.get_default() - iostream.send(ConversableAgentUsageSummaryMessage(recipient=self, client=self.client)) + if self.client is None: + iostream.send(ConversableAgentUsageSummaryNoCostIncurredMessage(recipient=self)) + else: + iostream.send(ConversableAgentUsageSummaryMessage(recipient=self)) if self.client is not None: self.client.print_usage_summary(mode) diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index e20a956857..df0d6cac04 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -39,6 +39,7 @@ "ClearConversableAgentHistoryMessage", "GenerateCodeExecutionReplyMessage", "ConversableAgentUsageSummaryMessage", + "ConversableAgentUsageSummaryNoCostIncurredMessage", "TextMessage", ] @@ -803,21 +804,30 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: ) +@wrap_message +class 
ConversableAgentUsageSummaryNoCostIncurredMessage(BaseMessage): + recipient_name: str + + def __init__(self, *, uuid: Optional[UUID] = None, recipient: "Agent"): + super().__init__(uuid=uuid, recipient_name=recipient.name) + + def print(self, f: Optional[Callable[..., Any]] = None) -> None: + f = f or print + + f(f"No cost incurred from agent '{self.recipient_name}'.") + + @wrap_message class ConversableAgentUsageSummaryMessage(BaseMessage): recipient_name: str - is_client_empty: bool - def __init__(self, *, uuid: Optional[UUID] = None, recipient: "Agent", client: Optional[Any] = None): - super().__init__(uuid=uuid, recipient_name=recipient.name, is_client_empty=True if client is None else False) + def __init__(self, *, uuid: Optional[UUID] = None, recipient: "Agent"): + super().__init__(uuid=uuid, recipient_name=recipient.name) def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print - if self.is_client_empty: - f(f"No cost incurred from agent '{self.recipient_name}'.") - else: - f(f"Agent '{self.recipient_name}':") + f(f"Agent '{self.recipient_name}':") @wrap_message diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index 40377e8051..55df113f50 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -17,6 +17,7 @@ ClearConversableAgentHistoryWarningMessage, ContentMessage, ConversableAgentUsageSummaryMessage, + ConversableAgentUsageSummaryNoCostIncurredMessage, ExecuteCodeBlockMessage, ExecutedFunctionMessage, ExecuteFunctionMessage, @@ -1115,23 +1116,39 @@ def test_print( assert mock.call_args_list == expected +class TestConversableAgentUsageSummaryNoCostIncurredMessage: + def test_print( + self, + uuid: UUID, + recipient: ConversableAgent, + ) -> None: + actual = ConversableAgentUsageSummaryNoCostIncurredMessage(uuid=uuid, recipient=recipient) + assert isinstance(actual, ConversableAgentUsageSummaryNoCostIncurredMessage) + + expected_model_dump = { + "type": 
"conversable_agent_usage_summary_no_cost_incurred", + "content": { + "uuid": uuid, + "recipient_name": "recipient", + }, + } + assert actual.model_dump() == expected_model_dump + + mock = MagicMock() + actual.print(f=mock) + + # print(mock.call_args_list) + expected_call_args_list = [call("No cost incurred from agent 'recipient'.")] + assert mock.call_args_list == expected_call_args_list + + class TestConversableAgentUsageSummaryMessage: - @pytest.mark.parametrize( - "client, is_client_empty, expected", - [ - (OpenAIWrapper(api_key="dummy api key"), False, [call("Agent 'recipient':")]), - (None, True, [call("No cost incurred from agent 'recipient'.")]), - ], - ) def test_print( self, - client: Optional[OpenAIWrapper], - is_client_empty: bool, - expected: list[_Call], uuid: UUID, recipient: ConversableAgent, ) -> None: - actual = ConversableAgentUsageSummaryMessage(uuid=uuid, recipient=recipient, client=client) + actual = ConversableAgentUsageSummaryMessage(uuid=uuid, recipient=recipient) assert isinstance(actual, ConversableAgentUsageSummaryMessage) expected_model_dump = { @@ -1139,7 +1156,6 @@ def test_print( "content": { "uuid": uuid, "recipient_name": "recipient", - "is_client_empty": is_client_empty, }, } assert actual.model_dump() == expected_model_dump @@ -1148,8 +1164,8 @@ def test_print( actual.print(f=mock) # print(mock.call_args_list) - - assert mock.call_args_list == expected + expected_call_args_list = [call("Agent 'recipient':")] + assert mock.call_args_list == expected_call_args_list class TestTextMessage: From a0e0cb6bf4a2bf2b8a27f705b348f0e73421543d Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Tue, 7 Jan 2025 15:56:21 +0100 Subject: [PATCH 52/61] polishing and test fix --- autogen/messages/agent_messages.py | 13 ++++---- test/io/test_websockets.py | 50 ++++++++++++++++++---------- test/messages/test_agent_messages.py | 10 +++--- 3 files changed, 44 insertions(+), 29 deletions(-) diff --git a/autogen/messages/agent_messages.py 
b/autogen/messages/agent_messages.py index df0d6cac04..a884584601 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -24,7 +24,7 @@ "ToolResponseMessage", "FunctionCallMessage", "ToolCallMessage", - "ContentMessage", + "TextMessage", "PostCarryoverProcessingMessage", "ClearAgentsHistoryMessage", "SpeakerAttemptSuccessfullMessage", @@ -40,7 +40,7 @@ "GenerateCodeExecutionReplyMessage", "ConversableAgentUsageSummaryMessage", "ConversableAgentUsageSummaryNoCostIncurredMessage", - "TextMessage", + "MoveToTestTextMessage", ] MessageRole = Literal["assistant", "function", "tool"] @@ -189,7 +189,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: @wrap_message -class ContentMessage(BasePrintReceivedMessage): +class TextMessage(BasePrintReceivedMessage): content: Optional[Union[str, int, float, bool]] = None # type: ignore [assignment] def print(self, f: Optional[Callable[..., Any]] = None) -> None: @@ -204,7 +204,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: def create_received_message_model( *, uuid: Optional[UUID] = None, message: dict[str, Any], sender: "Agent", recipient: "Agent" -) -> BasePrintReceivedMessage: +) -> Union[FunctionResponseMessage, ToolResponseMessage, FunctionCallMessage, ToolCallMessage, TextMessage]: # print(f"{message=}") # print(f"{sender=}") @@ -244,7 +244,7 @@ def create_received_message_model( allow_format_str_template, ) - return ContentMessage( + return TextMessage( content=content, sender_name=sender.name, recipient_name=recipient.name, @@ -830,8 +830,9 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f(f"Agent '{self.recipient_name}':") +# todo: move to test @wrap_message -class TextMessage(BaseMessage): +class MoveToTestTextMessage(BaseMessage): text: str def __init__(self, *, uuid: Optional[UUID] = None, text: str): diff --git a/test/io/test_websockets.py b/test/io/test_websockets.py index 40f0bc4df5..a2e86d0d99 100644 --- 
a/test/io/test_websockets.py +++ b/test/io/test_websockets.py @@ -5,8 +5,10 @@ # Portions derived from https://github.com/microsoft/autogen are under the MIT License. # SPDX-License-Identifier: MIT import json +from pprint import pprint from tempfile import TemporaryDirectory from typing import Dict +from unittest.mock import MagicMock import pytest from websockets.exceptions import ConnectionClosed @@ -15,7 +17,7 @@ from autogen.cache.cache import Cache from autogen.io import IOWebsockets from autogen.io.base import IOStream -from autogen.messages.agent_messages import TextMessage +from autogen.messages.agent_messages import MoveToTestTextMessage from ..conftest import skip_openai @@ -51,7 +53,7 @@ def on_connect(iostream: IOWebsockets) -> None: for msg in ["Hello, World!", "Over and out!"]: print(f" - on_connect(): Sending message '{msg}' to client.", flush=True) - text_message = TextMessage(text=msg) + text_message = MoveToTestTextMessage(text=msg) text_message.print(iostream.print) print(" - on_connect(): Receiving message from client.", flush=True) @@ -100,9 +102,9 @@ def on_connect(iostream: IOWebsockets) -> None: def test_chat(self) -> None: print("Testing setup", flush=True) - success_dict = {"success": False} + mock = MagicMock() - def on_connect(iostream: IOWebsockets, success_dict: dict[str, bool] = success_dict) -> None: + def on_connect(iostream: IOWebsockets) -> None: print(f" - on_connect(): Connected to client using IOWebsockets {iostream}", flush=True) print(" - on_connect(): Receiving message from client.", flush=True) @@ -122,7 +124,7 @@ def on_connect(iostream: IOWebsockets, success_dict: dict[str, bool] = success_d llm_config = { "config_list": config_list, - "stream": True, + # "stream": True, } agent = autogen.ConversableAgent( @@ -148,13 +150,21 @@ def on_connect(iostream: IOWebsockets, success_dict: dict[str, bool] = success_d f" - on_connect(): Initiating chat with agent {agent} using message '{initial_msg}'", flush=True, ) - 
user_proxy.initiate_chat( # noqa: F704 - agent, - message=initial_msg, - cache=cache, - ) + try: + user_proxy.initiate_chat( # noqa: F704 + agent, + message=initial_msg, + cache=cache, + ) + except Exception as e: + print(f" - on_connect(): Exception {e} raised during chat.", flush=True) + import traceback - success_dict["success"] = True + print(traceback.format_exc()) + raise e + + print(" - on_connect(): Chat completed with success.", flush=True) + mock("Success") return @@ -172,18 +182,22 @@ def on_connect(iostream: IOWebsockets, success_dict: dict[str, bool] = success_d try: message = websocket.recv() message = message.decode("utf-8") if isinstance(message, bytes) else message + message_dict = json.loads(message) # drop the newline character - if message.endswith("\n"): - message = message[:-1] + # if message.endswith("\n"): + # message = message[:-1] - print(message, end="", flush=True) + print("*" * 80) + print("Received message:") + pprint(message_dict) + print() - if "TERMINATE" in message: - print() - print(" - Received TERMINATE message.", flush=True) + # if "TERMINATE" in message: + # print() + # print(" - Received TERMINATE message.", flush=True) except ConnectionClosed as e: print("Connection closed:", e, flush=True) break - assert success_dict["success"] + mock.assert_called_once_with("Success") print("Test passed.", flush=True) diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index 55df113f50..d69df2db8f 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -15,7 +15,6 @@ ClearAgentsHistoryMessage, ClearConversableAgentHistoryMessage, ClearConversableAgentHistoryWarningMessage, - ContentMessage, ConversableAgentUsageSummaryMessage, ConversableAgentUsageSummaryNoCostIncurredMessage, ExecuteCodeBlockMessage, @@ -28,6 +27,7 @@ GroupChatResumeMessage, GroupChatRunChatMessage, MessageRole, + MoveToTestTextMessage, PostCarryoverProcessingMessage, 
SelectSpeakerInvalidInputMessage, SelectSpeakerMessage, @@ -311,7 +311,7 @@ def test_print_context_message(self, uuid: UUID, sender: ConversableAgent, recip actual = create_received_message_model(uuid=uuid, message=message, sender=sender, recipient=recipient) - assert isinstance(actual, ContentMessage) + assert isinstance(actual, TextMessage) expected_model_dump = { "type": "content", "content": { @@ -353,7 +353,7 @@ def test_print_context_lambda_message( actual = create_received_message_model(uuid=uuid, message=message, sender=sender, recipient=recipient) - assert isinstance(actual, ContentMessage) + assert isinstance(actual, TextMessage) expected_model_dump = { "type": "content", "content": { @@ -1177,8 +1177,8 @@ class TestTextMessage: ], ) def test_print(self, text: str, expected: list[_Call], uuid: UUID) -> None: - actual = TextMessage(uuid=uuid, text=text) - assert isinstance(actual, TextMessage) + actual = MoveToTestTextMessage(uuid=uuid, text=text) + assert isinstance(actual, MoveToTestTextMessage) expected_model_dump = {"type": "text", "content": {"uuid": uuid, "text": text}} assert actual.model_dump() == expected_model_dump From 94a8ff953cda1008f3da00eac203fa3fefa7bc92 Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Tue, 7 Jan 2025 20:45:29 +0530 Subject: [PATCH 53/61] Fix IOStream test in notebook --- notebook/agentchat_websockets.ipynb | 110 ++++++++++++++-------------- 1 file changed, 57 insertions(+), 53 deletions(-) diff --git a/notebook/agentchat_websockets.ipynb b/notebook/agentchat_websockets.ipynb index 0f6a6cd75e..e82898d0a2 100644 --- a/notebook/agentchat_websockets.ipynb +++ b/notebook/agentchat_websockets.ipynb @@ -63,7 +63,7 @@ "config_list = autogen.config_list_from_json(\n", " env_or_file=\"OAI_CONFIG_LIST\",\n", " filter_dict={\n", - " \"model\": [\"gpt-4\", \"gpt-3.5-turbo\", \"gpt-3.5-turbo-16k\"],\n", + " \"model\": [\"gpt-4o\", \"gpt-4o-mini\"],\n", " },\n", ")" ] @@ -97,7 +97,7 @@ }, { "cell_type": "code", - 
"execution_count": 4, + "execution_count": 2, "id": "9fb85afb", "metadata": {}, "outputs": [], @@ -109,49 +109,49 @@ "\n", " # 1. Receive Initial Message\n", " initial_msg = iostream.input()\n", - "\n", - " # 2. Instantiate ConversableAgent\n", - " agent = autogen.ConversableAgent(\n", - " name=\"chatbot\",\n", - " system_message=\"Complete a task given to you and reply TERMINATE when the task is done. If asked about the weather, use tool 'weather_forecast(city)' to get the weather forecast for a city.\",\n", - " llm_config={\n", - " \"config_list\": autogen.config_list_from_json(\n", - " env_or_file=\"OAI_CONFIG_LIST\",\n", - " filter_dict={\n", - " \"model\": [\"gpt-4o\"],\n", - " },\n", - " ),\n", - " \"stream\": True,\n", - " },\n", - " )\n", - "\n", - " # 3. Define UserProxyAgent\n", - " user_proxy = autogen.UserProxyAgent(\n", - " name=\"user_proxy\",\n", - " system_message=\"A proxy for the user.\",\n", - " is_termination_msg=lambda x: x.get(\"content\", \"\") and x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE\"),\n", - " human_input_mode=\"NEVER\",\n", - " max_consecutive_auto_reply=10,\n", - " code_execution_config=False,\n", - " )\n", - "\n", - " # 4. Define Agent-specific Functions\n", - " def weather_forecast(city: str) -> str:\n", - " return f\"The weather forecast for {city} at {datetime.now()} is sunny.\"\n", - "\n", - " autogen.register_function(\n", - " weather_forecast, caller=agent, executor=user_proxy, description=\"Weather forecast for a city\"\n", - " )\n", - "\n", - " # 5. Initiate conversation\n", - " print(\n", - " f\" - on_connect(): Initiating chat with agent {agent} using message '{initial_msg}'\",\n", - " flush=True,\n", - " )\n", - " user_proxy.initiate_chat( # noqa: F704\n", - " agent,\n", - " message=initial_msg,\n", - " )" + " print(f\"{initial_msg=}\")\n", + "\n", + " try:\n", + " # 2. 
Instantiate ConversableAgent\n", + " agent = autogen.ConversableAgent(\n", + " name=\"chatbot\",\n", + " system_message=\"Complete a task given to you and reply TERMINATE when the task is done. If asked about the weather, use tool 'weather_forecast(city)' to get the weather forecast for a city.\",\n", + " llm_config={\n", + " \"config_list\": config_list,\n", + " # \"stream\": False,\n", + " },\n", + " )\n", + "\n", + " # 3. Define UserProxyAgent\n", + " user_proxy = autogen.UserProxyAgent(\n", + " name=\"user_proxy\",\n", + " system_message=\"A proxy for the user.\",\n", + " is_termination_msg=lambda x: x.get(\"content\", \"\") and x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE\"),\n", + " human_input_mode=\"NEVER\",\n", + " max_consecutive_auto_reply=10,\n", + " code_execution_config=False,\n", + " )\n", + "\n", + " # 4. Define Agent-specific Functions\n", + " def weather_forecast(city: str) -> str:\n", + " return f\"The weather forecast for {city} at {datetime.now()} is sunny.\"\n", + "\n", + " autogen.register_function(\n", + " weather_forecast, caller=agent, executor=user_proxy, description=\"Weather forecast for a city\"\n", + " )\n", + "\n", + " # 5. 
Initiate conversation\n", + " print(\n", + " f\" - on_connect(): Initiating chat with agent {agent} using message '{initial_msg}'\",\n", + " flush=True,\n", + " )\n", + " user_proxy.initiate_chat( # noqa: F704\n", + " agent,\n", + " message=initial_msg,\n", + " )\n", + " except Exception as e:\n", + " print(f\" - on_connect(): Exception: {e}\", flush=True)\n", + " raise e" ] }, { @@ -212,14 +212,18 @@ " websocket.send(\"Check out the weather in Paris and write a poem about it.\")\n", "\n", " while True:\n", - " message = websocket.recv()\n", - " message = message.decode(\"utf-8\") if isinstance(message, bytes) else message\n", + " try:\n", + " message = websocket.recv()\n", + " message = message.decode(\"utf-8\") if isinstance(message, bytes) else message\n", "\n", - " print(message, end=\"\", flush=True)\n", + " print(message)\n", "\n", - " if \"TERMINATE\" in message:\n", - " print()\n", - " print(\" - Received TERMINATE message. Exiting.\", flush=True)\n", + " # if \"TERMINATE\" in message:\n", + " # print()\n", + " # print(\" - Received TERMINATE message. 
Exiting.\", flush=True)\n", + " # break\n", + " except Exception as e:\n", + " print(\"Connection closed:\", e, flush=True)\n", " break" ] }, @@ -461,7 +465,7 @@ ] }, "kernelspec": { - "display_name": ".venv-3.9", + "display_name": "venv", "language": "python", "name": "python3" }, @@ -475,7 +479,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.20" + "version": "3.10.12" } }, "nbformat": 4, From 05e3584831b0b1950d6ac83cd9d9c9e57ce00f7f Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Tue, 7 Jan 2025 20:52:13 +0530 Subject: [PATCH 54/61] Do not use content as parameter in BaseMessage --- autogen/messages/client_messages.py | 8 ++++---- autogen/oai/client.py | 2 +- test/messages/test_client_messages.py | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/autogen/messages/client_messages.py b/autogen/messages/client_messages.py index cdd1ecba7e..63c31b1090 100644 --- a/autogen/messages/client_messages.py +++ b/autogen/messages/client_messages.py @@ -127,10 +127,10 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: @wrap_message class StreamMessage(BaseMessage): - content: str + chunk_content: str - def __init__(self, *, uuid: Optional[UUID] = None, content: str) -> None: - super().__init__(uuid=uuid, content=content) + def __init__(self, *, uuid: Optional[UUID] = None, chunk_content: str) -> None: + super().__init__(uuid=uuid, chunk_content=chunk_content) def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print @@ -138,7 +138,7 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: # Set the terminal text color to green f("\033[32m", end="") - f(self.content, end="", flush=True) + f(self.chunk_content, end="", flush=True) # Reset the terminal text color f("\033[0m\n") diff --git a/autogen/oai/client.py b/autogen/oai/client.py index bfb914b115..1cb8dc4415 100644 --- a/autogen/oai/client.py +++ b/autogen/oai/client.py @@ -362,7 +362,7 @@ 
def _create_or_parse(*args, **kwargs): # If content is present, print it to the terminal and update response variables if content is not None: - iostream.send(StreamMessage(content=content)) + iostream.send(StreamMessage(chunk_content=content)) response_contents[choice.index] += content completion_tokens += 1 else: diff --git a/test/messages/test_client_messages.py b/test/messages/test_client_messages.py index 961d5504b2..78124ffab6 100644 --- a/test/messages/test_client_messages.py +++ b/test/messages/test_client_messages.py @@ -321,15 +321,15 @@ def test_usage_summary_print_none_actual_and_total( class TestStreamMessage: def test_print(self, uuid: UUID) -> None: - content = "random stream chunk content" - stream_message = StreamMessage(uuid=uuid, content=content) + chunk_content = "random stream chunk content" + stream_message = StreamMessage(uuid=uuid, chunk_content=chunk_content) assert isinstance(stream_message, StreamMessage) expected_model_dump = { "type": "stream", "content": { "uuid": uuid, - "content": content, + "chunk_content": chunk_content, }, } assert stream_message.model_dump() == expected_model_dump From 2d52026b3f4da37612311dea5289d17f0c3befca Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Tue, 7 Jan 2025 20:59:09 +0530 Subject: [PATCH 55/61] Use stream=True in websockets tests --- notebook/agentchat_websockets.ipynb | 2 +- test/io/test_websockets.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/notebook/agentchat_websockets.ipynb b/notebook/agentchat_websockets.ipynb index e82898d0a2..b3f7e87879 100644 --- a/notebook/agentchat_websockets.ipynb +++ b/notebook/agentchat_websockets.ipynb @@ -118,7 +118,7 @@ " system_message=\"Complete a task given to you and reply TERMINATE when the task is done. 
If asked about the weather, use tool 'weather_forecast(city)' to get the weather forecast for a city.\",\n", " llm_config={\n", " \"config_list\": config_list,\n", - " # \"stream\": False,\n", + " \"stream\": True,\n", " },\n", " )\n", "\n", diff --git a/test/io/test_websockets.py b/test/io/test_websockets.py index a2e86d0d99..cabfa7d957 100644 --- a/test/io/test_websockets.py +++ b/test/io/test_websockets.py @@ -124,7 +124,7 @@ def on_connect(iostream: IOWebsockets) -> None: llm_config = { "config_list": config_list, - # "stream": True, + "stream": True, } agent = autogen.ConversableAgent( From d08bf4fea6fddb9db3ab758e952e0e1d43c43bdb Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Tue, 7 Jan 2025 21:03:11 +0530 Subject: [PATCH 56/61] Move TestTextMessage to tests --- autogen/messages/agent_messages.py | 15 --------------- test/io/test_websockets.py | 20 +++++++++++++++++--- test/messages/test_agent_messages.py | 24 ------------------------ 3 files changed, 17 insertions(+), 42 deletions(-) diff --git a/autogen/messages/agent_messages.py b/autogen/messages/agent_messages.py index a884584601..98d1aae147 100644 --- a/autogen/messages/agent_messages.py +++ b/autogen/messages/agent_messages.py @@ -40,7 +40,6 @@ "GenerateCodeExecutionReplyMessage", "ConversableAgentUsageSummaryMessage", "ConversableAgentUsageSummaryNoCostIncurredMessage", - "MoveToTestTextMessage", ] MessageRole = Literal["assistant", "function", "tool"] @@ -828,17 +827,3 @@ def print(self, f: Optional[Callable[..., Any]] = None) -> None: f = f or print f(f"Agent '{self.recipient_name}':") - - -# todo: move to test -@wrap_message -class MoveToTestTextMessage(BaseMessage): - text: str - - def __init__(self, *, uuid: Optional[UUID] = None, text: str): - super().__init__(uuid=uuid, text=text) - - def print(self, f: Optional[Callable[..., Any]] = None) -> None: - f = f or print - - f(self.text) diff --git a/test/io/test_websockets.py b/test/io/test_websockets.py index cabfa7d957..1f5b1c1eb1 100644 
--- a/test/io/test_websockets.py +++ b/test/io/test_websockets.py @@ -7,8 +7,9 @@ import json from pprint import pprint from tempfile import TemporaryDirectory -from typing import Dict +from typing import Any, Callable, Dict, Optional from unittest.mock import MagicMock +from uuid import UUID import pytest from websockets.exceptions import ConnectionClosed @@ -17,7 +18,7 @@ from autogen.cache.cache import Cache from autogen.io import IOWebsockets from autogen.io.base import IOStream -from autogen.messages.agent_messages import MoveToTestTextMessage +from autogen.messages.base_message import BaseMessage, wrap_message from ..conftest import skip_openai @@ -33,6 +34,19 @@ skip_test = False +@wrap_message +class TestTextMessage(BaseMessage): + text: str + + def __init__(self, *, uuid: Optional[UUID] = None, text: str): + super().__init__(uuid=uuid, text=text) + + def print(self, f: Optional[Callable[..., Any]] = None) -> None: + f = f or print + + f(self.text) + + @pytest.mark.skipif(skip_test, reason="websockets module is not available") class TestConsoleIOWithWebsockets: def test_input_print(self) -> None: @@ -53,7 +67,7 @@ def on_connect(iostream: IOWebsockets) -> None: for msg in ["Hello, World!", "Over and out!"]: print(f" - on_connect(): Sending message '{msg}' to client.", flush=True) - text_message = MoveToTestTextMessage(text=msg) + text_message = TestTextMessage(text=msg) text_message.print(iostream.print) print(" - on_connect(): Receiving message from client.", flush=True) diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index d69df2db8f..1daaa20756 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -27,7 +27,6 @@ GroupChatResumeMessage, GroupChatRunChatMessage, MessageRole, - MoveToTestTextMessage, PostCarryoverProcessingMessage, SelectSpeakerInvalidInputMessage, SelectSpeakerMessage, @@ -1166,26 +1165,3 @@ def test_print( # print(mock.call_args_list) 
expected_call_args_list = [call("Agent 'recipient':")] assert mock.call_args_list == expected_call_args_list - - -class TestTextMessage: - @pytest.mark.parametrize( - "text, expected", - [ - ("Hello, World!", [call("Hello, World!")]), - ("Over and out!", [call("Over and out!")]), - ], - ) - def test_print(self, text: str, expected: list[_Call], uuid: UUID) -> None: - actual = MoveToTestTextMessage(uuid=uuid, text=text) - assert isinstance(actual, MoveToTestTextMessage) - - expected_model_dump = {"type": "text", "content": {"uuid": uuid, "text": text}} - assert actual.model_dump() == expected_model_dump - - mock = MagicMock() - actual.print(f=mock) - - # print(mock.call_args_list) - - assert mock.call_args_list == expected From 2062357f926c656fd6d0437d82d9a1d8d53e834b Mon Sep 17 00:00:00 2001 From: Kumaran Rajendhiran Date: Tue, 7 Jan 2025 21:05:26 +0530 Subject: [PATCH 57/61] Fix tests failing because of renaming --- test/messages/test_agent_messages.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/messages/test_agent_messages.py b/test/messages/test_agent_messages.py index 1daaa20756..8b72bc587d 100644 --- a/test/messages/test_agent_messages.py +++ b/test/messages/test_agent_messages.py @@ -304,7 +304,7 @@ def test_print( assert mock.call_args_list == expected_call_args_list -class TestTestToolCallMessage: +class TestTextMessage: def test_print_context_message(self, uuid: UUID, sender: ConversableAgent, recipient: ConversableAgent) -> None: message = {"content": "hello {name}", "context": {"name": "there"}} @@ -312,7 +312,7 @@ def test_print_context_message(self, uuid: UUID, sender: ConversableAgent, recip assert isinstance(actual, TextMessage) expected_model_dump = { - "type": "content", + "type": "text", "content": { "uuid": uuid, "content": "hello {name}", @@ -354,7 +354,7 @@ def test_print_context_lambda_message( assert isinstance(actual, TextMessage) expected_model_dump = { - "type": "content", + "type": "text", "content": { 
"uuid": uuid, "content": "hello there", From 39c13cd7cb67e05b7e23027791f8b8a4acfff439 Mon Sep 17 00:00:00 2001 From: AgentGenie Date: Tue, 7 Jan 2025 22:49:28 -0800 Subject: [PATCH 58/61] Refine Notebook --- .../agentchat_tabular_data_rag_workflow.ipynb | 117 ++++++++++++------ 1 file changed, 82 insertions(+), 35 deletions(-) diff --git a/notebook/agentchat_tabular_data_rag_workflow.ipynb b/notebook/agentchat_tabular_data_rag_workflow.ipynb index 32090e8d4d..293d1f10bf 100644 --- a/notebook/agentchat_tabular_data_rag_workflow.ipynb +++ b/notebook/agentchat_tabular_data_rag_workflow.ipynb @@ -12,7 +12,17 @@ "\n", "- Parse the pdf file and extract tables into images.\n", "- A single rag agent fails to get the accurate information from tabular data.\n", - "- An agentic workflow using a groupchat to extract accurate information." + "- An agentic workflow using a groupchat to extract accurate information. \n", + " - the agentic workflow uses a RAG agent to extract document metadata (image of table data based on table name)\n", + " - table image to markdown through a multi-modal agent\n", + " - eventually, an assistant answer the original question with " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "extracted information." 
] }, { @@ -25,7 +35,7 @@ "\n", "- Install Poppler https://pdf2image.readthedocs.io/en/latest/installation.html\n", "- Install Tesseract https://tesseract-ocr.github.io/tessdoc/Installation.html\n", - "- pip install unstructured==0.16.11, pi-heif==0.21.0, unstructured_inference==0.8.1, unstructured.pytesseract==0.3.13, pytesseract==0.3.13\n", + "- pip install unstructured==0.16.11, pi-heif==0.21.0, unstructured_inference==0.8.1, unstructured.pytesseract==0.3.13, pytesseract==0.3.13, ag2[neo4j]\n", ":::\n", "````\n" ] @@ -64,6 +74,7 @@ "\n", "**Skip and use parsed files to run the rest.**\n", "This step is expensive and time consuming, please skip unless you insist.\n", + "The estimate cost is from $10 to $15 to parse the pdf file and build knowledge graph with entire parsed output.\n", "\n", "For the notebook, we use a common finanical document, [Nvidia 2024 10-K](https://investor.nvidia.com/financial-info/sec-filings/sec-filings-details/default.aspx?FilingId=17293267) as an example ([file download link](https://d18rn0p25nwr6d.cloudfront.net/CIK-0001045810/1cbe8fe7-e08a-46e3-8dcc-b429fc06c1a4.pdf)).\n", "\n", @@ -138,14 +149,20 @@ " json.dump(output_elements, file, indent=4)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Imports\n", + "**You could resume here**" + ] + }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ - "# IMPORTS\n", - "\n", "# This is needed to allow nested asyncio calls for Neo4j in Jupyter\n", "import nest_asyncio\n", "\n", @@ -169,7 +186,9 @@ "source": [ "### Create a knowledge graph with sample data\n", "\n", - "To save time, we uses a small subset of the data for the notebook." 
+ "To save time and cost, we use a small subset of the data for the notebook.\n", + "\n", + "**This does not change the fact that native rag agent solution failed to provide the correct answer.**" ] }, { @@ -227,12 +246,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### Connect to knowledge graph if it is built" + "### Connect to knowledge graph if it is built" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -339,7 +358,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 14, "metadata": {}, "outputs": [], "source": [ @@ -395,12 +414,12 @@ "image2table_convertor = MultimodalConversableAgent(\n", " name=\"image2table_convertor\",\n", " system_message=\"\"\"\n", - " You are an image to table convertor. You will receive an image path of a table. The original table could be in csv, pdf or other format.\n", - " You need to the following step in sequence,\n", - " 1. extract the table content and structure.\n", - " 2. Make sure the structure is complete.\n", + " You are an image to table convertor. You will process an image of one or multiple consecutive tables.\n", + " You need to follow the following steps in sequence,\n", + " 1. extract the complete table contents and structure.\n", + " 2. Make sure the structure is complete and no information is left out. Otherwise, start from step 1 again.\n", " 3. Correct typos in the text fields.\n", - " 4. In the end, output the table in Markdown.\n", + " 4. 
In the end, output the table(s) in Markdown.\n", " \"\"\",\n", " llm_config={\"config_list\": config_list, \"max_tokens\": 300},\n", " human_input_mode=\"NEVER\",\n", @@ -419,7 +438,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 15, "metadata": {}, "outputs": [ { @@ -461,25 +480,25 @@ "\u001b[0m\n", "\u001b[33mimage2table_convertor\u001b[0m (to chat_manager):\n", "\n", - "Here is the table extracted in Markdown format:\n", + "Here is the extracted table from the image in Markdown format:\n", "\n", "```markdown\n", - "| | Jan 28, 2024 | Jan 29, 2023 |\n", - "|--------------------------------------|--------------|--------------|\n", - "| **Assets** | | |\n", - "| Current assets: | | |\n", - "|     Cash and cash equivalents | $7,280 | $3,389 |\n", - "|     Marketable securities | $18,704 | $9,907 |\n", - "|     Accounts receivable, net | $9,999 | $3,827 |\n", - "|     Inventories | $5,282 | $5,159 |\n", - "|     Prepaid expenses and other current assets | $3,080 | $791 |\n", - "| **Total current assets** | $44,345 | $23,073 |\n", - "| Property and equipment, net | $3,914 | $3,807 |\n", - "| Operating lease assets | $1,346 | $1,038 |\n", - "| **Goodwill** | $4,430 | $4,372 |\n", - "| Intangible assets, net | $1,112 | $1,676 |\n", - "| Deferred income tax assets | $6,081 | $3,396 |\n", - "| Other\n", + "| | Jan 28, 2024 | Jan 29, 2023 |\n", + "|------------------------------------------|--------------|--------------|\n", + "| **Assets** | | |\n", + "| Current assets: | | |\n", + "|     Cash and cash equivalents | $7,280 | $3,389 |\n", + "|     Marketable securities | $18,704 | $9,907 |\n", + "|     Accounts receivable, net | $9,999 | $3,827 |\n", + "|     Inventories | $5,282 | $5,159 |\n", + "|     Prepaid expenses and other current assets | $3,080 | $791 |\n", + "| Total current assets | $44,345 | $23,073 |\n", + "| Property and equipment, net | $3,914 | $3,807 |\n", + "| Operating lease assets | $1,346 | $1,038 |\n", + "| Goodwill | 
$4,430 | $4,372 |\n", + "| Intangible assets, net | $1,112 | $1,676 |\n", + "| Deferred income tax assets | $6,081 | $3,396 |\n", + "\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[32m\n", @@ -487,12 +506,41 @@ "\u001b[0m\n", "\u001b[33mconclusion\u001b[0m (to chat_manager):\n", "\n", - "Based on the table \"NVIDIA Corporation and Subsidiaries Consolidated Balance Sheets,\" the goodwill asset for the fiscal year ending January 28, 2024, is $4,430 million.\n", + "The goodwill asset for NVIDIA Corporation as of January 28, 2024, is $4,430 million.\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[32m\n", "Next speaker: User_proxy\n", - "\u001b[0m\n" + "\u001b[0m\n", + "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "What is the total current assets from the table NVIDIA Corporation and Subsidiaries Consolidated Balance Sheets?\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[32m\n", + "Next speaker: table_assistant\n", + "\u001b[0m\n", + "\u001b[33mtable_assistant\u001b[0m (to chat_manager):\n", + "\n", + "The total current assets from the table \"NVIDIA Corporation and Subsidiaries Consolidated Balance Sheets\" are $44,345 million as of January 28, 2024.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[32m\n", + "Next speaker: nvidia_rag\n", + "\u001b[0m\n", + "\u001b[33mnvidia_rag\u001b[0m (to chat_manager):\n", + "\n", + "Yes, that's correct! 
The total current assets for NVIDIA Corporation and Subsidiaries as of January 28, 2024, are $44,345 million, according to the table \"NVIDIA Corporation and Subsidiaries Consolidated Balance Sheets.\" If you have any more questions or need further details, feel free to ask!\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[32m\n", + "Next speaker: img_request_format\n", + "\u001b[0m\n", + "\u001b[33mimg_request_format\u001b[0m (to chat_manager):\n", + "\n", + "Great, if you have any more questions or need further clarification, feel free to ask!\n", + "\n", + "--------------------------------------------------------------------------------\n" ] } ], @@ -507,7 +555,6 @@ " conclusion,\n", " ],\n", " messages=[],\n", - " max_round=12,\n", " speaker_selection_method=\"round_robin\",\n", ")\n", "manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)\n", From 93d6ee8af66866086d102cb183b11b67b827a4e0 Mon Sep 17 00:00:00 2001 From: Mark Sze Date: Wed, 8 Jan 2025 19:04:39 +0000 Subject: [PATCH 59/61] Text changes Signed-off-by: Mark Sze --- .../agentchat_tabular_data_rag_workflow.ipynb | 41 ++++++++----------- 1 file changed, 17 insertions(+), 24 deletions(-) diff --git a/notebook/agentchat_tabular_data_rag_workflow.ipynb b/notebook/agentchat_tabular_data_rag_workflow.ipynb index 293d1f10bf..e34f03950c 100644 --- a/notebook/agentchat_tabular_data_rag_workflow.ipynb +++ b/notebook/agentchat_tabular_data_rag_workflow.ipynb @@ -6,23 +6,16 @@ "source": [ "# Agentic RAG workflow on tabular data from a PDF file\n", "\n", - "In this notebook, we're building a workflow to extract accurate tabular data information from a pdf file.\n", + "In this notebook, we're building a workflow to extract accurate tabular data information from a PDF file.\n", "\n", "The following bullets summarize the notebook, with highlights being:\n", "\n", - "- Parse the pdf file and extract tables into images.\n", - "- A 
single rag agent fails to get the accurate information from tabular data.\n", - "- An agentic workflow using a groupchat to extract accurate information. \n", - " - the agentic workflow uses a RAG agent to extract document metadata (image of table data based on table name)\n", - " - table image to markdown through a multi-modal agent\n", - " - eventually, an assistant answer the original question with " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "extracted information." + "- Parse the PDF file and extract tables into images (optional).\n", + "- A single RAG agent fails to get the accurate information from tabular data.\n", + "- An agentic workflow using a groupchat is able to extract information accurately:\n", + " - the agentic workflow uses a RAG agent to extract document metadata (e.g. the image of a data table using just the table name)\n", + " - the table image is converted to Markdown through a multi-modal agent\n", + " - finally, an assistant agent answers the original question with an LLM" ] }, { @@ -31,11 +24,11 @@ "source": [ "````{=mdx}\n", ":::info Requirements\n", - "Unstructured-IO is a dependency for this notebook to parse the pdf. Please install the following dependencies\n", + "Unstructured-IO is a dependency for this notebook to parse the PDF. 
Please install AG2 (with the neo4j extra) and the dependencies:\n", "\n", "- Install Poppler https://pdf2image.readthedocs.io/en/latest/installation.html\n", "- Install Tesseract https://tesseract-ocr.github.io/tessdoc/Installation.html\n", - "- pip install unstructured==0.16.11, pi-heif==0.21.0, unstructured_inference==0.8.1, unstructured.pytesseract==0.3.13, pytesseract==0.3.13, ag2[neo4j]\n", + "- pip install ag2[neo4j], unstructured==0.16.11, pi-heif==0.21.0, unstructured_inference==0.8.1, unstructured.pytesseract==0.3.13, pytesseract==0.3.13\n", ":::\n", "````\n" ] @@ -73,14 +66,13 @@ "### Parse PDF file\n", "\n", "**Skip and use parsed files to run the rest.**\n", - "This step is expensive and time consuming, please skip unless you insist.\n", - "The estimate cost is from $10 to $15 to parse the pdf file and build knowledge graph with entire parsed output.\n", + "This step is expensive and time consuming, please skip if you don't need to generate the full data set. The **estimated cost is from $10 to $15 to parse the pdf file and build the knowledge graph with entire parsed output**.\n", "\n", "For the notebook, we use a common finanical document, [Nvidia 2024 10-K](https://investor.nvidia.com/financial-info/sec-filings/sec-filings-details/default.aspx?FilingId=17293267) as an example ([file download link](https://d18rn0p25nwr6d.cloudfront.net/CIK-0001045810/1cbe8fe7-e08a-46e3-8dcc-b429fc06c1a4.pdf)).\n", "\n", - "We use Unstructured-IO to parse the PDF, the table and image from the pdf are extracted out as .jpg files.\n", + "We use Unstructured-IO to parse the PDF, the table and image from the PDF are extracted out as .jpg files.\n", "\n", - "All parsed output are saved in a json file." + "All parsed output are saved in a JSON file." 
] }, { @@ -154,7 +146,8 @@ "metadata": {}, "source": [ "### Imports\n", - "**You could resume here**" + "\n", + "**If you want to skip the parsing of the PDF file, you can start here.**" ] }, { @@ -188,7 +181,7 @@ "\n", "To save time and cost, we use a small subset of the data for the notebook.\n", "\n", - "**This does not change the fact that native rag agent solution failed to provide the correct answer.**" + "**This does not change the fact that the native RAG agent solution failed to provide the correct answer.**" ] }, { @@ -272,9 +265,9 @@ "source": [ "### Native RAG Agent Solution\n", "\n", - "The following shows that when use a native rag agent for parsed data, the agent failed to get the right information (5,282 instead of 4,430).\n", + "The following shows that when use a native RAG agent for parsed data, the agent failed to get the right information (5,282 instead of 4,430).\n", "\n", - "Our best guess is that rag agent fails to understand the table structure from text." + "Our best guess is that RAG agent fails to understand the table structure from text." 
] }, { From f30ed7ba0c939fc203cb1e1ab068704a7806967b Mon Sep 17 00:00:00 2001 From: Mark Sze Date: Wed, 8 Jan 2025 19:11:10 +0000 Subject: [PATCH 60/61] Add dependency to pyproject Signed-off-by: Mark Sze --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index ccf26716f8..560fe7cb85 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -118,6 +118,7 @@ neo4j = [ "llama-index==0.12.5", "llama-index-graph-stores-neo4j==0.4.2", "llama-index-core==0.12.5", + "llama-index-readers-web==0.3.3", ] # used for agentchat_realtime_swarm notebook and realtime agent twilio demo From bc7efbcc7e965b54fb554f536979e779bb88f54a Mon Sep 17 00:00:00 2001 From: Mark Sze Date: Wed, 8 Jan 2025 19:20:29 +0000 Subject: [PATCH 61/61] Ignore LFS JSON files in pre-commit check Signed-off-by: Mark Sze --- .pre-commit-config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c3ea21ee42..1fceab925a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -15,6 +15,7 @@ repos: - id: check-yaml - id: check-toml - id: check-json + exclude: ^notebook/agentchat_pdf_rag/(parsed_elements|processed_elements)\.json$ - id: check-byte-order-marker exclude: .gitignore - id: check-merge-conflict