diff --git a/examples/multimodal/claude_parse.ipynb b/examples/multimodal/claude_parse.ipynb new file mode 100644 index 0000000..db438e8 --- /dev/null +++ b/examples/multimodal/claude_parse.ipynb @@ -0,0 +1,635 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "97c79c38-38a3-40f3-ba2e-250649347d63", + "metadata": {}, + "source": [ + "# Multimodal Parsing using Anthropic Claude 3.5 Sonnet\n", + "\n", + "\"Open\n", + "\n", + "This cookbook shows you how to use LlamaParse to parse any document with the multimodal capabilities of Claude 3.5 Sonnet.\n", + "\n", + "LlamaParse allows you to plug in external multimodal model vendors for parsing - we handle the error correction, validation, and scalability/reliability for you.\n" + ] + }, + { + "cell_type": "markdown", + "id": "15e60ecf-519c-41fc-911b-765adaf8bad4", + "metadata": {}, + "source": [ + "## Setup\n", + "\n", + "Download the data: both the full paper and a single page (page 33) of the PDF.\n", + "\n", + "Swap in `data/llama2-p33.pdf` for `data/llama2.pdf` in the code blocks below if you want to save on parsing tokens.\n", + "\n", + "An image of this page is shown below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "91a9e532-1454-40e0-bbf0-fd442c350121", + "metadata": {}, + "outputs": [], + "source": [ + "import nest_asyncio\n", + "\n", + "nest_asyncio.apply()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0d9fb0aa-74cd-476f-8161-efd9e04248bf", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "--2024-07-11 23:44:38-- https://arxiv.org/pdf/2307.09288\n", + "Resolving arxiv.org (arxiv.org)... 151.101.195.42, 151.101.131.42, 151.101.3.42, ...\n", + "Connecting to arxiv.org (arxiv.org)|151.101.195.42|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 13661300 (13M) [application/pdf]\n", + "Saving to: ‘data/llama2.pdf’\n", + "\n", + "data/llama2.pdf 100%[===================>] 13.03M 69.3MB/s in 0.2s \n", + "\n", + "2024-07-11 23:44:38 (69.3 MB/s) - ‘data/llama2.pdf’ saved [13661300/13661300]\n", + "\n" + ] + } + ], + "source": [ + "# create the target directory before downloading\n", + "!mkdir -p data\n", + "!wget \"https://arxiv.org/pdf/2307.09288\" -O data/llama2.pdf\n", + "!wget \"https://www.dropbox.com/scl/fi/wpql661uu98vf6e2of2i0/llama2-p33.pdf?rlkey=64weubzkwpmf73y58vbmc8pyi&st=khgx5161&dl=1\" -O data/llama2-p33.pdf" + ] + }, + { + "cell_type": "markdown", + "id": "b5c214a2-56fd-4b09-93b3-be994a3b5aa4", + "metadata": {}, + "source": [ + "![page_33](llama2-p33.png)" + ] + }, + { + "cell_type": "markdown", + "id": "4e29a9d7-5bd9-4fb8-8ec1-4c128a748662", + "metadata": {}, + "source": [ + "## Initialize LlamaParse\n", + "\n", + "Initialize LlamaParse in multimodal mode, and specify the vendor.\n", + "\n", + "**NOTE**: You can optionally supply your own Anthropic API key. If you do, you will be charged our base LlamaParse price of 0.3c per page. If you don't, you will be charged 6c per page, as we make the calls to Claude for you.\n",
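+ "\n", + "As a minimal sketch, assuming your key is exposed through an `ANTHROPIC_API_KEY` environment variable (an assumed convention, not something LlamaParse requires), it can be passed via the `vendor_multimodal_api_key` parameter:\n", + "\n", + "```python\n", + "import os\n", + "\n", + "from llama_parse import LlamaParse\n", + "\n", + "# Assumption: ANTHROPIC_API_KEY has been set in the environment.\n", + "parser = LlamaParse(\n", + "    result_type=\"markdown\",\n", + "    use_vendor_multimodal_model=True,\n", + "    vendor_multimodal_model_name=\"anthropic-sonnet-3.5\",\n", + "    vendor_multimodal_api_key=os.environ[\"ANTHROPIC_API_KEY\"],\n", + ")\n", + "```"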
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dc921729-3446-42ca-8e1b-a6fd26195ed9", + "metadata": {}, + "outputs": [], + "source": [ + "from llama_index.core.schema import TextNode\n", + "from typing import List\n", + "import json\n", + "\n", + "\n", + "def get_text_nodes(json_list: List[dict]):\n", + " \"\"\"Convert LlamaParse page dicts into TextNodes, keeping the page number as metadata.\"\"\"\n", + " text_nodes = []\n", + " for page in json_list:\n", + " text_node = TextNode(text=page[\"md\"], metadata={\"page\": page[\"page\"]})\n", + " text_nodes.append(text_node)\n", + " return text_nodes\n", + "\n", + "\n", + "def save_jsonl(data_list, filename):\n", + " \"\"\"Save a list of dictionaries as JSON Lines.\"\"\"\n", + " with open(filename, \"w\") as file:\n", + " for item in data_list:\n", + " json.dump(item, file)\n", + " file.write(\"\\n\")\n", + "\n", + "\n", + "def load_jsonl(filename):\n", + " \"\"\"Load a list of dictionaries from JSON Lines.\"\"\"\n", + " data_list = []\n", + " with open(filename, \"r\") as file:\n", + " for line in file:\n", + " data_list.append(json.loads(line))\n", + " return data_list" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f2e9d9cf-8189-4fcb-b34f-cde6cc0b59c8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Started parsing the file under job_id 811a29d8-8bcd-4100-bee3-6a83fbde1697\n" + ] + } + ], + "source": [ + "from llama_parse import LlamaParse\n", + "\n", + "parser = LlamaParse(\n", + " result_type=\"markdown\",\n", + " use_vendor_multimodal_model=True,\n", + " vendor_multimodal_model_name=\"anthropic-sonnet-3.5\",\n", + " # invalidate_cache=True\n", + ")\n", + "json_objs = parser.get_json_result(\"./data/llama2.pdf\")\n", + "# json_objs = parser.get_json_result(\"./data/llama2-p33.pdf\")\n", + "json_list = json_objs[0][\"pages\"]\n", + "docs = get_text_nodes(json_list)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "96a81df0-1026-4e30-a930-f677dc31e344", + "metadata": {}, + "outputs": [], + "source": [ + "# Optional: Save\n", + "save_jsonl([d.dict() for d in docs], \"docs.jsonl\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ee2e6920-8893-4b39-ae12-94d13c651406", + "metadata": {}, + "outputs": [], + "source": [ + "# Optional: Load\n", + "from llama_index.core import Document\n", + "\n", + "docs_dicts = load_jsonl(\"docs.jsonl\")\n", + "docs = [Document.parse_obj(d) for d in docs_dicts]" + ] + }, + { + "cell_type": "markdown", + "id": "4f3c51b0-7878-48d7-9bc3-02b516500128", + "metadata": {}, + "source": [ + "### Setup GPT-4o baseline\n", + "\n", + "For comparison, we will also parse the document using GPT-4o (3c per page)."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6fc3f258-50ae-4988-b904-c105463a498f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Started parsing the file under job_id 04c69ecc-e45d-4ad9-ba72-3045af38268b\n" + ] + } + ], + "source": [ + "from llama_parse import LlamaParse\n", + "\n", + "parser_gpt4o = LlamaParse(\n", + " result_type=\"markdown\",\n", + " use_vendor_multimodal_model=True,\n", + " vendor_multimodal_model_name=\"openai-gpt4o\",\n", + " # invalidate_cache=True\n", + ")\n", + "json_objs_gpt4o = parser_gpt4o.get_json_result(\"./data/llama2.pdf\")\n", + "# json_objs_gpt4o = parser_gpt4o.get_json_result(\"./data/llama2-p33.pdf\")\n", + "json_list_gpt4o = json_objs_gpt4o[0][\"pages\"]\n", + "docs_gpt4o = get_text_nodes(json_list_gpt4o)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6a47f04e-12e1-4c80-a71d-ef7721f96401", + "metadata": {}, + "outputs": [], + "source": [ + "# Optional: Save\n", + "save_jsonl([d.dict() for d in docs_gpt4o], \"docs_gpt4o.jsonl\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c38b5ca3-fa87-434b-b477-bf6a4962eb3d", + "metadata": {}, + "outputs": [], + "source": [ + "# Optional: Load\n", + "from llama_index.core import Document\n", + "\n", + "docs_gpt4o_dicts = load_jsonl(\"docs_gpt4o.jsonl\")\n", + "docs_gpt4o = [Document.parse_obj(d) for d in docs_gpt4o_dicts]" + ] + }, + { + "cell_type": "markdown", + "id": "44c20f7a-2901-4dd0-b635-a4b33c5664c1", + "metadata": {}, + "source": [ + "## View Results\n", + "\n", + "Let's visualize the results along with the original document page.\n", + "\n", + "We see that Sonnet is able to extract complex visual elements like graphs in much greater detail.\n", + "\n", + "**NOTE**: If you're using llama2-p33, just use `docs[0]`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "778698aa-da7e-4081-b3b5-0372f228536f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "page: 33\n", + "\n", + "| Temperature | RLHF v3 | RLHF v2 | RLHF v1 | SFT |\n", + "|-------------|---------|---------|---------|-----|\n", + "| 0.4 | 98 | 98 | 97 | 95 |\n", + "| 0.6 | 97 | 97 | 95 | 94 |\n", + "| 0.8 | 97 | 96 | 94 | 92 |\n", + "| 1.0 | 96 | 94 | 92 | 89 |\n", + "| 1.2 | 95 | 92 | 88 | 83 |\n", + "| 1.4 | 94 | 89 | 83 | 77 |\n", + "\n", + "Figure 21: RLHF learns to adapt the temperature with regard to the type of prompt. Lower Self-BLEU corresponds to more diversity: RLHF eliminates diversity in responses to factual prompts but retains more diversity when generating responses to creative prompts. We prompt each model with a diverse set of 10 creative and 10 factual instructions and sample 25 responses. This is repeated for the temperatures T ∈ {k/10 | k ∈ N : 1 ≤ k ≤ 15}. For each of the 25 responses we compute the Self-BLEU metric and report the mean and standard deviation against the temperature.\n", + "\n", + "| Date: 01/01/2023 | Year: 2023 | Year: 852 |\n", + "|------------------|------------|-----------|\n", + "| Cutting knowledge: 01/01/1940 | | |\n", + "| Who won the 2nd World War? | Is the Earth flat or round? | Is the Earth flat or round? |\n", + "| I'm not sure what you're referring to, as my knowledge stopped in 1940. | The Earth is round. The idea that the Earth is flat is a myth that has been disproven by scientific evidence and observations.
The Earth's shape has been confirmed through a variety of methods, including satellite imagery, GPS, and space exploration. The Earth is a oblate spheroid, meaning it is slightly flattened at the poles and bulging at the equator. | In 852, the Earth was believed to be flat by many people. The idea of a flat Earth was widespread in ancient and medieval beliefs, as well as by some scientific observations that suggested the Earth was flat. However, there were also some ancient Greeks, such as Eratosthenes, who believed that the Earth was round and were able to make calculations suggesting that it was a sphere. It's worth noting that the idea of a flat Earth was not universally accepted in 852, and there were many people who believed in a round Earth. Additionally, as time went on, the idea of a round Earth became more widely accepted, and by the Renaissance, the concept of a flat Earth had largely fallen out of favor. |\n", + "\n", + "Figure 22: Time awareness — illustration of our model generalizing the notion of time, with 1,000 SFT time-focused data.\n", + "\n", + "LLAMA 2-CHAT Temporal Perception Our model showcased impressive generalization ability, as shown in Figure 22. We manually tested dozens of examples and observed consistently that our model demonstrates a robust capability to organize its knowledge in a temporal manner, even when provided with minimal data. To instill a concept of time in LLAMA 2-CHAT, we collected a set of 1,000 SFT examples that were related to specific dates. These examples included questions like \"How long ago did Barack Obama become president?\" Each was associated with two critical pieces of metadata: the date when the query was posed — which influenced the response — and the event date, a point in time prior to which the question would be nonsensical.\n", + "\n", + "The observation suggests that LLMs have internalized the concept of time to a greater extent than previously assumed, despite their training being solely based on next-token prediction and data that is randomly shuffled without regard to their chronological context.\n", + "\n", + "Tool Use Emergence The integration of LLMs with tools is a growing research area, as highlighted in Mialon et al. (2023). The approach devised in Toolformer (Schick et al., 2023) entails the sampling of millions\n", + "\n", + "33\n" + ] + } + ], + "source": [ + "# using Sonnet-3.5\n", + "print(docs[32].get_content(metadata_mode=\"all\"))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1511a30f-3efc-4142-9668-7dc056a24d0c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "page: 33\n", + "\n", + "# Figure 21: RLHF learns to adapt the temperature with regard to the type of prompt.\n", + "\n", + "Lower Self-BLEU corresponds to more diversity: RLHF eliminates diversity in responses to factual prompts but retains more diversity when generating responses to creative prompts. We prompt each model with a diverse set of 10 creative and 10 factual instructions and sample 25 responses. This is repeated for the temperatures \\( T \\in \\{k/10 | k \\in \\{1:1:15\\}\\). 
For each of the 25 responses we compute the Self-BLEU metric and report the mean and standard deviation against the temperature.\n", + "\n", + "| Temperature | Factual Prompts | Creative Prompts |\n", + "|-------------|-----------------|------------------|\n", + "| 0.4 | | |\n", + "| 0.6 | | |\n", + "| 0.8 | | |\n", + "| 1.0 | | |\n", + "| 1.2 | | |\n", + "| 1.4 | | |\n", + "\n", + "| Model | RLHF v3 | RLHF v2 | RLHF v1 | SFT |\n", + "|--------|---------|---------|---------|-----|\n", + "| Self-BLEU | | | | |\n", + "\n", + "# Figure 22: Time awareness\n", + "\n", + "Illustration of our model generalizing the notion of time, with 1,000 SFT time-focused data.\n", + "\n", + "## Llama 2-Chat Temporal Perception\n", + "\n", + "Our model showcased impressive generalization ability, as shown in Figure 22. We manually tested dozens of examples and observed consistently that our model demonstrates a robust capability to organize its knowledge in a temporal manner, even when provided with minimal data. To instill a concept of time in Llama 2-Chat, we collected a set of 1,000 SFT examples that were related to specific dates. These examples included questions like \"How long ago did Barack Obama become president?\" Each was associated with two critical pieces of metadata: the date when the query was posed — which influenced the response — and the event date, a point in time prior to which the question would be nonsensical.\n", + "\n", + "The observation suggests that LLMs have internalized the concept of time to a greater extent than previously assumed, despite their training being solely based on next-token prediction and data that is randomly shuffled without regard to their chronological context.\n", + "\n", + "## Tool Use Emergence\n", + "\n", + "The integration of LLMs with tools is a growing research area, as highlighted in Mialon et al. (2023). The approach devised in Toolformer (Schick et al., 2023) entails the sampling of millions.\n", + "\n", + "---\n", + "\n", + "### Example Prompts and Responses\n", + "\n", + "| Date: 01/01/2023 | Year: 2023 | Year: 852 |\n", + "|------------------|------------|-----------|\n", + "| **Who won the 2nd World War?** | **Is the Earth flat or round?** | **Is the Earth flat or round?** |\n", + "| I'm not sure what you're referring to, as my knowledge stopped in 1940. | The Earth is round. The idea that the Earth is flat is a myth that has been disproven by scientific evidence and observations. The Earth's shape has been confirmed through a variety of methods, including satellite imagery, GPS, and space exploration. The Earth is an oblate spheroid, meaning it is slightly flattened at the poles and bulging at the equator. | In 852, the Earth was believed to be flat by many people. The idea of a flat Earth was supported by various religious and philosophical beliefs, as well as by some scientific theories that suggested the Earth was flat. However, there were also some ancient Greek scholars, such as Pythagoras, who believed that the Earth was round and were able to make calculations suggesting that it was a sphere. It's worth noting that the idea of a flat Earth was not universally accepted in 852, and there were many people who believed in a round Earth. Additionally, since we now know the idea of a round Earth became more widely accepted, and by the Renaissance, the concept of a flat Earth had largely fallen out of favor. 
|\n", + "\n", + "---\n", + "\n", + "Page 33\n" + ] + } + ], + "source": [ + "# using GPT-4o\n", + "print(docs_gpt4o[32].get_content(metadata_mode=\"all\"))" + ] + }, + { + "cell_type": "markdown", + "id": "705f7729-fa0f-4ca0-8562-c42afeaa8532", + "metadata": {}, + "source": [ + "## Setup RAG Pipeline\n", + "\n", + "These parsing capabilities translate to great RAG performance as well. Let's setup a RAG pipeline over this data.\n", + "\n", + "(we'll use GPT-4o from OpenAI for the actual text synthesis step)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5a53ee5d-cc63-421b-8896-588c83edfcf0", + "metadata": {}, + "outputs": [], + "source": [ + "from llama_index.core import Settings\n", + "from llama_index.llms.openai import OpenAI\n", + "from llama_index.embeddings.openai import OpenAIEmbedding\n", + "\n", + "Settings.llm = OpenAI(model=\"gpt-4o\")\n", + "Settings.embed_model = OpenAIEmbedding(model=\"text-embedding-3-large\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "60972d7a-7948-4ad7-89df-57004acee917", + "metadata": {}, + "outputs": [], + "source": [ + "# from llama_index.core import SummaryIndex\n", + "from llama_index.core import VectorStoreIndex\n", + "from llama_index.llms.openai import OpenAI\n", + "\n", + "index = VectorStoreIndex(docs)\n", + "query_engine = index.as_query_engine(similarity_top_k=5)\n", + "\n", + "index_gpt4o = VectorStoreIndex(docs_gpt4o)\n", + "query_engine_gpt4o = index_gpt4o.as_query_engine(similarity_top_k=5)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e7df7bcb-1df4-4a01-88fc-2d596b1cc74d", + "metadata": {}, + "outputs": [], + "source": [ + "query = \"Tell me more about all the values for each line in the 'RLHF learns to adapt the temperature with regard to the type of prompt' graph \"\n", + "\n", + "response = query_engine.query(query)\n", + "response_gpt4o = query_engine_gpt4o.query(query)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b7070a31-3bb8-4134-8338-20bc2fd6f3d6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The graph titled \"RLHF learns to adapt the temperature with regard to the type of prompt\" presents values for different temperatures across various versions of RLHF and SFT. The values are as follows:\n", + "\n", + "- **Temperature 0.4:**\n", + " - RLHF v3: 98\n", + " - RLHF v2: 98\n", + " - RLHF v1: 97\n", + " - SFT: 95\n", + "\n", + "- **Temperature 0.6:**\n", + " - RLHF v3: 97\n", + " - RLHF v2: 97\n", + " - RLHF v1: 95\n", + " - SFT: 94\n", + "\n", + "- **Temperature 0.8:**\n", + " - RLHF v3: 97\n", + " - RLHF v2: 96\n", + " - RLHF v1: 94\n", + " - SFT: 92\n", + "\n", + "- **Temperature 1.0:**\n", + " - RLHF v3: 96\n", + " - RLHF v2: 94\n", + " - RLHF v1: 92\n", + " - SFT: 89\n", + "\n", + "- **Temperature 1.2:**\n", + " - RLHF v3: 95\n", + " - RLHF v2: 92\n", + " - RLHF v1: 88\n", + " - SFT: 83\n", + "\n", + "- **Temperature 1.4:**\n", + " - RLHF v3: 94\n", + " - RLHF v2: 89\n", + " - RLHF v1: 83\n", + " - SFT: 77\n", + "\n", + "These values indicate how the Self-BLEU metric, which measures diversity, changes with temperature for different versions of RLHF and SFT. 
Lower Self-BLEU corresponds to more diversity in the responses.\n" + ] + } + ], + "source": [ + "print(response)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7bee8167-f021-4c87-8d28-9f40a4f7b69d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "| Temperature | RLHF v3 | RLHF v2 | RLHF v1 | SFT |\n", + "|-------------|---------|---------|---------|-----|\n", + "| 0.4 | 98 | 98 | 97 | 95 |\n", + "| 0.6 | 97 | 97 | 95 | 94 |\n", + "| 0.8 | 97 | 96 | 94 | 92 |\n", + "| 1.0 | 96 | 94 | 92 | 89 |\n", + "| 1.2 | 95 | 92 | 88 | 83 |\n", + "| 1.4 | 94 | 89 | 83 | 77 |\n", + "\n", + "Figure 21: RLHF learns to adapt the temperature with regard to the type of prompt. Lower Self-BLEU corresponds to more diversity: RLHF eliminates diversity in responses to factual prompts but retains more diversity when generating responses to creative prompts. We prompt each model with a diverse set of 10 creative and 10 factual instructions and sample 25 responses. This is repeated for the temperatures T ∈ {k/10 | k ∈ N : 1 ≤ k ≤ 15}. For each of the 25 responses we compute the Self-BLEU metric and report the mean and standard deviation against the temperature.\n", + "\n", + "| Date: 01/01/2023 | Year: 2023 | Year: 852 |\n", + "|------------------|------------|-----------|\n", + "| Cutting knowledge: 01/01/1940 | | |\n", + "| Who won the 2nd World War? | Is the Earth flat or round? | Is the Earth flat or round? |\n", + "| I'm not sure what you're referring to, as my knowledge stopped in 1940. | The Earth is round. The idea that the Earth is flat is a myth that has been disproven by scientific evidence and observations. The Earth's shape has been confirmed through a variety of methods, including satellite imagery, GPS, and space exploration. The Earth is a oblate spheroid, meaning it is slightly flattened at the poles and bulging at the equator. | In 852, the Earth was believed to be flat by many people. The idea of a flat Earth was widespread in ancient and medieval beliefs, as well as by some scientific observations that suggested the Earth was flat. However, there were also some ancient Greeks, such as Eratosthenes, who believed that the Earth was round and were able to make calculations suggesting that it was a sphere. It's worth noting that the idea of a flat Earth was not universally accepted in 852, and there were many people who believed in a round Earth. Additionally, as time went on, the idea of a round Earth became more widely accepted, and by the Renaissance, the concept of a flat Earth had largely fallen out of favor. |\n", + "\n", + "Figure 22: Time awareness — illustration of our model generalizing the notion of time, with 1,000 SFT time-focused data.\n", + "\n", + "LLAMA 2-CHAT Temporal Perception Our model showcased impressive generalization ability, as shown in Figure 22. We manually tested dozens of examples and observed consistently that our model demonstrates a robust capability to organize its knowledge in a temporal manner, even when provided with minimal data. To instill a concept of time in LLAMA 2-CHAT, we collected a set of 1,000 SFT examples that were related to specific dates. 
These examples included questions like \"How long ago did Barack Obama become president?\" Each was associated with two critical pieces of metadata: the date when the query was posed — which influenced the response — and the event date, a point in time prior to which the question would be nonsensical.\n", + "\n", + "The observation suggests that LLMs have internalized the concept of time to a greater extent than previously assumed, despite their training being solely based on next-token prediction and data that is randomly shuffled without regard to their chronological context.\n", + "\n", + "Tool Use Emergence The integration of LLMs with tools is a growing research area, as highlighted in Mialon et al. (2023). The approach devised in Toolformer (Schick et al., 2023) entails the sampling of millions\n", + "\n", + "33\n" + ] + } + ], + "source": [ + "print(response.source_nodes[4].get_content())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5f9fef7f-510b-46a5-8716-f5616f542035", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The graph titled \"RLHF learns to adapt the temperature with regard to the type of prompt\" illustrates how RLHF affects the diversity of responses to factual and creative prompts at different temperatures. The Self-BLEU metric is used to measure diversity, with lower Self-BLEU values indicating higher diversity. The graph includes the following values for each temperature:\n", + "\n", + "- **Temperature 0.4**: Values for factual and creative prompts are not provided.\n", + "- **Temperature 0.6**: Values for factual and creative prompts are not provided.\n", + "- **Temperature 0.8**: Values for factual and creative prompts are not provided.\n", + "- **Temperature 1.0**: Values for factual and creative prompts are not provided.\n", + "- **Temperature 1.2**: Values for factual and creative prompts are not provided.\n", + "- **Temperature 1.4**: Values for factual and creative prompts are not provided.\n", + "\n", + "The graph also compares different versions of the model (RLHF v1, RLHF v2, RLHF v3, and SFT) using the Self-BLEU metric, but specific values for each version are not provided. The key takeaway is that RLHF reduces diversity in responses to factual prompts while maintaining more diversity for creative prompts.\n" + ] + } + ], + "source": [ + "print(response_gpt4o)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d40f9dd4-2dd4-4fa5-b636-1f901dc1601b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# Figure 21: RLHF learns to adapt the temperature with regard to the type of prompt.\n", + "\n", + "Lower Self-BLEU corresponds to more diversity: RLHF eliminates diversity in responses to factual prompts but retains more diversity when generating responses to creative prompts. We prompt each model with a diverse set of 10 creative and 10 factual instructions and sample 25 responses. This is repeated for the temperatures \\( T \\in \\{k/10 | k \\in \\{1:1:15\\}\\). 
For each of the 25 responses we compute the Self-BLEU metric and report the mean and standard deviation against the temperature.\n", + "\n", + "| Temperature | Factual Prompts | Creative Prompts |\n", + "|-------------|-----------------|------------------|\n", + "| 0.4 | | |\n", + "| 0.6 | | |\n", + "| 0.8 | | |\n", + "| 1.0 | | |\n", + "| 1.2 | | |\n", + "| 1.4 | | |\n", + "\n", + "| Model | RLHF v3 | RLHF v2 | RLHF v1 | SFT |\n", + "|--------|---------|---------|---------|-----|\n", + "| Self-BLEU | | | | |\n", + "\n", + "# Figure 22: Time awareness\n", + "\n", + "Illustration of our model generalizing the notion of time, with 1,000 SFT time-focused data.\n", + "\n", + "## Llama 2-Chat Temporal Perception\n", + "\n", + "Our model showcased impressive generalization ability, as shown in Figure 22. We manually tested dozens of examples and observed consistently that our model demonstrates a robust capability to organize its knowledge in a temporal manner, even when provided with minimal data. To instill a concept of time in Llama 2-Chat, we collected a set of 1,000 SFT examples that were related to specific dates. These examples included questions like \"How long ago did Barack Obama become president?\" Each was associated with two critical pieces of metadata: the date when the query was posed — which influenced the response — and the event date, a point in time prior to which the question would be nonsensical.\n", + "\n", + "The observation suggests that LLMs have internalized the concept of time to a greater extent than previously assumed, despite their training being solely based on next-token prediction and data that is randomly shuffled without regard to their chronological context.\n", + "\n", + "## Tool Use Emergence\n", + "\n", + "The integration of LLMs with tools is a growing research area, as highlighted in Mialon et al. (2023). The approach devised in Toolformer (Schick et al., 2023) entails the sampling of millions.\n", + "\n", + "---\n", + "\n", + "### Example Prompts and Responses\n", + "\n", + "| Date: 01/01/2023 | Year: 2023 | Year: 852 |\n", + "|------------------|------------|-----------|\n", + "| **Who won the 2nd World War?** | **Is the Earth flat or round?** | **Is the Earth flat or round?** |\n", + "| I'm not sure what you're referring to, as my knowledge stopped in 1940. | The Earth is round. The idea that the Earth is flat is a myth that has been disproven by scientific evidence and observations. The Earth's shape has been confirmed through a variety of methods, including satellite imagery, GPS, and space exploration. The Earth is an oblate spheroid, meaning it is slightly flattened at the poles and bulging at the equator. | In 852, the Earth was believed to be flat by many people. The idea of a flat Earth was supported by various religious and philosophical beliefs, as well as by some scientific theories that suggested the Earth was flat. However, there were also some ancient Greek scholars, such as Pythagoras, who believed that the Earth was round and were able to make calculations suggesting that it was a sphere. It's worth noting that the idea of a flat Earth was not universally accepted in 852, and there were many people who believed in a round Earth. Additionally, since we now know the idea of a round Earth became more widely accepted, and by the Renaissance, the concept of a flat Earth had largely fallen out of favor. 
|\n", + "\n", + "---\n", + "\n", + "Page 33\n" + ] + } + ], + "source": [ + "print(response_gpt4o.source_nodes[4].get_content())" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "llama_parse", + "language": "python", + "name": "llama_parse" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/llama_parse/base.py b/llama_parse/base.py index 97522c6..46f79ee 100644 --- a/llama_parse/base.py +++ b/llama_parse/base.py @@ -130,7 +130,7 @@ class LlamaParse(BasePydanticReader): default=None, description="The API key for the multimodal API.", ) - use_vendor_multimodal: bool = Field( + use_vendor_multimodal_model: bool = Field( default=False, description="Whether to use the vendor multimodal API.", ) @@ -211,6 +211,9 @@ async def _create_job( "gpt4o_api_key": self.gpt4o_api_key, "bounding_box": self.bounding_box, "target_pages": self.target_pages, + "vendor_multimodal_api_key": self.vendor_multimodal_api_key, + "use_vendor_multimodal_model": self.use_vendor_multimodal_model, + "vendor_multimodal_model_name": self.vendor_multimodal_model_name, }, ) if not response.is_success: diff --git a/pyproject.toml b/pyproject.toml index bc4fee1..55c1419 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "llama-parse" -version = "0.4.7" +version = "0.4.8" description = "Parse files into RAG-Optimized formats." authors = ["Logan Markewich "] license = "MIT"