From 265261862fb6e146e94540468c4f2aaee8855ec9 Mon Sep 17 00:00:00 2001
From: Sacha Bron
Date: Tue, 22 Oct 2024 19:45:46 +0200
Subject: [PATCH] Add continuous_mode (#460)

---
 llama_parse/base.py | 5 +++++
 pyproject.toml      | 2 +-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/llama_parse/base.py b/llama_parse/base.py
index 1dbefb4..c45f05e 100644
--- a/llama_parse/base.py
+++ b/llama_parse/base.py
@@ -95,6 +95,10 @@ class LlamaParse(BasePydanticReader):
         default=False,
         description="Use our best parser mode if set to True.",
     )
+    continuous_mode: bool = Field(
+        default=False,
+        description="Parse documents continuously, leading to better results on documents where tables span across two pages.",
+    )
     do_not_unroll_columns: Optional[bool] = Field(
         default=False,
         description="If set to true, the parser will keep column in the text according to document layout. Reduce reconstruction accuracy, and LLM's/embedings performances in most case.",
@@ -260,6 +264,7 @@ async def _create_job(
             "do_not_cache": self.do_not_cache,
             "fast_mode": self.fast_mode,
             "premium_mode": self.premium_mode,
+            "continuous_mode": self.continuous_mode,
             "do_not_unroll_columns": self.do_not_unroll_columns,
             "gpt4o_mode": self.gpt4o_mode,
             "gpt4o_api_key": self.gpt4o_api_key,
diff --git a/pyproject.toml b/pyproject.toml
index 2fcf221..fa4add6 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "llama-parse"
-version = "0.5.10"
+version = "0.5.11"
 description = "Parse files into RAG-Optimized formats."
 authors = ["Logan Markewich "]
 license = "MIT"
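
A minimal usage sketch of the flag this patch introduces, assuming the public LlamaParse reader API: the LlamaParse class, the continuous_mode field, and load_data come from the library itself, while the api_key placeholder, the result_type value, and the sample file path are illustrative assumptions and not part of the patch.

    # Hypothetical example: enable continuous_mode so tables that span two
    # pages are parsed as one continuous table. Values below are placeholders.
    from llama_parse import LlamaParse

    parser = LlamaParse(
        api_key="llx-...",        # placeholder API key
        result_type="markdown",   # assumed output format
        continuous_mode=True,     # new flag added by this patch
    )

    # Example path; any PDF with tables crossing page breaks would do.
    documents = parser.load_data("./report_with_long_tables.pdf")
    print(documents[0].text[:500])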