diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml
index 189971e..8292d48 100644
--- a/.github/workflows/python-app.yml
+++ b/.github/workflows/python-app.yml
@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
+ python-version: ["3.9", "3.10", "3.11", "3.12"]
max-parallel: 1 # Ensures the tests run sequentially
steps:
diff --git a/any_parser/any_parser.py b/any_parser/any_parser.py
index e3550ac..c08e430 100644
--- a/any_parser/any_parser.py
+++ b/any_parser/any_parser.py
@@ -25,6 +25,7 @@ class ProcessType(Enum):
TABLE = "table"
FILE_REFINED = "file_refined"
FILE_REFINED_QUICK = "file_refined_quick"
+ PARSE_WITH_LAYOUT = "parse_with_layout"
class AnyParser:
@@ -221,6 +222,8 @@ def async_extract(
process_type = ProcessType.FILE
elif model == ModelType.PRO:
process_type = ProcessType.FILE_REFINED_QUICK
+ elif model == ModelType.ADVANCED:
+ process_type = ProcessType.PARSE_WITH_LAYOUT
else:
return "Error: Invalid model type", None
diff --git a/any_parser/utils.py b/any_parser/utils.py
index ed70fe1..bea2096 100644
--- a/any_parser/utils.py
+++ b/any_parser/utils.py
@@ -8,6 +8,7 @@
class ModelType(Enum):
BASE = "base"
PRO = "pro"
+ ADVANCED = "advanced"
SUPPORTED_FILE_EXTENSIONS = [
@@ -48,7 +49,7 @@ def upload_file_to_presigned_url(
def check_model(model: ModelType) -> None:
- if model not in {ModelType.BASE, ModelType.PRO}:
+ if model not in {ModelType.BASE, ModelType.PRO, ModelType.ADVANCED}:
valid_models = ", ".join(["`" + model.value + "`" for model in ModelType])
return f"Invalid model type: {model}. Supported `model` types include {valid_models}."
diff --git a/examples/async_parse_with_layout.ipynb b/examples/async_parse_with_layout.ipynb
new file mode 100644
index 0000000..6de081d
--- /dev/null
+++ b/examples/async_parse_with_layout.ipynb
@@ -0,0 +1,143 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Install the libraries (ipython is used for displaying markdown in this demo)\n",
+ "# !pip3 install --upgrade ipython\n",
+ "# !pip3 install --upgrade any-parser"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from IPython.display import display, Markdown\n",
+ "from any_parser import AnyParser, ModelType"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ap = AnyParser(api_key=\"S4iyw7RAEE8CTGkVgHYeI8nsTmSALI1U2HXvAN6j\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "file_path = \"./sample_data/test_1figure_1table.png\"\n",
+ "file_id = ap.async_extract(file_path, ModelType.ADVANCED, {})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Waiting for response...\n",
+ "Waiting for response...\n"
+ ]
+ }
+ ],
+ "source": [
+ "markdown_output = ap.async_fetch(file_id=file_id)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/markdown": [
+ "\n",
+ "
\n",
+ "\n",
+ " | latency | (ms) |
\n",
+ "participants | mean | 99th percentile |
\n",
+ "1 | 17.0 +1.4 | 75.0 34.9 |
\n",
+ "2 | 24.5 +2.5 | 87.6 +35.9 |
\n",
+ "5 | 31.5 +6.2 | 104.5 52.2 |
\n",
+ "10 | 30.0 +3.7 | 95.6 +25.4 |
\n",
+ "25 | 35.5 +5.6 | 100.4 42.7 |
\n",
+ "50 | 42.7 4.1 | 93.7 22.9 |
\n",
+ "100 | 71.4 7.6 | 131.2 +17.6 |
\n",
+ "200 | 150.5 +11.0 | 320.3 35.1 |
\n",
+ "\n",
+ "
\n",
+ "\n",
+ "\n",
+ "\n",
+ "Table 4: Two-phase commit scalability. Mean and standard deviations over 10 runs.\n",
+ "\n",
+ "CPUs. Snapshot reads can execute at any up-to-date replicas, so their throughput increases almost linearly with the number of replicas. Single-read read-only transactions only execute at leaders because timestamp assignment must happen at leaders. Read-only-transaction throughput increases with the number of replicas because the number of effective spanservers increases: in the experimental setup, the number of spanservers equaled the number of replicas, and leaders were randomly distributed among the zones. Write throughput benefits from the same experimental artifact (which explains the increase in throughput from 3 to 5 replicas), but that benefit is outweighed by the linear increase in the amount of work performed per write, as the number of replicas increases.\n",
+ "\n",
+ "Table 4 demonstrates that two-phase commit can scale to a reasonable number of participants: it summarizes a set of experiments run across 3 zones, each with 25 spanservers. Scaling up to 50 participants is reasonable in both mean and 99th-percentile, and latencies start to rise noticeably at 100 participants.\n",
+ "\n",
+ "5.2 Availability\n",
+ "\n",
+ "Figure 5 illustrates the availability benefits of running Spanner in multiple datacenters. It shows the results of three experiments on throughput in the presence of datacenter failure, all of which are overlaid onto the same time scale. The test universe consisted of 5 zones Zi, each of which had 25 spanservers. The test database was sharded into 1250 Paxos groups, and 100 test clients constantly issued non-snapshot reads at an aggregate rate of 50K reads/second. All of the leaders were explicitly placed in Z1. Five seconds into each test, all of the servers in one zone were killed: non-leader kills Z2; leader-hard kills Z1; leader-soft kills Z1, but it gives notifications to all of the servers that they should handoff leadership first.\n",
+ "\n",
+ "Killing Z2 has no effect on read throughput. Killing Z1 while giving the leaders time to handoff leadership to a different zone has a minor effect: the throughput drop is not visible in the graph, but is around 3-4%. On the other hand, killing Z1 with no warning has a severe effect: the rate of completion drops almost to 0. As leaders get re-elected, though, the throughput of the system rises to approximately 100K reads/second because of two artifacts of our experiment: there is extra capacity in the system, and operations are queued while the leader is unavailable. As a result, the throughput of the system rises before leveling off again at its steady-state rate.\n",
+ "\n",
+ "We can also see the effect of the fact that Paxos leader leases are set to 10 seconds. When we kill the zone, the leader-lease expiration times for the groups should be evenly distributed over the next 10 seconds. Soon after each lease from a dead leader expires, a new leader is elected. Approximately 10 seconds after the kill time, all of the groups have leaders and throughput has recovered. Shorter lease times would reduce the effect of server deaths on availability, but would require greater amounts of lease-renewal network traffic. We are in the process of designing and implementing a mechanism that will cause slaves to release Paxos leader leases upon leader failure.\n",
+ "\n",
+ "5.3 TrueTime\n",
+ "\n",
+ "Two questions must be answered with respect to TrueTime: is ε truly a bound on clock uncertainty, and how bad does ε get? For the former, the most serious problem would be if a local clock’s drift were greater than 200us/sec: that would break assumptions made by TrueTime. Our machine statistics show that bad CPUs are 6 times more likely than bad clocks. That is, clock issues are extremely infrequent, relative to much more serious hardware problems. As a result, we believe that TrueTime’s implementation is as trustworthy as any other piece of software upon which Spanner depends.\n",
+ "\n",
+ "![<@mask_p0_e1_figure(timeout=1h)>](https://anyparser-realtime-test-j-assetsconstructfilebucke-2wg0ln280yvz.s3.amazonaws.com/result_parse_with_layout/async_S4iyw7RAEE8CTGkVgHYeI8nsTmSALI1U2HXvAN6j/2024/10/30/test_1figure_1table_215dd6ed-92a2-4636-8dc0-5636c689bf4b.png/%3C%40mask_p0_e1_figure_s3%3E.png?AWSAccessKeyId=ASIAXM24X76XBW2GFBFU&Signature=FyIrRoyyRfiKirQbeuVzNzXAowQ%3D&x-amz-security-token=IQoJb3JpZ2luX2VjEPT%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEaCXVzLXdlc3QtMiJHMEUCIQCRUsV%2FHrEppoMWhVoou%2Ft2j%2FrgGV61zgYoeuLwFbKLcAIgKfL4H8nBkzZt2QWR0vm8ZjbKGJclV64yFzh%2FhHChK7Mq0wMIbRAAGgw1MDg2MTEyNjQ0MzAiDDPz9lhEp9Z9OwEe6SqwA6bt%2BuJ8wJD2UQ2OJpUYVwzjhPPKupLRyj0QWt6zNTg%2F%2BKYcLg6mx7s47rmNFpVAjr28CFcX8DxU9DvgILucIPPs2FCvxMoGpXGmrerdRONK1hbSWiEsaiVd7%2B4AW27R5omTB%2F1%2Fk84WUBZRuWPCGn8dZPqAZaBM4dSyUMBjnoTKBmaytwbxTOLQ9mvil%2Fj7JjZaeOtuBWGN8M8GPikXwTtXiJDFqofsWn8hRzMsMYRrueamEn%2BRZ07l9FIie4VRYNYuQDE5DkVI5misl9lpXY1OawATogpCj8SBI%2BwqZUpVe89qj67cMCKH78vAdBqbteNy14M36DIhdiRrVOXzoyibQW4I4jp9wtDQdSbPjS9RqRC36CLfseN%2BVzjWzYxhGoI2sPF5NMPHVGPMSq6VcCA3G%2FW85FLER6XRKCdM03%2FLrt1G7ocowIsbVHE%2BP%2F0jh5Bg8ZQyd%2FUb0yS%2FJQMbQzM72qZOP8nMfl4eytljo4vMDwwP9485uPqM1wcHGrMtk48vqlgcM9Br5cQtikV3q4Q8O3ND9G5xmICPO4LhfE%2B9XeKkXdekRBQ7qTj6ub%2BxEDDS2oa5BjqeAXJbS7XvEc73%2BsWZwamhZLO5Nia3uJDJY5qGjDGEieB%2FhtpLyszAJGHLkJKSaGBM8kwyiPGybwidi7vFcGBZ08CBk7RXVQFpFWmSKtRfbVqOCubcsmgfTUjb7UOfRz3swIvEt3kjazXLowhvy9U1%2B3ISQohEe80zKSJHbc0nE75%2F%2BP97Cl6%2FAVsFqKMZPLcsDrooFR3M5WBPUqebcJQ%2F&Expires=1730264238)\n",
+ "\n",
+ "Figure 5: Effect of killing servers on throughput.\n"
+ ],
+ "text/plain": [
+ ""
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "display(Markdown(markdown_output))"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "any",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.10"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/examples/sample_data/test_1figure_1table.png b/examples/sample_data/test_1figure_1table.png
new file mode 100644
index 0000000..039c8d1
Binary files /dev/null and b/examples/sample_data/test_1figure_1table.png differ
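
A minimal end-to-end sketch of how the new `ModelType.ADVANCED` path added in this diff is exercised, mirroring the example notebook above. The API key is a placeholder, and the error-string convention follows `async_extract` and `check_model` as shown in the hunks:

    # Sketch: exercise the new parse_with_layout route via ModelType.ADVANCED.
    from any_parser import AnyParser, ModelType

    ap = AnyParser(api_key="YOUR_API_KEY")  # placeholder key

    # async_extract returns a file_id handle on success; an invalid model
    # type yields an error string instead (see check_model above).
    file_id = ap.async_extract(
        "./sample_data/test_1figure_1table.png",  # sample added in this diff
        ModelType.ADVANCED,
        {},  # extra extract arguments, empty as in the notebook
    )

    # async_fetch polls ("Waiting for response...") until the layout-aware
    # markdown result is ready.
    markdown_output = ap.async_fetch(file_id=file_id)
    print(markdown_output)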