|
43 | 43 | }, |
44 | 44 | { |
45 | 45 | "cell_type": "code", |
46 | | - "execution_count": 1, |
47 | 46 | "metadata": { |
48 | 47 | "ExecuteTime": { |
49 | | - "end_time": "2025-02-10T11:24:17.543284Z", |
50 | | - "start_time": "2025-02-10T11:24:17.415355Z" |
| 48 | + "end_time": "2025-03-10T16:08:44.721334Z", |
| 49 | + "start_time": "2025-03-10T16:08:44.603495Z" |
51 | 50 | } |
52 | 51 | }, |
53 | | - "outputs": [], |
54 | 52 | "source": [ |
55 | 53 | "import os\n", |
56 | 54 | "from getpass import getpass\n", |
|
65 | 63 | " os.environ[\"OPENAI_API_KEY\"] = getpass(\"OpenAI API key =\")\n", |
66 | 64 | "\n", |
67 | 65 | "nest_asyncio.apply()" |
68 | | - ] |
| 66 | + ], |
| 67 | + "outputs": [], |
| 68 | + "execution_count": 1 |
69 | 69 | }, |
70 | 70 | { |
71 | 71 | "cell_type": "markdown", |
|
77 | 77 | }, |
78 | 78 | { |
79 | 79 | "cell_type": "code", |
80 | | - "execution_count": 2, |
81 | 80 | "metadata": { |
82 | 81 | "ExecuteTime": { |
83 | | - "end_time": "2025-02-10T11:24:18.899123Z", |
84 | | - "start_time": "2025-02-10T11:24:18.857755Z" |
| 82 | + "end_time": "2025-03-10T16:08:46.797620Z", |
| 83 | + "start_time": "2025-03-10T16:08:46.685305Z" |
85 | 84 | } |
86 | 85 | }, |
87 | | - "outputs": [], |
88 | 86 | "source": [ |
89 | 87 | "from getpass import getpass\n", |
90 | 88 | "\n", |
|
94 | 92 | " os.environ[\"OPENAI_API_KEY\"] = getpass(\"OpenAI API key =\")\n", |
95 | 93 | "\n", |
96 | 94 | "nest_asyncio.apply()" |
97 | | - ] |
| 95 | + ], |
| 96 | + "outputs": [], |
| 97 | + "execution_count": 2 |
98 | 98 | }, |
99 | 99 | { |
100 | 100 | "cell_type": "markdown", |
|
106 | 106 | }, |
107 | 107 | { |
108 | 108 | "cell_type": "code", |
109 | | - "execution_count": 3, |
110 | 109 | "metadata": { |
111 | 110 | "ExecuteTime": { |
112 | | - "end_time": "2025-02-10T11:24:20.152622Z", |
113 | | - "start_time": "2025-02-10T11:24:20.145640Z" |
| 111 | + "end_time": "2025-03-10T16:08:48.610751Z", |
| 112 | + "start_time": "2025-03-10T16:08:48.604079Z" |
114 | 113 | } |
115 | 114 | }, |
116 | | - "outputs": [], |
117 | 115 | "source": [ |
118 | 116 | "# os.environ[\"LANGSMITH_API_KEY\"] = getpass.getpass(\"Enter your LangSmith API key: \")\n", |
119 | 117 | "# os.environ[\"LANGSMITH_TRACING\"] = \"true\"" |
120 | | - ] |
| 118 | + ], |
| 119 | + "outputs": [], |
| 120 | + "execution_count": 3 |
121 | 121 | }, |
122 | 122 | { |
123 | 123 | "cell_type": "markdown", |
|
130 | 130 | }, |
131 | 131 | { |
132 | 132 | "cell_type": "code", |
133 | | - "execution_count": 4, |
134 | 133 | "metadata": { |
135 | 134 | "ExecuteTime": { |
136 | | - "end_time": "2025-02-10T11:24:23.723647Z", |
137 | | - "start_time": "2025-02-10T11:24:21.394870Z" |
| 135 | + "end_time": "2025-03-10T16:30:21.902215Z", |
| 136 | + "start_time": "2025-03-10T16:30:19.755014Z" |
138 | 137 | } |
139 | 138 | }, |
| 139 | + "source": [ |
| 140 | + "%pip install -qU langchain_community py-zerox" |
| 141 | + ], |
140 | 142 | "outputs": [ |
141 | 143 | { |
142 | 144 | "name": "stdout", |
|
146 | 148 | ] |
147 | 149 | } |
148 | 150 | ], |
149 | | - "source": [ |
150 | | - "%pip install -qU langchain_community py-zerox" |
151 | | - ] |
| 151 | + "execution_count": 4 |
152 | 152 | }, |
153 | 153 | { |
154 | 154 | "cell_type": "markdown", |
|
160 | 160 | ] |
161 | 161 | }, |
162 | 162 | { |
163 | | - "cell_type": "code", |
164 | | - "execution_count": 5, |
165 | 163 | "metadata": { |
166 | 164 | "ExecuteTime": { |
167 | | - "end_time": "2025-02-10T11:24:25.819517Z", |
168 | | - "start_time": "2025-02-10T11:24:25.091340Z" |
| 165 | + "end_time": "2025-03-10T16:30:23.336449Z", |
| 166 | + "start_time": "2025-03-10T16:30:23.328747Z" |
169 | 167 | } |
170 | 168 | }, |
| 169 | + "cell_type": "code", |
| 170 | + "source": [ |
| 171 | + "import asyncio\n", |
| 172 | + "import nest_asyncio\n", |
| 173 | + "nest_asyncio.apply()" |
| 174 | + ], |
171 | 175 | "outputs": [], |
| 176 | + "execution_count": 5 |
| 177 | + }, |
| 178 | + { |
| 179 | + "metadata": { |
| 180 | + "ExecuteTime": { |
| 181 | + "end_time": "2025-03-10T16:30:25.533375Z", |
| 182 | + "start_time": "2025-03-10T16:30:25.095083Z" |
| 183 | + } |
| 184 | + }, |
| 185 | + "cell_type": "code", |
| 186 | + "source": [ |
| 187 | + "def run_in_thread():\n", |
| 188 | + "    loop = asyncio.get_event_loop()  # get this thread's event loop (does not create a new one)\n",
| 189 | + " result = loop.run_until_complete(toto())\n", |
| 190 | + " return result\n", |
| 191 | + "\n", |
| 192 | + "from multiprocessing.pool import ThreadPool\n", |
| 193 | + "pool = ThreadPool(processes=1)\n", |
| 194 | + "async_result = pool.apply_async(run_in_thread)  # schedule run_in_thread on the worker thread\n",
| 195 | + "result=async_result.get()\n", |
| 196 | + "print(result)\n" |
| 197 | + ], |
| 198 | + "outputs": [ |
| 199 | + { |
| 200 | + "ename": "NameError", |
| 201 | + "evalue": "name 'toto' is not defined", |
| 202 | + "output_type": "error", |
| 203 | + "traceback": [ |
| 204 | + "\u001B[31m---------------------------------------------------------------------------\u001B[39m", |
| 205 | + "\u001B[31mNameError\u001B[39m Traceback (most recent call last)", |
| 206 | + "\u001B[36mCell\u001B[39m\u001B[36m \u001B[39m\u001B[32mIn[6]\u001B[39m\u001B[32m, line 9\u001B[39m\n\u001B[32m 7\u001B[39m pool = ThreadPool(processes=\u001B[32m1\u001B[39m)\n\u001B[32m 8\u001B[39m async_result = pool.apply_async(run_in_thread) \u001B[38;5;66;03m# tuple of args for foo\u001B[39;00m\n\u001B[32m----> \u001B[39m\u001B[32m9\u001B[39m result=\u001B[43masync_result\u001B[49m\u001B[43m.\u001B[49m\u001B[43mget\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[32m 10\u001B[39m \u001B[38;5;28mprint\u001B[39m(result)\n", |
| 207 | + "\u001B[36mFile \u001B[39m\u001B[32m~/miniconda3/lib/python3.12/multiprocessing/pool.py:774\u001B[39m, in \u001B[36mApplyResult.get\u001B[39m\u001B[34m(self, timeout)\u001B[39m\n\u001B[32m 772\u001B[39m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m._value\n\u001B[32m 773\u001B[39m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[32m--> \u001B[39m\u001B[32m774\u001B[39m \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;28mself\u001B[39m._value\n", |
| 208 | + "\u001B[36mFile \u001B[39m\u001B[32m~/miniconda3/lib/python3.12/multiprocessing/pool.py:125\u001B[39m, in \u001B[36mworker\u001B[39m\u001B[34m(inqueue, outqueue, initializer, initargs, maxtasks, wrap_exception)\u001B[39m\n\u001B[32m 123\u001B[39m job, i, func, args, kwds = task\n\u001B[32m 124\u001B[39m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[32m--> \u001B[39m\u001B[32m125\u001B[39m result = (\u001B[38;5;28;01mTrue\u001B[39;00m, \u001B[43mfunc\u001B[49m\u001B[43m(\u001B[49m\u001B[43m*\u001B[49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43m*\u001B[49m\u001B[43m*\u001B[49m\u001B[43mkwds\u001B[49m\u001B[43m)\u001B[49m)\n\u001B[32m 126\u001B[39m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mException\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[32m 127\u001B[39m \u001B[38;5;28;01mif\u001B[39;00m wrap_exception \u001B[38;5;129;01mand\u001B[39;00m func \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m _helper_reraises_exception:\n", |
| 209 | + "\u001B[36mCell\u001B[39m\u001B[36m \u001B[39m\u001B[32mIn[6]\u001B[39m\u001B[32m, line 3\u001B[39m, in \u001B[36mrun_in_thread\u001B[39m\u001B[34m()\u001B[39m\n\u001B[32m 1\u001B[39m \u001B[38;5;28;01mdef\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[34mrun_in_thread\u001B[39m():\n\u001B[32m 2\u001B[39m loop = asyncio.get_event_loop() \u001B[38;5;66;03m# Créer une nouvelle boucle d'événements pour ce thread\u001B[39;00m\n\u001B[32m----> \u001B[39m\u001B[32m3\u001B[39m result = loop.run_until_complete(\u001B[43mtoto\u001B[49m())\n\u001B[32m 4\u001B[39m \u001B[38;5;28;01mreturn\u001B[39;00m result\n", |
| 210 | + "\u001B[31mNameError\u001B[39m: name 'toto' is not defined" |
| 211 | + ] |
| 212 | + } |
| 213 | + ], |
| 214 | + "execution_count": 6 |
| 215 | + }, |
| 216 | + { |
| 217 | + "metadata": { |
| 218 | + "ExecuteTime": { |
| 219 | + "end_time": "2025-03-10T16:05:43.143734Z", |
| 220 | + "start_time": "2025-03-10T16:05:43.111814Z" |
| 221 | + } |
| 222 | + }, |
| 223 | + "cell_type": "code", |
172 | 224 | "source": [ |
173 | | - "from langchain_community.document_loaders import ZeroxPDFLoader\n", |
| 225 | + "loop = asyncio.get_running_loop()\n", |
| 226 | + "\n", |
| 227 | + "def _run_in_thread(loop):\n", |
| 228 | + "    loop = asyncio.get_event_loop()  # get this thread's event loop (does not create a new one)\n",
| 229 | + " result = loop.run_until_complete(toto())\n", |
| 230 | + " return result\n", |
| 231 | + "\n", |
| 232 | + "from multiprocessing.pool import ThreadPool\n", |
| 233 | + "pool = ThreadPool(processes=1)\n", |
| 234 | + "async_result = pool.apply_async(_run_in_thread,(loop,))  # (loop,) is the positional-args tuple\n",
| 235 | + "result = async_result.get()\n", |
| 236 | + "print(result)" |
| 237 | + ], |
| 238 | + "outputs": [ |
| 239 | + { |
| 240 | + "name": "stdout", |
| 241 | + "output_type": "stream", |
| 242 | + "text": [ |
| 243 | + "toto\n", |
| 244 | + "hello\n" |
| 245 | + ] |
| 246 | + } |
| 247 | + ], |
| 248 | + "execution_count": 26 |
| 249 | + }, |
| 250 | + { |
| 251 | + "cell_type": "code", |
| 252 | + "metadata": { |
| 253 | + "ExecuteTime": { |
| 254 | + "end_time": "2025-03-10T16:30:30.217668Z", |
| 255 | + "start_time": "2025-03-10T16:30:29.274838Z" |
| 256 | + } |
| 257 | + }, |
| 258 | + "source": [ |
| 259 | + "from langchain_community.document_loaders.pdf import ZeroxPDFLoader\n", |
174 | 260 | "\n", |
175 | 261 | "file_path = \"./example_data/layout-parser-paper.pdf\"\n", |
176 | 262 | "loader = ZeroxPDFLoader(file_path)" |
177 | | - ] |
| 263 | + ], |
| 264 | + "outputs": [], |
| 265 | + "execution_count": 7 |
178 | 266 | }, |
179 | 267 | { |
180 | 268 | "cell_type": "markdown", |
|
185 | 273 | }, |
186 | 274 | { |
187 | 275 | "cell_type": "code", |
188 | | - "execution_count": 6, |
189 | 276 | "metadata": { |
190 | 277 | "ExecuteTime": { |
191 | | - "end_time": "2025-02-10T11:27:17.781571Z", |
192 | | - "start_time": "2025-02-10T11:24:27.817480Z" |
| 278 | + "end_time": "2025-03-10T16:31:07.654904Z", |
| 279 | + "start_time": "2025-03-10T16:30:33.071884Z" |
193 | 280 | } |
194 | 281 | }, |
| 282 | + "source": [ |
| 283 | + "docs = loader.load()\n", |
| 284 | + "docs[0]" |
| 285 | + ], |
195 | 286 | "outputs": [ |
| 287 | + { |
| 288 | + "name": "stderr", |
| 289 | + "output_type": "stream", |
| 290 | + "text": [ |
| 291 | + "/home/pprados/workspace.bda/langchain/libs/community/.venv/lib/python3.12/site-packages/pyzerox/models/modellitellm.py:52: UserWarning: \n", |
| 292 | + " Custom system prompt was provided which overrides the default system prompt. We assume that you know what you are doing. \n", |
| 293 | + " . Default prompt for zerox is:\n", |
| 294 | + " \n", |
| 295 | + " Convert the following PDF page to markdown.\n", |
| 296 | + " Return only the markdown with no explanation text.\n", |
| 297 | + " Do not exclude any content from the page.\n", |
| 298 | + " \n", |
| 299 | + " warnings.warn(f\"{Messages.CUSTOM_SYSTEM_PROMPT_WARNING}. Default prompt for zerox is:\\n {DEFAULT_SYSTEM_PROMPT}\")\n" |
| 300 | + ] |
| 301 | + }, |
196 | 302 | { |
197 | 303 | "data": { |
198 | 304 | "text/plain": [ |
199 | | - "Document(metadata={'producer': 'pdfTeX-1.40.21', 'creator': 'LaTeX with hyperref', 'creationdate': '2021-06-22T01:27:10+00:00', 'author': '', 'keywords': '', 'moddate': '2021-06-22T01:27:10+00:00', 'ptex.fullbanner': 'This is pdfTeX, Version 3.14159265-2.6-1.40.21 (TeX Live 2020) kpathsea version 6.3.2', 'subject': '', 'title': '', 'trapped': 'False', 'total_pages': 16, 'source': './example_data/layout-parser-paper.pdf', 'num_pages': 16, 'page': 0}, page_content='# LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis\\n\\nZejian Shen¹, Ruochen Zhang², Melissa Dell³, Benjamin Charles Germain Lee⁴, Jacob Carlson³, and Weining Li⁵\\n\\n1 Allen Institute for AI \\ [email protected] \\n2 Brown University \\ [email protected] \\n3 Harvard University \\n{melissade11, jacob.carlson}@fas.harvard.edu \\n4 University of Washington \\nbgclgs.washington.edu \\n5 University of Waterloo \\ [email protected] \\n\\n## Abstract\\n\\nRecent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of important innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applications. 
The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout detection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digitization pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-world use cases. The library is publicly available at https://layout-parser.github.io\\n\\n**Keywords:** Document Image Analysis · Deep Learning · Layout Analysis · Character Recognition · Open Source library · Toolkit.\\n\\n## 1 Introduction\\n\\nDeep Learning (DL)-based approaches are the state-of-the-art for a wide range of document image analysis (DIA) tasks including document image classification [11]')" |
| 305 | + "Document(metadata={'producer': 'pdfTeX-1.40.21', 'creator': 'LaTeX with hyperref', 'creationdate': '2021-06-22T01:27:10+00:00', 'author': '', 'keywords': '', 'moddate': '2021-06-22T01:27:10+00:00', 'ptex.fullbanner': 'This is pdfTeX, Version 3.14159265-2.6-1.40.21 (TeX Live 2020) kpathsea version 6.3.2', 'subject': '', 'title': '', 'trapped': 'False', 'total_pages': 16, 'source': './example_data/layout-parser-paper.pdf', 'num_pages': 16, 'page': 0}, page_content='# LayoutParser: A Unified Toolkit for Deep Learning Based Document Image Analysis\\n\\nZejian Shen¹, Ruochen Zhang², Melissa Dell³, Benjamin Charles Germain Lee⁴, Jacob Carlson³, and Weining Li⁵\\n\\n¹ Allen Institute for AI \\ [email protected] \\n² Brown University \\ [email protected] \\n³ Harvard University \\n{melissadell, jacob.carlson}@fas.harvard.edu \\n⁴ University of Washington \\ [email protected] \\n⁵ University of Waterloo \\ [email protected] \\n\\n**Abstract.** Recent advances in document image analysis (DIA) have been primarily driven by the application of neural networks. Ideally, research outcomes could be easily deployed in production and extended for further investigation. However, various factors like loosely organized codebases and sophisticated model configurations complicate the easy reuse of important innovations by a wide audience. Though there have been on-going efforts to improve reusability and simplify deep learning (DL) model development in disciplines like natural language processing and computer vision, none of them are optimized for challenges in the domain of DIA. This represents a major gap in the existing toolkit, as DIA is central to academic research across a wide range of disciplines in the social sciences and humanities. This paper introduces LayoutParser, an open-source library for streamlining the usage of DL in DIA research and applications. 
The core LayoutParser library comes with a set of simple and intuitive interfaces for applying and customizing DL models for layout detection, character recognition, and many other document processing tasks. To promote extensibility, LayoutParser also incorporates a community platform for sharing both pre-trained models and full document digitization pipelines. We demonstrate that LayoutParser is helpful for both lightweight and large-scale digitization pipelines in real-world use cases. The library is publicly available at https://layout-parser.github.io \\n\\n**Keywords:** Document Image Analysis · Deep Learning · Layout Analysis · Character Recognition · Open Source library · Toolkit. \\n\\n# 1 Introduction\\n\\nDeep Learning (DL)-based approaches are the state-of-the-art for a wide range of document image analysis (DIA) tasks including document image classification [11].')" |
200 | 306 | ] |
201 | 307 | }, |
202 | | - "execution_count": 6, |
| 308 | + "execution_count": 8, |
203 | 309 | "metadata": {}, |
204 | 310 | "output_type": "execute_result" |
205 | 311 | } |
206 | 312 | ], |
207 | | - "source": [ |
208 | | - "docs = loader.load()\n", |
209 | | - "docs[0]" |
210 | | - ] |
| 313 | + "execution_count": 8 |
211 | 314 | }, |
212 | 315 | { |
213 | 316 | "cell_type": "code", |
214 | | - "execution_count": 7, |
215 | 317 | "metadata": { |
216 | 318 | "ExecuteTime": { |
217 | | - "end_time": "2025-02-10T11:27:51.893033Z", |
218 | | - "start_time": "2025-02-10T11:27:51.889072Z" |
| 319 | + "end_time": "2025-03-10T16:31:58.192569Z", |
| 320 | + "start_time": "2025-03-10T16:31:58.182464Z" |
219 | 321 | } |
220 | 322 | }, |
| 323 | + "source": [ |
| 324 | + "import pprint\n", |
| 325 | + "\n", |
| 326 | + "pprint.pp(docs[0].metadata)" |
| 327 | + ], |
221 | 328 | "outputs": [ |
222 | 329 | { |
223 | 330 | "name": "stdout", |
|
241 | 348 | ] |
242 | 349 | } |
243 | 350 | ], |
244 | | - "source": [ |
245 | | - "import pprint\n", |
246 | | - "\n", |
247 | | - "pprint.pp(docs[0].metadata)" |
248 | | - ] |
| 351 | + "execution_count": 9 |
249 | 352 | }, |
250 | 353 | { |
251 | 354 | "cell_type": "markdown", |
|
256 | 359 | }, |
257 | 360 | { |
258 | 361 | "cell_type": "code", |
259 | | - "execution_count": 8, |
260 | 362 | "metadata": { |
| 363 | + "jupyter": { |
| 364 | + "is_executing": true |
| 365 | + }, |
261 | 366 | "ExecuteTime": { |
262 | | - "end_time": "2025-02-10T11:28:44.103420Z", |
263 | | - "start_time": "2025-02-10T11:28:05.933389Z" |
| 367 | + "start_time": "2025-03-10T16:33:11.308283Z" |
264 | 368 | } |
265 | 369 | }, |
266 | | - "outputs": [ |
267 | | - { |
268 | | - "data": { |
269 | | - "text/plain": [ |
270 | | - "6" |
271 | | - ] |
272 | | - }, |
273 | | - "execution_count": 8, |
274 | | - "metadata": {}, |
275 | | - "output_type": "execute_result" |
276 | | - } |
277 | | - ], |
278 | 370 | "source": [ |
279 | 371 | "pages = []\n", |
280 | 372 | "for doc in loader.lazy_load():\n", |
|
285 | 377 | "\n", |
286 | 378 | " pages = []\n", |
287 | 379 | "len(pages)" |
288 | | - ] |
| 380 | + ], |
| 381 | + "outputs": [ |
| 382 | + { |
| 383 | + "name": "stderr", |
| 384 | + "output_type": "stream", |
| 385 | + "text": [ |
| 386 | + "/home/pprados/workspace.bda/langchain/libs/community/.venv/lib/python3.12/site-packages/pyzerox/models/modellitellm.py:52: UserWarning: \n", |
| 387 | + " Custom system prompt was provided which overrides the default system prompt. We assume that you know what you are doing. \n", |
| 388 | + " . Default prompt for zerox is:\n", |
| 389 | + " \n", |
| 390 | + " Convert the following PDF page to markdown.\n", |
| 391 | + " Return only the markdown with no explanation text.\n", |
| 392 | + " Do not exclude any content from the page.\n", |
| 393 | + " \n", |
| 394 | + " warnings.warn(f\"{Messages.CUSTOM_SYSTEM_PROMPT_WARNING}. Default prompt for zerox is:\\n {DEFAULT_SYSTEM_PROMPT}\")\n" |
| 395 | + ] |
| 396 | + } |
| 397 | + ], |
| 398 | + "execution_count": null |
289 | 399 | }, |
290 | 400 | { |
291 | 401 | "cell_type": "code", |
|
0 commit comments