Commit 100367c

Address lack of support for office formats
1 parent 6fac970

3 files changed: +74 −10 lines changed

app/backend/prepdocslib/pdfparser.py

Lines changed: 25 additions & 9 deletions

@@ -14,6 +14,7 @@
 )
 from azure.core.credentials import AzureKeyCredential
 from azure.core.credentials_async import AsyncTokenCredential
+from azure.core.exceptions import HttpResponseError
 from PIL import Image
 from pypdf import PdfReader
 
@@ -68,6 +69,7 @@ async def parse(self, content: IO) -> AsyncGenerator[Page, None]:
         async with DocumentIntelligenceClient(
             endpoint=self.endpoint, credential=self.credential
         ) as document_intelligence_client:
+            file_analyzed = False
             if self.use_content_understanding:
                 if self.content_understanding_endpoint is None:
                     raise ValueError("Content Understanding is enabled but no endpoint was provided")
@@ -77,15 +79,29 @@ async def parse(self, content: IO) -> AsyncGenerator[Page, None]:
                     )
                 cu_describer = ContentUnderstandingDescriber(self.content_understanding_endpoint, self.credential)
                 content_bytes = content.read()
-                poller = await document_intelligence_client.begin_analyze_document(
-                    model_id="prebuilt-layout",
-                    analyze_request=AnalyzeDocumentRequest(bytes_source=content_bytes),
-                    output=["figures"],
-                    features=["ocrHighResolution"],
-                    output_content_format="markdown",
-                )
-                doc_for_pymupdf = pymupdf.open(stream=io.BytesIO(content_bytes))
-            else:
+                try:
+                    poller = await document_intelligence_client.begin_analyze_document(
+                        model_id="prebuilt-layout",
+                        analyze_request=AnalyzeDocumentRequest(bytes_source=content_bytes),
+                        output=["figures"],
+                        features=["ocrHighResolution"],
+                        output_content_format="markdown",
+                    )
+                    doc_for_pymupdf = pymupdf.open(stream=io.BytesIO(content_bytes))
+                    file_analyzed = True
+                except HttpResponseError as e:
+                    content.seek(0)
+                    if e.error.code == "InvalidArgument":
+                        logger.warning(
+                            "This document type does not support media description. Proceeding with standard analysis."
+                        )
+                    else:
+                        logger.warning(
+                            "Unexpected error analyzing document for media description: %s. Proceeding with standard analysis.",
+                            e,
+                        )
+
+            if file_analyzed is False:
                 poller = await document_intelligence_client.begin_analyze_document(
                     model_id=self.model_id, analyze_request=content, content_type="application/octet-stream"
                 )
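The essence of the backend change is a try/except fallback: attempt the figure-aware `prebuilt-layout` analysis first, and if Document Intelligence rejects the document (as it does for office formats such as DOCX), rewind the stream and fall back to the standard analysis. Below is a condensed sketch of that pattern, reusing the client calls shown in the diff; the helper name `analyze_with_fallback`, the standalone structure, and the simplified error handling are illustrative rather than code from this commit.

```python
import io
import logging

from azure.ai.documentintelligence.aio import DocumentIntelligenceClient
from azure.ai.documentintelligence.models import AnalyzeDocumentRequest
from azure.core.exceptions import HttpResponseError

logger = logging.getLogger(__name__)


async def analyze_with_fallback(client: DocumentIntelligenceClient, content: io.IOBase, model_id: str):
    """Sketch: try the figure-aware analysis, fall back to plain layout analysis on failure."""
    content_bytes = content.read()
    try:
        # Rich path: markdown output plus figure metadata, needed for media description.
        poller = await client.begin_analyze_document(
            model_id="prebuilt-layout",
            analyze_request=AnalyzeDocumentRequest(bytes_source=content_bytes),
            output=["figures"],
            features=["ocrHighResolution"],
            output_content_format="markdown",
        )
    except HttpResponseError as e:
        # The service reports unsupported document types as an InvalidArgument error.
        content.seek(0)
        if e.error is not None and e.error.code == "InvalidArgument":
            logger.warning("This document type does not support media description; using standard analysis.")
        else:
            logger.warning("Unexpected error during figure-aware analysis: %s; using standard analysis.", e)
        # Plain path: analyze the raw stream with the configured model.
        poller = await client.begin_analyze_document(
            model_id=model_id, analyze_request=content, content_type="application/octet-stream"
        )
    return await poller.result()
```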

docs/deploy_features.md

Lines changed: 3 additions & 1 deletion

@@ -163,7 +163,6 @@ By default, if your documents contain image-like figures, the data ingestion pro
 so users will not be able to ask questions about them.
 
 You can optionally enable the description of media content using Azure Content Understanding. When enabled, the data ingestion process will send figures to Azure Content Understanding and replace the figure with the description in the indexed document.
-To learn more about this process and compare it to the gpt-4 vision integration, see [this guide](./data_ingestion.md#media-description).
 
 To enable media description with Azure Content Understanding, run:
 
@@ -175,6 +174,9 @@ If you have already run `azd up`, you will need to run `azd provision` to create
 If you have already indexed your documents and want to re-index them with the media descriptions,
 first [remove the existing documents](./data_ingestion.md#removing-documents) and then [re-ingest the data](./data_ingestion.md#indexing-additional-documents).
 
+⚠️ This feature does not yet support DOCX, PPTX, or XLSX formats. If you have figures in those formats, they will be ignored.
+Convert them first to PDF or image formats to enable media description.
+
 ## Enabling client-side chat history
 
 This feature allows users to view the chat history of their conversation, stored in the browser using [IndexedDB](https://developer.mozilla.org/docs/Web/API/IndexedDB_API). That means the chat history will be available only on the device where the chat was initiated. To enable browser-stored chat history, run:
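Until office formats are supported, the note added above suggests converting DOCX, PPTX, and XLSX files to PDF before ingestion. A minimal sketch of one way to batch-convert them, assuming LibreOffice (the `soffice` command) is installed on the machine preparing the data; this tooling is not part of the repo:

```python
import subprocess
from pathlib import Path

OFFICE_SUFFIXES = {".docx", ".pptx", ".xlsx"}


def convert_office_to_pdf(source_dir: str, output_dir: str) -> None:
    """Convert Office documents in source_dir to PDFs in output_dir via headless LibreOffice."""
    out = Path(output_dir)
    out.mkdir(parents=True, exist_ok=True)
    for path in sorted(Path(source_dir).iterdir()):
        if path.suffix.lower() in OFFICE_SUFFIXES:
            # LibreOffice writes <name>.pdf into the directory given by --outdir.
            subprocess.run(
                ["soffice", "--headless", "--convert-to", "pdf", "--outdir", str(out), str(path)],
                check=True,
            )
```

The resulting PDFs can then go through the normal ingestion flow, where figures are eligible for media description.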

tests/test_pdfparser.py

Lines changed: 46 additions & 0 deletions

@@ -4,6 +4,7 @@
 import pathlib
 from unittest.mock import AsyncMock, MagicMock
 
+import azure.core.exceptions
 import pymupdf
 import pytest
 from azure.ai.documentintelligence.aio import DocumentIntelligenceClient
@@ -308,3 +309,48 @@ async def mock_describe_image(self, image_bytes):
         pages[0].text
         == "# Simple Figure\n\nThis text is before the figure and NOT part of it.\n\n\n<figure><figcaption>Figure 1<br>Pie chart</figcaption></figure>\n\n\nThis is text after the figure that's not part of it."
     )
+
+
+@pytest.mark.asyncio
+async def test_parse_unsupportedformat(monkeypatch, caplog):
+    mock_poller = MagicMock()
+
+    async def mock_begin_analyze_document(self, model_id, analyze_request, **kwargs):
+        class MockErrorResponse:
+            def __init__(self):
+                self.reason = "InvalidArgument"
+                self.status_code = 400
+                self.error = {"code": "InvalidArgument"}
+
+        if kwargs.get("features") == ["ocrHighResolution"]:
+            raise azure.core.exceptions.HttpResponseError(message="InvalidArgument", response=MockErrorResponse())
+        else:
+            return mock_poller
+
+    async def mock_poller_result():
+        return AnalyzeResult(
+            content="Page content",
+            pages=[DocumentPage(page_number=1, spans=[DocumentSpan(offset=0, length=12)])],
+            tables=[],
+            figures=[],
+        )
+
+    monkeypatch.setattr(DocumentIntelligenceClient, "begin_analyze_document", mock_begin_analyze_document)
+    monkeypatch.setattr(mock_poller, "result", mock_poller_result)
+
+    parser = DocumentAnalysisParser(
+        endpoint="https://example.com",
+        credential=MockAzureCredential(),
+        use_content_understanding=True,
+        content_understanding_endpoint="https://example.com",
+    )
+    content = io.BytesIO(b"pdf content bytes")
+    content.name = "test.docx"
+    with caplog.at_level(logging.WARNING):
+        pages = [page async for page in parser.parse(content)]
+        assert "This document type does not support media description." in caplog.text
+
+    assert len(pages) == 1
+    assert pages[0].page_num == 0
+    assert pages[0].offset == 0
+    assert pages[0].text == "Page content"
