|
13 | 13 | from httpx import AsyncClient |
14 | 14 |
|
15 | 15 | from meilisearch_python_async._http_requests import HttpRequests |
| 16 | +from meilisearch_python_async._utils import is_pydantic_2, iso_to_date_time |
16 | 17 | from meilisearch_python_async.errors import InvalidDocumentError, MeilisearchError |
17 | 18 | from meilisearch_python_async.models.documents import DocumentsInfo |
18 | 19 | from meilisearch_python_async.models.index import IndexStats |
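
The new import pulls two helpers from a `_utils` module that is not shown in this diff. A minimal sketch of what it presumably contains: `iso_to_date_time` looks like the former private `_iso_to_date_time` removed at the bottom of this diff, and `is_pydantic_2` is assumed here to be a simple module-level version check (the exact detection used by the library is not visible in this change).

# meilisearch_python_async/_utils.py -- assumed contents, not part of this diff
from __future__ import annotations

from datetime import datetime

from pydantic import VERSION  # pydantic exposes its version string in both v1 and v2

# Assumption: a flag derived once from the installed pydantic version.
is_pydantic_2 = VERSION.startswith("2")


def iso_to_date_time(iso_date: datetime | str | None) -> datetime | None:
    """Convert an ISO string from Meilisearch to a datetime.

    Meilisearch sometimes returns more fractional-second digits than
    ``datetime.strptime`` accepts, so the extra digits are trimmed.
    """
    if not iso_date:
        return None

    if isinstance(iso_date, datetime):
        return iso_date

    try:
        return datetime.strptime(iso_date, "%Y-%m-%dT%H:%M:%S.%fZ")
    except ValueError:
        split = iso_date.split(".")
        reduce = len(split[1]) - 6
        reduced = f"{split[0]}.{split[1][:-reduce]}Z"
        return datetime.strptime(reduced, "%Y-%m-%dT%H:%M:%S.%fZ")
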
@@ -54,8 +55,8 @@ def __init__( |
54 | 55 | """ |
55 | 56 | self.uid = uid |
56 | 57 | self.primary_key = primary_key |
57 | | - self.created_at: datetime | None = _iso_to_date_time(created_at) |
58 | | - self.updated_at: datetime | None = _iso_to_date_time(updated_at) |
| 58 | + self.created_at: datetime | None = iso_to_date_time(created_at) |
| 59 | + self.updated_at: datetime | None = iso_to_date_time(updated_at) |
59 | 60 | self._base_url = "indexes/" |
60 | 61 | self._base_url_with_uid = f"{self._base_url}{self.uid}" |
61 | 62 | self._documents_url = f"{self._base_url_with_uid}/documents" |
@@ -175,10 +176,10 @@ async def fetch_info(self) -> Index: |
175 | 176 | self.primary_key = index_dict["primaryKey"] |
176 | 177 | loop = get_running_loop() |
177 | 178 | self.created_at = await loop.run_in_executor( |
178 | | - None, partial(_iso_to_date_time, index_dict["createdAt"]) |
| 179 | + None, partial(iso_to_date_time, index_dict["createdAt"]) |
179 | 180 | ) |
180 | 181 | self.updated_at = await loop.run_in_executor( |
181 | | - None, partial(_iso_to_date_time, index_dict["updatedAt"]) |
| 182 | + None, partial(iso_to_date_time, index_dict["updatedAt"]) |
182 | 183 | ) |
183 | 184 | return self |
184 | 185 |
|
@@ -1477,7 +1478,10 @@ async def update_settings(self, body: MeilisearchSettings) -> TaskInfo: |
1477 | 1478 | >>> index = client.index("movies") |
1478 | 1479 | >>> await index.update_settings(new_settings) |
1479 | 1480 | """ |
1480 | | - body_dict = {k: v for k, v in body.dict(by_alias=True).items() if v is not None} |
| 1481 | + if is_pydantic_2: |
| 1482 | + body_dict = {k: v for k, v in body.model_dump(by_alias=True).items() if v is not None} # type: ignore[attr-defined] |
| 1483 | + else: # pragma: no cover |
| 1484 | + body_dict = {k: v for k, v in body.dict(by_alias=True).items() if v is not None} # type: ignore[attr-defined] |
1481 | 1485 |
|
1482 | 1486 | url = f"{self._settings_url}" |
1483 | 1487 | response = await self._http_requests.patch(url, body_dict) |
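
For context, this branch (and the identical ones below for typo tolerance, faceting, and pagination) exists because pydantic v2 renamed `.dict()` to `.model_dump()`, while v1 only has `.dict()`; hence the `# type: ignore[attr-defined]` on whichever name the installed version lacks. A minimal standalone illustration of the same pattern, using a hypothetical model rather than the library's `MeilisearchSettings`; it checks `hasattr` so it runs on either pydantic version, whereas the PR reuses the shared `is_pydantic_2` flag instead.

from typing import List, Optional

from pydantic import BaseModel


class ExampleSettings(BaseModel):  # hypothetical stand-in for MeilisearchSettings
    distinct_attribute: Optional[str] = None
    ranking_rules: Optional[List[str]] = None


settings = ExampleSettings(distinct_attribute="id")

if hasattr(settings, "model_dump"):  # pydantic 2
    raw = settings.model_dump(by_alias=True)
else:  # pydantic 1
    raw = settings.dict(by_alias=True)

# Drop unset values so the PATCH body only contains explicit settings.
body = {k: v for k, v in raw.items() if v is not None}
print(body)  # {'distinct_attribute': 'id'}
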
@@ -2184,7 +2188,11 @@ async def update_typo_tolerance(self, typo_tolerance: TypoTolerance) -> TaskInfo |
2184 | 2188 | >>> await index.update_typo_tolerance() |
2185 | 2189 | """ |
2186 | 2190 | url = f"{self._settings_url}/typo-tolerance" |
2187 | | - response = await self._http_requests.patch(url, typo_tolerance.dict(by_alias=True)) |
| 2191 | + |
| 2192 | + if is_pydantic_2: |
| 2193 | + response = await self._http_requests.patch(url, typo_tolerance.model_dump(by_alias=True)) # type: ignore[attr-defined] |
| 2194 | + else: # pragma: no cover |
| 2195 | + response = await self._http_requests.patch(url, typo_tolerance.dict(by_alias=True)) # type: ignore[attr-defined] |
2188 | 2196 |
|
2189 | 2197 | return TaskInfo(**response.json()) |
2190 | 2198 |
|
@@ -2256,7 +2264,11 @@ async def update_faceting(self, faceting: Faceting) -> TaskInfo: |
2256 | 2264 | >>> await index.update_faceting(faceting=Faceting(max_values_per_facet=100)) |
2257 | 2265 | """ |
2258 | 2266 | url = f"{self._settings_url}/faceting" |
2259 | | - response = await self._http_requests.patch(url, faceting.dict(by_alias=True)) |
| 2267 | + |
| 2268 | + if is_pydantic_2: |
| 2269 | + response = await self._http_requests.patch(url, faceting.model_dump(by_alias=True)) # type: ignore[attr-defined] |
| 2270 | + else: # pragma: no cover |
| 2271 | + response = await self._http_requests.patch(url, faceting.dict(by_alias=True)) # type: ignore[attr-defined] |
2260 | 2272 |
|
2261 | 2273 | return TaskInfo(**response.json()) |
2262 | 2274 |
|
@@ -2329,7 +2341,11 @@ async def update_pagination(self, settings: Pagination) -> TaskInfo: |
2329 | 2341 | >>> await index.update_pagination(settings=Pagination(max_total_hits=123)) |
2330 | 2342 | """ |
2331 | 2343 | url = f"{self._settings_url}/pagination" |
2332 | | - response = await self._http_requests.patch(url, settings.dict(by_alias=True)) |
| 2344 | + |
| 2345 | + if is_pydantic_2: |
| 2346 | + response = await self._http_requests.patch(url, settings.model_dump(by_alias=True)) # type: ignore[attr-defined] |
| 2347 | + else: # pragma: no cover |
| 2348 | + response = await self._http_requests.patch(url, settings.dict(by_alias=True)) # type: ignore[attr-defined] |
2333 | 2349 |
|
2334 | 2350 | return TaskInfo(**response.json()) |
2335 | 2351 |
|
@@ -2375,27 +2391,6 @@ def _combine_documents(documents: list[list[Any]]) -> list[Any]: |
2375 | 2391 | return [x for y in documents for x in y] |
2376 | 2392 |
|
2377 | 2393 |
|
2378 | | -def _iso_to_date_time(iso_date: datetime | str | None) -> datetime | None: |
2379 | | - """Handle conversion of iso string to datetime. |
2380 | | -
|
2381 | | - The microseconds from Meilisearch are sometimes too long for python to convert so this |
2382 | | - strips off the last digits to shorten it when that happens. |
2383 | | - """ |
2384 | | - if not iso_date: |
2385 | | - return None |
2386 | | - |
2387 | | - if isinstance(iso_date, datetime): |
2388 | | - return iso_date |
2389 | | - |
2390 | | - try: |
2391 | | - return datetime.strptime(iso_date, "%Y-%m-%dT%H:%M:%S.%fZ") |
2392 | | - except ValueError: |
2393 | | - split = iso_date.split(".") |
2394 | | - reduce = len(split[1]) - 6 |
2395 | | - reduced = f"{split[0]}.{split[1][:-reduce]}Z" |
2396 | | - return datetime.strptime(reduced, "%Y-%m-%dT%H:%M:%S.%fZ") |
2397 | | - |
2398 | | - |
2399 | 2394 | async def _load_documents_from_file( |
2400 | 2395 | file_path: Path | str, |
2401 | 2396 | csv_delimiter: str | None = None, |
|