|
303 | 303 | """ |
304 | 304 | Helper: Similar to the `save_objects` method but requires a Push connector (https://www.algolia.com/doc/guides/sending-and-managing-data/send-and-update-your-data/connectors/push/) to be created first, in order to transform records before indexing them to Algolia. The `region` must have been passed to the client's config at instantiation. |
305 | 305 | """ |
306 | | - return {{^isSyncClient}}await {{/isSyncClient}}self.chunked_push(index_name=index_name, objects=objects, action=Action.ADDOBJECT, wait_for_tasks=wait_for_tasks, batch_size=batch_size, request_options=request_options) |
| 306 | + if self._ingestion_transporter is None: |
| 307 | + raise ValueError("`region` must be provided at client instantiation before calling this method.") |
| 308 | + return {{^isSyncClient}}await {{/isSyncClient}}self._ingestion_transporter.chunked_push(index_name=index_name, objects=objects, action=Action.ADDOBJECT, wait_for_tasks=wait_for_tasks, batch_size=batch_size, request_options=request_options) |
307 | 309 |
|
308 | 310 | {{^isSyncClient}}async {{/isSyncClient}}def delete_objects( |
309 | 311 | self, |
|
344 | 346 | """ |
345 | 347 | Helper: Similar to the `partial_update_objects` method but requires a Push connector (https://www.algolia.com/doc/guides/sending-and-managing-data/send-and-update-your-data/connectors/push/) to be created first, in order to transform records before indexing them to Algolia. The `region` must have been passed to the client's config at instantiation. |
346 | 348 | """ |
347 | | - return {{^isSyncClient}}await {{/isSyncClient}}self.chunked_push(index_name=index_name, objects=objects, action=Action.PARTIALUPDATEOBJECT if create_if_not_exists else Action.PARTIALUPDATEOBJECTNOCREATE, wait_for_tasks=wait_for_tasks, batch_size=batch_size, request_options=request_options) |
348 | | -
|
349 | | - {{^isSyncClient}}async {{/isSyncClient}}def chunked_push( |
350 | | - self, |
351 | | - index_name: str, |
352 | | - objects: List[Dict[str, Any]], |
353 | | - action: Action = Action.ADDOBJECT, |
354 | | - wait_for_tasks: bool = False, |
355 | | - batch_size: int = 1000, |
356 | | - reference_index_name: Optional[str] = None, |
357 | | - request_options: Optional[Union[dict, RequestOptions]] = None, |
358 | | - ) -> List[WatchResponse]: |
359 | | - """ |
360 | | - Helper: Chunks the given `objects` list into subsets of `batch_size` elements (1000 max) so that each fits in a `push` request, leveraging the Transformation pipeline set up in the Push connector (https://www.algolia.com/doc/guides/sending-and-managing-data/send-and-update-your-data/connectors/push/). |
361 | | - """ |
362 | | -
|
363 | 349 | if self._ingestion_transporter is None: |
364 | 350 | raise ValueError("`region` must be provided at client instantiation before calling this method.") |
365 | | - records: List[PushTaskRecords] = [] |
366 | | - responses: List[WatchResponse] = [] |
367 | | - for i, obj in enumerate(objects): |
368 | | - records.append(obj) # pyright: ignore |
369 | | - if len(records) == batch_size or i == len(objects) - 1: |
370 | | - responses.append( |
371 | | - {{^isSyncClient}}await {{/isSyncClient}}self._ingestion_transporter.push( |
372 | | - index_name=index_name, |
373 | | - push_task_payload={ |
374 | | - "action": action, |
375 | | - "records": records, |
376 | | - }, |
377 | | - reference_index_name=reference_index_name, |
378 | | - request_options=request_options, |
379 | | - ) |
380 | | - ) |
381 | | - requests = [] |
382 | | - if wait_for_tasks: |
383 | | - for response in responses: |
384 | | - {{^isSyncClient}}async {{/isSyncClient}}def _func(_: Optional[Event]) -> Event: |
385 | | - if self._ingestion_transporter is None: |
386 | | - raise ValueError( |
387 | | - "`region` must be provided at client instantiation before calling this method." |
388 | | - ) |
389 | | - if response.event_id is None: |
390 | | - raise ValueError( |
391 | | - "received unexpected response from the push endpoint, eventID must not be undefined" |
392 | | - ) |
393 | | - try: |
394 | | - return {{^isSyncClient}}await {{/isSyncClient}}self._ingestion_transporter.get_event(run_id=response.run_id, event_id=response.event_id, request_options=request_options) |
395 | | - except RequestException as e: |
396 | | - if e.status_code == 404: |
397 | | - return None # pyright: ignore |
398 | | - raise e |
399 | | -
|
400 | | - _retry_count = 0 |
401 | | -
|
402 | | - def _aggregator(_: Event | None) -> None: |
403 | | - nonlocal _retry_count |
404 | | - _retry_count += 1 |
405 | | -
|
406 | | - def _validate(_resp: Event | None) -> bool: |
407 | | - return _resp is not None |
408 | | -
|
409 | | - timeout = RetryTimeout() |
410 | | -
|
411 | | - {{^isSyncClient}}await {{/isSyncClient}}create_iterable{{#isSyncClient}}_sync{{/isSyncClient}}( |
412 | | - func=_func, |
413 | | - validate=_validate, |
414 | | - aggregator=_aggregator, |
415 | | - timeout=lambda: timeout(_retry_count), |
416 | | - error_validate=lambda _: _retry_count >= 50, |
417 | | - error_message=lambda _: f"The maximum number of retries exceeded. (${_retry_count}/${50})", |
418 | | - ) |
419 | | - return responses |
| 351 | + return {{^isSyncClient}}await {{/isSyncClient}}self._ingestion_transporter.chunked_push(index_name=index_name, objects=objects, action=Action.PARTIALUPDATEOBJECT if create_if_not_exists else Action.PARTIALUPDATEOBJECTNOCREATE, wait_for_tasks=wait_for_tasks, batch_size=batch_size, request_options=request_options) |
420 | 352 |
|
421 | 353 | {{^isSyncClient}}async {{/isSyncClient}}def chunked_batch( |
422 | 354 | self, |
|
0 commit comments