@@ -61,7 +61,7 @@ static void InitializeConsumers(py::class_<DuckDBPyRelation> &m) {
 	         py::arg("date_as_object") = false)
 	    .def("fetch_df_chunk", &DuckDBPyRelation::FetchDFChunk, "Execute and fetch a chunk of the rows",
 	         py::arg("vectors_per_chunk") = 1, py::kw_only(), py::arg("date_as_object") = false)
-	    .def("arrow", &DuckDBPyRelation::ToArrowTable, "Execute and fetch all rows as an Arrow Table",
+	    .def("arrow", &DuckDBPyRelation::ToRecordBatch, "Execute and return an Arrow Record Batch Reader that yields all rows",
 	         py::arg("batch_size") = 1000000)
 	    .def("fetch_arrow_table", &DuckDBPyRelation::ToArrowTable, "Execute and fetch all rows as an Arrow Table",
 	         py::arg("batch_size") = 1000000)
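
This hunk rebinds the relation's arrow() consumer from ToArrowTable to ToRecordBatch, so arrow() no longer materializes a full Arrow Table and instead returns a streaming Arrow Record Batch Reader. A minimal Python-side sketch of the resulting behavior, assuming the standard duckdb.sql() entry point (the query itself is illustrative):

import duckdb

rel = duckdb.sql("SELECT i FROM range(2500000) t(i)")

# Before this change, rel.arrow() returned a pyarrow.Table; after it,
# rel.arrow() returns a pyarrow.RecordBatchReader that streams the
# result in batches of up to batch_size rows.
reader = rel.arrow(batch_size=1_000_000)
for batch in reader:       # yields pyarrow.RecordBatch objects
    print(batch.num_rows)  # at most 1_000_000 rows per batch

# fetch_arrow_table is unchanged and still materializes everything:
table = duckdb.sql("SELECT 42 AS answer").fetch_arrow_table()
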
@@ -78,7 +78,7 @@ static void InitializeConsumers(py::class_<DuckDBPyRelation> &m) {
 	)";
 	m.def("__arrow_c_stream__", &DuckDBPyRelation::ToArrowCapsule, capsule_docs,
 	      py::arg("requested_schema") = py::none());
-	m.def("record_batch", &DuckDBPyRelation::ToRecordBatch,
+	m.def("fetch_record_batch", &DuckDBPyRelation::ToRecordBatch,
 	      "Execute and return an Arrow Record Batch Reader that yields all rows", py::arg("batch_size") = 1000000)
 	    .def("fetch_arrow_reader", &DuckDBPyRelation::ToRecordBatch,
 	      "Execute and return an Arrow Record Batch Reader that yields all rows", py::arg("batch_size") = 1000000);