Commit cc7add6

add assert for total_rows
1 parent 00f203e commit cc7add6

File tree

4 files changed, +13 -6 lines changed


tests/benchmark/read_gbq_colab/aggregate_output.py

Lines changed: 2 additions & 0 deletions

@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import pathlib
+import typing

 import benchmark.utils as utils

@@ -27,6 +28,7 @@ def aggregate_output(*, project_id, dataset_id, table_id):

     # Simulate getting the first page, since we'll always do that first in the UI.
     batches = df.to_pandas_batches(page_size=PAGE_SIZE)
+    assert typing.cast(typing.Any, batches).total_rows >= 0
     next(iter(batches))

     # To simulate very small rows that can only fit a boolean,
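
The typing.cast(typing.Any, batches) step is presumably needed because the declared return type of to_pandas_batches does not advertise a total_rows attribute, even though the object returned at runtime carries one; casting to Any keeps the type checker quiet while the assert exercises the attribute. A minimal sketch of the pattern, assuming a DataFrame loaded with bigframes.pandas and an illustrative page size of 100:

import typing

import bigframes.pandas as bpd

# Hypothetical query; any DataFrame from read_gbq behaves the same way.
df = bpd.read_gbq("SELECT TRUE AS col_bool_0")

batches = df.to_pandas_batches(page_size=100)

# total_rows is reached through a cast to Any because the static type of
# batches does not declare the attribute; the benchmark only sanity-checks it.
assert typing.cast(typing.Any, batches).total_rows >= 0

first_page = next(iter(batches))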

tests/benchmark/read_gbq_colab/filter_output.py

Lines changed: 7 additions & 4 deletions

@@ -12,9 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import pathlib
+import typing

 import benchmark.utils as utils

+import bigframes.core.blocks
 import bigframes.pandas as bpd

 PAGE_SIZE = utils.READ_GBQ_COLAB_PAGE_SIZE
@@ -37,13 +39,14 @@ def filter_output(
     # Simulate the user filtering by a column and visualizing those results
     df_filtered = df[df["col_bool_0"]]
     batches_filtered = df_filtered.to_pandas_batches(page_size=PAGE_SIZE)
-    assert batches_filtered.total_rows >= 0
-
+    batches_filtered = typing.cast(
+        bigframes.core.blocks.PandasBatches, batches_filtered
+    )
+    rows = batches_filtered.total_rows
+    assert rows >= 0
     # It's possible we don't have any pages at all, since we filtered out all
     # matching rows.
     first_page = next(iter(batches_filtered))
-    rows = batches_filtered.total_rows
-    assert rows is not None
     assert len(first_page.index) <= rows

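filter_output.py takes a different route: rather than erasing the type with typing.Any, it casts to bigframes.core.blocks.PandasBatches, which appears to be the concrete batch iterator returned at runtime, so total_rows stays visible to the type checker. Replacing the old "assert rows is not None" with "assert rows >= 0" still catches a None value, since comparing None to an int raises TypeError in Python 3. A rough sketch of that shape, reusing the df_filtered and PAGE_SIZE names from the diff:

import typing

import bigframes.core.blocks

batches_filtered = df_filtered.to_pandas_batches(page_size=PAGE_SIZE)

# Narrow the type instead of erasing it, so total_rows is a typed attribute.
batches_filtered = typing.cast(
    bigframes.core.blocks.PandasBatches, batches_filtered
)
rows = batches_filtered.total_rows
assert rows >= 0  # would also fail (TypeError) if total_rows were ever None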

tests/benchmark/read_gbq_colab/first_page.py

Lines changed: 1 addition & 2 deletions

@@ -30,10 +30,9 @@ def first_page(*, project_id, dataset_id, table_id):

     # Get number of rows (to calculate number of pages) and the first page.
     batches = df.to_pandas_batches(page_size=PAGE_SIZE)
+    assert typing.cast(typing.Any, batches).total_rows >= 0
     first_page = next(iter(batches))
     assert first_page is not None
-    total_rows = typing.cast(typing.Any, batches).total_rows
-    assert total_rows is not None


 if __name__ == "__main__":
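
The comment in first_page.py ("to calculate number of pages") hints at how total_rows is meant to be used downstream. A hypothetical sketch of that page-count arithmetic, assuming df and PAGE_SIZE as in the benchmark:

import math
import typing

batches = df.to_pandas_batches(page_size=PAGE_SIZE)
total_rows = typing.cast(typing.Any, batches).total_rows

# Ceiling division: 0 rows -> 0 pages, 1..PAGE_SIZE rows -> 1 page, and so on.
num_pages = math.ceil(total_rows / PAGE_SIZE)

first_page = next(iter(batches))
assert len(first_page.index) <= total_rows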

tests/benchmark/read_gbq_colab/sort_output.py

Lines changed: 3 additions & 0 deletions

@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import pathlib
+import typing

 import benchmark.utils as utils

@@ -29,6 +30,7 @@ def sort_output(*, project_id, dataset_id, table_id):

     # Simulate getting the first page, since we'll always do that first in the UI.
     batches = df.to_pandas_batches(page_size=PAGE_SIZE)
+    assert typing.cast(typing.Any, batches).total_rows >= 0
     next(iter(batches))

     # Simulate the user sorting by a column and visualizing those results
@@ -38,6 +40,7 @@ def sort_output(*, project_id, dataset_id, table_id):

     df_sorted = df.sort_values(sort_column)
     batches_sorted = df_sorted.to_pandas_batches(page_size=PAGE_SIZE)
+    assert typing.cast(typing.Any, batches_sorted).total_rows >= 0
     next(iter(batches_sorted))

