Skip to content

Commit 698a786

Browse files
committed
quiet downloading of data and make setup executable
1 parent e540a7f commit 698a786

File tree

3 files changed

+7
-7
lines changed

3 files changed

+7
-7
lines changed

_launcher/setup.sh

file mode changed: 100644 → 100755
Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,8 +22,8 @@ Rscript -e 'install.packages(c("bit64","rmarkdown","data.table","rpivotTable","f
2222
Rscript -e 'sapply(c("bit64","rmarkdown","data.table","rpivotTable","formattable","lattice"), requireNamespace)'
2323

2424
# install duckdb for unpacking data
25-
curl --fail --location --progress-bar --output duckdb_cli-linux-amd64.zip https://github.com/duckdb/duckdb/releases/download/v1.2.0/duckdb_cli-linux-amd64.zip && unzip duckdb_cli-linux-amd64.zip
26-
sudo mv duckdb /usr/local/bin/
25+
curl --fail --location --progress-bar --output duckdb_cli-linux-amd64.zip https://github.com/duckdb/duckdb/releases/download/v1.2.0/duckdb_cli-linux-amd64.zip
26+
sudo unzip duckdb_cli-linux-amd64.zip -d /usr/local/bin
2727

2828

2929
# install aws client to download benchmark data

_run/run_large.sh

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
11
# download and expand large data
22

33
# get groupby large (50GB datasets)
4-
aws s3 cp s3://duckdb-blobs/data/db-benchmark-data/groupby_large.duckdb data/groupby_large.duckdb --no-sign-request
4+
aws s3 cp s3://duckdb-blobs/data/db-benchmark-data/groupby_large.duckdb data/groupby_large.duckdb --no-sign-request --quiet
55
# get join small (50GB datasets)
6-
aws s3 cp s3://duckdb-blobs/data/db-benchmark-data/join_large.duckdb data/join_large.duckdb --no-sign-request
6+
aws s3 cp s3://duckdb-blobs/data/db-benchmark-data/join_large.duckdb data/join_large.duckdb --no-sign-request --quiet
77

88

99
# expand groupby-large datasets to csv
@@ -31,4 +31,4 @@ echo "Running all solutions on large (50GB) datasets"
3131
echo "done..."
3232
echo "removing data files"
3333
rm data/*.csv
34-
rm data/*.duckdb
34+
rm data/*.duckdb

_run/run_small_medium.sh

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
11
# first download and expand small data
22

33
# get groupby small (0.5GB and 5GB datasets)
4-
aws s3 cp s3://duckdb-blobs/data/db-benchmark-data/groupby_small.duckdb data/groupby_small.duckdb --no-sign-request
4+
aws s3 cp s3://duckdb-blobs/data/db-benchmark-data/groupby_small.duckdb data/groupby_small.duckdb --no-sign-request --quiet
55
# get join small (0.5GB and 5GB datasets)
6-
aws s3 cp s3://duckdb-blobs/data/db-benchmark-data/join_small.duckdb data/join_small.duckdb --no-sign-request
6+
aws s3 cp s3://duckdb-blobs/data/db-benchmark-data/join_small.duckdb data/join_small.duckdb --no-sign-request --quiet
77

88

99
# expand groupby-small datasets to csv

0 commit comments

Comments (0)