diff --git a/README.md b/README.md
index 08c26c4..f4fd2c8 100644
--- a/README.md
+++ b/README.md
@@ -99,8 +99,8 @@ Raised when there is an error backing up a table
 __Example__
 ```
 backup.on('error', function(data) {
-  console.log('Error backing up ' + data.tableName);
-  console.log(data.error);
+  console.log('Error backing up ' + data.table);
+  console.log(data.err);
 });
 ```
@@ -110,7 +110,7 @@ Raised when the backup of a table is beginning
 __Example__
 ```
-backup.on('start-backup', function(tableName) {
+backup.on('start-backup', function(tableName, startTime) {
   console.log('Starting to copy table ' + tableName);
 });
 ```
@@ -121,7 +121,7 @@ Raised when the backup of a table is finished
 __Example__
 ```
-backup.on('end-backup', function(tableName) {
+backup.on('end-backup', function(tableName, duration) {
   console.log('Done copying table ' + tableName);
 });
 ```
@@ -146,8 +146,8 @@ __Arguments__
 * `tableName` - name of the table to backup
 * `backupPath` - (optional) the path to use for the backup.
-  The iterator is passed a `callback(err)` which must be called once it has
-  completed. If no error has occurred, the `callback` should be run without
+  The iterator is passed a `callback(err)` which must be called once it has
+  completed. If no error has occurred, the `callback` should be run without
   arguments or with an explicit `null` argument.
 * `callback(err)` - A callback which is called when the table has finished backing up, or an error occurs
@@ -157,13 +157,13 @@ __Arguments__

 ## Restore S3 backups back to Dynamo.

-`dynamo-restore-from-s3` is a utility that restores backups in S3 back to dynamo. It streams data down from S3 and throttles the download speed to match the rate of batch writes to Dynamo.
+`dynamo-restore-from-s3` is a utility that restores backups in S3 back to dynamo. It streams data down from S3 and throttles the download speed to match the rate of batch writes to Dynamo.

 It is suitable for restoring large tables without needing to write to disk or use a large amount of memory. Use it on an AWS EC2 instance for best results and to minimise network latency; this should yield restore speeds of around 15 min per GB.

 Use `--overwrite` if the table already exists. Otherwise it will attempt to generate the table on the fly.

-Can be run as a command line script or as an npm module.
+Can be run as a command line script or as an npm module.

 # Command line usage
@@ -193,10 +193,10 @@ Can be run as a command line script or as an npm module.
 ```
   # Restore over existing table (cmd.exe).
-  > node ./bin/dynamo-restore-from-s3 -t acme-customers -s s3://my-backups/acme-customers.json --overwrite
+  > node ./bin/dynamo-restore-from-s3 -t acme-customers -s s3://my-backups/acme-customers.json --overwrite

   # Restore over existing table (shell).
-  $ ./bin/dynamo-restore-from-s3 -t acme-customers -s s3://my-backups/acme-customers.json --overwrite
+  $ ./bin/dynamo-restore-from-s3 -t acme-customers -s s3://my-backups/acme-customers.json --overwrite

   # Restore over existing table, 1000 concurrent requests. Stop if any batch fails 1000 times.
   $ ./bin/dynamo-restore-from-s3 -t acme-customers -c 1000 -s s3://my-backups/acme-customers.json --overwrite -sf
@@ -204,23 +204,23 @@ Can be run as a command line script or as an npm module.
   # Restore over existing table, 1000 concurrent requests. When finished, set read capacity to 50 and write capacity to 10 (both needed).
   $ ./bin/dynamo-restore-from-s3 -t acme-customers -c 1000 -s s3://my-backups/acme-customers.json --overwrite --readcapacity 50 --writecapacity 10

-  # Auto-generate table (determine PK from backup).
+  # Auto-generate table (determine PK from backup).
   $ ./bin/dynamo-restore-from-s3 -t acme-customers -s s3://my-backups/acme-customers.json

   # Auto-generate table with partition and sort key.
-  $ ./bin/dynamo-restore-from-s3 -t acme-orders -s s3://my-backups/acme-orders.json -pk customerId -sk createDate
+  $ ./bin/dynamo-restore-from-s3 -t acme-orders -s s3://my-backups/acme-orders.json -pk customerId -sk createDate

   # Auto-generate table, defined PK. Concurrency 2000 (~ 2GB backup).
-  $ ./bin/dynamo-restore-from-s3 -t acme-orders -pk orderId -c 2000 -s s3://my-backups/acme-orders.json
+  $ ./bin/dynamo-restore-from-s3 -t acme-orders -pk orderId -c 2000 -s s3://my-backups/acme-orders.json

   # Auto-generate table. 2000 write units during restore. When finished, set 50 write units and 100 read units (both needed).
   $ ./bin/dynamo-restore-from-s3 -t acme-orders -c 2000 -s s3://my-backups/acme-orders.json --readcapacity 100 --writecapacity 50

   # Auto-generate table. Concurrency 50 (10 MB backup or less).
-  $ ./bin/dynamo-restore-from-s3 -t acme-orders -c 50 -s s3://my-backups/acme-orders.json
+  $ ./bin/dynamo-restore-from-s3 -t acme-orders -c 50 -s s3://my-backups/acme-orders.json

   # Auto-generate table. Concurrency 50. Stop process if any batch fails 50 times.
-  $ ./bin/dynamo-restore-from-s3 -t acme-orders -c 50 -sf -s s3://my-backups/acme-orders.json
+  $ ./bin/dynamo-restore-from-s3 -t acme-orders -c 50 -sf -s s3://my-backups/acme-orders.json

 ```
@@ -313,7 +313,7 @@ __Example__
 ```
 restore.on('send-batch', function(batches, requests, streamMeta) {
   console.log('Batch Sent');
-  console.log('Num cached batches: ', batches);
+  console.log('Num cached batches: ', batches);
   console.log('Num requests in flight: ', requests);
   console.log('Stream metadata: ', JSON.stringify(streamMeta));
 });
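
Taken together, the event-signature changes in the hunks above look like this from a consumer's point of view. This is a minimal sketch only: it assumes a `backup` instance constructed per the README's existing setup section (which this diff does not touch), and the types and units of the new `startTime` and `duration` arguments are not specified by the diff, so they are logged as-is.

```
// Sketch combining the updated 'error', 'start-backup' and 'end-backup'
// listener signatures from the hunks above. `backup` is assumed to be an
// instance created elsewhere in the README (not part of this diff).
backup.on('error', function(data) {
  // Field names changed from data.tableName / data.error to data.table / data.err.
  console.log('Error backing up ' + data.table);
  console.log(data.err);
});

backup.on('start-backup', function(tableName, startTime) {
  // startTime is new; its exact type/format is not documented in this diff.
  console.log('Starting to copy table ' + tableName + ' at ' + startTime);
});

backup.on('end-backup', function(tableName, duration) {
  // duration is new; its units are not documented in this diff.
  console.log('Done copying table ' + tableName + ' (took ' + duration + ')');
});
```

Existing listeners that only declare `tableName` keep working unchanged, since JavaScript simply ignores extra arguments passed to a function.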