|
17 | 17 | package main
|
18 | 18 |
|
19 | 19 | import (
|
| 20 | + "bytes" |
20 | 21 | "errors"
|
21 | 22 | "fmt"
|
22 | 23 | "os"
|
| 24 | + "os/signal" |
23 | 25 | "path/filepath"
|
24 | 26 | "sort"
|
25 | 27 | "strconv"
|
| 28 | + "strings" |
| 29 | + "syscall" |
26 | 30 | "time"
|
27 | 31 |
|
28 | 32 | "github.com/ethereum/go-ethereum/cmd/utils"
|
@@ -63,6 +67,8 @@ Remove blockchain and state databases`,
|
63 | 67 | dbPutCmd,
|
64 | 68 | dbGetSlotsCmd,
|
65 | 69 | dbDumpFreezerIndex,
|
| 70 | + dbImportCmd, |
| 71 | + dbExportCmd, |
66 | 72 | },
|
67 | 73 | }
|
68 | 74 | dbInspectCmd = cli.Command{
|
@@ -188,6 +194,36 @@ WARNING: This is a low-level operation which may cause database corruption!`,
|
188 | 194 | },
|
189 | 195 | Description: "This command displays information about the freezer index.",
|
190 | 196 | }
|
| 197 | + dbImportCmd = cli.Command{ |
| 198 | + Action: utils.MigrateFlags(importLDBdata), |
| 199 | + Name: "import", |
| 200 | + Usage: "Imports leveldb-data from an exported RLP dump.", |
| 201 | + ArgsUsage: "<dumpfile> <start (optional)", |
| 202 | + Flags: []cli.Flag{ |
| 203 | + utils.DataDirFlag, |
| 204 | + utils.SyncModeFlag, |
| 205 | + utils.MainnetFlag, |
| 206 | + utils.RopstenFlag, |
| 207 | + utils.RinkebyFlag, |
| 208 | + utils.GoerliFlag, |
| 209 | + }, |
| 210 | + Description: "The import command imports the specific chain data from an RLP encoded stream.", |
| 211 | + } |
	// dbExportCmd streams a selected category of chain data (see the
	// chainExporters map for the supported <type> values) into an RLP
	// dump file; a .gz suffix on the file name enables gzip compression.
	dbExportCmd = cli.Command{
		Action: utils.MigrateFlags(exportChaindata),
		Name: "export",
		Usage: "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
		ArgsUsage: "<type> <dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.SyncModeFlag,
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
		},
		Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
	}
191 | 227 | )
|
192 | 228 |
|
193 | 229 | func removeDB(ctx *cli.Context) error {
|
@@ -510,3 +546,133 @@ func parseHexOrString(str string) ([]byte, error) {
|
510 | 546 | }
|
511 | 547 | return b, err
|
512 | 548 | }
|
| 549 | + |
| 550 | +func importLDBdata(ctx *cli.Context) error { |
| 551 | + start := 0 |
| 552 | + switch ctx.NArg() { |
| 553 | + case 1: |
| 554 | + break |
| 555 | + case 2: |
| 556 | + s, err := strconv.Atoi(ctx.Args().Get(1)) |
| 557 | + if err != nil { |
| 558 | + return fmt.Errorf("second arg must be an integer: %v", err) |
| 559 | + } |
| 560 | + start = s |
| 561 | + default: |
| 562 | + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) |
| 563 | + } |
| 564 | + var ( |
| 565 | + fName = ctx.Args().Get(0) |
| 566 | + stack, _ = makeConfigNode(ctx) |
| 567 | + interrupt = make(chan os.Signal, 1) |
| 568 | + stop = make(chan struct{}) |
| 569 | + ) |
| 570 | + defer stack.Close() |
| 571 | + signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM) |
| 572 | + defer signal.Stop(interrupt) |
| 573 | + defer close(interrupt) |
| 574 | + go func() { |
| 575 | + if _, ok := <-interrupt; ok { |
| 576 | + log.Info("Interrupted during ldb import, stopping at next batch") |
| 577 | + } |
| 578 | + close(stop) |
| 579 | + }() |
| 580 | + db := utils.MakeChainDatabase(ctx, stack, false) |
| 581 | + return utils.ImportLDBData(db, fName, int64(start), stop) |
| 582 | +} |
| 583 | + |
| 584 | +type preimageIterator struct { |
| 585 | + iter ethdb.Iterator |
| 586 | +} |
| 587 | + |
| 588 | +func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) { |
| 589 | + for iter.iter.Next() { |
| 590 | + key := iter.iter.Key() |
| 591 | + if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) { |
| 592 | + return utils.OpBatchAdd, key, iter.iter.Value(), true |
| 593 | + } |
| 594 | + } |
| 595 | + return 0, nil, nil, false |
| 596 | +} |
| 597 | + |
| 598 | +func (iter *preimageIterator) Release() { |
| 599 | + iter.iter.Release() |
| 600 | +} |
| 601 | + |
| 602 | +type snapshotIterator struct { |
| 603 | + init bool |
| 604 | + account ethdb.Iterator |
| 605 | + storage ethdb.Iterator |
| 606 | +} |
| 607 | + |
| 608 | +func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) { |
| 609 | + if !iter.init { |
| 610 | + iter.init = true |
| 611 | + return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true |
| 612 | + } |
| 613 | + for iter.account.Next() { |
| 614 | + key := iter.account.Key() |
| 615 | + if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) { |
| 616 | + return utils.OpBatchAdd, key, iter.account.Value(), true |
| 617 | + } |
| 618 | + } |
| 619 | + for iter.storage.Next() { |
| 620 | + key := iter.storage.Key() |
| 621 | + if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) { |
| 622 | + return utils.OpBatchAdd, key, iter.storage.Value(), true |
| 623 | + } |
| 624 | + } |
| 625 | + return 0, nil, nil, false |
| 626 | +} |
| 627 | + |
| 628 | +func (iter *snapshotIterator) Release() { |
| 629 | + iter.account.Release() |
| 630 | + iter.storage.Release() |
| 631 | +} |
| 632 | + |
// chainExporters defines the export scheme for all exportable chain data.
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
	// Trie preimages: hash -> preimage mappings.
	"preimage": func(db ethdb.Database) utils.ChainDataIterator {
		iter := db.NewIterator(rawdb.PreimagePrefix, nil)
		return &preimageIterator{iter: iter}
	},
	// State snapshot: account entries followed by storage entries.
	"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
		account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
		storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
		return &snapshotIterator{account: account, storage: storage}
	},
}
| 645 | + |
| 646 | +func exportChaindata(ctx *cli.Context) error { |
| 647 | + if ctx.NArg() < 2 { |
| 648 | + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) |
| 649 | + } |
| 650 | + // Parse the required chain data type, make sure it's supported. |
| 651 | + kind := ctx.Args().Get(0) |
| 652 | + kind = strings.ToLower(strings.Trim(kind, " ")) |
| 653 | + exporter, ok := chainExporters[kind] |
| 654 | + if !ok { |
| 655 | + var kinds []string |
| 656 | + for kind := range chainExporters { |
| 657 | + kinds = append(kinds, kind) |
| 658 | + } |
| 659 | + return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", ")) |
| 660 | + } |
| 661 | + var ( |
| 662 | + stack, _ = makeConfigNode(ctx) |
| 663 | + interrupt = make(chan os.Signal, 1) |
| 664 | + stop = make(chan struct{}) |
| 665 | + ) |
| 666 | + defer stack.Close() |
| 667 | + signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM) |
| 668 | + defer signal.Stop(interrupt) |
| 669 | + defer close(interrupt) |
| 670 | + go func() { |
| 671 | + if _, ok := <-interrupt; ok { |
| 672 | + log.Info("Interrupted during db export, stopping at next batch") |
| 673 | + } |
| 674 | + close(stop) |
| 675 | + }() |
| 676 | + db := utils.MakeChainDatabase(ctx, stack, true) |
| 677 | + return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop) |
| 678 | +} |
0 commit comments