@@ -24,16 +24,11 @@ package cluster
 
 import (
 	"fmt"
-	"io"
-	"os"
-	"regexp"
-	"strconv"
-	"strings"
 
-	"github.com/opencurve/curveadm/pkg/log/zaplog"
 	"github.com/opencurve/curveadm/cli/cli"
 	"github.com/opencurve/curveadm/internal/storage"
 	"github.com/opencurve/curveadm/internal/utils"
+	"github.com/opencurve/curveadm/pkg/log/zaplog"
 	"github.com/spf13/cobra"
 )
 
@@ -43,7 +38,7 @@ const (
 
 var (
 	importExample = `Examples:
-  $ curveadm cluster import my-cluster                     # Import a cluster named 'my-cluster'
+  $ curveadm cluster import my-cluster                     # Import cluster 'my-cluster' with curveadm.db
   $ curveadm cluster import my-cluster -f /path/to/dbfile  # Import cluster 'my-cluster' with specified database file`
 )
 
@@ -73,100 +68,40 @@ func NewImportCommand(curveadm *cli.CurveAdm) *cobra.Command {
 	return cmd
 }
 
-func readItem(file *os.File) (int, string, error) {
-	// key: --- $ID $LENGTH
-	buffer := []byte{}
-	bytes := make([]byte, 1)
-	for {
-		_, err := file.Read(bytes)
-		if err == io.EOF {
-			if len(buffer) == 0 {
-				return 0, "", io.EOF
-			} else {
-				return 0, "", fmt.Errorf("invalid curveadm database, line: %s", string(buffer))
-			}
-		} else if err != nil {
-			return 0, "", err
-		} else if bytes[0] == '\n' {
-			break
-		}
-		buffer = append(buffer, bytes[0])
-	}
-
-	key := string(buffer)
-	pattern := regexp.MustCompile("^--- ([0-9]+) ([0-9]+)$")
-	mu := pattern.FindStringSubmatch(key)
-	if len(mu) == 0 {
-		return 0, "", fmt.Errorf("invalid curveadm database, line: %s", key)
-	}
-
-	id, _ := strconv.Atoi(mu[1])
-	nbytes, _ := strconv.Atoi(mu[2])
-	if nbytes > MAX_VALUE_BYETS {
-		return 0, "", fmt.Errorf("too big value int curveadm database")
-	}
-
-	// value
-	bytes = make([]byte, nbytes)
-	nread, err := file.Read(bytes)
-	if err == io.EOF || nread != nbytes {
-		return 0, "", fmt.Errorf("value broken in database")
-	} else if err != nil {
-		return 0, "", err
+func readDB(filepath, name string) (*storage.Cluster, []storage.Service, error) {
+	dbUrl := fmt.Sprintf("sqlite://%s", filepath)
+	s, err := storage.NewStorage(dbUrl)
+	if err != nil {
+		return nil, nil, err
 	}
 
-	return id, string(bytes[:nbytes-1]), nil
-}
-
-func readDatabase(filename string) (storage.Cluster, []storage.Service, error) {
-	cluster := storage.Cluster{}
-	services := []storage.Service{}
-	file, err := os.Open(filename)
+	clusters, err := s.GetClusters(name)
 	if err != nil {
-		return cluster, services, err
+		return nil, nil, err
+	} else if len(clusters) == 0 {
+		return nil, nil, fmt.Errorf("cluster '%s' not found", name)
+	} else if len(clusters) > 1 {
+		return nil, nil, fmt.Errorf("cluster '%s' is duplicate", name)
 	}
-	defer file.Close()
-
-	for {
-		id, value, err := readItem(file)
-		if err == io.EOF {
-			break
-		} else if err != nil {
-			return cluster, services, err
-		}
 
-		switch id {
-		case CLUSTER_DESCRIPTION:
-			cluster.Description = value
-			break
-		case CLUSTER_TOPOLOGY:
-			cluster.Topology = value
-			break
-		case SERVICE:
-			items := strings.Split(value, " ")
-			if len(items) != 2 {
-				return cluster, services, fmt.Errorf("invalid service, line: %s", value)
-			}
-			service := storage.Service{
-				Id:          items[0],
-				ContainerId: items[1],
-			}
-			services = append(services, service)
-		}
+	cluster := clusters[0]
+	services, err := s.GetServices(cluster.Id)
+	if err != nil {
+		return nil, nil, err
 	}
-
-	return cluster, services, nil
+	return &cluster, services, nil
 }
 
-func importCluster(storage *storage.Storage, name, dbfile string) error {
+func importCluster(storage *storage.Storage, dbfile, name string) error {
 	// read database file
-	cluster, services, err := readDatabase(dbfile)
+	cluster, services, err := readDB(dbfile, name)
 	if err != nil {
 		return err
 	}
 
 	// insert cluster
-	if storage.InsertCluster(name, cluster.Description, cluster.Topology); err != nil {
+	err = storage.InsertCluster(name, cluster.UUId, cluster.Description, cluster.Topology)
+	if err != nil {
 		return err
 	}
 
@@ -177,7 +112,8 @@ func importCluster(storage *storage.Storage, name, dbfile string) error {
 	}
 	clusterId := clusters[0].Id
 	for _, service := range services {
-		if err := storage.InsertService(clusterId, service.Id, service.ContainerId); err != nil {
+		err := storage.InsertService(clusterId, service.Id, service.ContainerId)
+		if err != nil {
 			return err
 		}
 	}
@@ -191,10 +127,9 @@ func runImport(curveadm *cli.CurveAdm, options importOptions) error {
 	if err != nil {
 		zaplog.Error("GetClusters", zaplog.Field("error", err))
 		return err
-	} else if len(clusters) != 0 {
+	} else if len(clusters) != 0 { // TODO: let user enter a new cluster name
 		return fmt.Errorf("cluster %s already exist", name)
-	} else if err := importCluster(storage, name, options.dbfile); err != nil {
-		storage.DeleteCluster(name)
+	} else if err := importCluster(storage, options.dbfile, name); err != nil {
 		return err
 	}
 