@@ -101,79 +101,88 @@ func (c *Cluster) v2Prepare(_ context.Context, memberlistJoin []string) error {
 	metastoreLeader := c.metastoreExpectedLeader()
 
 	for _, comp := range c.Components {
-		dataDir := c.dataDir(comp)
+		if err := c.v2PrepareComponent(comp, metastoreLeader); err != nil {
+			return err
+		}
+
+		// handle memberlist join
+		for _, m := range memberlistJoin {
+			comp.flags = append(comp.flags, fmt.Sprintf("-memberlist.join=%s", m))
+		}
+	}
 
-		comp.cfg.V2 = true
-		comp.flags = c.commonFlags(comp)
+	return nil
+}
+
+func (c *Cluster) v2PrepareComponent(comp *Component, metastoreLeader *Component) error {
+	dataDir := c.dataDir(comp)
+
+	comp.cfg.V2 = true
+	comp.flags = c.commonFlags(comp)
 
+	comp.flags = append(comp.flags,
+		"-enable-query-backend=true",
+		"-write-path=segment-writer",
+		"-metastore.min-ready-duration=0",
+		fmt.Sprintf("-metastore.address=%s:%d/%s", listenAddr, metastoreLeader.grpcPort, metastoreLeader.nodeName()),
+	)
+
+	if c.debuginfodURL != "" && comp.Target == "query-frontend" {
 		comp.flags = append(comp.flags,
-			"-enable-query-backend=true",
-			"-write-path=segment-writer",
-			"-metastore.min-ready-duration=0",
-			fmt.Sprintf("-metastore.address=%s:%d/%s", listenAddr, metastoreLeader.grpcPort, metastoreLeader.nodeName()),
+			fmt.Sprintf("-symbolizer.debuginfod-url=%s", c.debuginfodURL),
+			"-symbolizer.enabled=true",
 		)
+	}
 
-		if c.debuginfodURL != "" && comp.Target == "query-frontend" {
-			comp.flags = append(comp.flags,
-				fmt.Sprintf("-symbolizer.debuginfod-url=%s", c.debuginfodURL),
-				"-symbolizer.enabled=true",
-			)
-		}
+	if comp.Target == "segment-writer" {
+		comp.flags = append(comp.flags,
+			"-segment-writer.num-tokens=1",
+			"-segment-writer.min-ready-duration=0",
+			"-segment-writer.lifecycler.addr=" + listenAddr,
+			"-segment-writer.lifecycler.ID=" + comp.nodeName(),
+			"-segment-writer.heartbeat-period=1s",
+		)
+	}
 
-		if comp.Target == "segment-writer" {
-			comp.flags = append(comp.flags,
-				"-segment-writer.num-tokens=1",
-				"-segment-writer.min-ready-duration=0",
-				"-segment-writer.lifecycler.addr=" + listenAddr,
-				"-segment-writer.lifecycler.ID=" + comp.nodeName(),
-				"-segment-writer.heartbeat-period=1s",
-			)
-		}
+	if comp.Target == "compaction-worker" {
+		comp.flags = append(comp.flags,
+			"-compaction-worker.job-concurrency=20",
+			"-compaction-worker.job-poll-interval=1s",
+		)
+	}
 
-		if comp.Target == "compaction-worker" {
+	// register query-backends in the frontend and themselves
+	if comp.Target == "query-frontend" || comp.Target == "query-backend" {
+		for _, compidx := range c.perTarget["query-backend"] {
 			comp.flags = append(comp.flags,
-				"-compaction-worker.job-concurrency=20",
-				"-compaction-worker.job-poll-interval=1s",
+				fmt.Sprintf("-query-backend.address=%s:%d", listenAddr, c.Components[compidx].grpcPort),
 			)
 		}
+	}
 
-		// register query-backends in the frontend and themselves
-		if comp.Target == "query-frontend" || comp.Target == "query-backend" {
-			for _, compidx := range c.perTarget["query-backend"] {
-				comp.flags = append(comp.flags,
-					fmt.Sprintf("-query-backend.address=%s:%d", listenAddr, c.Components[compidx].grpcPort),
-				)
-			}
+	// handle metastore folders and ports
+	if comp.Target == "metastore" {
+		cfgPath, err := c.metastoreConfig()
+		if err != nil {
+			return err
 		}
+		comp.flags = append(comp.flags,
+			fmt.Sprint("-config.file=", cfgPath),
+			fmt.Sprintf("-metastore.data-dir=%s", dataDir + "../metastore-ephemeral"),
+			fmt.Sprintf("-metastore.raft.dir=%s", dataDir + "../metastore-raft"),
+			fmt.Sprintf("-metastore.raft.snapshots-dir=%s", dataDir + "../metastore-snapshots"),
+			fmt.Sprintf("-metastore.raft.bind-address=%s:%d", listenAddr, comp.raftPort),
+			fmt.Sprintf("-metastore.raft.advertise-address=%s:%d", listenAddr, comp.raftPort),
+			fmt.Sprintf("-metastore.raft.server-id=%s", comp.nodeName()),
+			fmt.Sprintf("-metastore.raft.bootstrap-expect-peers=%d", len(c.perTarget[comp.Target])),
+		)
 
-		// handle metastore folders and ports
-		if comp.Target == "metastore" {
-			cfgPath, err := c.metastoreConfig()
-			if err != nil {
-				return err
-			}
+		// add bootstrap peers
+		for _, compidx := range c.perTarget[comp.Target] {
+			peer := c.Components[compidx]
 			comp.flags = append(comp.flags,
-				fmt.Sprint("-config.file=", cfgPath),
-				fmt.Sprintf("-metastore.data-dir=%s", dataDir + "../metastore-ephemeral"),
-				fmt.Sprintf("-metastore.raft.dir=%s", dataDir + "../metastore-raft"),
-				fmt.Sprintf("-metastore.raft.snapshots-dir=%s", dataDir + "../metastore-snapshots"),
-				fmt.Sprintf("-metastore.raft.bind-address=%s:%d", listenAddr, comp.raftPort),
-				fmt.Sprintf("-metastore.raft.advertise-address=%s:%d", listenAddr, comp.raftPort),
-				fmt.Sprintf("-metastore.raft.server-id=%s", comp.nodeName()),
-				fmt.Sprintf("-metastore.raft.bootstrap-expect-peers=%d", len(c.perTarget[comp.Target])),
+				fmt.Sprintf("-metastore.raft.bootstrap-peers=%s:%d/%s", listenAddr, peer.raftPort, peer.nodeName()),
 			)
-
-			// add bootstrap peers
-			for _, compidx := range c.perTarget[comp.Target] {
-				peer := c.Components[compidx]
-				comp.flags = append(comp.flags,
-					fmt.Sprintf("-metastore.raft.bootstrap-peers=%s:%d/%s", listenAddr, peer.raftPort, peer.nodeName()),
-				)
-			}
-		}
-		// handle memberlist join
-		for _, m := range memberlistJoin {
-			comp.flags = append(comp.flags, fmt.Sprintf("-memberlist.join=%s", m))
 		}
 	}
 
@@ -236,3 +245,46 @@ func (comp *Component) metastoreReadyCheck(ctx context.Context, metastores []*Co
 	})
 	return err
 }
+
+func (c *Cluster) GetMetastoreRaftNodeClient() (raftnodepb.RaftNodeServiceClient, error) {
+	leader := c.metastoreExpectedLeader()
+	opts := []grpc.DialOption{
+		grpc.WithTransportCredentials(insecure.NewCredentials()),
+	}
+	cc, err := grpc.NewClient(fmt.Sprintf("127.0.0.1:%d", leader.grpcPort), opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	return raftnodepb.NewRaftNodeServiceClient(cc), nil
+}
+
+func (c *Cluster) AddMetastoreWithAutoJoin(ctx context.Context) error {
+	leader := c.metastoreExpectedLeader()
+
+	comp := newComponent("metastore")
+	comp.replica = len(c.perTarget["metastore"])
+	c.Components = append(c.Components, comp)
+	c.perTarget["metastore"] = append(c.perTarget["metastore"], len(c.Components)-1)
+
+	if err := c.v2PrepareComponent(comp, leader); err != nil {
+		return err
+	}
+	comp.flags = append(comp.flags, "-metastore.raft.auto-join=true")
+
+	p, err := comp.start(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to start component: %w", err)
+	}
+	comp.p = p
+
+	c.wg.Add(1)
+	go func() {
+		defer c.wg.Done()
+		if err := p.Run(); err != nil {
+			fmt.Printf("metastore with auto-join stopped with error: %v\n", err)
+		}
+	}()
+
+	return nil
+}