@@ -93,8 +93,10 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID,
 		}
 	}
 
-	if cmd.Type == defs.LogicalBackup {
-		// wakeup the slicer to not wait for the tick
+	if cmd.Type == defs.LogicalBackup && cmd.Profile == "" {
+		// For backups to the main storage,
+		// wake up the slicer to not wait for the tick.
+		// This will slice and pause the main PITR.
 		go a.sliceNow(opid)
 	}
 
@@ -156,20 +158,25 @@ func (a *Agent) Backup(ctx context.Context, cmd *ctrl.BackupCmd, opid ctrl.OPID,
 	// not replset. So an `incremental && not_base` backup should land on
 	// the agent that made a previous (src) backup.
 	const srcHostMultiplier = 3.0
-	var c map[string]float64
+	c := make(map[string]float64)
 	if cmd.Type == defs.IncrementalBackup && !cmd.IncrBase {
 		src, err := backup.LastIncrementalBackup(ctx, a.leadConn)
 		if err != nil {
 			// try backup anyway
 			l.Warning("define source backup: %v", err)
 		} else {
-			c = make(map[string]float64)
 			for _, rs := range src.Replsets {
 				c[rs.Node] = srcHostMultiplier
 			}
 		}
 	}
 
+	// When a logical backup targets an external profile (different storage),
+	// PITR keeps running. Deprioritize nodes currently running the PITR slicer.
+	if cmd.Type == defs.LogicalBackup && cmd.Profile != "" {
+		c = a.deprioritizePITRNodes(ctx, c, l)
+	}
+
 	agents, err := topo.ListSteadyAgents(ctx, a.leadConn)
 	if err != nil {
 		l.Error("get agents list: %v", err)
@@ -267,6 +274,41 @@ func (a *Agent) getValidCandidates(agents []topo.AgentStat, backupType defs.Back
 	return validCandidates
 }
 
+// deprioritizePITRNodes adds low-priority coefficients for nodes currently running PITR slicing.
+// It only adds coefficients for nodes that are not already present in the map (e.g., the incremental src host).
+// Returns the (possibly modified) coefficient map.
+func (a *Agent) deprioritizePITRNodes(
+	ctx context.Context,
+	coefficients map[string]float64,
+	l log.LogEvent,
+) map[string]float64 {
+	pitrLocks, err := lock.GetOpLocks(ctx, a.leadConn, &lock.LockHeader{Type: ctrl.CmdPITR})
+	if err != nil {
+		l.Warning("get pitr locks for deprioritization: %v", err)
+		return coefficients
+	}
+
+	ts, err := topo.GetClusterTime(ctx, a.leadConn)
+	if err != nil {
+		l.Warning("get cluster time for pitr deprioritization: %v", err)
+		return coefficients
+	}
+
+	for i := range pitrLocks {
+		pl := &pitrLocks[i]
+		if pl.Heartbeat.T+defs.StaleFrameSec < ts.T {
+			continue // stale lock, ignore
+		}
+
+		// Only set if not already present (preserve previous priorities)
+		if _, exists := coefficients[pl.Node]; !exists {
+			coefficients[pl.Node] = prio.DefaultScore - 0.1
+		}
+	}
+
+	return coefficients
+}
+
 const renominationFrame = 5 * time.Second
 
 func (a *Agent) nominateRS(ctx context.Context, bcp, rs string, nodes [][]string) error {
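
For context, a minimal, self-contained sketch of how multiplicative coefficients such as srcHostMultiplier (3.0) and the PITR deprioritization value (prio.DefaultScore - 0.1) could influence candidate ordering. The pickCandidates helper, the base scores, and the node names below are hypothetical illustrations only, not PBM's actual nomination API.

package main

import (
	"fmt"
	"sort"
)

// pickCandidates is a hypothetical helper (not PBM's actual API): it orders
// nodes by baseScore * coefficient, treating a missing coefficient as 1.0,
// so a node with a higher coefficient (e.g. the incremental src host at 3.0)
// comes first and a slightly lowered one (e.g. 0.9 for a PITR node) comes last.
func pickCandidates(baseScore, coeff map[string]float64) []string {
	nodes := make([]string, 0, len(baseScore))
	for n := range baseScore {
		nodes = append(nodes, n)
	}
	score := func(n string) float64 {
		c := 1.0
		if v, ok := coeff[n]; ok {
			c = v
		}
		return baseScore[n] * c
	}
	sort.Slice(nodes, func(i, j int) bool { return score(nodes[i]) > score(nodes[j]) })
	return nodes
}

func main() {
	base := map[string]float64{"rs0/a": 1.0, "rs0/b": 1.0, "rs0/c": 1.0}
	// "rs0/a" made the previous incremental backup; "rs0/b" runs the PITR slicer.
	coeff := map[string]float64{"rs0/a": 3.0, "rs0/b": 0.9}
	fmt.Println(pickCandidates(base, coeff)) // [rs0/a rs0/c rs0/b]
}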