@@ -16,9 +16,12 @@ import (
    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-address"
+   "github.com/filecoin-project/go-bitfield"
    "github.com/filecoin-project/go-commp-utils/zerocomm"
    commcid "github.com/filecoin-project/go-fil-commcid"
    "github.com/filecoin-project/go-state-types/abi"
+   "github.com/filecoin-project/go-state-types/builtin"
+   miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner"
    "github.com/filecoin-project/go-state-types/crypto"

    "github.com/filecoin-project/curio/harmony/harmonydb"
@@ -44,6 +47,7 @@ var log = logging.Logger("batchseal")
type SupraSealNodeAPI interface {
    ChainHead(context.Context) (*types.TipSet, error)
    StateGetRandomnessFromTickets(context.Context, crypto.DomainSeparationTag, abi.ChainEpoch, []byte, types.TipSetKey) (abi.Randomness, error)
+   StateMinerAllocated(context.Context, address.Address, types.TipSetKey) (*bitfield.BitField, error)
}

type SupraSeal struct {
@@ -530,6 +534,12 @@ func (s *SupraSeal) TypeDetails() harmonytask.TaskTypeDetails {
func (s *SupraSeal) Adder(taskFunc harmonytask.AddTaskFunc) {
}

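+ // sectorClaim is one row of sectors_sdr_pipeline claimed for a batch: either an
+ // existing entry still waiting for SDR, or a new CC sector created by the scheduler below.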
+ type sectorClaim struct {
+     SpID         int64  `db:"sp_id"`
+     SectorNumber int64  `db:"sector_number"`
+     TaskIDSDR    *int64 `db:"task_id_sdr"`
+ }
+
func (s *SupraSeal) schedule(taskFunc harmonytask.AddTaskFunc) error {
    if s.slots.Available() == 0 {
        return nil
@@ -542,12 +552,8 @@ func (s *SupraSeal) schedule(taskFunc harmonytask.AddTaskFunc) error {

    taskFunc(func(id harmonytask.TaskID, tx *harmonydb.Tx) (shouldCommit bool, seriousError error) {
        // claim [sectors] pipeline entries
-         var sectors []struct {
-             SpID         int64  `db:"sp_id"`
-             SectorNumber int64  `db:"sector_number"`
-             TaskIDSDR    *int64 `db:"task_id_sdr"`
-         }

+         var sectors []sectorClaim
        err := tx.Select(&sectors, `SELECT sp_id, sector_number, task_id_sdr FROM sectors_sdr_pipeline
                                    LEFT JOIN harmony_task ht on sectors_sdr_pipeline.task_id_sdr = ht.id
                                    WHERE after_sdr = FALSE AND (task_id_sdr IS NULL OR (ht.owner_id IS NULL AND ht.name = 'SDR')) LIMIT $1`, s.sectors)
@@ -558,9 +564,18 @@ func (s *SupraSeal) schedule(taskFunc harmonytask.AddTaskFunc) error {
        log.Infow("got sectors, maybe schedule", "sectors", len(sectors), "s.sectors", s.sectors)

        if len(sectors) != s.sectors {
-             // not enough sectors to fill a batch
-             log.Infow("not enough sectors to fill a batch", "sectors", len(sectors))
-             return false, nil
+             // not enough sectors to fill a batch, use CC scheduler
+             log.Infow("not enough sectors to fill a batch, using CC scheduler", "sectors", len(sectors))
+             addSectors, err := s.claimsFromCCScheduler(tx, int64(s.sectors-len(sectors)))
+             if err != nil {
+                 return false, xerrors.Errorf("getting CC scheduler claims: %w", err)
+             }
+             sectors = append(sectors, addSectors...)
+             log.Infow("got CC scheduler claims", "sectors", len(sectors))
+         }
+
+         if len(sectors) != s.sectors {
+             return false, xerrors.Errorf("not enough sectors to fill a batch %d != %d", len(sectors), s.sectors)
        }

        // assign to pipeline entries, set task_id_sdr, task_id_tree_r, task_id_tree_c
@@ -585,6 +600,117 @@ func (s *SupraSeal) schedule(taskFunc harmonytask.AddTaskFunc) error {
    return nil
}

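+ // claimsFromCCScheduler tops up a batch with new CC sectors. It reads the enabled
+ // per-SP schedules from sectors_cc_scheduler, splits toSeal across SPs in proportion
+ // to their weight, allocates fresh sector numbers, inserts the matching
+ // sectors_sdr_pipeline rows, and decrements each SP's to_seal budget. It returns
+ // exactly toSeal claims, nil when the schedules cannot cover the request, or an error.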
+ func (s *SupraSeal) claimsFromCCScheduler(tx *harmonydb.Tx, toSeal int64) ([]sectorClaim, error) {
+     var enabledSchedules []struct {
+         SpID         int64 `db:"sp_id"`
+         ToSeal       int64 `db:"to_seal"`
+         Weight       int64 `db:"weight"`
+         DurationDays int64 `db:"duration_days"`
+     }
+
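+     // sectors_cc_scheduler is expected to hold one row per SP: the remaining to_seal
+     // target, a relative weight for sharing batches, and the requested sector duration
+     // in days; only enabled rows with a positive weight take part.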
+     err := tx.Select(&enabledSchedules, `SELECT sp_id, to_seal, weight, duration_days FROM sectors_cc_scheduler WHERE enabled = TRUE AND weight > 0 ORDER BY weight DESC`)
+     if err != nil {
+         return nil, xerrors.Errorf("getting enabled schedules: %w", err)
+     }
+
+     if len(enabledSchedules) == 0 {
+         return nil, nil
+     }
+
+     var totalWeight, totalToSeal int64
+     for _, schedule := range enabledSchedules {
+         totalWeight += schedule.Weight
+         totalToSeal += schedule.ToSeal
+     }
+
+     if totalToSeal < toSeal {
+         log.Debugw("not enough sectors to fill a batch from CC scheduler", "totalToSeal", totalToSeal, "toSeal", toSeal)
+         return nil, nil
+     }
+
+     // Calculate proportional allocation based on weights
+     var outClaims []sectorClaim
+     remainingToSeal := toSeal
+
+     for i, schedule := range enabledSchedules {
+         if remainingToSeal <= 0 {
+             break
+         }
+
+         // Calculate how many sectors this SP should get based on weight
+         var sectorsForSP int64
+         if i == len(enabledSchedules)-1 {
+             // Last SP gets the remaining sectors
+             sectorsForSP = remainingToSeal
+         } else {
+             // Proportional allocation based on weight
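+             // e.g. toSeal=32 with two schedules of weight 3 and 1: the first SP is
+             // assigned (32*3)/4 = 24 here (capped by its to_seal and the remainder),
+             // and the last SP picks up whatever is left, 8 in this example.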
+             sectorsForSP = (toSeal * schedule.Weight) / totalWeight
+             if sectorsForSP > schedule.ToSeal {
+                 sectorsForSP = schedule.ToSeal
+             }
+             if sectorsForSP > remainingToSeal {
+                 sectorsForSP = remainingToSeal
+             }
+         }
+
+         if sectorsForSP == 0 {
+             continue
+         }
+
+         // Allocate sector numbers for this SP
+         maddr, err := address.NewIDAddress(uint64(schedule.SpID))
+         if err != nil {
+             return nil, xerrors.Errorf("getting miner address for %d: %w", schedule.SpID, err)
+         }
+
+         sectorNumbers, err := seal.AllocateSectorNumbers(context.Background(), s.api, tx, maddr, int(sectorsForSP))
+         if err != nil {
+             return nil, xerrors.Errorf("allocating sector numbers for %d: %w", schedule.SpID, err)
+         }
+
+         // Create sector claims for allocated sectors
+         for _, sectorNum := range sectorNumbers {
+             outClaims = append(outClaims, sectorClaim{
+                 SpID:         schedule.SpID,
+                 SectorNumber: int64(sectorNum),
+                 TaskIDSDR:    nil, // New sector, no existing task
+             })
+
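+             // duration_days is converted to epochs (builtin.EpochsInDay is 2880 at 30s
+             // epochs) and must fall within the v12 miner actor's expiration bounds.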
+             userDuration := int64(schedule.DurationDays) * builtin.EpochsInDay
+
+             if miner12.MaxSectorExpirationExtension < userDuration {
+                 return nil, xerrors.Errorf("duration exceeds max allowed: %d > %d", userDuration, miner12.MaxSectorExpirationExtension)
+             }
+             if miner12.MinSectorExpiration > userDuration {
+                 return nil, xerrors.Errorf("duration is too short: %d < %d", userDuration, miner12.MinSectorExpiration)
+             }
+
+             // Insert new sector into sectors_sdr_pipeline
+             _, err := tx.Exec(`INSERT INTO sectors_sdr_pipeline (sp_id, sector_number, reg_seal_proof, user_sector_duration_epochs)
+                 VALUES ($1, $2, $3, $4)`,
+                 schedule.SpID, sectorNum, s.spt, userDuration)
+             if err != nil {
+                 return nil, xerrors.Errorf("inserting new sector %d for SP %d: %w", sectorNum, schedule.SpID, err)
+             }
+         }
+
+         // Update the to_seal count for this SP
+         _, err = tx.Exec(`UPDATE sectors_cc_scheduler SET to_seal = to_seal - $1 WHERE sp_id = $2`, sectorsForSP, schedule.SpID)
+         if err != nil {
+             return nil, xerrors.Errorf("updating to_seal for SP %d: %w", schedule.SpID, err)
+         }
+
+         remainingToSeal -= sectorsForSP
+         log.Debugw("allocated sectors from CC scheduler", "sp_id", schedule.SpID, "count", sectorsForSP, "remaining", remainingToSeal, "totalWeight", totalWeight, "totalToSeal", totalToSeal)
+     }
+
+     if len(outClaims) != int(toSeal) {
+         return nil, xerrors.Errorf("failed to allocate expected number of sectors: got %d, wanted %d", len(outClaims), toSeal)
+     }
+
+     return outClaims, nil
+ }
+
func (s *SupraSeal) taskToSectors(id harmonytask.TaskID) ([]ffi.SectorRef, error) {
    var sectors []ffi.SectorRef