@@ -623,42 +623,51 @@ void PGRecovery::backfill_cancelled()
   using BackfillState = crimson::osd::BackfillState;
   backfill_state->process_event(
     BackfillState::CancelBackfill{}.intrusive_from_this());
-  backfill_state.reset();
 }
 
 void PGRecovery::dispatch_backfill_event(
   boost::intrusive_ptr<const boost::statechart::event_base> evt)
 {
   logger().debug("{}", __func__);
-  if (backfill_state) {
-    backfill_state->process_event(evt);
-  } else {
-    // TODO: Do we need to worry about cases in which the pg has
-    // been through both backfill cancellations and backfill
-    // restarts between the sendings and replies of
-    // ReplicaScan/ObjectPush requests? Seems classic OSDs
-    // doesn't handle these cases.
-    logger().debug("{}, backfill cancelled, dropping evt");
-  }
+  assert(backfill_state);
+  backfill_state->process_event(evt);
+  // TODO: Do we need to worry about cases in which the pg has
+  // been through both backfill cancellations and backfill
+  // restarts between the sendings and replies of
+  // ReplicaScan/ObjectPush requests? It seems classic OSDs
+  // don't handle these cases.
+}
+
+void PGRecovery::on_activate_complete()
+{
+  logger().debug("{} backfill_state={}",
+                 __func__, fmt::ptr(backfill_state.get()));
+  backfill_state.reset();
 }
 
 void PGRecovery::on_backfill_reserved()
 {
   logger().debug("{}", __func__);
-  // PIMP and depedency injection for the sake unittestability.
-  // I'm not afraid about the performance here.
-  using BackfillState = crimson::osd::BackfillState;
-  backfill_state = std::make_unique<BackfillState>(
-    *this,
-    std::make_unique<crimson::osd::PeeringFacade>(pg->get_peering_state()),
-    std::make_unique<crimson::osd::PGFacade>(
-      *static_cast<crimson::osd::PG*>(pg)));
   // yes, it's **not** backfilling yet. The PG_STATE_BACKFILLING
   // will be set after on_backfill_reserved() returns.
   // Backfill needs to take this into consideration when scheduling
   // events -- they must be mutually exclusive with PeeringEvent
   // instances. Otherwise the execution might begin without having
   // the state updated.
   ceph_assert(!pg->get_peering_state().is_backfilling());
+  // let's be lazy with creating the backfill machinery
+  using BackfillState = crimson::osd::BackfillState;
+  if (!backfill_state) {
+    // PIMPL and dependency injection for the sake of unittestability.
+    // I'm not worried about the performance here.
+    backfill_state = std::make_unique<BackfillState>(
+      *this,
+      std::make_unique<crimson::osd::PeeringFacade>(pg->get_peering_state()),
+      std::make_unique<crimson::osd::PGFacade>(
+        *static_cast<crimson::osd::PG*>(pg)));
+  }
+  // We may either be starting a completely new backfill (the first
+  // event since the last on_activate_complete()) or resuming a
+  // previously started (but stopped) one.
   start_backfill_recovery(BackfillState::Triggered{});
 }
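
Taken together, the hunk changes the lifetime of `backfill_state`: it is no longer destroyed on cancel, only in the new `on_activate_complete()`, and `on_backfill_reserved()` now creates it lazily so a stopped backfill can be resumed with the same state machine. The sketch below is a minimal, self-contained C++ model of that lifecycle under simplifying assumptions; `StubBackfillState`, `Recovery`, the string-typed events, and the `main()` driver are illustrative stand-ins, not the actual crimson::osd types or event classes.

```cpp
// Minimal sketch (illustrative stand-ins, not the real crimson::osd types)
// of the backfill_state lifetime implied by the diff: lazy creation in
// on_backfill_reserved(), survival across a cancel, teardown only in
// on_activate_complete(), and an assert in dispatch_backfill_event().
#include <cassert>
#include <cstdio>
#include <memory>

struct StubBackfillState {            // stand-in for crimson::osd::BackfillState
  void process_event(const char* evt) {
    std::printf("processing event: %s\n", evt);
  }
};

struct Recovery {                     // stand-in for PGRecovery
  std::unique_ptr<StubBackfillState> backfill_state;

  void on_backfill_reserved() {
    if (!backfill_state) {            // lazy creation: fresh backfill or resume
      backfill_state = std::make_unique<StubBackfillState>();
    }
    backfill_state->process_event("Triggered");
  }
  void backfill_cancelled() {
    backfill_state->process_event("CancelBackfill");
    // no reset() here any more -- the state machine survives a cancel
  }
  void dispatch_backfill_event(const char* evt) {
    assert(backfill_state);           // callers must not outlive the teardown
    backfill_state->process_event(evt);
  }
  void on_activate_complete() {
    backfill_state.reset();           // the only place the machine is destroyed
  }
};

int main() {
  Recovery r;
  r.on_backfill_reserved();           // first backfill: creates the machine
  r.backfill_cancelled();             // cancel keeps backfill_state alive
  r.on_backfill_reserved();           // resume: reuses the existing machine
  r.dispatch_backfill_event("ObjectPushed");
  r.on_activate_complete();           // teardown once the PG is fully active
  return 0;
}
```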