@@ -477,17 +477,6 @@ void PGRecovery::_committed_pushed_object(epoch_t epoch,
   }
 }
 
-template <class EventT>
-void PGRecovery::start_backfill_recovery(const EventT& evt)
-{
-  using BackfillRecovery = crimson::osd::BackfillRecovery;
-  std::ignore = pg->get_shard_services().start_operation<BackfillRecovery>(
-    static_cast<crimson::osd::PG*>(pg),
-    pg->get_shard_services(),
-    pg->get_osdmap_epoch(),
-    evt);
-}
-
 void PGRecovery::request_replica_scan(
   const pg_shard_t& target,
   const hobject_t& begin,
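The deleted helper wrapped every backfill event in a crimson::osd::BackfillRecovery operation and queued it through the shard services; the hunks below dispatch each event straight into the backfill_state machine instead. For readers unfamiliar with the primitive involved, here is a minimal, self-contained Boost.Statechart sketch (state and event names are illustrative, not Ceph's): process_event() runs the matching reaction synchronously, before the call returns.

#include <boost/statechart/state_machine.hpp>
#include <boost/statechart/simple_state.hpp>
#include <boost/statechart/transition.hpp>
#include <boost/statechart/event.hpp>
#include <iostream>

namespace sc = boost::statechart;

struct Triggered : sc::event<Triggered> {};

struct Waiting;  // initial state, defined below
struct Machine : sc::state_machine<Machine, Waiting> {};

struct Backfilling : sc::simple_state<Backfilling, Machine> {};
struct Waiting : sc::simple_state<Waiting, Machine> {
  // on Triggered, transition to Backfilling
  using reactions = sc::transition<Triggered, Backfilling>;
};

int main() {
  Machine m;
  m.initiate();
  m.process_event(Triggered{});  // the reaction runs here, synchronously
  std::cout << "event fully handled before this line\n";
}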
@@ -520,7 +509,8 @@ void PGRecovery::request_primary_scan(
   ).then_interruptible([this] (BackfillInterval bi) {
     logger().debug("request_primary_scan:{}", __func__);
     using BackfillState = crimson::osd::BackfillState;
-    start_backfill_recovery(BackfillState::PrimaryScanned{ std::move(bi) });
+    backfill_state->process_event(
+      BackfillState::PrimaryScanned{ std::move(bi) }.intrusive_from_this());
   });
 }
 
@@ -542,7 +532,13 @@ void PGRecovery::enqueue_push(
   }).then_interruptible([this, obj] {
     logger().debug("enqueue_push:{}", __func__);
     using BackfillState = crimson::osd::BackfillState;
-    start_backfill_recovery(BackfillState::ObjectPushed(std::move(obj)));
+    if (backfill_state->is_triggered()) {
+      backfill_state->post_event(
+        BackfillState::ObjectPushed(std::move(obj)).intrusive_from_this());
+    } else {
+      backfill_state->process_event(
+        BackfillState::ObjectPushed(std::move(obj)).intrusive_from_this());
+    }
   });
 }
 
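The is_triggered() branch is most likely a re-entrancy guard: Boost.Statechart's process_event() must not be called again while the machine is already dispatching an event, so an ObjectPushed that arrives mid-dispatch has to be queued with post_event() and is delivered once the current reaction unwinds. A minimal sketch of the difference, again with illustrative names:

#include <boost/statechart/state_machine.hpp>
#include <boost/statechart/simple_state.hpp>
#include <boost/statechart/transition.hpp>
#include <boost/statechart/custom_reaction.hpp>
#include <boost/statechart/event.hpp>

namespace sc = boost::statechart;

struct EvFirst  : sc::event<EvFirst> {};
struct EvSecond : sc::event<EvSecond> {};

struct StateA;
struct Machine : sc::state_machine<Machine, StateA> {};

struct StateC : sc::simple_state<StateC, Machine> {};
struct StateB : sc::simple_state<StateB, Machine> {
  using reactions = sc::transition<EvSecond, StateC>;
};
struct StateA : sc::simple_state<StateA, Machine> {
  using reactions = sc::custom_reaction<EvFirst>;
  sc::result react(const EvFirst&) {
    // We are inside a reaction: calling process_event() here would
    // re-enter the dispatcher, which Boost.Statechart forbids.
    // post_event() queues EvSecond instead; it is processed right after
    // this reaction finishes, still within the outer process_event() call.
    post_event(EvSecond{});
    return transit<StateB>();
  }
};

int main() {
  Machine m;
  m.initiate();
  m.process_event(EvFirst{});  // handles EvFirst, then the queued EvSecond
  // the machine is now in StateC
}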
@@ -643,8 +639,6 @@ void PGRecovery::backfilled()
 
 void PGRecovery::backfill_suspended()
 {
-  // We are not creating a new BackfillRecovery request here, as we
-  // need to cancel the backfill synchronously (before this method returns).
   using BackfillState = crimson::osd::BackfillState;
   backfill_state->process_event(
     BackfillState::SuspendBackfill{}.intrusive_from_this());
@@ -673,13 +667,7 @@ void PGRecovery::on_activate_complete()
 void PGRecovery::on_backfill_reserved()
 {
   logger().debug("{}", __func__);
-  // yes, it's **not** backfilling yet. The PG_STATE_BACKFILLING
-  // will be set after on_backfill_reserved() returns.
-  // Backfill needs to take this into consideration when scheduling
-  // events -- they must be mutually exclusive with PeeringEvent
-  // instances. Otherwise the execution might begin without having
-  // the state updated.
-  ceph_assert(!pg->get_peering_state().is_backfilling());
+  ceph_assert(pg->get_peering_state().is_backfilling());
   // let's be lazy with creating the backfill stuff
   using BackfillState = crimson::osd::BackfillState;
   if (!backfill_state) {
@@ -694,5 +682,6 @@ void PGRecovery::on_backfill_reserved()
   // it may be we either start a completely new backfill (first
   // event since last on_activate_complete()) or to resume already
   // (but stopped one).
-  start_backfill_recovery(BackfillState::Triggered{});
+  backfill_state->process_event(
+    BackfillState::Triggered{}.intrusive_from_this());
 }
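A note on the .intrusive_from_this() calls throughout the diff: this appears to be Boost.Statechart's own event_base::intrusive_from_this(), which returns a boost::intrusive_ptr<const event_base>. Called on an event whose reference count is still zero, such as the temporaries above, it hands back a heap-allocated clone, so the event safely outlives the expression that created it. A small sketch of that behavior:

#include <boost/statechart/event.hpp>
#include <boost/intrusive_ptr.hpp>
#include <cassert>

namespace sc = boost::statechart;

struct ObjectPushed : sc::event<ObjectPushed> {};

int main() {
  // intrusive_from_this() comes from sc::event_base. On a temporary with
  // a zero reference count it returns a pointer to a heap-allocated clone,
  // so the handle remains valid after the temporary is destroyed.
  boost::intrusive_ptr<const sc::event_base> evt =
    ObjectPushed{}.intrusive_from_this();
  assert(evt);  // safe to stash and process later
}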