@@ -779,15 +779,13 @@ enum scx_tg_flags {
 };
 
 enum scx_ops_enable_state {
-	SCX_OPS_PREPPING,
 	SCX_OPS_ENABLING,
 	SCX_OPS_ENABLED,
 	SCX_OPS_DISABLING,
 	SCX_OPS_DISABLED,
 };
 
 static const char *scx_ops_enable_state_str[] = {
-	[SCX_OPS_PREPPING]	= "prepping",
 	[SCX_OPS_ENABLING]	= "enabling",
 	[SCX_OPS_ENABLED]	= "enabled",
 	[SCX_OPS_DISABLING]	= "disabling",
@@ -5016,12 +5014,12 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 	}
 
 	/*
-	 * Set scx_ops, transition to PREPPING and clear exit info to arm the
+	 * Set scx_ops, transition to ENABLING and clear exit info to arm the
 	 * disable path. Failure triggers full disabling from here on.
 	 */
 	scx_ops = *ops;
 
-	WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_PREPPING) !=
+	WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_ENABLING) !=
 		     SCX_OPS_DISABLED);
 
 	atomic_set(&scx_exit_kind, SCX_EXIT_NONE);
@@ -5174,23 +5172,6 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 	 */
 	preempt_disable();
 
-	/*
-	 * From here on, the disable path must assume that tasks have ops
-	 * enabled and need to be recovered.
-	 *
-	 * Transition to ENABLING fails iff the BPF scheduler has already
-	 * triggered scx_bpf_error(). Returning an error code here would lose
-	 * the recorded error information. Exit indicating success so that the
-	 * error is notified through ops.exit() with all the details.
-	 */
-	if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLING, SCX_OPS_PREPPING)) {
-		preempt_enable();
-		spin_unlock_irq(&scx_tasks_lock);
-		WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE);
-		ret = 0;
-		goto err_disable_unlock_all;
-	}
-
 	/*
 	 * We're fully committed and can't fail. The PREPPED -> ENABLED
 	 * transitions here are synchronized against sched_ext_free() through
@@ -5221,7 +5202,11 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 	cpus_read_unlock();
 	percpu_up_write(&scx_fork_rwsem);
 
-	/* see above ENABLING transition for the explanation on exiting with 0 */
+	/*
+	 * Returning an error code here would lose the recorded error
+	 * information. Exit indicating success so that the error is notified
+	 * through ops.exit() with all the details.
+	 */
 	if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLED, SCX_OPS_ENABLING)) {
 		WARN_ON_ONCE(atomic_read(&scx_exit_kind) == SCX_EXIT_NONE);
 		ret = 0;
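For context, here is a minimal standalone sketch of the two state helpers the hunks above rely on. This is a hypothetical reconstruction using C11 atomics, not the kernel's atomic_t API: scx_ops_set_enable_state() unconditionally advances the state machine and returns the previous state (which is what the WARN_ON_ONCE() in scx_ops_enable() checks), while scx_ops_tryset_enable_state() only advances if nobody has concurrently moved the machine elsewhere, e.g. to DISABLING via scx_bpf_error().

/*
 * Hypothetical userspace sketch of the enable-state helpers; the kernel
 * versions are built on atomic_t with atomic_xchg()/try_cmpxchg()-style
 * primitives.
 */
#include <stdatomic.h>
#include <stdbool.h>

enum scx_ops_enable_state {
	SCX_OPS_ENABLING,
	SCX_OPS_ENABLED,
	SCX_OPS_DISABLING,
	SCX_OPS_DISABLED,
};

static _Atomic int scx_ops_enable_state_var = SCX_OPS_DISABLED;

/* Unconditionally move to @to and return the previous state. */
static enum scx_ops_enable_state
scx_ops_set_enable_state(enum scx_ops_enable_state to)
{
	return atomic_exchange(&scx_ops_enable_state_var, to);
}

/*
 * Move to @to only if the machine is still in @from. Returns false when a
 * concurrent transition (e.g. to DISABLING after an error) won the race.
 */
static bool scx_ops_tryset_enable_state(enum scx_ops_enable_state to,
					enum scx_ops_enable_state from)
{
	int expected = from;

	return atomic_compare_exchange_strong(&scx_ops_enable_state_var,
					      &expected, to);
}

Under this model, losing the ENABLING -> ENABLED race means the disable path already owns the state machine, which is why scx_ops_enable() deliberately returns 0 there and lets ops.exit() deliver the recorded error details instead.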