Commit c70055d

Merge tag 'sched_urgent_for_v6.1_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Borislav Petkov:

 - Adjust code to not trip up CFI

 - Fix sched group cookie matching

* tag 'sched_urgent_for_v6.1_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Introduce struct balance_callback to avoid CFI mismatches
  sched/core: Fix comparison in sched_group_cookie_match()
2 parents: 6204a81 + 8e5bad7
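Background on the first fix: balance callbacks used to be queued through struct callback_head, whose func member takes a struct callback_head *, while the scheduler actually invokes them as void (*)(struct rq *). The prototype-changing cast that papered over this trips kernel CFI (kCFI), which requires the indirect call site and the callee's real prototype to match. This merge introduces a dedicated struct balance_callback whose func pointer already has the invoked type. A minimal, self-contained sketch of the before/after pattern follows; queue_cb and its list parameter are hypothetical stand-ins for the kernel's queue_balance_callback(), not code from this commit.

/* Sketch: a typed callback list that needs no prototype-changing cast. */
struct rq;                                      /* opaque runqueue */

struct balance_callback {
        struct balance_callback *next;
        void (*func)(struct rq *rq);            /* matches how callers invoke it */
};

/*
 * Old pattern (trips kCFI): the callback was stored in a struct
 * callback_head and queued with a cast such as
 *         head->func = (void (*)(struct callback_head *))func;
 * so the indirect call's prototype never matched the callee's.
 *
 * New pattern: the stored pointer already has the type used at the call site.
 */
static inline void queue_cb(struct balance_callback **list,
                            struct balance_callback *head,
                            void (*func)(struct rq *rq))
{
        head->func = func;                      /* no cast needed */
        head->next = *list;
        *list = head;
}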

4 files changed, 35 insertions(+), 29 deletions(-)
kernel/sched/core.c

Lines changed: 12 additions & 12 deletions
@@ -4823,10 +4823,10 @@ static inline void finish_task(struct task_struct *prev)
 
 #ifdef CONFIG_SMP
 
-static void do_balance_callbacks(struct rq *rq, struct callback_head *head)
+static void do_balance_callbacks(struct rq *rq, struct balance_callback *head)
 {
         void (*func)(struct rq *rq);
-        struct callback_head *next;
+        struct balance_callback *next;
 
         lockdep_assert_rq_held(rq);
 
@@ -4853,15 +4853,15 @@ static void balance_push(struct rq *rq);
  * This abuse is tolerated because it places all the unlikely/odd cases behind
  * a single test, namely: rq->balance_callback == NULL.
  */
-struct callback_head balance_push_callback = {
+struct balance_callback balance_push_callback = {
         .next = NULL,
-        .func = (void (*)(struct callback_head *))balance_push,
+        .func = balance_push,
 };
 
-static inline struct callback_head *
+static inline struct balance_callback *
 __splice_balance_callbacks(struct rq *rq, bool split)
 {
-        struct callback_head *head = rq->balance_callback;
+        struct balance_callback *head = rq->balance_callback;
 
         if (likely(!head))
                 return NULL;
@@ -4883,7 +4883,7 @@ __splice_balance_callbacks(struct rq *rq, bool split)
         return head;
 }
 
-static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
+static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
 {
         return __splice_balance_callbacks(rq, true);
 }
@@ -4893,7 +4893,7 @@ static void __balance_callbacks(struct rq *rq)
         do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
 }
 
-static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
+static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
 {
         unsigned long flags;
 
@@ -4910,12 +4910,12 @@ static inline void __balance_callbacks(struct rq *rq)
 {
 }
 
-static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
+static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
 {
         return NULL;
 }
 
-static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
+static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
 {
 }
 
@@ -6188,7 +6188,7 @@ static void sched_core_balance(struct rq *rq)
         preempt_enable();
 }
 
-static DEFINE_PER_CPU(struct callback_head, core_balance_head);
+static DEFINE_PER_CPU(struct balance_callback, core_balance_head);
 
 static void queue_core_balance(struct rq *rq)
 {
@@ -7419,7 +7419,7 @@ static int __sched_setscheduler(struct task_struct *p,
         int oldpolicy = -1, policy = attr->sched_policy;
         int retval, oldprio, newprio, queued, running;
         const struct sched_class *prev_class;
-        struct callback_head *head;
+        struct balance_callback *head;
         struct rq_flags rf;
         int reset_on_fork;
         int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;

kernel/sched/deadline.c

Lines changed: 2 additions & 2 deletions
@@ -644,8 +644,8 @@ static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
         return rq->online && dl_task(prev);
 }
 
-static DEFINE_PER_CPU(struct callback_head, dl_push_head);
-static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
+static DEFINE_PER_CPU(struct balance_callback, dl_push_head);
+static DEFINE_PER_CPU(struct balance_callback, dl_pull_head);
 
 static void push_dl_tasks(struct rq *);
 static void pull_dl_task(struct rq *);

kernel/sched/rt.c

Lines changed: 2 additions & 2 deletions
@@ -410,8 +410,8 @@ static inline int has_pushable_tasks(struct rq *rq)
         return !plist_head_empty(&rq->rt.pushable_tasks);
 }
 
-static DEFINE_PER_CPU(struct callback_head, rt_push_head);
-static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
+static DEFINE_PER_CPU(struct balance_callback, rt_push_head);
+static DEFINE_PER_CPU(struct balance_callback, rt_pull_head);
 
 static void push_rt_tasks(struct rq *);
 static void pull_rt_task(struct rq *);
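For context, the per-CPU heads converted above (dl_push_head/dl_pull_head and rt_push_head/rt_pull_head) are queued through queue_balance_callback() and later run by do_balance_callbacks() via a correctly typed head->func(rq) call. A sketch of how rt.c queues its push callback under the new type, modeled on the existing rt_queue_push_tasks() helper (that helper itself is not touched by this diff):

static DEFINE_PER_CPU(struct balance_callback, rt_push_head);

static void push_rt_tasks(struct rq *);

/* Called with rq->lock held; only queues work when tasks are pushable. */
static inline void rt_queue_push_tasks(struct rq *rq)
{
        if (!has_pushable_tasks(rq))
                return;

        queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu),
                               push_rt_tasks);
}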

kernel/sched/sched.h

Lines changed: 19 additions & 13 deletions
@@ -938,6 +938,12 @@ struct uclamp_rq {
 DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
 #endif /* CONFIG_UCLAMP_TASK */
 
+struct rq;
+struct balance_callback {
+        struct balance_callback *next;
+        void (*func)(struct rq *rq);
+};
+
 /*
  * This is the main, per-CPU runqueue data structure.
  *
@@ -1036,7 +1042,7 @@ struct rq {
         unsigned long           cpu_capacity;
         unsigned long           cpu_capacity_orig;
 
-        struct callback_head    *balance_callback;
+        struct balance_callback *balance_callback;
 
         unsigned char           nohz_idle_balance;
         unsigned char           idle_balance;
@@ -1182,6 +1188,14 @@ static inline bool is_migration_disabled(struct task_struct *p)
 #endif
 }
 
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+
+#define cpu_rq(cpu)             (&per_cpu(runqueues, (cpu)))
+#define this_rq()               this_cpu_ptr(&runqueues)
+#define task_rq(p)              cpu_rq(task_cpu(p))
+#define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
+#define raw_rq()                raw_cpu_ptr(&runqueues)
+
 struct sched_group;
 #ifdef CONFIG_SCHED_CORE
 static inline struct cpumask *sched_group_span(struct sched_group *sg);
@@ -1269,7 +1283,7 @@ static inline bool sched_group_cookie_match(struct rq *rq,
                 return true;
 
         for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) {
-                if (sched_core_cookie_match(rq, p))
+                if (sched_core_cookie_match(cpu_rq(cpu), p))
                         return true;
         }
         return false;
@@ -1384,14 +1398,6 @@ static inline void update_idle_core(struct rq *rq)
 static inline void update_idle_core(struct rq *rq) { }
 #endif
 
-DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-
-#define cpu_rq(cpu)             (&per_cpu(runqueues, (cpu)))
-#define this_rq()               this_cpu_ptr(&runqueues)
-#define task_rq(p)              cpu_rq(task_cpu(p))
-#define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
-#define raw_rq()                raw_cpu_ptr(&runqueues)
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static inline struct task_struct *task_of(struct sched_entity *se)
 {
@@ -1544,7 +1550,7 @@ struct rq_flags {
 #endif
 };
 
-extern struct callback_head balance_push_callback;
+extern struct balance_callback balance_push_callback;
 
 /*
  * Lockdep annotation that avoids accidental unlocks; it's like a
@@ -1724,7 +1730,7 @@ init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
 
 static inline void
 queue_balance_callback(struct rq *rq,
-                       struct callback_head *head,
+                       struct balance_callback *head,
                        void (*func)(struct rq *rq))
 {
         lockdep_assert_rq_held(rq);
@@ -1737,7 +1743,7 @@ queue_balance_callback(struct rq *rq,
         if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
                 return;
 
-        head->func = (void (*)(struct callback_head *))func;
+        head->func = func;
         head->next = rq->balance_callback;
         rq->balance_callback = head;
 }
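Background on the second fix: sched_group_cookie_match() walks the CPUs of a scheduling group, but the old loop body passed the caller's rq to sched_core_cookie_match() on every iteration, so the verdict never depended on the candidate CPU being inspected. The fix compares against cpu_rq(cpu) instead, which is also why the runqueues/cpu_rq() declarations move earlier in the header. A simplified sketch of the corrected helper's shape (the real version sits under CONFIG_SCHED_CORE in kernel/sched/sched.h):

static inline bool sched_group_cookie_match(struct rq *rq,
                                            struct task_struct *p,
                                            struct sched_group *group)
{
        int cpu;

        /* Cookies only matter once core scheduling is enabled. */
        if (!sched_core_enabled(rq))
                return true;

        for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) {
                /* Check each candidate CPU's runqueue, not the caller's rq. */
                if (sched_core_cookie_match(cpu_rq(cpu), p))
                        return true;
        }
        return false;
}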
