Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 12 additions & 1 deletion Kbuild
Original file line number Diff line number Diff line change
Expand Up @@ -34,13 +34,24 @@ arch/$(SRCARCH)/kernel/asm-offsets.s: $(timeconst-file) $(bounds-file)
$(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s FORCE
$(call filechk,offsets,__ASM_OFFSETS_H__)

# Generate rq-offsets.h

rq-offsets-file := include/generated/rq-offsets.h

targets += kernel/sched/rq-offsets.s

kernel/sched/rq-offsets.s: $(offsets-file)

$(rq-offsets-file): kernel/sched/rq-offsets.s FORCE
$(call filechk,offsets,__RQ_OFFSETS_H__)

# Check for missing system calls

quiet_cmd_syscalls = CALL $<
cmd_syscalls = $(CONFIG_SHELL) $< $(CC) $(c_flags) $(missing_syscalls_flags)

PHONY += missing-syscalls
missing-syscalls: scripts/checksyscalls.sh $(offsets-file)
missing-syscalls: scripts/checksyscalls.sh $(rq-offsets-file)
$(call cmd,syscalls)

# Check the manual modification of atomic headers
Expand Down
1 change: 1 addition & 0 deletions arch/alpha/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
* This code generates raw asm output which is post-processed to extract
* and format the required data.
*/
#define COMPILE_OFFSETS

#include <linux/types.h>
#include <linux/stddef.h>
Expand Down
1 change: 1 addition & 0 deletions arch/arc/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
/*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
*/
#define COMPILE_OFFSETS

#include <linux/sched.h>
#include <linux/mm.h>
Expand Down
2 changes: 2 additions & 0 deletions arch/arm/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@
* This code generates raw asm output which is post-processed to extract
* and format the required data.
*/
#define COMPILE_OFFSETS

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/mm.h>
Expand Down
1 change: 1 addition & 0 deletions arch/arm64/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
* 2001-2002 Keith Owens
* Copyright (C) 2012 ARM Ltd.
*/
#define COMPILE_OFFSETS

#include <linux/arm_sdei.h>
#include <linux/sched.h>
Expand Down
1 change: 1 addition & 0 deletions arch/csky/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#define COMPILE_OFFSETS

#include <linux/sched.h>
#include <linux/kernel_stat.h>
Expand Down
1 change: 1 addition & 0 deletions arch/hexagon/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
*
* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
*/
#define COMPILE_OFFSETS

#include <linux/compat.h>
#include <linux/types.h>
Expand Down
2 changes: 2 additions & 0 deletions arch/loongarch/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#define COMPILE_OFFSETS

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
Expand Down
1 change: 1 addition & 0 deletions arch/m68k/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
* #defines from the assembly-language output.
*/

#define COMPILE_OFFSETS
#define ASM_OFFSETS_C

#include <linux/stddef.h>
Expand Down
1 change: 1 addition & 0 deletions arch/microblaze/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#define COMPILE_OFFSETS

#include <linux/init.h>
#include <linux/stddef.h>
Expand Down
2 changes: 2 additions & 0 deletions arch/mips/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,8 @@
* Kevin Kissell, [email protected] and Carsten Langgaard, [email protected]
* Copyright (C) 2000 MIPS Technologies, Inc.
*/
#define COMPILE_OFFSETS

#include <linux/compat.h>
#include <linux/types.h>
#include <linux/sched.h>
Expand Down
1 change: 1 addition & 0 deletions arch/nios2/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
/*
* Copyright (C) 2011 Tobias Klauser <[email protected]>
*/
#define COMPILE_OFFSETS

#include <linux/stddef.h>
#include <linux/sched.h>
Expand Down
1 change: 1 addition & 0 deletions arch/openrisc/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*/
#define COMPILE_OFFSETS

#include <linux/signal.h>
#include <linux/sched.h>
Expand Down
1 change: 1 addition & 0 deletions arch/parisc/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
* Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
*/
#define COMPILE_OFFSETS

#include <linux/types.h>
#include <linux/sched.h>
Expand Down
1 change: 1 addition & 0 deletions arch/powerpc/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*/
#define COMPILE_OFFSETS

#include <linux/compat.h>
#include <linux/signal.h>
Expand Down
1 change: 1 addition & 0 deletions arch/riscv/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
*/
#define COMPILE_OFFSETS

#include <linux/kbuild.h>
#include <linux/mm.h>
Expand Down
1 change: 1 addition & 0 deletions arch/s390/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
* This code generates raw asm output which is post-processed to extract
* and format the required data.
*/
#define COMPILE_OFFSETS

#include <linux/kbuild.h>
#include <linux/sched.h>
Expand Down
1 change: 1 addition & 0 deletions arch/sh/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*/
#define COMPILE_OFFSETS

#include <linux/stddef.h>
#include <linux/types.h>
Expand Down
1 change: 1 addition & 0 deletions arch/sparc/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
*
* On sparc, thread_info data is static and TI_XXX offsets are computed by hand.
*/
#define COMPILE_OFFSETS

#include <linux/sched.h>
#include <linux/mm_types.h>
Expand Down
2 changes: 2 additions & 0 deletions arch/um/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
@@ -1 +1,3 @@
#define COMPILE_OFFSETS

#include <sysdep/kernel-offsets.h>
1 change: 1 addition & 0 deletions arch/xtensa/kernel/asm-offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
*
* Chris Zankel <[email protected]>
*/
#define COMPILE_OFFSETS

#include <asm/processor.h>
#include <asm/coprocessor.h>
Expand Down
11 changes: 4 additions & 7 deletions include/linux/preempt.h
Original file line number Diff line number Diff line change
Expand Up @@ -372,7 +372,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
/*
* Migrate-Disable and why it is undesired.
*
* When a preempted task becomes elegible to run under the ideal model (IOW it
* When a preempted task becomes eligible to run under the ideal model (IOW it
* becomes one of the M highest priority tasks), it might still have to wait
* for the preemptee's migrate_disable() section to complete. Thereby suffering
* a reduction in bandwidth in the exact duration of the migrate_disable()
Expand All @@ -387,7 +387,7 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
* - a lower priority tasks; which under preempt_disable() could've instantly
* migrated away when another CPU becomes available, is now constrained
* by the ability to push the higher priority task away, which might itself be
* in a migrate_disable() section, reducing it's available bandwidth.
* in a migrate_disable() section, reducing its available bandwidth.
*
* IOW it trades latency / moves the interference term, but it stays in the
* system, and as long as it remains unbounded, the system is not fully
Expand All @@ -399,15 +399,15 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
* PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
* number of primitives into becoming preemptible, they would also allow
* migration. This turns out to break a bunch of per-cpu usage. To this end,
* all these primitives employ migirate_disable() to restore this implicit
* all these primitives employ migrate_disable() to restore this implicit
* assumption.
*
* This is a 'temporary' work-around at best. The correct solution is getting
* rid of the above assumptions and reworking the code to employ explicit
* per-cpu locking or short preempt-disable regions.
*
* The end goal must be to get rid of migrate_disable(), alternatively we need
* a schedulability theory that does not depend on abritrary migration.
* a schedulability theory that does not depend on arbitrary migration.
*
*
* Notes on the implementation.
Expand All @@ -424,8 +424,6 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
* work-conserving schedulers.
*
*/
extern void migrate_disable(void);
extern void migrate_enable(void);

/**
* preempt_disable_nested - Disable preemption inside a normally preempt disabled section
Expand Down Expand Up @@ -471,7 +469,6 @@ static __always_inline void preempt_enable_nested(void)

DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())

#ifdef CONFIG_PREEMPT_DYNAMIC

Expand Down
77 changes: 77 additions & 0 deletions include/linux/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,9 @@
#include <linux/tracepoint-defs.h>
#include <linux/unwind_deferred_types.h>
#include <asm/kmap_size.h>
#ifndef COMPILE_OFFSETS
#include <generated/rq-offsets.h>
#endif

/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
Expand Down Expand Up @@ -2307,4 +2310,78 @@ static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct allo
#define alloc_tag_restore(_tag, _old) do {} while (0)
#endif

#ifndef COMPILE_OFFSETS

/* Out-of-line slow path for migrate_enable(): restores p->cpus_ptr to the
 * regular cpus_mask. Kept in the scheduler core so this header needs no
 * access to scheduler internals. */
extern void __migrate_enable(void);

/* struct rq is deliberately opaque here; the inline helpers below reach the
 * one field they need (nr_pinned) via the generated RQ_nr_pinned byte offset
 * from <generated/rq-offsets.h> instead of the full definition. */
struct rq;
DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

/* Raw (no debug-checking) pointer to this CPU's runqueue. On !SMP there is
 * only one runqueue, so the plain per-cpu address suffices. */
#ifdef CONFIG_SMP
#define this_rq_raw() arch_raw_cpu_ptr(&runqueues)
#else
#define this_rq_raw() PERCPU_PTR(&runqueues)
#endif

/*
 * migrate_enable - undo one level of migrate_disable() nesting.
 *
 * Decrements current->migration_disabled. Only the outermost enable
 * (count dropping from 1 to 0) does real work: restore the task's
 * affinity mask if it was narrowed, clear the flag, and drop this
 * runqueue's pinned-task count.
 */
static inline void migrate_enable(void)
{
struct task_struct *p = current;

#ifdef CONFIG_DEBUG_PREEMPT
/*
* Check both overflow from migrate_disable() and superfluous
* migrate_enable().
*/
if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
return;
#endif

/* Nested section: just unwind one level, nothing else to do. */
if (p->migration_disabled > 1) {
p->migration_disabled--;
return;
}

/*
* Ensure stop_task runs either before or after this, and that
* __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
*/
guard(preempt)();
/* Affinity was narrowed while pinned; take the slow path to restore it. */
if (unlikely(p->cpus_ptr != &p->cpus_mask))
__migrate_enable();
/*
* Mustn't clear migration_disabled() until cpus_ptr points back at the
* regular cpus_mask, otherwise things that race (eg.
* select_fallback_rq) get confused.
*/
barrier();
p->migration_disabled = 0;
/* Decrement this CPU's rq->nr_pinned via its generated byte offset;
 * struct rq is opaque in this header. Safe: preemption is disabled. */
(*(unsigned int *)((void *)this_rq_raw() + RQ_nr_pinned))--;
}

/*
 * migrate_disable - pin current to this CPU (nestable).
 *
 * Increments current->migration_disabled; the first (outermost) call also
 * bumps this runqueue's pinned-task count so the scheduler knows a pinned
 * task is running here. Pair each call with migrate_enable().
 */
static inline void migrate_disable(void)
{
struct task_struct *p = current;

/* Already pinned: just deepen the nesting count. */
if (p->migration_disabled) {
#ifdef CONFIG_DEBUG_PREEMPT
/*
* Warn about overflow half-way through the range.
*/
WARN_ON_ONCE((s16)p->migration_disabled < 0);
#endif
p->migration_disabled++;
return;
}

/* Preemption off while touching this CPU's runqueue and the flag, so we
 * cannot be migrated between the two updates. */
guard(preempt)();
/* Increment this CPU's rq->nr_pinned via its generated byte offset;
 * struct rq is opaque in this header. */
(*(unsigned int *)((void *)this_rq_raw() + RQ_nr_pinned))++;
p->migration_disabled = 1;
}
#else
static inline void migrate_disable(void) { }
static inline void migrate_enable(void) { }
#endif

DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())

#endif
3 changes: 1 addition & 2 deletions kernel/bpf/verifier.c
Original file line number Diff line number Diff line change
Expand Up @@ -23870,8 +23870,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
BTF_SET_START(btf_id_deny)
BTF_ID_UNUSED
#ifdef CONFIG_SMP
BTF_ID(func, migrate_disable)
BTF_ID(func, migrate_enable)
BTF_ID(func, __migrate_enable)
#endif
#if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
BTF_ID(func, rcu_read_unlock_strict)
Expand Down
Loading
Loading