 #include <linux/tracepoint-defs.h>
 #include <linux/unwind_deferred_types.h>
 #include <asm/kmap_size.h>
+#ifndef COMPILE_OFFSETS
+#include <generated/rq-offsets.h>
+#endif
 
 /* task_struct member predeclarations (sorted alphabetically): */
 struct audit_context;
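(Aside: <generated/rq-offsets.h> is not hand-written. The COMPILE_OFFSETS guard above suggests it is produced at build time the same way asm-offsets.h is: a small offsets translation unit, which can see the full definition of struct rq, is compiled and its output is turned into a header. A minimal sketch of such a generator follows, assuming the usual kbuild offsets pattern; the file name, includes, and build wiring are assumptions, not part of this hunk:

	/* rq-offsets.c (hypothetical): emits RQ_nr_pinned for rq-offsets.h */
	#define COMPILE_OFFSETS

	#include <linux/kbuild.h>
	#include <linux/stddef.h>
	#include "sched.h"	/* assumed: kernel/sched/sched.h, full struct rq */

	int main(void)
	{
		/* DEFINE() from <linux/kbuild.h> marks the value so the build
		 * scripts can emit "#define RQ_nr_pinned <byte offset>".
		 */
		DEFINE(RQ_nr_pinned, offsetof(struct rq, nr_pinned));
		return 0;
	}

The generated header then provides the RQ_nr_pinned byte offset used below, letting sched.h poke at one field of struct rq without its full definition.)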
@@ -2307,4 +2310,114 @@ static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct allo |
 #define alloc_tag_restore(_tag, _old) do {} while (0)
 #endif
 
+#ifndef MODULE
+#ifndef COMPILE_OFFSETS
+
+extern void ___migrate_enable(void);
+
+struct rq;
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+
+/*
+ * The full definition of "struct rq" is not available here, so we can't
+ * access "runqueues" with this_cpu_ptr(): compilation would fail in
+ * this_cpu_ptr() -> raw_cpu_ptr() -> __verify_pcpu_ptr() on
+ *	typeof((ptr) + 0)
+ *
+ * So use arch_raw_cpu_ptr()/PERCPU_PTR() directly here.
+ */
+#ifdef CONFIG_SMP
+#define this_rq_raw() arch_raw_cpu_ptr(&runqueues)
+#else
+#define this_rq_raw() PERCPU_PTR(&runqueues)
+#endif
+#define this_rq_pinned() (*(unsigned int *)((void *)this_rq_raw() + RQ_nr_pinned))
+
+static inline void __migrate_enable(void)
+{
+	struct task_struct *p = current;
+
+#ifdef CONFIG_DEBUG_PREEMPT
+	/*
+	 * Check both overflow from migrate_disable() and superfluous
+	 * migrate_enable().
+	 */
+	if (WARN_ON_ONCE((s16)p->migration_disabled <= 0))
+		return;
+#endif
+
+	if (p->migration_disabled > 1) {
+		p->migration_disabled--;
+		return;
+	}
+
+	/*
+	 * Ensure stop_task runs either before or after this, and that
+	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
+	 */
+	guard(preempt)();
+	if (unlikely(p->cpus_ptr != &p->cpus_mask))
+		___migrate_enable();
+	/*
+	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
+	 * regular cpus_mask, otherwise things that race (eg.
+	 * select_fallback_rq) get confused.
+	 */
+	barrier();
+	p->migration_disabled = 0;
+	this_rq_pinned()--;
+}
+
+static inline void __migrate_disable(void)
+{
+	struct task_struct *p = current;
+
+	if (p->migration_disabled) {
+#ifdef CONFIG_DEBUG_PREEMPT
+		/*
+		 * Warn about overflow half-way through the range.
+		 */
+		WARN_ON_ONCE((s16)p->migration_disabled < 0);
+#endif
+		p->migration_disabled++;
+		return;
+	}
+
+	guard(preempt)();
+	this_rq_pinned()++;
+	p->migration_disabled = 1;
+}
+#else /* COMPILE_OFFSETS */
+static inline void __migrate_disable(void) { }
+static inline void __migrate_enable(void) { }
+#endif /* !COMPILE_OFFSETS */
+
+/*
+ * The "runqueues" variable is not visible to kernel modules, and exporting
+ * it is not a good idea. As Peter Zijlstra advised, define and export
+ * migrate_enable()/migrate_disable() in kernel/sched/core.c as well, and
+ * use those for modules. The macro "INSTANTIATE_EXPORTED_MIGRATE_DISABLE"
+ * is defined in kernel/sched/core.c for that purpose.
+ */
+#ifndef INSTANTIATE_EXPORTED_MIGRATE_DISABLE
+static inline void migrate_disable(void)
+{
+	__migrate_disable();
+}
+
+static inline void migrate_enable(void)
+{
+	__migrate_enable();
+}
+#else /* INSTANTIATE_EXPORTED_MIGRATE_DISABLE */
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+#endif /* INSTANTIATE_EXPORTED_MIGRATE_DISABLE */
+
+#else /* MODULE */
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+#endif /* MODULE */
+
+DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
+
 #endif
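(Usage note: with DEFINE_LOCK_GUARD_0(migrate, ...) in place, callers can pair the calls manually or use the scope-based guard from <linux/cleanup.h>. A hedged sketch of a caller; the function name is hypothetical and only illustrates the API:

	#include <linux/sched.h>
	#include <linux/smp.h>
	#include <linux/printk.h>
	#include <linux/cleanup.h>

	static void example_percpu_work(void)
	{
		migrate_disable();
		/*
		 * smp_processor_id() is stable here: the task cannot migrate,
		 * yet it may still be preempted and may even sleep, unlike
		 * under preempt_disable().
		 */
		pr_info("pinned to CPU %d\n", smp_processor_id());
		migrate_enable();

		/* Equivalent scope-based form; migration is re-enabled on exit: */
		scoped_guard(migrate)
			pr_info("still pinned to CPU %d\n", smp_processor_id());
	}

For builtin code this now compiles to the inline fast path above, touching only current->migration_disabled and this CPU's rq pinned count; modules keep calling the exported out-of-line definitions from kernel/sched/core.c.)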