 #include <trace/events/cgroup.h>
 
 static DEFINE_SPINLOCK(rstat_base_lock);
-static DEFINE_PER_CPU(raw_spinlock_t, rstat_base_cpu_lock);
 static DEFINE_PER_CPU(struct llist_head, rstat_backlog_list);
 
 static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
@@ -53,74 +52,6 @@ static inline struct llist_head *ss_lhead_cpu(struct cgroup_subsys *ss, int cpu)
         return per_cpu_ptr(&rstat_backlog_list, cpu);
 }
 
-static raw_spinlock_t *ss_rstat_cpu_lock(struct cgroup_subsys *ss, int cpu)
-{
-        if (ss)
-                return per_cpu_ptr(ss->rstat_ss_cpu_lock, cpu);
-
-        return per_cpu_ptr(&rstat_base_cpu_lock, cpu);
-}
-
-/*
- * Helper functions for rstat per CPU locks.
- *
- * This makes it easier to diagnose locking issues and contention in
- * production environments. The parameter @fast_path determine the
- * tracepoints being added, allowing us to diagnose "flush" related
- * operations without handling high-frequency fast-path "update" events.
- */
-static __always_inline
-unsigned long _css_rstat_cpu_lock(struct cgroup_subsys_state *css, int cpu,
-                                  const bool fast_path)
-{
-        struct cgroup *cgrp = css->cgroup;
-        raw_spinlock_t *cpu_lock;
-        unsigned long flags;
-        bool contended;
-
-        /*
-         * The _irqsave() is needed because the locks used for flushing are
-         * spinlock_t which is a sleeping lock on PREEMPT_RT. Acquiring this lock
-         * with the _irq() suffix only disables interrupts on a non-PREEMPT_RT
-         * kernel. The raw_spinlock_t below disables interrupts on both
-         * configurations. The _irqsave() ensures that interrupts are always
-         * disabled and later restored.
-         */
-        cpu_lock = ss_rstat_cpu_lock(css->ss, cpu);
-        contended = !raw_spin_trylock_irqsave(cpu_lock, flags);
-        if (contended) {
-                if (fast_path)
-                        trace_cgroup_rstat_cpu_lock_contended_fastpath(cgrp, cpu, contended);
-                else
-                        trace_cgroup_rstat_cpu_lock_contended(cgrp, cpu, contended);
-
-                raw_spin_lock_irqsave(cpu_lock, flags);
-        }
-
-        if (fast_path)
-                trace_cgroup_rstat_cpu_locked_fastpath(cgrp, cpu, contended);
-        else
-                trace_cgroup_rstat_cpu_locked(cgrp, cpu, contended);
-
-        return flags;
-}
-
-static __always_inline
-void _css_rstat_cpu_unlock(struct cgroup_subsys_state *css, int cpu,
-                           unsigned long flags, const bool fast_path)
-{
-        struct cgroup *cgrp = css->cgroup;
-        raw_spinlock_t *cpu_lock;
-
-        if (fast_path)
-                trace_cgroup_rstat_cpu_unlock_fastpath(cgrp, cpu, false);
-        else
-                trace_cgroup_rstat_cpu_unlock(cgrp, cpu, false);
-
-        cpu_lock = ss_rstat_cpu_lock(css->ss, cpu);
-        raw_spin_unlock_irqrestore(cpu_lock, flags);
-}
-
 /**
  * css_rstat_updated - keep track of updated rstat_cpu
  * @css: target cgroup subsystem state
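
The removed per-CPU lock/unlock helpers guarded the per-CPU update tree; the remaining code in this file relies on the per-CPU llist heads (rstat_backlog_list, ss->lhead) that are still allocated and initialized below. As background only, here is a minimal sketch of the lockless per-CPU backlog pattern that the kernel's generic llist API provides. It is not part of this patch; demo_node, demo_backlog, demo_record and demo_flush are hypothetical names.

/*
 * Illustrative sketch, not from this patch: a per-CPU lockless backlog.
 * Producers push with llist_add(); a consumer drains the whole list
 * atomically with llist_del_all(). No per-CPU spinlock is required.
 */
#include <linux/llist.h>
#include <linux/percpu.h>
#include <linux/slab.h>

struct demo_node {
        struct llist_node lnode;
        int value;
};

static DEFINE_PER_CPU(struct llist_head, demo_backlog);

/* Update side: lockless push of a caller-allocated node onto this CPU's list. */
static void demo_record(struct demo_node *n)
{
        llist_add(&n->lnode, this_cpu_ptr(&demo_backlog));
}

/* Flush side: atomically take everything queued on @cpu and consume it. */
static void demo_flush(int cpu)
{
        struct llist_node *list = llist_del_all(per_cpu_ptr(&demo_backlog, cpu));
        struct demo_node *n, *tmp;

        llist_for_each_entry_safe(n, tmp, list, lnode)
                kfree(n);       /* real code would fold n->value into a total first */
}

The safe iterator is used because each node is freed while the detached list is walked; a plain llist_for_each_entry() would dereference freed memory.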
@@ -323,15 +254,12 @@ static struct cgroup_subsys_state *css_rstat_updated_list(
 {
         struct css_rstat_cpu *rstatc = css_rstat_cpu(root, cpu);
         struct cgroup_subsys_state *head = NULL, *parent, *child;
-        unsigned long flags;
-
-        flags = _css_rstat_cpu_lock(root, cpu, false);
 
         css_process_update_tree(root->ss, cpu);
 
         /* Return NULL if this subtree is not on-list */
         if (!rstatc->updated_next)
-                goto unlock_ret;
+                return NULL;
 
         /*
          * Unlink @root from its parent. As the updated_children list is
@@ -363,8 +291,7 @@ static struct cgroup_subsys_state *css_rstat_updated_list(
         rstatc->updated_children = root;
         if (child != root)
                 head = css_rstat_push_children(head, child, cpu);
-unlock_ret:
-        _css_rstat_cpu_unlock(root, cpu, flags, false);
+
         return head;
 }
 
@@ -560,34 +487,15 @@ int __init ss_rstat_init(struct cgroup_subsys *ss)
 {
         int cpu;
 
-#ifdef CONFIG_SMP
-        /*
-         * On uniprocessor machines, arch_spinlock_t is defined as an empty
-         * struct. Avoid allocating a size of zero by having this block
-         * excluded in this case. It's acceptable to leave the subsystem locks
-         * unitialized since the associated lock functions are no-ops in the
-         * non-smp case.
-         */
-        if (ss) {
-                ss->rstat_ss_cpu_lock = alloc_percpu(raw_spinlock_t);
-                if (!ss->rstat_ss_cpu_lock)
-                        return -ENOMEM;
-        }
-#endif
-
         if (ss) {
                 ss->lhead = alloc_percpu(struct llist_head);
-                if (!ss->lhead) {
-                        free_percpu(ss->rstat_ss_cpu_lock);
+                if (!ss->lhead)
                         return -ENOMEM;
-                }
         }
 
         spin_lock_init(ss_rstat_lock(ss));
-        for_each_possible_cpu(cpu) {
-                raw_spin_lock_init(ss_rstat_cpu_lock(ss, cpu));
+        for_each_possible_cpu(cpu)
                 init_llist_head(ss_lhead_cpu(ss, cpu));
-        }
 
         return 0;
 }
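
With the per-CPU spinlocks gone, ss_rstat_init() reduces to allocating the per-CPU llist heads and initializing each instance. The following standalone sketch shows the same generic percpu allocation/initialization pattern under assumed names (demo_heads, demo_init, demo_exit are hypothetical, not from this patch); alloc_percpu() provides one object per possible CPU, but each still needs explicit initialization.

/*
 * Sketch of the percpu allocation pattern, assuming hypothetical names.
 */
#include <linux/init.h>
#include <linux/llist.h>
#include <linux/percpu.h>

static struct llist_head __percpu *demo_heads;

static int __init demo_init(void)
{
        int cpu;

        /* One llist_head per possible CPU; NULL on allocation failure. */
        demo_heads = alloc_percpu(struct llist_head);
        if (!demo_heads)
                return -ENOMEM;

        /* Each per-CPU instance must be initialized individually. */
        for_each_possible_cpu(cpu)
                init_llist_head(per_cpu_ptr(demo_heads, cpu));

        return 0;
}

static void demo_exit(void)
{
        free_percpu(demo_heads);
}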