@@ -11,6 +11,7 @@
 
 static DEFINE_SPINLOCK(rstat_base_lock);
 static DEFINE_PER_CPU(raw_spinlock_t, rstat_base_cpu_lock);
+static DEFINE_PER_CPU(struct llist_head, rstat_backlog_list);
 
 static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
 
@@ -45,6 +46,13 @@ static spinlock_t *ss_rstat_lock(struct cgroup_subsys *ss)
         return &rstat_base_lock;
 }
 
+static inline struct llist_head *ss_lhead_cpu(struct cgroup_subsys *ss, int cpu)
+{
+        if (ss)
+                return per_cpu_ptr(ss->lhead, cpu);
+        return per_cpu_ptr(&rstat_backlog_list, cpu);
+}
+
 static raw_spinlock_t *ss_rstat_cpu_lock(struct cgroup_subsys *ss, int cpu)
 {
         if (ss)
@@ -456,7 +464,8 @@ int css_rstat_init(struct cgroup_subsys_state *css)
         for_each_possible_cpu(cpu) {
                 struct css_rstat_cpu *rstatc = css_rstat_cpu(css, cpu);
 
-                rstatc->updated_children = css;
+                rstatc->owner = rstatc->updated_children = css;
+                init_llist_node(&rstatc->lnode);
 
                 if (is_self) {
                         struct cgroup_rstat_base_cpu *rstatbc;
@@ -525,9 +534,19 @@ int __init ss_rstat_init(struct cgroup_subsys *ss)
         }
 #endif
 
+        if (ss) {
+                ss->lhead = alloc_percpu(struct llist_head);
+                if (!ss->lhead) {
+                        free_percpu(ss->rstat_ss_cpu_lock);
+                        return -ENOMEM;
+                }
+        }
+
         spin_lock_init(ss_rstat_lock(ss));
-        for_each_possible_cpu(cpu)
+        for_each_possible_cpu(cpu) {
                 raw_spin_lock_init(ss_rstat_cpu_lock(ss, cpu));
+                init_llist_head(ss_lhead_cpu(ss, cpu));
+        }
 
         return 0;
 }
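Not part of the diff above, but useful context: a minimal sketch of how the per-CPU llist backlog could be produced and consumed, assuming hypothetical helpers css_rstat_mark_updated() and css_rstat_flush_backlog(), and assuming only the owning CPU enqueues its own lnode (which is what would make the unlocked llist_on_list() check safe). Only ss_lhead_cpu(), lnode and owner come from the patch itself; everything else is illustrative.

/*
 * Illustrative sketch, not from the patch. llist_add(), llist_del_all(),
 * llist_on_list() and llist_for_each_entry_safe() are the stock kernel
 * llist API; the two helpers below are hypothetical.
 */
static void css_rstat_mark_updated(struct cgroup_subsys_state *css, int cpu)
{
        struct css_rstat_cpu *rstatc = css_rstat_cpu(css, cpu);

        /* already queued on this CPU's backlog? nothing to do */
        if (llist_on_list(&rstatc->lnode))
                return;

        /* lock-free push onto the per-CPU backlog for this subsystem */
        llist_add(&rstatc->lnode, ss_lhead_cpu(css->ss, cpu));
}

static void css_rstat_flush_backlog(struct cgroup_subsys *ss, int cpu)
{
        struct css_rstat_cpu *rstatc, *tmp;
        struct llist_node *lnode;

        /* atomically take the whole backlog; producers start a fresh list */
        lnode = llist_del_all(ss_lhead_cpu(ss, cpu));

        llist_for_each_entry_safe(rstatc, tmp, lnode, lnode) {
                /* re-arm the node so this css can be queued again */
                init_llist_node(&rstatc->lnode);
                /* ... flush the stats of rstatc->owner for this cpu ... */
        }
}

The likely point of the llist here is that llist_add() and llist_del_all() are lock-free (a single cmpxchg/xchg on the list head), so the update path can record a pending css without contending with the flusher.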