@@ -1954,6 +1954,7 @@ static void perf_group_attach(struct perf_event *event)
 
 	list_add_tail(&event->sibling_list, &group_leader->sibling_list);
 	group_leader->nr_siblings++;
+	group_leader->group_generation++;
 
 	perf_event__header_size(group_leader);
 
@@ -2144,6 +2145,7 @@ static void perf_group_detach(struct perf_event *event)
 	if (leader != event) {
 		list_del_init(&event->sibling_list);
 		event->group_leader->nr_siblings--;
+		event->group_leader->group_generation++;
 		goto out;
 	}
 
@@ -5440,7 +5442,7 @@ static int __perf_read_group_add(struct perf_event *leader,
 					u64 read_format, u64 *values)
 {
 	struct perf_event_context *ctx = leader->ctx;
-	struct perf_event *sub;
+	struct perf_event *sub, *parent;
 	unsigned long flags;
 	int n = 1; /* skip @nr */
 	int ret;
@@ -5450,6 +5452,33 @@ static int __perf_read_group_add(struct perf_event *leader,
 		return ret;
 
 	raw_spin_lock_irqsave(&ctx->lock, flags);
+	/*
+	 * Verify the grouping between the parent and child (inherited)
+	 * events is still intact.
+	 *
+	 * Specifically:
+	 *  - leader->ctx->lock pins leader->sibling_list
+	 *  - parent->child_mutex pins parent->child_list
+	 *  - parent->ctx->mutex pins parent->sibling_list
+	 *
+	 * Because parent->ctx != leader->ctx (and child_list nests inside
+	 * ctx->mutex), group destruction is not atomic between children, also
+	 * see perf_event_release_kernel(). Additionally, parent can grow the
+	 * group.
+	 *
+	 * Therefore it is possible to have parent and child groups in a
+	 * different configuration and summing over such a beast makes no sense
+	 * whatsoever.
+	 *
+	 * Reject this.
+	 */
+	parent = leader->parent;
+	if (parent &&
+	    (parent->group_generation != leader->group_generation ||
+	     parent->nr_siblings != leader->nr_siblings)) {
+		ret = -ECHILD;
+		goto unlock;
+	}
 
 	/*
 	 * Since we co-schedule groups, {enabled,running} times of siblings
@@ -5483,8 +5512,9 @@ static int __perf_read_group_add(struct perf_event *leader,
 		values[n++] = atomic64_read(&sub->lost_samples);
 	}
 
+unlock:
 	raw_spin_unlock_irqrestore(&ctx->lock, flags);
-	return 0;
+	return ret;
 }
 
 static int perf_read_group(struct perf_event *event,
@@ -5503,10 +5533,6 @@ static int perf_read_group(struct perf_event *event,
 
 	values[0] = 1 + leader->nr_siblings;
 
-	/*
-	 * By locking the child_mutex of the leader we effectively
-	 * lock the child list of all siblings.. XXX explain how.
-	 */
 	mutex_lock(&leader->child_mutex);
 
 	ret = __perf_read_group_add(leader, read_format, values);
@@ -13346,6 +13372,7 @@ static int inherit_group(struct perf_event *parent_event,
 		    !perf_get_aux_event(child_ctr, leader))
 			return -EINVAL;
 	}
+	leader->group_generation = parent_event->group_generation;
 	return 0;
 }
 
0 commit comments