Skip to content

Commit de0029f

Browse files
James-A-Clark authored and Suzuki K Poulose committed
coresight: Remove pending trace ID release mechanism
Pending the release of IDs was a way of managing concurrent sysfs and Perf sessions in a single global ID map. Perf may have finished while sysfs hadn't, and Perf shouldn't release the IDs in use by sysfs and vice versa. Now that Perf uses its own exclusive ID maps, pending release doesn't result in any different behavior than just releasing all IDs when the last Perf session finishes. As part of the per-sink trace ID change, we would have still had to make the pending mechanism work on a per-sink basis, due to the overlapping ID allocations, so instead of making that more complicated, just remove it. Signed-off-by: James Clark <[email protected]> Reviewed-by: Mike Leach <[email protected]> Signed-off-by: James Clark <[email protected]> Signed-off-by: Suzuki K Poulose <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 5ad628a commit de0029f

File tree

4 files changed

+43
-79
lines changed

4 files changed

+43
-79
lines changed

drivers/hwtracing/coresight/coresight-etm-perf.c

Lines changed: 11 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -232,15 +232,21 @@ static void free_event_data(struct work_struct *work)
232232
if (!(IS_ERR_OR_NULL(*ppath))) {
233233
struct coresight_device *sink = coresight_get_sink(*ppath);
234234

235-
coresight_trace_id_put_cpu_id_map(cpu, &sink->perf_sink_id_map);
235+
/*
236+
* Mark perf event as done for trace id allocator, but don't call
237+
* coresight_trace_id_put_cpu_id_map() on individual IDs. Perf sessions
238+
* never free trace IDs to ensure that the ID associated with a CPU
239+
* cannot change during their and other's concurrent sessions. Instead,
240+
* a refcount is used so that the last event to call
241+
* coresight_trace_id_perf_stop() frees all IDs.
242+
*/
243+
coresight_trace_id_perf_stop(&sink->perf_sink_id_map);
244+
236245
coresight_release_path(*ppath);
237246
}
238247
*ppath = NULL;
239248
}
240249

241-
/* mark perf event as done for trace id allocator */
242-
coresight_trace_id_perf_stop();
243-
244250
free_percpu(event_data->path);
245251
kfree(event_data);
246252
}
@@ -328,9 +334,6 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
328334
sink = user_sink = coresight_get_sink_by_id(id);
329335
}
330336

331-
/* tell the trace ID allocator that a perf event is starting up */
332-
coresight_trace_id_perf_start();
333-
334337
/* check if user wants a coresight configuration selected */
335338
cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32);
336339
if (cfg_hash) {
@@ -411,6 +414,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
411414
continue;
412415
}
413416

417+
coresight_trace_id_perf_start(&sink->perf_sink_id_map);
414418
*etm_event_cpu_path_ptr(event_data, cpu) = path;
415419
}
416420

drivers/hwtracing/coresight/coresight-trace-id.c

Lines changed: 16 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -18,12 +18,6 @@ static struct coresight_trace_id_map id_map_default = {
1818
.cpu_map = &id_map_default_cpu_ids
1919
};
2020

21-
/* maintain a record of the pending releases per cpu */
22-
static cpumask_t cpu_id_release_pending;
23-
24-
/* perf session active counter */
25-
static atomic_t perf_cs_etm_session_active = ATOMIC_INIT(0);
26-
2721
/* lock to protect id_map and cpu data */
2822
static DEFINE_SPINLOCK(id_map_lock);
2923

@@ -35,7 +29,6 @@ static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
3529
{
3630
pr_debug("%s id_map::\n", func_name);
3731
pr_debug("Used = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->used_ids);
38-
pr_debug("Pend = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->pend_rel_ids);
3932
}
4033
#define DUMP_ID_MAP(map) coresight_trace_id_dump_table(map, __func__)
4134
#define DUMP_ID_CPU(cpu, id) pr_debug("%s called; cpu=%d, id=%d\n", __func__, cpu, id)
@@ -122,34 +115,18 @@ static void coresight_trace_id_free(int id, struct coresight_trace_id_map *id_ma
122115
clear_bit(id, id_map->used_ids);
123116
}
124117

125-
static void coresight_trace_id_set_pend_rel(int id, struct coresight_trace_id_map *id_map)
126-
{
127-
if (WARN(!IS_VALID_CS_TRACE_ID(id), "Invalid Trace ID %d\n", id))
128-
return;
129-
set_bit(id, id_map->pend_rel_ids);
130-
}
131-
132118
/*
133-
* release all pending IDs for all current maps & clear CPU associations
134-
*
135-
* This currently operates on the default id map, but may be extended to
136-
* operate on all registered id maps if per sink id maps are used.
119+
* Release all IDs and clear CPU associations.
137120
*/
138-
static void coresight_trace_id_release_all_pending(void)
121+
static void coresight_trace_id_release_all(struct coresight_trace_id_map *id_map)
139122
{
140-
struct coresight_trace_id_map *id_map = &id_map_default;
141123
unsigned long flags;
142-
int cpu, bit;
124+
int cpu;
143125

144126
spin_lock_irqsave(&id_map_lock, flags);
145-
for_each_set_bit(bit, id_map->pend_rel_ids, CORESIGHT_TRACE_ID_RES_TOP) {
146-
clear_bit(bit, id_map->used_ids);
147-
clear_bit(bit, id_map->pend_rel_ids);
148-
}
149-
for_each_cpu(cpu, &cpu_id_release_pending) {
150-
atomic_set(per_cpu_ptr(id_map_default.cpu_map, cpu), 0);
151-
cpumask_clear_cpu(cpu, &cpu_id_release_pending);
152-
}
127+
bitmap_zero(id_map->used_ids, CORESIGHT_TRACE_IDS_MAX);
128+
for_each_possible_cpu(cpu)
129+
atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
153130
spin_unlock_irqrestore(&id_map_lock, flags);
154131
DUMP_ID_MAP(id_map);
155132
}
@@ -164,7 +141,7 @@ static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map
164141
/* check for existing allocation for this CPU */
165142
id = _coresight_trace_id_read_cpu_id(cpu, id_map);
166143
if (id)
167-
goto get_cpu_id_clr_pend;
144+
goto get_cpu_id_out_unlock;
168145

169146
/*
170147
* Find a new ID.
@@ -185,11 +162,6 @@ static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map
185162
/* allocate the new id to the cpu */
186163
atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id);
187164

188-
get_cpu_id_clr_pend:
189-
/* we are (re)using this ID - so ensure it is not marked for release */
190-
cpumask_clear_cpu(cpu, &cpu_id_release_pending);
191-
clear_bit(id, id_map->pend_rel_ids);
192-
193165
get_cpu_id_out_unlock:
194166
spin_unlock_irqrestore(&id_map_lock, flags);
195167

@@ -210,15 +182,8 @@ static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_ma
210182

211183
spin_lock_irqsave(&id_map_lock, flags);
212184

213-
if (atomic_read(&perf_cs_etm_session_active)) {
214-
/* set release at pending if perf still active */
215-
coresight_trace_id_set_pend_rel(id, id_map);
216-
cpumask_set_cpu(cpu, &cpu_id_release_pending);
217-
} else {
218-
/* otherwise clear id */
219-
coresight_trace_id_free(id, id_map);
220-
atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
221-
}
185+
coresight_trace_id_free(id, id_map);
186+
atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
222187

223188
spin_unlock_irqrestore(&id_map_lock, flags);
224189
DUMP_ID_CPU(cpu, id);
@@ -302,17 +267,17 @@ void coresight_trace_id_put_system_id(int id)
302267
}
303268
EXPORT_SYMBOL_GPL(coresight_trace_id_put_system_id);
304269

305-
void coresight_trace_id_perf_start(void)
270+
void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map)
306271
{
307-
atomic_inc(&perf_cs_etm_session_active);
308-
PERF_SESSION(atomic_read(&perf_cs_etm_session_active));
272+
atomic_inc(&id_map->perf_cs_etm_session_active);
273+
PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active));
309274
}
310275
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_start);
311276

312-
void coresight_trace_id_perf_stop(void)
277+
void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map)
313278
{
314-
if (!atomic_dec_return(&perf_cs_etm_session_active))
315-
coresight_trace_id_release_all_pending();
316-
PERF_SESSION(atomic_read(&perf_cs_etm_session_active));
279+
if (!atomic_dec_return(&id_map->perf_cs_etm_session_active))
280+
coresight_trace_id_release_all(id_map);
281+
PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active));
317282
}
318283
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_stop);

drivers/hwtracing/coresight/coresight-trace-id.h

Lines changed: 14 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -17,9 +17,10 @@
1717
* released when done.
1818
*
1919
* In order to ensure that a consistent cpu / ID matching is maintained
20-
* throughout a perf cs_etm event session - a session in progress flag will
21-
* be maintained, and released IDs not cleared until the perf session is
22-
* complete. This allows the same CPU to be re-allocated its prior ID.
20+
* throughout a perf cs_etm event session - a session in progress flag will be
21+
* maintained for each sink, and IDs are cleared when all the perf sessions
22+
* complete. This allows the same CPU to be re-allocated its prior ID when
23+
* events are scheduled in and out.
2324
*
2425
*
2526
* Trace ID maps will be created and initialised to prevent architecturally
@@ -66,11 +67,7 @@ int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id
6667
/**
6768
* Release an allocated trace ID associated with the CPU.
6869
*
69-
* This will release the CoreSight trace ID associated with the CPU,
70-
* unless a perf session is in operation.
71-
*
72-
* If a perf session is in operation then the ID will be marked as pending
73-
* release.
70+
* This will release the CoreSight trace ID associated with the CPU.
7471
*
7572
* @cpu: The CPU index to release the associated trace ID.
7673
*/
@@ -133,21 +130,21 @@ void coresight_trace_id_put_system_id(int id);
133130
/**
134131
* Notify the Trace ID allocator that a perf session is starting.
135132
*
136-
* Increase the perf session reference count - called by perf when setting up
137-
* a trace event.
133+
* Increase the perf session reference count - called by perf when setting up a
134+
* trace event.
138135
*
139-
* This reference count is used by the ID allocator to ensure that trace IDs
140-
* associated with a CPU cannot change or be released during a perf session.
136+
* Perf sessions never free trace IDs to ensure that the ID associated with a
137+
* CPU cannot change during their and other's concurrent sessions. Instead,
138+
* this refcount is used so that the last event to finish always frees all IDs.
141139
*/
142-
void coresight_trace_id_perf_start(void);
140+
void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map);
143141

144142
/**
145143
* Notify the ID allocator that a perf session is stopping.
146144
*
147-
* Decrease the perf session reference count.
148-
* if this causes the count to go to zero, then all Trace IDs marked as pending
149-
* release, will be released.
145+
* Decrease the perf session reference count. If this causes the count to go to
146+
* zero, then all Trace IDs will be released.
150147
*/
151-
void coresight_trace_id_perf_stop(void);
148+
void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map);
152149

153150
#endif /* _CORESIGHT_TRACE_ID_H */

include/linux/coresight.h

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -227,14 +227,12 @@ struct coresight_sysfs_link {
227227
* @used_ids: Bitmap to register available (bit = 0) and in use (bit = 1) IDs.
228228
* Initialised so that the reserved IDs are permanently marked as
229229
* in use.
230-
* @pend_rel_ids: CPU IDs that have been released by the trace source but not
231-
* yet marked as available, to allow re-allocation to the same
232-
* CPU during a perf session.
230+
* @perf_cs_etm_session_active: Number of Perf sessions using this ID map.
233231
*/
234232
struct coresight_trace_id_map {
235233
DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX);
236-
DECLARE_BITMAP(pend_rel_ids, CORESIGHT_TRACE_IDS_MAX);
237234
atomic_t __percpu *cpu_map;
235+
atomic_t perf_cs_etm_session_active;
238236
};
239237

240238
/**

0 commit comments

Comments
 (0)