@@ -15,12 +15,10 @@
 /* Default trace ID map. Used in sysfs mode and for system sources */
 static DEFINE_PER_CPU(atomic_t, id_map_default_cpu_ids) = ATOMIC_INIT(0);
 static struct coresight_trace_id_map id_map_default = {
-	.cpu_map = &id_map_default_cpu_ids
+	.cpu_map = &id_map_default_cpu_ids,
+	.lock = __SPIN_LOCK_UNLOCKED(id_map_default.lock)
 };
 
-/* lock to protect id_map and cpu data */
-static DEFINE_SPINLOCK(id_map_lock);
-
 /* #define TRACE_ID_DEBUG 1 */
 #if defined(TRACE_ID_DEBUG) || defined(CONFIG_COMPILE_TEST)
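
This first hunk embeds the spinlock in each struct coresight_trace_id_map and drops the file-scope id_map_lock, so each map serializes access to its own ID bitmap and per-CPU data. A minimal sketch of the two initialization idioms involved, using a simplified stand-in struct (example_id_map and example_map_alloc are illustrative names, not from this code):

#include <linux/slab.h>
#include <linux/spinlock.h>

/* Simplified stand-in for struct coresight_trace_id_map. */
struct example_id_map {
	spinlock_t lock;	/* protects this map's own state */
};

/* Static instance: lock initialized at compile time, as in the hunk above. */
static struct example_id_map static_map = {
	.lock = __SPIN_LOCK_UNLOCKED(static_map.lock),
};

/* Dynamic instance: the embedded lock must be initialized before first use. */
static struct example_id_map *example_map_alloc(gfp_t gfp)
{
	struct example_id_map *map = kzalloc(sizeof(*map), gfp);

	if (map)
		spin_lock_init(&map->lock);
	return map;
}
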
@@ -123,11 +121,11 @@ static void coresight_trace_id_release_all(struct coresight_trace_id_map *id_map)
 	unsigned long flags;
 	int cpu;
 
-	spin_lock_irqsave(&id_map_lock, flags);
+	spin_lock_irqsave(&id_map->lock, flags);
 	bitmap_zero(id_map->used_ids, CORESIGHT_TRACE_IDS_MAX);
 	for_each_possible_cpu(cpu)
 		atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
-	spin_unlock_irqrestore(&id_map_lock, flags);
+	spin_unlock_irqrestore(&id_map->lock, flags);
 	DUMP_ID_MAP(id_map);
 }
 
@@ -136,7 +134,7 @@ static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
 	unsigned long flags;
 	int id;
 
-	spin_lock_irqsave(&id_map_lock, flags);
+	spin_lock_irqsave(&id_map->lock, flags);
 
 	/* check for existing allocation for this CPU */
 	id = _coresight_trace_id_read_cpu_id(cpu, id_map);
@@ -163,7 +161,7 @@ static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
 	atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id);
 
 get_cpu_id_out_unlock:
-	spin_unlock_irqrestore(&id_map_lock, flags);
+	spin_unlock_irqrestore(&id_map->lock, flags);
 
 	DUMP_ID_CPU(cpu, id);
 	DUMP_ID_MAP(id_map);
@@ -180,12 +178,12 @@ static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
 	if (!id)
 		return;
 
-	spin_lock_irqsave(&id_map_lock, flags);
+	spin_lock_irqsave(&id_map->lock, flags);
 
 	coresight_trace_id_free(id, id_map);
 	atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
 
-	spin_unlock_irqrestore(&id_map_lock, flags);
+	spin_unlock_irqrestore(&id_map->lock, flags);
 	DUMP_ID_CPU(cpu, id);
 	DUMP_ID_MAP(id_map);
 }
@@ -195,10 +193,10 @@ static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *id_map)
 	unsigned long flags;
 	int id;
 
-	spin_lock_irqsave(&id_map_lock, flags);
+	spin_lock_irqsave(&id_map->lock, flags);
 	/* prefer odd IDs for system components to avoid legacy CPU IDS */
 	id = coresight_trace_id_alloc_new_id(id_map, 0, true);
-	spin_unlock_irqrestore(&id_map_lock, flags);
+	spin_unlock_irqrestore(&id_map->lock, flags);
 
 	DUMP_ID(id);
 	DUMP_ID_MAP(id_map);
@@ -209,9 +207,9 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *id_map, int id)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&id_map_lock, flags);
+	spin_lock_irqsave(&id_map->lock, flags);
 	coresight_trace_id_free(id, id_map);
-	spin_unlock_irqrestore(&id_map_lock, flags);
+	spin_unlock_irqrestore(&id_map->lock, flags);
 
 	DUMP_ID(id);
 	DUMP_ID_MAP(id_map);
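
Taken together, the remaining hunks swap every &id_map_lock for &id_map->lock: the critical sections are unchanged, but they now serialize per map instead of globally, so independent maps can be manipulated concurrently. A hedged usage sketch (hypothetical helper, reusing the example_id_map stand-in from the sketch above):

/* Hypothetical: resetting one map never blocks users of another map. */
static void example_release_one_map(struct example_id_map *map)
{
	unsigned long flags;

	spin_lock_irqsave(&map->lock, flags);
	/* ... clear only this map's bookkeeping here ... */
	spin_unlock_irqrestore(&map->lock, flags);
}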