@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * JZ47xx SoCs TCU IRQ driver
+ * Ingenic SoCs TCU IRQ driver
  * Copyright (C) 2019 Paul Cercueil <[email protected]>
+ * Copyright (C) 2020 周琰杰 (Zhou Yanjie) <[email protected]>
  */

 #include <linux/bitops.h>
@@ -15,24 +16,35 @@
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
+#include <linux/overflow.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/sched_clock.h>

 #include <dt-bindings/clock/ingenic,tcu.h>

+static DEFINE_PER_CPU(call_single_data_t, ingenic_cevt_csd);
+
 struct ingenic_soc_info {
 	unsigned int num_channels;
 };

+struct ingenic_tcu_timer {
+	unsigned int cpu;
+	unsigned int channel;
+	struct clock_event_device cevt;
+	struct clk *clk;
+	char name[8];
+};
+
 struct ingenic_tcu {
 	struct regmap *map;
-	struct clk *timer_clk, *cs_clk;
-	unsigned int timer_channel, cs_channel;
-	struct clock_event_device cevt;
+	struct device_node *np;
+	struct clk *cs_clk;
+	unsigned int cs_channel;
 	struct clocksource cs;
-	char name[4];
 	unsigned long pwm_channels_mask;
+	struct ingenic_tcu_timer timers[];
 };

 static struct ingenic_tcu *ingenic_tcu;
@@ -52,44 +64,65 @@ static u64 notrace ingenic_tcu_timer_cs_read(struct clocksource *cs)
 	return ingenic_tcu_timer_read();
 }

-static inline struct ingenic_tcu *to_ingenic_tcu(struct clock_event_device *evt)
+static inline struct ingenic_tcu *
+to_ingenic_tcu(struct ingenic_tcu_timer *timer)
+{
+	return container_of(timer, struct ingenic_tcu, timers[timer->cpu]);
+}
+
+static inline struct ingenic_tcu_timer *
+to_ingenic_tcu_timer(struct clock_event_device *evt)
 {
-	return container_of(evt, struct ingenic_tcu, cevt);
+	return container_of(evt, struct ingenic_tcu_timer, cevt);
 }

 static int ingenic_tcu_cevt_set_state_shutdown(struct clock_event_device *evt)
 {
-	struct ingenic_tcu *tcu = to_ingenic_tcu(evt);
+	struct ingenic_tcu_timer *timer = to_ingenic_tcu_timer(evt);
+	struct ingenic_tcu *tcu = to_ingenic_tcu(timer);

-	regmap_write(tcu->map, TCU_REG_TECR, BIT(tcu->timer_channel));
+	regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));

 	return 0;
 }

 static int ingenic_tcu_cevt_set_next(unsigned long next,
 				     struct clock_event_device *evt)
 {
-	struct ingenic_tcu *tcu = to_ingenic_tcu(evt);
+	struct ingenic_tcu_timer *timer = to_ingenic_tcu_timer(evt);
+	struct ingenic_tcu *tcu = to_ingenic_tcu(timer);

 	if (next > 0xffff)
 		return -EINVAL;

-	regmap_write(tcu->map, TCU_REG_TDFRc(tcu->timer_channel), next);
-	regmap_write(tcu->map, TCU_REG_TCNTc(tcu->timer_channel), 0);
-	regmap_write(tcu->map, TCU_REG_TESR, BIT(tcu->timer_channel));
+	regmap_write(tcu->map, TCU_REG_TDFRc(timer->channel), next);
+	regmap_write(tcu->map, TCU_REG_TCNTc(timer->channel), 0);
+	regmap_write(tcu->map, TCU_REG_TESR, BIT(timer->channel));

 	return 0;
 }

+static void ingenic_per_cpu_event_handler(void *info)
+{
+	struct clock_event_device *cevt = (struct clock_event_device *) info;
+
+	cevt->event_handler(cevt);
+}
+
 static irqreturn_t ingenic_tcu_cevt_cb(int irq, void *dev_id)
 {
-	struct clock_event_device *evt = dev_id;
-	struct ingenic_tcu *tcu = to_ingenic_tcu(evt);
+	struct ingenic_tcu_timer *timer = dev_id;
+	struct ingenic_tcu *tcu = to_ingenic_tcu(timer);
+	call_single_data_t *csd;

-	regmap_write(tcu->map, TCU_REG_TECR, BIT(tcu->timer_channel));
+	regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));

-	if (evt->event_handler)
-		evt->event_handler(evt);
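+	/*
+	 * The TCU interrupt may be taken on a different CPU than the one
+	 * that owns this clockevent, so hand the event off to the owning
+	 * CPU through an async single-CPU function call.
+	 */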
+	if (timer->cevt.event_handler) {
+		csd = &per_cpu(ingenic_cevt_csd, timer->cpu);
+		csd->info = (void *) &timer->cevt;
+		csd->func = ingenic_per_cpu_event_handler;
+		smp_call_function_single_async(timer->cpu, csd);
+	}

 	return IRQ_HANDLED;
 }
@@ -105,64 +138,66 @@ static struct clk * __init ingenic_tcu_get_clock(struct device_node *np, int id)
 	return of_clk_get_from_provider(&args);
 }

-static int __init ingenic_tcu_timer_init(struct device_node *np,
-					 struct ingenic_tcu *tcu)
+static int ingenic_tcu_setup_cevt(unsigned int cpu)
 {
-	unsigned int timer_virq, channel = tcu->timer_channel;
+	struct ingenic_tcu *tcu = ingenic_tcu;
+	struct ingenic_tcu_timer *timer = &tcu->timers[cpu];
+	unsigned int timer_virq;
 	struct irq_domain *domain;
 	unsigned long rate;
 	int err;

-	tcu->timer_clk = ingenic_tcu_get_clock(np, channel);
-	if (IS_ERR(tcu->timer_clk))
-		return PTR_ERR(tcu->timer_clk);
+	timer->clk = ingenic_tcu_get_clock(tcu->np, timer->channel);
+	if (IS_ERR(timer->clk))
+		return PTR_ERR(timer->clk);

-	err = clk_prepare_enable(tcu->timer_clk);
+	err = clk_prepare_enable(timer->clk);
 	if (err)
 		goto err_clk_put;

-	rate = clk_get_rate(tcu->timer_clk);
+	rate = clk_get_rate(timer->clk);
 	if (!rate) {
 		err = -EINVAL;
 		goto err_clk_disable;
 	}

-	domain = irq_find_host(np);
+	domain = irq_find_host(tcu->np);
 	if (!domain) {
 		err = -ENODEV;
 		goto err_clk_disable;
 	}

-	timer_virq = irq_create_mapping(domain, channel);
+	timer_virq = irq_create_mapping(domain, timer->channel);
 	if (!timer_virq) {
 		err = -EINVAL;
 		goto err_clk_disable;
 	}

-	snprintf(tcu->name, sizeof(tcu->name), "TCU");
+	snprintf(timer->name, sizeof(timer->name), "TCU%u", timer->channel);

 	err = request_irq(timer_virq, ingenic_tcu_cevt_cb, IRQF_TIMER,
-			  tcu->name, &tcu->cevt);
+			  timer->name, timer);
 	if (err)
 		goto err_irq_dispose_mapping;

-	tcu->cevt.cpumask = cpumask_of(smp_processor_id());
-	tcu->cevt.features = CLOCK_EVT_FEAT_ONESHOT;
-	tcu->cevt.name = tcu->name;
-	tcu->cevt.rating = 200;
-	tcu->cevt.set_state_shutdown = ingenic_tcu_cevt_set_state_shutdown;
-	tcu->cevt.set_next_event = ingenic_tcu_cevt_set_next;
+	timer->cpu = smp_processor_id();
+	timer->cevt.cpumask = cpumask_of(smp_processor_id());
+	timer->cevt.features = CLOCK_EVT_FEAT_ONESHOT;
+	timer->cevt.name = timer->name;
+	timer->cevt.rating = 200;
+	timer->cevt.set_state_shutdown = ingenic_tcu_cevt_set_state_shutdown;
+	timer->cevt.set_next_event = ingenic_tcu_cevt_set_next;

-	clockevents_config_and_register(&tcu->cevt, rate, 10, 0xffff);
+	clockevents_config_and_register(&timer->cevt, rate, 10, 0xffff);

 	return 0;

 err_irq_dispose_mapping:
 	irq_dispose_mapping(timer_virq);
 err_clk_disable:
-	clk_disable_unprepare(tcu->timer_clk);
+	clk_disable_unprepare(timer->clk);
 err_clk_put:
-	clk_put(tcu->timer_clk);
+	clk_put(timer->clk);
 	return err;
 }

@@ -238,52 +273,73 @@ static int __init ingenic_tcu_init(struct device_node *np)
 {
 	const struct of_device_id *id = of_match_node(ingenic_tcu_of_match, np);
 	const struct ingenic_soc_info *soc_info = id->data;
+	struct ingenic_tcu_timer *timer;
 	struct ingenic_tcu *tcu;
 	struct regmap *map;
+	unsigned int cpu;
+	int ret, last_bit = -1;
 	long rate;
-	int ret;

 	of_node_clear_flag(np, OF_POPULATED);

 	map = device_node_to_regmap(np);
 	if (IS_ERR(map))
 		return PTR_ERR(map);

-	tcu = kzalloc(sizeof(*tcu), GFP_KERNEL);
+	tcu = kzalloc(struct_size(tcu, timers, num_possible_cpus()),
+		      GFP_KERNEL);
 	if (!tcu)
 		return -ENOMEM;

-	/* Enable all TCU channels for PWM use by default except channels 0/1 */
-	tcu->pwm_channels_mask = GENMASK(soc_info->num_channels - 1, 2);
+	/*
+	 * Enable all TCU channels for PWM use by default except channels 0/1,
+	 * and channel 2 if target CPU is JZ4780/X2000 and SMP is selected.
+	 */
+	tcu->pwm_channels_mask = GENMASK(soc_info->num_channels - 1,
+					 num_possible_cpus() + 1);
 	of_property_read_u32(np, "ingenic,pwm-channels-mask",
 			     (u32 *)&tcu->pwm_channels_mask);

-	/* Verify that we have at least two free channels */
-	if (hweight8(tcu->pwm_channels_mask) > soc_info->num_channels - 2) {
+	/* Verify that we have at least num_possible_cpus() + 1 free channels */
+	if (hweight8(tcu->pwm_channels_mask) >
+			soc_info->num_channels - num_possible_cpus() + 1) {
 		pr_crit("%s: Invalid PWM channel mask: 0x%02lx\n", __func__,
 			tcu->pwm_channels_mask);
 		ret = -EINVAL;
 		goto err_free_ingenic_tcu;
 	}

 	tcu->map = map;
+	tcu->np = np;
 	ingenic_tcu = tcu;

-	tcu->timer_channel = find_first_zero_bit(&tcu->pwm_channels_mask,
-						 soc_info->num_channels);
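+	/* Reserve one free TCU channel per possible CPU for its clock event device */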
+	for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
+		timer = &tcu->timers[cpu];
+
+		timer->cpu = cpu;
+		timer->channel = find_next_zero_bit(&tcu->pwm_channels_mask,
+						    soc_info->num_channels,
+						    last_bit + 1);
+		last_bit = timer->channel;
+	}
+
 	tcu->cs_channel = find_next_zero_bit(&tcu->pwm_channels_mask,
 					     soc_info->num_channels,
-					     tcu->timer_channel + 1);
+					     last_bit + 1);

 	ret = ingenic_tcu_clocksource_init(np, tcu);
 	if (ret) {
 		pr_crit("%s: Unable to init clocksource: %d\n", __func__, ret);
 		goto err_free_ingenic_tcu;
 	}

-	ret = ingenic_tcu_timer_init(np, tcu);
-	if (ret)
+	/* Setup clock events on each CPU core */
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "Ingenic XBurst: online",
+				ingenic_tcu_setup_cevt, NULL);
+	if (ret < 0) {
+		pr_crit("%s: Unable to start CPU timers: %d\n", __func__, ret);
 		goto err_tcu_clocksource_cleanup;
+	}

 	/* Register the sched_clock at the end as there's no way to undo it */
 	rate = clk_get_rate(tcu->cs_clk);
@@ -315,28 +371,38 @@ static int __init ingenic_tcu_probe(struct platform_device *pdev)
 static int __maybe_unused ingenic_tcu_suspend(struct device *dev)
 {
 	struct ingenic_tcu *tcu = dev_get_drvdata(dev);
+	unsigned int cpu;

 	clk_disable(tcu->cs_clk);
-	clk_disable(tcu->timer_clk);
+
+	for (cpu = 0; cpu < num_online_cpus(); cpu++)
+		clk_disable(tcu->timers[cpu].clk);
+
 	return 0;
 }

 static int __maybe_unused ingenic_tcu_resume(struct device *dev)
 {
 	struct ingenic_tcu *tcu = dev_get_drvdata(dev);
+	unsigned int cpu;
 	int ret;

-	ret = clk_enable(tcu->timer_clk);
-	if (ret)
-		return ret;
+	for (cpu = 0; cpu < num_online_cpus(); cpu++) {
+		ret = clk_enable(tcu->timers[cpu].clk);
+		if (ret)
+			goto err_timer_clk_disable;
+	}

 	ret = clk_enable(tcu->cs_clk);
-	if (ret) {
-		clk_disable(tcu->timer_clk);
-		return ret;
-	}
+	if (ret)
+		goto err_timer_clk_disable;

 	return 0;
+
+err_timer_clk_disable:
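+	/* Unwind only the timer clocks that were already enabled */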
+	for (; cpu > 0; cpu--)
+		clk_disable(tcu->timers[cpu - 1].clk);
+	return ret;
 }

 static const struct dev_pm_ops __maybe_unused ingenic_tcu_pm_ops = {