  * - Some chips support 32 bit counter. A single channel is used for
  *   this 32 bit free-running counter. the second channel is not used.
  *
- * - The third channel may be used to provide a 16-bit clockevent
- *   source, used in either periodic or oneshot mode.  This runs
- *   at 32 KiHZ, and can handle delays of up to two seconds.
+ * - The third channel may be used to provide a clockevent source, used in
+ *   either periodic or oneshot mode. For 16-bit counters, it runs at 32 KiHz
+ *   and can handle delays of up to two seconds. For 32-bit counters, it runs
+ *   at the same rate as the clocksource.
  *
  * REVISIT behavior during system suspend states... we should disable
  * all clocks and save the power.  Easily done for clockevent devices,
@@ -47,6 +48,8 @@ static struct
 } tcb_cache[3];
 static u32 bmr_cache;
 
+static const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128 };
+
 static u64 tc_get_cycles(struct clocksource *cs)
 {
 	unsigned long flags;
@@ -143,6 +146,7 @@ static unsigned long notrace tc_delay_timer_read32(void)
 struct tc_clkevt_device {
 	struct clock_event_device clkevt;
 	struct clk *clk;
+	u32 rate;
 	void __iomem *regs;
 };
 
@@ -151,13 +155,6 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
 	return container_of(clkevt, struct tc_clkevt_device, clkevt);
 }
 
-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
- * because using one of the divided clocks would usually mean the
- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
- *
- * A divided clock could be good for high resolution timers, since
- * 30.5 usec resolution can seem "low".
- */
 static u32 timer_clock;
 
 static int tc_shutdown(struct clock_event_device *d)
@@ -183,7 +180,7 @@ static int tc_set_oneshot(struct clock_event_device *d)
 
 	clk_enable(tcd->clk);
 
-	/* slow clock, count up to RC, then irq and stop */
+	/* count up to RC, then irq and stop */
 	writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
 	       ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
 	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@@ -205,10 +202,10 @@ static int tc_set_periodic(struct clock_event_device *d)
 	 */
 	clk_enable(tcd->clk);
 
-	/* slow clock, count up to RC, then irq and restart */
+	/* count up to RC, then irq and restart */
 	writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
 	       regs + ATMEL_TC_REG(2, CMR));
-	writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
+	writel((tcd->rate + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
 
 	/* Enable clock and interrupts on RC compare */
 	writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
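For context (not part of the commit): the RC reload value written above is simply the channel's input clock rate divided by HZ with rounding, so the compare interrupt fires HZ times per second regardless of which clock feeds channel 2. A minimal standalone sketch of that arithmetic, assuming HZ = 100 and a 132 MHz peripheral clock purely for illustration:

#include <stdio.h>

#define HZ 100	/* assumed kernel tick rate for this example */

/* Rounded division used by tc_set_periodic(): timer ticks per kernel tick. */
static unsigned int rc_reload(unsigned int rate)
{
	return (rate + HZ / 2) / HZ;
}

int main(void)
{
	/* 16-bit case: 32768 Hz slow clock, the previously hard-coded value. */
	printf("slow clock    : RC = %u\n", rc_reload(32768));
	/* 32-bit case: assumed 132 MHz peripheral clock divided by 128. */
	printf("divided clock : RC = %u\n", rc_reload(132000000 / 128));
	return 0;
}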
@@ -256,47 +253,55 @@ static irqreturn_t ch2_irq(int irq, void *handle)
 	return IRQ_NONE;
 }
 
-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
 {
 	int ret;
 	struct clk *t2_clk = tc->clk[2];
 	int irq = tc->irq[2];
-
-	ret = clk_prepare_enable(tc->slow_clk);
-	if (ret)
-		return ret;
+	int bits = tc->tcb_config->counter_width;
 
 	/* try to enable t2 clk to avoid future errors in mode change */
 	ret = clk_prepare_enable(t2_clk);
-	if (ret) {
-		clk_disable_unprepare(tc->slow_clk);
+	if (ret)
 		return ret;
-	}
-
-	clk_disable(t2_clk);
 
 	clkevt.regs = tc->regs;
 	clkevt.clk = t2_clk;
 
-	timer_clock = clk32k_divisor_idx;
+	if (bits == 32) {
+		timer_clock = divisor_idx;
+		clkevt.rate = clk_get_rate(t2_clk) / atmel_tcb_divisors[divisor_idx];
+	} else {
+		ret = clk_prepare_enable(tc->slow_clk);
+		if (ret) {
+			clk_disable_unprepare(t2_clk);
+			return ret;
+		}
+
+		clkevt.rate = clk_get_rate(tc->slow_clk);
+		timer_clock = ATMEL_TC_TIMER_CLOCK5;
+	}
+
+	clk_disable(t2_clk);
 
 	clkevt.clkevt.cpumask = cpumask_of(0);
 
 	ret = request_irq(irq, ch2_irq, IRQF_TIMER, "tc_clkevt", &clkevt);
 	if (ret) {
 		clk_unprepare(t2_clk);
-		clk_disable_unprepare(tc->slow_clk);
+		if (bits != 32)
+			clk_disable_unprepare(tc->slow_clk);
 		return ret;
 	}
 
-	clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
+	clockevents_config_and_register(&clkevt.clkevt, clkevt.rate, 1, BIT(bits) - 1);
 
 	return ret;
 }
 
 #else /* !CONFIG_GENERIC_CLOCKEVENTS */
 
-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
 {
 	/* NOTHING */
 	return 0;
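A side note on the registration above (illustrative, not part of the diff): the maximum programmable delta is now BIT(bits) - 1 instead of the fixed 0xffff, so a 16-bit channel on the 32 KiHz slow clock still tops out at about two seconds, as the header comment says, while a 32-bit channel fed by a divided peripheral clock can cover far longer intervals. A small sketch of that calculation, with the 132 MHz / 128 rate assumed only for the example:

#include <stdio.h>
#include <stdint.h>

/* Longest programmable delay for a counter of 'bits' width clocked at 'rate' Hz,
 * mirroring the max_delta of BIT(bits) - 1 passed to clockevents_config_and_register(). */
static double max_delay_sec(unsigned int bits, unsigned int rate)
{
	uint64_t max_delta = ((uint64_t)1 << bits) - 1;

	return (double)max_delta / rate;
}

int main(void)
{
	/* 16-bit counter on the 32768 Hz slow clock: roughly 2 s. */
	printf("16-bit @ 32768 Hz: %.2f s\n", max_delay_sec(16, 32768));
	/* 32-bit counter at an assumed 132 MHz / 128 = ~1.03 MHz: over an hour. */
	printf("32-bit @ ~1 MHz  : %.2f s\n", max_delay_sec(32, 132000000 / 128));
	return 0;
}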
@@ -346,8 +351,6 @@ static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_id
 	writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
 }
 
-static const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128 };
-
 static struct atmel_tcb_config tcb_rm9200_config = {
 	.counter_width = 16,
 };
@@ -472,7 +475,7 @@ static int __init tcb_clksrc_init(struct device_node *node)
 		goto err_disable_t1;
 
 	/* channel 2: periodic and oneshot timer support */
-	ret = setup_clkevents(&tc, ATMEL_TC_TIMER_CLOCK5);
+	ret = setup_clkevents(&tc, best_divisor_idx);
 	if (ret)
 		goto err_unregister_clksrc;
 
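One last illustration (not from this diff): best_divisor_idx indexes the atmel_tcb_divisors[] table that the patch moves to the top of the file, and in the bits == 32 branch of setup_clkevents() the clockevent rate becomes the t2 clock rate divided by that entry. A minimal sketch of that lookup, assuming a 132 MHz peripheral clock and a hypothetical best_divisor_idx of 3:

#include <stdio.h>

/* Divisor table from the driver: dividers applied to the peripheral clock. */
static const unsigned char atmel_tcb_divisors[] = { 2, 8, 32, 128 };

/* Clockevent rate for a 32-bit counter, as in the bits == 32 branch:
 * t2 clock rate divided by the selected divisor. */
static unsigned int clkevt_rate(unsigned int t2_rate, unsigned int divisor_idx)
{
	return t2_rate / atmel_tcb_divisors[divisor_idx];
}

int main(void)
{
	unsigned int t2_rate = 132000000;	/* assumed 132 MHz, for illustration only */
	unsigned int best_divisor_idx = 3;	/* hypothetical selection: divide by 128 */

	printf("divisor %u -> clockevent rate %u Hz\n",
	       (unsigned int)atmel_tcb_divisors[best_divisor_idx],
	       clkevt_rate(t2_rate, best_divisor_idx));
	return 0;
}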