@@ -33,6 +33,7 @@ enum rcar_r8a779a0_clk_types {
33
33
CLK_TYPE_R8A779A0_PLL1 ,
34
34
CLK_TYPE_R8A779A0_PLL2X_3X , /* PLL[23][01] */
35
35
CLK_TYPE_R8A779A0_PLL5 ,
36
+ CLK_TYPE_R8A779A0_Z ,
36
37
CLK_TYPE_R8A779A0_SD ,
37
38
CLK_TYPE_R8A779A0_MDSEL , /* Select parent/divider using mode pin */
38
39
CLK_TYPE_R8A779A0_OSC , /* OSC EXTAL predivider and fixed divider */
@@ -84,6 +85,10 @@ enum clk_ids {
84
85
DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_PLL2X_3X, CLK_MAIN, \
85
86
.offset = _offset)
86
87
88
/* Declare a Z (SYS-CPU) core clock, handled by CLK_TYPE_R8A779A0_Z */
#define DEF_Z(_name, _id, _parent, _div, _offset)	\
	DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_Z, _parent, .div = _div, \
		 .offset = _offset)

87
92
#define DEF_SD (_name , _id , _parent , _offset ) \
88
93
DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_SD, _parent, .offset = _offset)
89
94
@@ -122,6 +127,8 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
122
127
DEF_RATE (".oco" , CLK_OCO , 32768 ),
123
128
124
129
/* Core Clock Outputs */
130
+ DEF_Z ("z0" , R8A779A0_CLK_Z0 , CLK_PLL20 , 2 , 0 ),
131
+ DEF_Z ("z1" , R8A779A0_CLK_Z1 , CLK_PLL21 , 2 , 8 ),
125
132
DEF_FIXED ("zx" , R8A779A0_CLK_ZX , CLK_PLL20_DIV2 , 2 , 1 ),
126
133
DEF_FIXED ("s1d1" , R8A779A0_CLK_S1D1 , CLK_S1 , 1 , 1 ),
127
134
DEF_FIXED ("s1d2" , R8A779A0_CLK_S1D2 , CLK_S1 , 2 , 1 ),
@@ -205,6 +212,7 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
205
212
DEF_MOD ("tmu2" , 715 , R8A779A0_CLK_S1D4 ),
206
213
DEF_MOD ("tmu3" , 716 , R8A779A0_CLK_S1D4 ),
207
214
DEF_MOD ("tmu4" , 717 , R8A779A0_CLK_S1D4 ),
215
+ DEF_MOD ("tpu0" , 718 , R8A779A0_CLK_S1D8 ),
208
216
DEF_MOD ("vin00" , 730 , R8A779A0_CLK_S1D1 ),
209
217
DEF_MOD ("vin01" , 731 , R8A779A0_CLK_S1D1 ),
210
218
DEF_MOD ("vin02" , 800 , R8A779A0_CLK_S1D1 ),
@@ -259,6 +267,153 @@ static const struct rcar_r8a779a0_cpg_pll_config *cpg_pll_config __initdata;
259
267
static unsigned int cpg_clk_extalr __initdata ;
260
268
static u32 cpg_mode __initdata ;
261
269
270
/*
 * Z0 Clock & Z1 Clock
 */
#define CPG_FRQCRB		0x00000804	/* holds the KICK bit */
#define CPG_FRQCRB_KICK		BIT(31)		/* request/busy flag for rate updates */
#define CPG_FRQCRC		0x00000808	/* holds the per-clock divider fields */

/* Per-instance state for one Z (SYS-CPU) clock */
struct cpg_z_clk {
	struct clk_hw hw;		/* common clock framework handle */
	void __iomem *reg;		/* FRQCRC: divider field lives here */
	void __iomem *kick_reg;		/* FRQCRB: KICK bit lives here */
	unsigned long max_rate;		/* Maximum rate for normal mode */
	unsigned int fixed_div;		/* fixed pre-divider from the PLL */
	u32 mask;			/* 5-bit divider field mask within *reg */
};

#define to_z_clk(_hw)	container_of(_hw, struct cpg_z_clk, hw)
287
+
288
+ static unsigned long cpg_z_clk_recalc_rate (struct clk_hw * hw ,
289
+ unsigned long parent_rate )
290
+ {
291
+ struct cpg_z_clk * zclk = to_z_clk (hw );
292
+ unsigned int mult ;
293
+ u32 val ;
294
+
295
+ val = readl (zclk -> reg ) & zclk -> mask ;
296
+ mult = 32 - (val >> __ffs (zclk -> mask ));
297
+
298
+ return DIV_ROUND_CLOSEST_ULL ((u64 )parent_rate * mult ,
299
+ 32 * zclk -> fixed_div );
300
+ }
301
+
302
/*
 * Choose the best achievable rate for a rate request.
 *
 * Rates up to max_rate are reached with the parent (PLL) at its nominal
 * rate, using only the 1/32-step divider; rates above max_rate ("boost")
 * ask the parent itself to run faster via CLK_SET_RATE_PARENT.
 *
 * Returns 0 with req->rate/req->best_parent_rate filled in, or -EINVAL
 * if no 1..32 multiplier satisfies the request's min/max bounds.
 */
static int cpg_z_clk_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct cpg_z_clk *zclk = to_z_clk(hw);
	unsigned int min_mult, max_mult, mult;
	unsigned long rate, prate;

	rate = min(req->rate, req->max_rate);
	if (rate <= zclk->max_rate) {
		/* Set parent rate to initial value for normal modes */
		prate = zclk->max_rate;
	} else {
		/* Set increased parent rate for boost modes */
		prate = rate;
	}
	req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
						  prate * zclk->fixed_div);

	/* Constrain the 1..32 multiplier by the request's min/max rates */
	prate = req->best_parent_rate / zclk->fixed_div;
	min_mult = max(div64_ul(req->min_rate * 32ULL, prate), 1ULL);
	max_mult = min(div64_ul(req->max_rate * 32ULL, prate), 32ULL);
	if (max_mult < min_mult)
		return -EINVAL;

	mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL, prate);
	mult = clamp(mult, min_mult, max_mult);

	req->rate = DIV_ROUND_CLOSEST_ULL((u64)prate * mult, 32);
	return 0;
}
332
+
333
/*
 * Program the Z clock divider and trigger the hardware update.
 *
 * Returns 0 on success, -EBUSY if a previous frequency change is still
 * in flight (KICK already set), or -ETIMEDOUT if the KICK bit does not
 * self-clear after the update was requested.
 */
static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct cpg_z_clk *zclk = to_z_clk(hw);
	unsigned int mult;
	unsigned int i;

	/* Convert the requested rate into the 1..32 multiplier step */
	mult = DIV64_U64_ROUND_CLOSEST(rate * 32ULL * zclk->fixed_div,
				       parent_rate);
	mult = clamp(mult, 1U, 32U);

	/* A pending update means the divider field must not be touched yet */
	if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
		return -EBUSY;

	/* Write the encoded field (32 - mult) into this clock's slot */
	cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));

	/*
	 * Set KICK bit in FRQCRB to update hardware setting and wait for
	 * clock change completion.
	 */
	cpg_reg_modify(zclk->kick_reg, 0, CPG_FRQCRB_KICK);

	/*
	 * Note: There is no HW information about the worst case latency.
	 *
	 * Using experimental measurements, it seems that no more than
	 * ~10 iterations are needed, independently of the CPU rate.
	 * Since this value might be dependent on external xtal rate, pll1
	 * rate or even the other emulation clocks rate, use 1000 as a
	 * "super" safe value.
	 */
	for (i = 1000; i; i--) {
		if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
			return 0;

		cpu_relax();
	}

	return -ETIMEDOUT;
}
373
+
374
/* clk_ops for Z clocks: rate changes go through the FRQCRB/FRQCRC pair */
static const struct clk_ops cpg_z_clk_ops = {
	.recalc_rate = cpg_z_clk_recalc_rate,
	.determine_rate = cpg_z_clk_determine_rate,
	.set_rate = cpg_z_clk_set_rate,
};
379
+
380
/*
 * Register one Z (SYS-CPU) clock.
 *
 * @name:        clock name
 * @parent_name: name of the parent (PLL) clock
 * @reg:         CPG base address; FRQCRB/FRQCRC offsets are added here
 * @div:         fixed pre-divider between the PLL and the Z divider
 * @offset:      bit position of this clock's 5-bit divider field in FRQCRC
 *
 * Returns the registered clock, or an ERR_PTR() on failure (the
 * allocation is freed on the error path).
 */
static struct clk * __init cpg_z_clk_register(const char *name,
					      const char *parent_name,
					      void __iomem *reg,
					      unsigned int div,
					      unsigned int offset)
{
	struct clk_init_data init = {};
	struct cpg_z_clk *zclk;
	struct clk *clk;

	zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
	if (!zclk)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &cpg_z_clk_ops;
	init.flags = CLK_SET_RATE_PARENT;	/* boost modes raise the PLL */
	init.parent_names = &parent_name;
	init.num_parents = 1;

	zclk->reg = reg + CPG_FRQCRC;
	zclk->kick_reg = reg + CPG_FRQCRB;
	/* NOTE(review): init is stack-local; relies on clk_register() copying it */
	zclk->hw.init = &init;
	zclk->mask = GENMASK(offset + 4, offset);
	zclk->fixed_div = div; /* PLLVCO x 1/div x SYS-CPU divider */

	clk = clk_register(NULL, &zclk->hw);
	if (IS_ERR(clk)) {
		kfree(zclk);
		return clk;
	}

	/* Normal-mode ceiling: parent rate at registration time / fixed_div */
	zclk->max_rate = clk_hw_get_rate(clk_hw_get_parent(&zclk->hw)) /
			 zclk->fixed_div;
	return clk;
}
416
+
262
417
static struct clk * __init rcar_r8a779a0_cpg_clk_register (struct device * dev ,
263
418
const struct cpg_core_clk * core , const struct cpg_mssr_info * info ,
264
419
struct clk * * clks , void __iomem * base ,
@@ -293,6 +448,10 @@ static struct clk * __init rcar_r8a779a0_cpg_clk_register(struct device *dev,
293
448
div = cpg_pll_config -> pll5_div ;
294
449
break ;
295
450
451
+ case CLK_TYPE_R8A779A0_Z :
452
+ return cpg_z_clk_register (core -> name , __clk_get_name (parent ),
453
+ base , core -> div , core -> offset );
454
+
296
455
case CLK_TYPE_R8A779A0_SD :
297
456
return cpg_sd_clk_register (core -> name , base , core -> offset ,
298
457
__clk_get_name (parent ), notifiers ,
0 commit comments