@@ -93,6 +93,24 @@ struct sd_mux_hw_data {
93
93
94
94
#define to_sd_mux_hw_data (_hw ) container_of(_hw, struct sd_mux_hw_data, hw_data)
95
95
96
/**
 * struct div_hw_data - divider clock hardware data
 * @hw_data: clock hw data
 * @dtable: pointer to divider table
 * @invalid_rate: invalid rate for divider (a parent rate / divider
 *                combination the hardware must not be left in; see the
 *                PRE_RATE_CHANGE notifier)
 * @max_rate: maximum rate for divider (0 means no clamp is applied)
 * @width: divider width (in bits, derived from the largest @dtable divider)
 */
struct div_hw_data {
	struct clk_hw_data hw_data;
	const struct clk_div_table *dtable;
	unsigned long invalid_rate;
	unsigned long max_rate;
	u32 width;
};

/* Resolve the enclosing struct div_hw_data from its embedded hw_data member. */
#define to_div_hw_data(_hw)	container_of(_hw, struct div_hw_data, hw_data)
96
114
struct rzg2l_pll5_param {
97
115
u32 pl5_fracin ;
98
116
u8 pl5_refdiv ;
@@ -201,6 +219,53 @@ int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event
201
219
return notifier_from_errno (ret );
202
220
}
203
221
222
/*
 * Clock rate-change notifier for RZ/G3S dividers that have an invalid
 * parent-rate/divider combination. Before the parent rate changes
 * (PRE_RATE_CHANGE), force the hardware divider away from the forbidden
 * setting (div == 1, raw field value 0) by programming divider field 1.
 *
 * Returns NOTIFY_DONE when no action is needed, otherwise the result of
 * the register update wrapped via notifier_from_errno().
 */
int rzg3s_cpg_div_clk_notifier(struct notifier_block *nb, unsigned long event,
			       void *data)
{
	struct clk_notifier_data *cnd = data;
	struct clk_hw *hw = __clk_get_hw(cnd->clk);
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct div_hw_data *div_hw_data = to_div_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	int ret = 0;
	u32 val;

	/*
	 * Act only right before the rate change, and only when the divider
	 * has a forbidden setting for the new parent rate (invalid_rate is
	 * an exact multiple of new_rate).
	 */
	if (event != PRE_RATE_CHANGE || !div_hw_data->invalid_rate ||
	    div_hw_data->invalid_rate % cnd->new_rate)
		return NOTIFY_DONE;

	spin_lock_irqsave(&priv->rmw_lock, flags);

	/* Extract the current raw divider field from the register. */
	val = readl(priv->base + off);
	val >>= shift;
	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);

	/*
	 * There are different constraints for the user of this notifiers as follows:
	 * 1/ SD div cannot be 1 (val == 0) if parent rate is 800MHz
	 * 2/ OCTA / SPI div cannot be 1 (val == 0) if parent rate is 400MHz
	 * As SD can have only one parent having 800MHz and OCTA div can have
	 * only one parent having 400MHz we took into account the parent rate
	 * at the beginning of function (by checking invalid_rate % new_rate).
	 * Now it is time to check the hardware divider and update it accordingly.
	 */
	if (!val) {
		/* CPG_WEN_BIT enables the write to the divider field. */
		writel((CPG_WEN_BIT | 1) << shift, priv->base + off);
		/* Wait for the update done. */
		ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
	}

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "Failed to downgrade the div\n");

	return notifier_from_errno(ret);
}
268
+
204
269
static int rzg2l_register_notifier (struct clk_hw * hw , const struct cpg_core_clk * core ,
205
270
struct rzg2l_cpg_priv * priv )
206
271
{
@@ -218,6 +283,124 @@ static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk
218
283
return clk_notifier_register (hw -> clk , nb );
219
284
}
220
285
286
+ static unsigned long rzg3s_div_clk_recalc_rate (struct clk_hw * hw ,
287
+ unsigned long parent_rate )
288
+ {
289
+ struct clk_hw_data * clk_hw_data = to_clk_hw_data (hw );
290
+ struct div_hw_data * div_hw_data = to_div_hw_data (clk_hw_data );
291
+ struct rzg2l_cpg_priv * priv = clk_hw_data -> priv ;
292
+ u32 val ;
293
+
294
+ val = readl (priv -> base + GET_REG_OFFSET (clk_hw_data -> conf ));
295
+ val >>= GET_SHIFT (clk_hw_data -> conf );
296
+ val &= GENMASK (GET_WIDTH (clk_hw_data -> conf ) - 1 , 0 );
297
+
298
+ return divider_recalc_rate (hw , parent_rate , val , div_hw_data -> dtable ,
299
+ CLK_DIVIDER_ROUND_CLOSEST , div_hw_data -> width );
300
+ }
301
+
302
+ static int rzg3s_div_clk_determine_rate (struct clk_hw * hw , struct clk_rate_request * req )
303
+ {
304
+ struct clk_hw_data * clk_hw_data = to_clk_hw_data (hw );
305
+ struct div_hw_data * div_hw_data = to_div_hw_data (clk_hw_data );
306
+
307
+ if (div_hw_data -> max_rate && req -> rate > div_hw_data -> max_rate )
308
+ req -> rate = div_hw_data -> max_rate ;
309
+
310
+ return divider_determine_rate (hw , req , div_hw_data -> dtable , div_hw_data -> width ,
311
+ CLK_DIVIDER_ROUND_CLOSEST );
312
+ }
313
+
314
+ static int rzg3s_div_clk_set_rate (struct clk_hw * hw , unsigned long rate ,
315
+ unsigned long parent_rate )
316
+ {
317
+ struct clk_hw_data * clk_hw_data = to_clk_hw_data (hw );
318
+ struct div_hw_data * div_hw_data = to_div_hw_data (clk_hw_data );
319
+ struct rzg2l_cpg_priv * priv = clk_hw_data -> priv ;
320
+ u32 off = GET_REG_OFFSET (clk_hw_data -> conf );
321
+ u32 shift = GET_SHIFT (clk_hw_data -> conf );
322
+ unsigned long flags ;
323
+ u32 val ;
324
+ int ret ;
325
+
326
+ val = divider_get_val (rate , parent_rate , div_hw_data -> dtable , div_hw_data -> width ,
327
+ CLK_DIVIDER_ROUND_CLOSEST );
328
+
329
+ spin_lock_irqsave (& priv -> rmw_lock , flags );
330
+ writel ((CPG_WEN_BIT | val ) << shift , priv -> base + off );
331
+ /* Wait for the update done. */
332
+ ret = rzg2l_cpg_wait_clk_update_done (priv -> base , clk_hw_data -> sconf );
333
+ spin_unlock_irqrestore (& priv -> rmw_lock , flags );
334
+
335
+ return ret ;
336
+ }
337
+
338
/* clk_ops for RZ/G3S table-based divider clocks. */
static const struct clk_ops rzg3s_div_clk_ops = {
	.recalc_rate = rzg3s_div_clk_recalc_rate,
	.determine_rate = rzg3s_div_clk_determine_rate,
	.set_rate = rzg3s_div_clk_set_rate,
};
343
+
344
/*
 * Register an RZ/G3S divider core clock described by @core.
 *
 * Looks up the parent clock in @clks, allocates the per-clock
 * struct div_hw_data (devm-managed), derives the divider field width
 * from the largest divider in @core->dtable, registers the clk_hw and
 * attaches the rate-change notifier.
 *
 * Returns the new struct clk on success or an ERR_PTR() on failure.
 * NOTE(review): @base appears unused here (register access goes through
 * priv->base) — presumably kept for signature parity with the other
 * *_clk_register() helpers; confirm against the callers.
 */
static struct clk * __init
rzg3s_cpg_div_clk_register(const struct cpg_core_clk *core, struct clk **clks,
			   void __iomem *base, struct rzg2l_cpg_priv *priv)
{
	struct div_hw_data *div_hw_data;
	struct clk_init_data init = {};
	const struct clk_div_table *clkt;
	struct clk_hw *clk_hw;
	const struct clk *parent;
	const char *parent_name;
	u32 max = 0;
	int ret;

	/* The low 16 bits of core->parent index the parent in @clks. */
	parent = clks[core->parent & 0xffff];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	div_hw_data = devm_kzalloc(priv->dev, sizeof(*div_hw_data), GFP_KERNEL);
	if (!div_hw_data)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	init.flags = core->flag;
	init.ops = &rzg3s_div_clk_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	/* Get the maximum divider to retrieve div width. */
	for (clkt = core->dtable; clkt->div; clkt++) {
		if (max < clkt->div)
			max = clkt->div;
	}

	div_hw_data->hw_data.priv = priv;
	div_hw_data->hw_data.conf = core->conf;
	div_hw_data->hw_data.sconf = core->sconf;
	div_hw_data->dtable = core->dtable;
	div_hw_data->invalid_rate = core->invalid_rate;
	div_hw_data->max_rate = core->max_rate;
	/* Bits needed to hold the largest raw divider field value. */
	div_hw_data->width = fls(max) - 1;

	clk_hw = &div_hw_data->hw_data.hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	ret = rzg2l_register_notifier(clk_hw, core, priv);
	if (ret) {
		dev_err(priv->dev, "Failed to register notifier for %s\n",
			core->name);
		return ERR_PTR(ret);
	}

	return clk_hw->clk;
}
403
+
221
404
static struct clk * __init
222
405
rzg2l_cpg_div_clk_register (const struct cpg_core_clk * core ,
223
406
struct clk * * clks ,
@@ -963,6 +1146,9 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
963
1146
clk = rzg2l_cpg_div_clk_register (core , priv -> clks ,
964
1147
priv -> base , priv );
965
1148
break ;
1149
+ case CLK_TYPE_G3S_DIV :
1150
+ clk = rzg3s_cpg_div_clk_register (core , priv -> clks , priv -> base , priv );
1151
+ break ;
966
1152
case CLK_TYPE_MUX :
967
1153
clk = rzg2l_cpg_mux_clk_register (core , priv -> base , priv );
968
1154
break ;
0 commit comments