 #define PDIV(val)		FIELD_GET(GENMASK(5, 0), (val))
 #define SDIV(val)		FIELD_GET(GENMASK(2, 0), (val))
 
+#define DDIV_DIVCTL_WEN(shift)	BIT((shift) + 16)
+
 #define GET_MOD_CLK_ID(base, index, bit)	\
 		((base) + ((((index) * (16))) + (bit)))
 
+#define CPG_CLKSTATUS0		(0x700)
+
 /**
  * struct rzv2h_cpg_priv - Clock Pulse Generator Private Data
  *
  * @dev: CPG device
  * @base: CPG register block base address
+ * @rmw_lock: protects register accesses
  * @clks: Array containing all Core and Module Clocks
  * @num_core_clks: Number of Core Clocks in clks[]
  * @num_mod_clks: Number of Module Clocks in clks[]
 struct rzv2h_cpg_priv {
 	struct device *dev;
 	void __iomem *base;
+	spinlock_t rmw_lock;
 
 	struct clk **clks;
 	unsigned int num_core_clks;
@@ -108,6 +114,21 @@ struct mod_clock {
 
 #define to_mod_clock(_hw)	container_of(_hw, struct mod_clock, hw)
 
+/**
+ * struct ddiv_clk - DDIV clock
+ *
+ * @priv: CPG private data
+ * @div: divider clk
+ * @mon: monitor bit in CPG_CLKSTATUS0 register
+ */
+struct ddiv_clk {
+	struct rzv2h_cpg_priv *priv;
+	struct clk_divider div;
+	u8 mon;
+};
+
+#define to_ddiv_clock(_div) container_of(_div, struct ddiv_clk, div)
+
 static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
 						   unsigned long parent_rate)
 {
@@ -161,7 +182,7 @@ rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
 	init.num_parents = 1;
 
 	pll_clk->hw.init = &init;
-	pll_clk->conf = core->conf;
+	pll_clk->conf = core->cfg.conf;
 	pll_clk->base = base;
 	pll_clk->priv = priv;
 	pll_clk->type = core->type;
@@ -173,6 +194,143 @@ rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
 	return pll_clk->hw.clk;
 }
 
+static unsigned long rzv2h_ddiv_recalc_rate(struct clk_hw *hw,
+					    unsigned long parent_rate)
+{
+	struct clk_divider *divider = to_clk_divider(hw);
+	unsigned int val;
+
+	val = readl(divider->reg) >> divider->shift;
+	val &= clk_div_mask(divider->width);
+
+	return divider_recalc_rate(hw, parent_rate, val, divider->table,
+				   divider->flags, divider->width);
+}
+
+static long rzv2h_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
+				  unsigned long *prate)
+{
+	struct clk_divider *divider = to_clk_divider(hw);
+
+	return divider_round_rate(hw, rate, prate, divider->table,
+				  divider->width, divider->flags);
+}
+
+static int rzv2h_ddiv_determine_rate(struct clk_hw *hw,
+				     struct clk_rate_request *req)
+{
+	struct clk_divider *divider = to_clk_divider(hw);
+
+	return divider_determine_rate(hw, req, divider->table, divider->width,
+				      divider->flags);
+}
+
+static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon)
+{
+	u32 bitmask = BIT(mon);
+	u32 val;
+
+	return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val, !(val & bitmask), 10, 200);
+}
+
+static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
+			       unsigned long parent_rate)
+{
+	struct clk_divider *divider = to_clk_divider(hw);
+	struct ddiv_clk *ddiv = to_ddiv_clock(divider);
+	struct rzv2h_cpg_priv *priv = ddiv->priv;
+	unsigned long flags = 0;
+	int value;
+	u32 val;
+	int ret;
+
+	value = divider_get_val(rate, parent_rate, divider->table,
+				divider->width, divider->flags);
+	if (value < 0)
+		return value;
+
+	spin_lock_irqsave(divider->lock, flags);
+
+	ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
+	if (ret)
+		goto ddiv_timeout;
+
+	val = readl(divider->reg) | DDIV_DIVCTL_WEN(divider->shift);
+	val &= ~(clk_div_mask(divider->width) << divider->shift);
+	val |= (u32)value << divider->shift;
+	writel(val, divider->reg);
+
+	ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
+	if (ret)
+		goto ddiv_timeout;
+
+	spin_unlock_irqrestore(divider->lock, flags);
+
+	return 0;
+
+ddiv_timeout:
+	spin_unlock_irqrestore(divider->lock, flags);
+	return ret;
+}
+
+static const struct clk_ops rzv2h_ddiv_clk_divider_ops = {
+	.recalc_rate = rzv2h_ddiv_recalc_rate,
+	.round_rate = rzv2h_ddiv_round_rate,
+	.determine_rate = rzv2h_ddiv_determine_rate,
+	.set_rate = rzv2h_ddiv_set_rate,
+};
+
+static struct clk * __init
+rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
+			    struct rzv2h_cpg_priv *priv)
+{
+	struct ddiv cfg_ddiv = core->cfg.ddiv;
+	struct clk_init_data init = {};
+	struct device *dev = priv->dev;
+	u8 shift = cfg_ddiv.shift;
+	u8 width = cfg_ddiv.width;
+	const struct clk *parent;
+	const char *parent_name;
+	struct clk_divider *div;
+	struct ddiv_clk *ddiv;
+	int ret;
+
+	parent = priv->clks[core->parent];
+	if (IS_ERR(parent))
+		return ERR_CAST(parent);
+
+	parent_name = __clk_get_name(parent);
+
+	if ((shift + width) > 16)
+		return ERR_PTR(-EINVAL);
+
+	ddiv = devm_kzalloc(priv->dev, sizeof(*ddiv), GFP_KERNEL);
+	if (!ddiv)
+		return ERR_PTR(-ENOMEM);
+
+	init.name = core->name;
+	init.ops = &rzv2h_ddiv_clk_divider_ops;
+	init.parent_names = &parent_name;
+	init.num_parents = 1;
+
+	ddiv->priv = priv;
+	ddiv->mon = cfg_ddiv.monbit;
+	div = &ddiv->div;
+	div->reg = priv->base + cfg_ddiv.offset;
+	div->shift = shift;
+	div->width = width;
+	div->flags = core->flag;
+	div->lock = &priv->rmw_lock;
+	div->hw.init = &init;
+	div->table = core->dtable;
+
+	ret = devm_clk_hw_register(dev, &div->hw);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return div->hw.clk;
+}
+
 static struct clk
 *rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
 			       void *data)
@@ -254,6 +412,9 @@ rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
 	case CLK_TYPE_PLL:
 		clk = rzv2h_cpg_pll_clk_register(core, priv, &rzv2h_cpg_pll_ops);
 		break;
+	case CLK_TYPE_DDIV:
+		clk = rzv2h_cpg_ddiv_clk_register(core, priv);
+		break;
 	default:
 		goto fail;
 	}
@@ -612,6 +773,8 @@ static int __init rzv2h_cpg_probe(struct platform_device *pdev)
 	if (!priv)
 		return -ENOMEM;
 
+	spin_lock_init(&priv->rmw_lock);
+
 	priv->dev = dev;
 
 	priv->base = devm_platform_ioremap_resource(pdev, 0);
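
Note: the DDIV clocks registered above are plain clk_hw providers, so consumers reach rzv2h_ddiv_set_rate() through the generic clock API. The sketch below is a minimal, hypothetical consumer path and is not part of this patch; the device pointer, the "ca55" clock-consumer name, and the target rate are assumptions for illustration only.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/*
 * Hypothetical consumer: request a CPG clock assumed to be backed by a DDIV
 * divider and retune it. clk_set_rate() then lands in rzv2h_ddiv_set_rate(),
 * which takes priv->rmw_lock, waits for the CPG_CLKSTATUS0 monitor bit to
 * clear, and writes the divider value together with its DDIV_DIVCTL_WEN bit.
 */
static int example_retune_ddiv(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "ca55");	/* consumer name is an assumption */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* Rounded against core->dtable via rzv2h_ddiv_round_rate(). */
	ret = clk_set_rate(clk, 850 * 1000 * 1000);
	if (ret)
		clk_disable_unprepare(clk);

	return ret;
}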