#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
+ #include <linux/iopoll.h>
#include <linux/slab.h>

#include "renesas-cpg-mssr.h"
@@ -27,6 +28,152 @@ static const struct rcar_gen4_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;

+ #define CPG_PLLECR	0x0820	/* PLL Enable Control Register */
+
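+ /* The expression below maps PLL1, PLL2, PLL3, PLL4 and PLL6 to status bits 8, 9, 11, 13 and 15 */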
+ #define CPG_PLLECR_PLLST(n)	BIT(8 + ((n) < 3 ? (n) - 1 : \
+ 					 (n) > 3 ? (n) + 1 : n))	/* PLLn Circuit Status */
+
+ #define CPG_PLL1CR0	0x830	/* PLLn Control Registers */
+ #define CPG_PLL1CR1	0x8b0
+ #define CPG_PLL2CR0	0x834
+ #define CPG_PLL2CR1	0x8b8
+ #define CPG_PLL3CR0	0x83c
+ #define CPG_PLL3CR1	0x8c0
+ #define CPG_PLL4CR0	0x844
+ #define CPG_PLL4CR1	0x8c8
+ #define CPG_PLL6CR0	0x84c
+ #define CPG_PLL6CR1	0x8d8
+
+ #define CPG_PLLxCR0_KICK	BIT(31)
+ #define CPG_PLLxCR0_NI	GENMASK(27, 20)	/* Integer mult. factor */
+ #define CPG_PLLxCR0_SSMODE	GENMASK(18, 16)	/* PLL mode */
+ #define CPG_PLLxCR0_SSMODE_FM	BIT(18)	/* Fractional Multiplication */
+ #define CPG_PLLxCR0_SSMODE_DITH	BIT(17)	/* Frequency Dithering */
+ #define CPG_PLLxCR0_SSMODE_CENT	BIT(16)	/* Center (vs. Down) Spread Dithering */
+ #define CPG_PLLxCR0_SSFREQ	GENMASK(14, 8)	/* SSCG Modulation Frequency */
+ #define CPG_PLLxCR0_SSDEPT	GENMASK(6, 0)	/* SSCG Modulation Depth */
+
+ #define SSMODE_FM	BIT(2)	/* Fractional Multiplication */
+ #define SSMODE_DITHER	BIT(1)	/* Frequency Dithering */
+ #define SSMODE_CENTER	BIT(0)	/* Center (vs. Down) Spread Dithering */
+
+ /* PLL Clocks */
+ struct cpg_pll_clk {
+ 	struct clk_hw hw;
+ 	void __iomem *pllcr0_reg;
+ 	void __iomem *pllecr_reg;
+ 	u32 pllecr_pllst_mask;
+ };
+
+ #define to_pll_clk(_hw)	container_of(_hw, struct cpg_pll_clk, hw)
+
+ static unsigned long cpg_pll_clk_recalc_rate(struct clk_hw *hw,
+ 					     unsigned long parent_rate)
+ {
+ 	struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
+ 	unsigned int mult;
+
+ 	mult = FIELD_GET(CPG_PLLxCR0_NI, readl(pll_clk->pllcr0_reg)) + 1;
+
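+ 	/* The PLL output is parent_rate * 2 * (NI + 1) */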
+ 	return parent_rate * mult * 2;
+ }
+
+ static int cpg_pll_clk_determine_rate(struct clk_hw *hw,
+ 				      struct clk_rate_request *req)
+ {
+ 	unsigned int min_mult, max_mult, mult;
+ 	unsigned long prate;
+
+ 	prate = req->best_parent_rate * 2;
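+ 	/* NI holds (multiplier - 1) in an 8-bit field, so the usable multiplier range is 1..256 */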
+ 	min_mult = max(div64_ul(req->min_rate, prate), 1ULL);
+ 	max_mult = min(div64_ul(req->max_rate, prate), 256ULL);
+ 	if (max_mult < min_mult)
+ 		return -EINVAL;
+
+ 	mult = DIV_ROUND_CLOSEST_ULL(req->rate, prate);
+ 	mult = clamp(mult, min_mult, max_mult);
+
+ 	req->rate = prate * mult;
+ 	return 0;
+ }
+
+ static int cpg_pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ 				unsigned long parent_rate)
+ {
+ 	struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
+ 	unsigned int mult;
+ 	u32 val;
+
+ 	mult = DIV_ROUND_CLOSEST_ULL(rate, parent_rate * 2);
+ 	mult = clamp(mult, 1U, 256U);
+
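+ 	/* If KICK has not cleared yet, a previous update is presumably still in progress */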
+ 	if (readl(pll_clk->pllcr0_reg) & CPG_PLLxCR0_KICK)
+ 		return -EBUSY;
+
+ 	cpg_reg_modify(pll_clk->pllcr0_reg, CPG_PLLxCR0_NI,
+ 		       FIELD_PREP(CPG_PLLxCR0_NI, mult - 1));
+
+ 	/*
+ 	 * Set KICK bit in PLLxCR0 to update hardware setting and wait for
+ 	 * clock change completion.
+ 	 */
+ 	cpg_reg_modify(pll_clk->pllcr0_reg, 0, CPG_PLLxCR0_KICK);
+
+ 	/*
+ 	 * Note: There is no HW information about the worst case latency.
+ 	 *
+ 	 * Using experimental measurements, it seems that no more than
+ 	 * ~45 µs are needed, independently of the CPU rate.
+ 	 * Since this value might be dependent on external xtal rate, pll
+ 	 * rate or even the other emulation clocks rate, use 1000 as a
+ 	 * "super" safe value.
+ 	 */
+ 	return readl_poll_timeout(pll_clk->pllecr_reg, val,
+ 				  val & pll_clk->pllecr_pllst_mask, 0, 1000);
+ }
+
+ static const struct clk_ops cpg_pll_clk_ops = {
+ 	.recalc_rate = cpg_pll_clk_recalc_rate,
+ 	.determine_rate = cpg_pll_clk_determine_rate,
+ 	.set_rate = cpg_pll_clk_set_rate,
+ };
+
+ static struct clk * __init cpg_pll_clk_register(const char *name,
+ 						const char *parent_name,
+ 						void __iomem *base,
+ 						unsigned int cr0_offset,
+ 						unsigned int cr1_offset,
+ 						unsigned int index)
+
+ {
+ 	struct cpg_pll_clk *pll_clk;
+ 	struct clk_init_data init = {};
+ 	struct clk *clk;
+
+ 	pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
+ 	if (!pll_clk)
+ 		return ERR_PTR(-ENOMEM);
+
+ 	init.name = name;
+ 	init.ops = &cpg_pll_clk_ops;
+ 	init.parent_names = &parent_name;
+ 	init.num_parents = 1;
+
+ 	pll_clk->hw.init = &init;
+ 	pll_clk->pllcr0_reg = base + cr0_offset;
+ 	pll_clk->pllecr_reg = base + CPG_PLLECR;
+ 	pll_clk->pllecr_pllst_mask = CPG_PLLECR_PLLST(index);
+
+ 	/* Disable Fractional Multiplication and Frequency Dithering */
+ 	writel(0, base + cr1_offset);
+ 	cpg_reg_modify(pll_clk->pllcr0_reg, CPG_PLLxCR0_SSMODE, 0);
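+ 	/* The PLL is now left in plain integer multiplication mode, matching the rate formula above */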
+
+ 	clk = clk_register(NULL, &pll_clk->hw);
+ 	if (IS_ERR(clk))
+ 		kfree(pll_clk);
+
+ 	return clk;
+ }
/*
 * Z0 Clock & Z1 Clock
 */
@@ -205,6 +352,15 @@ struct clk * __init rcar_gen4_cpg_clk_register(struct device *dev,
		div = cpg_pll_config->pll1_div;
		break;

+ 	case CLK_TYPE_GEN4_PLL2_VAR:
+ 		/*
+ 		 * PLL2 is implemented as a custom clock, to change the
+ 		 * multiplier when cpufreq changes between normal and boost
+ 		 * modes.
+ 		 */
+ 		return cpg_pll_clk_register(core->name, __clk_get_name(parent),
+ 					    base, CPG_PLL2CR0, CPG_PLL2CR1, 2);
+
	case CLK_TYPE_GEN4_PLL2:
		mult = cpg_pll_config->pll2_mult;
		div = cpg_pll_config->pll2_div;