22/*
33 * System Control and Power Interface (SCMI) Protocol based clock driver
44 *
5- * Copyright (C) 2018-2022 ARM Ltd.
5+ * Copyright (C) 2018-2024 ARM Ltd.
66 */
77
8+ #include <linux/bits.h>
89#include <linux/clk-provider.h>
910#include <linux/device.h>
1011#include <linux/err.h>
1617#define NOT_ATOMIC false
1718#define ATOMIC true
1819
/*
 * Bit positions describing the optional capabilities a set of clk_ops may
 * provide; combined into a features bitmap used to index the clk_ops
 * database built at probe time.
 */
enum scmi_clk_feats {
	SCMI_CLK_ATOMIC_SUPPORTED,
	SCMI_CLK_FEATS_COUNT
};

/* One distinct clk_ops combination can exist per subset of features. */
#define SCMI_MAX_CLK_OPS	BIT(SCMI_CLK_FEATS_COUNT)
1927static const struct scmi_clk_proto_ops * scmi_proto_clk_ops ;
2028
2129struct scmi_clk {
@@ -158,42 +166,6 @@ static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
158166 return !!enabled ;
159167}
160168
161- /*
162- * We can provide enable/disable/is_enabled atomic callbacks only if the
163- * underlying SCMI transport for an SCMI instance is configured to handle
164- * SCMI commands in an atomic manner.
165- *
166- * When no SCMI atomic transport support is available we instead provide only
167- * the prepare/unprepare API, as allowed by the clock framework when atomic
168- * calls are not available.
169- *
170- * Two distinct sets of clk_ops are provided since we could have multiple SCMI
171- * instances with different underlying transport quality, so they cannot be
172- * shared.
173- */
174- static const struct clk_ops scmi_clk_ops = {
175- .recalc_rate = scmi_clk_recalc_rate ,
176- .round_rate = scmi_clk_round_rate ,
177- .set_rate = scmi_clk_set_rate ,
178- .prepare = scmi_clk_enable ,
179- .unprepare = scmi_clk_disable ,
180- .set_parent = scmi_clk_set_parent ,
181- .get_parent = scmi_clk_get_parent ,
182- .determine_rate = scmi_clk_determine_rate ,
183- };
184-
185- static const struct clk_ops scmi_atomic_clk_ops = {
186- .recalc_rate = scmi_clk_recalc_rate ,
187- .round_rate = scmi_clk_round_rate ,
188- .set_rate = scmi_clk_set_rate ,
189- .enable = scmi_clk_atomic_enable ,
190- .disable = scmi_clk_atomic_disable ,
191- .is_enabled = scmi_clk_atomic_is_enabled ,
192- .set_parent = scmi_clk_set_parent ,
193- .get_parent = scmi_clk_get_parent ,
194- .determine_rate = scmi_clk_determine_rate ,
195- };
196-
197169static int scmi_clk_ops_init (struct device * dev , struct scmi_clk * sclk ,
198170 const struct clk_ops * scmi_ops )
199171{
@@ -230,17 +202,129 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
230202 return ret ;
231203}
232204
205+ /**
206+ * scmi_clk_ops_alloc() - Alloc and configure clock operations
207+ * @dev: A device reference for devres
208+ * @feats_key: A bitmap representing the desired clk_ops capabilities
209+ *
210+ * Allocate and configure a proper set of clock operations depending on the
211+ * specifically required SCMI clock features.
212+ *
213+ * Return: A pointer to the allocated and configured clk_ops on success,
214+ * or NULL on allocation failure.
215+ */
216+ static const struct clk_ops *
217+ scmi_clk_ops_alloc (struct device * dev , unsigned long feats_key )
218+ {
219+ struct clk_ops * ops ;
220+
221+ ops = devm_kzalloc (dev , sizeof (* ops ), GFP_KERNEL );
222+ if (!ops )
223+ return NULL ;
224+ /*
225+ * We can provide enable/disable/is_enabled atomic callbacks only if the
226+ * underlying SCMI transport for an SCMI instance is configured to
227+ * handle SCMI commands in an atomic manner.
228+ *
229+ * When no SCMI atomic transport support is available we instead provide
230+ * only the prepare/unprepare API, as allowed by the clock framework
231+ * when atomic calls are not available.
232+ */
233+ if (feats_key & BIT (SCMI_CLK_ATOMIC_SUPPORTED )) {
234+ ops -> enable = scmi_clk_atomic_enable ;
235+ ops -> disable = scmi_clk_atomic_disable ;
236+ ops -> is_enabled = scmi_clk_atomic_is_enabled ;
237+ } else {
238+ ops -> prepare = scmi_clk_enable ;
239+ ops -> unprepare = scmi_clk_disable ;
240+ }
241+
242+ /* Rate ops */
243+ ops -> recalc_rate = scmi_clk_recalc_rate ;
244+ ops -> round_rate = scmi_clk_round_rate ;
245+ ops -> determine_rate = scmi_clk_determine_rate ;
246+ ops -> set_rate = scmi_clk_set_rate ;
247+
248+ /* Parent ops */
249+ ops -> get_parent = scmi_clk_get_parent ;
250+ ops -> set_parent = scmi_clk_set_parent ;
251+
252+ return ops ;
253+ }
254+
255+ /**
256+ * scmi_clk_ops_select() - Select a proper set of clock operations
257+ * @sclk: A reference to an SCMI clock descriptor
258+ * @atomic_capable: A flag to indicate if atomic mode is supported by the
259+ * transport
260+ * @atomic_threshold_us: Platform atomic threshold value in microseconds:
261+ * clk_ops are atomic when clock enable latency is less
262+ * than this threshold
263+ * @clk_ops_db: A reference to the array used as a database to store all the
264+ * created clock operations combinations.
265+ * @db_size: Maximum number of entries held by @clk_ops_db
266+ *
267+ * After having built a bitmap descriptor to represent the set of features
268+ * needed by this SCMI clock, at first use it to lookup into the set of
269+ * previously allocated clk_ops to check if a suitable combination of clock
270+ * operations was already created; when no match is found allocate a brand new
271+ * set of clk_ops satisfying the required combination of features and save it
272+ * for future references.
273+ *
274+ * In this way only one set of clk_ops is ever created for each different
275+ * combination that is effectively needed by a driver instance.
276+ *
277+ * Return: A pointer to the allocated and configured clk_ops on success, or
278+ * NULL otherwise.
279+ */
280+ static const struct clk_ops *
281+ scmi_clk_ops_select (struct scmi_clk * sclk , bool atomic_capable ,
282+ unsigned int atomic_threshold_us ,
283+ const struct clk_ops * * clk_ops_db , size_t db_size )
284+ {
285+ const struct scmi_clock_info * ci = sclk -> info ;
286+ unsigned int feats_key = 0 ;
287+ const struct clk_ops * ops ;
288+
289+ /*
290+ * Note that when transport is atomic but SCMI protocol did not
291+ * specify (or support) an enable_latency associated with a
292+ * clock, we default to use atomic operations mode.
293+ */
294+ if (atomic_capable && ci -> enable_latency <= atomic_threshold_us )
295+ feats_key |= BIT (SCMI_CLK_ATOMIC_SUPPORTED );
296+
297+ if (WARN_ON (feats_key >= db_size ))
298+ return NULL ;
299+
300+ /* Lookup previously allocated ops */
301+ ops = clk_ops_db [feats_key ];
302+ if (ops )
303+ return ops ;
304+
305+ /* Did not find a pre-allocated clock_ops */
306+ ops = scmi_clk_ops_alloc (sclk -> dev , feats_key );
307+ if (!ops )
308+ return NULL ;
309+
310+ /* Store new ops combinations */
311+ clk_ops_db [feats_key ] = ops ;
312+
313+ return ops ;
314+ }
315+
233316static int scmi_clocks_probe (struct scmi_device * sdev )
234317{
235318 int idx , count , err ;
236- unsigned int atomic_threshold ;
237- bool is_atomic ;
319+ unsigned int atomic_threshold_us ;
320+ bool transport_is_atomic ;
238321 struct clk_hw * * hws ;
239322 struct clk_hw_onecell_data * clk_data ;
240323 struct device * dev = & sdev -> dev ;
241324 struct device_node * np = dev -> of_node ;
242325 const struct scmi_handle * handle = sdev -> handle ;
243326 struct scmi_protocol_handle * ph ;
327+ const struct clk_ops * scmi_clk_ops_db [SCMI_MAX_CLK_OPS ] = {};
244328
245329 if (!handle )
246330 return - ENODEV ;
@@ -264,7 +348,8 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
264348 clk_data -> num = count ;
265349 hws = clk_data -> hws ;
266350
267- is_atomic = handle -> is_transport_atomic (handle , & atomic_threshold );
351+ transport_is_atomic = handle -> is_transport_atomic (handle ,
352+ & atomic_threshold_us );
268353
269354 for (idx = 0 ; idx < count ; idx ++ ) {
270355 struct scmi_clk * sclk ;
@@ -286,15 +371,17 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
286371 sclk -> dev = dev ;
287372
288373 /*
289- * Note that when transport is atomic but SCMI protocol did not
290- * specify (or support) an enable_latency associated with a
291- * clock, we default to use atomic operations mode.
374+ * Note that the scmi_clk_ops_db is on the stack, not global,
375+ * because it cannot be shared between multiple probe-sequences
376+ * to avoid sharing the devm_ allocated clk_ops between multiple
377+ * SCMI clk driver instances.
292378 */
293- if (is_atomic &&
294- sclk -> info -> enable_latency <= atomic_threshold )
295- scmi_ops = & scmi_atomic_clk_ops ;
296- else
297- scmi_ops = & scmi_clk_ops ;
379+ scmi_ops = scmi_clk_ops_select (sclk , transport_is_atomic ,
380+ atomic_threshold_us ,
381+ scmi_clk_ops_db ,
382+ ARRAY_SIZE (scmi_clk_ops_db ));
383+ if (!scmi_ops )
384+ return - ENOMEM ;
298385
299386 /* Initialize clock parent data. */
300387 if (sclk -> info -> num_parents > 0 ) {
@@ -318,8 +405,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
318405 } else {
319406 dev_dbg (dev , "Registered clock:%s%s\n" ,
320407 sclk -> info -> name ,
321- scmi_ops == & scmi_atomic_clk_ops ?
322- " (atomic ops)" : "" );
408+ scmi_ops -> enable ? " (atomic ops)" : "" );
323409 hws [idx ] = & sclk -> hw ;
324410 }
325411 }
0 commit comments