@@ -23,6 +23,7 @@
 enum pce_status {
         PCE_STATUS_NONE = 0,
         PCE_STATUS_ACQUIRED,
+        PCE_STATUS_PREPARED,
         PCE_STATUS_ENABLED,
         PCE_STATUS_ERROR,
 };
@@ -32,8 +33,112 @@ struct pm_clock_entry {
         char *con_id;
         struct clk *clk;
         enum pce_status status;
+        bool enabled_when_prepared;
 };
 
+/**
+ * pm_clk_list_lock - ensure exclusive access for modifying the PM clock
+ *                    entry list.
+ * @psd: pm_subsys_data instance corresponding to the PM clock entry list
+ *       and clock_op_might_sleep count to be modified.
+ *
+ * Get exclusive access before modifying the PM clock entry list and the
+ * clock_op_might_sleep count to guard against concurrent modifications.
+ * This also protects against a concurrent clock_op_might_sleep and PM clock
+ * entry list usage in pm_clk_suspend()/pm_clk_resume() that may or may not
+ * happen in atomic context, hence both the mutex and the spinlock must be
+ * taken here.
+ */
+static void pm_clk_list_lock(struct pm_subsys_data *psd)
+        __acquires(&psd->lock)
+{
+        mutex_lock(&psd->clock_mutex);
+        spin_lock_irq(&psd->lock);
+}
+
+/**
+ * pm_clk_list_unlock - counterpart to pm_clk_list_lock().
+ * @psd: the same pm_subsys_data instance previously passed to
+ *       pm_clk_list_lock().
+ */
+static void pm_clk_list_unlock(struct pm_subsys_data *psd)
+        __releases(&psd->lock)
+{
+        spin_unlock_irq(&psd->lock);
+        mutex_unlock(&psd->clock_mutex);
+}
+
+/**
+ * pm_clk_op_lock - ensure exclusive access for performing clock operations.
+ * @psd: pm_subsys_data instance corresponding to the PM clock entry list
+ *       and clock_op_might_sleep count being used.
+ * @flags: stored irq flags.
+ * @fn: string for the caller function's name.
+ *
+ * This is used by pm_clk_suspend() and pm_clk_resume() to guard
+ * against concurrent modifications to the clock entry list and the
+ * clock_op_might_sleep count. If clock_op_might_sleep is != 0 then
+ * only the mutex can be locked and those functions can only be used in
+ * non-atomic context. If clock_op_might_sleep == 0 then these functions
+ * may be used in any context and only the spinlock can be locked.
+ * Returns -EPERM if called in atomic context when clock ops might sleep.
+ */
+static int pm_clk_op_lock(struct pm_subsys_data *psd, unsigned long *flags,
+                          const char *fn)
+        /* sparse annotations don't work here as exit state isn't static */
+{
+        bool atomic_context = in_atomic() || irqs_disabled();
+
+try_again:
+        spin_lock_irqsave(&psd->lock, *flags);
+        if (!psd->clock_op_might_sleep) {
+                /* the __release is there to work around sparse limitations */
+                __release(&psd->lock);
+                return 0;
+        }
+
+        /* bail out if in atomic context */
+        if (atomic_context) {
+                pr_err("%s: atomic context with clock_op_might_sleep = %d\n",
+                       fn, psd->clock_op_might_sleep);
+                spin_unlock_irqrestore(&psd->lock, *flags);
+                might_sleep();
+                return -EPERM;
+        }
+
+        /* we must switch to the mutex */
+        spin_unlock_irqrestore(&psd->lock, *flags);
+        mutex_lock(&psd->clock_mutex);
+
+        /*
+         * There was a possibility for psd->clock_op_might_sleep
+         * to become 0 above. Keep the mutex only if not the case.
+         */
+        if (likely(psd->clock_op_might_sleep))
+                return 0;
+
+        mutex_unlock(&psd->clock_mutex);
+        goto try_again;
+}
+
+/**
+ * pm_clk_op_unlock - counterpart to pm_clk_op_lock().
+ * @psd: the same pm_subsys_data instance previously passed to
+ *       pm_clk_op_lock().
+ * @flags: irq flags provided by pm_clk_op_lock().
+ */
+static void pm_clk_op_unlock(struct pm_subsys_data *psd, unsigned long *flags)
+        /* sparse annotations don't work here as entry state isn't static */
+{
+        if (psd->clock_op_might_sleep) {
+                mutex_unlock(&psd->clock_mutex);
+        } else {
+                /* the __acquire is there to work around sparse limitations */
+                __acquire(&psd->lock);
+                spin_unlock_irqrestore(&psd->lock, *flags);
+        }
+}
+
 /**
  * pm_clk_enable - Enable a clock, reporting any errors
  * @dev: The device for the given clock
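[Editor's note — not part of the patch] The two lock helpers above use pm_subsys_data members that are introduced outside this file: clock_mutex and clock_op_might_sleep. A minimal sketch of the assumed layout, next to the fields this file already relies on (the authoritative definition lives in include/linux/pm.h and may differ in types and guards):

    struct pm_subsys_data {
            spinlock_t lock;                /* atomic-safe protection of the list */
            unsigned int refcount;
    #ifdef CONFIG_PM_CLK
            /* assumed: number of entries whose clock may sleep to enable */
            unsigned int clock_op_might_sleep;
            /* assumed: taken instead of the spinlock when sleeping is allowed */
            struct mutex clock_mutex;
            struct list_head clock_list;
    #endif
            /* ... other power management data ... */
    };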
@@ -43,14 +148,21 @@ static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
 {
         int ret;
 
-        if (ce->status < PCE_STATUS_ERROR) {
+        switch (ce->status) {
+        case PCE_STATUS_ACQUIRED:
+                ret = clk_prepare_enable(ce->clk);
+                break;
+        case PCE_STATUS_PREPARED:
                 ret = clk_enable(ce->clk);
-                if (!ret)
-                        ce->status = PCE_STATUS_ENABLED;
-                else
-                        dev_err(dev, "%s: failed to enable clk %p, error %d\n",
-                                __func__, ce->clk, ret);
+                break;
+        default:
+                return;
         }
+        if (!ret)
+                ce->status = PCE_STATUS_ENABLED;
+        else
+                dev_err(dev, "%s: failed to enable clk %p, error %d\n",
+                        __func__, ce->clk, ret);
 }
 
 /**
@@ -64,17 +176,20 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
         ce->clk = clk_get(dev, ce->con_id);
         if (IS_ERR(ce->clk)) {
                 ce->status = PCE_STATUS_ERROR;
+                return;
+        } else if (clk_is_enabled_when_prepared(ce->clk)) {
+                /* we defer preparing the clock in that case */
+                ce->status = PCE_STATUS_ACQUIRED;
+                ce->enabled_when_prepared = true;
+        } else if (clk_prepare(ce->clk)) {
+                ce->status = PCE_STATUS_ERROR;
+                dev_err(dev, "clk_prepare() failed\n");
+                return;
         } else {
-                if (clk_prepare(ce->clk)) {
-                        ce->status = PCE_STATUS_ERROR;
-                        dev_err(dev, "clk_prepare() failed\n");
-                } else {
-                        ce->status = PCE_STATUS_ACQUIRED;
-                        dev_dbg(dev,
-                                "Clock %pC con_id %s managed by runtime PM.\n",
-                                ce->clk, ce->con_id);
-                }
+                ce->status = PCE_STATUS_PREPARED;
         }
+        dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
+                ce->clk, ce->con_id);
 }
 
 static int __pm_clk_add(struct device *dev, const char *con_id,
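[Editor's note — summary, not part of the patch] Read together, pm_clk_acquire() above, __pm_clk_enable() and the pm_clk_suspend() hunk further down give each entry a small life cycle, sketched here as a comment:

    /*
     * clk_get() ok and clk_is_enabled_when_prepared() is true (enable may sleep):
     *     ACQUIRED --clk_prepare_enable()--> ENABLED --clk_disable_unprepare()--> ACQUIRED
     *
     * clk_get() ok, ordinary clock (prepared once at acquire time):
     *     PREPARED --clk_enable()--> ENABLED --clk_disable()--> PREPARED
     *
     * clk_get() or clk_prepare() failed:
     *     ERROR (removal only calls clk_put(), and only if the handle is valid)
     */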
@@ -106,9 +221,11 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
 
         pm_clk_acquire(dev, ce);
 
-        spin_lock_irq(&psd->lock);
+        pm_clk_list_lock(psd);
         list_add_tail(&ce->node, &psd->clock_list);
-        spin_unlock_irq(&psd->lock);
+        if (ce->enabled_when_prepared)
+                psd->clock_op_might_sleep++;
+        pm_clk_list_unlock(psd);
         return 0;
 }
 
@@ -239,14 +356,20 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
         if (!ce)
                 return;
 
-        if (ce->status < PCE_STATUS_ERROR) {
-                if (ce->status == PCE_STATUS_ENABLED)
-                        clk_disable(ce->clk);
-
-                if (ce->status >= PCE_STATUS_ACQUIRED) {
-                        clk_unprepare(ce->clk);
+        switch (ce->status) {
+        case PCE_STATUS_ENABLED:
+                clk_disable(ce->clk);
+                fallthrough;
+        case PCE_STATUS_PREPARED:
+                clk_unprepare(ce->clk);
+                fallthrough;
+        case PCE_STATUS_ACQUIRED:
+        case PCE_STATUS_ERROR:
+                if (!IS_ERR(ce->clk))
                         clk_put(ce->clk);
-                }
+                break;
+        default:
+                break;
         }
 
         kfree(ce->con_id);
@@ -269,7 +392,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
         if (!psd)
                 return;
 
-        spin_lock_irq(&psd->lock);
+        pm_clk_list_lock(psd);
 
         list_for_each_entry(ce, &psd->clock_list, node) {
                 if (!con_id && !ce->con_id)
@@ -280,12 +403,14 @@ void pm_clk_remove(struct device *dev, const char *con_id)
                         goto remove;
         }
 
-        spin_unlock_irq(&psd->lock);
+        pm_clk_list_unlock(psd);
         return;
 
  remove:
         list_del(&ce->node);
-        spin_unlock_irq(&psd->lock);
+        if (ce->enabled_when_prepared)
+                psd->clock_op_might_sleep--;
+        pm_clk_list_unlock(psd);
 
         __pm_clk_remove(ce);
 }
@@ -307,19 +432,21 @@ void pm_clk_remove_clk(struct device *dev, struct clk *clk)
         if (!psd || !clk)
                 return;
 
-        spin_lock_irq(&psd->lock);
+        pm_clk_list_lock(psd);
 
         list_for_each_entry(ce, &psd->clock_list, node) {
                 if (clk == ce->clk)
                         goto remove;
         }
 
-        spin_unlock_irq(&psd->lock);
+        pm_clk_list_unlock(psd);
         return;
 
  remove:
         list_del(&ce->node);
-        spin_unlock_irq(&psd->lock);
+        if (ce->enabled_when_prepared)
+                psd->clock_op_might_sleep--;
+        pm_clk_list_unlock(psd);
 
         __pm_clk_remove(ce);
 }
@@ -330,13 +457,16 @@ EXPORT_SYMBOL_GPL(pm_clk_remove_clk);
  * @dev: Device to initialize the list of PM clocks for.
  *
  * Initialize the lock and clock_list members of the device's pm_subsys_data
- * object.
+ * object, and set the count of clocks that might sleep to 0.
  */
 void pm_clk_init(struct device *dev)
 {
         struct pm_subsys_data *psd = dev_to_psd(dev);
-        if (psd)
+        if (psd) {
                 INIT_LIST_HEAD(&psd->clock_list);
+                mutex_init(&psd->clock_mutex);
+                psd->clock_op_might_sleep = 0;
+        }
 }
 EXPORT_SYMBOL_GPL(pm_clk_init);
 
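[Editor's note — hypothetical usage, not part of the patch] A driver normally hands its clocks to this layer once at probe time and then relies on pm_clk_suspend()/pm_clk_resume() (directly or via USE_PM_CLK_RUNTIME_OPS) from its runtime PM path. A minimal sketch using the existing pm_clock.h API; foo_probe and the "fck" con_id are made up for illustration:

    #include <linux/platform_device.h>
    #include <linux/pm_clock.h>

    static int foo_probe(struct platform_device *pdev)
    {
            int ret;

            /* allocates pm_subsys_data; pm_clk_init() above runs on this path */
            ret = pm_clk_create(&pdev->dev);
            if (ret)
                    return ret;

            /* with this patch the clock may be one whose enable path sleeps */
            ret = pm_clk_add(&pdev->dev, "fck");
            if (ret)
                    pm_clk_destroy(&pdev->dev);

            return ret;
    }

If any clock on the list is enabled_when_prepared, pm_clk_suspend() and pm_clk_resume() must be reached from a context that can sleep; an irq-safe runtime PM path would now trip the -EPERM check in pm_clk_op_lock().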
@@ -372,12 +502,13 @@ void pm_clk_destroy(struct device *dev)
 
         INIT_LIST_HEAD(&list);
 
-        spin_lock_irq(&psd->lock);
+        pm_clk_list_lock(psd);
 
         list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
                 list_move(&ce->node, &list);
+        psd->clock_op_might_sleep = 0;
 
-        spin_unlock_irq(&psd->lock);
+        pm_clk_list_unlock(psd);
 
         dev_pm_put_subsys_data(dev);
 
@@ -397,23 +528,30 @@ int pm_clk_suspend(struct device *dev)
         struct pm_subsys_data *psd = dev_to_psd(dev);
         struct pm_clock_entry *ce;
         unsigned long flags;
+        int ret;
 
         dev_dbg(dev, "%s()\n", __func__);
 
         if (!psd)
                 return 0;
 
-        spin_lock_irqsave(&psd->lock, flags);
+        ret = pm_clk_op_lock(psd, &flags, __func__);
+        if (ret)
+                return ret;
 
         list_for_each_entry_reverse(ce, &psd->clock_list, node) {
-                if (ce->status < PCE_STATUS_ERROR) {
-                        if (ce->status == PCE_STATUS_ENABLED)
+                if (ce->status == PCE_STATUS_ENABLED) {
+                        if (ce->enabled_when_prepared) {
+                                clk_disable_unprepare(ce->clk);
+                                ce->status = PCE_STATUS_ACQUIRED;
+                        } else {
                                 clk_disable(ce->clk);
-                        ce->status = PCE_STATUS_ACQUIRED;
+                                ce->status = PCE_STATUS_PREPARED;
+                        }
                 }
         }
 
-        spin_unlock_irqrestore(&psd->lock, flags);
+        pm_clk_op_unlock(psd, &flags);
 
         return 0;
 }
@@ -428,18 +566,21 @@ int pm_clk_resume(struct device *dev)
         struct pm_subsys_data *psd = dev_to_psd(dev);
         struct pm_clock_entry *ce;
         unsigned long flags;
+        int ret;
 
         dev_dbg(dev, "%s()\n", __func__);
 
         if (!psd)
                 return 0;
 
-        spin_lock_irqsave(&psd->lock, flags);
+        ret = pm_clk_op_lock(psd, &flags, __func__);
+        if (ret)
+                return ret;
 
         list_for_each_entry(ce, &psd->clock_list, node)
                 __pm_clk_enable(dev, ce);
 
-        spin_unlock_irqrestore(&psd->lock, flags);
+        pm_clk_op_unlock(psd, &flags);
 
         return 0;
 }