
Commit 7ec986e

Dong Aisheng authored and bebarino committed
clk: move clk_disable_unused after clk_core_disable_unprepare function
No functional change; this only moves code. clk_disable_unused() will need to call clk_core_prepare_enable()/clk_core_disable_unprepare() when the CLK_OPS_PARENT_ENABLE feature is added, so move it after clk_core_disable_unprepare() to avoid adding forward declarations later.

Cc: Michael Turquette <[email protected]>
Cc: Stephen Boyd <[email protected]>
Cc: Shawn Guo <[email protected]>
Signed-off-by: Dong Aisheng <[email protected]>
Signed-off-by: Stephen Boyd <[email protected]>
1 parent a6adc30 commit 7ec986e
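
The reordering matters because C resolves names top-down within a translation unit: a static function must be declared before its first use. Defining clk_disable_unused() and its subtree helpers after clk_core_prepare_enable()/clk_core_disable_unprepare() lets them call those helpers without separate prototypes. A minimal sketch of the pattern being avoided, with hypothetical names foo/bar standing in for the real functions:

    /* If the caller is defined first, the callee needs a forward
     * declaration -- this is the extra line the commit avoids by
     * reordering the definitions instead. */
    static void bar(void);          /* forward declaration */

    static void foo(void)
    {
            bar();                  /* caller defined before callee */
    }

    static void bar(void)
    {
            /* ... */
    }

Moving the definition of bar() above foo() makes the first line unnecessary, which is exactly what this commit does at file scope.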

File tree

1 file changed: +98 −98 lines changed


drivers/clk/clk.c

Lines changed: 98 additions & 98 deletions
@@ -172,104 +172,6 @@ static bool clk_core_is_enabled(struct clk_core *core)
 	return core->ops->is_enabled(core->hw);
 }
 
-static void clk_unprepare_unused_subtree(struct clk_core *core)
-{
-	struct clk_core *child;
-
-	lockdep_assert_held(&prepare_lock);
-
-	hlist_for_each_entry(child, &core->children, child_node)
-		clk_unprepare_unused_subtree(child);
-
-	if (core->prepare_count)
-		return;
-
-	if (core->flags & CLK_IGNORE_UNUSED)
-		return;
-
-	if (clk_core_is_prepared(core)) {
-		trace_clk_unprepare(core);
-		if (core->ops->unprepare_unused)
-			core->ops->unprepare_unused(core->hw);
-		else if (core->ops->unprepare)
-			core->ops->unprepare(core->hw);
-		trace_clk_unprepare_complete(core);
-	}
-}
-
-static void clk_disable_unused_subtree(struct clk_core *core)
-{
-	struct clk_core *child;
-	unsigned long flags;
-
-	lockdep_assert_held(&prepare_lock);
-
-	hlist_for_each_entry(child, &core->children, child_node)
-		clk_disable_unused_subtree(child);
-
-	flags = clk_enable_lock();
-
-	if (core->enable_count)
-		goto unlock_out;
-
-	if (core->flags & CLK_IGNORE_UNUSED)
-		goto unlock_out;
-
-	/*
-	 * some gate clocks have special needs during the disable-unused
-	 * sequence. call .disable_unused if available, otherwise fall
-	 * back to .disable
-	 */
-	if (clk_core_is_enabled(core)) {
-		trace_clk_disable(core);
-		if (core->ops->disable_unused)
-			core->ops->disable_unused(core->hw);
-		else if (core->ops->disable)
-			core->ops->disable(core->hw);
-		trace_clk_disable_complete(core);
-	}
-
-unlock_out:
-	clk_enable_unlock(flags);
-}
-
-static bool clk_ignore_unused;
-static int __init clk_ignore_unused_setup(char *__unused)
-{
-	clk_ignore_unused = true;
-	return 1;
-}
-__setup("clk_ignore_unused", clk_ignore_unused_setup);
-
-static int clk_disable_unused(void)
-{
-	struct clk_core *core;
-
-	if (clk_ignore_unused) {
-		pr_warn("clk: Not disabling unused clocks\n");
-		return 0;
-	}
-
-	clk_prepare_lock();
-
-	hlist_for_each_entry(core, &clk_root_list, child_node)
-		clk_disable_unused_subtree(core);
-
-	hlist_for_each_entry(core, &clk_orphan_list, child_node)
-		clk_disable_unused_subtree(core);
-
-	hlist_for_each_entry(core, &clk_root_list, child_node)
-		clk_unprepare_unused_subtree(core);
-
-	hlist_for_each_entry(core, &clk_orphan_list, child_node)
-		clk_unprepare_unused_subtree(core);
-
-	clk_prepare_unlock();
-
-	return 0;
-}
-late_initcall_sync(clk_disable_unused);
-
 /*** helper functions ***/
 
 const char *__clk_get_name(const struct clk *clk)
@@ -828,6 +730,104 @@ static void clk_core_disable_unprepare(struct clk_core *core)
 	clk_core_unprepare_lock(core);
 }
 
+static void clk_unprepare_unused_subtree(struct clk_core *core)
+{
+	struct clk_core *child;
+
+	lockdep_assert_held(&prepare_lock);
+
+	hlist_for_each_entry(child, &core->children, child_node)
+		clk_unprepare_unused_subtree(child);
+
+	if (core->prepare_count)
+		return;
+
+	if (core->flags & CLK_IGNORE_UNUSED)
+		return;
+
+	if (clk_core_is_prepared(core)) {
+		trace_clk_unprepare(core);
+		if (core->ops->unprepare_unused)
+			core->ops->unprepare_unused(core->hw);
+		else if (core->ops->unprepare)
+			core->ops->unprepare(core->hw);
+		trace_clk_unprepare_complete(core);
+	}
+}
+
+static void clk_disable_unused_subtree(struct clk_core *core)
+{
+	struct clk_core *child;
+	unsigned long flags;
+
+	lockdep_assert_held(&prepare_lock);
+
+	hlist_for_each_entry(child, &core->children, child_node)
+		clk_disable_unused_subtree(child);
+
+	flags = clk_enable_lock();
+
+	if (core->enable_count)
+		goto unlock_out;
+
+	if (core->flags & CLK_IGNORE_UNUSED)
+		goto unlock_out;
+
+	/*
+	 * some gate clocks have special needs during the disable-unused
+	 * sequence. call .disable_unused if available, otherwise fall
+	 * back to .disable
+	 */
+	if (clk_core_is_enabled(core)) {
+		trace_clk_disable(core);
+		if (core->ops->disable_unused)
+			core->ops->disable_unused(core->hw);
+		else if (core->ops->disable)
+			core->ops->disable(core->hw);
+		trace_clk_disable_complete(core);
+	}
+
+unlock_out:
+	clk_enable_unlock(flags);
+}
+
+static bool clk_ignore_unused;
+static int __init clk_ignore_unused_setup(char *__unused)
+{
+	clk_ignore_unused = true;
+	return 1;
+}
+__setup("clk_ignore_unused", clk_ignore_unused_setup);
+
+static int clk_disable_unused(void)
+{
+	struct clk_core *core;
+
+	if (clk_ignore_unused) {
+		pr_warn("clk: Not disabling unused clocks\n");
+		return 0;
+	}
+
+	clk_prepare_lock();
+
+	hlist_for_each_entry(core, &clk_root_list, child_node)
+		clk_disable_unused_subtree(core);
+
+	hlist_for_each_entry(core, &clk_orphan_list, child_node)
+		clk_disable_unused_subtree(core);
+
+	hlist_for_each_entry(core, &clk_root_list, child_node)
+		clk_unprepare_unused_subtree(core);
+
+	hlist_for_each_entry(core, &clk_orphan_list, child_node)
+		clk_unprepare_unused_subtree(core);
+
+	clk_prepare_unlock();
+
+	return 0;
+}
+late_initcall_sync(clk_disable_unused);
+
 static int clk_core_round_rate_nolock(struct clk_core *core,
 				      struct clk_rate_request *req)
 {
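
For context, the commit message hints at the follow-up: once the CLK_OPS_PARENT_ENABLE flag exists, clk_disable_unused_subtree() can bracket its gate access with the two helpers it now sits below. A rough, hypothetical sketch of that shape (not part of this commit; the actual follow-up change may differ):

    static void clk_disable_unused_subtree(struct clk_core *core)
    {
            struct clk_core *child;
            unsigned long flags;

            lockdep_assert_held(&prepare_lock);

            hlist_for_each_entry(child, &core->children, child_node)
                    clk_disable_unused_subtree(child);

            /* assumed follow-up: some hardware requires the parent to be
             * running before the child's gate registers can be touched */
            if (core->flags & CLK_OPS_PARENT_ENABLE)
                    clk_core_prepare_enable(core->parent);

            flags = clk_enable_lock();
            /* ... existing disable-unused body unchanged ... */
            clk_enable_unlock(flags);

            if (core->flags & CLK_OPS_PARENT_ENABLE)
                    clk_core_disable_unprepare(core->parent);
    }

Because clk_core_prepare_enable() and clk_core_disable_unprepare() are static helpers in the same file, that pattern only works without prototypes if clk_disable_unused_subtree() is defined after them, which is exactly the ordering this commit establishes.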
