Skip to content
Permalink
Browse files
clk: add sys node to disable unused clk
The normal sequence is that the clock provider registers clk to
the clock framework, and the clock framework manages the clock resources
of the system. Clk consumers obtain clk through the clock framework API,
enable and disable clk.

Not every clk registered by a clock provider is used by a clock
consumer, so the clock framework registers a late initcall,
late_initcall_sync(clk_disable_unused), that disables the unused clks.

Now that the clock provider and some consumers are built as modules,
late_initcall_sync(clk_disable_unused) can run before those modules are
loaded and therefore cannot work properly. Add a sysfs node so that
user space can trigger the disabling of unused clks at the right time.

Signed-off-by: Zhipeng Wang <zhipeng.wang_1@nxp.com>
  • Loading branch information
zhipeng66 authored and intel-lab-lkp committed Aug 13, 2021
1 parent 765f4fa commit 166d814a2157788c7f7c7224b08ccdca8aaed7a0
Showing 1 changed file with 70 additions and 4 deletions.
@@ -32,6 +32,7 @@ static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;
static bool enable_clk_disable_unused;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
@@ -1206,7 +1207,7 @@ static void clk_core_disable_unprepare(struct clk_core *core)
clk_core_unprepare_lock(core);
}

static void __init clk_unprepare_unused_subtree(struct clk_core *core)
static void clk_unprepare_unused_subtree(struct clk_core *core)
{
struct clk_core *child;

@@ -1236,7 +1237,7 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
clk_pm_runtime_put(core);
}

static void __init clk_disable_unused_subtree(struct clk_core *core)
static void clk_disable_unused_subtree(struct clk_core *core)
{
struct clk_core *child;
unsigned long flags;
@@ -1282,15 +1283,15 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
clk_core_disable_unprepare(core->parent);
}

static bool clk_ignore_unused __initdata;
static bool clk_ignore_unused;
/*
 * Handle the "clk_ignore_unused" kernel command line parameter: set the
 * flag that tells the framework to skip disabling unused clocks.
 * Returns 1 to mark the boot option as handled.
 */
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

static int __init clk_disable_unused(void)
static int clk_disable_unused(void)
{
struct clk_core *core;

@@ -1319,6 +1320,71 @@ static int __init clk_disable_unused(void)
}
late_initcall_sync(clk_disable_unused);

/*
 * Arm or disarm the sysfs-triggered "disable unused clocks" pass.
 *
 * @enable: true to run clk_disable_unused() and record the enabled state,
 *          false to clear the recorded state only.
 *
 * Guard against repeated writes of '1': once the pass has run, a second
 * enable request must not re-walk the entire clk tree.
 */
static void clk_disable_unused_enable(bool enable)
{
	if (enable) {
		/* Only walk the tree the first time we are enabled. */
		if (!enable_clk_disable_unused)
			clk_disable_unused();
		enable_clk_disable_unused = true;
		pr_info("clk_disable_unused: enabled\n");
	} else {
		enable_clk_disable_unused = false;
		pr_info("clk_disable_unused: disabled\n");
	}
}

/*
 * Report whether the sysfs-triggered "disable unused clocks" pass is
 * currently marked enabled (mirrors enable_clk_disable_unused).
 */
static bool clk_disable_unused_status(void)
{
	bool enabled = enable_clk_disable_unused;

	return enabled;
}

/*
 * sysfs show callback for "enable_clk_disable_unused": prints 0 or 1.
 * Use sysfs_emit() rather than sprintf() per kernel sysfs convention —
 * it bounds the write to the PAGE_SIZE buffer sysfs provides.
 */
static ssize_t clk_disable_unused_show(struct kobject *kobj, struct kobj_attribute *attr,
				       char *buf)
{
	return sysfs_emit(buf, "%u\n", clk_disable_unused_status());
}

/*
 * sysfs store callback for "enable_clk_disable_unused".
 *
 * Parse the user string with kstrtobool(), the kernel idiom for boolean
 * sysfs input: still accepts "0"/"1" as before, and additionally the
 * conventional y/n/on/off spellings. Returns @n on success, -EINVAL on
 * malformed input.
 */
static ssize_t clk_disable_unused_store(struct kobject *kobj, struct kobj_attribute *attr,
					const char *buf, size_t n)
{
	bool val;

	if (kstrtobool(buf, &val))
		return -EINVAL;

	clk_disable_unused_enable(val);

	return n;
}

/*
 * sysfs plumbing for /sys/clk_ctrl: a single 0644 attribute file named
 * "enable_clk_disable_unused" wired to the show/store callbacks above.
 * (Renamed the misspelled "clk_ctrl_attrbute" array to clk_ctrl_attrs.)
 */
static struct kobj_attribute clk_ctrl_attr =
	__ATTR(enable_clk_disable_unused, 0644, clk_disable_unused_show, clk_disable_unused_store);

static struct attribute *clk_ctrl_attrs[] = {
	&clk_ctrl_attr.attr,
	NULL,
};

static const struct attribute_group clk_ctrl_attr_group = {
	.attrs = clk_ctrl_attrs,
};

static const struct attribute_group *clk_ctrl_attr_groups[] = {
	&clk_ctrl_attr_group,
	NULL,
};

static int __init creat_sys_clk_unused(void)
{
struct kobject *clk_ctrl_kobj = kobject_create_and_add("clk_ctrl", NULL);

sysfs_create_groups(clk_ctrl_kobj, clk_ctrl_attr_groups);

return 0;
}
late_initcall_sync(creat_sys_clk_unused);

static int clk_core_determine_round_nolock(struct clk_core *core,
struct clk_rate_request *req)
{

0 comments on commit 166d814

Please sign in to comment.