Mirror of https://github.com/Fishwaldo/build.git (synced 2025-07-23 05:18:55 +00:00)

Add cpufreq support for mvebu-next

parent ffd7256f84
commit 377a3e09e2

7 changed files with 899 additions and 6 deletions
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/arm 4.10.1 Kernel Configuration
+# Linux/arm 4.10.10 Kernel Configuration
 #
 CONFIG_ARM=y
 CONFIG_ARM_HAS_SG_CHAIN=y
@@ -626,7 +626,7 @@ CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_GOV_ATTR_SET=y
 CONFIG_CPU_FREQ_GOV_COMMON=y
 CONFIG_CPU_FREQ_STAT=y
-# CONFIG_CPU_FREQ_STAT_DETAILS is not set
+CONFIG_CPU_FREQ_STAT_DETAILS=y
 # CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
 # CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
 # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
@@ -645,10 +645,9 @@ CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
 #
 CONFIG_CPUFREQ_DT=y
 CONFIG_CPUFREQ_DT_PLATDEV=y
-CONFIG_ARM_BIG_LITTLE_CPUFREQ=m
-CONFIG_ARM_DT_BL_CPUFREQ=m
+# CONFIG_ARM_BIG_LITTLE_CPUFREQ is not set
 # CONFIG_ARM_KIRKWOOD_CPUFREQ is not set
-CONFIG_QORIQ_CPUFREQ=m
+# CONFIG_QORIQ_CPUFREQ is not set

 #
 # CPU Idle
@@ -3961,7 +3960,11 @@ CONFIG_FANOTIFY=y
 # CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
-CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+# CONFIG_QUOTA_DEBUG is not set
+CONFIG_QUOTA_TREE=m
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
 CONFIG_QUOTACTL=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_FUSE_FS=y
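To verify the options above at runtime once an image built from this configuration boots, the standard cpufreq sysfs interface can be read back. A minimal userspace sketch (not part of this commit; the paths assume the usual /sys/devices/system/cpu layout):

/*
 * Minimal userspace sketch (not part of this commit): dump the cpufreq
 * state exposed by the options enabled above. Adjust the cpu index as needed.
 */
#include <stdio.h>

static void dump(const char *path)
{
        char buf[256];
        FILE *f = fopen(path, "r");

        if (!f) {
                printf("%s: not available\n", path);
                return;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("%s: %s", path, buf);
        fclose(f);
}

int main(void)
{
        /* governor in use and the governors built into this config */
        dump("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor");
        dump("/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors");
        /* current and available operating points */
        dump("/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq");
        dump("/sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies");
        return 0;
}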
@@ -0,0 +1,395 @@
This patch first shortens the register definitions and introduces the
difference between the Armada XP and Armada 38x values.

Then it adds specific functions for Armada 38x in order to support cpu
freq on these SoCs.

Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>

--- a/Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt
+++ b/Documentation/devicetree/bindings/clock/mvebu-cpu-clock.txt
@@ -1,10 +1,13 @@
Device Tree Clock bindings for cpu clock of Marvell EBU platforms

Required properties:
-- compatible : shall be one of the following:
+- compatible : shall be the following:
"marvell,armada-xp-cpu-clock" - cpu clocks for Armada XP
+ "marvell,armada-38x-cpu-clock", "marvell,armada-xp-cpu-clock" - cpu
+ clocks for Armada 38x
- reg : Address and length of the clock complex register set, followed
- by address and length of the PMU DFS registers
+ by address and length of the PMU DFS registers, for Armada 38x
+ a third register set must be added: DFX server.
- #clock-cells : should be set to 1.
- clocks : shall be the input parent clock phandle for the clock.

@@ -20,3 +23,23 @@ cpu@0 {
reg = <0>;
clocks = <&cpuclk 0>;
};
+
+or for Armada 38x
+
+cpuclk: clock-complex@18700 {
+ compatible = "marvell,armada-380-cpu-clock",
+ "marvell,armada-xp-cpu-clock";
+ reg = <0x18700 0xA0>, <0x1c054 0x40>,
+ <0xe4260 0x8>;
+ clocks = <&coreclk 1>;
+ #clock-cells = <1>;
+};
+
+cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a9";
+ reg = <0>;
+ clocks = <&cpuclk 0>;
+ clock-latency = <1000000>;
+ clock-names = "cpu0";
+};
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -20,16 +20,34 @@
#include <linux/mvebu-pmsu.h>
#include <asm/smp_plat.h>

-#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET 0x0
-#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL 0xff
-#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT 8
-#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET 0x8
-#define SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT 16
-#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET 0xC
-#define SYS_CTRL_CLK_DIVIDER_MASK 0x3F
-
-#define PMU_DFS_RATIO_SHIFT 16
-#define PMU_DFS_RATIO_MASK 0x3F
+/* Clock complex registers */
+#define SYS_CTRL_CLK_DIV_CTRL_OFFSET 0x0
+#define SYS_CTRL_CLK_DIV_CTRL_RESET_ALL 0xFF
+#define SYS_CTRL_CLK_DIV_CTRL_RESET_SHIFT 8
+#define SYS_CTRL_CLK_DIV_VALUE_A38X_OFFSET 0x4
+#define SYS_CTRL_CLK_DIV_CTRL2_OFFSET 0x8
+#define SYS_CTRL_CLK_DIV_CTRL2_NBCLK_RATIO_SHIFT 16
+#define SYS_CTRL_CLK_DIV_VALUE_AXP_OFFSET 0xC
+#define SYS_CTRL_CLK_DIV_MASK 0x3F
+
+/* PMU registers */
+#define PMU_DFS_CTRL1_OFFSET 0x0
+#define PMU_DFS_RATIO_SHIFT 16
+#define PMU_DFS_RATIO_MASK 0x3F
+#define PMUL_ACTIVATE_IF_CTRL_OFFSET 0x3C
+#define PMUL_ACTIVATE_IF_CTRL_PMU_DFS_OVRD_EN_MASK 0xFF
+#define PMUL_ACTIVATE_IF_CTRL_PMU_DFS_OVRD_EN_SHIFT 17
+#define PMUL_ACTIVATE_IF_CTRL_PMU_DFS_OVRD_EN 0x1
+
+/* DFX server registers */
+#define DFX_CPU_PLL_CLK_DIV_CTRL0_OFFSET 0x0
+#define DFX_CPU_PLL_CLK_DIV_CTRL0_RELOAD_SMOOTH_MASK 0xFF
+#define DFX_CPU_PLL_CLK_DIV_CTRL0_RELOAD_SMOOTH_SHIFT 0x8
+#define DFX_CPU_PLL_CLK_DIV_CTRL0_RELOAD_SMOOTH_PCLK 0x10
+#define DFX_CPU_PLL_CLK_DIV_CTRL1_OFFSET 0x4
+#define DFX_CPU_PLL_CLK_DIV_CTRL1_RESET_MASK_MASK 0xFF
+#define DFX_CPU_PLL_CLK_DIV_CTRL1_RESET_MASK_SHIFT 0x0
+#define DFX_CPU_PLL_CLK_DIV_CTRL1_RESET_MASK_PCLK 0x10

#define MAX_CPU 4
struct cpu_clk {
@@ -39,6 +57,7 @@ struct cpu_clk {
const char *parent_name;
void __iomem *reg_base;
void __iomem *pmu_dfs;
+ void __iomem *dfx_server_base;
};

static struct clk **clks;
@@ -47,14 +66,30 @@ static struct clk_onecell_data clk_data;

#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)

-static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
+static unsigned long armada_xp_clk_cpu_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
+ u32 reg, div;
+
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIV_VALUE_AXP_OFFSET);
+ div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIV_MASK;
+ return parent_rate / div;
+}
+
+static unsigned long armada_38x_clk_cpu_recalc_rate(struct clk_hw *hwclk,
 unsigned long parent_rate)
{
struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
u32 reg, div;

- reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
- div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
+ if (__clk_is_enabled(hwclk->clk) == false) {
+ /* for clock init - don't use divider, set maximal rate */
+ return parent_rate;
+ }
+
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIV_VALUE_A38X_OFFSET);
+ div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIV_MASK;
 return parent_rate / div;
}

@@ -73,42 +108,43 @@ static long clk_cpu_round_rate(struct cl
 return *parent_rate / div;
}

-static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long parent_rate)
-
+static int armada_xp_clk_cpu_off_set_rate(struct clk_hw *hwclk,
+ unsigned long rate,
+ unsigned long parent_rate)
{
 struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
 u32 reg, div;
 u32 reload_mask;

 div = parent_rate / rate;
- reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
- & (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
+ reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIV_VALUE_AXP_OFFSET)
+ & (~(SYS_CTRL_CLK_DIV_MASK << (cpuclk->cpu * 8))))
 | (div << (cpuclk->cpu * 8));
- writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIV_VALUE_AXP_OFFSET);
 /* Set clock divider reload smooth bit mask */
 reload_mask = 1 << (20 + cpuclk->cpu);

- reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIV_CTRL_OFFSET)
 | reload_mask;
- writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIV_CTRL_OFFSET);

 /* Now trigger the clock update */
- reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIV_CTRL_OFFSET)
 | 1 << 24;
- writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIV_CTRL_OFFSET);

 /* Wait for clocks to settle down then clear reload request */
 udelay(1000);
 reg &= ~(reload_mask | 1 << 24);
- writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIV_CTRL_OFFSET);
 udelay(1000);

 return 0;
}

-static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long parent_rate)
+static int armada_xp_clk_cpu_on_set_rate(struct clk_hw *hwclk,
+ unsigned long rate,
+ unsigned long parent_rate)
{
 u32 reg;
 unsigned long fabric_div, target_div, cur_rate;
@@ -123,9 +159,9 @@ static int clk_cpu_on_set_rate(struct cl

 cur_rate = clk_hw_get_rate(hwclk);

- reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET);
- fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) &
- SYS_CTRL_CLK_DIVIDER_MASK;
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIV_CTRL2_OFFSET);
+ fabric_div = (reg >> SYS_CTRL_CLK_DIV_CTRL2_NBCLK_RATIO_SHIFT) &
+ SYS_CTRL_CLK_DIV_MASK;

 /* Frequency is going up */
 if (rate == 2 * cur_rate)
@@ -142,40 +178,101 @@ static int clk_cpu_on_set_rate(struct cl
 reg |= (target_div << PMU_DFS_RATIO_SHIFT);
 writel(reg, cpuclk->pmu_dfs);

- reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
- reg |= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL <<
- SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT);
- writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIV_CTRL_OFFSET);
+ reg |= (SYS_CTRL_CLK_DIV_CTRL_RESET_ALL <<
+ SYS_CTRL_CLK_DIV_CTRL_RESET_SHIFT);
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIV_CTRL_OFFSET);

 return mvebu_pmsu_dfs_request(cpuclk->cpu);
}

-static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
+static int armada_xp_clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
 unsigned long parent_rate)
{
 if (__clk_is_enabled(hwclk->clk))
- return clk_cpu_on_set_rate(hwclk, rate, parent_rate);
+ return armada_xp_clk_cpu_on_set_rate(hwclk, rate, parent_rate);
+ else
+ return armada_xp_clk_cpu_off_set_rate(hwclk, rate, parent_rate);
+}
+static int armada_38x_clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
+ unsigned long parent_rate)
+{
+ u32 reg;
+ u32 target_div;
+ unsigned long cur_rate;
+ struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
+
+ /*
+ * PMU DFS registers are not mapped, Device Tree does not
+ * describe them. We cannot change the frequency dynamically.
+ */
+ if (!cpuclk->pmu_dfs)
+ return -ENODEV;
+
+ cur_rate = clk_hw_get_rate(hwclk);
+
+ /* Frequency is going up */
+ if (rate >= cur_rate)
+ target_div = 1;
+ /* Frequency is going down */
 else
- return clk_cpu_off_set_rate(hwclk, rate, parent_rate);
+ target_div = 2;
+
+ reg = readl(cpuclk->dfx_server_base + DFX_CPU_PLL_CLK_DIV_CTRL0_OFFSET);
+ reg &= ~(DFX_CPU_PLL_CLK_DIV_CTRL0_RELOAD_SMOOTH_MASK <<
+ DFX_CPU_PLL_CLK_DIV_CTRL0_RELOAD_SMOOTH_SHIFT);
+ reg |= (DFX_CPU_PLL_CLK_DIV_CTRL0_RELOAD_SMOOTH_PCLK <<
+ DFX_CPU_PLL_CLK_DIV_CTRL0_RELOAD_SMOOTH_SHIFT);
+ writel(reg, cpuclk->dfx_server_base + DFX_CPU_PLL_CLK_DIV_CTRL0_OFFSET);
+
+ reg = readl(cpuclk->dfx_server_base + DFX_CPU_PLL_CLK_DIV_CTRL1_OFFSET);
+ reg &= ~(DFX_CPU_PLL_CLK_DIV_CTRL1_RESET_MASK_MASK <<
+ DFX_CPU_PLL_CLK_DIV_CTRL1_RESET_MASK_SHIFT);
+ reg |= (DFX_CPU_PLL_CLK_DIV_CTRL1_RESET_MASK_PCLK <<
+ DFX_CPU_PLL_CLK_DIV_CTRL1_RESET_MASK_SHIFT);
+ writel(reg, cpuclk->dfx_server_base + DFX_CPU_PLL_CLK_DIV_CTRL1_OFFSET);
+
+ reg = readl(cpuclk->pmu_dfs);
+ reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT);
+ reg |= (target_div << PMU_DFS_RATIO_SHIFT);
+ writel(reg, cpuclk->pmu_dfs);
+
+ reg = readl(cpuclk->pmu_dfs + PMUL_ACTIVATE_IF_CTRL_OFFSET);
+ reg &= ~(PMUL_ACTIVATE_IF_CTRL_PMU_DFS_OVRD_EN_MASK <<
+ PMUL_ACTIVATE_IF_CTRL_PMU_DFS_OVRD_EN_SHIFT);
+ reg |= (PMUL_ACTIVATE_IF_CTRL_PMU_DFS_OVRD_EN <<
+ PMUL_ACTIVATE_IF_CTRL_PMU_DFS_OVRD_EN_SHIFT);
+ writel(reg, cpuclk->pmu_dfs + PMUL_ACTIVATE_IF_CTRL_OFFSET);
+
+ return mvebu_pmsu_dfs_request(cpuclk->cpu);
}

-static const struct clk_ops cpu_ops = {
- .recalc_rate = clk_cpu_recalc_rate,
+static const struct clk_ops armada_xp_cpu_ops = {
+ .recalc_rate = armada_xp_clk_cpu_recalc_rate,
+ .round_rate = clk_cpu_round_rate,
+ .set_rate = armada_xp_clk_cpu_set_rate,
+};
+
+static const struct clk_ops armada_38x_cpu_ops = {
+ .recalc_rate = armada_38x_clk_cpu_recalc_rate,
 .round_rate = clk_cpu_round_rate,
- .set_rate = clk_cpu_set_rate,
+ .set_rate = armada_38x_clk_cpu_set_rate,
};

-static void __init of_cpu_clk_setup(struct device_node *node)
+static void __init common_cpu_clk_init(struct device_node *node, bool cortexa9)
{
 struct cpu_clk *cpuclk;
 void __iomem *clock_complex_base = of_iomap(node, 0);
 void __iomem *pmu_dfs_base = of_iomap(node, 1);
+ void __iomem *dfx_server_base = of_iomap(node, 2);
 int ncpus = 0;
 struct device_node *dn;
+ bool independent_clocks = true;
+ const struct clk_ops *cpu_ops = NULL;

 if (clock_complex_base == NULL) {
 pr_err("%s: clock-complex base register not set\n",
- __func__);
+ __func__);
 return;
 }

@@ -185,7 +282,20 @@ static void __init of_cpu_clk_setup(stru

 for_each_node_by_type(dn, "cpu")
 ncpus++;
-
+ if (cortexa9) {
+ if (dfx_server_base == NULL) {
+ pr_err("%s: DFX server base register not set\n",
+ __func__);
+ return;
+ }
+ cpu_ops = &armada_38x_cpu_ops;
+ independent_clocks = false;
+ ncpus = 1;
+ } else {
+ cpu_ops = &armada_xp_cpu_ops;
+ for_each_node_by_type(dn, "cpu")
+ ncpus++;
+ }
 cpuclk = kzalloc(ncpus * sizeof(*cpuclk), GFP_KERNEL);
 if (WARN_ON(!cpuclk))
 goto cpuclk_out;
@@ -215,10 +325,12 @@ static void __init of_cpu_clk_setup(stru
 cpuclk[cpu].reg_base = clock_complex_base;
 if (pmu_dfs_base)
 cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu;
+
+ cpuclk[cpu].dfx_server_base = dfx_server_base;
 cpuclk[cpu].hw.init = &init;

 init.name = cpuclk[cpu].clk_name;
- init.ops = &cpu_ops;
+ init.ops = cpu_ops;
 init.flags = 0;
 init.parent_names = &cpuclk[cpu].parent_name;
 init.num_parents = 1;
@@ -227,6 +339,11 @@ static void __init of_cpu_clk_setup(stru
 if (WARN_ON(IS_ERR(clk)))
 goto bail_out;
 clks[cpu] = clk;
+
+ if (independent_clocks == false) {
+ /* use 1 clock to all cpus */
+ break;
+ }
 }
 clk_data.clk_num = MAX_CPU;
 clk_data.clks = clks;
@@ -241,7 +358,22 @@ clks_out:
 kfree(cpuclk);
cpuclk_out:
 iounmap(clock_complex_base);
+ iounmap(pmu_dfs_base);
+ iounmap(dfx_server_base);
+}
+
+static void __init armada_xp_cpu_clk_init(struct device_node *node)
+{
+ common_cpu_clk_init(node, false);
+}
+
+static void __init armada_38x_cpu_clk_init(struct device_node *node)
+{
+ common_cpu_clk_init(node, true);
}

CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
- of_cpu_clk_setup);
+ armada_xp_cpu_clk_init);
+CLK_OF_DECLARE(armada_38x_cpu_clock, "marvell,armada-380-cpu-clock",
+ armada_38x_cpu_clk_init);
+
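The new ops are only ever reached through the common clock framework; on Armada 38x the cpufreq core ends up calling clk_set_rate() on the cpu clock registered above. An illustrative consumer-side sketch (not part of the patch, error handling trimmed):

/*
 * Illustrative consumer-side sketch (not part of the patch): roughly what a
 * cpufreq driver does with the cpu clock once the ops above are registered.
 */
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/errno.h>

static int example_scale_cpu0(unsigned long target_hz)
{
        struct device *cpu_dev = get_cpu_device(0);
        struct clk *clk;
        long rounded;
        int ret;

        if (!cpu_dev)
                return -ENODEV;

        clk = clk_get(cpu_dev, NULL);   /* resolved through the DT "clocks" property */
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        /* clk_cpu_round_rate() clamps the request to a supported ratio */
        rounded = clk_round_rate(clk, target_hz);
        if (rounded > 0)
                ret = clk_set_rate(clk, rounded); /* reaches armada_38x_clk_cpu_set_rate() on Armada 38x */
        else
                ret = -EINVAL;

        clk_put(clk);
        return ret;
}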
@@ -0,0 +1,166 @@
The register definitions were too verbose. Shorten them in order to
have something more readable and avoid having most of the
instructions span two lines.

Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>

--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -45,27 +45,29 @@
#define PMSU_REG_SIZE 0x1000

/* PMSU MP registers */
-#define PMSU_CONTROL_AND_CONFIG(cpu) ((cpu * 0x100) + 0x104)
-#define PMSU_CONTROL_AND_CONFIG_DFS_REQ BIT(18)
-#define PMSU_CONTROL_AND_CONFIG_PWDDN_REQ BIT(16)
-#define PMSU_CONTROL_AND_CONFIG_L2_PWDDN BIT(20)
+#define PMSU_CTL_CFG(cpu) ((cpu * 0x100) + 0x104)
+#define PMSU_CTL_CFG_CPU0_FRQ_ID_SFT 4
+#define PMSU_CTL_CFG_CPU0_FRQ_ID_MSK 0xF
+#define PMSU_CTL_CFG_DFS_REQ BIT(18)
+#define PMSU_CTL_CFG_PWDDN_REQ BIT(16)
+#define PMSU_CTL_CFG_L2_PWDDN BIT(20)

#define PMSU_CPU_POWER_DOWN_CONTROL(cpu) ((cpu * 0x100) + 0x108)

#define PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP BIT(0)

-#define PMSU_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x10c)
-#define PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT BIT(16)
-#define PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT BIT(17)
-#define PMSU_STATUS_AND_MASK_IRQ_WAKEUP BIT(20)
-#define PMSU_STATUS_AND_MASK_FIQ_WAKEUP BIT(21)
-#define PMSU_STATUS_AND_MASK_DBG_WAKEUP BIT(22)
-#define PMSU_STATUS_AND_MASK_IRQ_MASK BIT(24)
-#define PMSU_STATUS_AND_MASK_FIQ_MASK BIT(25)
-
-#define PMSU_EVENT_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x120)
-#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE BIT(1)
-#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK BIT(17)
+#define PMSU_STATUS_MSK(cpu) ((cpu * 0x100) + 0x10c)
+#define PMSU_STATUS_MSK_CPU_IDLE_WAIT BIT(16)
+#define PMSU_STATUS_MSK_SNP_Q_EMPTY_WAIT BIT(17)
+#define PMSU_STATUS_MSK_IRQ_WAKEUP BIT(20)
+#define PMSU_STATUS_MSK_FIQ_WAKEUP BIT(21)
+#define PMSU_STATUS_MSK_DBG_WAKEUP BIT(22)
+#define PMSU_STATUS_MSK_IRQ_MASK BIT(24)
+#define PMSU_STATUS_MSK_FIQ_MASK BIT(25)
+
+#define PMSU_EVENT_STATUS_MSK(cpu) ((cpu * 0x100) + 0x120)
+#define PMSU_EVENT_STATUS_MSK_DFS_DONE BIT(1)
+#define PMSU_EVENT_STATUS_MSK_DFS_DONE_MASK BIT(17)

#define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu) ((cpu * 0x100) + 0x124)

@@ -237,23 +239,23 @@ static int mvebu_v7_pmsu_idle_prepare(un
 * IRQ and FIQ as wakeup events, set wait for snoop queue empty
 * indication and mask IRQ and FIQ from CPU
 */
- reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
- reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT |
- PMSU_STATUS_AND_MASK_IRQ_WAKEUP |
- PMSU_STATUS_AND_MASK_FIQ_WAKEUP |
- PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT |
- PMSU_STATUS_AND_MASK_IRQ_MASK |
- PMSU_STATUS_AND_MASK_FIQ_MASK;
- writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
+ reg = readl(pmsu_mp_base + PMSU_STATUS_MSK(hw_cpu));
+ reg |= PMSU_STATUS_MSK_CPU_IDLE_WAIT |
+ PMSU_STATUS_MSK_IRQ_WAKEUP |
+ PMSU_STATUS_MSK_FIQ_WAKEUP |
+ PMSU_STATUS_MSK_SNP_Q_EMPTY_WAIT |
+ PMSU_STATUS_MSK_IRQ_MASK |
+ PMSU_STATUS_MSK_FIQ_MASK;
+ writel(reg, pmsu_mp_base + PMSU_STATUS_MSK(hw_cpu));

- reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
+ reg = readl(pmsu_mp_base + PMSU_CTL_CFG(hw_cpu));
 /* ask HW to power down the L2 Cache if needed */
 if (flags & PMSU_PREPARE_DEEP_IDLE)
- reg |= PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
+ reg |= PMSU_CTL_CFG_L2_PWDDN;

 /* request power down */
- reg |= PMSU_CONTROL_AND_CONFIG_PWDDN_REQ;
- writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
+ reg |= PMSU_CTL_CFG_PWDDN_REQ;
+ writel(reg, pmsu_mp_base + PMSU_CTL_CFG(hw_cpu));

 if (flags & PMSU_PREPARE_SNOOP_DISABLE) {
 /* Disable snoop disable by HW - SW is taking care of it */
@@ -346,17 +348,17 @@ void mvebu_v7_pmsu_idle_exit(void)
 if (pmsu_mp_base == NULL)
 return;
 /* cancel ask HW to power down the L2 Cache if possible */
- reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
- reg &= ~PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
- writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
+ reg = readl(pmsu_mp_base + PMSU_CTL_CFG(hw_cpu));
+ reg &= ~PMSU_CTL_CFG_L2_PWDDN;
+ writel(reg, pmsu_mp_base + PMSU_CTL_CFG(hw_cpu));

 /* cancel Enable wakeup events and mask interrupts */
- reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
- reg &= ~(PMSU_STATUS_AND_MASK_IRQ_WAKEUP | PMSU_STATUS_AND_MASK_FIQ_WAKEUP);
- reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
- reg &= ~PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT;
- reg &= ~(PMSU_STATUS_AND_MASK_IRQ_MASK | PMSU_STATUS_AND_MASK_FIQ_MASK);
- writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
+ reg = readl(pmsu_mp_base + PMSU_STATUS_MSK(hw_cpu));
+ reg &= ~(PMSU_STATUS_MSK_IRQ_WAKEUP | PMSU_STATUS_MSK_FIQ_WAKEUP);
+ reg &= ~PMSU_STATUS_MSK_CPU_IDLE_WAIT;
+ reg &= ~PMSU_STATUS_MSK_SNP_Q_EMPTY_WAIT;
+ reg &= ~(PMSU_STATUS_MSK_IRQ_MASK | PMSU_STATUS_MSK_FIQ_MASK);
+ writel(reg, pmsu_mp_base + PMSU_STATUS_MSK(hw_cpu));
}

static int mvebu_v7_cpu_pm_notify(struct notifier_block *self,
@@ -543,16 +545,16 @@ static void mvebu_pmsu_dfs_request_local
 local_irq_save(flags);

 /* Prepare to enter idle */
- reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
- reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT |
- PMSU_STATUS_AND_MASK_IRQ_MASK |
- PMSU_STATUS_AND_MASK_FIQ_MASK;
- writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
+ reg = readl(pmsu_mp_base + PMSU_STATUS_MSK(cpu));
+ reg |= PMSU_STATUS_MSK_CPU_IDLE_WAIT |
+ PMSU_STATUS_MSK_IRQ_MASK |
+ PMSU_STATUS_MSK_FIQ_MASK;
+ writel(reg, pmsu_mp_base + PMSU_STATUS_MSK(cpu));

 /* Request the DFS transition */
- reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));
- reg |= PMSU_CONTROL_AND_CONFIG_DFS_REQ;
- writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu));
+ reg = readl(pmsu_mp_base + PMSU_CTL_CFG(cpu));
+ reg |= PMSU_CTL_CFG_DFS_REQ;
+ writel(reg, pmsu_mp_base + PMSU_CTL_CFG(cpu));

 /* The fact of entering idle will trigger the DFS transition */
 wfi();
@@ -561,9 +563,9 @@ static void mvebu_pmsu_dfs_request_local
 * We're back from idle, the DFS transition has completed,
 * clear the idle wait indication.
 */
- reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
- reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
- writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu));
+ reg = readl(pmsu_mp_base + PMSU_STATUS_MSK(cpu));
+ reg &= ~PMSU_STATUS_MSK_CPU_IDLE_WAIT;
+ writel(reg, pmsu_mp_base + PMSU_STATUS_MSK(cpu));

 local_irq_restore(flags);
}
@@ -591,8 +593,8 @@ int mvebu_pmsu_dfs_request(int cpu)
 /* Poll until the DFS done event is generated */
 timeout = jiffies + HZ;
 while (time_before(jiffies, timeout)) {
- reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
- if (reg & PMSU_EVENT_STATUS_AND_MASK_DFS_DONE)
+ reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_MSK(hwcpu));
+ if (reg & PMSU_EVENT_STATUS_MSK_DFS_DONE)
 break;
 udelay(10);
 }
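The shortened names keep the usual OFFSET/SHIFT/MASK convention, so every field update stays a read-modify-write of a single register. A generic sketch of that convention (illustration only, the field below is made up):

/*
 * Generic read-modify-write sketch of the OFFSET/SHIFT/MASK convention the
 * renamed macros follow. Illustration only: EXAMPLE_FIELD_* are invented.
 */
#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_FIELD_SHIFT     4
#define EXAMPLE_FIELD_MASK      0xF

static void example_set_field(void __iomem *base, u32 offset, u32 val)
{
        u32 reg = readl(base + offset);

        reg &= ~(EXAMPLE_FIELD_MASK << EXAMPLE_FIELD_SHIFT);      /* clear the field */
        reg |= (val & EXAMPLE_FIELD_MASK) << EXAMPLE_FIELD_SHIFT; /* write the new value */
        writel(reg, base + offset);
}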
@@ -0,0 +1,126 @@
In preparation for supporting cpufreq on Armada 38x:

- rename the function to be more generic.

- move the interrupt masking to the _dfs_request_local function so it
can be used by both SoCs.

- add stubs allowing the support for a new SoC to be registered.

Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>

The original patch, adapted by Turris for kernel 4.4, has been adapted
for kernel 4.9:
* use include/linux/mvebu-pmsu.h to pass variables and functions
between pmsu.c and cpufreq.c
* cpufreq-dt does not take parameters any more, so revert to
simple registration
Signed-off-by: Hannu Nyman <hannu.nyman@iki.fi>

--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -104,6 +104,7 @@ static phys_addr_t pmsu_mp_phys_base;
static void __iomem *pmsu_mp_base;

static void *mvebu_cpu_resume;
+int (*mvebu_pmsu_dfs_request_ptr)(int cpu);

static const struct of_device_id of_pmsu_table[] = {
{ .compatible = "marvell,armada-370-pmsu", },
@@ -544,6 +545,14 @@ static void mvebu_pmsu_dfs_request_local

 local_irq_save(flags);

+ /* Clear any previous DFS DONE event */
+ reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_MSK(cpu));
+ reg &= ~PMSU_EVENT_STATUS_MSK_DFS_DONE;
+
+ /* Mask the DFS done interrupt, since we are going to poll */
+ reg |= PMSU_EVENT_STATUS_MSK_DFS_DONE_MASK;
+ writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_MSK(cpu));
+
 /* Prepare to enter idle */
 reg = readl(pmsu_mp_base + PMSU_STATUS_MSK(cpu));
 reg |= PMSU_STATUS_MSK_CPU_IDLE_WAIT |
@@ -567,25 +576,20 @@ static void mvebu_pmsu_dfs_request_local
 reg &= ~PMSU_STATUS_MSK_CPU_IDLE_WAIT;
 writel(reg, pmsu_mp_base + PMSU_STATUS_MSK(cpu));

+ /* Restore the DFS mask to its original state */
+ reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_MSK(cpu));
+ reg &= ~PMSU_EVENT_STATUS_MSK_DFS_DONE_MASK;
+ writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_MSK(cpu));
+
 local_irq_restore(flags);
}

-int mvebu_pmsu_dfs_request(int cpu)
+int armada_xp_pmsu_dfs_request(int cpu)
{
 unsigned long timeout;
 int hwcpu = cpu_logical_map(cpu);
 u32 reg;

- /* Clear any previous DFS DONE event */
- reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
- reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE;
- writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
-
- /* Mask the DFS done interrupt, since we are going to poll */
- reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
- reg |= PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK;
- writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
-
 /* Trigger the DFS on the appropriate CPU */
 smp_call_function_single(cpu, mvebu_pmsu_dfs_request_local,
 NULL, false);
@@ -601,11 +605,10 @@ int mvebu_pmsu_dfs_request(int cpu)

 if (time_after(jiffies, timeout))
 return -ETIME;
-
- /* Restore the DFS mask to its original state */
- reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
- reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK;
- writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu));
-
 return 0;
}
+
+int mvebu_pmsu_dfs_request(int cpu)
+{
+ return mvebu_pmsu_dfs_request_ptr(cpu);
+}
--- a/drivers/cpufreq/mvebu-cpufreq.c
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -22,8 +22,9 @@
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/resource.h>
+#include <linux/mvebu-pmsu.h>

-static int __init armada_xp_pmsu_cpufreq_init(void)
+static int __init mvebu_pmsu_cpufreq_init(void)
{
 struct device_node *np;
 struct resource res;
@@ -101,7 +102,8 @@ static int __init armada_xp_pmsu_cpufreq
 __func__, ret);
 }

+ mvebu_pmsu_dfs_request_ptr = armada_xp_pmsu_dfs_request;
 platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
 return 0;
}
-device_initcall(armada_xp_pmsu_cpufreq_init);
+device_initcall(mvebu_pmsu_cpufreq_init);
--- a/include/linux/mvebu-pmsu.h
+++ b/include/linux/mvebu-pmsu.h
@@ -16,5 +16,7 @@ int mvebu_pmsu_dfs_request(int cpu);
#else
static inline int mvebu_pmsu_dfs_request(int cpu) { return -ENODEV; }
#endif
+extern int (*mvebu_pmsu_dfs_request_ptr)(int cpu);
+int armada_xp_pmsu_dfs_request(int cpu);

#endif /* __MVEBU_PMSU_H__ */
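The refactoring boils down to a simple dispatch: the platform init code stores the SoC-specific DFS handler in a function pointer and the old entry point becomes a thin wrapper. A condensed sketch of that indirection (illustration only; the NULL guard is an addition, the patch itself assumes the pointer has been set before any DFS request):

/*
 * Condensed sketch of the indirection introduced above (illustration only):
 * a generic entry point dispatches to whichever SoC backend the platform
 * init code registered. The NULL check is an extra guard, not in the patch.
 */
#include <linux/errno.h>

int (*mvebu_pmsu_dfs_request_ptr)(int cpu);     /* set from mvebu_pmsu_cpufreq_init() */

int mvebu_pmsu_dfs_request(int cpu)
{
        if (!mvebu_pmsu_dfs_request_ptr)
                return -ENODEV;
        return mvebu_pmsu_dfs_request_ptr(cpu);
}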
@@ -0,0 +1,121 @@
This commit adds the last missing piece of code enabling dynamic
frequency scaling support for Armada 38x.

The main difference with Armada XP is that the Cortex-A9 CPU
frequencies of the Armada 38x SoCs are not independent. Even if a SoC
contains a single CPU, some specific initialization has to be done at
pmsu level: this unit must not wait for the second CPU when the
frequency is modified.

Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>

The original patch, adapted by Turris for kernel 4.4, has been adapted
for kernel 4.9:
* use include/linux/mvebu-pmsu.h to pass variables and functions
between pmsu.c and cpufreq.c
* cpufreq-dt does not take parameters any more, so revert to
simple registration
Signed-off-by: Hannu Nyman <hannu.nyman@iki.fi>

--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -351,6 +351,13 @@ void mvebu_v7_pmsu_idle_exit(void)
 /* cancel ask HW to power down the L2 Cache if possible */
 reg = readl(pmsu_mp_base + PMSU_CTL_CFG(hw_cpu));
 reg &= ~PMSU_CTL_CFG_L2_PWDDN;
+
+ /*
+ * When exiting from idle state such as cpuidle or hotplug,
+ * enable PMU wait for the CPU to enter WFI when doing DFS
+ * by setting CPUx Frequency ID to 1
+ */
+ reg |= 1 << PMSU_CTL_CFG_CPU0_FRQ_ID_SFT;
 writel(reg, pmsu_mp_base + PMSU_CTL_CFG(hw_cpu));

 /* cancel Enable wakeup events and mask interrupts */
@@ -608,6 +615,38 @@ int armada_xp_pmsu_dfs_request(int cpu)
 return 0;
}

+void mvebu_v7_pmsu_disable_dfs_cpu(int hw_cpu)
+{
+ u32 reg;
+
+ if (pmsu_mp_base == NULL)
+ return;
+ /*
+ * Disable PMU wait for the CPU to enter WFI when doing DFS
+ * by setting CPUx Frequency ID to 0
+ */
+ reg = readl(pmsu_mp_base + PMSU_CTL_CFG(hw_cpu));
+ reg &= ~(PMSU_CTL_CFG_CPU0_FRQ_ID_MSK << PMSU_CTL_CFG_CPU0_FRQ_ID_SFT);
+ writel(reg, pmsu_mp_base + PMSU_CTL_CFG(hw_cpu));
+}
+
+int armada_38x_pmsu_dfs_request(int cpu)
+{
+ /*
+ * Protect the CPU DFS sequence from changes in the number of
+ * online cpus during the frequency transition by temporarily
+ * disabling cpu hotplug
+ */
+ cpu_hotplug_disable();
+
+ /* Trigger the DFS on all the CPUs */
+ on_each_cpu(mvebu_pmsu_dfs_request_local,
+ NULL, false);
+
+ cpu_hotplug_enable();
+
+ return 0;
+}
+
int mvebu_pmsu_dfs_request(int cpu)
{
 return mvebu_pmsu_dfs_request_ptr(cpu);
--- a/drivers/cpufreq/mvebu-cpufreq.c
+++ b/drivers/cpufreq/mvebu-cpufreq.c
@@ -30,7 +30,8 @@ static int __init mvebu_pmsu_cpufreq_ini
 struct resource res;
 int ret, cpu;

- if (!of_machine_is_compatible("marvell,armadaxp"))
+ if (!of_machine_is_compatible("marvell,armadaxp") &&
+ !of_machine_is_compatible("marvell,armada380"))
 return 0;

 /*
@@ -77,6 +78,8 @@ static int __init mvebu_pmsu_cpufreq_ini
 return PTR_ERR(clk);
 }

+ clk_prepare_enable(clk);
+
 /*
 * In case of a failure of dev_pm_opp_add(), we don't
 * bother with cleaning up the registered OPP (there's
@@ -102,7 +105,14 @@ static int __init mvebu_pmsu_cpufreq_ini
 __func__, ret);
 }

- mvebu_pmsu_dfs_request_ptr = armada_xp_pmsu_dfs_request;
+ if (of_machine_is_compatible("marvell,armada380")) {
+ if (num_online_cpus() == 1)
+ mvebu_v7_pmsu_disable_dfs_cpu(1);
+
+ mvebu_pmsu_dfs_request_ptr = armada_38x_pmsu_dfs_request;
+ } else if (of_machine_is_compatible("marvell,armadaxp")) {
+ mvebu_pmsu_dfs_request_ptr = armada_xp_pmsu_dfs_request;
+ }
 platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
 return 0;
}
--- a/include/linux/mvebu-pmsu.h
+++ b/include/linux/mvebu-pmsu.h
@@ -18,5 +18,7 @@ static inline int mvebu_pmsu_dfs_request
#endif
extern int (*mvebu_pmsu_dfs_request_ptr)(int cpu);
int armada_xp_pmsu_dfs_request(int cpu);
+int armada_38x_pmsu_dfs_request(int cpu);
+void mvebu_v7_pmsu_disable_dfs_cpu(int hw_cpu);

#endif /* __MVEBU_PSMU_H__ */
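For reference, the init path extended here registers the operating points itself before spawning the cpufreq-dt platform device; in the mainline driver that amounts to one OPP at the full cpu clock rate and one at half of it, with no voltage data. A hedged sketch of that kind of OPP registration (not a copy of the driver):

/*
 * Hedged sketch of the OPP registration this init path relies on: two
 * operating points per CPU, full rate and half rate, no voltage information.
 * cpufreq-dt then picks these operating points up automatically.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/pm_opp.h>

static int example_register_opps(int cpu, unsigned long rate_hz)
{
        struct device *cpu_dev = get_cpu_device(cpu);
        int ret;

        if (!cpu_dev)
                return -ENODEV;

        ret = dev_pm_opp_add(cpu_dev, rate_hz, 0);      /* full rate, 0 uV */
        if (ret)
                return ret;
        return dev_pm_opp_add(cpu_dev, rate_hz / 2, 0); /* half rate, 0 uV */
}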
@@ -0,0 +1,59 @@
In order to support dynamic frequency scaling:

- the cpuclk Device Tree node must be added

- the clock property of the CPUs must be filled in, including the
clock-latency property.

Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>

--- a/arch/arm/boot/dts/armada-380.dtsi
+++ b/arch/arm/boot/dts/armada-380.dtsi
@@ -61,6 +61,9 @@
 device_type = "cpu";
 compatible = "arm,cortex-a9";
 reg = <0>;
+ clocks = <&cpuclk 0>;
+ clock-latency = <1000000>;
+ clock-names = "cpu0";
 };
 };

--- a/arch/arm/boot/dts/armada-385.dtsi
+++ b/arch/arm/boot/dts/armada-385.dtsi
@@ -61,11 +61,17 @@
 device_type = "cpu";
 compatible = "arm,cortex-a9";
 reg = <0>;
+ clocks = <&cpuclk 0>;
+ clock-latency = <1000000>;
+ clock-names = "cpu0";
 };
 cpu@1 {
 device_type = "cpu";
 compatible = "arm,cortex-a9";
 reg = <1>;
+ clocks = <&cpuclk 0>;
+ clock-latency = <1000000>;
+ clock-names = "cpu1";
 };
 };

--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -358,6 +358,15 @@
 #clock-cells = <1>;
 };

+ cpuclk: clock-complex@18700 {
+ compatible = "marvell,armada-380-cpu-clock",
+ "marvell,armada-xp-cpu-clock";
+ reg = <0x18700 0xA0>, <0x1c054 0x40>,
+ <0xe4260 0x8>;
+ clocks = <&coreclk 1>;
+ #clock-cells = <1>;
+ };
+
 mbusc: mbus-controller@20000 {
 compatible = "marvell,mbus-controller";
 reg = <0x20000 0x100>, <0x20180 0x20>;
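The clocks and clock-latency properties added here are what the generic cpufreq layer consumes: the clock drives the rate changes, and clock-latency becomes the transition latency. A small sketch of how such a property can be read back (illustration only, roughly what cpufreq-dt does):

/*
 * Illustration only: reading back the clock-latency value added above,
 * roughly what cpufreq-dt does to set its transition latency.
 */
#include <linux/of.h>

static u32 example_cpu0_transition_latency_ns(void)
{
        struct device_node *np = of_get_cpu_node(0, NULL);
        u32 latency = 0;

        if (np) {
                /* stays 0 if the property is absent */
                of_property_read_u32(np, "clock-latency", &latency);
                of_node_put(np);
        }
        return latency;
}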
@@ -0,0 +1,23 @@
Since U-Boot 2015_T1.0p6 there are new frequency settings available.

Based on a patch from Nadav Haklai <nadavh@marvell.com>

Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>

--- a/drivers/clk/mvebu/armada-38x.c
+++ b/drivers/clk/mvebu/armada-38x.c
@@ -46,10 +46,11 @@ static u32 __init armada_38x_get_tclk_fr
}

static const u32 armada_38x_cpu_frequencies[] __initconst = {
- 0, 0, 0, 0,
- 1066 * 1000 * 1000, 0, 0, 0,
+ 666 * 1000 * 1000, 0, 800 * 1000 * 1000, 0,
+ 1066 * 1000 * 1000, 0, 1200 * 1000 * 1000, 0,
 1332 * 1000 * 1000, 0, 0, 0,
- 1600 * 1000 * 1000,
+ 1600 * 1000 * 1000, 0, 0, 0,
+ 1866 * 1000 * 1000, 0, 0, 2000 * 1000 * 1000,
};

static u32 __init armada_38x_get_cpu_freq(void __iomem *sar)
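The table is indexed with the CPU frequency option field sampled from the SAR register, which armada_38x_get_cpu_freq() reads just below this hunk. A sketch of that kind of lookup (illustration only; the shift and mask below are placeholders, not the real Armada 38x SAR layout):

/*
 * Illustration of how a SAR-indexed frequency table like the one above is
 * consumed. EXAMPLE_SAR_* are placeholders, not the real SAR field layout.
 */
#include <linux/io.h>

#define EXAMPLE_SAR_CPU_FREQ_SHIFT      10      /* placeholder */
#define EXAMPLE_SAR_CPU_FREQ_MASK       0x1F    /* placeholder */

static u32 example_get_cpu_freq(void __iomem *sar, const u32 *table, size_t len)
{
        u32 idx = (readl(sar) >> EXAMPLE_SAR_CPU_FREQ_SHIFT) & EXAMPLE_SAR_CPU_FREQ_MASK;

        /* entries left at 0, and out-of-range selectors, mean "unknown" */
        if (idx >= len || table[idx] == 0)
                return 0;
        return table[idx];
}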