Skip to content

Commit 3721924

Browse files
Nicolas Pitre authored and Russell King committed
ARM: 8081/1: MCPM: provide infrastructure to allow for MCPM loopback
The kernel already has the responsibility to handle resources such as the CCI when hotplugging CPUs, during the booting of secondary CPUs, and when resuming from suspend/idle. It would be more coherent and less confusing if the CCI for the boot CPU (or cluster) was also initialized by the kernel rather than expecting the firmware/bootloader to do it and only in that case. After all, the kernel has all the necessary code already and the bootloader shouldn't have to care at all. The CCI may be turned on only when the cache is off. Leveraging the CPU suspend code to loop back through the low-level MCPM entry point is all that is needed to properly turn on the CCI from the kernel by using the same code as during secondary boot. Let's provide a generic MCPM loopback function that can be invoked by backend initialization code to set things (CCI or similar) on the boot CPU just as it is done for the other CPUs. Signed-off-by: Nicolas Pitre <nico@linaro.org> Reviewed-by: Kevin Hilman <khilman@linaro.org> Tested-by: Kevin Hilman <khilman@linaro.org> Tested-by: Doug Anderson <dianders@chromium.org> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
1 parent 731542e commit 3721924

File tree

2 files changed

+68
-0
lines changed

2 files changed

+68
-0
lines changed

arch/arm/common/mcpm_entry.c

Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,11 +12,13 @@
1212
#include <linux/kernel.h>
1313
#include <linux/init.h>
1414
#include <linux/irqflags.h>
15+
#include <linux/cpu_pm.h>
1516

1617
#include <asm/mcpm.h>
1718
#include <asm/cacheflush.h>
1819
#include <asm/idmap.h>
1920
#include <asm/cputype.h>
21+
#include <asm/suspend.h>
2022

2123
extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
2224

@@ -146,6 +148,56 @@ int mcpm_cpu_powered_up(void)
146148
return 0;
147149
}
148150

151+
#ifdef CONFIG_ARM_CPU_SUSPEND
152+
153+
/*
 * Executed via cpu_suspend(): takes the calling CPU down through the
 * MCPM state machine with the cache disabled, then soft-resets it into
 * the MCPM low-level entry point.  Never returns here — the CPU comes
 * back through the entry vector set below (cpu_resume), which is how
 * cpu_suspend() ultimately completes in the caller.
 */
static int __init nocache_trampoline(unsigned long _arg)
{
	/* The cache_disable callback is smuggled through cpu_suspend()'s arg. */
	void (*cache_disable)(void) = (void *)_arg;
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	phys_reset_t phys_reset;

	/* Come back up through the same path as a secondary CPU boot. */
	mcpm_set_entry_vector(cpu, cluster, cpu_resume);
	setup_mm_for_reboot();

	/* Regular MCPM power-down sequence, run with the cache turned off. */
	__mcpm_cpu_going_down(cpu, cluster);
	/* Sole CPU in the cluster expected here; entering critical must succeed. */
	BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
	cache_disable();
	__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	__mcpm_cpu_down(cpu, cluster);

	/* Jump to the MCPM entry point at its physical address. */
	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));
	BUG(); /* not reached: the reset above does not return */
}
174+
175+
/*
 * mcpm_loopback - soft-restart the boot CPU through the MCPM low-level
 * entry code, calling @cache_disable while the cache is being turned
 * off.  Lets backend init code set up resources that require the cache
 * to be off (e.g. the CCI) on the boot CPU, just as is done for
 * secondaries.  Returns 0 on success, or the negative error code from
 * cpu_pm_enter()/cpu_suspend().
 */
int __init mcpm_loopback(void (*cache_disable)(void))
{
	int ret;

	/*
	 * We're going to soft-restart the current CPU through the
	 * low-level MCPM code by leveraging the suspend/resume
	 * infrastructure. Let's play it safe by using cpu_pm_enter()
	 * in case the CPU init code path resets the VFP or similar.
	 */
	local_irq_disable();
	local_fiq_disable();
	ret = cpu_pm_enter();
	if (!ret) {
		/* Round trip: nocache_trampoline -> reset -> cpu_resume. */
		ret = cpu_suspend((unsigned long)cache_disable, nocache_trampoline);
		cpu_pm_exit();
	}
	local_fiq_enable();
	local_irq_enable();
	if (ret)
		pr_err("%s returned %d\n", __func__, ret);
	return ret;
}
198+
199+
#endif
200+
149201
struct sync_struct mcpm_sync;
150202

151203
/*

arch/arm/include/asm/mcpm.h

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -217,6 +217,22 @@ int __mcpm_cluster_state(unsigned int cluster);
217217
int __init mcpm_sync_init(
218218
void (*power_up_setup)(unsigned int affinity_level));
219219

220+
/**
 * mcpm_loopback - make a run through the MCPM low-level code
 *
 * @cache_disable: pointer to function performing cache disabling
 *
 * This exercises the MCPM machinery by soft resetting the CPU and branching
 * to the MCPM low-level entry code before returning to the caller.
 * The @cache_disable function must do the necessary cache disabling to
 * let the regular kernel init code turn it back on as if the CPU was
 * hotplugged in. The MCPM state machine is set as if the cluster was
 * initialized meaning the power_up_setup callback passed to mcpm_sync_init()
 * will be invoked for all affinity levels. This may be useful to initialize
 * some resources such as enabling the CCI that requires the cache to be off,
 * or simply for testing purposes.
 */
234+
int __init mcpm_loopback(void (*cache_disable)(void));
235+
220236
void __init mcpm_smp_set_ops(void);
221237

222238
#else

0 commit comments

Comments
 (0)