@@ -12,11 +12,13 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/irqflags.h>
+#include <linux/cpu_pm.h>
 
 #include <asm/mcpm.h>
 #include <asm/cacheflush.h>
 #include <asm/idmap.h>
 #include <asm/cputype.h>
+#include <asm/suspend.h>
 
 extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
 
@@ -146,6 +148,56 @@ int mcpm_cpu_powered_up(void)
 	return 0;
 }
 
+#ifdef CONFIG_ARM_CPU_SUSPEND
+
+static int __init nocache_trampoline(unsigned long _arg)
+{
+	void (*cache_disable)(void) = (void *)_arg;
+	unsigned int mpidr = read_cpuid_mpidr();
+	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	phys_reset_t phys_reset;
+
+	mcpm_set_entry_vector(cpu, cluster, cpu_resume);
+	setup_mm_for_reboot();
+
+	__mcpm_cpu_going_down(cpu, cluster);
+	BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
+	cache_disable();
+	__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
+	__mcpm_cpu_down(cpu, cluster);
+
+	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+	phys_reset(virt_to_phys(mcpm_entry_point));
+	BUG();
+}
+
+int __init mcpm_loopback(void (*cache_disable)(void))
+{
+	int ret;
+
+	/*
+	 * We're going to soft-restart the current CPU through the
+	 * low-level MCPM code by leveraging the suspend/resume
+	 * infrastructure. Let's play it safe by using cpu_pm_enter()
+	 * in case the CPU init code path resets the VFP or similar.
+	 */
+	local_irq_disable();
+	local_fiq_disable();
+	ret = cpu_pm_enter();
+	if (!ret) {
+		ret = cpu_suspend((unsigned long)cache_disable, nocache_trampoline);
+		cpu_pm_exit();
+	}
+	local_fiq_enable();
+	local_irq_enable();
+	if (ret)
+		pr_err("%s returned %d\n", __func__, ret);
+	return ret;
+}
+
+#endif
+
 struct sync_struct mcpm_sync;
 
 /*
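For context, a platform's MCPM backend would typically call mcpm_loopback() once during early init, after registering its power operations: the boot CPU is then soft-restarted through the low-level MCPM power-down/power-up path, leaving the MCPM state machine consistent with reality from the start. Below is a minimal sketch of such a caller, under stated assumptions: the myplat_* names are hypothetical, and the mcpm_platform_ops callbacks are elided.

	#include <linux/init.h>
	#include <asm/cacheflush.h>
	#include <asm/mcpm.h>

	/* Hypothetical backend ops; the actual callbacks are elided here. */
	static const struct mcpm_platform_ops myplat_pm_ops;

	static void myplat_cluster_cache_disable(void)
	{
		/*
		 * Flush this CPU's dirty cache lines and exit coherency;
		 * invoked by nocache_trampoline() just before the soft reset.
		 */
		v7_exit_coherency_flush(all);
	}

	static int __init myplat_pm_init(void)
	{
		int ret;

		ret = mcpm_platform_register(&myplat_pm_ops);
		if (ret)
			return ret;

		/* Run the boot CPU through the MCPM entry point once. */
		return mcpm_loopback(myplat_cluster_cache_disable);
	}
	early_initcall(myplat_pm_init);

Note that cpu_suspend() saves and restores the CPU context around the trampoline, which is why the loopback can return to its caller as if nothing happened; the whole path is only built when CONFIG_ARM_CPU_SUSPEND is enabled.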