
Commit 120c547

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Catalin Marinas:

 - support for nr_cpus= command line argument (maxcpus was previously
   changed to allow secondary CPUs to be hot-plugged)

 - ARM PMU interrupt handling fix

 - fix potential TLB conflict in the hibernate code

 - improved handling of EL1 instruction aborts (better error reporting)

 - removal of useless jprobes code for stack saving/restoring

 - defconfig updates

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: defconfig: enable CONFIG_LOCALVERSION_AUTO
  arm64: defconfig: add options for virtualization and containers
  arm64: hibernate: handle allocation failures
  arm64: hibernate: avoid potential TLB conflict
  arm64: Handle el1 synchronous instruction aborts cleanly
  arm64: Remove stack duplicating code from jprobes
  drivers/perf: arm-pmu: Fix handling of SPI lacking "interrupt-affinity" property
  drivers/perf: arm-pmu: convert arm_pmu_mutex to spinlock
  arm64: Support hard limit of cpu count by nr_cpus
2 parents: 329f415 + 53fb45d

File tree

8 files changed: +136 additions, -86 deletions


arch/arm64/configs/defconfig

Lines changed: 46 additions & 7 deletions
@@ -1,4 +1,3 @@
-# CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
@@ -15,10 +14,14 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
 CONFIG_CGROUP_HUGETLB=y
-# CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
-# CONFIG_NET_NS is not set
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
@@ -71,6 +74,7 @@ CONFIG_PREEMPT=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CMA=y
+CONFIG_SECCOMP=y
 CONFIG_XEN=y
 CONFIG_KEXEC=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -84,10 +88,37 @@ CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
 CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
 CONFIG_BPF_JIT=y
 CONFIG_CFG80211=m
 CONFIG_MAC80211=m
@@ -103,6 +134,7 @@ CONFIG_MTD=y
 CONFIG_MTD_M25P80=y
 CONFIG_MTD_SPI_NOR=y
 CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
 CONFIG_VIRTIO_BLK=y
 CONFIG_SRAM=y
 # CONFIG_SCSI_PROC_FS is not set
@@ -120,7 +152,10 @@ CONFIG_SATA_SIL24=y
 CONFIG_PATA_PLATFORM=y
 CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
 CONFIG_TUN=y
+CONFIG_VETH=m
 CONFIG_VIRTIO_NET=y
 CONFIG_AMD_XGBE=y
 CONFIG_NET_XGENE=y
@@ -350,12 +385,16 @@ CONFIG_EXYNOS_ADC=y
 CONFIG_PWM_SAMSUNG=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA=y
 CONFIG_AUTOFS4_FS=y
-CONFIG_FUSE_FS=y
-CONFIG_CUSE=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y

arch/arm64/include/asm/kprobes.h

Lines changed: 0 additions & 2 deletions
@@ -22,7 +22,6 @@
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
 #define MAX_INSN_SIZE 1
-#define MAX_STACK_SIZE 128
 
 #define flush_insn_slot(p) do { } while (0)
 #define kretprobe_blacklist_size 0
@@ -47,7 +46,6 @@ struct kprobe_ctlblk {
         struct prev_kprobe prev_kprobe;
         struct kprobe_step_ctx ss_ctx;
         struct pt_regs jprobe_saved_regs;
-        char jprobes_stack[MAX_STACK_SIZE];
 };
 
 void arch_remove_kprobe(struct kprobe *);

arch/arm64/kernel/entry.S

Lines changed: 7 additions & 0 deletions
@@ -353,6 +353,8 @@ el1_sync:
         lsr     x24, x1, #ESR_ELx_EC_SHIFT      // exception class
         cmp     x24, #ESR_ELx_EC_DABT_CUR       // data abort in EL1
         b.eq    el1_da
+        cmp     x24, #ESR_ELx_EC_IABT_CUR       // instruction abort in EL1
+        b.eq    el1_ia
         cmp     x24, #ESR_ELx_EC_SYS64          // configurable trap
         b.eq    el1_undef
         cmp     x24, #ESR_ELx_EC_SP_ALIGN       // stack alignment exception
@@ -364,6 +366,11 @@ el1_sync:
         cmp     x24, #ESR_ELx_EC_BREAKPT_CUR    // debug exception in EL1
         b.ge    el1_dbg
         b       el1_inv
+
+el1_ia:
+        /*
+         * Fall through to the Data abort case
+         */
 el1_da:
         /*
          * Data abort handling

arch/arm64/kernel/hibernate.c

Lines changed: 49 additions & 33 deletions
@@ -35,6 +35,7 @@
 #include <asm/sections.h>
 #include <asm/smp.h>
 #include <asm/suspend.h>
+#include <asm/sysreg.h>
 #include <asm/virt.h>
 
 /*
@@ -217,12 +218,22 @@ static int create_safe_exec_page(void *src_start, size_t length,
         set_pte(pte, __pte(virt_to_phys((void *)dst) |
                            pgprot_val(PAGE_KERNEL_EXEC)));
 
-        /* Load our new page tables */
-        asm volatile("msr ttbr0_el1, %0;"
-                     "isb;"
-                     "tlbi vmalle1is;"
-                     "dsb ish;"
-                     "isb" : : "r"(virt_to_phys(pgd)));
+        /*
+         * Load our new page tables. A strict BBM approach requires that we
+         * ensure that TLBs are free of any entries that may overlap with the
+         * global mappings we are about to install.
+         *
+         * For a real hibernate/resume cycle TTBR0 currently points to a zero
+         * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
+         * runtime services), while for a userspace-driven test_resume cycle it
+         * points to userspace page tables (and we must point it at a zero page
+         * ourselves). Elsewhere we only (un)install the idmap with preemption
+         * disabled, so T0SZ should be as required regardless.
+         */
+        cpu_set_reserved_ttbr0();
+        local_flush_tlb_all();
+        write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+        isb();
 
         *phys_dst_addr = virt_to_phys((void *)dst);
 
@@ -393,6 +404,38 @@ int swsusp_arch_resume(void)
         void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
                                           void *, phys_addr_t, phys_addr_t);
 
+        /*
+         * Restoring the memory image will overwrite the ttbr1 page tables.
+         * Create a second copy of just the linear map, and use this when
+         * restoring.
+         */
+        tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+        if (!tmp_pg_dir) {
+                pr_err("Failed to allocate memory for temporary page tables.");
+                rc = -ENOMEM;
+                goto out;
+        }
+        rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+        if (rc)
+                goto out;
+
+        /*
+         * Since we only copied the linear map, we need to find restore_pblist's
+         * linear map address.
+         */
+        lm_restore_pblist = LMADDR(restore_pblist);
+
+        /*
+         * We need a zero page that is zero before & after resume in order to
+         * to break before make on the ttbr1 page tables.
+         */
+        zero_page = (void *)get_safe_page(GFP_ATOMIC);
+        if (!zero_page) {
+                pr_err("Failed to allocate zero page.");
+                rc = -ENOMEM;
+                goto out;
+        }
+
         /*
          * Locate the exit code in the bottom-but-one page, so that *NULL
          * still has disastrous affects.
@@ -418,27 +461,6 @@ int swsusp_arch_resume(void)
          */
         __flush_dcache_area(hibernate_exit, exit_size);
 
-        /*
-         * Restoring the memory image will overwrite the ttbr1 page tables.
-         * Create a second copy of just the linear map, and use this when
-         * restoring.
-         */
-        tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
-        if (!tmp_pg_dir) {
-                pr_err("Failed to allocate memory for temporary page tables.");
-                rc = -ENOMEM;
-                goto out;
-        }
-        rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
-        if (rc)
-                goto out;
-
-        /*
-         * Since we only copied the linear map, we need to find restore_pblist's
-         * linear map address.
-         */
-        lm_restore_pblist = LMADDR(restore_pblist);
-
         /*
          * KASLR will cause the el2 vectors to be in a different location in
          * the resumed kernel. Load hibernate's temporary copy into el2.
@@ -453,12 +475,6 @@ int swsusp_arch_resume(void)
                 __hyp_set_vectors(el2_vectors);
         }
 
-        /*
-         * We need a zero page that is zero before & after resume in order to
-         * to break before make on the ttbr1 page tables.
-         */
-        zero_page = (void *)get_safe_page(GFP_ATOMIC);
-
         hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
                        resume_hdr.reenter_kernel, lm_restore_pblist,
                        resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
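
Note on the TTBR0 switch in create_safe_exec_page() above: the replacement code follows
a break-before-make sequence - park TTBR0_EL1 on the reserved zero page, invalidate the
local TLB, then install the new tables. A hedged, user-space mock of that ordering (the
real helpers cpu_set_reserved_ttbr0(), local_flush_tlb_all(), write_sysreg() and isb()
are kernel-only, so they are stubbed here purely to show the sequence):

#include <stdio.h>

static void set_reserved_ttbr0(void)
{
        puts("1. point TTBR0_EL1 at the reserved zero page (break)");
}

static void flush_local_tlb(void)
{
        puts("2. invalidate local TLB entries so no stale mappings survive");
}

static void install_new_tables(unsigned long pgd_phys)
{
        printf("3. write the new table base %#lx to TTBR0_EL1, then ISB (make)\n",
               pgd_phys);
}

int main(void)
{
        set_reserved_ttbr0();
        flush_local_tlb();
        install_new_tables(0x80000000UL);       /* hypothetical physical address */
        return 0;
}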

arch/arm64/kernel/probes/kprobes.c

Lines changed: 5 additions & 26 deletions
@@ -41,18 +41,6 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 static void __kprobes
 post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
 
-static inline unsigned long min_stack_size(unsigned long addr)
-{
-        unsigned long size;
-
-        if (on_irq_stack(addr, raw_smp_processor_id()))
-                size = IRQ_STACK_PTR(raw_smp_processor_id()) - addr;
-        else
-                size = (unsigned long)current_thread_info() + THREAD_START_SP - addr;
-
-        return min(size, FIELD_SIZEOF(struct kprobe_ctlblk, jprobes_stack));
-}
-
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
         /* prepare insn slot */
@@ -489,20 +477,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
         struct jprobe *jp = container_of(p, struct jprobe, kp);
         struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-        long stack_ptr = kernel_stack_pointer(regs);
 
         kcb->jprobe_saved_regs = *regs;
         /*
-         * As Linus pointed out, gcc assumes that the callee
-         * owns the argument space and could overwrite it, e.g.
-         * tailcall optimization. So, to be absolutely safe
-         * we also save and restore enough stack bytes to cover
-         * the argument area.
+         * Since we can't be sure where in the stack frame "stacked"
+         * pass-by-value arguments are stored we just don't try to
+         * duplicate any of the stack. Do not use jprobes on functions that
+         * use more than 64 bytes (after padding each to an 8 byte boundary)
+         * of arguments, or pass individual arguments larger than 16 bytes.
          */
-        kasan_disable_current();
-        memcpy(kcb->jprobes_stack, (void *)stack_ptr,
-               min_stack_size(stack_ptr));
-        kasan_enable_current();
 
         instruction_pointer_set(regs, (unsigned long) jp->entry);
         preempt_disable();
@@ -554,10 +537,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
         }
         unpause_graph_tracing();
         *regs = kcb->jprobe_saved_regs;
-        kasan_disable_current();
-        memcpy((void *)stack_addr, kcb->jprobes_stack,
-               min_stack_size(stack_addr));
-        kasan_enable_current();
         preempt_enable_no_resched();
         return 1;
 }
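
A possible reading of the new comment, against the AAPCS64 procedure call standard
(an interpretation, not part of the patch): the first eight register-sized
integer/pointer arguments arrive in x0-x7 and are already captured in
jprobe_saved_regs, so the 64-byte figure corresponds to arguments that never touch
the stack; a ninth argument, or an aggregate too large to travel in registers, lands
on the stack that the jprobe no longer duplicates. Hypothetical signatures, for
illustration only:

#include <stdio.h>

/* Still reasonable to jprobe: 8 x 8-byte arguments (64 bytes total) are all
 * passed in registers x0-x7 under AAPCS64. */
static long traceable(long a, long b, long c, long d,
                      long e, long f, long g, long h)
{
        return a + b + c + d + e + f + g + h;
}

/* Not suitable after this change: the ninth argument is passed on the
 * caller's stack, which setjmp_pre_handler() no longer saves or restores. */
static long not_traceable(long a, long b, long c, long d,
                          long e, long f, long g, long h, long i)
{
        return a + b + c + d + e + f + g + h + i;
}

int main(void)
{
        printf("%ld %ld\n", traceable(1, 2, 3, 4, 5, 6, 7, 8),
               not_traceable(1, 2, 3, 4, 5, 6, 7, 8, 9));
        return 0;
}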

arch/arm64/kernel/smp.c

Lines changed: 4 additions & 4 deletions
@@ -661,9 +661,9 @@ void __init smp_init_cpus(void)
                 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
                                       acpi_parse_gic_cpu_interface, 0);
 
-        if (cpu_count > NR_CPUS)
-                pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
-                        cpu_count, NR_CPUS);
+        if (cpu_count > nr_cpu_ids)
+                pr_warn("Number of cores (%d) exceeds configured maximum of %d - clipping\n",
+                        cpu_count, nr_cpu_ids);
 
         if (!bootcpu_valid) {
                 pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
@@ -677,7 +677,7 @@ void __init smp_init_cpus(void)
          * with entries in cpu_logical_map while initializing the cpus.
          * If the cpu set-up fails, invalidate the cpu_logical_map entry.
          */
-        for (i = 1; i < NR_CPUS; i++) {
+        for (i = 1; i < nr_cpu_ids; i++) {
                 if (cpu_logical_map(i) != INVALID_HWID) {
                         if (smp_cpu_setup(i))
                                 cpu_logical_map(i) = INVALID_HWID;
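
For context, NR_CPUS is the compile-time ceiling while nr_cpu_ids is the runtime
limit that an nr_cpus= boot argument can lower; the loop above now honours the
runtime value. A minimal user-space sketch of that clamping pattern (illustrative
names and values, not the kernel's implementation):

#include <stdio.h>

#define NR_CPUS 64                              /* compile-time maximum */

static unsigned int nr_cpu_ids = NR_CPUS;       /* runtime limit, may be lowered */

/* Mimic parsing "nr_cpus=N" from the boot command line. */
static void setup_nr_cpus(unsigned int requested)
{
        if (requested >= 1 && requested < nr_cpu_ids)
                nr_cpu_ids = requested;
}

int main(void)
{
        unsigned int cpu, cpu_count = 8;        /* CPUs described by firmware */

        setup_nr_cpus(4);                       /* e.g. booting with nr_cpus=4 */

        if (cpu_count > nr_cpu_ids)
                printf("Number of cores (%u) exceeds configured maximum of %u - clipping\n",
                       cpu_count, nr_cpu_ids);

        for (cpu = 1; cpu < nr_cpu_ids; cpu++)
                printf("would set up secondary CPU %u\n", cpu);
        return 0;
}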

arch/arm64/mm/fault.c

Lines changed: 12 additions & 2 deletions
@@ -153,6 +153,11 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 }
 #endif
 
+static bool is_el1_instruction_abort(unsigned int esr)
+{
+        return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
+}
+
 /*
  * The kernel tried to access some page that wasn't present.
  */
@@ -161,8 +166,9 @@ static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
 {
         /*
          * Are we prepared to handle this kernel fault?
+         * We are almost certainly not prepared to handle instruction faults.
          */
-        if (fixup_exception(regs))
+        if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
                 return;
 
         /*
@@ -267,7 +273,8 @@ static inline bool is_permission_fault(unsigned int esr)
         unsigned int ec = ESR_ELx_EC(esr);
         unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
 
-        return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
+        return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM) ||
+               (ec == ESR_ELx_EC_IABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
 }
 
 static bool is_el0_instruction_abort(unsigned int esr)
@@ -312,6 +319,9 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
                 if (regs->orig_addr_limit == KERNEL_DS)
                         die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
 
+                if (is_el1_instruction_abort(esr))
+                        die("Attempting to execute userspace memory", regs, esr);
+
                 if (!search_exception_tables(regs->pc))
                         die("Accessing user space memory outside uaccess.h routines", regs, esr);
         }
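
For reference, the exception class tested by is_el1_instruction_abort() lives in
bits [31:26] of ESR_ELx; a standalone decoder with the relevant ARMv8 constants
(self-contained here rather than pulled from asm/esr.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_EC_SHIFT        26
#define ESR_ELx_EC_MASK         (0x3fu << ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC_IABT_CUR     0x21u   /* instruction abort from the current EL */
#define ESR_ELx_EC_DABT_CUR     0x25u   /* data abort from the current EL */

#define ESR_ELx_EC(esr)         (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)

static bool is_el1_instruction_abort(uint32_t esr)
{
        return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}

int main(void)
{
        uint32_t esr = (uint32_t)ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT;

        printf("EL1 instruction abort: %s\n",
               is_el1_instruction_abort(esr) ? "yes" : "no");
        return 0;
}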
