Skip to content

Commit c042f7e

Browse files
author
Ingo Molnar
committed
Merge tag 'perf-urgent-for-mingo-4.17-20180420' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/urgent
Pull perf/urgent fixes and improvements from Arnaldo Carvalho de Melo: - Store context switch out type in PERF_RECORD_SWITCH[_CPU_WIDE]. The percentage of preempting and non-preempting context switches helps in understanding the nature of workloads (CPU or IO bound) that are running on a machine. This adds the kernel facility and userspace changes needed to show this information in 'perf script' and 'perf report -D' (Alexey Budankov) - Remove old error messages about things that are unlikely to be the root cause in modern systems (Andi Kleen) - Synchronize kernel ABI headers, v4.17-rc1 (Ingo Molnar) - Support MAP_FIXED_NOREPLACE, noticed when updating the tools/include/ copies (Arnaldo Carvalho de Melo) - Fixup BPF test using epoll_pwait syscall function probe, to cope with the syscall routine renames performed in this development cycle (Arnaldo Carvalho de Melo) - Fix sample_max_stack maximum check and do not proceed when an error has been detected; return early to avoid misidentifying errors (Jiri Olsa) - Add '\n' at the end of parse-options error messages (Ravi Bangoria) - Add s390 support for detailed/verbose PMU event description (Thomas Richter) Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com> Signed-off-by: Ingo Molnar <mingo@kernel.org>
2 parents 15a3e84 + 8a9fd83 commit c042f7e

File tree

35 files changed

+545
-496
lines changed

35 files changed

+545
-496
lines changed

include/linux/coresight-pmu.h

Lines changed: 1 addition & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,7 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
12
/*
23
* Copyright(C) 2015 Linaro Limited. All rights reserved.
34
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4-
*
5-
* This program is free software; you can redistribute it and/or modify it
6-
* under the terms of the GNU General Public License version 2 as published by
7-
* the Free Software Foundation.
8-
*
9-
* This program is distributed in the hope that it will be useful, but WITHOUT
10-
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11-
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12-
* more details.
13-
*
14-
* You should have received a copy of the GNU General Public License along with
15-
* this program. If not, see <http://www.gnu.org/licenses/>.
165
*/
176

187
#ifndef _LINUX_CORESIGHT_PMU_H

include/uapi/linux/perf_event.h

Lines changed: 15 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -650,11 +650,23 @@ struct perf_event_mmap_page {
650650
#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
651651
#define PERF_RECORD_MISC_SWITCH_OUT (1 << 13)
652652
/*
653-
* Indicates that the content of PERF_SAMPLE_IP points to
654-
* the actual instruction that triggered the event. See also
655-
* perf_event_attr::precise_ip.
653+
* These PERF_RECORD_MISC_* flags below are safely reused
654+
* for the following events:
655+
*
656+
* PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events
657+
* PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
658+
*
659+
*
660+
* PERF_RECORD_MISC_EXACT_IP:
661+
* Indicates that the content of PERF_SAMPLE_IP points to
662+
* the actual instruction that triggered the event. See also
663+
* perf_event_attr::precise_ip.
664+
*
665+
* PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
666+
* Indicates that thread was preempted in TASK_RUNNING state.
656667
*/
657668
#define PERF_RECORD_MISC_EXACT_IP (1 << 14)
669+
#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14)
658670
/*
659671
* Reserve the last bit to indicate some extended misc field
660672
*/

kernel/events/callchain.c

Lines changed: 11 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -119,23 +119,20 @@ int get_callchain_buffers(int event_max_stack)
119119
goto exit;
120120
}
121121

122-
if (count > 1) {
123-
/* If the allocation failed, give up */
124-
if (!callchain_cpus_entries)
125-
err = -ENOMEM;
126-
/*
127-
* If requesting per event more than the global cap,
128-
* return a different error to help userspace figure
129-
* this out.
130-
*
131-
* And also do it here so that we have &callchain_mutex held.
132-
*/
133-
if (event_max_stack > sysctl_perf_event_max_stack)
134-
err = -EOVERFLOW;
122+
/*
123+
* If requesting per event more than the global cap,
124+
* return a different error to help userspace figure
125+
* this out.
126+
*
127+
* And also do it here so that we have &callchain_mutex held.
128+
*/
129+
if (event_max_stack > sysctl_perf_event_max_stack) {
130+
err = -EOVERFLOW;
135131
goto exit;
136132
}
137133

138-
err = alloc_callchain_buffers();
134+
if (count == 1)
135+
err = alloc_callchain_buffers();
139136
exit:
140137
if (err)
141138
atomic_dec(&nr_callchain_events);

kernel/events/core.c

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7587,6 +7587,10 @@ static void perf_event_switch(struct task_struct *task,
75877587
},
75887588
};
75897589

7590+
if (!sched_in && task->state == TASK_RUNNING)
7591+
switch_event.event_id.header.misc |=
7592+
PERF_RECORD_MISC_SWITCH_OUT_PREEMPT;
7593+
75907594
perf_iterate_sb(perf_event_switch_output,
75917595
&switch_event,
75927596
NULL);
@@ -10205,9 +10209,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
1020510209
* __u16 sample size limit.
1020610210
*/
1020710211
if (attr->sample_stack_user >= USHRT_MAX)
10208-
ret = -EINVAL;
10212+
return -EINVAL;
1020910213
else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
10210-
ret = -EINVAL;
10214+
return -EINVAL;
1021110215
}
1021210216

1021310217
if (!attr->sample_max_stack)

tools/arch/arm/include/uapi/asm/kvm.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -135,6 +135,15 @@ struct kvm_arch_memory_slot {
135135
#define KVM_REG_ARM_CRM_SHIFT 7
136136
#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800
137137
#define KVM_REG_ARM_32_CRN_SHIFT 11
138+
/*
139+
* For KVM currently all guest registers are nonsecure, but we reserve a bit
140+
* in the encoding to distinguish secure from nonsecure for AArch32 system
141+
* registers that are banked by security. This is 1 for the secure banked
142+
* register, and 0 for the nonsecure banked register or if the register is
143+
* not banked by security.
144+
*/
145+
#define KVM_REG_ARM_SECURE_MASK 0x0000000010000000
146+
#define KVM_REG_ARM_SECURE_SHIFT 28
138147

139148
#define ARM_CP15_REG_SHIFT_MASK(x,n) \
140149
(((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK)

tools/arch/x86/include/asm/required-features.h

Lines changed: 1 addition & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -53,12 +53,6 @@
5353
# define NEED_MOVBE 0
5454
#endif
5555

56-
#ifdef CONFIG_X86_5LEVEL
57-
# define NEED_LA57 (1<<(X86_FEATURE_LA57 & 31))
58-
#else
59-
# define NEED_LA57 0
60-
#endif
61-
6256
#ifdef CONFIG_X86_64
6357
#ifdef CONFIG_PARAVIRT
6458
/* Paravirtualized systems may not have PSE or PGE available */
@@ -104,7 +98,7 @@
10498
#define REQUIRED_MASK13 0
10599
#define REQUIRED_MASK14 0
106100
#define REQUIRED_MASK15 0
107-
#define REQUIRED_MASK16 (NEED_LA57)
101+
#define REQUIRED_MASK16 0
108102
#define REQUIRED_MASK17 0
109103
#define REQUIRED_MASK18 0
110104
#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)

tools/arch/x86/include/uapi/asm/kvm.h

Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -354,8 +354,25 @@ struct kvm_xcrs {
354354
__u64 padding[16];
355355
};
356356

357-
/* definition of registers in kvm_run */
357+
#define KVM_SYNC_X86_REGS (1UL << 0)
358+
#define KVM_SYNC_X86_SREGS (1UL << 1)
359+
#define KVM_SYNC_X86_EVENTS (1UL << 2)
360+
361+
#define KVM_SYNC_X86_VALID_FIELDS \
362+
(KVM_SYNC_X86_REGS| \
363+
KVM_SYNC_X86_SREGS| \
364+
KVM_SYNC_X86_EVENTS)
365+
366+
/* kvm_sync_regs struct included by kvm_run struct */
358367
struct kvm_sync_regs {
368+
/* Members of this structure are potentially malicious.
369+
* Care must be taken by code reading, esp. interpreting,
370+
* data fields from them inside KVM to prevent TOCTOU and
371+
* double-fetch types of vulnerabilities.
372+
*/
373+
struct kvm_regs regs;
374+
struct kvm_sregs sregs;
375+
struct kvm_vcpu_events events;
359376
};
360377

361378
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)

tools/include/linux/coresight-pmu.h

Lines changed: 1 addition & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,7 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
12
/*
23
* Copyright(C) 2015 Linaro Limited. All rights reserved.
34
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4-
*
5-
* This program is free software; you can redistribute it and/or modify it
6-
* under the terms of the GNU General Public License version 2 as published by
7-
* the Free Software Foundation.
8-
*
9-
* This program is distributed in the hope that it will be useful, but WITHOUT
10-
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11-
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12-
* more details.
13-
*
14-
* You should have received a copy of the GNU General Public License along with
15-
* this program. If not, see <http://www.gnu.org/licenses/>.
165
*/
176

187
#ifndef _LINUX_CORESIGHT_PMU_H

tools/include/uapi/asm-generic/mman-common.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,9 @@
2727
# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */
2828
#endif
2929

30+
/* 0x0100 - 0x80000 flags are defined in asm-generic/mman.h */
31+
#define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */
32+
3033
/*
3134
* Flags for mlock
3235
*/

tools/include/uapi/linux/bpf.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -864,6 +864,7 @@ enum bpf_func_id {
864864
/* BPF_FUNC_skb_set_tunnel_key flags. */
865865
#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
866866
#define BPF_F_DONT_FRAGMENT (1ULL << 2)
867+
#define BPF_F_SEQ_NUMBER (1ULL << 3)
867868

868869
/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
869870
* BPF_FUNC_perf_event_read_value flags.

0 commit comments

Comments
 (0)