
Commit 028713a

olsajiri authored and acme committed
perf trace: Add ordered processing
Sort events to provide the precise outcome of ordered events, just like
is done with 'perf report' and 'perf top'.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Dmitry Levin <ldv@altlinux.org>
Cc: Eugene Syromiatnikov <esyr@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Luis Cláudio Gonçalves <lclaudio@uudg.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20181205160509.1168-9-jolsa@kernel.org
[ split from a larger patch, added trace__ prefixes to new 'struct trace' methods ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
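The policy this patch adopts is: queue each event keyed by its sample
timestamp, and deliver events in sorted order only once they are older than
the newest timestamp seen minus one second of slack (NSEC_PER_SEC). The
program below is a minimal, self-contained C sketch of that flush policy,
not the perf implementation; every identifier in it apart from NSEC_PER_SEC
is made up for illustration.

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define NSEC_PER_SEC 1000000000ULL

	struct toy_event {
		uint64_t timestamp;
	};

	static int cmp_ts(const void *a, const void *b)
	{
		const struct toy_event *ea = a, *eb = b;

		return (ea->timestamp > eb->timestamp) - (ea->timestamp < eb->timestamp);
	}

	/* Deliver (print) all queued events with ts < flush, keep the rest. */
	static size_t flush_time(struct toy_event *q, size_t n, uint64_t flush)
	{
		size_t kept = 0;

		qsort(q, n, sizeof(*q), cmp_ts);
		for (size_t i = 0; i < n; i++) {
			if (q[i].timestamp < flush)
				printf("deliver %llu\n", (unsigned long long)q[i].timestamp);
			else
				q[kept++] = q[i];
		}
		return kept;
	}

	int main(void)
	{
		/* Out-of-order arrivals, as when reading several mmap rings. */
		struct toy_event q[] = {
			{ 3 * NSEC_PER_SEC }, { 1 * NSEC_PER_SEC }, { 2 * NSEC_PER_SEC },
		};
		size_t n = sizeof(q) / sizeof(q[0]);
		uint64_t last = 3 * NSEC_PER_SEC;	/* newest timestamp seen */

		/* Flush everything more than one second older than 'last'. */
		n = flush_time(q, n, last - NSEC_PER_SEC);
		printf("%zu event(s) still queued\n", n);
		return 0;
	}

Run, this delivers only the 1s event; the 2s and 3s events stay queued
because they are within the one-second window of the newest timestamp.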
1 parent 83356b3 commit 028713a

1 file changed

tools/perf/builtin-trace.c

Lines changed: 51 additions & 1 deletion
@@ -127,6 +127,10 @@ struct trace {
 	bool			force;
 	bool			vfs_getname;
 	int			trace_pgfaults;
+	struct {
+		struct ordered_events	data;
+		u64			last;
+	} oe;
 };
 
 struct tp_field {
@@ -2652,6 +2656,42 @@ static int trace__deliver_event(struct trace *trace, union perf_event *event)
 	return 0;
 }
 
+static int trace__flush_ordered_events(struct trace *trace)
+{
+	u64 first = ordered_events__first_time(&trace->oe.data);
+	u64 flush = trace->oe.last - NSEC_PER_SEC;
+
+	/* Is there some thing to flush.. */
+	if (first && first < flush)
+		return ordered_events__flush_time(&trace->oe.data, flush);
+
+	return 0;
+}
+
+static int trace__deliver_ordered_event(struct trace *trace, union perf_event *event)
+{
+	struct perf_evlist *evlist = trace->evlist;
+	int err;
+
+	err = perf_evlist__parse_sample_timestamp(evlist, event, &trace->oe.last);
+	if (err && err != -1)
+		return err;
+
+	err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
+	if (err)
+		return err;
+
+	return trace__flush_ordered_events(trace);
+}
+
+static int ordered_events__deliver_event(struct ordered_events *oe,
+					 struct ordered_event *event)
+{
+	struct trace *trace = container_of(oe, struct trace, oe.data);
+
+	return trace__deliver_event(trace, event->event);
+}
+
 static int trace__run(struct trace *trace, int argc, const char **argv)
 {
 	struct perf_evlist *evlist = trace->evlist;
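Note how ordered_events__deliver_event() receives only the embedded
struct ordered_events and recovers the owning struct trace with
container_of(). Below is a standalone sketch of that pattern;
container_of()/offsetof() behave as in the kernel sources, while the
toy_* names are hypothetical.

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct toy_oe { int queued; };

	struct toy_trace {
		int nr_delivered;
		struct {
			struct toy_oe data;	/* embedded, like trace->oe.data */
		} oe;
	};

	/* The callback sees only the member... */
	static void deliver(struct toy_oe *oe)
	{
		/* ...and walks back to the enclosing object. */
		struct toy_trace *trace = container_of(oe, struct toy_trace, oe.data);

		trace->nr_delivered++;
	}

	int main(void)
	{
		struct toy_trace trace = { 0 };

		deliver(&trace.oe.data);
		printf("%d\n", trace.nr_delivered);	/* prints 1 */
		return 0;
	}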
@@ -2819,7 +2859,9 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
 		while ((event = perf_mmap__read_event(md)) != NULL) {
 			++trace->nr_events;
 
-			trace__deliver_event(trace, event);
+			err = trace__deliver_ordered_event(trace, event);
+			if (err)
+				goto out_disable;
 
 			perf_mmap__consume(md);
 
@@ -2842,6 +2884,9 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
 				draining = true;
 
 			goto again;
+		} else {
+			if (trace__flush_ordered_events(trace))
+				goto out_disable;
 		}
 	} else {
 		goto again;
@@ -2852,6 +2897,8 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
 
 	perf_evlist__disable(evlist);
 
+	ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
+
 	if (!err) {
 		if (trace->summary)
 			trace__fprintf_thread_summary(trace, trace->output);
@@ -3562,6 +3609,9 @@ int cmd_trace(int argc, const char **argv)
 		}
 	}
 
+	ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
+	ordered_events__set_copy_on_queue(&trace.oe.data, true);
+
 	/*
 	 * If we are augmenting syscalls, then combine what we put in the
 	 * __augmented_syscalls__ BPF map with what is in the
