@@ -2217,16 +2217,46 @@ static int group_can_go_on(struct perf_event *event,
 	return can_add_hw;
 }
 
+/*
+ * Complement to update_event_times(). This computes the tstamp_* values to
+ * continue 'enabled' state from @now, and effectively discards the time
+ * between the prior tstamp_stopped and now (as we were in the OFF state, or
+ * just switched (context) time base).
+ *
+ * This further assumes '@event->state == INACTIVE' (we just came from OFF) and
+ * cannot have been scheduled in yet. And going into INACTIVE state means
+ * '@event->tstamp_stopped = @now'.
+ *
+ * Thus given the rules of update_event_times():
+ *
+ *   total_time_enabled = tstamp_stopped - tstamp_enabled
+ *   total_time_running = tstamp_stopped - tstamp_running
+ *
+ * We can insert 'tstamp_stopped == now' and reverse them to compute new
+ * tstamp_* values.
+ */
+static void __perf_event_enable_time(struct perf_event *event, u64 now)
+{
+	WARN_ON_ONCE(event->state != PERF_EVENT_STATE_INACTIVE);
+
+	event->tstamp_stopped = now;
+	event->tstamp_enabled = now - event->total_time_enabled;
+	event->tstamp_running = now - event->total_time_running;
+}
+
 static void add_event_to_ctx(struct perf_event *event,
 			     struct perf_event_context *ctx)
 {
 	u64 tstamp = perf_event_time(event);
 
 	list_add_event(event, ctx);
 	perf_group_attach(event);
-	event->tstamp_enabled = tstamp;
-	event->tstamp_running = tstamp;
-	event->tstamp_stopped = tstamp;
+	/*
+	 * We can be called with event->state == STATE_OFF when we create with
+	 * .disabled = 1. In that case the IOC_ENABLE will call this function.
+	 */
+	if (event->state == PERF_EVENT_STATE_INACTIVE)
+		__perf_event_enable_time(event, tstamp);
 }
 
 static void ctx_sched_out(struct perf_event_context *ctx,
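
The new comment encodes a small invariant: update_event_times() derives total_time_enabled as tstamp_stopped - tstamp_enabled (and likewise for running time), so setting tstamp_stopped = now and back-dating tstamp_enabled/tstamp_running by the totals accumulated so far leaves both totals unchanged at the moment of (re-)enabling; the span spent OFF is simply not counted. A minimal userspace sketch of that arithmetic (not kernel code; the toy_* struct and helpers only mirror the field names used in the diff):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative stand-in for the three timestamps and two totals. */
	struct toy_event {
		uint64_t tstamp_enabled, tstamp_running, tstamp_stopped;
		uint64_t total_time_enabled, total_time_running;
	};

	/* Same rules as update_event_times() for a stopped event. */
	static void toy_update_times(struct toy_event *e)
	{
		e->total_time_enabled = e->tstamp_stopped - e->tstamp_enabled;
		e->total_time_running = e->tstamp_stopped - e->tstamp_running;
	}

	/* Mirrors __perf_event_enable_time(): reverse the equations at @now. */
	static void toy_enable_time(struct toy_event *e, uint64_t now)
	{
		e->tstamp_stopped = now;
		e->tstamp_enabled = now - e->total_time_enabled;
		e->tstamp_running = now - e->total_time_running;
	}

	int main(void)
	{
		struct toy_event e = {
			/* enabled at t=100, ran from t=120, stopped at t=150 */
			.tstamp_enabled = 100, .tstamp_running = 120, .tstamp_stopped = 150,
		};

		toy_update_times(&e);		/* enabled=50, running=30 */

		toy_enable_time(&e, 400);	/* re-enable after a long OFF period */
		toy_update_times(&e);		/* the OFF span (150..400) is discarded */

		assert(e.total_time_enabled == 50 && e.total_time_running == 30);
		printf("enabled=%llu running=%llu\n",
		       (unsigned long long)e.total_time_enabled,
		       (unsigned long long)e.total_time_running);
		return 0;
	}
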
@@ -2471,10 +2501,11 @@ static void __perf_event_mark_enabled(struct perf_event *event)
 	u64 tstamp = perf_event_time(event);
 
 	event->state = PERF_EVENT_STATE_INACTIVE;
-	event->tstamp_enabled = tstamp - event->total_time_enabled;
+	__perf_event_enable_time(event, tstamp);
 	list_for_each_entry(sub, &event->sibling_list, group_entry) {
+		/* XXX should not be > INACTIVE if event isn't */
 		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
-			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
+			__perf_event_enable_time(sub, tstamp);
 	}
 }
 
@@ -5090,7 +5121,7 @@ static void perf_mmap_open(struct vm_area_struct *vma)
 	atomic_inc(&event->rb->aux_mmap_count);
 
 	if (event->pmu->event_mapped)
-		event->pmu->event_mapped(event);
+		event->pmu->event_mapped(event, vma->vm_mm);
 }
 
 static void perf_pmu_output_stop(struct perf_event *event);
@@ -5113,7 +5144,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 	unsigned long size = perf_data_size(rb);
 
 	if (event->pmu->event_unmapped)
-		event->pmu->event_unmapped(event);
+		event->pmu->event_unmapped(event, vma->vm_mm);
 
 	/*
 	 * rb->aux_mmap_count will always drop before rb->mmap_count and
@@ -5411,7 +5442,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	vma->vm_ops = &perf_mmap_vmops;
 
 	if (event->pmu->event_mapped)
-		event->pmu->event_mapped(event);
+		event->pmu->event_mapped(event, vma->vm_mm);
 
 	return ret;
 }
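
The three mmap call sites above now hand the mapping task's mm (vma->vm_mm) down to the PMU. The matching header change is not part of these hunks, but it implies that the event_mapped/event_unmapped members of struct pmu gain a struct mm_struct * parameter, presumably along these lines:

	/* Implied (not shown in these hunks) prototypes in struct pmu: */
	void (*event_mapped)	(struct perf_event *event, struct mm_struct *mm);
	void (*event_unmapped)	(struct perf_event *event, struct mm_struct *mm);

so a PMU can key per-mm state (for example, whether user-space counter access should be allowed in that address space) off the mm that actually performed the mmap(), rather than off current at callback time.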