|
 #include <linux/types.h>
 #include <linux/bitops.h>
 #include <linux/log2.h>
+#include <linux/string.h>

+#include <sys/param.h>
 #include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
+#include <limits.h>
 #include <errno.h>
+#include <linux/list.h>

 #include "../perf.h"
 #include "util.h"
@@ -122,6 +126,241 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
 	}
 }

+#define AUXTRACE_INIT_NR_QUEUES 32
+
+static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
+{
+	struct auxtrace_queue *queue_array;
+	unsigned int max_nr_queues, i;
+
+	max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
+	if (nr_queues > max_nr_queues)
+		return NULL;
+
+	queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
+	if (!queue_array)
+		return NULL;
+
+	for (i = 0; i < nr_queues; i++) {
+		INIT_LIST_HEAD(&queue_array[i].head);
+		queue_array[i].priv = NULL;
+	}
+
+	return queue_array;
+}
+
+int auxtrace_queues__init(struct auxtrace_queues *queues)
+{
+	queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
+	queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
+	if (!queues->queue_array)
+		return -ENOMEM;
+	return 0;
+}
+
+static int auxtrace_queues__grow(struct auxtrace_queues *queues,
+				 unsigned int new_nr_queues)
+{
+	unsigned int nr_queues = queues->nr_queues;
+	struct auxtrace_queue *queue_array;
+	unsigned int i;
+
+	if (!nr_queues)
+		nr_queues = AUXTRACE_INIT_NR_QUEUES;
+
+	while (nr_queues && nr_queues < new_nr_queues)
+		nr_queues <<= 1;
+
+	if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
+		return -EINVAL;
+
+	queue_array = auxtrace_alloc_queue_array(nr_queues);
+	if (!queue_array)
+		return -ENOMEM;
+
+	for (i = 0; i < queues->nr_queues; i++) {
+		list_splice_tail(&queues->queue_array[i].head,
+				 &queue_array[i].head);
+		queue_array[i].priv = queues->queue_array[i].priv;
+	}
+
+	queues->nr_queues = nr_queues;
+	queues->queue_array = queue_array;
+
+	return 0;
+}
+
+static void *auxtrace_copy_data(u64 size, struct perf_session *session)
+{
+	int fd = perf_data_file__fd(session->file);
+	void *p;
+	ssize_t ret;
+
+	if (size > SSIZE_MAX)
+		return NULL;
+
+	p = malloc(size);
+	if (!p)
+		return NULL;
+
+	ret = readn(fd, p, size);
+	if (ret != (ssize_t)size) {
+		free(p);
+		return NULL;
+	}
+
+	return p;
+}
+
+static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
+				       unsigned int idx,
+				       struct auxtrace_buffer *buffer)
+{
+	struct auxtrace_queue *queue;
+	int err;
+
+	if (idx >= queues->nr_queues) {
+		err = auxtrace_queues__grow(queues, idx + 1);
+		if (err)
+			return err;
+	}
+
+	queue = &queues->queue_array[idx];
+
+	if (!queue->set) {
+		queue->set = true;
+		queue->tid = buffer->tid;
+		queue->cpu = buffer->cpu;
+	} else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
+		pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
+		       queue->cpu, queue->tid, buffer->cpu, buffer->tid);
+		return -EINVAL;
+	}
+
+	buffer->buffer_nr = queues->next_buffer_nr++;
+
+	list_add_tail(&buffer->list, &queue->head);
+
+	queues->new_data = true;
+	queues->populated = true;
+
+	return 0;
+}
+
+/* Limit buffers to 32MiB on 32-bit */
+#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)
+
+static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
+					 unsigned int idx,
+					 struct auxtrace_buffer *buffer)
+{
+	u64 sz = buffer->size;
+	bool consecutive = false;
+	struct auxtrace_buffer *b;
+	int err;
+
+	while (sz > BUFFER_LIMIT_FOR_32_BIT) {
+		b = memdup(buffer, sizeof(struct auxtrace_buffer));
+		if (!b)
+			return -ENOMEM;
+		b->size = BUFFER_LIMIT_FOR_32_BIT;
+		b->consecutive = consecutive;
+		err = auxtrace_queues__add_buffer(queues, idx, b);
+		if (err) {
+			auxtrace_buffer__free(b);
+			return err;
+		}
+		buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
+		sz -= BUFFER_LIMIT_FOR_32_BIT;
+		consecutive = true;
+	}
+
+	buffer->size = sz;
+	buffer->consecutive = consecutive;
+
+	return 0;
+}
+
+static int auxtrace_queues__add_event_buffer(struct auxtrace_queues *queues,
+					     struct perf_session *session,
+					     unsigned int idx,
+					     struct auxtrace_buffer *buffer)
+{
+	if (session->one_mmap) {
+		buffer->data = buffer->data_offset - session->one_mmap_offset +
+			       session->one_mmap_addr;
+	} else if (perf_data_file__is_pipe(session->file)) {
+		buffer->data = auxtrace_copy_data(buffer->size, session);
+		if (!buffer->data)
+			return -ENOMEM;
+		buffer->data_needs_freeing = true;
+	} else if (BITS_PER_LONG == 32 &&
+		   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
+		int err;
+
+		err = auxtrace_queues__split_buffer(queues, idx, buffer);
+		if (err)
+			return err;
+	}
+
+	return auxtrace_queues__add_buffer(queues, idx, buffer);
+}
+
+int auxtrace_queues__add_event(struct auxtrace_queues *queues,
+			       struct perf_session *session,
+			       union perf_event *event, off_t data_offset,
+			       struct auxtrace_buffer **buffer_ptr)
+{
+	struct auxtrace_buffer *buffer;
+	unsigned int idx;
+	int err;
+
+	buffer = zalloc(sizeof(struct auxtrace_buffer));
+	if (!buffer)
+		return -ENOMEM;
+
+	buffer->pid = -1;
+	buffer->tid = event->auxtrace.tid;
+	buffer->cpu = event->auxtrace.cpu;
+	buffer->data_offset = data_offset;
+	buffer->offset = event->auxtrace.offset;
+	buffer->reference = event->auxtrace.reference;
+	buffer->size = event->auxtrace.size;
+	idx = event->auxtrace.idx;
+
+	err = auxtrace_queues__add_event_buffer(queues, session, idx, buffer);
+	if (err)
+		goto out_err;
+
+	if (buffer_ptr)
+		*buffer_ptr = buffer;
+
+	return 0;
+
+out_err:
+	auxtrace_buffer__free(buffer);
+	return err;
+}
+
+void auxtrace_queues__free(struct auxtrace_queues *queues)
+{
+	unsigned int i;
+
+	for (i = 0; i < queues->nr_queues; i++) {
+		while (!list_empty(&queues->queue_array[i].head)) {
+			struct auxtrace_buffer *buffer;
+
+			buffer = list_entry(queues->queue_array[i].head.next,
+					    struct auxtrace_buffer, list);
+			list_del(&buffer->list);
+			auxtrace_buffer__free(buffer);
+		}
+	}
+
+	zfree(&queues->queue_array);
+	queues->nr_queues = 0;
+}
+
 size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr)
 {
 	if (itr)
@@ -174,6 +413,72 @@ auxtrace_record__init(struct perf_evlist *evlist __maybe_unused, int *err)
 	return NULL;
 }

+struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
+					      struct auxtrace_buffer *buffer)
+{
+	if (buffer) {
+		if (list_is_last(&buffer->list, &queue->head))
+			return NULL;
+		return list_entry(buffer->list.next, struct auxtrace_buffer,
+				  list);
+	} else {
+		if (list_empty(&queue->head))
+			return NULL;
+		return list_entry(queue->head.next, struct auxtrace_buffer,
+				  list);
+	}
+}
+
+void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
+{
+	size_t adj = buffer->data_offset & (page_size - 1);
+	size_t size = buffer->size + adj;
+	off_t file_offset = buffer->data_offset - adj;
+	void *addr;
+
+	if (buffer->data)
+		return buffer->data;
+
+	addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset);
+	if (addr == MAP_FAILED)
+		return NULL;
+
+	buffer->mmap_addr = addr;
+	buffer->mmap_size = size;
+
+	buffer->data = addr + adj;
+
+	return buffer->data;
+}
+
+void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
+{
+	if (!buffer->data || !buffer->mmap_addr)
+		return;
+	munmap(buffer->mmap_addr, buffer->mmap_size);
+	buffer->mmap_addr = NULL;
+	buffer->mmap_size = 0;
+	buffer->data = NULL;
+	buffer->use_data = NULL;
+}
+
+void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
+{
+	auxtrace_buffer__put_data(buffer);
+	if (buffer->data_needs_freeing) {
+		buffer->data_needs_freeing = false;
+		zfree(&buffer->data);
+		buffer->use_data = NULL;
+		buffer->size = 0;
+	}
+}
+
+void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
+{
+	auxtrace_buffer__drop_data(buffer);
+	free(buffer);
+}
+
 void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
 			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
 			  const char *msg)
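
Not part of the commit itself, but a rough sketch of how a PMU-specific decoder might drive the API added above: file each AUX area trace event into its per-cpu/per-tid queue as it is read from the perf.data file, then walk every queue buffer by buffer during decoding. The my_*() helper names below are invented for illustration only; the auxtrace_*() calls and the struct fields they use come from the diff.

/*
 * Hypothetical usage sketch (names other than auxtrace_*() are invented).
 */
static int my_queue_auxtrace_event(struct auxtrace_queues *queues,
				   struct perf_session *session,
				   union perf_event *event,
				   off_t data_offset)
{
	/* File the AUX data described by 'event' into its per-cpu/tid queue */
	return auxtrace_queues__add_event(queues, session, event,
					  data_offset, NULL);
}

static int my_decode_queue(struct auxtrace_queue *queue, int fd)
{
	struct auxtrace_buffer *buffer = NULL;

	/* Walk the queue's buffers in the order they were added */
	while ((buffer = auxtrace_buffer__next(queue, buffer))) {
		void *data = auxtrace_buffer__get_data(buffer, fd);

		if (!data)
			return -ENOMEM;
		/* ... decode buffer->size bytes at 'data' here ... */
		auxtrace_buffer__put_data(buffer);
	}

	return 0;
}

A caller would typically run auxtrace_queues__init() once, route each AUX area trace event (PERF_RECORD_AUXTRACE) through the first helper, run the second helper per queue when decoding, and release everything with auxtrace_queues__free().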
|
|