#include <linux/perf_event.h>
#include <linux/slab.h>
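/*
 * From perf_output_space(): check whether a record fits between the
 * write head and the reader's tail.  perf_data_size() is a power of
 * two, so distances can be computed modulo the buffer size.
 */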
        mask = perf_data_size(rb) - 1;

        offset = (offset - tail) & mask;
        head   = (head   - tail) & mask;

        /* the new head must not wrap past the reader's tail */
        if ((int)(head - offset) < 0)
                return false;
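/*
 * From perf_output_wakeup(): mark the event and let its irq_work do
 * the actual wakeup in a context where that is safe (not from NMI).
 */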
        handle->event->pending_wakeup = 1;
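/*
 * From perf_output_put_handle(): once the outermost writer has
 * published the new data_head, wake the reader if the wakeup
 * watermark was crossed.
 */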
                perf_output_wakeup(handle);
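/*
 * The following fragments are from the output-begin path
 * (perf_output_begin()): reserve space for a record, accounting for
 * a possible PERF_RECORD_LOST header.
 */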
        struct perf_sample_data sample_data;

        /* inherited events write into their parent's buffer */
        if (event->parent)
                event = event->parent;

        perf_output_get_handle(handle);

        if (unlikely(!perf_output_space(rb, tail, offset, head)))
                goto fail;

        /* the fail path undoes the reservation: */
        perf_output_put_handle(handle);
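/*
 * perf_output_copy(): copy @len bytes from @buf into the reserved
 * area, advancing the handle.
 */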
unsigned int perf_output_copy(struct perf_output_handle *handle,
                              const void *buf, unsigned int len)
{
        return __output_copy(handle, buf, len);
}
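/*
 * perf_output_skip(): advance the handle without copying anything;
 * __output_skip() takes a NULL source for that reason.
 */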
        return __output_skip(handle, NULL, len);
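/* From perf_output_end(): commit the record and drop the handle. */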
        perf_output_put_handle(handle);
#ifndef CONFIG_PERF_USE_VMALLOC
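/*
 * Back perf_mmap() with regular GFP_KERNEL pages: the buffer is an
 * array of individually allocated pages, each taken from the target
 * CPU's node by perf_mmap_alloc_page().
 */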
static void *perf_mmap_alloc_page(int cpu)
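/*
 * From rb_alloc(): the struct ring_buffer is followed by an array of
 * nr_pages data-page pointers, sized into the same allocation.
 */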
        size += nr_pages * sizeof(void *);

        rb->user_page = perf_mmap_alloc_page(cpu);

        for (i = 0; i < nr_pages; i++) {
                rb->data_pages[i] = perf_mmap_alloc_page(cpu);
                if (!rb->data_pages[i])
                        goto fail_data_pages;
        }
        ring_buffer_init(rb, watermark, flags);

        return rb;

fail_data_pages:
        for (i--; i >= 0; i--)
                free_page((unsigned long)rb->data_pages[i]);
static void perf_mmap_free_page(unsigned long addr)
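/* From rb_free(): release the user page and every data page. */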
        perf_mmap_free_page((unsigned long)rb->user_page);
        for (i = 0; i < rb->nr_pages; i++)
                perf_mmap_free_page((unsigned long)rb->data_pages[i]);
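#else

/*
 * CONFIG_PERF_USE_VMALLOC (architectures with D-cache aliasing): the
 * whole buffer is one vmalloc'ed area, treated as a single "page" of
 * order page_order(rb).
 */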
        /* perf_mmap_to_page(): the area spans 2^page_order(rb) pages */
        if (pgoff > (1UL << page_order(rb)))
                return NULL;
static void perf_mmap_unmark_page(void *addr)
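/*
 * From rb_free_work(): page->mapping is cleared for each page before
 * the area is vfree()d; freeing is deferred to a workqueue, presumably
 * because vfree() must not be called from the atomic contexts in which
 * rb_free() can run.
 */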
        nr = 1 << page_order(rb);

        /* nr data pages plus the user page in front of them */
        for (i = 0; i < nr + 1; i++)
                perf_mmap_unmark_page(base + (i * PAGE_SIZE));
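/*
 * The vmalloc flavour of rb_alloc() needs only one data_pages[] slot:
 * the user page and the data area live in the same allocation.
 */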
        size += sizeof(void *);

        rb->page_order = ilog2(nr_pages);
        ring_buffer_init(rb, watermark, flags);