Linux Kernel 3.7.1
ftrace.h
1 /*
2  * Stage 1 of the trace events.
3  *
4  * Override the macros in <trace/trace_events.h> to include the following:
5  *
6  * struct ftrace_raw_<call> {
7  * struct trace_entry ent;
8  * <type> <item>;
9  * <type2> <item2>[<len>];
10  * [...]
11  * };
12  *
13  * The <type> <item> is created by the __field(type, item) macro or
14  * the __array(type2, item2, len) macro.
15  * We simply do "type item;", and that will create the fields
16  * in the structure.
17  */
18 
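For illustration (the event name and fields here are hypothetical, not part of this file), a template whose TP_STRUCT__entry contains

     __field(int,  cpu)
     __array(char, comm, 16)

is turned by the stage-1 macros below into roughly

     struct ftrace_raw_sample {
             struct trace_entry ent;
             int  cpu;
             char comm[16];
             char __data[0];   /* appended by DECLARE_EVENT_CLASS for dynamic data */
     };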
19 #include <linux/ftrace_event.h>
20 
21 /*
22  * DECLARE_EVENT_CLASS can be used to add generic function
23  * handlers for events, i.e. when all events have the same
24  * parameters and differ only in their tracepoints.
25  * Each tracepoint can be defined with DEFINE_EVENT and that
26  * will map the DECLARE_EVENT_CLASS to the tracepoint.
27  *
28  * TRACE_EVENT is a one to one mapping between tracepoint and template.
29  */
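As a hedged sketch (the tracepoint names are hypothetical), the class/instance split looks like

     DECLARE_EVENT_CLASS(sample_class, PARAMS(proto), PARAMS(args),
                         PARAMS(tstruct), PARAMS(assign), PARAMS(print));
     DEFINE_EVENT(sample_class, sample_enter, PARAMS(proto), PARAMS(args));
     DEFINE_EVENT(sample_class, sample_exit,  PARAMS(proto), PARAMS(args));

while the TRACE_EVENT() wrapper below declares a class and a single event that share the same name.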
30 #undef TRACE_EVENT
31 #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
32  DECLARE_EVENT_CLASS(name, \
33  PARAMS(proto), \
34  PARAMS(args), \
35  PARAMS(tstruct), \
36  PARAMS(assign), \
37  PARAMS(print)); \
38  DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));
39 
40 
41 #undef __field
42 #define __field(type, item) type item;
43 
44 #undef __field_ext
45 #define __field_ext(type, item, filter_type) type item;
46 
47 #undef __array
48 #define __array(type, item, len) type item[len];
49 
50 #undef __dynamic_array
51 #define __dynamic_array(type, item, len) u32 __data_loc_##item;
52 
53 #undef __string
54 #define __string(item, src) __dynamic_array(char, item, -1)
55 
56 #undef TP_STRUCT__entry
57 #define TP_STRUCT__entry(args...) args
58 
59 #undef DECLARE_EVENT_CLASS
60 #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
61  struct ftrace_raw_##name { \
62  struct trace_entry ent; \
63  tstruct \
64  char __data[0]; \
65  }; \
66  \
67  static struct ftrace_event_class event_class_##name;
68 
69 #undef DEFINE_EVENT
70 #define DEFINE_EVENT(template, name, proto, args) \
71  static struct ftrace_event_call __used \
72  __attribute__((__aligned__(4))) event_##name
73 
74 #undef DEFINE_EVENT_PRINT
75 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
76  DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
77 
78 /* Callbacks are meaningless to ftrace. */
79 #undef TRACE_EVENT_FN
80 #define TRACE_EVENT_FN(name, proto, args, tstruct, \
81  assign, print, reg, unreg) \
82  TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \
83  PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \
84 
85 #undef TRACE_EVENT_FLAGS
86 #define TRACE_EVENT_FLAGS(name, value) \
87  __TRACE_EVENT_FLAGS(name, value)
88 
89 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
90 
91 
92 /*
93  * Stage 2 of the trace events.
94  *
95  * Include the following:
96  *
97  * struct ftrace_data_offsets_<call> {
98  * u32 <item1>;
99  * u32 <item2>;
100  * [...]
101  * };
102  *
103  * The __dynamic_array() macro will create each u32 <item>; this records
104  * the offset of each array from the beginning of the event.
105  * The size of an array is also encoded in the higher 16 bits of <item>.
106  */
107 
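Continuing the hypothetical event from stage 1, a template that also declares

     __string(msg, msg)
     __dynamic_array(u32, counts, n)

is reduced at this stage to just the bookkeeping structure

     struct ftrace_data_offsets_sample {
             u32 msg;
             u32 counts;
     };

each u32 holding the offset of the corresponding array in its low 16 bits and the array length in its high 16 bits.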
108 #undef __field
109 #define __field(type, item)
110 
111 #undef __field_ext
112 #define __field_ext(type, item, filter_type)
113 
114 #undef __array
115 #define __array(type, item, len)
116 
117 #undef __dynamic_array
118 #define __dynamic_array(type, item, len) u32 item;
119 
120 #undef __string
121 #define __string(item, src) __dynamic_array(char, item, -1)
122 
123 #undef DECLARE_EVENT_CLASS
124 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
125  struct ftrace_data_offsets_##call { \
126  tstruct; \
127  };
128 
129 #undef DEFINE_EVENT
130 #define DEFINE_EVENT(template, name, proto, args)
131 
132 #undef DEFINE_EVENT_PRINT
133 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
134  DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
135 
136 #undef TRACE_EVENT_FLAGS
137 #define TRACE_EVENT_FLAGS(event, flag)
138 
139 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
140 
141 /*
142  * Stage 3 of the trace events.
143  *
144  * Override the macros in <trace/trace_events.h> to include the following:
145  *
146  * enum print_line_t
147  * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
148  * {
149  * struct trace_seq *s = &iter->seq;
150  * struct ftrace_raw_<call> *field; <-- defined in stage 1
151  * struct trace_entry *entry;
152  * struct trace_seq *p = &iter->tmp_seq;
153  * int ret;
154  *
155  * entry = iter->ent;
156  *
157  * if (entry->type != event_<call>->event.type) {
158  * WARN_ON_ONCE(1);
159  * return TRACE_TYPE_UNHANDLED;
160  * }
161  *
162  * field = (typeof(field))entry;
163  *
164  * trace_seq_init(p);
165  * ret = trace_seq_printf(s, "%s: ", <call>);
166  * if (ret)
167  * ret = trace_seq_printf(s, <TP_printk> "\n");
168  * if (!ret)
169  * return TRACE_TYPE_PARTIAL_LINE;
170  *
171  * return TRACE_TYPE_HANDLED;
172  * }
173  *
174  * This is the method used to print the raw event to the trace
175  * output format. Note that this is not needed if the data is read
176  * in binary form.
177  */
178 
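As a hedged example (hypothetical fields), a print statement written as

     TP_printk("cpu=%d comm=%s", __entry->cpu, __entry->comm)

is rewritten by the macros below into the argument list

     "cpu=%d comm=%s" "\n", field->cpu, field->comm

handed to trace_seq_printf(), since __entry is redefined to the local variable 'field' inside ftrace_raw_output_<call>().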
179 #undef __entry
180 #define __entry field
181 
182 #undef TP_printk
183 #define TP_printk(fmt, args...) fmt "\n", args
184 
185 #undef __get_dynamic_array
186 #define __get_dynamic_array(field) \
187  ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
188 
189 #undef __get_str
190 #define __get_str(field) (char *)__get_dynamic_array(field)
191 
192 #undef __print_flags
193 #define __print_flags(flag, delim, flag_array...) \
194  ({ \
195  static const struct trace_print_flags __flags[] = \
196  { flag_array, { -1, NULL }}; \
197  ftrace_print_flags_seq(p, delim, flag, __flags); \
198  })
199 
200 #undef __print_symbolic
201 #define __print_symbolic(value, symbol_array...) \
202  ({ \
203  static const struct trace_print_flags symbols[] = \
204  { symbol_array, { -1, NULL }}; \
205  ftrace_print_symbols_seq(p, value, symbols); \
206  })
207 
208 #undef __print_symbolic_u64
209 #if BITS_PER_LONG == 32
210 #define __print_symbolic_u64(value, symbol_array...) \
211  ({ \
212  static const struct trace_print_flags_u64 symbols[] = \
213  { symbol_array, { -1, NULL } }; \
214  ftrace_print_symbols_seq_u64(p, value, symbols); \
215  })
216 #else
217 #define __print_symbolic_u64(value, symbol_array...) \
218  __print_symbolic(value, symbol_array)
219 #endif
220 
221 #undef __print_hex
222 #define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
223 
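A hedged usage sketch of the print helpers above (the field and value names are hypothetical): inside a TP_printk, a numeric field can be mapped to a symbolic name with

     __print_symbolic(__entry->state,
                      { 0, "CREATED" },
                      { 1, "RUNNING" },
                      { 2, "BLOCKED" })

which expands to a static trace_print_flags table terminated by { -1, NULL } and a call to ftrace_print_symbols_seq().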
224 #undef DECLARE_EVENT_CLASS
225 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
226 static notrace enum print_line_t \
227 ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
228  struct trace_event *trace_event) \
229 { \
230  struct ftrace_event_call *event; \
231  struct trace_seq *s = &iter->seq; \
232  struct ftrace_raw_##call *field; \
233  struct trace_entry *entry; \
234  struct trace_seq *p = &iter->tmp_seq; \
235  int ret; \
236  \
237  event = container_of(trace_event, struct ftrace_event_call, \
238  event); \
239  \
240  entry = iter->ent; \
241  \
242  if (entry->type != event->event.type) { \
243  WARN_ON_ONCE(1); \
244  return TRACE_TYPE_UNHANDLED; \
245  } \
246  \
247  field = (typeof(field))entry; \
248  \
249  trace_seq_init(p); \
250  ret = trace_seq_printf(s, "%s: ", event->name); \
251  if (ret) \
252  ret = trace_seq_printf(s, print); \
253  if (!ret) \
254  return TRACE_TYPE_PARTIAL_LINE; \
255  \
256  return TRACE_TYPE_HANDLED; \
257 } \
258 static struct trace_event_functions ftrace_event_type_funcs_##call = { \
259  .trace = ftrace_raw_output_##call, \
260 };
261 
262 #undef DEFINE_EVENT_PRINT
263 #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
264 static notrace enum print_line_t \
265 ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
266  struct trace_event *event) \
267 { \
268  struct trace_seq *s = &iter->seq; \
269  struct ftrace_raw_##template *field; \
270  struct trace_entry *entry; \
271  struct trace_seq *p = &iter->tmp_seq; \
272  int ret; \
273  \
274  entry = iter->ent; \
275  \
276  if (entry->type != event_##call.event.type) { \
277  WARN_ON_ONCE(1); \
278  return TRACE_TYPE_UNHANDLED; \
279  } \
280  \
281  field = (typeof(field))entry; \
282  \
283  trace_seq_init(p); \
284  ret = trace_seq_printf(s, "%s: ", #call); \
285  if (ret) \
286  ret = trace_seq_printf(s, print); \
287  if (!ret) \
288  return TRACE_TYPE_PARTIAL_LINE; \
289  \
290  return TRACE_TYPE_HANDLED; \
291 } \
292 static struct trace_event_functions ftrace_event_type_funcs_##call = { \
293  .trace = ftrace_raw_output_##call, \
294 };
295 
296 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
297 
298 #undef __field_ext
299 #define __field_ext(type, item, filter_type) \
300  ret = trace_define_field(event_call, #type, #item, \
301  offsetof(typeof(field), item), \
302  sizeof(field.item), \
303  is_signed_type(type), filter_type); \
304  if (ret) \
305  return ret;
306 
307 #undef __field
308 #define __field(type, item) __field_ext(type, item, FILTER_OTHER)
309 
310 #undef __array
311 #define __array(type, item, len) \
312  do { \
313  mutex_lock(&event_storage_mutex); \
314  BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
315  snprintf(event_storage, sizeof(event_storage), \
316  "%s[%d]", #type, len); \
317  ret = trace_define_field(event_call, event_storage, #item, \
318  offsetof(typeof(field), item), \
319  sizeof(field.item), \
320  is_signed_type(type), FILTER_OTHER); \
321  mutex_unlock(&event_storage_mutex); \
322  if (ret) \
323  return ret; \
324  } while (0);
325 
326 #undef __dynamic_array
327 #define __dynamic_array(type, item, len) \
328  ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
329  offsetof(typeof(field), __data_loc_##item), \
330  sizeof(field.__data_loc_##item), \
331  is_signed_type(type), FILTER_OTHER);
332 
333 #undef __string
334 #define __string(item, src) __dynamic_array(char, item, -1)
335 
336 #undef DECLARE_EVENT_CLASS
337 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
338 static int notrace \
339 ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
340 { \
341  struct ftrace_raw_##call field; \
342  int ret; \
343  \
344  tstruct; \
345  \
346  return ret; \
347 }
348 
349 #undef DEFINE_EVENT
350 #define DEFINE_EVENT(template, name, proto, args)
351 
352 #undef DEFINE_EVENT_PRINT
353 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
354  DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
355 
356 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
357 
358 /*
359  * Remember the offset of each array from the beginning of the event.
360  */
361 
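A worked example with assumed numbers: if offsetof(struct ftrace_raw_sample, __data) is 24 and the first dynamic array is __dynamic_array(char, msg, 6), the macro below computes

     __data_offsets->msg  = 0 + 24;         /* offset of msg from the entry start */
     __data_offsets->msg |= (6 * 1) << 16;  /* length stored in the high 16 bits  */
     __data_size          = 6;              /* next array starts 6 bytes further  */

so __data_loc_msg holds 0x00060018, and __get_dynamic_array() recovers the offset by masking with 0xffff.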
362 #undef __entry
363 #define __entry entry
364 
365 #undef __field
366 #define __field(type, item)
367 
368 #undef __field_ext
369 #define __field_ext(type, item, filter_type)
370 
371 #undef __array
372 #define __array(type, item, len)
373 
374 #undef __dynamic_array
375 #define __dynamic_array(type, item, len) \
376  __data_offsets->item = __data_size + \
377  offsetof(typeof(*entry), __data); \
378  __data_offsets->item |= (len * sizeof(type)) << 16; \
379  __data_size += (len) * sizeof(type);
380 
381 #undef __string
382 #define __string(item, src) __dynamic_array(char, item, strlen(src) + 1)
383 
384 #undef DECLARE_EVENT_CLASS
385 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
386 static inline notrace int ftrace_get_offsets_##call( \
387  struct ftrace_data_offsets_##call *__data_offsets, proto) \
388 { \
389  int __data_size = 0; \
390  struct ftrace_raw_##call __maybe_unused *entry; \
391  \
392  tstruct; \
393  \
394  return __data_size; \
395 }
396 
397 #undef DEFINE_EVENT
398 #define DEFINE_EVENT(template, name, proto, args)
399 
400 #undef DEFINE_EVENT_PRINT
401 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
402  DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
403 
404 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
405 
406 /*
407  * Stage 4 of the trace events.
408  *
409  * Override the macros in <trace/trace_events.h> to include the following:
410  *
411  * For those macros defined with TRACE_EVENT:
412  *
413  * static struct ftrace_event_call event_<call>;
414  *
415  * static void ftrace_raw_event_<call>(void *__data, proto)
416  * {
417  * struct ftrace_event_call *event_call = __data;
418  * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
419  * struct ring_buffer_event *event;
420  * struct ftrace_raw_<call> *entry; <-- defined in stage 1
421  * struct ring_buffer *buffer;
422  * unsigned long irq_flags;
423  * int __data_size;
424  * int pc;
425  *
426  * local_save_flags(irq_flags);
427  * pc = preempt_count();
428  *
429  * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
430  *
431  * event = trace_current_buffer_lock_reserve(&buffer,
432  * event_<call>->event.type,
433  * sizeof(*entry) + __data_size,
434  * irq_flags, pc);
435  * if (!event)
436  * return;
437  * entry = ring_buffer_event_data(event);
438  *
439  * { <assign>; } <-- Here we assign the entries by the __field and
440  * __array macros.
441  *
442  * if (!filter_current_check_discard(buffer, event_call, entry, event))
443  * trace_current_buffer_unlock_commit(buffer,
444  * event, irq_flags, pc);
445  * }
446  *
447  * static struct trace_event ftrace_event_type_<call> = {
448  * .trace = ftrace_raw_output_<call>, <-- stage 2
449  * };
450  *
451  * static const char print_fmt_<call>[] = <TP_printk>;
452  *
453  * static struct ftrace_event_class __used event_class_<template> = {
454  * .system = "<system>",
455  * .define_fields = ftrace_define_fields_<call>,
456  * .fields = LIST_HEAD_INIT(event_class_##call.fields),
457  * .raw_init = trace_event_raw_init,
458  * .probe = ftrace_raw_event_##call,
459  * .reg = ftrace_event_reg,
460  * };
461  *
462  * static struct ftrace_event_call event_<call> = {
463  * .name = "<call>",
464  * .class = event_class_<template>,
465  * .event = &ftrace_event_type_<call>,
466  * .print_fmt = print_fmt_<call>,
467  * };
468  * // it's only safe to use pointers when doing linker tricks to
469  * // create an array.
470  * static struct ftrace_event_call __used
471  * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
472  *
473  */
474 
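A minimal sketch of the linker trick mentioned at the end of the comment above (the __start/__stop symbols come from the kernel linker script, and the walker function is hypothetical, not part of this file): the pointers emitted into the _ftrace_events section form an array that the trace core can iterate over.

     extern struct ftrace_event_call *__start_ftrace_events[];
     extern struct ftrace_event_call *__stop_ftrace_events[];

     /* sketch: walk every event collected into the _ftrace_events section */
     static void list_ftrace_events(void)
     {
             struct ftrace_event_call **iter;

             for (iter = __start_ftrace_events; iter < __stop_ftrace_events; iter++)
                     pr_info("trace event: %s\n", (*iter)->name);
     }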
475 #ifdef CONFIG_PERF_EVENTS
476 
477 #define _TRACE_PERF_PROTO(call, proto) \
478  static notrace void \
479  perf_trace_##call(void *__data, proto);
480 
481 #define _TRACE_PERF_INIT(call) \
482  .perf_probe = perf_trace_##call,
483 
484 #else
485 #define _TRACE_PERF_PROTO(call, proto)
486 #define _TRACE_PERF_INIT(call)
487 #endif /* CONFIG_PERF_EVENTS */
488 
489 #undef __entry
490 #define __entry entry
491 
492 #undef __field
493 #define __field(type, item)
494 
495 #undef __array
496 #define __array(type, item, len)
497 
498 #undef __dynamic_array
499 #define __dynamic_array(type, item, len) \
500  __entry->__data_loc_##item = __data_offsets.item;
501 
502 #undef __string
503 #define __string(item, src) __dynamic_array(char, item, -1) \
504 
505 #undef __assign_str
506 #define __assign_str(dst, src) \
507  strcpy(__get_str(dst), src);
508 
509 #undef TP_fast_assign
510 #define TP_fast_assign(args...) args
511 
512 #undef TP_perf_assign
513 #define TP_perf_assign(args...)
514 
515 #undef DECLARE_EVENT_CLASS
516 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
517  \
518 static notrace void \
519 ftrace_raw_event_##call(void *__data, proto) \
520 { \
521  struct ftrace_event_call *event_call = __data; \
522  struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
523  struct ring_buffer_event *event; \
524  struct ftrace_raw_##call *entry; \
525  struct ring_buffer *buffer; \
526  unsigned long irq_flags; \
527  int __data_size; \
528  int pc; \
529  \
530  local_save_flags(irq_flags); \
531  pc = preempt_count(); \
532  \
533  __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
534  \
535  event = trace_current_buffer_lock_reserve(&buffer, \
536  event_call->event.type, \
537  sizeof(*entry) + __data_size, \
538  irq_flags, pc); \
539  if (!event) \
540  return; \
541  entry = ring_buffer_event_data(event); \
542  \
543  tstruct \
544  \
545  { assign; } \
546  \
547  if (!filter_current_check_discard(buffer, event_call, entry, event)) \
548  trace_nowake_buffer_unlock_commit(buffer, \
549  event, irq_flags, pc); \
550 }
551 /*
552  * The ftrace_test_probe is compiled out; it is only here as a build-time check
553  * to make sure that if the tracepoint handling changes, the ftrace probe will
554  * fail to compile unless it too is updated.
555  */
556 
557 #undef DEFINE_EVENT
558 #define DEFINE_EVENT(template, call, proto, args) \
559 static inline void ftrace_test_probe_##call(void) \
560 { \
561  check_trace_callback_type_##call(ftrace_raw_event_##template); \
562 }
563 
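For a hypothetical DEFINE_EVENT(sample_class, sample_enter, ...), the macro above expands to

     static inline void ftrace_test_probe_sample_enter(void)
     {
             check_trace_callback_type_sample_enter(ftrace_raw_event_sample_class);
     }

which compiles only while the probe's signature still matches the prototype declared for the tracepoint (check_trace_callback_type_<call>() is generated by the tracepoint declaration in <linux/tracepoint.h>).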
564 #undef DEFINE_EVENT_PRINT
565 #define DEFINE_EVENT_PRINT(template, name, proto, args, print)
566 
567 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
568 
569 #undef __entry
570 #define __entry REC
571 
572 #undef __print_flags
573 #undef __print_symbolic
574 #undef __print_hex
575 #undef __get_dynamic_array
576 #undef __get_str
577 
578 #undef TP_printk
579 #define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
580 
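A hedged example of the format-string stringification (hypothetical field): with __entry now defined as REC,

     TP_printk("cpu=%d", __entry->cpu)

becomes the single string literal

     "\"cpu=%d\", REC->cpu"

which is what the print_fmt_<call>[] array below stores and what user space later reads back from the event's format file.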
581 #undef DECLARE_EVENT_CLASS
582 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
583 _TRACE_PERF_PROTO(call, PARAMS(proto)); \
584 static const char print_fmt_##call[] = print; \
585 static struct ftrace_event_class __used event_class_##call = { \
586  .system = __stringify(TRACE_SYSTEM), \
587  .define_fields = ftrace_define_fields_##call, \
588  .fields = LIST_HEAD_INIT(event_class_##call.fields),\
589  .raw_init = trace_event_raw_init, \
590  .probe = ftrace_raw_event_##call, \
591  .reg = ftrace_event_reg, \
592  _TRACE_PERF_INIT(call) \
593 };
594 
595 #undef DEFINE_EVENT
596 #define DEFINE_EVENT(template, call, proto, args) \
597  \
598 static struct ftrace_event_call __used event_##call = { \
599  .name = #call, \
600  .class = &event_class_##template, \
601  .event.funcs = &ftrace_event_type_funcs_##template, \
602  .print_fmt = print_fmt_##template, \
603 }; \
604 static struct ftrace_event_call __used \
605 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
606 
607 #undef DEFINE_EVENT_PRINT
608 #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
609  \
610 static const char print_fmt_##call[] = print; \
611  \
612 static struct ftrace_event_call __used event_##call = { \
613  .name = #call, \
614  .class = &event_class_##template, \
615  .event.funcs = &ftrace_event_type_funcs_##call, \
616  .print_fmt = print_fmt_##call, \
617 }; \
618 static struct ftrace_event_call __used \
619 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
620 
621 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
622 
623 /*
624  * Define the insertion callback to perf events
625  *
626  * The job is very similar to ftrace_raw_event_<call> except that we don't
627  * insert into the ring buffer but into a perf counter.
628  *
629  * static void ftrace_perf_<call>(proto)
630  * {
631  * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
632  * struct ftrace_event_call *event_call = &event_<call>;
633  * extern void perf_tp_event(int, u64, u64, void *, int);
634  * struct ftrace_raw_##call *entry;
635  * struct perf_trace_buf *trace_buf;
636  * u64 __addr = 0, __count = 1;
637  * unsigned long irq_flags;
638  * struct trace_entry *ent;
639  * int __entry_size;
640  * int __data_size;
641  * int __cpu;
642  * int pc;
643  *
644  * pc = preempt_count();
645  *
646  * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
647  *
648  * // Below we want to get the aligned size by taking into account
649  * // the u32 field that will later store the buffer size
650  * __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
651  * sizeof(u64));
652  * __entry_size -= sizeof(u32);
653  *
654  * // Protect the non-NMI buffer
655  * // This also protects the rcu read side
656  * local_irq_save(irq_flags);
657  * __cpu = smp_processor_id();
658  *
659  * if (in_nmi())
660  * trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
661  * else
662  * trace_buf = rcu_dereference_sched(perf_trace_buf);
663  *
664  * if (!trace_buf)
665  * goto end;
666  *
667  * trace_buf = per_cpu_ptr(trace_buf, __cpu);
668  *
669  * // Avoid recursion from perf that could mess up the buffer
670  * if (trace_buf->recursion++)
671  * goto end_recursion;
672  *
673  * raw_data = trace_buf->buf;
674  *
675  * // Make recursion update visible before entering perf_tp_event
676  * // so that we protect from perf recursions.
677  *
678  * barrier();
679  *
680  * // zero dead bytes from alignment to avoid stack leak to userspace:
681  * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
682  * entry = (struct ftrace_raw_<call> *)raw_data;
683  * ent = &entry->ent;
684  * tracing_generic_entry_update(ent, irq_flags, pc);
685  * ent->type = event_call->id;
686  *
687  * <tstruct> <- do some jobs with dynamic arrays
688  *
689  * <assign> <- affect our values
690  *
691  * perf_tp_event(event_call->id, __addr, __count, entry,
692  * __entry_size); <- submit them to perf counter
693  *
694  * }
695  */
696 
697 #ifdef CONFIG_PERF_EVENTS
698 
699 #undef __entry
700 #define __entry entry
701 
702 #undef __get_dynamic_array
703 #define __get_dynamic_array(field) \
704  ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
705 
706 #undef __get_str
707 #define __get_str(field) (char *)__get_dynamic_array(field)
708 
709 #undef __perf_addr
710 #define __perf_addr(a) __addr = (a)
711 
712 #undef __perf_count
713 #define __perf_count(c) __count = (c)
714 
715 #undef __perf_task
716 #define __perf_task(t) __task = (t)
717 
718 #undef TP_perf_assign
719 #define TP_perf_assign(args...) args
720 
721 #undef DECLARE_EVENT_CLASS
722 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
723 static notrace void \
724 perf_trace_##call(void *__data, proto) \
725 { \
726  struct ftrace_event_call *event_call = __data; \
727  struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
728  struct ftrace_raw_##call *entry; \
729  struct pt_regs __regs; \
730  u64 __addr = 0, __count = 1; \
731  struct task_struct *__task = NULL; \
732  struct hlist_head *head; \
733  int __entry_size; \
734  int __data_size; \
735  int rctx; \
736  \
737  perf_fetch_caller_regs(&__regs); \
738  \
739  __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
740  __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
741  sizeof(u64)); \
742  __entry_size -= sizeof(u32); \
743  \
744  if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \
745  "profile buffer not large enough")) \
746  return; \
747  \
748  entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
749  __entry_size, event_call->event.type, &__regs, &rctx); \
750  if (!entry) \
751  return; \
752  \
753  tstruct \
754  \
755  { assign; } \
756  \
757  head = this_cpu_ptr(event_call->perf_events); \
758  perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
759  __count, &__regs, head, __task); \
760 }
761 
762 /*
763  * This part is compiled out; it is only here as a build-time check
764  * to make sure that if the tracepoint handling changes, the
765  * perf probe will fail to compile unless it too is updated.
766  */
767 #undef DEFINE_EVENT
768 #define DEFINE_EVENT(template, call, proto, args) \
769 static inline void perf_test_probe_##call(void) \
770 { \
771  check_trace_callback_type_##call(perf_trace_##template); \
772 }
773 
774 
775 #undef DEFINE_EVENT_PRINT
776 #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
777  DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
778 
779 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
780 #endif /* CONFIG_PERF_EVENTS */
781 
782 #undef _TRACE_PROFILE_INIT
783