Linux Kernel 3.7.1
kernel/events/internal.h
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				writable;	/* are we writable   */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

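/*
 * Layout note: user_page points to the perf_event_mmap_page control page
 * shared with user space, while data_pages[] is a variable-length trailing
 * array holding one pointer per data page (nr_pages entries in total).
 */
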
extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);

extern void
perf_event_header__init_id(struct perf_event_header *header,
			   struct perf_sample_data *data,
			   struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
			     struct perf_output_handle *handle,
			     struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

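/*
 * perf_data_size() covers the data pages only, not the user_page control
 * page.  For example, assuming the common 4 KiB page size (PAGE_SHIFT == 12),
 * nr_pages == 8 with page_order == 0 gives 8 << 12 == 32768 bytes of
 * buffer space.
 */
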
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned int						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned int len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min_t(unsigned long, handle->size, len);		\
									\
		written = memcpy_func(handle->addr, buf, size);		\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

static inline int memcpy_common(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return n;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

#define MEMCPY_SKIP(dst, src, n) (n)

DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user __copy_from_user_inatomic
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

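/*
 * Roughly, DEFINE_OUTPUT_COPY() generates helpers of the form
 *
 *	static inline unsigned int
 *	func_name(struct perf_output_handle *handle,
 *		  const void *buf, unsigned int len);
 *
 * which advance the handle across the ring-buffer data pages, wrapping
 * page by page, and return the number of bytes that were not written.
 * __output_copy() copies from a kernel buffer, __output_skip() only
 * advances the write position, and __output_copy_user() copies from
 * user space via arch_perf_out_copy_user.
 */
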
/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

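/*
 * Typical pairing (a sketch, not part of this header): a caller keeps one
 * counter per context level - task, softirq, hardirq, NMI - and brackets
 * its event emission with get/put, dropping the event if that level is
 * already busy:
 *
 *	int rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;			// recursion detected, bail out
 *	... emit the event ...
 *	put_recursion_context(recursion, rctx);
 */
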
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */