Linux Kernel 3.7.1
mm/kmemcheck.c
#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmemcheck.h>

void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
        struct page *shadow;
        int pages;
        int i;

        pages = 1 << order;

        /*
         * With kmemcheck enabled, we need to allocate a memory area for the
         * shadow bits as well.
         */
        shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
        if (!shadow) {
                if (printk_ratelimit())
                        printk(KERN_ERR "kmemcheck: failed to allocate "
                                "shadow bitmap\n");
                return;
        }

        for (i = 0; i < pages; ++i)
                page[i].shadow = page_address(&shadow[i]);

        /*
         * Mark it as non-present for the MMU so that our accesses to
         * this memory will trigger a page fault and let us analyze
         * the memory accesses.
         */
        kmemcheck_hide_pages(page, pages);
}

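kmemcheck_alloc_shadow() pairs every page in a 2^order block with a shadow page of its own: for order = 2 that is 4 data pages plus 4 shadow pages, and page[i].shadow records where page i's shadow bits live. Below is a minimal userspace sketch of that bookkeeping, with struct page, page_address() and the allocator replaced by stand-ins assumed purely for illustration; the MMU step (kmemcheck_hide_pages()) has no userspace equivalent and is omitted.

#include <stdio.h>
#include <stdlib.h>

#define MODEL_PAGE_SIZE 4096

/* Stand-in for the kernel's struct page; only the shadow link matters here. */
struct page {
        void *shadow;
        unsigned char data[MODEL_PAGE_SIZE];
};

/* Stand-in for page_address(): model pages are directly addressable. */
static void *page_address(struct page *p)
{
        return p->data;
}

int main(void)
{
        int order = 2;
        int pages = 1 << order;         /* 4 pages, as in "pages = 1 << order" */
        struct page *page = calloc(pages, sizeof(*page));
        struct page *shadow = calloc(pages, sizeof(*shadow));
        int i;

        if (!page || !shadow)
                return 1;

        /* Wire each page to its shadow, as kmemcheck_alloc_shadow() does. */
        for (i = 0; i < pages; ++i)
                page[i].shadow = page_address(&shadow[i]);

        printf("page[3]'s shadow bits live at %p\n", page[3].shadow);

        free(shadow);
        free(page);
        return 0;
}
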
void kmemcheck_free_shadow(struct page *page, int order)
{
        struct page *shadow;
        int pages;
        int i;

        if (!kmemcheck_page_is_tracked(page))
                return;

        pages = 1 << order;

        kmemcheck_show_pages(page, pages);

        shadow = virt_to_page(page[0].shadow);

        for (i = 0; i < pages; ++i)
                page[i].shadow = NULL;

        __free_pages(shadow, order);
}

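Teardown mirrors the setup: the shadow block is recovered from the first page's stored pointer via virt_to_page(), every link is cleared, and the block is freed. Continuing the userspace model above, with virt_to_page() modeled as offsetof() arithmetic rather than the kernel's real mem_map lookup (an assumption made only so the sketch stays self-contained); the tracked-page check and the MMU show step are again omitted.

#include <stddef.h>
#include <stdlib.h>

#define MODEL_PAGE_SIZE 4096

struct page {
        void *shadow;
        unsigned char data[MODEL_PAGE_SIZE];
};

/* Model of virt_to_page(): step back from the data to its struct page. */
static struct page *virt_to_page(void *addr)
{
        return (struct page *)((char *)addr - offsetof(struct page, data));
}

/* Mirrors kmemcheck_free_shadow() on the model from the previous sketch. */
static void free_shadow_model(struct page *page, int order)
{
        int pages = 1 << order;
        struct page *shadow = virt_to_page(page[0].shadow);
        int i;

        /* Sever every page's link before releasing the shadow block. */
        for (i = 0; i < pages; ++i)
                page[i].shadow = NULL;

        free(shadow);           /* stands in for __free_pages(shadow, order) */
}
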
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
                          size_t size)
{
        /*
         * Has already been memset(), which initializes the shadow for us
         * as well.
         */
        if (gfpflags & __GFP_ZERO)
                return;

        /* No need to initialize the shadow of a non-tracked slab. */
        if (s->flags & SLAB_NOTRACK)
                return;

        if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
                /*
                 * Allow notracked objects to be allocated from
                 * tracked caches. Note however that these objects
                 * will still get page faults on access, they just
                 * won't ever be flagged as uninitialized. If page
                 * faults are not acceptable, the slab cache itself
                 * should be marked NOTRACK.
                 */
                kmemcheck_mark_initialized(object, size);
        } else if (!s->ctor) {
                /*
                 * New objects should be marked uninitialized before
                 * they're returned to the caller.
                 */
                kmemcheck_mark_uninitialized(object, size);
        }
}

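The function above is a four-way decision: zeroed allocations need nothing (the memset() already wrote the shadow), untracked caches need nothing, untracked objects from tracked caches are marked initialized so they are never reported, and fresh objects without a constructor are marked uninitialized. A compact model of that ladder as a pure function, with the flag bits invented for the sketch (the real values live in gfp.h and slab.h):

#include <stdio.h>

/* Modeled flag bits, for the sketch only. */
#define GFP_ZERO        (1u << 0)
#define GFP_NOTRACK     (1u << 1)
#define CACHE_NOTRACK   (1u << 0)

enum shadow_action { DO_NOTHING, MARK_INITIALIZED, MARK_UNINITIALIZED };

/* Mirrors the decision ladder in kmemcheck_slab_alloc(). */
static enum shadow_action slab_alloc_action(int kmemcheck_enabled,
                                            unsigned int cache_flags,
                                            unsigned int gfpflags,
                                            int has_ctor)
{
        if (gfpflags & GFP_ZERO)        /* memset() already wrote the shadow */
                return DO_NOTHING;
        if (cache_flags & CACHE_NOTRACK)        /* untracked cache */
                return DO_NOTHING;
        if (!kmemcheck_enabled || (gfpflags & GFP_NOTRACK))
                return MARK_INITIALIZED;        /* never flagged as uninitialized */
        if (!has_ctor)
                return MARK_UNINITIALIZED;      /* fresh object: reads are errors */
        return DO_NOTHING;              /* object has a ctor; shadow left as-is */
}

int main(void)
{
        printf("%d\n", slab_alloc_action(1, 0, 0, 0));        /* 2: MARK_UNINITIALIZED */
        printf("%d\n", slab_alloc_action(1, 0, GFP_ZERO, 0)); /* 0: DO_NOTHING */
        return 0;
}
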
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
{
        /* TODO: RCU freeing is unsupported for now; hide false positives. */
        if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
                kmemcheck_mark_freed(object, size);
}

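SLAB_DESTROY_BY_RCU caches defer actual freeing until an RCU grace period has passed, so readers may still legitimately touch an object after kmem_cache_free(); marking its shadow freed here would flag those reads, hence the guard. The constructor check presumably protects caches whose objects keep a constructed state across free/alloc cycles. The guard reduces to a simple predicate (flag bit modeled, as before):

#include <stdbool.h>

#define CACHE_DESTROY_BY_RCU    (1u << 1)       /* modeled flag bit */

/* Mirrors the guard in kmemcheck_slab_free(): mark only objects from
 * caches without a constructor and without RCU-deferred freeing. */
static bool should_mark_freed(unsigned int cache_flags, bool has_ctor)
{
        return !has_ctor && !(cache_flags & CACHE_DESTROY_BY_RCU);
}
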
void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
                               gfp_t gfpflags)
{
        int pages;

        if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
                return;

        pages = 1 << order;

        /*
         * NOTE: We choose to track GFP_ZERO pages too; in fact, they
         * can become uninitialized by copying uninitialized memory
         * into them.
         */

        /* XXX: Can use zone->node for node? */
        kmemcheck_alloc_shadow(page, order, gfpflags, -1);

        if (gfpflags & __GFP_ZERO)
                kmemcheck_mark_initialized_pages(page, pages);
        else
                kmemcheck_mark_uninitialized_pages(page, pages);
}
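
The page-allocator hook skips highmem and explicitly untracked allocations entirely, then hands everything else to kmemcheck_alloc_shadow() and sets the initial shadow state from __GFP_ZERO. A small model of just the flag logic, again with invented flag bits standing in for the real gfp.h values:

#include <stdio.h>

/* Modeled flag bits, for the sketch only. */
#define GFP_ZERO        (1u << 0)
#define GFP_NOTRACK     (1u << 1)
#define GFP_HIGHMEM     (1u << 2)

/* Mirrors kmemcheck_pagealloc_alloc(): returns 1 if pages start life
 * initialized (zeroed), 0 if uninitialized, -1 if untracked. */
static int pagealloc_tracking(unsigned int gfpflags)
{
        if (gfpflags & (GFP_HIGHMEM | GFP_NOTRACK))
                return -1;                      /* no shadow allocated at all */
        return (gfpflags & GFP_ZERO) ? 1 : 0;
}

int main(void)
{
        printf("%d\n", pagealloc_tracking(GFP_ZERO));           /*  1: initialized */
        printf("%d\n", pagealloc_tracking(0));                  /*  0: uninitialized */
        printf("%d\n", pagealloc_tracking(GFP_HIGHMEM));        /* -1: untracked */
        return 0;
}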