Linux Kernel 3.7.1
malloc.c
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <[email protected]>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/jffs2.h>
#include "nodelist.h"

/* These are initialised to NULL in the kernel startup code.
   If you're porting to other operating systems, beware */
static struct kmem_cache *full_dnode_slab;
static struct kmem_cache *raw_dirent_slab;
static struct kmem_cache *raw_inode_slab;
static struct kmem_cache *tmp_dnode_info_slab;
static struct kmem_cache *raw_node_ref_slab;
static struct kmem_cache *node_frag_slab;
static struct kmem_cache *inode_cache_slab;
#ifdef CONFIG_JFFS2_FS_XATTR
static struct kmem_cache *xattr_datum_cache;
static struct kmem_cache *xattr_ref_cache;
#endif
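
/*
 * Create the slab caches for the in-core JFFS2 node structures above.
 * Called once from the JFFS2 initialisation path; on failure, any
 * caches created so far are torn down and -ENOMEM is returned.
 */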
int __init jffs2_create_slab_caches(void)
{
        full_dnode_slab = kmem_cache_create("jffs2_full_dnode",
                        sizeof(struct jffs2_full_dnode),
                        0, 0, NULL);
        if (!full_dnode_slab)
                goto err;

        raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent",
                        sizeof(struct jffs2_raw_dirent),
                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (!raw_dirent_slab)
                goto err;

        raw_inode_slab = kmem_cache_create("jffs2_raw_inode",
                        sizeof(struct jffs2_raw_inode),
                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (!raw_inode_slab)
                goto err;

        tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode",
                        sizeof(struct jffs2_tmp_dnode_info),
                        0, 0, NULL);
        if (!tmp_dnode_info_slab)
                goto err;

        raw_node_ref_slab = kmem_cache_create("jffs2_refblock",
                        sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1),
                        0, 0, NULL);
        if (!raw_node_ref_slab)
                goto err;

        node_frag_slab = kmem_cache_create("jffs2_node_frag",
                        sizeof(struct jffs2_node_frag),
                        0, 0, NULL);
        if (!node_frag_slab)
                goto err;

        inode_cache_slab = kmem_cache_create("jffs2_inode_cache",
                        sizeof(struct jffs2_inode_cache),
                        0, 0, NULL);
        if (!inode_cache_slab)
                goto err;

#ifdef CONFIG_JFFS2_FS_XATTR
        xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum",
                        sizeof(struct jffs2_xattr_datum),
                        0, 0, NULL);
        if (!xattr_datum_cache)
                goto err;

        xattr_ref_cache = kmem_cache_create("jffs2_xattr_ref",
                        sizeof(struct jffs2_xattr_ref),
                        0, 0, NULL);
        if (!xattr_ref_cache)
                goto err;
#endif

        return 0;
 err:
        jffs2_destroy_slab_caches();
        return -ENOMEM;
}

void jffs2_destroy_slab_caches(void)
{
        if(full_dnode_slab)
                kmem_cache_destroy(full_dnode_slab);
        if(raw_dirent_slab)
                kmem_cache_destroy(raw_dirent_slab);
        if(raw_inode_slab)
                kmem_cache_destroy(raw_inode_slab);
        if(tmp_dnode_info_slab)
                kmem_cache_destroy(tmp_dnode_info_slab);
        if(raw_node_ref_slab)
                kmem_cache_destroy(raw_node_ref_slab);
        if(node_frag_slab)
                kmem_cache_destroy(node_frag_slab);
        if(inode_cache_slab)
                kmem_cache_destroy(inode_cache_slab);
#ifdef CONFIG_JFFS2_FS_XATTR
        if (xattr_datum_cache)
                kmem_cache_destroy(xattr_datum_cache);
        if (xattr_ref_cache)
                kmem_cache_destroy(xattr_ref_cache);
#endif
}

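/*
 * The allocators below are thin wrappers around kmalloc() or
 * kmem_cache_alloc() on the caches created above, paired with matching
 * free helpers; dbg_memalloc() traces each allocation when enabled.
 */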
struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize)
{
        struct jffs2_full_dirent *ret;
        ret = kmalloc(sizeof(struct jffs2_full_dirent) + namesize, GFP_KERNEL);
        dbg_memalloc("%p\n", ret);
        return ret;
}

void jffs2_free_full_dirent(struct jffs2_full_dirent *x)
{
        dbg_memalloc("%p\n", x);
        kfree(x);
}

struct jffs2_full_dnode *jffs2_alloc_full_dnode(void)
{
        struct jffs2_full_dnode *ret;
        ret = kmem_cache_alloc(full_dnode_slab, GFP_KERNEL);
        dbg_memalloc("%p\n", ret);
        return ret;
}

void jffs2_free_full_dnode(struct jffs2_full_dnode *x)
{
        dbg_memalloc("%p\n", x);
        kmem_cache_free(full_dnode_slab, x);
}

struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void)
{
        struct jffs2_raw_dirent *ret;
        ret = kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL);
        dbg_memalloc("%p\n", ret);
        return ret;
}

void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x)
{
        dbg_memalloc("%p\n", x);
        kmem_cache_free(raw_dirent_slab, x);
}

struct jffs2_raw_inode *jffs2_alloc_raw_inode(void)
{
        struct jffs2_raw_inode *ret;
        ret = kmem_cache_alloc(raw_inode_slab, GFP_KERNEL);
        dbg_memalloc("%p\n", ret);
        return ret;
}

void jffs2_free_raw_inode(struct jffs2_raw_inode *x)
{
        dbg_memalloc("%p\n", x);
        kmem_cache_free(raw_inode_slab, x);
}

struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void)
{
        struct jffs2_tmp_dnode_info *ret;
        ret = kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL);
        dbg_memalloc("%p\n", ret);
        return ret;
}

void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x)
{
        dbg_memalloc("%p\n", x);
        kmem_cache_free(tmp_dnode_info_slab, x);
}

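/*
 * Allocate one refblock: an array of REFS_PER_BLOCK usable raw node
 * refs plus one extra terminator entry. Usable entries start out as
 * REF_EMPTY_NODE; the terminator is marked REF_LINK_NODE and its
 * next_in_ino pointer is used to chain refblocks together.
 */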
static struct jffs2_raw_node_ref *jffs2_alloc_refblock(void)
{
        struct jffs2_raw_node_ref *ret;

        ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL);
        if (ret) {
                int i = 0;
                for (i=0; i < REFS_PER_BLOCK; i++) {
                        ret[i].flash_offset = REF_EMPTY_NODE;
                        ret[i].next_in_ino = NULL;
                }
                ret[i].flash_offset = REF_LINK_NODE;
                ret[i].next_in_ino = NULL;
        }
        return ret;
}

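/*
 * Ensure at least 'nr' empty raw node refs are available for the given
 * eraseblock, walking the chain from jeb->last_node and allocating and
 * linking additional refblocks as needed.
 */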
int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c,
                                 struct jffs2_eraseblock *jeb, int nr)
{
        struct jffs2_raw_node_ref **p, *ref;
        int i = nr;

        dbg_memalloc("%d\n", nr);

        p = &jeb->last_node;
        ref = *p;

        dbg_memalloc("Reserving %d refs for block @0x%08x\n", nr, jeb->offset);

        /* If jeb->last_node is really a valid node then skip over it */
        if (ref && ref->flash_offset != REF_EMPTY_NODE)
                ref++;

        while (i) {
                if (!ref) {
                        dbg_memalloc("Allocating new refblock linked from %p\n", p);
                        ref = *p = jffs2_alloc_refblock();
                        if (!ref)
                                return -ENOMEM;
                }
                if (ref->flash_offset == REF_LINK_NODE) {
                        p = &ref->next_in_ino;
                        ref = *p;
                        continue;
                }
                i--;
                ref++;
        }
        jeb->allocated_refs = nr;

        dbg_memalloc("Reserved %d refs for block @0x%08x, last_node is %p (%08x,%p)\n",
                     nr, jeb->offset, jeb->last_node, jeb->last_node->flash_offset,
                     jeb->last_node->next_in_ino);

        return 0;
}

void jffs2_free_refblock(struct jffs2_raw_node_ref *x)
{
        dbg_memalloc("%p\n", x);
        kmem_cache_free(raw_node_ref_slab, x);
}

struct jffs2_node_frag *jffs2_alloc_node_frag(void)
{
        struct jffs2_node_frag *ret;
        ret = kmem_cache_alloc(node_frag_slab, GFP_KERNEL);
        dbg_memalloc("%p\n", ret);
        return ret;
}

void jffs2_free_node_frag(struct jffs2_node_frag *x)
{
        dbg_memalloc("%p\n", x);
        kmem_cache_free(node_frag_slab, x);
}

struct jffs2_inode_cache *jffs2_alloc_inode_cache(void)
{
        struct jffs2_inode_cache *ret;
        ret = kmem_cache_alloc(inode_cache_slab, GFP_KERNEL);
        dbg_memalloc("%p\n", ret);
        return ret;
}

void jffs2_free_inode_cache(struct jffs2_inode_cache *x)
{
        dbg_memalloc("%p\n", x);
        kmem_cache_free(inode_cache_slab, x);
}

#ifdef CONFIG_JFFS2_FS_XATTR
struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void)
{
        struct jffs2_xattr_datum *xd;
        xd = kmem_cache_zalloc(xattr_datum_cache, GFP_KERNEL);
        dbg_memalloc("%p\n", xd);

        xd->class = RAWNODE_CLASS_XATTR_DATUM;
        xd->node = (void *)xd;
        INIT_LIST_HEAD(&xd->xindex);
        return xd;
}

void jffs2_free_xattr_datum(struct jffs2_xattr_datum *xd)
{
        dbg_memalloc("%p\n", xd);
        kmem_cache_free(xattr_datum_cache, xd);
}

struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void)
{
        struct jffs2_xattr_ref *ref;
        ref = kmem_cache_zalloc(xattr_ref_cache, GFP_KERNEL);
        dbg_memalloc("%p\n", ref);

        ref->class = RAWNODE_CLASS_XATTR_REF;
        ref->node = (void *)ref;
        return ref;
}

void jffs2_free_xattr_ref(struct jffs2_xattr_ref *ref)
{
        dbg_memalloc("%p\n", ref);
        kmem_cache_free(xattr_ref_cache, ref);
}
#endif
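
How these entry points are used: jffs2_create_slab_caches() and jffs2_destroy_slab_caches() bracket the lifetime of the caches from the filesystem's module init/exit path in fs/jffs2/super.c. The sketch below is a simplified illustration of that wiring, not the verbatim kernel code; jffs2_fs_type stands in for the file_system_type defined in super.c, and the real init path also sets up the inode cache and compressors.

#include <linux/fs.h>
#include <linux/init.h>

static int __init init_jffs2_fs(void)
{
        int ret;

        ret = jffs2_create_slab_caches();       /* caches defined in malloc.c */
        if (ret)
                return ret;

        ret = register_filesystem(&jffs2_fs_type);
        if (ret)
                jffs2_destroy_slab_caches();    /* undo on failure */
        return ret;
}

static void __exit exit_jffs2_fs(void)
{
        unregister_filesystem(&jffs2_fs_type);
        jffs2_destroy_slab_caches();
}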