Linux Kernel 3.7.1
intmem.c
/*
 * Simple allocator for internal RAM in ETRAX FS
 *
 * Copyright (c) 2004 Axis Communications AB.
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <memmap.h>

#define STATUS_FREE 0
#define STATUS_ALLOCATED 1

#ifdef CONFIG_ETRAX_L2CACHE
#define RESERVED_SIZE 66*1024
#else
#define RESERVED_SIZE 0
#endif

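/*
 * Bookkeeping for one region of the internal RAM.  Regions are kept in
 * offset order on the intmem_allocations list and are either free or
 * allocated; free neighbours are merged again in crisv32_intmem_free().
 */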
struct intmem_allocation {
        struct list_head entry;
        unsigned int size;
        unsigned offset;
        char status;
};


static struct list_head intmem_allocations;
static void* intmem_virtual;

static void crisv32_intmem_init(void)
{
        static int initiated = 0;
        if (!initiated) {
                struct intmem_allocation* alloc;
                alloc = kmalloc(sizeof *alloc, GFP_KERNEL);
                INIT_LIST_HEAD(&intmem_allocations);
                intmem_virtual = ioremap(MEM_INTMEM_START + RESERVED_SIZE,
                                         MEM_INTMEM_SIZE - RESERVED_SIZE);
                initiated = 1;
                alloc->size = MEM_INTMEM_SIZE - RESERVED_SIZE;
                alloc->offset = 0;
                alloc->status = STATUS_FREE;
                list_add_tail(&alloc->entry, &intmem_allocations);
        }
}

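/*
 * First-fit allocation: walk the region list, pad the offset up to the
 * requested alignment, and split off any leading padding and trailing
 * remainder as new free regions before marking the fit as allocated.
 * Returns a virtual address inside the ioremap()ed window, or NULL.
 */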
void* crisv32_intmem_alloc(unsigned size, unsigned align)
{
        struct intmem_allocation* allocation;
        struct intmem_allocation* tmp;
        void* ret = NULL;

        preempt_disable();
        crisv32_intmem_init();

        list_for_each_entry_safe(allocation, tmp, &intmem_allocations, entry) {
                int alignment = allocation->offset % align;
                alignment = alignment ? align - alignment : alignment;

                if (allocation->status == STATUS_FREE &&
                    allocation->size >= size + alignment) {
                        if (allocation->size > size + alignment) {
                                struct intmem_allocation* alloc;
                                alloc = kmalloc(sizeof *alloc, GFP_ATOMIC);
                                alloc->status = STATUS_FREE;
                                alloc->size = allocation->size - size -
                                        alignment;
                                alloc->offset = allocation->offset + size +
                                        alignment;
                                list_add(&alloc->entry, &allocation->entry);

                                if (alignment) {
                                        struct intmem_allocation *tmp;
                                        tmp = kmalloc(sizeof *tmp, GFP_ATOMIC);
                                        tmp->offset = allocation->offset;
                                        tmp->size = alignment;
                                        tmp->status = STATUS_FREE;
                                        allocation->offset += alignment;
                                        list_add_tail(&tmp->entry,
                                                &allocation->entry);
                                }
                        }
                        allocation->status = STATUS_ALLOCATED;
                        allocation->size = size;
                        ret = (void*)((int)intmem_virtual + allocation->offset);
                }
        }
        preempt_enable();
        return ret;
}

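/*
 * Free by virtual address: find the region whose offset matches, mark it
 * free again and coalesce it with a free predecessor and/or successor so
 * the list does not fragment.
 */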
void crisv32_intmem_free(void* addr)
{
        struct intmem_allocation* allocation;
        struct intmem_allocation* tmp;

        if (addr == NULL)
                return;

        preempt_disable();
        crisv32_intmem_init();

        list_for_each_entry_safe(allocation, tmp, &intmem_allocations, entry) {
                if (allocation->offset == (int)(addr - intmem_virtual)) {
                        struct intmem_allocation *prev =
                                list_entry(allocation->entry.prev,
                                           struct intmem_allocation, entry);
                        struct intmem_allocation *next =
                                list_entry(allocation->entry.next,
                                           struct intmem_allocation, entry);

                        allocation->status = STATUS_FREE;
                        /* Join with prev and/or next if also free */
                        if ((prev != &intmem_allocations) &&
                            (prev->status == STATUS_FREE)) {
                                prev->size += allocation->size;
                                list_del(&allocation->entry);
                                kfree(allocation);
                                allocation = prev;
                        }
                        if ((next != &intmem_allocations) &&
                            (next->status == STATUS_FREE)) {
                                allocation->size += next->size;
                                list_del(&next->entry);
                                kfree(next);
                        }
                        preempt_enable();
                        return;
                }
        }
        preempt_enable();
}

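/*
 * Translate between physical addresses in the internal RAM (past the
 * optional L2-cache reservation) and the kernel virtual window set up by
 * crisv32_intmem_init().
 */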
void* crisv32_intmem_phys_to_virt(unsigned long addr)
{
        return (void *)(addr - (MEM_INTMEM_START + RESERVED_SIZE) +
                (unsigned long)intmem_virtual);
}

unsigned long crisv32_intmem_virt_to_phys(void* addr)
{
        return (unsigned long)((unsigned long)addr -
                (unsigned long)intmem_virtual + MEM_INTMEM_START +
                RESERVED_SIZE);
}

module_init(crisv32_intmem_init);
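
For context, a minimal usage sketch follows. It is not part of the kernel source: the buffer size, alignment and the example_setup() function are made up for illustration, and the prototypes are repeated here instead of pulling in the arch header. Only crisv32_intmem_alloc(), crisv32_intmem_virt_to_phys() and crisv32_intmem_free() come from the file above.

#include <linux/kernel.h>
#include <linux/errno.h>

/* Prototypes as defined in intmem.c; normally provided by an arch header. */
void *crisv32_intmem_alloc(unsigned size, unsigned align);
void crisv32_intmem_free(void *addr);
unsigned long crisv32_intmem_virt_to_phys(void *addr);

/* Hypothetical driver setup: carve a small, aligned area out of the
 * on-chip RAM, hand its physical address to the hardware, release it. */
static int example_setup(void)
{
        void *buf;
        unsigned long phys;

        buf = crisv32_intmem_alloc(256, 32);    /* 256 bytes, 32-byte aligned */
        if (!buf)
                return -ENOMEM;

        phys = crisv32_intmem_virt_to_phys(buf);
        pr_info("intmem buffer at %p (phys 0x%08lx)\n", buf, phys);

        crisv32_intmem_free(buf);
        return 0;
}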