Linux Kernel 3.7.1
arch/hexagon/mm/init.c
/*
 * Memory subsystem initialization for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <asm/atomic.h>
#include <linux/highmem.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/vm_mmu.h>

/*
 * Define a startpg just past the end of the kernel image and a lastpg
 * that corresponds to the end of real or simulated platform memory.
 */
#define bootmem_startpg (PFN_UP(((unsigned long) _end) - PAGE_OFFSET))

unsigned long bootmem_lastpg;	/* Should be set by platform code */

/* Set as variable to limit PMD copies */
int max_kernel_seg = 0x303;

/* think this should be (page_size-1) the way it's used... */
unsigned long zero_page_mask;

/* indicate pfn's of high memory */
unsigned long highstart_pfn, highend_pfn;

/* struct mmu_gather defined in asm-generic.h; */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* Default cache attribute for newly created page tables */
unsigned long _dflt_cache_att = CACHEDEF;

/*
 * The current "generation" of kernel map, which should not roll
 * over until Hell freezes over.  Actual bound in years needs to be
 * calculated to confirm.
 */
DEFINE_SPINLOCK(kmap_gen_lock);

/* checkpatch says don't init this to 0. */
unsigned long long kmap_generation;

/*
 * mem_init - initializes memory
 *
 * Frees up bootmem
 * Fixes up more stuff for HIGHMEM
 * Calculates and displays memory available/used
 */
void __init mem_init(void)
{
	/* No idea where this is actually declared.  Seems to evade LXR. */
	totalram_pages += free_all_bootmem();
	num_physpages = bootmem_lastpg;	/* seriously, what? */

	printk(KERN_INFO "totalram_pages = %ld\n", totalram_pages);

	/*
	 * To-Do:  someone somewhere should wipe out the bootmem map
	 * after we're done?
	 */

	/*
	 * This can be moved to some more virtual-memory-specific
	 * initialization hook at some point.  Set the init_mm
	 * descriptors "context" value to point to the initial
	 * kernel segment table's physical address.
	 */
	init_mm.context.ptbase = __pa(init_mm.pgd);
}

/*
 * free_initmem - frees memory used by stuff declared with __init
 *
 * Todo:  free pages between __init_begin and __init_end; possibly
 * some devtree related stuff as well.
 */
void __init_refok free_initmem(void)
{
}

/*
 * free_initrd_mem - frees...  initrd memory.
 * @start - start of init memory
 * @end - end of init memory
 *
 * Apparently has to be passed the address of the initrd memory.
 *
 * Wrapped by #ifdef CONFIG_BLKDEV_INITRD
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
}

void sync_icache_dcache(pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	__vmcache_idsync(addr, PAGE_SIZE);
}

/*
 * In order to set up page allocator "nodes",
 * somebody has to call free_area_init() for UMA.
 *
 * In this mode, we only have one pg_data_t
 * structure: contig_mem_data.
 */
void __init paging_init(void)
{
	unsigned long zones_sizes[MAX_NR_ZONES] = {0, };

	/*
	 * This is not particularly well documented anywhere, but
	 * give ZONE_NORMAL all the memory, including the big holes
	 * left by the kernel+bootmem_map which are already left as reserved
	 * in the bootmem_map; free_area_init should see those bits and
	 * adjust accordingly.
	 */

	zones_sizes[ZONE_NORMAL] = max_low_pfn;

	free_area_init(zones_sizes);	/* sets up the zonelists and mem_map */

	/*
	 * Start of high memory area.  Will probably need something more
	 * fancy if we...  get more fancy.
	 */
	high_memory = (void *)((bootmem_lastpg + 1) << PAGE_SHIFT);
}

#ifndef DMA_RESERVE
#define DMA_RESERVE (4)
#endif

#define DMA_CHUNKSIZE (1<<22)
#define DMA_RESERVED_BYTES (DMA_RESERVE * DMA_CHUNKSIZE)
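
/*
 * Worked out from the defaults above: DMA_RESERVE = 4 and
 * DMA_CHUNKSIZE = 1<<22 bytes (4 MiB), so DMA_RESERVED_BYTES comes to
 * 4 * 4 MiB = 16 MiB of RAM held back at the top of memory for
 * uncached DMA allocations.
 */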

/*
 * Pick out the memory size.  We look for mem=size,
 * where size is "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	unsigned long size;
	char *endp;

	size = memparse(p, &endp);

	bootmem_lastpg = PFN_DOWN(size);

	return 0;
}
early_param("mem", early_mem);
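
/*
 * Worked example: booting with "mem=256M" makes memparse() return
 * 0x10000000, and PFN_DOWN() converts that byte count to a page frame
 * number.  Assuming the default 4 KiB page size (PAGE_SHIFT == 12;
 * Hexagon can be configured for larger pages), bootmem_lastpg becomes
 * 0x10000000 >> 12 == 0x10000.
 */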

size_t hexagon_coherent_pool_size = (size_t) (DMA_RESERVE << 22);

void __init setup_arch_memory(void)
{
	int bootmap_size;
	/* XXX Todo: this probably should be cleaned up */
	u32 *segtable = (u32 *) &swapper_pg_dir[0];
	u32 *segtable_end;

	/*
	 * Set up boot memory allocator
	 *
	 * The Gorman book also talks about these functions.
	 * This needs to change for highmem setups.
	 */

	/* Memory size needs to be a multiple of 16M */
	bootmem_lastpg = PFN_DOWN((bootmem_lastpg << PAGE_SHIFT) &
		~((BIG_KERNEL_PAGE_SIZE) - 1));
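
	/*
	 * For example, assuming BIG_KERNEL_PAGE_SIZE is the 16 MiB noted
	 * above and 4 KiB pages, a platform-reported bootmem_lastpg of
	 * 0x7f00 (127 MiB) is rounded down to 0x7000 (112 MiB), the
	 * largest 16 MiB multiple that fits.
	 */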

	/*
	 * Reserve the top DMA_RESERVE bytes of RAM for DMA (uncached)
	 * memory allocation
	 */
	bootmap_size = init_bootmem(bootmem_startpg, bootmem_lastpg -
				    PFN_DOWN(DMA_RESERVED_BYTES));

	printk(KERN_INFO "bootmem_startpg: 0x%08lx\n", bootmem_startpg);
	printk(KERN_INFO "bootmem_lastpg: 0x%08lx\n", bootmem_lastpg);
	printk(KERN_INFO "bootmap_size: %d\n", bootmap_size);
	printk(KERN_INFO "max_low_pfn: 0x%08lx\n", max_low_pfn);

	/*
	 * The default VM page tables (will be) populated with
	 * VA=PA+PAGE_OFFSET mapping.  We go in and invalidate entries
	 * higher than what we have memory for.
	 */

	/* this is pointer arithmetic; each entry covers 4MB */
	segtable = segtable + (PAGE_OFFSET >> 22);

	/* this actually only goes to the end of the first gig */
	segtable_end = segtable + (1<<(30-22));

	/* Move forward to the start of empty pages */
	segtable += bootmem_lastpg >> (22-PAGE_SHIFT);

	{
		int i;

		for (i = 1; i <= DMA_RESERVE; i++)
			segtable[-i] = ((segtable[-i] & __HVM_PTE_PGMASK_4MB)
				| __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X
				| __HEXAGON_C_UNC << 6
				| __HVM_PDE_S_4MB);
	}

	printk(KERN_INFO "clearing segtable from %p to %p\n", segtable,
		segtable_end);
	while (segtable < (segtable_end-8))
		*(segtable++) = __HVM_PDE_S_INVALID;
	/* stop the pointer at the device I/O 4MB page */

	printk(KERN_INFO "segtable = %p (should be equal to _K_io_map)\n",
		segtable);

#if 0
	/* Other half of the early device table from vm_init_segtable. */
	printk(KERN_INFO "&_K_init_devicetable = 0x%08x\n",
		(unsigned long) _K_init_devicetable-PAGE_OFFSET);
	*segtable = ((u32) (unsigned long) _K_init_devicetable-PAGE_OFFSET) |
		__HVM_PDE_S_4KB;
	printk(KERN_INFO "*segtable = 0x%08x\n", *segtable);
#endif

	/*
	 * Free all the memory that wasn't taken up by the bootmap, the DMA
	 * reserve, or kernel itself.
	 */
	free_bootmem(PFN_PHYS(bootmem_startpg)+bootmap_size,
		     PFN_PHYS(bootmem_lastpg - bootmem_startpg) - bootmap_size -
		     DMA_RESERVED_BYTES);

	/*
	 * The bootmem allocator seemingly just lives to feed memory
	 * to the paging system
	 */
	printk(KERN_INFO "PAGE_SIZE=%lu\n", PAGE_SIZE);
	paging_init();	/* See Gorman Book, 2.3 */

	/*
	 * At this point, the page allocator is kind of initialized, but
	 * apparently no pages are available (just like with the bootmem
	 * allocator), and need to be freed themselves via mem_init(),
	 * which is called by start_kernel() later on in the process
	 */
}