Linux Kernel 3.7.1
mem_detect.c
/*
 * Copyright IBM Corp. 2008, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>

#define ADDR2G (1ULL << 31)

static void find_memory_chunks(struct mem_chunk chunk[])
{
        unsigned long long memsize, rnmax, rzm;
        unsigned long addr = 0, size;
        int i = 0, type;

        /* rzm is the storage increment size and rnmax the maximum number
         * of increments, both reported by the SCLP firmware interface. */
        rzm = sclp_get_rzm();
        rnmax = sclp_get_rnmax();
        memsize = rzm * rnmax;
        if (!rzm)
                rzm = 1ULL << 17;
        if (sizeof(long) == 4) {
                /* 31-bit kernel: cap increment size and scan range at 2G */
                rzm = min(ADDR2G, rzm);
                memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
        }
        do {
                size = 0;
                type = tprot(addr);
                /* Grow the chunk while consecutive increments report the
                 * same protection type. */
                do {
                        size += rzm;
                        if (memsize && addr + size >= memsize)
                                break;
                } while (type == tprot(addr + size));
                if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
                        chunk[i].addr = addr;
                        chunk[i].size = size;
                        chunk[i].type = type;
                        i++;
                }
                addr += size;
        } while (addr < memsize && i < MEMORY_CHUNKS);
}
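The nested loops above coalesce consecutive storage increments that report the same tprot protection type into a single chunk. The following host-side sketch simulates that scan with a mocked probe; mock_tprot, INCREMENT, N_INCR and main are hypothetical stand-ins for tprot(), rzm and rnmax, not kernel interfaces.

/* Hypothetical host-side sketch of the coalescing scan; mock_tprot,
 * INCREMENT and N_INCR are illustrative stand-ins only. */
#include <stdio.h>

#define INCREMENT 4u    /* stand-in for rzm (storage increment size) */
#define N_INCR    8u    /* stand-in for rnmax (number of increments) */

static int mock_tprot(unsigned int addr)
{
        /* simulated protection type per increment */
        static const int type[N_INCR] = { 0, 0, 0, 1, 1, 0, 0, 0 };

        return type[addr / INCREMENT];
}

int main(void)
{
        unsigned int memsize = INCREMENT * N_INCR;
        unsigned int addr = 0, size;
        int type;

        do {
                size = 0;
                type = mock_tprot(addr);
                do {    /* grow the chunk while the type repeats */
                        size += INCREMENT;
                        if (addr + size >= memsize)
                                break;
                } while (type == mock_tprot(addr + size));
                printf("chunk: addr=%u size=%u type=%d\n", addr, size, type);
                addr += size;
        } while (addr < memsize);
        return 0;
}

With the mocked types above this prints three chunks: increments 0-2, 3-4, and 5-7 merge into one chunk each, which is exactly how find_memory_chunks() builds its array.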
void detect_memory_layout(struct mem_chunk chunk[])
{
        unsigned long flags, cr0;

        memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
        /* Disable IRQs, DAT and low address protection so tprot does the
         * right thing and we don't get scheduled away with low address
         * protection disabled.
         */
        flags = __arch_local_irq_stnsm(0xf8);
        __ctl_store(cr0, 0, 0);
        __ctl_clear_bit(0, 28);
        find_memory_chunks(chunk);
        __ctl_load(cr0, 0, 0);
        arch_local_irq_restore(flags);
}
EXPORT_SYMBOL(detect_memory_layout);
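A caller passes an array of MEMORY_CHUNKS entries; unused slots stay zeroed by the memset above. A minimal usage sketch follows (example_print_layout is a hypothetical function, not part of this file; detect_memory_layout, MEMORY_CHUNKS and the struct mem_chunk fields are taken from the code above):

/* Hypothetical caller sketch; assumes this file's headers are included. */
static void example_print_layout(void)
{
        struct mem_chunk chunks[MEMORY_CHUNKS];
        int i;

        detect_memory_layout(chunks);
        for (i = 0; i < MEMORY_CHUNKS; i++) {
                if (chunks[i].size == 0)
                        continue;       /* slot unused, left zeroed */
                printk(KERN_INFO "chunk %d: %#lx-%#lx type %d\n", i,
                       chunks[i].addr,
                       chunks[i].addr + chunks[i].size - 1,
                       chunks[i].type);
        }
}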
/*
 * Move memory chunks array from index "from" to index "to"
 */
static void mem_chunk_move(struct mem_chunk chunk[], int to, int from)
{
        int cnt = MEMORY_CHUNKS - to;

        memmove(&chunk[to], &chunk[from], cnt * sizeof(struct mem_chunk));
}

/*
 * Initialize memory chunk
 */
static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr,
                           unsigned long size, int type)
{
        chunk->type = type;
        chunk->addr = addr;
        chunk->size = size;
}
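Together these two helpers implement the splice step used by create_mem_hole() below: mem_chunk_move() opens a gap by shifting entries toward the end of the array (anything shifted past MEMORY_CHUNKS is dropped), and mem_chunk_init() fills the gap. A small host-side sketch of the effect, with MEMORY_CHUNKS shrunk to 4 and a hypothetical main():

/* Host-side illustration only; mirrors the two helpers above. */
#include <stdio.h>
#include <string.h>

#define MEMORY_CHUNKS 4 /* shrunk for the example */

struct mem_chunk {
        unsigned long addr;
        unsigned long size;
        int type;
};

static void mem_chunk_move(struct mem_chunk chunk[], int to, int from)
{
        int cnt = MEMORY_CHUNKS - to;

        memmove(&chunk[to], &chunk[from], cnt * sizeof(struct mem_chunk));
}

static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr,
                           unsigned long size, int type)
{
        chunk->type = type;
        chunk->addr = addr;
        chunk->size = size;
}

int main(void)
{
        struct mem_chunk c[MEMORY_CHUNKS] = {
                { 0x000, 0x100, 0 },
                { 0x100, 0x100, 0 },
        };
        int i;

        /* Open a gap at index 2 (entries 2..3 take the old 1..2),
         * then split old entry 1 into a hole and a remainder. */
        mem_chunk_move(c, 2, 1);
        mem_chunk_init(&c[1], 0x100, 0x80, 1);
        mem_chunk_init(&c[2], 0x180, 0x80, 0);

        for (i = 0; i < MEMORY_CHUNKS; i++)
                printf("%d: addr=%#lx size=%#lx type=%d\n",
                       i, c[i].addr, c[i].size, c[i].type);
        return 0;
}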
/*
 * Create memory hole with given address, size, and type
 */
void create_mem_hole(struct mem_chunk chunk[], unsigned long addr,
                     unsigned long size, int type)
{
        unsigned long lh_start, lh_end, lh_size, ch_start, ch_end, ch_size;
        int i, ch_type;

        for (i = 0; i < MEMORY_CHUNKS; i++) {
                if (chunk[i].size == 0)
                        continue;

                /* Define chunk properties */
                ch_start = chunk[i].addr;
                ch_size = chunk[i].size;
                ch_end = ch_start + ch_size - 1;
                ch_type = chunk[i].type;

                /* Is memory chunk hit by memory hole? */
                if (addr + size <= ch_start)
                        continue; /* No: memory hole in front of chunk */
                if (addr > ch_end)
                        continue; /* No: memory hole after chunk */

                /* Yes: Define local hole properties */
                lh_start = max(addr, chunk[i].addr);
                lh_end = min(addr + size - 1, ch_end);
                lh_size = lh_end - lh_start + 1;

                if (lh_start == ch_start && lh_end == ch_end) {
                        /* Hole covers complete memory chunk */
                        mem_chunk_init(&chunk[i], lh_start, lh_size, type);
                } else if (lh_end == ch_end) {
                        /* Hole starts in memory chunk and covers chunk end */
                        mem_chunk_move(chunk, i + 1, i);
                        mem_chunk_init(&chunk[i], ch_start, ch_size - lh_size,
                                       ch_type);
                        mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
                        i += 1;
                } else if (lh_start == ch_start) {
                        /* Hole ends in memory chunk */
                        mem_chunk_move(chunk, i + 1, i);
                        mem_chunk_init(&chunk[i], lh_start, lh_size, type);
                        mem_chunk_init(&chunk[i + 1], lh_end + 1,
                                       ch_size - lh_size, ch_type);
                        break;
                } else {
                        /* Hole splits memory chunk */
                        mem_chunk_move(chunk, i + 2, i);
                        mem_chunk_init(&chunk[i], ch_start,
                                       lh_start - ch_start, ch_type);
                        mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
                        mem_chunk_init(&chunk[i + 2], lh_end + 1,
                                       ch_end - lh_end, ch_type);
                        break;
                }
        }
}
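Punching a mid-chunk hole exercises the final branch. A hypothetical usage sketch (example_reserve_oldmem and all values are illustrative; CHUNK_OLDMEM is assumed to be the chunk type defined in asm/setup.h and passed here by this kernel's dump code):

/* Hypothetical usage sketch; values are illustrative. */
static void example_reserve_oldmem(void)
{
        struct mem_chunk chunk[MEMORY_CHUNKS];

        detect_memory_layout(chunk);
        /* Suppose chunk[0] = { 0x0, 0x20000000, CHUNK_READ_WRITE } (512 MB).
         * The call below takes the "hole splits memory chunk" branch,
         * leaving:
         *   chunk[0] = { 0x0,       0x8000000,  CHUNK_READ_WRITE }
         *   chunk[1] = { 0x8000000, 0x4000000,  CHUNK_OLDMEM }
         *   chunk[2] = { 0xc000000, 0x14000000, CHUNK_READ_WRITE }
         */
        create_mem_hole(chunk, 0x8000000, 0x4000000, CHUNK_OLDMEM);
}

The three resulting sizes (128 MB + 64 MB + 320 MB) add back up to the original 512 MB chunk, since the split branch preserves the chunk's total extent.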