Linux Kernel  3.7.1
inventory.c
/*
 * inventory.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
 * Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
 *
 * These are the routines to discover what hardware exists in this box.
 * This task is complicated by there being 3 different ways of
 * performing an inventory, depending largely on the age of the box.
 * The recommended way to do this is to check to see whether the machine
 * is a `Snake' first, then try System Map, then try PAT.  We try System
 * Map before checking for a Snake -- this probably doesn't cause any
 * problems, but...
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/mmzone.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/parisc-device.h>

/*
** Debug options
**	DEBUG_PAT	Dump details which PDC PAT provides about ranges/devices.
*/
#undef DEBUG_PAT
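/* To get that dump, change the #undef above to #define DEBUG_PAT; the extra
 * output is produced in pat_query_module() and pat_inventory() below, so it
 * only applies to 64-bit PAT machines. */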

int pdc_type = PDC_TYPE_ILLEGAL;

void __init setup_pdc(void)
{
	long status;
	unsigned int bus_id;
	struct pdc_system_map_mod_info module_result;
	struct pdc_module_path module_path;
	struct pdc_model model;
#ifdef CONFIG_64BIT
	struct pdc_pat_cell_num cell_info;
#endif

	/* Determine the pdc "type" used on this machine */

	printk(KERN_INFO "Determining PDC firmware type: ");

	status = pdc_system_map_find_mods(&module_result, &module_path, 0);
	if (status == PDC_OK) {
		pdc_type = PDC_TYPE_SYSTEM_MAP;
		printk("System Map.\n");
		return;
	}

	/*
	 * If the machine doesn't support PDC_SYSTEM_MAP then either it
	 * is a pdc pat box, or it is an older box. All 64 bit capable
	 * machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
	 */

	/*
	 * TODO: We should test for 64 bit capability and give a
	 * clearer message.
	 */

#ifdef CONFIG_64BIT
	status = pdc_pat_cell_get_number(&cell_info);
	if (status == PDC_OK) {
		pdc_type = PDC_TYPE_PAT;
		printk("64 bit PAT.\n");
		return;
	}
#endif

	/* Check the CPU's bus ID.  There's probably a better test. */

	status = pdc_model_info(&model);

	bus_id = (model.hversion >> (4 + 7)) & 0x1f;

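	/*
	 * The 5-bit bus ID sits above the low 11 bits of the hversion word,
	 * hence the shift by (4 + 7). Illustration with a made-up value: an
	 * hversion of 0x2040 would give (0x2040 >> 11) & 0x1f == 0x4, i.e.
	 * the 720/730/750 class handled below.
	 */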
	switch (bus_id) {
	case 0x4:		/* 720, 730, 750, 735, 755 */
	case 0x6:		/* 705, 710 */
	case 0x7:		/* 715, 725 */
	case 0x8:		/* 745, 747, 742 */
	case 0xA:		/* 712 and similar */
	case 0xC:		/* 715/64, at least */

		pdc_type = PDC_TYPE_SNAKE;
		printk("Snake.\n");
		return;

	default:		/* Everything else */

		printk("Unsupported.\n");
		panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
	}
}
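/*
 * Everything below relies on the pdc_type set here: do_memory_inventory()
 * and do_device_inventory() at the end of this file switch on it to pick
 * the PAT, System Map or Snake flavour of the firmware calls.
 */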

#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */
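/*
 * Worked example: with a 16 KB kernel page size (PAGE_SHIFT == 14) the
 * adjustment shift is 2, so a PDC count of 256 4 KB pages (1 MB) becomes
 * 64 kernel pages in set_pmem_entry() below.
 */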

static void __init
set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
	       unsigned long pages4k)
{
	/* Rather than aligning and potentially throwing away
	 * memory, we'll assume that any ranges are already
	 * nicely aligned with any reasonable page size, and
	 * panic if they are not (it's more likely that the
	 * pdc info is bad in this case).
	 */

	if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
		|| ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) {

		panic("Memory range doesn't align with page size!\n");
	}

	pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
	pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
}
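/*
 * pmem_ranges[] and npmem_ranges are the firmware-independent description of
 * physical memory that the rest of the boot code consumes; the *_memconfig()
 * variants below differ only in which PDC call they use to fill it in.
 */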

static void __init pagezero_memconfig(void)
{
	unsigned long npages;

	/* Use the 32 bit information from page zero to create a single
	 * entry in the pmem_ranges[] table.
	 *
	 * We currently don't support machines with contiguous memory
	 * >= 4 Gb, who report that memory using 64 bit only fields
	 * on page zero. It's not worth doing until it can be tested,
	 * and it is not clear we can support those machines for other
	 * reasons.
	 *
	 * If that support is done in the future, this is where it
	 * should be done.
	 */

	npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
	set_pmem_entry(pmem_ranges, 0UL, npages);
	npmem_ranges = 1;
}
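/*
 * PAGE0 is the firmware-maintained "page zero" at the start of physical
 * memory; imm_max_mem is its 32-bit byte count of installed memory, which is
 * why this path is limited to boxes with less than 4 GB of contiguous RAM.
 */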

#ifdef CONFIG_64BIT

/* All of the PDC PAT specific code is 64-bit only */

/*
** The module object is filled via PDC_PAT_CELL[Return Cell Module].
** If a module is found, register module will get the IODC bytes via
** pdc_iodc_read() using the PA view of conf_base_addr for the hpa parameter.
**
** The IO view can be used by PDC_PAT_CELL[Return Cell Module]
** only for SBAs and LBAs.  This view will cause an invalid
** argument error for all other cell module types.
**
*/

static int __init
pat_query_module(ulong pcell_loc, ulong mod_index)
{
	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
	unsigned long bytecnt;
	unsigned long temp;	/* 64-bit scratch value */
	long status;		/* PDC return value status */
	struct parisc_device *dev;

	pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
	if (!pa_pdc_cell)
		panic("couldn't allocate memory for PDC_PAT_CELL!");

	/* return cell module (PA or Processor view) */
	status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				     PA_VIEW, pa_pdc_cell);

	if (status != PDC_OK) {
		/* no more cell modules or error */
		kfree(pa_pdc_cell);
		return status;
	}

	temp = pa_pdc_cell->cba;
	dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
	if (!dev) {
		kfree(pa_pdc_cell);
		return PDC_OK;
	}

	/* alloc_pa_dev sets dev->hpa */

	/*
	** save parameters in the parisc_device
	** (The idea being the device driver will call pdc_pat_cell_module()
	** and store the results in its own data structure.)
	*/
	dev->pcell_loc = pcell_loc;
	dev->mod_index = mod_index;

	/* save generic info returned from the call */
	/* REVISIT: who is the consumer of this? not sure yet... */
	dev->mod_info = pa_pdc_cell->mod_info;	/* pass to PAT_GET_ENTITY() */
	dev->pmod_loc = pa_pdc_cell->mod_location;

	register_parisc_device(dev);	/* advertise device */

#ifdef DEBUG_PAT
	pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
	/* dump what we see so far... */
	switch (PAT_GET_ENTITY(dev->mod_info)) {
		unsigned long i;

	case PAT_ENTITY_PROC:
		printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
			pa_pdc_cell->mod[0]);
		break;

	case PAT_ENTITY_MEM:
		printk(KERN_DEBUG
			"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
			pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
			pa_pdc_cell->mod[2]);
		break;
	case PAT_ENTITY_CA:
		printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
		break;

	case PAT_ENTITY_PBC:
		printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
		goto print_ranges;

	case PAT_ENTITY_SBA:
		printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
		goto print_ranges;

	case PAT_ENTITY_LBA:
		printk(KERN_DEBUG "PAT_ENTITY_LBA: ");

 print_ranges:
		pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				    IO_VIEW, &io_pdc_cell);
		printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
		for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
			printk(KERN_DEBUG
				"  PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, pa_pdc_cell->mod[2 + i * 3],	/* type */
				pa_pdc_cell->mod[3 + i * 3],	/* start */
				pa_pdc_cell->mod[4 + i * 3]);	/* finish (ie end) */
			printk(KERN_DEBUG
				"  IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, io_pdc_cell.mod[2 + i * 3],	/* type */
				io_pdc_cell.mod[3 + i * 3],	/* start */
				io_pdc_cell.mod[4 + i * 3]);	/* finish (ie end) */
		}
		printk(KERN_DEBUG "\n");
		break;
	}
#endif /* DEBUG_PAT */

	kfree(pa_pdc_cell);

	return PDC_OK;
}


/* pat pdc can return information about a variety of different
 * types of memory (e.g. firmware, i/o, etc) but we only care about
 * the usable physical ram right now. Since the firmware specific
 * information is allocated on the stack, we'll be generous, in
 * case there is a lot of other information we don't care about.
 */

#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)
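/*
 * pat_memconfig() below sizes its on-stack table at PAT_MAX_RANGES + 1
 * entries: the spare slot lets it notice when the firmware returned more
 * ranges than it is prepared to keep, which triggers the "more memory
 * ranges than we support" warning.
 */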

static void __init pat_memconfig(void)
{
	unsigned long actual_len;
	struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES + 1];
	struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	unsigned long length;
	int i;

	length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);

	status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);

	if ((status != PDC_OK)
	    || ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {

		/* The above pdc call shouldn't fail, but, just in
		 * case, just use the PAGE0 info.
		 */

		printk("\n\n\n");
		printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
			"All memory may not be used!\n\n\n");
		pagezero_memconfig();
		return;
	}

	entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);

	if (entries > PAT_MAX_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
	}

	/* Copy information into the firmware independent pmem_ranges
	 * array, skipping types we don't care about. Notice we said
	 * "may" above. We'll use all the entries that were returned.
	 */

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges;	/* Global firmware independent table */
	for (i = 0; i < entries; i++, mtbl_ptr++) {
		if ((mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
		    || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
		    || (mtbl_ptr->pages == 0)
		    || ((mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI))) {

			continue;
		}

		if (npmem_ranges == MAX_PHYSMEM_RANGES) {
			printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
			printk(KERN_WARNING "Some memory will not be used!\n");
			break;
		}

		set_pmem_entry(pmem_ptr++, mtbl_ptr->paddr, mtbl_ptr->pages);
		npmem_ranges++;
	}
}

static int __init pat_inventory(void)
{
	int status;
	ulong mod_index = 0;
	struct pdc_pat_cell_num cell_info;

	/*
	** Note: Prelude (and its successors: Lclass, A400/500) only
	** implement PDC_PAT_CELL sub-options 0 and 2.
	*/
	status = pdc_pat_cell_get_number(&cell_info);
	if (status != PDC_OK) {
		return 0;
	}

#ifdef DEBUG_PAT
	printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num,
	       cell_info.cell_loc);
#endif

	while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
		mod_index++;
	}

	return mod_index;
}
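/*
 * The walk above stops at the first module index for which
 * PDC_PAT_CELL[Return Cell Module] fails, so the return value is simply the
 * number of modules found starting from index 0.
 */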

/* We only look for extended memory ranges on a 64 bit capable box */
static void __init sprockets_memconfig(void)
{
	struct pdc_memory_table_raddr r_addr;
	struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
	struct pdc_memory_table *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	int i;

	status = pdc_mem_mem_table(&r_addr, mem_table,
				   (unsigned long)MAX_PHYSMEM_RANGES);

	if (status != PDC_OK) {

		/* The above pdc call only works on boxes with sprockets
		 * firmware (newer B,C,J class). Other non PAT PDC machines
		 * do support more than 3.75 Gb of memory, but we don't
		 * support them yet.
		 */

		pagezero_memconfig();
		return;
	}

	if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory will not be used!\n");
	}

	entries = (int)r_addr.entries_returned;

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges;	/* Global firmware independent table */
	for (i = 0; i < entries; i++, mtbl_ptr++) {
		set_pmem_entry(pmem_ptr++, mtbl_ptr->paddr, mtbl_ptr->pages);
		npmem_ranges++;
	}
}

#else	/* !CONFIG_64BIT */

#define pat_inventory() do { } while (0)
#define pat_memconfig() do { } while (0)
#define sprockets_memconfig() pagezero_memconfig()

#endif	/* !CONFIG_64BIT */
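/*
 * On 32-bit kernels the PAT paths compile away entirely (32-bit boxes are
 * not PDC PAT machines), and sprockets_memconfig() simply falls back to the
 * page-zero information.
 */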


#ifndef CONFIG_PA20

/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */

static struct parisc_device * __init
legacy_create_device(struct pdc_memory_map *r_addr,
		struct pdc_module_path *module_path)
{
	struct parisc_device *dev;
	int status = pdc_mem_map_hpa(r_addr, module_path);
	if (status != PDC_OK)
		return NULL;

	dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
	if (dev == NULL)
		return NULL;

	register_parisc_device(dev);
	return dev;
}

/**
 * snake_inventory
 *
 * Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
 * To use it, we initialise the mod_path.bc to 0xff and try all values of
 * mod to get the HPA for the top-level devices.  Bus adapters may have
 * sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
 * module, then trying all possible functions.
 */
static void __init snake_inventory(void)
{
	int mod;
	for (mod = 0; mod < 16; mod++) {
		struct parisc_device *dev;
		struct pdc_module_path module_path;
		struct pdc_memory_map r_addr;
		unsigned int func;

		memset(module_path.path.bc, 0xff, 6);
		module_path.path.mod = mod;
		dev = legacy_create_device(&r_addr, &module_path);
		if ((!dev) || (dev->id.hw_type != HPHW_BA))
			continue;

		memset(module_path.path.bc, 0xff, 4);
		module_path.path.bc[4] = mod;

		for (func = 0; func < 16; func++) {
			module_path.path.bc[5] = 0;
			module_path.path.mod = func;
			legacy_create_device(&r_addr, &module_path);
		}
	}
}

#else /* CONFIG_PA20 */
#define snake_inventory() do { } while (0)
#endif	/* CONFIG_PA20 */

/* Common 32/64 bit based code goes here */

/**
 * add_system_map_addresses - Add additional addresses to the parisc device.
 * @dev: The parisc device.
 * @num_addrs: The number of addresses.
 * @module_instance: The system_map module instance.
 *
 * This function adds any additional addresses reported by the system_map
 * firmware to the parisc device.
 */
static void __init
add_system_map_addresses(struct parisc_device *dev, int num_addrs,
			 int module_instance)
{
	int i;
	long status;
	struct pdc_system_map_addr_info addr_result;

	dev->addr = kmalloc(num_addrs * sizeof(unsigned long), GFP_KERNEL);
	if (!dev->addr) {
		printk(KERN_ERR "%s %s(): memory allocation failure\n",
		       __FILE__, __func__);
		return;
	}

	for (i = 1; i <= num_addrs; ++i) {
		status = pdc_system_map_find_addrs(&addr_result,
						   module_instance, i);
		if (PDC_OK == status) {
			dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
			dev->num_addrs++;
		} else {
			printk(KERN_WARNING
			       "Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
			       status, i);
		}
	}
}
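/*
 * Note that dev->num_addrs only counts the addresses actually returned by
 * PDC_FIND_ADDRESS, so it can end up smaller than the add_addrs count that
 * sized the dev->addr array.
 */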

/**
 * system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
 *
 * This function attempts to retrieve and register all the devices the
 * firmware knows about via the SYSTEM_MAP PDC call.
 */
static void __init system_map_inventory(void)
{
	int i;
	long status = PDC_OK;

	for (i = 0; i < 256; i++) {
		struct parisc_device *dev;
		struct pdc_system_map_mod_info module_result;
		struct pdc_module_path module_path;

		status = pdc_system_map_find_mods(&module_result,
				&module_path, i);
		if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD))
			break;
		if (status != PDC_OK)
			continue;

		dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
		if (!dev)
			continue;

		register_parisc_device(dev);

		/* if available, get the additional addresses for a module */
		if (!module_result.add_addrs)
			continue;

		add_system_map_addresses(dev, module_result.add_addrs, i);
	}

	walk_central_bus();
	return;
}

void __init do_memory_inventory(void)
{
	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_memconfig();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		sprockets_memconfig();
		break;

	case PDC_TYPE_SNAKE:
		pagezero_memconfig();
		return;

	default:
		panic("Unknown PDC type!\n");
	}

	if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
		printk(KERN_WARNING "Bad memory configuration returned!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
		pagezero_memconfig();
	}
}

void __init do_device_inventory(void)
{
	printk(KERN_INFO "Searching for devices...\n");

	init_parisc_bus();

	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_inventory();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		system_map_inventory();
		break;

	case PDC_TYPE_SNAKE:
		snake_inventory();
		break;

	default:
		panic("Unknown PDC type!\n");
	}
	printk(KERN_INFO "Found devices:\n");
	print_parisc_devices();
}