Linux Kernel 3.7.1
sclp_cmd.c
/*
 * Copyright IBM Corp. 2007, 2009
 *
 * Author(s): Heiko Carstens <[email protected]>,
 *            Peter Oberparleiter <[email protected]>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include <asm/setup.h>
#include <asm/ctl_reg.h>

#include "sclp.h"

#define SCLP_CMDW_READ_SCP_INFO         0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED  0x00120001
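/*
 * SCLP command words, as used throughout this file, carry the command in
 * the high-order halfword and 0x0001 in the low-order halfword. Commands
 * that target a single object (CPU number, storage element ID, CHPID)
 * merge the object ID into bits 8-15 of the command word; see the
 * "cmd | id << 8" call sites below.
 */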

struct read_info_sccb {
        struct sccb_header header;      /* 0-7 */
        u16 rnmax;                      /* 8-9 */
        u8 rnsize;                      /* 10 */
        u8 _reserved0[24 - 11];         /* 11-23 */
        u8 loadparm[8];                 /* 24-31 */
        u8 _reserved1[48 - 32];         /* 32-47 */
        u64 facilities;                 /* 48-55 */
        u8 _reserved2[84 - 56];         /* 56-83 */
        u8 fac84;                       /* 84 */
        u8 fac85;                       /* 85 */
        u8 _reserved3[91 - 86];         /* 86-90 */
        u8 flags;                       /* 91 */
        u8 _reserved4[100 - 92];        /* 92-99 */
        u32 rnsize2;                    /* 100-103 */
        u64 rnmax2;                     /* 104-111 */
        u8 _reserved5[4096 - 112];      /* 112-4095 */
} __attribute__((packed, aligned(PAGE_SIZE)));

static struct init_sccb __initdata early_event_mask_sccb __aligned(PAGE_SIZE);
static struct read_info_sccb __initdata early_read_info_sccb;
static int __initdata early_read_info_sccb_valid;

u64 sclp_facilities;
static u8 sclp_fac84;
static u8 sclp_fac85;
static unsigned long long rzm;
static unsigned long long rnmax;

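/*
 * Synchronous service call for use before the regular driver
 * infrastructure is up: enable the service-signal external interrupt
 * subclass (control register 0, bit 9), start the request and wait for
 * its completion signal in an enabled wait PSW, then disable the
 * subclass again.
 */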
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
{
        int rc;

        __ctl_set_bit(0, 9);
        rc = sclp_service_call(cmd, sccb);
        if (rc)
                goto out;
        __load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
                        PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
        local_irq_disable();
out:
        /* Contents of the sccb might have changed. */
        barrier();
        __ctl_clear_bit(0, 9);
        return rc;
}

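/*
 * Read SCP information into early_read_info_sccb. The forced variant of
 * the command is tried first; if it is rejected with response code 0x1f0,
 * fall back to the plain READ_SCP_INFO command. Response code 0x10
 * indicates valid data.
 */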
static void __init sclp_read_info_early(void)
{
        int rc;
        int i;
        struct read_info_sccb *sccb;
        sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
                                  SCLP_CMDW_READ_SCP_INFO};

        sccb = &early_read_info_sccb;
        for (i = 0; i < ARRAY_SIZE(commands); i++) {
                do {
                        memset(sccb, 0, sizeof(*sccb));
                        sccb->header.length = sizeof(*sccb);
                        sccb->header.function_code = 0x80;
                        sccb->header.control_mask[2] = 0x80;
                        rc = sclp_cmd_sync_early(commands[i], sccb);
                } while (rc == -EBUSY);

                if (rc)
                        break;
                if (sccb->header.response_code == 0x10) {
                        early_read_info_sccb_valid = 1;
                        break;
                }
                if (sccb->header.response_code != 0x1f0)
                        break;
        }
}

static void __init sclp_event_mask_early(void)
{
        struct init_sccb *sccb = &early_event_mask_sccb;
        int rc;

        do {
                memset(sccb, 0, sizeof(*sccb));
                sccb->header.length = sizeof(*sccb);
                sccb->mask_length = sizeof(sccb_mask_t);
                rc = sclp_cmd_sync_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
        } while (rc == -EBUSY);
}

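/*
 * Cache the facility bits and the memory increment geometry from the SCP
 * info block: rnmax is the number of storage increments, rzm the increment
 * size. Both exist as a small legacy field and a larger replacement field;
 * whichever is non-zero wins. The size fields are given in megabytes,
 * hence the shift by 20.
 */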
void __init sclp_facilities_detect(void)
{
        struct read_info_sccb *sccb;

        sclp_read_info_early();
        if (!early_read_info_sccb_valid)
                return;

        sccb = &early_read_info_sccb;
        sclp_facilities = sccb->facilities;
        sclp_fac84 = sccb->fac84;
        sclp_fac85 = sccb->fac85;
        rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
        rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
        rzm <<= 20;

        sclp_event_mask_early();
}

bool __init sclp_has_linemode(void)
{
        struct init_sccb *sccb = &early_event_mask_sccb;

        if (sccb->header.response_code != 0x20)
                return 0;
        if (sccb->sclp_send_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))
                return 1;
        return 0;
}

bool __init sclp_has_vt220(void)
{
        struct init_sccb *sccb = &early_event_mask_sccb;

        if (sccb->header.response_code != 0x20)
                return 0;
        if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
                return 1;
        return 0;
}

unsigned long long sclp_get_rnmax(void)
{
        return rnmax;
}

unsigned long long sclp_get_rzm(void)
{
        return rzm;
}

u8 sclp_get_fac85(void)
{
        return sclp_fac85;
}
EXPORT_SYMBOL_GPL(sclp_get_fac85);

/*
 * This function will be called after sclp_facilities_detect(), which gets
 * called from early.c code. Therefore the sccb should have valid contents.
 */
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
{
        struct read_info_sccb *sccb;

        if (!early_read_info_sccb_valid)
                return;
        sccb = &early_read_info_sccb;
        info->is_valid = 1;
        if (sccb->flags & 0x2)
                info->has_dump = 1;
        memcpy(&info->loadparm, &sccb->loadparm, LOADPARM_LEN);
}

static void sclp_sync_callback(struct sclp_req *req, void *data)
{
        struct completion *completion = data;

        complete(completion);
}

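/*
 * Execute an SCLP command synchronously: queue the request with
 * sclp_add_request() and block on a completion that the callback signals
 * when the request is finished. The caller provides the sccb and checks
 * its response code; this helper only checks that the request itself
 * reached SCLP_REQ_DONE.
 */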
static int do_sync_request(sclp_cmdw_t cmd, void *sccb)
{
        struct completion completion;
        struct sclp_req *request;
        int rc;

        request = kzalloc(sizeof(*request), GFP_KERNEL);
        if (!request)
                return -ENOMEM;
        request->command = cmd;
        request->sccb = sccb;
        request->status = SCLP_REQ_FILLED;
        request->callback = sclp_sync_callback;
        request->callback_data = &completion;
        init_completion(&completion);

        /* Perform sclp request. */
        rc = sclp_add_request(request);
        if (rc)
                goto out;
        wait_for_completion(&completion);

        /* Check response. */
        if (request->status != SCLP_REQ_DONE) {
                pr_warning("sync request failed (cmd=0x%08x, "
                           "status=0x%02x)\n", cmd, request->status);
                rc = -EIO;
        }
out:
        kfree(request);
        return rc;
}

/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_READ_CPU_INFO         0x00010001
#define SCLP_CMDW_CONFIGURE_CPU         0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU       0x00100001

struct read_cpu_info_sccb {
        struct sccb_header header;
        u16 nr_configured;
        u16 offset_configured;
        u16 nr_standby;
        u16 offset_standby;
        u8 reserved[4096 - 16];
} __attribute__((packed, aligned(PAGE_SIZE)));

static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
                               struct read_cpu_info_sccb *sccb)
{
        char *page = (char *) sccb;

        memset(info, 0, sizeof(*info));
        info->configured = sccb->nr_configured;
        info->standby = sccb->nr_standby;
        info->combined = sccb->nr_configured + sccb->nr_standby;
        info->has_cpu_type = sclp_fac84 & 0x1;
        memcpy(&info->cpu, page + sccb->offset_configured,
               info->combined * sizeof(struct sclp_cpu_entry));
}

int sclp_get_cpu_info(struct sclp_cpu_info *info)
{
        int rc;
        struct read_cpu_info_sccb *sccb;

        if (!SCLP_HAS_CPU_INFO)
                return -EOPNOTSUPP;
        sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sccb)
                return -ENOMEM;
        sccb->header.length = sizeof(*sccb);
        rc = do_sync_request(SCLP_CMDW_READ_CPU_INFO, sccb);
        if (rc)
                goto out;
        if (sccb->header.response_code != 0x0010) {
                pr_warning("readcpuinfo failed (response=0x%04x)\n",
                           sccb->header.response_code);
                rc = -EIO;
                goto out;
        }
        sclp_fill_cpu_info(info, sccb);
out:
        free_page((unsigned long) sccb);
        return rc;
}

struct cpu_configure_sccb {
        struct sccb_header header;
} __attribute__((packed, aligned(8)));

static int do_cpu_configure(sclp_cmdw_t cmd)
{
        struct cpu_configure_sccb *sccb;
        int rc;

        if (!SCLP_HAS_CPU_RECONFIG)
                return -EOPNOTSUPP;
        /*
         * This is not going to cross a page boundary since we force
         * kmalloc to have a minimum alignment of 8 bytes on s390.
         */
        sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
        if (!sccb)
                return -ENOMEM;
        sccb->header.length = sizeof(*sccb);
        rc = do_sync_request(cmd, sccb);
        if (rc)
                goto out;
        switch (sccb->header.response_code) {
        case 0x0020:
        case 0x0120:
                break;
        default:
                pr_warning("configure cpu failed (cmd=0x%08x, "
                           "response=0x%04x)\n", cmd,
                           sccb->header.response_code);
                rc = -EIO;
                break;
        }
out:
        kfree(sccb);
        return rc;
}

int sclp_cpu_configure(u8 cpu)
{
        return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
}

int sclp_cpu_deconfigure(u8 cpu)
{
        return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
}

#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
static int sclp_mem_state_changed;

struct memory_increment {
        struct list_head list;
        u16 rn;
        int standby;
        int usecount;
};

struct assign_storage_sccb {
        struct sccb_header header;
        u16 rn;
} __packed;

int arch_get_memory_phys_device(unsigned long start_pfn)
{
        if (!rzm)
                return 0;
        return PFN_PHYS(start_pfn) >> ilog2(rzm);
}

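/*
 * Storage increment numbers (RNs) are 1-based; increment n covers the
 * physical address range [(n - 1) * rzm, n * rzm).
 */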
static unsigned long long rn2addr(u16 rn)
{
        return (unsigned long long) (rn - 1) * rzm;
}

static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
        struct assign_storage_sccb *sccb;
        int rc;

        sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sccb)
                return -ENOMEM;
        sccb->header.length = PAGE_SIZE;
        sccb->rn = rn;
        rc = do_sync_request(cmd, sccb);
        if (rc)
                goto out;
        switch (sccb->header.response_code) {
        case 0x0020:
        case 0x0120:
                break;
        default:
                pr_warning("assign storage failed (cmd=0x%08x, "
                           "response=0x%04x, rn=0x%04x)\n", cmd,
                           sccb->header.response_code, rn);
                rc = -EIO;
                break;
        }
out:
        free_page((unsigned long) sccb);
        return rc;
}

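/*
 * Assign a storage increment to the configuration and set the default
 * storage key on every page it contains.
 */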
static int sclp_assign_storage(u16 rn)
{
        unsigned long long start, address;
        int rc;

        rc = do_assign_storage(0x000d0001, rn);
        if (rc)
                goto out;
        start = address = rn2addr(rn);
        for (; address < start + rzm; address += PAGE_SIZE)
                page_set_storage_key(address, PAGE_DEFAULT_KEY, 0);
out:
        return rc;
}

static int sclp_unassign_storage(u16 rn)
{
        return do_assign_storage(0x000c0001, rn);
}

struct attach_storage_sccb {
        struct sccb_header header;
        u16 :16;
        u16 assigned;
        u32 :32;
        u32 entries[0];
} __packed;

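/*
 * Attach the storage element with the given ID. Increments that the
 * element reports as already assigned are unassigned again, so that
 * onlining can be driven through the memory notifier and the
 * per-increment use counts stay consistent.
 */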
static int sclp_attach_storage(u8 id)
{
        struct attach_storage_sccb *sccb;
        int rc;
        int i;

        sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sccb)
                return -ENOMEM;
        sccb->header.length = PAGE_SIZE;
        rc = do_sync_request(0x00080001 | id << 8, sccb);
        if (rc)
                goto out;
        switch (sccb->header.response_code) {
        case 0x0020:
                set_bit(id, sclp_storage_ids);
                for (i = 0; i < sccb->assigned; i++) {
                        if (sccb->entries[i])
                                sclp_unassign_storage(sccb->entries[i] >> 16);
                }
                break;
        default:
                rc = -EIO;
                break;
        }
out:
        free_page((unsigned long) sccb);
        return rc;
}

static int sclp_mem_change_state(unsigned long start, unsigned long size,
                                 int online)
{
        struct memory_increment *incr;
        unsigned long long istart;
        int rc = 0;

        list_for_each_entry(incr, &sclp_mem_list, list) {
                istart = rn2addr(incr->rn);
                if (start + size - 1 < istart)
                        break;
                if (start > istart + rzm - 1)
                        continue;
                if (online) {
                        if (incr->usecount++)
                                continue;
                        /*
                         * Don't break the loop if one assign fails. Loop may
                         * be walked again on CANCEL and we can't save
                         * information if state changed before or not.
                         * So continue and increase usecount for all increments.
                         */
                        rc |= sclp_assign_storage(incr->rn);
                } else {
                        if (--incr->usecount)
                                continue;
                        sclp_unassign_storage(incr->rn);
                }
        }
        return rc ? -EIO : 0;
}

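/*
 * Memory hotplug notifier: map GOING_ONLINE to assigning and
 * CANCEL_ONLINE/OFFLINE to unassigning the increments that intersect the
 * affected range. Newly recognized storage element IDs are attached first.
 */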
static int sclp_mem_notifier(struct notifier_block *nb,
                             unsigned long action, void *data)
{
        unsigned long start, size;
        struct memory_notify *arg;
        unsigned char id;
        int rc = 0;

        arg = data;
        start = arg->start_pfn << PAGE_SHIFT;
        size = arg->nr_pages << PAGE_SHIFT;
        mutex_lock(&sclp_mem_mutex);
        for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
                sclp_attach_storage(id);
        switch (action) {
        case MEM_ONLINE:
        case MEM_GOING_OFFLINE:
        case MEM_CANCEL_OFFLINE:
                break;
        case MEM_GOING_ONLINE:
                rc = sclp_mem_change_state(start, size, 1);
                break;
        case MEM_CANCEL_ONLINE:
                sclp_mem_change_state(start, size, 0);
                break;
        case MEM_OFFLINE:
                sclp_mem_change_state(start, size, 0);
                break;
        default:
                rc = -EINVAL;
                break;
        }
        if (!rc)
                sclp_mem_state_changed = 1;
        mutex_unlock(&sclp_mem_mutex);
        return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
        .notifier_call = sclp_mem_notifier,
};

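/*
 * Collect consecutive standby increments and register them with a single
 * add_memory() call per contiguous run. A call with rn == 0 flushes the
 * pending run; ranges are clipped against VMEM_MAX_PHYS and against a
 * mem= limit (memory_end).
 */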
static void __init add_memory_merged(u16 rn)
{
        static u16 first_rn, num;
        unsigned long long start, size;

        if (rn && first_rn && (first_rn + num == rn)) {
                num++;
                return;
        }
        if (!first_rn)
                goto skip_add;
        start = rn2addr(first_rn);
        size = (unsigned long long) num * rzm;
        if (start >= VMEM_MAX_PHYS)
                goto skip_add;
        if (start + size > VMEM_MAX_PHYS)
                size = VMEM_MAX_PHYS - start;
        if (memory_end_set && (start >= memory_end))
                goto skip_add;
        if (memory_end_set && (start + size > memory_end))
                size = memory_end - start;
        add_memory(0, start, size);
skip_add:
        first_rn = rn;
        num = 1;
}

static void __init sclp_add_standby_memory(void)
{
        struct memory_increment *incr;

        list_for_each_entry(incr, &sclp_mem_list, list)
                if (incr->standby)
                        add_memory_merged(incr->rn);
        add_memory_merged(0);
}

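/*
 * Insert an increment into the rn-sorted sclp_mem_list. For assigned
 * increments the reported rn is used; for unassigned (standby) increments
 * the first gap in the existing rn sequence is filled. Increments beyond
 * rnmax are dropped.
 */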
static void __init insert_increment(u16 rn, int standby, int assigned)
{
        struct memory_increment *incr, *new_incr;
        struct list_head *prev;
        u16 last_rn;

        new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
        if (!new_incr)
                return;
        new_incr->rn = rn;
        new_incr->standby = standby;
        if (!standby)
                new_incr->usecount = 1;
        last_rn = 0;
        prev = &sclp_mem_list;
        list_for_each_entry(incr, &sclp_mem_list, list) {
                if (assigned && incr->rn > rn)
                        break;
                if (!assigned && incr->rn - last_rn > 1)
                        break;
                last_rn = incr->rn;
                prev = &incr->list;
        }
        if (!assigned)
                new_incr->rn = last_rn + 1;
        if (new_incr->rn > rnmax) {
                kfree(new_incr);
                return;
        }
        list_add(&new_incr->list, prev);
}

static int sclp_mem_freeze(struct device *dev)
{
        if (!sclp_mem_state_changed)
                return 0;
        pr_err("Memory hotplug state changed, suspend refused.\n");
        return -EPERM;
}

struct read_storage_sccb {
        struct sccb_header header;
        u16 max_id;
        u16 assigned;
        u16 standby;
        u16 :16;
        u32 entries[0];
} __packed;

static const struct dev_pm_ops sclp_mem_pm_ops = {
        .freeze = sclp_mem_freeze,
};

static struct platform_driver sclp_mem_pdrv = {
        .driver = {
                .name = "sclp_mem",
                .pm = &sclp_mem_pm_ops,
        },
};

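/*
 * Scan all storage element IDs with the read-storage-element-info command
 * (0x00040001). Response code 0x0010 lists assigned and 0x0410 lists
 * standby increments; 0x0310 is treated as an element without increments.
 * Standby memory is then registered in merged chunks, and the memory
 * notifier plus the "sclp_mem" platform device (used to veto suspend after
 * hotplug state changes) are set up.
 */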
static int __init sclp_detect_standby_memory(void)
{
        struct platform_device *sclp_pdev;
        struct read_storage_sccb *sccb;
        int i, id, assigned, rc;

        if (!early_read_info_sccb_valid)
                return 0;
        if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
                return 0;
        rc = -ENOMEM;
        sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
        if (!sccb)
                goto out;
        assigned = 0;
        for (id = 0; id <= sclp_max_storage_id; id++) {
                memset(sccb, 0, PAGE_SIZE);
                sccb->header.length = PAGE_SIZE;
                rc = do_sync_request(0x00040001 | id << 8, sccb);
                if (rc)
                        goto out;
                switch (sccb->header.response_code) {
                case 0x0010:
                        set_bit(id, sclp_storage_ids);
                        for (i = 0; i < sccb->assigned; i++) {
                                if (!sccb->entries[i])
                                        continue;
                                assigned++;
                                insert_increment(sccb->entries[i] >> 16, 0, 1);
                        }
                        break;
                case 0x0310:
                        break;
                case 0x0410:
                        for (i = 0; i < sccb->assigned; i++) {
                                if (!sccb->entries[i])
                                        continue;
                                assigned++;
                                insert_increment(sccb->entries[i] >> 16, 1, 1);
                        }
                        break;
                default:
                        rc = -EIO;
                        break;
                }
                if (!rc)
                        sclp_max_storage_id = sccb->max_id;
        }
        if (rc || list_empty(&sclp_mem_list))
                goto out;
        for (i = 1; i <= rnmax - assigned; i++)
                insert_increment(0, 1, 0);
        rc = register_memory_notifier(&sclp_mem_nb);
        if (rc)
                goto out;
        rc = platform_driver_register(&sclp_mem_pdrv);
        if (rc)
                goto out;
        sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
        rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
        if (rc)
                goto out_driver;
        sclp_add_standby_memory();
        goto out;
out_driver:
        platform_driver_unregister(&sclp_mem_pdrv);
out:
        free_page((unsigned long) sccb);
        return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH              0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH            0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION       0x00030001

struct chp_cfg_sccb {
        struct sccb_header header;
        u8 ccm;
        u8 reserved[6];
        u8 cssid;
} __attribute__((packed));

static int do_chp_configure(sclp_cmdw_t cmd)
{
        struct chp_cfg_sccb *sccb;
        int rc;

        if (!SCLP_HAS_CHP_RECONFIG)
                return -EOPNOTSUPP;
        /* Prepare sccb. */
        sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sccb)
                return -ENOMEM;
        sccb->header.length = sizeof(*sccb);
        rc = do_sync_request(cmd, sccb);
        if (rc)
                goto out;
        switch (sccb->header.response_code) {
        case 0x0020:
        case 0x0120:
        case 0x0440:
        case 0x0450:
                break;
        default:
                pr_warning("configure channel-path failed "
                           "(cmd=0x%08x, response=0x%04x)\n", cmd,
                           sccb->header.response_code);
                rc = -EIO;
                break;
        }
out:
        free_page((unsigned long) sccb);
        return rc;
}

/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform configure channel-path command sclp command for specified chpid.
 * Return 0 after command successfully finished, non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
        return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}

/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform deconfigure channel-path command sclp command for specified chpid.
 * Return 0 after command successfully finished, non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
        return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}

struct chp_info_sccb {
        struct sccb_header header;
        u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
        u8 standby[SCLP_CHP_INFO_MASK_SIZE];
        u8 configured[SCLP_CHP_INFO_MASK_SIZE];
        u8 ccm;
        u8 reserved[6];
        u8 cssid;
} __attribute__((packed));

/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform read channel-path information sclp command and wait for
 * completion. On success, the resulting data is stored to @info.
 * Return 0 on success, non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
        struct chp_info_sccb *sccb;
        int rc;

        if (!SCLP_HAS_CHP_INFO)
                return -EOPNOTSUPP;
        /* Prepare sccb. */
        sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!sccb)
                return -ENOMEM;
        sccb->header.length = sizeof(*sccb);
        rc = do_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
        if (rc)
                goto out;
        if (sccb->header.response_code != 0x0010) {
                pr_warning("read channel-path info failed "
                           "(response=0x%04x)\n", sccb->header.response_code);
                rc = -EIO;
                goto out;
        }
        memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
        memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
        memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
        free_page((unsigned long) sccb);
        return rc;
}