i2o_config.c (Linux 3.7.1)
/*
 * I2O Configuration Interface Driver
 *
 * (C) Copyright 1999-2002 Red Hat
 *
 * Written by Alan Cox, Building Number Three Ltd
 *
 * Fixes/additions:
 *	Deepak Saxena (04/20/1999):
 *		Added basic ioctl() support
 *	Deepak Saxena (06/07/1999):
 *		Added software download ioctl (still testing)
 *	Auvo Häkkinen (09/10/1999):
 *		Changes to i2o_cfg_reply(), ioctl_parms()
 *		Added ioctl_validate()
 *	Taneli Vähäkangas (09/30/1999):
 *		Fixed ioctl_swdl()
 *	Taneli Vähäkangas (10/04/1999):
 *		Changed ioctl_swdl(), implemented ioctl_swul() and ioctl_swdel()
 *	Deepak Saxena (11/18/1999):
 *		Added event management support
 *	Alan Cox <[email protected]>:
 *		2.4 rewrite ported to 2.5
 *	Markus Lidel <[email protected]>:
 *		Added pass-thru support for Adaptec's raidutils
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/compat.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include "core.h"

#define SG_TABLESIZE		30

static DEFINE_MUTEX(i2o_cfg_mutex);
static long i2o_cfg_ioctl(struct file *, unsigned int, unsigned long);

static spinlock_t i2o_config_lock;

#define MODINC(x,y) ((x) = ((x) + 1) % (y))
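
/*
 * MODINC advances a circular-buffer index: the per-file event queue below
 * is a fixed-size ring, so e.g. MODINC(p->q_out, I2O_EVT_Q_LEN) steps the
 * tail index and wraps it back to 0 at the end of the array.
 */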

/* One simple SG element as carried in I2O message frames: flags plus
 * byte count in the top word, bus address in the second. */
struct sg_simple_element {
	u32 flag_count;
	u32 addr_bus;
};

/* Per-open-file state: an event ring plus SIGIO bookkeeping. */
struct i2o_cfg_info {
	struct file *fp;
	struct fasync_struct *fasync;
	struct i2o_evt_info event_q[I2O_EVT_Q_LEN];
	u16 q_in;		/* Queue head index */
	u16 q_out;		/* Queue tail index */
	u16 q_len;		/* Queue length */
	u16 q_lost;		/* Number of lost events */
	ulong q_id;		/* Event queue ID... used as tx_context */
	struct i2o_cfg_info *next;
};
static struct i2o_cfg_info *open_files = NULL;
static ulong i2o_cfg_info_id = 0;
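
/*
 * open_files is a singly linked list with one i2o_cfg_info node per open
 * file descriptor; insertions (cfg_open) and removals (cfg_release) are
 * serialized by i2o_config_lock.
 */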

static int i2o_cfg_getiops(unsigned long arg)
{
	struct i2o_controller *c;
	u8 __user *user_iop_table = (void __user *)arg;
	u8 tmp[MAX_I2O_CONTROLLERS];
	int ret = 0;

	memset(tmp, 0, MAX_I2O_CONTROLLERS);

	list_for_each_entry(c, &i2o_controllers, list)
	    tmp[c->unit] = 1;

	if (copy_to_user(user_iop_table, tmp, MAX_I2O_CONTROLLERS))
		ret = -EFAULT;

	return ret;
};
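
/*
 * Example (userspace, not part of this driver): a minimal sketch of how a
 * tool might query the active-IOP bitmap via I2OGETIOPS. The device node
 * path is an assumption here (the misc device registers below under the
 * name "i2octl"); I2OGETIOPS and MAX_I2O_CONTROLLERS come from
 * <linux/i2o-dev.h>.
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/i2o-dev.h>
 *
 *	int main(void)
 *	{
 *		unsigned char iops[MAX_I2O_CONTROLLERS];
 *		int i, fd = open("/dev/i2octl", O_RDONLY);
 *
 *		if (fd < 0 || ioctl(fd, I2OGETIOPS, iops) < 0)
 *			return 1;
 *		for (i = 0; i < MAX_I2O_CONTROLLERS; i++)
 *			if (iops[i])
 *				printf("iop %d active\n", i);
 *		close(fd);
 *		return 0;
 *	}
 */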

static int i2o_cfg_gethrt(unsigned long arg)
{
	struct i2o_controller *c;
	struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg;
	struct i2o_cmd_hrtlct kcmd;
	i2o_hrt *hrt;
	int len;
	u32 reslen;
	int ret = 0;

	if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
		return -EFAULT;

	if (get_user(reslen, kcmd.reslen) < 0)
		return -EFAULT;

	if (kcmd.resbuf == NULL)
		return -EFAULT;

	c = i2o_find_iop(kcmd.iop);
	if (!c)
		return -ENXIO;

	hrt = (i2o_hrt *) c->hrt.virt;

	/* 8-byte HRT header plus entry_len 32-bit words per entry */
	len = 8 + ((hrt->entry_len * hrt->num_entries) << 2);

	/* report the needed size even when the user buffer is too small */
	if (put_user(len, kcmd.reslen))
		ret = -EFAULT;
	else if (len > reslen)
		ret = -ENOBUFS;
	else if (copy_to_user(kcmd.resbuf, (void *)hrt, len))
		ret = -EFAULT;

	return ret;
};

static int i2o_cfg_getlct(unsigned long arg)
{
	struct i2o_controller *c;
	struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg;
	struct i2o_cmd_hrtlct kcmd;
	i2o_lct *lct;
	int len;
	int ret = 0;
	u32 reslen;

	if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
		return -EFAULT;

	if (get_user(reslen, kcmd.reslen) < 0)
		return -EFAULT;

	if (kcmd.resbuf == NULL)
		return -EFAULT;

	c = i2o_find_iop(kcmd.iop);
	if (!c)
		return -ENXIO;

	lct = (i2o_lct *) c->lct;

	len = (unsigned int)lct->table_size << 2;
	if (put_user(len, kcmd.reslen))
		ret = -EFAULT;
	else if (len > reslen)
		ret = -ENOBUFS;
	else if (copy_to_user(kcmd.resbuf, lct, len))
		ret = -EFAULT;

	return ret;
};

static int i2o_cfg_parms(unsigned long arg, unsigned int type)
{
	int ret = 0;
	struct i2o_controller *c;
	struct i2o_device *dev;
	struct i2o_cmd_psetget __user *cmd =
	    (struct i2o_cmd_psetget __user *)arg;
	struct i2o_cmd_psetget kcmd;
	u32 reslen;
	u8 *ops;
	u8 *res;
	int len = 0;

	u32 i2o_cmd = (type == I2OPARMGET ?
		       I2O_CMD_UTIL_PARAMS_GET : I2O_CMD_UTIL_PARAMS_SET);

	if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_psetget)))
		return -EFAULT;

	if (get_user(reslen, kcmd.reslen))
		return -EFAULT;

	c = i2o_find_iop(kcmd.iop);
	if (!c)
		return -ENXIO;

	dev = i2o_iop_find_device(c, kcmd.tid);
	if (!dev)
		return -ENXIO;

	/*
	 * Stop users being able to try and allocate arbitrary amounts
	 * of DMA space. 64K is way more than sufficient for this.
	 */
	if (kcmd.oplen > 65536)
		return -EMSGSIZE;

	ops = memdup_user(kcmd.opbuf, kcmd.oplen);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	/*
	 * It's possible to have a _very_ large table
	 * and that the user asks for all of it at once...
	 */
	res = kmalloc(65536, GFP_KERNEL);
	if (!res) {
		kfree(ops);
		return -ENOMEM;
	}

	len = i2o_parm_issue(dev, i2o_cmd, ops, kcmd.oplen, res, 65536);
	kfree(ops);

	if (len < 0) {
		kfree(res);
		return -EAGAIN;
	}

	if (put_user(len, kcmd.reslen))
		ret = -EFAULT;
	else if (len > reslen)
		ret = -ENOBUFS;
	else if (copy_to_user(kcmd.resbuf, res, len))
		ret = -EFAULT;

	kfree(res);

	return ret;
};

static int i2o_cfg_swdl(unsigned long arg)
{
	struct i2o_sw_xfer kxfer;
	struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
	unsigned char maxfrag = 0, curfrag = 1;
	struct i2o_dma buffer;
	struct i2o_message *msg;
	unsigned int status = 0, swlen = 0, fragsize = 8192;
	struct i2o_controller *c;

	if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
		return -EFAULT;

	if (get_user(swlen, kxfer.swlen) < 0)
		return -EFAULT;

	if (get_user(maxfrag, kxfer.maxfrag) < 0)
		return -EFAULT;

	if (get_user(curfrag, kxfer.curfrag) < 0)
		return -EFAULT;

	/* the last fragment carries whatever is left over from the 8K chunks */
	if (curfrag == maxfrag)
		fragsize = swlen - (maxfrag - 1) * 8192;

	if (!kxfer.buf || !access_ok(VERIFY_READ, kxfer.buf, fragsize))
		return -EFAULT;

	c = i2o_find_iop(kxfer.iop);
	if (!c)
		return -ENXIO;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) {
		i2o_msg_nop(c, msg);
		return -ENOMEM;
	}

	if (__copy_from_user(buffer.virt, kxfer.buf, fragsize)) {
		i2o_msg_nop(c, msg);
		i2o_dma_free(&c->pdev->dev, &buffer);
		return -EFAULT;
	}

	msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 |
			ADAPTER_TID);
	msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
	msg->u.head[3] = cpu_to_le32(0);
	msg->body[0] =
	    cpu_to_le32((((u32) kxfer.flags) << 24) |
			(((u32) kxfer.sw_type) << 16) |
			(((u32) maxfrag) << 8) | ((u32) curfrag));
	msg->body[1] = cpu_to_le32(swlen);
	msg->body[2] = cpu_to_le32(kxfer.sw_id);
	msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
	msg->body[4] = cpu_to_le32(buffer.phys);

	osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
	status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);

	if (status != -ETIMEDOUT)
		i2o_dma_free(&c->pdev->dev, &buffer);

	if (status != I2O_POST_WAIT_OK) {
		/* it fails if you try and send frags out of order
		 * and for some yet unknown reasons too */
		osm_info("swdl failed, DetailedStatus = %d\n", status);
		return status;
	}

	return 0;
};
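
/*
 * Fragment sizing, worked through: downloading a 20000-byte image as
 * maxfrag = 3 fragments sends two full 8192-byte fragments, then a final
 * fragment of 20000 - 2 * 8192 = 3616 bytes, matching the fragsize
 * computation above. i2o_cfg_swul() below applies the same scheme to
 * uploads.
 */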

static int i2o_cfg_swul(unsigned long arg)
{
	struct i2o_sw_xfer kxfer;
	struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
	unsigned char maxfrag = 0, curfrag = 1;
	struct i2o_dma buffer;
	struct i2o_message *msg;
	unsigned int status = 0, swlen = 0, fragsize = 8192;
	struct i2o_controller *c;
	int ret = 0;

	if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
		return -EFAULT;

	if (get_user(swlen, kxfer.swlen) < 0)
		return -EFAULT;

	if (get_user(maxfrag, kxfer.maxfrag) < 0)
		return -EFAULT;

	if (get_user(curfrag, kxfer.curfrag) < 0)
		return -EFAULT;

	if (curfrag == maxfrag)
		fragsize = swlen - (maxfrag - 1) * 8192;

	if (!kxfer.buf)
		return -EFAULT;

	c = i2o_find_iop(kxfer.iop);
	if (!c)
		return -ENXIO;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) {
		i2o_msg_nop(c, msg);
		return -ENOMEM;
	}

	msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID);
	msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
	msg->u.head[3] = cpu_to_le32(0);
	msg->body[0] =
	    cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16 |
			(u32) maxfrag << 8 | (u32) curfrag);
	msg->body[1] = cpu_to_le32(swlen);
	msg->body[2] = cpu_to_le32(kxfer.sw_id);
	msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
	msg->body[4] = cpu_to_le32(buffer.phys);

	osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
	status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);

	if (status != I2O_POST_WAIT_OK) {
		if (status != -ETIMEDOUT)
			i2o_dma_free(&c->pdev->dev, &buffer);

		osm_info("swul failed, DetailedStatus = %d\n", status);
		return status;
	}

	if (copy_to_user(kxfer.buf, buffer.virt, fragsize))
		ret = -EFAULT;

	i2o_dma_free(&c->pdev->dev, &buffer);

	return ret;
}

static int i2o_cfg_swdel(unsigned long arg)
{
	struct i2o_controller *c;
	struct i2o_sw_xfer kxfer;
	struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
	struct i2o_message *msg;
	unsigned int swlen;
	int token;

	if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
		return -EFAULT;

	if (get_user(swlen, kxfer.swlen) < 0)
		return -EFAULT;

	c = i2o_find_iop(kxfer.iop);
	if (!c)
		return -ENXIO;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID);
	msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
	msg->u.head[3] = cpu_to_le32(0);
	msg->body[0] =
	    cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16);
	msg->body[1] = cpu_to_le32(swlen);
	msg->body[2] = cpu_to_le32(kxfer.sw_id);

	token = i2o_msg_post_wait(c, msg, 10);

	if (token != I2O_POST_WAIT_OK) {
		osm_info("swdel failed, DetailedStatus = %d\n", token);
		return -ETIMEDOUT;
	}

	return 0;
};

static int i2o_cfg_validate(unsigned long arg)
{
	int token;
	int iop = (int)arg;
	struct i2o_message *msg;
	struct i2o_controller *c;

	c = i2o_find_iop(iop);
	if (!c)
		return -ENXIO;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop);
	msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
	msg->u.head[3] = cpu_to_le32(0);

	token = i2o_msg_post_wait(c, msg, 10);

	if (token != I2O_POST_WAIT_OK) {
		osm_info("Can't validate configuration, ErrorStatus = %d\n",
			 token);
		return -ETIMEDOUT;
	}

	return 0;
};

static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
{
	struct i2o_message *msg;
	struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg;
	struct i2o_evt_id kdesc;
	struct i2o_controller *c;
	struct i2o_device *d;

	if (copy_from_user(&kdesc, pdesc, sizeof(struct i2o_evt_id)))
		return -EFAULT;

	/* IOP exists? */
	c = i2o_find_iop(kdesc.iop);
	if (!c)
		return -ENXIO;

	/* Device exists? */
	d = i2o_iop_find_device(c, kdesc.tid);
	if (!d)
		return -ENODEV;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 |
			kdesc.tid);
	msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
	msg->u.head[3] = cpu_to_le32(i2o_cntxt_list_add(c, fp->private_data));
	msg->body[0] = cpu_to_le32(kdesc.evt_mask);

	i2o_msg_post(c, msg);

	return 0;
}

static int i2o_cfg_evt_get(unsigned long arg, struct file *fp)
{
	struct i2o_cfg_info *p = NULL;
	struct i2o_evt_get __user *uget = (struct i2o_evt_get __user *)arg;
	struct i2o_evt_get kget;
	unsigned long flags;

	for (p = open_files; p; p = p->next)
		if (p->q_id == (ulong) fp->private_data)
			break;

	/* no state for this fd, or nothing queued */
	if (!p || !p->q_len)
		return -ENOENT;

	memcpy(&kget.info, &p->event_q[p->q_out], sizeof(struct i2o_evt_info));
	MODINC(p->q_out, I2O_EVT_Q_LEN);
	spin_lock_irqsave(&i2o_config_lock, flags);
	p->q_len--;
	kget.pending = p->q_len;
	kget.lost = p->q_lost;
	spin_unlock_irqrestore(&i2o_config_lock, flags);

	if (copy_to_user(uget, &kget, sizeof(struct i2o_evt_get)))
		return -EFAULT;
	return 0;
}
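
/*
 * I2OEVTGET drains the ring one entry at a time: the oldest event at
 * q_out is copied out, the tail advances via MODINC, and kget.pending /
 * kget.lost report how many events remain queued or were dropped. The
 * producer side (filling event_q at q_in) lives in the driver's reply
 * path, outside this listing.
 */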

#ifdef CONFIG_COMPAT
static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
			      unsigned long arg)
{
	struct i2o_cmd_passthru32 __user *cmd;
	struct i2o_controller *c;
	u32 __user *user_msg;
	u32 *reply = NULL;
	u32 __user *user_reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 rcode = 0;
	struct i2o_dma sg_list[SG_TABLESIZE];
	u32 sg_offset = 0;
	u32 sg_count = 0;
	u32 i = 0;
	u32 sg_index = 0;
	i2o_status_block *sb;
	struct i2o_message *msg;
	unsigned int iop;

	cmd = (struct i2o_cmd_passthru32 __user *)arg;

	if (get_user(iop, &cmd->iop) || get_user(i, &cmd->msg))
		return -EFAULT;

	user_msg = compat_ptr(i);

	c = i2o_find_iop(iop);
	if (!c) {
		osm_debug("controller %d not found\n", iop);
		return -ENXIO;
	}

	sb = c->status_block.virt;

	if (get_user(size, &user_msg[0])) {
		osm_warn("unable to get size!\n");
		return -EFAULT;
	}
	size = size >> 16;

	if (size > sb->inbound_frame_size) {
		osm_warn("size of message > inbound_frame_size");
		return -EFAULT;
	}

	user_reply = &user_msg[size];

	size <<= 2;		/* Convert to bytes */

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	rcode = -EFAULT;
	/* Copy in the user's I2O command */
	if (copy_from_user(msg, user_msg, size)) {
		osm_warn("unable to copy user message\n");
		goto out;
	}
	i2o_dump_message(msg);

	if (get_user(reply_size, &user_reply[0]) < 0)
		goto out;

	reply_size >>= 16;
	reply_size <<= 2;

	rcode = -ENOMEM;
	reply = kzalloc(reply_size, GFP_KERNEL);
	if (!reply) {
		printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
		       c->name);
		goto out;
	}

	sg_offset = (msg->u.head[0] >> 4) & 0x0f;

	memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
	if (sg_offset) {
		struct sg_simple_element *sg;

		if (sg_offset * 4 >= size) {
			rcode = -EFAULT;
			goto cleanup;
		}
		/* TODO 64bit fix */
		sg = (struct sg_simple_element *)((&msg->u.head[0]) +
						  sg_offset);
		sg_count =
		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);
		if (sg_count > SG_TABLESIZE) {
			printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n",
			       c->name, sg_count);
			rcode = -EINVAL;
			goto cleanup;
		}

		for (i = 0; i < sg_count; i++) {
			int sg_size;
			struct i2o_dma *p;

			if (!(sg[i].flag_count & 0x10000000
			      /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */ )) {
				printk(KERN_DEBUG
				       "%s:Bad SG element %d - not simple (%x)\n",
				       c->name, i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			p = &(sg_list[sg_index]);
			/* Allocate memory for the transfer */
			if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) {
				printk(KERN_DEBUG
				       "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
				       c->name, sg_size, i, sg_count);
				rcode = -ENOMEM;
				goto sg_list_cleanup;
			}
			sg_index++;
			/* Copy in the user's SG buffer if necessary */
			if (sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) {
				/* TODO 64bit fix */
				if (copy_from_user
				    (p->virt,
				     (void __user *)(unsigned long)sg[i].addr_bus,
				     sg_size)) {
					printk(KERN_DEBUG
					       "%s: Could not copy SG buf %d FROM user\n",
					       c->name, i);
					rcode = -EFAULT;
					goto sg_list_cleanup;
				}
			}
			/* TODO 64bit fix */
			sg[i].addr_bus = (u32) p->phys;
		}
	}

	rcode = i2o_msg_post_wait(c, msg, 60);
	msg = NULL;
	if (rcode) {
		reply[4] = ((u32) rcode) << 24;
		goto sg_list_cleanup;
	}

	if (sg_offset) {
		u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE];
		/* Copy the scatter-gather buffers back to user space */
		u32 j;
		struct sg_simple_element *sg;
		int sg_size;

		/* re-acquire the original message to handle the SG copy correctly */
		memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
		/* get user msg size in u32s */
		if (get_user(size, &user_msg[0])) {
			rcode = -EFAULT;
			goto sg_list_cleanup;
		}
		size = size >> 16;
		size *= 4;
		/* Copy in the user's I2O command */
		if (copy_from_user(rmsg, user_msg, size)) {
			rcode = -EFAULT;
			goto sg_list_cleanup;
		}
		sg_count =
		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);

		/* TODO 64bit fix */
		sg = (struct sg_simple_element *)(rmsg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to the user's buffer if necessary */
			if (!(sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR */ )) {
				sg_size = sg[j].flag_count & 0xffffff;
				/* TODO 64bit fix */
				if (copy_to_user
				    ((void __user *)(u64) sg[j].addr_bus,
				     sg_list[j].virt, sg_size)) {
					printk(KERN_WARNING
					       "%s: Could not copy %p TO user %x\n",
					       c->name, sg_list[j].virt,
					       sg[j].addr_bus);
					rcode = -EFAULT;
					goto sg_list_cleanup;
				}
			}
		}
	}

sg_list_cleanup:
	/* Copy back the reply to user space */
	if (reply_size) {
		/* we wrote our own values for context - now restore the user-supplied ones */
		if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) {
			printk(KERN_WARNING
			       "%s: Could not copy message context FROM user\n",
			       c->name);
			rcode = -EFAULT;
		}
		if (copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING
			       "%s: Could not copy reply TO user\n", c->name);
			rcode = -EFAULT;
		}
	}
	for (i = 0; i < sg_index; i++)
		i2o_dma_free(&c->pdev->dev, &sg_list[i]);

cleanup:
	kfree(reply);
out:
	if (msg)
		i2o_msg_nop(c, msg);
	return rcode;
}
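
/*
 * Note: i2o_cfg_passthru32() mirrors i2o_cfg_passthru() below almost
 * line for line; the substantive difference is that this compat entry
 * point translates the 32-bit user message pointer with compat_ptr() so
 * 32-bit raidutils binaries keep working on a 64-bit kernel.
 */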

static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd,
				 unsigned long arg)
{
	int ret;

	mutex_lock(&i2o_cfg_mutex);
	switch (cmd) {
	case I2OGETIOPS:
		ret = i2o_cfg_ioctl(file, cmd, arg);
		break;
	case I2OPASSTHRU32:
		ret = i2o_cfg_passthru32(file, cmd, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}
	mutex_unlock(&i2o_cfg_mutex);
	return ret;
}

#endif

#ifdef CONFIG_I2O_EXT_ADAPTEC
static int i2o_cfg_passthru(unsigned long arg)
{
	struct i2o_cmd_passthru __user *cmd =
	    (struct i2o_cmd_passthru __user *)arg;
	struct i2o_controller *c;
	u32 __user *user_msg;
	u32 *reply = NULL;
	u32 __user *user_reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 rcode = 0;
	struct i2o_dma sg_list[SG_TABLESIZE];
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	i2o_status_block *sb;
	struct i2o_message *msg;
	unsigned int iop;

	if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
		return -EFAULT;

	c = i2o_find_iop(iop);
	if (!c) {
		osm_warn("controller %d not found\n", iop);
		return -ENXIO;
	}

	sb = c->status_block.virt;

	if (get_user(size, &user_msg[0]))
		return -EFAULT;
	size = size >> 16;

	if (size > sb->inbound_frame_size) {
		osm_warn("size of message > inbound_frame_size");
		return -EFAULT;
	}

	user_reply = &user_msg[size];

	size <<= 2;		/* Convert to bytes */

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	rcode = -EFAULT;
	/* Copy in the user's I2O command */
	if (copy_from_user(msg, user_msg, size))
		goto out;

	if (get_user(reply_size, &user_reply[0]) < 0)
		goto out;

	reply_size >>= 16;
	reply_size <<= 2;

	reply = kzalloc(reply_size, GFP_KERNEL);
	if (!reply) {
		printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
		       c->name);
		rcode = -ENOMEM;
		goto out;
	}

	sg_offset = (msg->u.head[0] >> 4) & 0x0f;

	memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
	if (sg_offset) {
		struct sg_simple_element *sg;
		struct i2o_dma *p;

		if (sg_offset * 4 >= size) {
			rcode = -EFAULT;
			goto cleanup;
		}
		/* TODO 64bit fix */
		sg = (struct sg_simple_element *)((&msg->u.head[0]) +
						  sg_offset);
		sg_count =
		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);
		if (sg_count > SG_TABLESIZE) {
			printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n",
			       c->name, sg_count);
			rcode = -EINVAL;
			goto cleanup;
		}

		for (i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000
			      /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */ )) {
				printk(KERN_DEBUG
				       "%s:Bad SG element %d - not simple (%x)\n",
				       c->name, i, sg[i].flag_count);
				rcode = -EINVAL;
				goto sg_list_cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			p = &(sg_list[sg_index]);
			/* Allocate memory for the transfer */
			if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) {
				printk(KERN_DEBUG
				       "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
				       c->name, sg_size, i, sg_count);
				rcode = -ENOMEM;
				goto sg_list_cleanup;
			}
			sg_index++;
			/* Copy in the user's SG buffer if necessary */
			if (sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) {
				/* TODO 64bit fix */
				if (copy_from_user
				    (p->virt, (void __user *)sg[i].addr_bus,
				     sg_size)) {
					printk(KERN_DEBUG
					       "%s: Could not copy SG buf %d FROM user\n",
					       c->name, i);
					rcode = -EFAULT;
					goto sg_list_cleanup;
				}
			}
			sg[i].addr_bus = p->phys;
		}
	}

	rcode = i2o_msg_post_wait(c, msg, 60);
	msg = NULL;
	if (rcode) {
		reply[4] = ((u32) rcode) << 24;
		goto sg_list_cleanup;
	}

	if (sg_offset) {
		u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE];
		/* Copy the scatter-gather buffers back to user space */
		u32 j;
		struct sg_simple_element *sg;
		int sg_size;

		/* re-acquire the original message to handle the SG copy correctly */
		memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
		/* get user msg size in u32s */
		if (get_user(size, &user_msg[0])) {
			rcode = -EFAULT;
			goto sg_list_cleanup;
		}
		size = size >> 16;
		size *= 4;
		/* Copy in the user's I2O command */
		if (copy_from_user(rmsg, user_msg, size)) {
			rcode = -EFAULT;
			goto sg_list_cleanup;
		}
		sg_count =
		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);

		/* TODO 64bit fix */
		sg = (struct sg_simple_element *)(rmsg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to the user's buffer if necessary */
			if (!(sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR */ )) {
				sg_size = sg[j].flag_count & 0xffffff;
				/* TODO 64bit fix */
				if (copy_to_user
				    ((void __user *)sg[j].addr_bus,
				     sg_list[j].virt, sg_size)) {
					printk(KERN_WARNING
					       "%s: Could not copy %p TO user %x\n",
					       c->name, sg_list[j].virt,
					       sg[j].addr_bus);
					rcode = -EFAULT;
					goto sg_list_cleanup;
				}
			}
		}
	}

sg_list_cleanup:
	/* Copy back the reply to user space */
	if (reply_size) {
		/* we wrote our own values for context - now restore the user-supplied ones */
		if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) {
			printk(KERN_WARNING
			       "%s: Could not copy message context FROM user\n",
			       c->name);
			rcode = -EFAULT;
		}
		if (copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING
			       "%s: Could not copy reply TO user\n", c->name);
			rcode = -EFAULT;
		}
	}

	for (i = 0; i < sg_index; i++)
		i2o_dma_free(&c->pdev->dev, &sg_list[i]);

cleanup:
	kfree(reply);
out:
	if (msg)
		i2o_msg_nop(c, msg);
	return rcode;
}
#endif

/*
 * IOCTL Handler
 */
static long i2o_cfg_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	int ret;

	mutex_lock(&i2o_cfg_mutex);
	switch (cmd) {
	case I2OGETIOPS:
		ret = i2o_cfg_getiops(arg);
		break;

	case I2OHRTGET:
		ret = i2o_cfg_gethrt(arg);
		break;

	case I2OLCTGET:
		ret = i2o_cfg_getlct(arg);
		break;

	case I2OPARMSET:
		ret = i2o_cfg_parms(arg, I2OPARMSET);
		break;

	case I2OPARMGET:
		ret = i2o_cfg_parms(arg, I2OPARMGET);
		break;

	case I2OSWDL:
		ret = i2o_cfg_swdl(arg);
		break;

	case I2OSWUL:
		ret = i2o_cfg_swul(arg);
		break;

	case I2OSWDEL:
		ret = i2o_cfg_swdel(arg);
		break;

	case I2OVALIDATE:
		ret = i2o_cfg_validate(arg);
		break;

	case I2OEVTREG:
		ret = i2o_cfg_evt_reg(arg, fp);
		break;

	case I2OEVTGET:
		ret = i2o_cfg_evt_get(arg, fp);
		break;

#ifdef CONFIG_I2O_EXT_ADAPTEC
	case I2OPASSTHRU:
		ret = i2o_cfg_passthru(arg);
		break;
#endif

	default:
		osm_debug("unknown ioctl called!\n");
		ret = -EINVAL;
	}
	mutex_unlock(&i2o_cfg_mutex);
	return ret;
}
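
/*
 * Every ioctl above runs under i2o_cfg_mutex, so configuration requests
 * are fully serialized: at most one is in flight at any time.
 */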

static int cfg_open(struct inode *inode, struct file *file)
{
	struct i2o_cfg_info *tmp = kmalloc(sizeof(struct i2o_cfg_info),
					   GFP_KERNEL);
	unsigned long flags;

	if (!tmp)
		return -ENOMEM;

	mutex_lock(&i2o_cfg_mutex);
	file->private_data = (void *)(i2o_cfg_info_id++);
	tmp->fp = file;
	tmp->fasync = NULL;
	tmp->q_id = (ulong) file->private_data;
	tmp->q_len = 0;
	tmp->q_in = 0;
	tmp->q_out = 0;
	tmp->q_lost = 0;
	tmp->next = open_files;

	spin_lock_irqsave(&i2o_config_lock, flags);
	open_files = tmp;
	spin_unlock_irqrestore(&i2o_config_lock, flags);
	mutex_unlock(&i2o_cfg_mutex);

	return 0;
}

static int cfg_fasync(int fd, struct file *fp, int on)
{
	ulong id = (ulong) fp->private_data;
	struct i2o_cfg_info *p;
	int ret = -EBADF;

	mutex_lock(&i2o_cfg_mutex);
	for (p = open_files; p; p = p->next)
		if (p->q_id == id)
			break;

	if (p)
		ret = fasync_helper(fd, fp, on, &p->fasync);
	mutex_unlock(&i2o_cfg_mutex);
	return ret;
}

static int cfg_release(struct inode *inode, struct file *file)
{
	ulong id = (ulong) file->private_data;
	struct i2o_cfg_info *p, **q;
	unsigned long flags;

	mutex_lock(&i2o_cfg_mutex);
	spin_lock_irqsave(&i2o_config_lock, flags);
	for (q = &open_files; (p = *q) != NULL; q = &p->next) {
		if (p->q_id == id) {
			*q = p->next;
			kfree(p);
			break;
		}
	}
	spin_unlock_irqrestore(&i2o_config_lock, flags);
	mutex_unlock(&i2o_cfg_mutex);

	return 0;
}

static const struct file_operations config_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.unlocked_ioctl = i2o_cfg_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i2o_cfg_compat_ioctl,
#endif
	.open = cfg_open,
	.release = cfg_release,
	.fasync = cfg_fasync,
};

static struct miscdevice i2o_miscdev = {
	.minor = I2O_MINOR,
	.name = "i2octl",
	.fops = &config_fops
};

static int __init i2o_config_old_init(void)
{
	spin_lock_init(&i2o_config_lock);

	if (misc_register(&i2o_miscdev) < 0) {
		osm_err("can't register device.\n");
		return -EBUSY;
	}

	return 0;
}

static void i2o_config_old_exit(void)
{
	misc_deregister(&i2o_miscdev);
}

MODULE_AUTHOR("Red Hat Software");