Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
sg.c
Go to the documentation of this file.
1 /*
2  * History:
3  * Started: Aug 9 by Lawrence Foard ([email protected]),
4  * to allow user process control of SCSI devices.
5  * Development Sponsored by Killy Corp. NY NY
6  *
7  * Original driver (sg.c):
8  * Copyright (C) 1992 Lawrence Foard
9  * Version 2 and 3 extensions to driver:
10  * Copyright (C) 1998 - 2005 Douglas Gilbert
11  *
12  * Modified 19-JAN-1998 Richard Gooch <[email protected]> Devfs support
13  *
14  * This program is free software; you can redistribute it and/or modify
15  * it under the terms of the GNU General Public License as published by
16  * the Free Software Foundation; either version 2, or (at your option)
17  * any later version.
18  *
19  */
20 
21 static int sg_version_num = 30534; /* 2 digits for each component */
22 #define SG_VERSION_STR "3.5.34"
23 
24 /*
25  * D. P. Gilbert ([email protected], [email protected]), notes:
26  * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
27  * the kernel/module needs to be built with CONFIG_SCSI_LOGGING
28  * (otherwise the macros compile to empty statements).
29  *
30  */
31 #include <linux/module.h>
32 
33 #include <linux/fs.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/string.h>
37 #include <linux/mm.h>
38 #include <linux/errno.h>
39 #include <linux/mtio.h>
40 #include <linux/ioctl.h>
41 #include <linux/slab.h>
42 #include <linux/fcntl.h>
43 #include <linux/init.h>
44 #include <linux/poll.h>
45 #include <linux/moduleparam.h>
46 #include <linux/cdev.h>
47 #include <linux/idr.h>
48 #include <linux/seq_file.h>
49 #include <linux/blkdev.h>
50 #include <linux/delay.h>
51 #include <linux/blktrace_api.h>
52 #include <linux/mutex.h>
53 #include <linux/ratelimit.h>
54 
55 #include "scsi.h"
56 #include <scsi/scsi_dbg.h>
57 #include <scsi/scsi_host.h>
58 #include <scsi/scsi_driver.h>
59 #include <scsi/scsi_ioctl.h>
60 #include <scsi/sg.h>
61 
62 #include "scsi_logging.h"
63 
64 #ifdef CONFIG_SCSI_PROC_FS
65 #include <linux/proc_fs.h>
66 static char *sg_version_date = "20061027";
67 
68 static int sg_proc_init(void);
69 static void sg_proc_cleanup(void);
70 #endif
71 
72 #define SG_ALLOW_DIO_DEF 0
73 
74 #define SG_MAX_DEVS 32768
75 
76 /*
77  * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
78  * Then when using 32 bit integers x * m may overflow during the calculation.
79  * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
80  * calculates the same, but prevents the overflow when both m and d
81  * are "small" numbers (like HZ and USER_HZ).
82  * Of course an overflow is inavoidable if the result of muldiv doesn't fit
83  * in 32 bits.
84  */
85 #define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
86 
87 #define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
88 
90 /* N.B. This variable is readable and writeable via
91  /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
92  of this size (or less if there is not enough memory) will be reserved
93  for use by this file descriptor. [Deprecated usage: this variable is also
94  readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
95  the kernel (i.e. it is not a module).] */
96 static int def_reserved_size = -1; /* picks up init parameter */
97 static int sg_allow_dio = SG_ALLOW_DIO_DEF;
98 
99 static int scatter_elem_sz = SG_SCATTER_SZ;
100 static int scatter_elem_sz_prev = SG_SCATTER_SZ;
101 
102 #define SG_SECTOR_SZ 512
103 
104 static int sg_add(struct device *, struct class_interface *);
105 static void sg_remove(struct device *, struct class_interface *);
106 
107 static DEFINE_SPINLOCK(sg_open_exclusive_lock);
108 
109 static DEFINE_IDR(sg_index_idr);
110 static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
111  file descriptor list for device */
112 
113 static struct class_interface sg_interface = {
114  .add_dev = sg_add,
115  .remove_dev = sg_remove,
116 };
117 
118 typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
119  unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
120  unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
121  unsigned bufflen; /* Size of (aggregate) data buffer */
122  struct page **pages;
124  char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
125  unsigned char cmd_opcode; /* first byte of command */
127 
128 struct sg_device; /* forward declarations */
129 struct sg_fd;
130 
131 typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
132  struct sg_request *nextrp; /* NULL -> tail request (slist) */
133  struct sg_fd *parentfp; /* NULL -> not in use */
134  Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
135  sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
137  char res_used; /* 1 -> using reserve buffer, 0 -> not ... */
138  char orphan; /* 1 -> drop on sight, 0 -> normal */
139  char sg_io_owned; /* 1 -> packet belongs to SG_IO */
140  /* done protected by rq_list_lock */
141  char done; /* 0->before bh, 1->before read, 2->read */
142  struct request *rq;
143  struct bio *bio;
144  struct execute_work ew;
145 } Sg_request;
146 
147 typedef struct sg_fd { /* holds the state of a file descriptor */
148  /* sfd_siblings is protected by sg_index_lock */
150  struct sg_device *parentdp; /* owning device */
151  wait_queue_head_t read_wait; /* queue read until command done */
152  rwlock_t rq_list_lock; /* protect access to list in req_arr */
153  int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
154  int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
155  Sg_scatter_hold reserve; /* buffer held for this file descriptor */
156  unsigned save_scat_len; /* original length of trunc. scat. element */
157  Sg_request *headrp; /* head of request slist, NULL->empty */
158  struct fasync_struct *async_qp; /* used by asynchronous notification */
159  Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
160  char low_dma; /* as in parent but possibly overridden to 1 */
161  char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
162  char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
163  char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */
164  char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
165  char mmap_called; /* 0 -> mmap() never called on this fd */
166  struct kref f_ref;
167  struct execute_work ew;
168 } Sg_fd;
169 
170 typedef struct sg_device { /* holds the state of each scsi generic device */
172  wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
173  int sg_tablesize; /* adapter's max scatter-gather table size */
174  u32 index; /* device index number */
175  /* sfds is protected by sg_index_lock */
176  struct list_head sfds;
177  volatile char detached; /* 0->attached, 1->detached pending removal */
178  /* exclude protected by sg_open_exclusive_lock */
179  char exclude; /* opened for exclusive access */
180  char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
181  struct gendisk *disk;
182  struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
183  struct kref d_ref;
184 } Sg_device;
185 
186 /* tasklet or soft irq callback */
187 static void sg_rq_end_io(struct request *rq, int uptodate);
188 static int sg_start_req(Sg_request *srp, unsigned char *cmd);
189 static int sg_finish_rem_req(Sg_request * srp);
190 static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
191 static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
192  Sg_request * srp);
193 static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
194  const char __user *buf, size_t count, int blocking,
195  int read_only, int sg_io_owned, Sg_request **o_srp);
196 static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
197  unsigned char *cmnd, int timeout, int blocking);
198 static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
199 static void sg_remove_scat(Sg_scatter_hold * schp);
200 static void sg_build_reserve(Sg_fd * sfp, int req_size);
201 static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
202 static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
203 static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
204 static void sg_remove_sfp(struct kref *);
205 static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
206 static Sg_request *sg_add_request(Sg_fd * sfp);
207 static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
208 static int sg_res_in_use(Sg_fd * sfp);
209 static Sg_device *sg_get_dev(int dev);
210 static void sg_put_dev(Sg_device *sdp);
211 
212 #define SZ_SG_HEADER sizeof(struct sg_header)
213 #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
214 #define SZ_SG_IOVEC sizeof(sg_iovec_t)
215 #define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
216 
217 static int sg_allow_access(struct file *filp, unsigned char *cmd)
218 {
219  struct sg_fd *sfp = filp->private_data;
220 
221  if (sfp->parentdp->device->type == TYPE_SCANNER)
222  return 0;
223 
224  return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
225 }
226 
227 static int get_exclude(Sg_device *sdp)
228 {
229  unsigned long flags;
230  int ret;
231 
232  spin_lock_irqsave(&sg_open_exclusive_lock, flags);
233  ret = sdp->exclude;
234  spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
235  return ret;
236 }
237 
238 static int set_exclude(Sg_device *sdp, char val)
239 {
240  unsigned long flags;
241 
242  spin_lock_irqsave(&sg_open_exclusive_lock, flags);
243  sdp->exclude = val;
244  spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
245  return val;
246 }
247 
248 static int sfds_list_empty(Sg_device *sdp)
249 {
250  unsigned long flags;
251  int ret;
252 
253  read_lock_irqsave(&sg_index_lock, flags);
254  ret = list_empty(&sdp->sfds);
255  read_unlock_irqrestore(&sg_index_lock, flags);
256  return ret;
257 }
258 
259 static int
260 sg_open(struct inode *inode, struct file *filp)
261 {
262  int dev = iminor(inode);
263  int flags = filp->f_flags;
264  struct request_queue *q;
265  Sg_device *sdp;
266  Sg_fd *sfp;
267  int res;
268  int retval;
269 
270  nonseekable_open(inode, filp);
271  SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
272  sdp = sg_get_dev(dev);
273  if (IS_ERR(sdp)) {
274  retval = PTR_ERR(sdp);
275  sdp = NULL;
276  goto sg_put;
277  }
278 
279  /* This driver's module count bumped by fops_get in <linux/fs.h> */
280  /* Prevent the device driver from vanishing while we sleep */
281  retval = scsi_device_get(sdp->device);
282  if (retval)
283  goto sg_put;
284 
285  retval = scsi_autopm_get_device(sdp->device);
286  if (retval)
287  goto sdp_put;
288 
289  if (!((flags & O_NONBLOCK) ||
291  retval = -ENXIO;
292  /* we are in error recovery for this device */
293  goto error_out;
294  }
295 
296  if (flags & O_EXCL) {
297  if (O_RDONLY == (flags & O_ACCMODE)) {
298  retval = -EPERM; /* Can't lock it with read only access */
299  goto error_out;
300  }
301  if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) {
302  retval = -EBUSY;
303  goto error_out;
304  }
306  ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1)));
307  if (res) {
308  retval = res; /* -ERESTARTSYS because signal hit process */
309  goto error_out;
310  }
311  } else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */
312  if (flags & O_NONBLOCK) {
313  retval = -EBUSY;
314  goto error_out;
315  }
316  res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp));
317  if (res) {
318  retval = res; /* -ERESTARTSYS because signal hit process */
319  goto error_out;
320  }
321  }
322  if (sdp->detached) {
323  retval = -ENODEV;
324  goto error_out;
325  }
326  if (sfds_list_empty(sdp)) { /* no existing opens on this device */
327  sdp->sgdebug = 0;
328  q = sdp->device->request_queue;
329  sdp->sg_tablesize = queue_max_segments(q);
330  }
331  if ((sfp = sg_add_sfp(sdp, dev)))
332  filp->private_data = sfp;
333  else {
334  if (flags & O_EXCL) {
335  set_exclude(sdp, 0); /* undo if error */
337  }
338  retval = -ENOMEM;
339  goto error_out;
340  }
341  retval = 0;
342 error_out:
343  if (retval) {
344  scsi_autopm_put_device(sdp->device);
345 sdp_put:
346  scsi_device_put(sdp->device);
347  }
348 sg_put:
349  if (sdp)
350  sg_put_dev(sdp);
351  return retval;
352 }
353 
354 /* Following function was formerly called 'sg_close' */
355 static int
356 sg_release(struct inode *inode, struct file *filp)
357 {
358  Sg_device *sdp;
359  Sg_fd *sfp;
360 
361  if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
362  return -ENXIO;
363  SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
364 
365  set_exclude(sdp, 0);
367 
368  scsi_autopm_put_device(sdp->device);
369  kref_put(&sfp->f_ref, sg_remove_sfp);
370  return 0;
371 }
372 
373 static ssize_t
374 sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
375 {
376  Sg_device *sdp;
377  Sg_fd *sfp;
378  Sg_request *srp;
379  int req_pack_id = -1;
380  sg_io_hdr_t *hp;
381  struct sg_header *old_hdr = NULL;
382  int retval = 0;
383 
384  if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
385  return -ENXIO;
386  SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
387  sdp->disk->disk_name, (int) count));
388 
389  if (!access_ok(VERIFY_WRITE, buf, count))
390  return -EFAULT;
391  if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
392  old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
393  if (!old_hdr)
394  return -ENOMEM;
395  if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
396  retval = -EFAULT;
397  goto free_old_hdr;
398  }
399  if (old_hdr->reply_len < 0) {
400  if (count >= SZ_SG_IO_HDR) {
401  sg_io_hdr_t *new_hdr;
402  new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
403  if (!new_hdr) {
404  retval = -ENOMEM;
405  goto free_old_hdr;
406  }
407  retval =__copy_from_user
408  (new_hdr, buf, SZ_SG_IO_HDR);
409  req_pack_id = new_hdr->pack_id;
410  kfree(new_hdr);
411  if (retval) {
412  retval = -EFAULT;
413  goto free_old_hdr;
414  }
415  }
416  } else
417  req_pack_id = old_hdr->pack_id;
418  }
419  srp = sg_get_rq_mark(sfp, req_pack_id);
420  if (!srp) { /* now wait on packet to arrive */
421  if (sdp->detached) {
422  retval = -ENODEV;
423  goto free_old_hdr;
424  }
425  if (filp->f_flags & O_NONBLOCK) {
426  retval = -EAGAIN;
427  goto free_old_hdr;
428  }
429  retval = wait_event_interruptible(sfp->read_wait,
430  (sdp->detached ||
431  (srp = sg_get_rq_mark(sfp, req_pack_id))));
432  if (sdp->detached) {
433  retval = -ENODEV;
434  goto free_old_hdr;
435  }
436  if (retval) {
437  /* -ERESTARTSYS as signal hit process */
438  goto free_old_hdr;
439  }
440  }
441  if (srp->header.interface_id != '\0') {
442  retval = sg_new_read(sfp, buf, count, srp);
443  goto free_old_hdr;
444  }
445 
446  hp = &srp->header;
447  if (old_hdr == NULL) {
448  old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
449  if (! old_hdr) {
450  retval = -ENOMEM;
451  goto free_old_hdr;
452  }
453  }
454  memset(old_hdr, 0, SZ_SG_HEADER);
455  old_hdr->reply_len = (int) hp->timeout;
456  old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
457  old_hdr->pack_id = hp->pack_id;
458  old_hdr->twelve_byte =
459  ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
460  old_hdr->target_status = hp->masked_status;
461  old_hdr->host_status = hp->host_status;
462  old_hdr->driver_status = hp->driver_status;
463  if ((CHECK_CONDITION & hp->masked_status) ||
464  (DRIVER_SENSE & hp->driver_status))
465  memcpy(old_hdr->sense_buffer, srp->sense_b,
466  sizeof (old_hdr->sense_buffer));
467  switch (hp->host_status) {
468  /* This setup of 'result' is for backward compatibility and is best
469  ignored by the user who should use target, host + driver status */
470  case DID_OK:
471  case DID_PASSTHROUGH:
472  case DID_SOFT_ERROR:
473  old_hdr->result = 0;
474  break;
475  case DID_NO_CONNECT:
476  case DID_BUS_BUSY:
477  case DID_TIME_OUT:
478  old_hdr->result = EBUSY;
479  break;
480  case DID_BAD_TARGET:
481  case DID_ABORT:
482  case DID_PARITY:
483  case DID_RESET:
484  case DID_BAD_INTR:
485  old_hdr->result = EIO;
486  break;
487  case DID_ERROR:
488  old_hdr->result = (srp->sense_b[0] == 0 &&
489  hp->masked_status == GOOD) ? 0 : EIO;
490  break;
491  default:
492  old_hdr->result = EIO;
493  break;
494  }
495 
496  /* Now copy the result back to the user buffer. */
497  if (count >= SZ_SG_HEADER) {
498  if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
499  retval = -EFAULT;
500  goto free_old_hdr;
501  }
502  buf += SZ_SG_HEADER;
503  if (count > old_hdr->reply_len)
504  count = old_hdr->reply_len;
505  if (count > SZ_SG_HEADER) {
506  if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
507  retval = -EFAULT;
508  goto free_old_hdr;
509  }
510  }
511  } else
512  count = (old_hdr->result == 0) ? 0 : -EIO;
513  sg_finish_rem_req(srp);
514  retval = count;
515 free_old_hdr:
516  kfree(old_hdr);
517  return retval;
518 }
519 
520 static ssize_t
521 sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
522 {
523  sg_io_hdr_t *hp = &srp->header;
524  int err = 0;
525  int len;
526 
527  if (count < SZ_SG_IO_HDR) {
528  err = -EINVAL;
529  goto err_out;
530  }
531  hp->sb_len_wr = 0;
532  if ((hp->mx_sb_len > 0) && hp->sbp) {
533  if ((CHECK_CONDITION & hp->masked_status) ||
534  (DRIVER_SENSE & hp->driver_status)) {
535  int sb_len = SCSI_SENSE_BUFFERSIZE;
536  sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
537  len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */
538  len = (len > sb_len) ? sb_len : len;
539  if (copy_to_user(hp->sbp, srp->sense_b, len)) {
540  err = -EFAULT;
541  goto err_out;
542  }
543  hp->sb_len_wr = len;
544  }
545  }
546  if (hp->masked_status || hp->host_status || hp->driver_status)
547  hp->info |= SG_INFO_CHECK;
548  if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
549  err = -EFAULT;
550  goto err_out;
551  }
552 err_out:
553  err = sg_finish_rem_req(srp);
554  return (0 == err) ? count : err;
555 }
556 
557 static ssize_t
558 sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
559 {
560  int mxsize, cmd_size, k;
561  int input_size, blocking;
562  unsigned char opcode;
563  Sg_device *sdp;
564  Sg_fd *sfp;
565  Sg_request *srp;
566  struct sg_header old_hdr;
567  sg_io_hdr_t *hp;
568  unsigned char cmnd[MAX_COMMAND_SIZE];
569 
570  if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
571  return -ENXIO;
572  SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
573  sdp->disk->disk_name, (int) count));
574  if (sdp->detached)
575  return -ENODEV;
576  if (!((filp->f_flags & O_NONBLOCK) ||
578  return -ENXIO;
579 
580  if (!access_ok(VERIFY_READ, buf, count))
581  return -EFAULT; /* protects following copy_from_user()s + get_user()s */
582  if (count < SZ_SG_HEADER)
583  return -EIO;
584  if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
585  return -EFAULT;
586  blocking = !(filp->f_flags & O_NONBLOCK);
587  if (old_hdr.reply_len < 0)
588  return sg_new_write(sfp, filp, buf, count,
589  blocking, 0, 0, NULL);
590  if (count < (SZ_SG_HEADER + 6))
591  return -EIO; /* The minimum scsi command length is 6 bytes. */
592 
593  if (!(srp = sg_add_request(sfp))) {
594  SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
595  return -EDOM;
596  }
597  buf += SZ_SG_HEADER;
598  __get_user(opcode, buf);
599  if (sfp->next_cmd_len > 0) {
600  if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
601  SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
602  sfp->next_cmd_len = 0;
603  sg_remove_request(sfp, srp);
604  return -EIO;
605  }
606  cmd_size = sfp->next_cmd_len;
607  sfp->next_cmd_len = 0; /* reset so only this write() effected */
608  } else {
609  cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */
610  if ((opcode >= 0xc0) && old_hdr.twelve_byte)
611  cmd_size = 12;
612  }
614  "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
615 /* Determine buffer size. */
616  input_size = count - cmd_size;
617  mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
618  mxsize -= SZ_SG_HEADER;
619  input_size -= SZ_SG_HEADER;
620  if (input_size < 0) {
621  sg_remove_request(sfp, srp);
622  return -EIO; /* User did not pass enough bytes for this command. */
623  }
624  hp = &srp->header;
625  hp->interface_id = '\0'; /* indicator of old interface tunnelled */
626  hp->cmd_len = (unsigned char) cmd_size;
627  hp->iovec_count = 0;
628  hp->mx_sb_len = 0;
629  if (input_size > 0)
630  hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
632  else
633  hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
634  hp->dxfer_len = mxsize;
635  if (hp->dxfer_direction == SG_DXFER_TO_DEV)
636  hp->dxferp = (char __user *)buf + cmd_size;
637  else
638  hp->dxferp = NULL;
639  hp->sbp = NULL;
640  hp->timeout = old_hdr.reply_len; /* structure abuse ... */
641  hp->flags = input_size; /* structure abuse ... */
642  hp->pack_id = old_hdr.pack_id;
643  hp->usr_ptr = NULL;
644  if (__copy_from_user(cmnd, buf, cmd_size))
645  return -EFAULT;
646  /*
647  * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
648  * but is is possible that the app intended SG_DXFER_TO_DEV, because there
649  * is a non-zero input_size, so emit a warning.
650  */
651  if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
652  static char cmd[TASK_COMM_LEN];
653  if (strcmp(current->comm, cmd)) {
655  "sg_write: data in/out %d/%d bytes "
656  "for SCSI command 0x%x-- guessing "
657  "data in;\n program %s not setting "
658  "count and/or reply_len properly\n",
659  old_hdr.reply_len - (int)SZ_SG_HEADER,
660  input_size, (unsigned int) cmnd[0],
661  current->comm);
662  strcpy(cmd, current->comm);
663  }
664  }
665  k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
666  return (k < 0) ? k : count;
667 }
668 
669 static ssize_t
670 sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
671  size_t count, int blocking, int read_only, int sg_io_owned,
672  Sg_request **o_srp)
673 {
674  int k;
675  Sg_request *srp;
676  sg_io_hdr_t *hp;
677  unsigned char cmnd[MAX_COMMAND_SIZE];
678  int timeout;
679  unsigned long ul_timeout;
680 
681  if (count < SZ_SG_IO_HDR)
682  return -EINVAL;
683  if (!access_ok(VERIFY_READ, buf, count))
684  return -EFAULT; /* protects following copy_from_user()s + get_user()s */
685 
686  sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
687  if (!(srp = sg_add_request(sfp))) {
688  SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
689  return -EDOM;
690  }
691  srp->sg_io_owned = sg_io_owned;
692  hp = &srp->header;
693  if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
694  sg_remove_request(sfp, srp);
695  return -EFAULT;
696  }
697  if (hp->interface_id != 'S') {
698  sg_remove_request(sfp, srp);
699  return -ENOSYS;
700  }
701  if (hp->flags & SG_FLAG_MMAP_IO) {
702  if (hp->dxfer_len > sfp->reserve.bufflen) {
703  sg_remove_request(sfp, srp);
704  return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */
705  }
706  if (hp->flags & SG_FLAG_DIRECT_IO) {
707  sg_remove_request(sfp, srp);
708  return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
709  }
710  if (sg_res_in_use(sfp)) {
711  sg_remove_request(sfp, srp);
712  return -EBUSY; /* reserve buffer already being used */
713  }
714  }
715  ul_timeout = msecs_to_jiffies(srp->header.timeout);
716  timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
717  if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
718  sg_remove_request(sfp, srp);
719  return -EMSGSIZE;
720  }
721  if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
722  sg_remove_request(sfp, srp);
723  return -EFAULT; /* protects following copy_from_user()s + get_user()s */
724  }
725  if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
726  sg_remove_request(sfp, srp);
727  return -EFAULT;
728  }
729  if (read_only && sg_allow_access(file, cmnd)) {
730  sg_remove_request(sfp, srp);
731  return -EPERM;
732  }
733  k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
734  if (k < 0)
735  return k;
736  if (o_srp)
737  *o_srp = srp;
738  return count;
739 }
740 
741 static int
742 sg_common_write(Sg_fd * sfp, Sg_request * srp,
743  unsigned char *cmnd, int timeout, int blocking)
744 {
745  int k, data_dir;
746  Sg_device *sdp = sfp->parentdp;
747  sg_io_hdr_t *hp = &srp->header;
748 
749  srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */
750  hp->status = 0;
751  hp->masked_status = 0;
752  hp->msg_status = 0;
753  hp->info = 0;
754  hp->host_status = 0;
755  hp->driver_status = 0;
756  hp->resid = 0;
757  SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
758  (int) cmnd[0], (int) hp->cmd_len));
759 
760  k = sg_start_req(srp, cmnd);
761  if (k) {
762  SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
763  sg_finish_rem_req(srp);
764  return k; /* probably out of space --> ENOMEM */
765  }
766  if (sdp->detached) {
767  if (srp->bio)
768  blk_end_request_all(srp->rq, -EIO);
769  sg_finish_rem_req(srp);
770  return -ENODEV;
771  }
772 
773  switch (hp->dxfer_direction) {
775  case SG_DXFER_FROM_DEV:
776  data_dir = DMA_FROM_DEVICE;
777  break;
778  case SG_DXFER_TO_DEV:
779  data_dir = DMA_TO_DEVICE;
780  break;
781  case SG_DXFER_UNKNOWN:
782  data_dir = DMA_BIDIRECTIONAL;
783  break;
784  default:
785  data_dir = DMA_NONE;
786  break;
787  }
788  hp->duration = jiffies_to_msecs(jiffies);
789 
790  srp->rq->timeout = timeout;
791  kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
792  blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
793  srp->rq, 1, sg_rq_end_io);
794  return 0;
795 }
796 
797 static int srp_done(Sg_fd *sfp, Sg_request *srp)
798 {
799  unsigned long flags;
800  int ret;
801 
802  read_lock_irqsave(&sfp->rq_list_lock, flags);
803  ret = srp->done;
804  read_unlock_irqrestore(&sfp->rq_list_lock, flags);
805  return ret;
806 }
807 
808 static long
809 sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
810 {
811  void __user *p = (void __user *)arg;
812  int __user *ip = p;
813  int result, val, read_only;
814  Sg_device *sdp;
815  Sg_fd *sfp;
816  Sg_request *srp;
817  unsigned long iflags;
818 
819  if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
820  return -ENXIO;
821 
822  SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
823  sdp->disk->disk_name, (int) cmd_in));
824  read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
825 
826  switch (cmd_in) {
827  case SG_IO:
828  if (sdp->detached)
829  return -ENODEV;
831  return -ENXIO;
833  return -EFAULT;
834  result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
835  1, read_only, 1, &srp);
836  if (result < 0)
837  return result;
838  result = wait_event_interruptible(sfp->read_wait,
839  (srp_done(sfp, srp) || sdp->detached));
840  if (sdp->detached)
841  return -ENODEV;
843  if (srp->done) {
844  srp->done = 2;
846  result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
847  return (result < 0) ? result : 0;
848  }
849  srp->orphan = 1;
851  return result; /* -ERESTARTSYS because signal hit process */
852  case SG_SET_TIMEOUT:
853  result = get_user(val, ip);
854  if (result)
855  return result;
856  if (val < 0)
857  return -EIO;
858  if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
859  val = MULDIV (INT_MAX, USER_HZ, HZ);
860  sfp->timeout_user = val;
861  sfp->timeout = MULDIV (val, HZ, USER_HZ);
862 
863  return 0;
864  case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
865  /* strange ..., for backward compatibility */
866  return sfp->timeout_user;
868  result = get_user(val, ip);
869  if (result)
870  return result;
871  if (val) {
872  sfp->low_dma = 1;
873  if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
874  val = (int) sfp->reserve.bufflen;
875  sg_remove_scat(&sfp->reserve);
876  sg_build_reserve(sfp, val);
877  }
878  } else {
879  if (sdp->detached)
880  return -ENODEV;
881  sfp->low_dma = sdp->device->host->unchecked_isa_dma;
882  }
883  return 0;
884  case SG_GET_LOW_DMA:
885  return put_user((int) sfp->low_dma, ip);
886  case SG_GET_SCSI_ID:
887  if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
888  return -EFAULT;
889  else {
890  sg_scsi_id_t __user *sg_idp = p;
891 
892  if (sdp->detached)
893  return -ENODEV;
894  __put_user((int) sdp->device->host->host_no,
895  &sg_idp->host_no);
896  __put_user((int) sdp->device->channel,
897  &sg_idp->channel);
898  __put_user((int) sdp->device->id, &sg_idp->scsi_id);
899  __put_user((int) sdp->device->lun, &sg_idp->lun);
900  __put_user((int) sdp->device->type, &sg_idp->scsi_type);
901  __put_user((short) sdp->device->host->cmd_per_lun,
902  &sg_idp->h_cmd_per_lun);
903  __put_user((short) sdp->device->queue_depth,
904  &sg_idp->d_queue_depth);
905  __put_user(0, &sg_idp->unused[0]);
906  __put_user(0, &sg_idp->unused[1]);
907  return 0;
908  }
910  result = get_user(val, ip);
911  if (result)
912  return result;
913  sfp->force_packid = val ? 1 : 0;
914  return 0;
915  case SG_GET_PACK_ID:
916  if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
917  return -EFAULT;
918  read_lock_irqsave(&sfp->rq_list_lock, iflags);
919  for (srp = sfp->headrp; srp; srp = srp->nextrp) {
920  if ((1 == srp->done) && (!srp->sg_io_owned)) {
922  iflags);
923  __put_user(srp->header.pack_id, ip);
924  return 0;
925  }
926  }
927  read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
928  __put_user(-1, ip);
929  return 0;
930  case SG_GET_NUM_WAITING:
931  read_lock_irqsave(&sfp->rq_list_lock, iflags);
932  for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
933  if ((1 == srp->done) && (!srp->sg_io_owned))
934  ++val;
935  }
936  read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
937  return put_user(val, ip);
938  case SG_GET_SG_TABLESIZE:
939  return put_user(sdp->sg_tablesize, ip);
941  result = get_user(val, ip);
942  if (result)
943  return result;
944  if (val < 0)
945  return -EINVAL;
946  val = min_t(int, val,
947  queue_max_sectors(sdp->device->request_queue) * 512);
948  if (val != sfp->reserve.bufflen) {
949  if (sg_res_in_use(sfp) || sfp->mmap_called)
950  return -EBUSY;
951  sg_remove_scat(&sfp->reserve);
952  sg_build_reserve(sfp, val);
953  }
954  return 0;
956  val = min_t(int, sfp->reserve.bufflen,
957  queue_max_sectors(sdp->device->request_queue) * 512);
958  return put_user(val, ip);
959  case SG_SET_COMMAND_Q:
960  result = get_user(val, ip);
961  if (result)
962  return result;
963  sfp->cmd_q = val ? 1 : 0;
964  return 0;
965  case SG_GET_COMMAND_Q:
966  return put_user((int) sfp->cmd_q, ip);
967  case SG_SET_KEEP_ORPHAN:
968  result = get_user(val, ip);
969  if (result)
970  return result;
971  sfp->keep_orphan = val;
972  return 0;
973  case SG_GET_KEEP_ORPHAN:
974  return put_user((int) sfp->keep_orphan, ip);
975  case SG_NEXT_CMD_LEN:
976  result = get_user(val, ip);
977  if (result)
978  return result;
979  sfp->next_cmd_len = (val > 0) ? val : 0;
980  return 0;
981  case SG_GET_VERSION_NUM:
982  return put_user(sg_version_num, ip);
983  case SG_GET_ACCESS_COUNT:
984  /* faked - we don't have a real access count anymore */
985  val = (sdp->device ? 1 : 0);
986  return put_user(val, ip);
989  return -EFAULT;
990  else {
991  sg_req_info_t *rinfo;
992  unsigned int ms;
993 
994  rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
995  GFP_KERNEL);
996  if (!rinfo)
997  return -ENOMEM;
998  read_lock_irqsave(&sfp->rq_list_lock, iflags);
999  for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
1000  ++val, srp = srp ? srp->nextrp : srp) {
1001  memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
1002  if (srp) {
1003  rinfo[val].req_state = srp->done + 1;
1004  rinfo[val].problem =
1005  srp->header.masked_status &
1006  srp->header.host_status &
1007  srp->header.driver_status;
1008  if (srp->done)
1009  rinfo[val].duration =
1010  srp->header.duration;
1011  else {
1012  ms = jiffies_to_msecs(jiffies);
1013  rinfo[val].duration =
1014  (ms > srp->header.duration) ?
1015  (ms - srp->header.duration) : 0;
1016  }
1017  rinfo[val].orphan = srp->orphan;
1018  rinfo[val].sg_io_owned =
1019  srp->sg_io_owned;
1020  rinfo[val].pack_id =
1021  srp->header.pack_id;
1022  rinfo[val].usr_ptr =
1023  srp->header.usr_ptr;
1024  }
1025  }
1026  read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1027  result = __copy_to_user(p, rinfo,
1028  SZ_SG_REQ_INFO * SG_MAX_QUEUE);
1029  result = result ? -EFAULT : 0;
1030  kfree(rinfo);
1031  return result;
1032  }
1033  case SG_EMULATED_HOST:
1034  if (sdp->detached)
1035  return -ENODEV;
1036  return put_user(sdp->device->host->hostt->emulated, ip);
1037  case SG_SCSI_RESET:
1038  if (sdp->detached)
1039  return -ENODEV;
1040  if (filp->f_flags & O_NONBLOCK) {
1041  if (scsi_host_in_recovery(sdp->device->host))
1042  return -EBUSY;
1043  } else if (!scsi_block_when_processing_errors(sdp->device))
1044  return -EBUSY;
1045  result = get_user(val, ip);
1046  if (result)
1047  return result;
1048  if (SG_SCSI_RESET_NOTHING == val)
1049  return 0;
1050  switch (val) {
1051  case SG_SCSI_RESET_DEVICE:
1052  val = SCSI_TRY_RESET_DEVICE;
1053  break;
1054  case SG_SCSI_RESET_TARGET:
1055  val = SCSI_TRY_RESET_TARGET;
1056  break;
1057  case SG_SCSI_RESET_BUS:
1058  val = SCSI_TRY_RESET_BUS;
1059  break;
1060  case SG_SCSI_RESET_HOST:
1061  val = SCSI_TRY_RESET_HOST;
1062  break;
1063  default:
1064  return -EINVAL;
1065  }
1067  return -EACCES;
1068  return (scsi_reset_provider(sdp->device, val) ==
1069  SUCCESS) ? 0 : -EIO;
1071  if (sdp->detached)
1072  return -ENODEV;
1073  if (read_only) {
1074  unsigned char opcode = WRITE_6;
1075  Scsi_Ioctl_Command __user *siocp = p;
1076 
1077  if (copy_from_user(&opcode, siocp->data, 1))
1078  return -EFAULT;
1079  if (sg_allow_access(filp, &opcode))
1080  return -EPERM;
1081  }
1082  return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
1083  case SG_SET_DEBUG:
1084  result = get_user(val, ip);
1085  if (result)
1086  return result;
1087  sdp->sgdebug = (char) val;
1088  return 0;
1089  case SCSI_IOCTL_GET_IDLUN:
1091  case SCSI_IOCTL_PROBE_HOST:
1092  case SG_GET_TRANSFORM:
1093  if (sdp->detached)
1094  return -ENODEV;
1095  return scsi_ioctl(sdp->device, cmd_in, p);
1096  case BLKSECTGET:
1097  return put_user(queue_max_sectors(sdp->device->request_queue) * 512,
1098  ip);
1099  case BLKTRACESETUP:
1100  return blk_trace_setup(sdp->device->request_queue,
1101  sdp->disk->disk_name,
1103  NULL,
1104  (char *)arg);
1105  case BLKTRACESTART:
1106  return blk_trace_startstop(sdp->device->request_queue, 1);
1107  case BLKTRACESTOP:
1108  return blk_trace_startstop(sdp->device->request_queue, 0);
1109  case BLKTRACETEARDOWN:
1110  return blk_trace_remove(sdp->device->request_queue);
1111  default:
1112  if (read_only)
1113  return -EPERM; /* don't know so take safe approach */
1114  return scsi_ioctl(sdp->device, cmd_in, p);
1115  }
1116 }
1117 
1118 #ifdef CONFIG_COMPAT
1119 static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1120 {
1121  Sg_device *sdp;
1122  Sg_fd *sfp;
1123  struct scsi_device *sdev;
1124 
1125  if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
1126  return -ENXIO;
1127 
1128  sdev = sdp->device;
1129  if (sdev->host->hostt->compat_ioctl) {
1130  int ret;
1131 
1132  ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
1133 
1134  return ret;
1135  }
1136 
1137  return -ENOIOCTLCMD;
1138 }
1139 #endif
1140 
1141 static unsigned int
1142 sg_poll(struct file *filp, poll_table * wait)
1143 {
1144  unsigned int res = 0;
1145  Sg_device *sdp;
1146  Sg_fd *sfp;
1147  Sg_request *srp;
1148  int count = 0;
1149  unsigned long iflags;
1150 
1151  sfp = filp->private_data;
1152  if (!sfp)
1153  return POLLERR;
1154  sdp = sfp->parentdp;
1155  if (!sdp)
1156  return POLLERR;
1157  poll_wait(filp, &sfp->read_wait, wait);
1158  read_lock_irqsave(&sfp->rq_list_lock, iflags);
1159  for (srp = sfp->headrp; srp; srp = srp->nextrp) {
1160  /* if any read waiting, flag it */
1161  if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
1162  res = POLLIN | POLLRDNORM;
1163  ++count;
1164  }
1165  read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1166 
1167  if (sdp->detached)
1168  res |= POLLHUP;
1169  else if (!sfp->cmd_q) {
1170  if (0 == count)
1171  res |= POLLOUT | POLLWRNORM;
1172  } else if (count < SG_MAX_QUEUE)
1173  res |= POLLOUT | POLLWRNORM;
1174  SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
1175  sdp->disk->disk_name, (int) res));
1176  return res;
1177 }
1178 
1179 static int
1180 sg_fasync(int fd, struct file *filp, int mode)
1181 {
1182  Sg_device *sdp;
1183  Sg_fd *sfp;
1184 
1185  if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
1186  return -ENXIO;
1187  SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
1188  sdp->disk->disk_name, mode));
1189 
1190  return fasync_helper(fd, filp, mode, &sfp->async_qp);
1191 }
1192 
1193 static int
1194 sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1195 {
1196  Sg_fd *sfp;
1197  unsigned long offset, len, sa;
1198  Sg_scatter_hold *rsv_schp;
1199  int k, length;
1200 
1201  if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
1202  return VM_FAULT_SIGBUS;
1203  rsv_schp = &sfp->reserve;
1204  offset = vmf->pgoff << PAGE_SHIFT;
1205  if (offset >= rsv_schp->bufflen)
1206  return VM_FAULT_SIGBUS;
1207  SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
1208  offset, rsv_schp->k_use_sg));
1209  sa = vma->vm_start;
1210  length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
1211  for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
1212  len = vma->vm_end - sa;
1213  len = (len < length) ? len : length;
1214  if (offset < len) {
1215  struct page *page = nth_page(rsv_schp->pages[k],
1216  offset >> PAGE_SHIFT);
1217  get_page(page); /* increment page count */
1218  vmf->page = page;
1219  return 0; /* success */
1220  }
1221  sa += len;
1222  offset -= len;
1223  }
1224 
1225  return VM_FAULT_SIGBUS;
1226 }
1227 
1228 static const struct vm_operations_struct sg_mmap_vm_ops = {
1229  .fault = sg_vma_fault,
1230 };
1231 
1232 static int
1233 sg_mmap(struct file *filp, struct vm_area_struct *vma)
1234 {
1235  Sg_fd *sfp;
1236  unsigned long req_sz, len, sa;
1237  Sg_scatter_hold *rsv_schp;
1238  int k, length;
1239 
1240  if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
1241  return -ENXIO;
1242  req_sz = vma->vm_end - vma->vm_start;
1243  SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
1244  (void *) vma->vm_start, (int) req_sz));
1245  if (vma->vm_pgoff)
1246  return -EINVAL; /* want no offset */
1247  rsv_schp = &sfp->reserve;
1248  if (req_sz > rsv_schp->bufflen)
1249  return -ENOMEM; /* cannot map more than reserved buffer */
1250 
1251  sa = vma->vm_start;
1252  length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
1253  for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
1254  len = vma->vm_end - sa;
1255  len = (len < length) ? len : length;
1256  sa += len;
1257  }
1258 
1259  sfp->mmap_called = 1;
1260  vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1261  vma->vm_private_data = sfp;
1262  vma->vm_ops = &sg_mmap_vm_ops;
1263  return 0;
1264 }
1265 
1266 static void sg_rq_end_io_usercontext(struct work_struct *work)
1267 {
1268  struct sg_request *srp = container_of(work, struct sg_request, ew.work);
1269  struct sg_fd *sfp = srp->parentfp;
1270 
1271  sg_finish_rem_req(srp);
1272  kref_put(&sfp->f_ref, sg_remove_sfp);
1273 }
1274 
1275 /*
1276  * This function is a "bottom half" handler that is called by the mid
1277  * level when a command is completed (or has failed).
1278  */
1279 static void sg_rq_end_io(struct request *rq, int uptodate)
1280 {
1281  struct sg_request *srp = rq->end_io_data;
1282  Sg_device *sdp;
1283  Sg_fd *sfp;
1284  unsigned long iflags;
1285  unsigned int ms;
1286  char *sense;
1287  int result, resid, done = 1;
1288 
1289  if (WARN_ON(srp->done != 0))
1290  return;
1291 
1292  sfp = srp->parentfp;
1293  if (WARN_ON(sfp == NULL))
1294  return;
1295 
1296  sdp = sfp->parentdp;
1297  if (unlikely(sdp->detached))
1298  printk(KERN_INFO "sg_rq_end_io: device detached\n");
1299 
1300  sense = rq->sense;
1301  result = rq->errors;
1302  resid = rq->resid_len;
1303 
1304  SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
1305  sdp->disk->disk_name, srp->header.pack_id, result));
1306  srp->header.resid = resid;
1307  ms = jiffies_to_msecs(jiffies);
1308  srp->header.duration = (ms > srp->header.duration) ?
1309  (ms - srp->header.duration) : 0;
1310  if (0 != result) {
1311  struct scsi_sense_hdr sshdr;
1312 
1313  srp->header.status = 0xff & result;
1314  srp->header.masked_status = status_byte(result);
1315  srp->header.msg_status = msg_byte(result);
1316  srp->header.host_status = host_byte(result);
1317  srp->header.driver_status = driver_byte(result);
1318  if ((sdp->sgdebug > 0) &&
1319  ((CHECK_CONDITION == srp->header.masked_status) ||
1321  __scsi_print_sense("sg_cmd_done", sense,
1323 
1324  /* Following if statement is a patch supplied by Eric Youngdale */
1325  if (driver_byte(result) != 0
1326  && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
1327  && !scsi_sense_is_deferred(&sshdr)
1328  && sshdr.sense_key == UNIT_ATTENTION
1329  && sdp->device->removable) {
1330  /* Detected possible disc change. Set the bit - this */
1331  /* may be used if there are filesystems using this device */
1332  sdp->device->changed = 1;
1333  }
1334  }
1335  /* Rely on write phase to clean out srp status values, so no "else" */
1336 
1337  write_lock_irqsave(&sfp->rq_list_lock, iflags);
1338  if (unlikely(srp->orphan)) {
1339  if (sfp->keep_orphan)
1340  srp->sg_io_owned = 0;
1341  else
1342  done = 0;
1343  }
1344  srp->done = done;
1345  write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1346 
1347  if (likely(done)) {
1348  /* Now wake up any sg_read() that is waiting for this
1349  * packet.
1350  */
1353  kref_put(&sfp->f_ref, sg_remove_sfp);
1354  } else {
1355  INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
1356  schedule_work(&srp->ew.work);
1357  }
1358 }
1359 
1360 static const struct file_operations sg_fops = {
1361  .owner = THIS_MODULE,
1362  .read = sg_read,
1363  .write = sg_write,
1364  .poll = sg_poll,
1365  .unlocked_ioctl = sg_ioctl,
1366 #ifdef CONFIG_COMPAT
1367  .compat_ioctl = sg_compat_ioctl,
1368 #endif
1369  .open = sg_open,
1370  .mmap = sg_mmap,
1371  .release = sg_release,
1372  .fasync = sg_fasync,
1373  .llseek = no_llseek,
1374 };
1375 
1376 static struct class *sg_sysfs_class;
1377 
1378 static int sg_sysfs_valid = 0;
1379 
1380 static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
1381 {
1382  struct request_queue *q = scsidp->request_queue;
1383  Sg_device *sdp;
1384  unsigned long iflags;
1385  int error;
1386  u32 k;
1387 
1388  sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
1389  if (!sdp) {
1390  printk(KERN_WARNING "kmalloc Sg_device failure\n");
1391  return ERR_PTR(-ENOMEM);
1392  }
1393 
1394  if (!idr_pre_get(&sg_index_idr, GFP_KERNEL)) {
1395  printk(KERN_WARNING "idr expansion Sg_device failure\n");
1396  error = -ENOMEM;
1397  goto out;
1398  }
1399 
1400  write_lock_irqsave(&sg_index_lock, iflags);
1401 
1402  error = idr_get_new(&sg_index_idr, sdp, &k);
1403  if (error) {
1404  write_unlock_irqrestore(&sg_index_lock, iflags);
1405  printk(KERN_WARNING "idr allocation Sg_device failure: %d\n",
1406  error);
1407  goto out;
1408  }
1409 
1410  if (unlikely(k >= SG_MAX_DEVS))
1411  goto overflow;
1412 
1413  SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
1414  sprintf(disk->disk_name, "sg%d", k);
1415  disk->first_minor = k;
1416  sdp->disk = disk;
1417  sdp->device = scsidp;
1418  INIT_LIST_HEAD(&sdp->sfds);
1420  sdp->sg_tablesize = queue_max_segments(q);
1421  sdp->index = k;
1422  kref_init(&sdp->d_ref);
1423 
1424  write_unlock_irqrestore(&sg_index_lock, iflags);
1425 
1426  error = 0;
1427  out:
1428  if (error) {
1429  kfree(sdp);
1430  return ERR_PTR(error);
1431  }
1432  return sdp;
1433 
1434  overflow:
1435  idr_remove(&sg_index_idr, k);
1436  write_unlock_irqrestore(&sg_index_lock, iflags);
1437  sdev_printk(KERN_WARNING, scsidp,
1438  "Unable to attach sg device type=%d, minor "
1439  "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
1440  error = -ENODEV;
1441  goto out;
1442 }
1443 
1444 static int
1445 sg_add(struct device *cl_dev, struct class_interface *cl_intf)
1446 {
1447  struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
1448  struct gendisk *disk;
1449  Sg_device *sdp = NULL;
1450  struct cdev * cdev = NULL;
1451  int error;
1452  unsigned long iflags;
1453 
1454  disk = alloc_disk(1);
1455  if (!disk) {
1456  printk(KERN_WARNING "alloc_disk failed\n");
1457  return -ENOMEM;
1458  }
1459  disk->major = SCSI_GENERIC_MAJOR;
1460 
1461  error = -ENOMEM;
1462  cdev = cdev_alloc();
1463  if (!cdev) {
1464  printk(KERN_WARNING "cdev_alloc failed\n");
1465  goto out;
1466  }
1467  cdev->owner = THIS_MODULE;
1468  cdev->ops = &sg_fops;
1469 
1470  sdp = sg_alloc(disk, scsidp);
1471  if (IS_ERR(sdp)) {
1472  printk(KERN_WARNING "sg_alloc failed\n");
1473  error = PTR_ERR(sdp);
1474  goto out;
1475  }
1476 
1477  error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1);
1478  if (error)
1479  goto cdev_add_err;
1480 
1481  sdp->cdev = cdev;
1482  if (sg_sysfs_valid) {
1483  struct device *sg_class_member;
1484 
1485  sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
1487  sdp->index),
1488  sdp, "%s", disk->disk_name);
1489  if (IS_ERR(sg_class_member)) {
1490  printk(KERN_ERR "sg_add: "
1491  "device_create failed\n");
1492  error = PTR_ERR(sg_class_member);
1493  goto cdev_add_err;
1494  }
1495  error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
1496  &sg_class_member->kobj, "generic");
1497  if (error)
1498  printk(KERN_ERR "sg_add: unable to make symlink "
1499  "'generic' back to sg%d\n", sdp->index);
1500  } else
1501  printk(KERN_WARNING "sg_add: sg_sys Invalid\n");
1502 
1503  sdev_printk(KERN_NOTICE, scsidp,
1504  "Attached scsi generic sg%d type %d\n", sdp->index,
1505  scsidp->type);
1506 
1507  dev_set_drvdata(cl_dev, sdp);
1508 
1509  return 0;
1510 
1511 cdev_add_err:
1512  write_lock_irqsave(&sg_index_lock, iflags);
1513  idr_remove(&sg_index_idr, sdp->index);
1514  write_unlock_irqrestore(&sg_index_lock, iflags);
1515  kfree(sdp);
1516 
1517 out:
1518  put_disk(disk);
1519  if (cdev)
1520  cdev_del(cdev);
1521  return error;
1522 }
1523 
1524 static void sg_device_destroy(struct kref *kref)
1525 {
1526  struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
1527  unsigned long flags;
1528 
1529  /* CAUTION! Note that the device can still be found via idr_find()
1530  * even though the refcount is 0. Therefore, do idr_remove() BEFORE
1531  * any other cleanup.
1532  */
1533 
1534  write_lock_irqsave(&sg_index_lock, flags);
1535  idr_remove(&sg_index_idr, sdp->index);
1536  write_unlock_irqrestore(&sg_index_lock, flags);
1537 
1538  SCSI_LOG_TIMEOUT(3,
1539  printk("sg_device_destroy: %s\n",
1540  sdp->disk->disk_name));
1541 
1542  put_disk(sdp->disk);
1543  kfree(sdp);
1544 }
1545 
1546 static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
1547 {
1548  struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
1549  Sg_device *sdp = dev_get_drvdata(cl_dev);
1550  unsigned long iflags;
1551  Sg_fd *sfp;
1552 
1553  if (!sdp || sdp->detached)
1554  return;
1555 
1556  SCSI_LOG_TIMEOUT(3, printk("sg_remove: %s\n", sdp->disk->disk_name));
1557 
1558  /* Need a write lock to set sdp->detached. */
1559  write_lock_irqsave(&sg_index_lock, iflags);
1560  sdp->detached = 1;
1561  list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
1564  }
1565  write_unlock_irqrestore(&sg_index_lock, iflags);
1566 
1567  sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
1568  device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
1569  cdev_del(sdp->cdev);
1570  sdp->cdev = NULL;
1571 
1572  sg_put_dev(sdp);
1573 }
1574 
1575 module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
1576 module_param_named(def_reserved_size, def_reserved_size, int,
1577  S_IRUGO | S_IWUSR);
1578 module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);
1579 
1580 MODULE_AUTHOR("Douglas Gilbert");
1581 MODULE_DESCRIPTION("SCSI generic (sg) driver");
1582 MODULE_LICENSE("GPL");
1585 
1586 MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
1587  "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
1588 MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
1589 MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
1590 
1591 static int __init
1592 init_sg(void)
1593 {
1594  int rc;
1595 
1596  if (scatter_elem_sz < PAGE_SIZE) {
1597  scatter_elem_sz = PAGE_SIZE;
1598  scatter_elem_sz_prev = scatter_elem_sz;
1599  }
1600  if (def_reserved_size >= 0)
1601  sg_big_buff = def_reserved_size;
1602  else
1603  def_reserved_size = sg_big_buff;
1604 
1606  SG_MAX_DEVS, "sg");
1607  if (rc)
1608  return rc;
1609  sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
1610  if ( IS_ERR(sg_sysfs_class) ) {
1611  rc = PTR_ERR(sg_sysfs_class);
1612  goto err_out;
1613  }
1614  sg_sysfs_valid = 1;
1615  rc = scsi_register_interface(&sg_interface);
1616  if (0 == rc) {
1617 #ifdef CONFIG_SCSI_PROC_FS
1618  sg_proc_init();
1619 #endif /* CONFIG_SCSI_PROC_FS */
1620  return 0;
1621  }
1622  class_destroy(sg_sysfs_class);
1623 err_out:
1625  return rc;
1626 }
1627 
1628 static void __exit
1629 exit_sg(void)
1630 {
1631 #ifdef CONFIG_SCSI_PROC_FS
1632  sg_proc_cleanup();
1633 #endif /* CONFIG_SCSI_PROC_FS */
1634  scsi_unregister_interface(&sg_interface);
1635  class_destroy(sg_sysfs_class);
1636  sg_sysfs_valid = 0;
1638  SG_MAX_DEVS);
1639  idr_destroy(&sg_index_idr);
1640 }
1641 
1642 static int sg_start_req(Sg_request *srp, unsigned char *cmd)
1643 {
1644  int res;
1645  struct request *rq;
1646  Sg_fd *sfp = srp->parentfp;
1647  sg_io_hdr_t *hp = &srp->header;
1648  int dxfer_len = (int) hp->dxfer_len;
1649  int dxfer_dir = hp->dxfer_direction;
1650  unsigned int iov_count = hp->iovec_count;
1651  Sg_scatter_hold *req_schp = &srp->data;
1652  Sg_scatter_hold *rsv_schp = &sfp->reserve;
1653  struct request_queue *q = sfp->parentdp->device->request_queue;
1654  struct rq_map_data *md, map_data;
1655  int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
1656 
1657  SCSI_LOG_TIMEOUT(4, printk(KERN_INFO "sg_start_req: dxfer_len=%d\n",
1658  dxfer_len));
1659 
1660  rq = blk_get_request(q, rw, GFP_ATOMIC);
1661  if (!rq)
1662  return -ENOMEM;
1663 
1664  memcpy(rq->cmd, cmd, hp->cmd_len);
1665 
1666  rq->cmd_len = hp->cmd_len;
1667  rq->cmd_type = REQ_TYPE_BLOCK_PC;
1668 
1669  srp->rq = rq;
1670  rq->end_io_data = srp;
1671  rq->sense = srp->sense_b;
1672  rq->retries = SG_DEFAULT_RETRIES;
1673 
1674  if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
1675  return 0;
1676 
1677  if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
1678  dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
1679  !sfp->parentdp->device->host->unchecked_isa_dma &&
1680  blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
1681  md = NULL;
1682  else
1683  md = &map_data;
1684 
1685  if (md) {
1686  if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
1687  sg_link_reserve(sfp, srp, dxfer_len);
1688  else {
1689  res = sg_build_indirect(req_schp, sfp, dxfer_len);
1690  if (res)
1691  return res;
1692  }
1693 
1694  md->pages = req_schp->pages;
1695  md->page_order = req_schp->page_order;
1696  md->nr_entries = req_schp->k_use_sg;
1697  md->offset = 0;
1698  md->null_mapped = hp->dxferp ? 0 : 1;
1699  if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
1700  md->from_user = 1;
1701  else
1702  md->from_user = 0;
1703  }
1704 
1705  if (iov_count) {
1706  int len, size = sizeof(struct sg_iovec) * iov_count;
1707  struct iovec *iov;
1708 
1709  iov = memdup_user(hp->dxferp, size);
1710  if (IS_ERR(iov))
1711  return PTR_ERR(iov);
1712 
1713  len = iov_length(iov, iov_count);
1714  if (hp->dxfer_len < len) {
1715  iov_count = iov_shorten(iov, iov_count, hp->dxfer_len);
1716  len = hp->dxfer_len;
1717  }
1718 
1719  res = blk_rq_map_user_iov(q, rq, md, (struct sg_iovec *)iov,
1720  iov_count,
1721  len, GFP_ATOMIC);
1722  kfree(iov);
1723  } else
1724  res = blk_rq_map_user(q, rq, md, hp->dxferp,
1725  hp->dxfer_len, GFP_ATOMIC);
1726 
1727  if (!res) {
1728  srp->bio = rq->bio;
1729 
1730  if (!md) {
1731  req_schp->dio_in_use = 1;
1732  hp->info |= SG_INFO_DIRECT_IO;
1733  }
1734  }
1735  return res;
1736 }
1737 
1738 static int sg_finish_rem_req(Sg_request * srp)
1739 {
1740  int ret = 0;
1741 
1742  Sg_fd *sfp = srp->parentfp;
1743  Sg_scatter_hold *req_schp = &srp->data;
1744 
1745  SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used));
1746  if (srp->rq) {
1747  if (srp->bio)
1748  ret = blk_rq_unmap_user(srp->bio);
1749 
1750  blk_put_request(srp->rq);
1751  }
1752 
1753  if (srp->res_used)
1754  sg_unlink_reserve(sfp, srp);
1755  else
1756  sg_remove_scat(req_schp);
1757 
1758  sg_remove_request(sfp, srp);
1759 
1760  return ret;
1761 }
1762 
1763 static int
1764 sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
1765 {
1766  int sg_bufflen = tablesize * sizeof(struct page *);
1767  gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
1768 
1769  schp->pages = kzalloc(sg_bufflen, gfp_flags);
1770  if (!schp->pages)
1771  return -ENOMEM;
1772  schp->sglist_len = sg_bufflen;
1773  return tablesize; /* number of scat_gath elements allocated */
1774 }
1775 
1776 static int
1777 sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1778 {
1779  int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
1780  int sg_tablesize = sfp->parentdp->sg_tablesize;
1781  int blk_size = buff_size, order;
1783 
1784  if (blk_size < 0)
1785  return -EFAULT;
1786  if (0 == blk_size)
1787  ++blk_size; /* don't know why */
1788  /* round request up to next highest SG_SECTOR_SZ byte boundary */
1789  blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
1790  SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
1791  buff_size, blk_size));
1792 
1793  /* N.B. ret_sz carried into this block ... */
1794  mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1795  if (mx_sc_elems < 0)
1796  return mx_sc_elems; /* most likely -ENOMEM */
1797 
1798  num = scatter_elem_sz;
1799  if (unlikely(num != scatter_elem_sz_prev)) {
1800  if (num < PAGE_SIZE) {
1801  scatter_elem_sz = PAGE_SIZE;
1802  scatter_elem_sz_prev = PAGE_SIZE;
1803  } else
1804  scatter_elem_sz_prev = num;
1805  }
1806 
1807  if (sfp->low_dma)
1808  gfp_mask |= GFP_DMA;
1809 
1811  gfp_mask |= __GFP_ZERO;
1812 
1813  order = get_order(num);
1814 retry:
1815  ret_sz = 1 << (PAGE_SHIFT + order);
1816 
1817  for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
1818  k++, rem_sz -= ret_sz) {
1819 
1820  num = (rem_sz > scatter_elem_sz_prev) ?
1821  scatter_elem_sz_prev : rem_sz;
1822 
1823  schp->pages[k] = alloc_pages(gfp_mask, order);
1824  if (!schp->pages[k])
1825  goto out;
1826 
1827  if (num == scatter_elem_sz_prev) {
1828  if (unlikely(ret_sz > scatter_elem_sz_prev)) {
1829  scatter_elem_sz = ret_sz;
1830  scatter_elem_sz_prev = ret_sz;
1831  }
1832  }
1833 
1834  SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
1835  "ret_sz=%d\n", k, num, ret_sz));
1836  } /* end of for loop */
1837 
1838  schp->page_order = order;
1839  schp->k_use_sg = k;
1840  SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
1841  "rem_sz=%d\n", k, rem_sz));
1842 
1843  schp->bufflen = blk_size;
1844  if (rem_sz > 0) /* must have failed */
1845  return -ENOMEM;
1846  return 0;
1847 out:
1848  for (i = 0; i < k; i++)
1849  __free_pages(schp->pages[i], order);
1850 
1851  if (--order >= 0)
1852  goto retry;
1853 
1854  return -ENOMEM;
1855 }
1856 
1857 static void
1858 sg_remove_scat(Sg_scatter_hold * schp)
1859 {
1860  SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
1861  if (schp->pages && schp->sglist_len > 0) {
1862  if (!schp->dio_in_use) {
1863  int k;
1864 
1865  for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
1867  "sg_remove_scat: k=%d, pg=0x%p\n",
1868  k, schp->pages[k]));
1869  __free_pages(schp->pages[k], schp->page_order);
1870  }
1871 
1872  kfree(schp->pages);
1873  }
1874  }
1875  memset(schp, 0, sizeof (*schp));
1876 }
1877 
1878 static int
1879 sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
1880 {
1881  Sg_scatter_hold *schp = &srp->data;
1882  int k, num;
1883 
1884  SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
1885  num_read_xfer));
1886  if ((!outp) || (num_read_xfer <= 0))
1887  return 0;
1888 
1889  num = 1 << (PAGE_SHIFT + schp->page_order);
1890  for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
1891  if (num > num_read_xfer) {
1892  if (__copy_to_user(outp, page_address(schp->pages[k]),
1893  num_read_xfer))
1894  return -EFAULT;
1895  break;
1896  } else {
1897  if (__copy_to_user(outp, page_address(schp->pages[k]),
1898  num))
1899  return -EFAULT;
1900  num_read_xfer -= num;
1901  if (num_read_xfer <= 0)
1902  break;
1903  outp += num;
1904  }
1905  }
1906 
1907  return 0;
1908 }
1909 
1910 static void
1911 sg_build_reserve(Sg_fd * sfp, int req_size)
1912 {
1913  Sg_scatter_hold *schp = &sfp->reserve;
1914 
1915  SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
1916  do {
1917  if (req_size < PAGE_SIZE)
1918  req_size = PAGE_SIZE;
1919  if (0 == sg_build_indirect(schp, sfp, req_size))
1920  return;
1921  else
1922  sg_remove_scat(schp);
1923  req_size >>= 1; /* divide by 2 */
1924  } while (req_size > (PAGE_SIZE / 2));
1925 }
1926 
1927 static void
1928 sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
1929 {
1930  Sg_scatter_hold *req_schp = &srp->data;
1931  Sg_scatter_hold *rsv_schp = &sfp->reserve;
1932  int k, num, rem;
1933 
1934  srp->res_used = 1;
1935  SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
1936  rem = size;
1937 
1938  num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
1939  for (k = 0; k < rsv_schp->k_use_sg; k++) {
1940  if (rem <= num) {
1941  req_schp->k_use_sg = k + 1;
1942  req_schp->sglist_len = rsv_schp->sglist_len;
1943  req_schp->pages = rsv_schp->pages;
1944 
1945  req_schp->bufflen = size;
1946  req_schp->page_order = rsv_schp->page_order;
1947  break;
1948  } else
1949  rem -= num;
1950  }
1951 
1952  if (k >= rsv_schp->k_use_sg)
1953  SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
1954 }
1955 
1956 static void
1957 sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
1958 {
1959  Sg_scatter_hold *req_schp = &srp->data;
1960 
1961  SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
1962  (int) req_schp->k_use_sg));
1963  req_schp->k_use_sg = 0;
1964  req_schp->bufflen = 0;
1965  req_schp->pages = NULL;
1966  req_schp->page_order = 0;
1967  req_schp->sglist_len = 0;
1968  sfp->save_scat_len = 0;
1969  srp->res_used = 0;
1970 }
1971 
1972 static Sg_request *
1973 sg_get_rq_mark(Sg_fd * sfp, int pack_id)
1974 {
1975  Sg_request *resp;
1976  unsigned long iflags;
1977 
1978  write_lock_irqsave(&sfp->rq_list_lock, iflags);
1979  for (resp = sfp->headrp; resp; resp = resp->nextrp) {
1980  /* look for requests that are ready + not SG_IO owned */
1981  if ((1 == resp->done) && (!resp->sg_io_owned) &&
1982  ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
1983  resp->done = 2; /* guard against other readers */
1984  break;
1985  }
1986  }
1987  write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1988  return resp;
1989 }
1990 
1991 /* always adds to end of list */
1992 static Sg_request *
1993 sg_add_request(Sg_fd * sfp)
1994 {
1995  int k;
1996  unsigned long iflags;
1997  Sg_request *resp;
1998  Sg_request *rp = sfp->req_arr;
1999 
2000  write_lock_irqsave(&sfp->rq_list_lock, iflags);
2001  resp = sfp->headrp;
2002  if (!resp) {
2003  memset(rp, 0, sizeof (Sg_request));
2004  rp->parentfp = sfp;
2005  resp = rp;
2006  sfp->headrp = resp;
2007  } else {
2008  if (0 == sfp->cmd_q)
2009  resp = NULL; /* command queuing disallowed */
2010  else {
2011  for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
2012  if (!rp->parentfp)
2013  break;
2014  }
2015  if (k < SG_MAX_QUEUE) {
2016  memset(rp, 0, sizeof (Sg_request));
2017  rp->parentfp = sfp;
2018  while (resp->nextrp)
2019  resp = resp->nextrp;
2020  resp->nextrp = rp;
2021  resp = rp;
2022  } else
2023  resp = NULL;
2024  }
2025  }
2026  if (resp) {
2027  resp->nextrp = NULL;
2028  resp->header.duration = jiffies_to_msecs(jiffies);
2029  }
2030  write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2031  return resp;
2032 }
2033 
2034 /* Return of 1 for found; 0 for not found */
2035 static int
2036 sg_remove_request(Sg_fd * sfp, Sg_request * srp)
2037 {
2038  Sg_request *prev_rp;
2039  Sg_request *rp;
2040  unsigned long iflags;
2041  int res = 0;
2042 
2043  if ((!sfp) || (!srp) || (!sfp->headrp))
2044  return res;
2045  write_lock_irqsave(&sfp->rq_list_lock, iflags);
2046  prev_rp = sfp->headrp;
2047  if (srp == prev_rp) {
2048  sfp->headrp = prev_rp->nextrp;
2049  prev_rp->parentfp = NULL;
2050  res = 1;
2051  } else {
2052  while ((rp = prev_rp->nextrp)) {
2053  if (srp == rp) {
2054  prev_rp->nextrp = rp->nextrp;
2055  rp->parentfp = NULL;
2056  res = 1;
2057  break;
2058  }
2059  prev_rp = rp;
2060  }
2061  }
2062  write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2063  return res;
2064 }
2065 
2066 static Sg_fd *
2067 sg_add_sfp(Sg_device * sdp, int dev)
2068 {
2069  Sg_fd *sfp;
2070  unsigned long iflags;
2071  int bufflen;
2072 
2073  sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
2074  if (!sfp)
2075  return NULL;
2076 
2078  rwlock_init(&sfp->rq_list_lock);
2079 
2080  kref_init(&sfp->f_ref);
2081  sfp->timeout = SG_DEFAULT_TIMEOUT;
2082  sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
2084  sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
2085  sdp->device->host->unchecked_isa_dma : 1;
2086  sfp->cmd_q = SG_DEF_COMMAND_Q;
2088  sfp->parentdp = sdp;
2089  write_lock_irqsave(&sg_index_lock, iflags);
2090  list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
2091  write_unlock_irqrestore(&sg_index_lock, iflags);
2092  SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
2093  if (unlikely(sg_big_buff != def_reserved_size))
2094  sg_big_buff = def_reserved_size;
2095 
2096  bufflen = min_t(int, sg_big_buff,
2097  queue_max_sectors(sdp->device->request_queue) * 512);
2098  sg_build_reserve(sfp, bufflen);
2099  SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
2100  sfp->reserve.bufflen, sfp->reserve.k_use_sg));
2101 
2102  kref_get(&sdp->d_ref);
2103  __module_get(THIS_MODULE);
2104  return sfp;
2105 }
2106 
2107 static void sg_remove_sfp_usercontext(struct work_struct *work)
2108 {
2109  struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
2110  struct sg_device *sdp = sfp->parentdp;
2111 
2112  /* Cleanup any responses which were never read(). */
2113  while (sfp->headrp)
2114  sg_finish_rem_req(sfp->headrp);
2115 
2116  if (sfp->reserve.bufflen > 0) {
2117  SCSI_LOG_TIMEOUT(6,
2118  printk("sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
2119  (int) sfp->reserve.bufflen,
2120  (int) sfp->reserve.k_use_sg));
2121  sg_remove_scat(&sfp->reserve);
2122  }
2123 
2124  SCSI_LOG_TIMEOUT(6,
2125  printk("sg_remove_sfp: %s, sfp=0x%p\n",
2126  sdp->disk->disk_name,
2127  sfp));
2128  kfree(sfp);
2129 
2130  scsi_device_put(sdp->device);
2131  sg_put_dev(sdp);
2132  module_put(THIS_MODULE);
2133 }
2134 
2135 static void sg_remove_sfp(struct kref *kref)
2136 {
2137  struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
2138  struct sg_device *sdp = sfp->parentdp;
2139  unsigned long iflags;
2140 
2141  write_lock_irqsave(&sg_index_lock, iflags);
2142  list_del(&sfp->sfd_siblings);
2143  write_unlock_irqrestore(&sg_index_lock, iflags);
2145 
2146  INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
2147  schedule_work(&sfp->ew.work);
2148 }
2149 
2150 static int
2151 sg_res_in_use(Sg_fd * sfp)
2152 {
2153  const Sg_request *srp;
2154  unsigned long iflags;
2155 
2156  read_lock_irqsave(&sfp->rq_list_lock, iflags);
2157  for (srp = sfp->headrp; srp; srp = srp->nextrp)
2158  if (srp->res_used)
2159  break;
2160  read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
2161  return srp ? 1 : 0;
2162 }
2163 
2164 #ifdef CONFIG_SCSI_PROC_FS
2165 static int
2166 sg_idr_max_id(int id, void *p, void *data)
2167 {
2168  int *k = data;
2169 
2170  if (*k < id)
2171  *k = id;
2172 
2173  return 0;
2174 }
2175 
2176 static int
2177 sg_last_dev(void)
2178 {
2179  int k = -1;
2180  unsigned long iflags;
2181 
2182  read_lock_irqsave(&sg_index_lock, iflags);
2183  idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
2184  read_unlock_irqrestore(&sg_index_lock, iflags);
2185  return k + 1; /* origin 1 */
2186 }
2187 #endif
2188 
2189 /* must be called with sg_index_lock held */
2190 static Sg_device *sg_lookup_dev(int dev)
2191 {
2192  return idr_find(&sg_index_idr, dev);
2193 }
2194 
2195 static Sg_device *sg_get_dev(int dev)
2196 {
2197  struct sg_device *sdp;
2198  unsigned long flags;
2199 
2200  read_lock_irqsave(&sg_index_lock, flags);
2201  sdp = sg_lookup_dev(dev);
2202  if (!sdp)
2203  sdp = ERR_PTR(-ENXIO);
2204  else if (sdp->detached) {
2205  /* If sdp->detached, then the refcount may already be 0, in
2206  * which case it would be a bug to do kref_get().
2207  */
2208  sdp = ERR_PTR(-ENODEV);
2209  } else
2210  kref_get(&sdp->d_ref);
2211  read_unlock_irqrestore(&sg_index_lock, flags);
2212 
2213  return sdp;
2214 }
2215 
2216 static void sg_put_dev(struct sg_device *sdp)
2217 {
2218  kref_put(&sdp->d_ref, sg_device_destroy);
2219 }
2220 
2221 #ifdef CONFIG_SCSI_PROC_FS
2222 
2223 static struct proc_dir_entry *sg_proc_sgp = NULL;
2224 
2225 static char sg_proc_sg_dirname[] = "scsi/sg";
2226 
2227 static int sg_proc_seq_show_int(struct seq_file *s, void *v);
2228 
2229 static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
2230 static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
2231  size_t count, loff_t *off);
2232 static const struct file_operations adio_fops = {
2233  .owner = THIS_MODULE,
2234  .open = sg_proc_single_open_adio,
2235  .read = seq_read,
2236  .llseek = seq_lseek,
2237  .write = sg_proc_write_adio,
2238  .release = single_release,
2239 };
2240 
2241 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
2242 static ssize_t sg_proc_write_dressz(struct file *filp,
2243  const char __user *buffer, size_t count, loff_t *off);
2244 static const struct file_operations dressz_fops = {
2245  .owner = THIS_MODULE,
2246  .open = sg_proc_single_open_dressz,
2247  .read = seq_read,
2248  .llseek = seq_lseek,
2249  .write = sg_proc_write_dressz,
2250  .release = single_release,
2251 };
2252 
2253 static int sg_proc_seq_show_version(struct seq_file *s, void *v);
2254 static int sg_proc_single_open_version(struct inode *inode, struct file *file);
2255 static const struct file_operations version_fops = {
2256  .owner = THIS_MODULE,
2257  .open = sg_proc_single_open_version,
2258  .read = seq_read,
2259  .llseek = seq_lseek,
2260  .release = single_release,
2261 };
2262 
2263 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
2264 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
2265 static const struct file_operations devhdr_fops = {
2266  .owner = THIS_MODULE,
2267  .open = sg_proc_single_open_devhdr,
2268  .read = seq_read,
2269  .llseek = seq_lseek,
2270  .release = single_release,
2271 };
2272 
2273 static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
2274 static int sg_proc_open_dev(struct inode *inode, struct file *file);
2275 static void * dev_seq_start(struct seq_file *s, loff_t *pos);
2276 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
2277 static void dev_seq_stop(struct seq_file *s, void *v);
2278 static const struct file_operations dev_fops = {
2279  .owner = THIS_MODULE,
2280  .open = sg_proc_open_dev,
2281  .read = seq_read,
2282  .llseek = seq_lseek,
2283  .release = seq_release,
2284 };
2285 static const struct seq_operations dev_seq_ops = {
2286  .start = dev_seq_start,
2287  .next = dev_seq_next,
2288  .stop = dev_seq_stop,
2289  .show = sg_proc_seq_show_dev,
2290 };
2291 
2292 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
2293 static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
2294 static const struct file_operations devstrs_fops = {
2295  .owner = THIS_MODULE,
2296  .open = sg_proc_open_devstrs,
2297  .read = seq_read,
2298  .llseek = seq_lseek,
2299  .release = seq_release,
2300 };
2301 static const struct seq_operations devstrs_seq_ops = {
2302  .start = dev_seq_start,
2303  .next = dev_seq_next,
2304  .stop = dev_seq_stop,
2305  .show = sg_proc_seq_show_devstrs,
2306 };
2307 
2308 static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
2309 static int sg_proc_open_debug(struct inode *inode, struct file *file);
2310 static const struct file_operations debug_fops = {
2311  .owner = THIS_MODULE,
2312  .open = sg_proc_open_debug,
2313  .read = seq_read,
2314  .llseek = seq_lseek,
2315  .release = seq_release,
2316 };
2317 static const struct seq_operations debug_seq_ops = {
2318  .start = dev_seq_start,
2319  .next = dev_seq_next,
2320  .stop = dev_seq_stop,
2321  .show = sg_proc_seq_show_debug,
2322 };
2323 
2324 
2325 struct sg_proc_leaf {
2326  const char * name;
2327  const struct file_operations * fops;
2328 };
2329 
2330 static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
2331  {"allow_dio", &adio_fops},
2332  {"debug", &debug_fops},
2333  {"def_reserved_size", &dressz_fops},
2334  {"device_hdr", &devhdr_fops},
2335  {"devices", &dev_fops},
2336  {"device_strs", &devstrs_fops},
2337  {"version", &version_fops}
2338 };
2339 
2340 static int
2341 sg_proc_init(void)
2342 {
2343  int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
2344  int k;
2345 
2346  sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
2347  if (!sg_proc_sgp)
2348  return 1;
2349  for (k = 0; k < num_leaves; ++k) {
2350  const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
2351  umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
2352  proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
2353  }
2354  return 0;
2355 }
2356 
2357 static void
2358 sg_proc_cleanup(void)
2359 {
2360  int k;
2361  int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
2362 
2363  if (!sg_proc_sgp)
2364  return;
2365  for (k = 0; k < num_leaves; ++k)
2366  remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
2367  remove_proc_entry(sg_proc_sg_dirname, NULL);
2368 }
2369 
2370 
2371 static int sg_proc_seq_show_int(struct seq_file *s, void *v)
2372 {
2373  seq_printf(s, "%d\n", *((int *)s->private));
2374  return 0;
2375 }
2376 
2377 static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
2378 {
2379  return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
2380 }
2381 
2382 static ssize_t
2383 sg_proc_write_adio(struct file *filp, const char __user *buffer,
2384  size_t count, loff_t *off)
2385 {
2386  int err;
2387  unsigned long num;
2388 
2390  return -EACCES;
2391  err = kstrtoul_from_user(buffer, count, 0, &num);
2392  if (err)
2393  return err;
2394  sg_allow_dio = num ? 1 : 0;
2395  return count;
2396 }
2397 
2398 static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
2399 {
2400  return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
2401 }
2402 
2403 static ssize_t
2404 sg_proc_write_dressz(struct file *filp, const char __user *buffer,
2405  size_t count, loff_t *off)
2406 {
2407  int err;
2408  unsigned long k = ULONG_MAX;
2409 
2411  return -EACCES;
2412 
2413  err = kstrtoul_from_user(buffer, count, 0, &k);
2414  if (err)
2415  return err;
2416  if (k <= 1048576) { /* limit "big buff" to 1 MB */
2417  sg_big_buff = k;
2418  return count;
2419  }
2420  return -ERANGE;
2421 }
2422 
2423 static int sg_proc_seq_show_version(struct seq_file *s, void *v)
2424 {
2425  seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
2426  sg_version_date);
2427  return 0;
2428 }
2429 
2430 static int sg_proc_single_open_version(struct inode *inode, struct file *file)
2431 {
2432  return single_open(file, sg_proc_seq_show_version, NULL);
2433 }
2434 
2435 static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
2436 {
2437  seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t"
2438  "online\n");
2439  return 0;
2440 }
2441 
2442 static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
2443 {
2444  return single_open(file, sg_proc_seq_show_devhdr, NULL);
2445 }
2446 
2447 struct sg_proc_deviter {
2448  loff_t index;
2449  size_t max;
2450 };
2451 
2452 static void * dev_seq_start(struct seq_file *s, loff_t *pos)
2453 {
2454  struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
2455 
2456  s->private = it;
2457  if (! it)
2458  return NULL;
2459 
2460  it->index = *pos;
2461  it->max = sg_last_dev();
2462  if (it->index >= it->max)
2463  return NULL;
2464  return it;
2465 }
2466 
2467 static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
2468 {
2469  struct sg_proc_deviter * it = s->private;
2470 
2471  *pos = ++it->index;
2472  return (it->index < it->max) ? it : NULL;
2473 }
2474 
2475 static void dev_seq_stop(struct seq_file *s, void *v)
2476 {
2477  kfree(s->private);
2478 }
2479 
2480 static int sg_proc_open_dev(struct inode *inode, struct file *file)
2481 {
2482  return seq_open(file, &dev_seq_ops);
2483 }
2484 
2485 static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
2486 {
2487  struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2488  Sg_device *sdp;
2489  struct scsi_device *scsidp;
2490  unsigned long iflags;
2491 
2492  read_lock_irqsave(&sg_index_lock, iflags);
2493  sdp = it ? sg_lookup_dev(it->index) : NULL;
2494  if (sdp && (scsidp = sdp->device) && (!sdp->detached))
2495  seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
2496  scsidp->host->host_no, scsidp->channel,
2497  scsidp->id, scsidp->lun, (int) scsidp->type,
2498  1,
2499  (int) scsidp->queue_depth,
2500  (int) scsidp->device_busy,
2501  (int) scsi_device_online(scsidp));
2502  else
2503  seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
2504  read_unlock_irqrestore(&sg_index_lock, iflags);
2505  return 0;
2506 }
2507 
2508 static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
2509 {
2510  return seq_open(file, &devstrs_seq_ops);
2511 }
2512 
2513 static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
2514 {
2515  struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2516  Sg_device *sdp;
2517  struct scsi_device *scsidp;
2518  unsigned long iflags;
2519 
2520  read_lock_irqsave(&sg_index_lock, iflags);
2521  sdp = it ? sg_lookup_dev(it->index) : NULL;
2522  if (sdp && (scsidp = sdp->device) && (!sdp->detached))
2523  seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
2524  scsidp->vendor, scsidp->model, scsidp->rev);
2525  else
2526  seq_printf(s, "<no active device>\n");
2527  read_unlock_irqrestore(&sg_index_lock, iflags);
2528  return 0;
2529 }
2530 
2531 /* must be called while holding sg_index_lock */
2532 static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
2533 {
2534  int k, m, new_interface, blen, usg;
2535  Sg_request *srp;
2536  Sg_fd *fp;
2537  const sg_io_hdr_t *hp;
2538  const char * cp;
2539  unsigned int ms;
2540 
2541  k = 0;
2542  list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
2543  k++;
2544  read_lock(&fp->rq_list_lock); /* irqs already disabled */
2545  seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
2546  "(res)sgat=%d low_dma=%d\n", k,
2548  fp->reserve.bufflen,
2549  (int) fp->reserve.k_use_sg,
2550  (int) fp->low_dma);
2551  seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
2552  (int) fp->cmd_q, (int) fp->force_packid,
2553  (int) fp->keep_orphan);
2554  for (m = 0, srp = fp->headrp;
2555  srp != NULL;
2556  ++m, srp = srp->nextrp) {
2557  hp = &srp->header;
2558  new_interface = (hp->interface_id == '\0') ? 0 : 1;
2559  if (srp->res_used) {
2560  if (new_interface &&
2561  (SG_FLAG_MMAP_IO & hp->flags))
2562  cp = " mmap>> ";
2563  else
2564  cp = " rb>> ";
2565  } else {
2566  if (SG_INFO_DIRECT_IO_MASK & hp->info)
2567  cp = " dio>> ";
2568  else
2569  cp = " ";
2570  }
2571  seq_printf(s, cp);
2572  blen = srp->data.bufflen;
2573  usg = srp->data.k_use_sg;
2574  seq_printf(s, srp->done ?
2575  ((1 == srp->done) ? "rcv:" : "fin:")
2576  : "act:");
2577  seq_printf(s, " id=%d blen=%d",
2578  srp->header.pack_id, blen);
2579  if (srp->done)
2580  seq_printf(s, " dur=%d", hp->duration);
2581  else {
2582  ms = jiffies_to_msecs(jiffies);
2583  seq_printf(s, " t_o/elap=%d/%d",
2584  (new_interface ? hp->timeout :
2585  jiffies_to_msecs(fp->timeout)),
2586  (ms > hp->duration ? ms - hp->duration : 0));
2587  }
2588  seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
2589  (int) srp->data.cmd_opcode);
2590  }
2591  if (0 == m)
2592  seq_printf(s, " No requests active\n");
2593  read_unlock(&fp->rq_list_lock);
2594  }
2595 }
2596 
2597 static int sg_proc_open_debug(struct inode *inode, struct file *file)
2598 {
2599  return seq_open(file, &debug_seq_ops);
2600 }
2601 
2602 static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
2603 {
2604  struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
2605  Sg_device *sdp;
2606  unsigned long iflags;
2607 
2608  if (it && (0 == it->index)) {
2609  seq_printf(s, "max_active_device=%d(origin 1)\n",
2610  (int)it->max);
2611  seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
2612  }
2613 
2614  read_lock_irqsave(&sg_index_lock, iflags);
2615  sdp = it ? sg_lookup_dev(it->index) : NULL;
2616  if (sdp && !list_empty(&sdp->sfds)) {
2617  struct scsi_device *scsidp = sdp->device;
2618 
2619  seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
2620  if (sdp->detached)
2621  seq_printf(s, "detached pending close ");
2622  else
2623  seq_printf
2624  (s, "scsi%d chan=%d id=%d lun=%d em=%d",
2625  scsidp->host->host_no,
2626  scsidp->channel, scsidp->id,
2627  scsidp->lun,
2628  scsidp->host->hostt->emulated);
2629  seq_printf(s, " sg_tablesize=%d excl=%d\n",
2630  sdp->sg_tablesize, get_exclude(sdp));
2631  sg_proc_debug_helper(s, sdp);
2632  }
2633  read_unlock_irqrestore(&sg_index_lock, iflags);
2634  return 0;
2635 }
2636 
2637 #endif /* CONFIG_SCSI_PROC_FS */
2638 
2639 module_init(init_sg);
2640 module_exit(exit_sg);