Linux Kernel 3.7.1
storvsc_drv.c
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <[email protected]>
 *   Hank Janssen <[email protected]>
 *   K. Y. Srinivasan <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/hyperv.h>
#include <linux/mempool.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_dbg.h>

/*
 * All wire protocol details (storage protocol between the guest and the host)
 * are consolidated here.
 *
 * Begin protocol definitions.
 */

/*
 * Version history:
 * V1 Beta: 0.1
 * V1 RC < 2008/1/31: 1.0
 * V1 RC > 2008/1/31: 2.0
 * Win7: 4.2
 */

#define VMSTOR_CURRENT_MAJOR    4
#define VMSTOR_CURRENT_MINOR    2

/* Packet structure describing virtual storage requests. */
enum vstor_packet_operation {
        VSTOR_OPERATION_COMPLETE_IO             = 1,
        VSTOR_OPERATION_REMOVE_DEVICE           = 2,
        VSTOR_OPERATION_EXECUTE_SRB             = 3,
        VSTOR_OPERATION_RESET_LUN               = 4,
        VSTOR_OPERATION_RESET_ADAPTER           = 5,
        VSTOR_OPERATION_RESET_BUS               = 6,
        VSTOR_OPERATION_BEGIN_INITIALIZATION    = 7,
        VSTOR_OPERATION_END_INITIALIZATION      = 8,
        VSTOR_OPERATION_QUERY_PROTOCOL_VERSION  = 9,
        VSTOR_OPERATION_QUERY_PROPERTIES        = 10,
        VSTOR_OPERATION_ENUMERATE_BUS           = 11,
        VSTOR_OPERATION_MAXIMUM                 = 11
};

/*
 * Platform neutral description of a scsi request -
 * this remains the same across the wire regardless of 32/64 bit
 * note: it's patterned off the SCSI_PASS_THROUGH structure
 */
#define STORVSC_MAX_CMD_LEN                     0x10
#define STORVSC_SENSE_BUFFER_SIZE               0x12
#define STORVSC_MAX_BUF_LEN_WITH_PADDING        0x14

struct vmscsi_request {
        u16 length;
        u8  srb_status;
        u8  scsi_status;

        u8  port_number;
        u8  path_id;
        u8  target_id;
        u8  lun;

        u8  cdb_length;
        u8  sense_info_length;
        u8  data_in;
        u8  reserved;

        u32 data_transfer_length;

        union {
                u8 cdb[STORVSC_MAX_CMD_LEN];
                u8 sense_data[STORVSC_SENSE_BUFFER_SIZE];
                u8 reserved_array[STORVSC_MAX_BUF_LEN_WITH_PADDING];
        };
} __attribute((packed));

/*
 * This structure is sent during the initialization phase to get the different
 * properties of the channel.
 */
struct vmstorage_channel_properties {
        u16 protocol_version;
        u8  path_id;
        u8  target_id;

        /* Note: port number is only really known on the client side */
        u32 port_number;
        u32 flags;
        u32 max_transfer_bytes;

        /*
         * This id is unique for each channel and will correspond with
         * vendor specific data in the inquiry data.
         */

        u64 unique_id;
} __packed;

/* This structure is sent during the storage protocol negotiations. */
struct vmstorage_protocol_version {
        /* Major (MSW) and minor (LSW) version numbers. */
        u16 major_minor;

        /*
         * Revision number is auto-incremented whenever this file is changed.
         * Mismatch does not definitely indicate incompatibility--but it does
         * indicate mismatched builds.
         * This is only used on the windows side. Just set it to 0.
         */
        u16 revision;
} __packed;

/* Channel Property Flags */
#define STORAGE_CHANNEL_REMOVABLE_FLAG          0x1
#define STORAGE_CHANNEL_EMULATED_IDE_FLAG       0x2

struct vstor_packet {
        /* Requested operation type */
        enum vstor_packet_operation operation;

        /* Flags - see below for values */
        u32 flags;

        /* Status of the request returned from the server side. */
        u32 status;

        /* Data payload area */
        union {
                /*
                 * Structure used to forward SCSI commands from the
                 * client to the server.
                 */
                struct vmscsi_request vm_srb;

                /* Structure used to query channel properties. */
                struct vmstorage_channel_properties storage_channel_properties;

                /* Used during version negotiations. */
                struct vmstorage_protocol_version version;
        };
} __packed;

/*
 * Packet Flags:
 *
 * This flag indicates that the server should send back a completion for this
 * packet.
 */

#define REQUEST_COMPLETION_FLAG 0x1

/* Matches Windows-end */
enum storvsc_request_type {
        WRITE_TYPE = 0,
        READ_TYPE,
        UNKNOWN_TYPE,
};

/*
 * SRB status codes and masks; a subset of the codes used here.
 */

#define SRB_STATUS_AUTOSENSE_VALID      0x80
#define SRB_STATUS_INVALID_LUN          0x20
#define SRB_STATUS_SUCCESS              0x01
#define SRB_STATUS_ERROR                0x04

/*
 * This is the end of Protocol specific defines.
 */

/*
 * We set up a mempool to allocate request structures for this driver
 * on a per-lun basis. The following define specifies the number of
 * elements in the pool.
 */

#define STORVSC_MIN_BUF_NR      64
static int storvsc_ringbuffer_size = (20 * PAGE_SIZE);

module_param(storvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
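
/*
 * Illustrative usage note (assuming the driver is built as the usual
 * hv_storvsc module; adjust the name if your build differs): the ring
 * size can be chosen at load time, e.g.
 *
 *   modprobe hv_storvsc storvsc_ringbuffer_size=163840
 *
 * and since the parameter is declared S_IRUGO it is readable (but not
 * writable) afterwards via
 * /sys/module/hv_storvsc/parameters/storvsc_ringbuffer_size.
 */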

#define STORVSC_MAX_IO_REQUESTS         128

/*
 * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In
 * reality, the path/target is not used (i.e., always set to 0) so our
 * scsi host adapter essentially has 1 bus with 1 target that contains
 * up to 64 luns (STORVSC_MAX_LUNS_PER_TARGET).
 */
#define STORVSC_MAX_LUNS_PER_TARGET     64
#define STORVSC_MAX_TARGETS             1
#define STORVSC_MAX_CHANNELS            1

struct storvsc_cmd_request {
        struct list_head entry;
        struct scsi_cmnd *cmd;

        unsigned int bounce_sgl_count;
        struct scatterlist *bounce_sgl;

        struct hv_device *device;

        /* Synchronize the request/response if needed */
        struct completion wait_event;

        unsigned char *sense_buffer;
        struct hv_multipage_buffer data_buffer;
        struct vstor_packet vstor_packet;
};

/* A storvsc device is a device object that contains a vmbus channel */
struct storvsc_device {
        struct hv_device *device;

        bool destroy;
        bool drain_notify;
        atomic_t num_outstanding_req;
        struct Scsi_Host *host;

        wait_queue_head_t waiting_to_drain;

        /*
         * Each unique Port/Path/Target represents 1 channel, i.e. one scsi
         * controller. In reality, the pathid and targetid are always 0
         * and the port is set by us.
         */
        unsigned int port_number;
        unsigned char path_id;
        unsigned char target_id;

        /* Used for vsc/vsp channel reset process */
        struct storvsc_cmd_request init_request;
        struct storvsc_cmd_request reset_request;
};

struct stor_mem_pools {
        struct kmem_cache *request_pool;
        mempool_t *request_mempool;
};

struct hv_host_device {
        struct hv_device *dev;
        unsigned int port;
        unsigned char path;
        unsigned char target;
};

struct storvsc_scan_work {
        struct work_struct work;
        struct Scsi_Host *host;
        uint lun;
};

static void storvsc_bus_scan(struct work_struct *work)
{
        struct storvsc_scan_work *wrk;
        int id, order_id;

        wrk = container_of(work, struct storvsc_scan_work, work);
        for (id = 0; id < wrk->host->max_id; ++id) {
                if (wrk->host->reverse_ordering)
                        order_id = wrk->host->max_id - id - 1;
                else
                        order_id = id;

                scsi_scan_target(&wrk->host->shost_gendev, 0,
                                 order_id, SCAN_WILD_CARD, 1);
        }
        kfree(wrk);
}

static void storvsc_remove_lun(struct work_struct *work)
{
        struct storvsc_scan_work *wrk;
        struct scsi_device *sdev;

        wrk = container_of(work, struct storvsc_scan_work, work);
        if (!scsi_host_get(wrk->host))
                goto done;

        sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun);

        if (sdev) {
                scsi_remove_device(sdev);
                scsi_device_put(sdev);
        }
        scsi_host_put(wrk->host);

done:
        kfree(wrk);
}

/*
 * Major/minor version packing. The minor version is in the LSB, meaning
 * that earlier flat version numbers will be interpreted as "0.x" (i.e.,
 * 1 becomes 0.1).
 */

static inline u16 storvsc_get_version(u8 major, u8 minor)
{
        u16 version;

        version = ((major << 8) | minor);
        return version;
}
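
/*
 * Worked example: with the protocol level defined above,
 * storvsc_get_version(VMSTOR_CURRENT_MAJOR, VMSTOR_CURRENT_MINOR) is
 * storvsc_get_version(4, 2) == (4 << 8) | 2 == 0x0402, matching the
 * "Win7: 4.2" entry in the version history near the top of this file.
 */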

/*
 * We can get incoming messages from the host that are not in response to
 * messages that we have sent out. An example of this would be messages
 * received by the guest to notify dynamic addition/removal of LUNs. To
 * deal with potential race conditions where the driver may be in the
 * midst of being unloaded when we might receive an unsolicited message
 * from the host, we have implemented a mechanism to guarantee sequential
 * consistency:
 *
 * 1) Once the device is marked as being destroyed, we will fail all
 *    outgoing messages.
 * 2) We permit incoming messages when the device is being destroyed,
 *    only to properly account for messages already sent out.
 */

static inline struct storvsc_device *get_out_stor_device(
                                        struct hv_device *device)
{
        struct storvsc_device *stor_device;

        stor_device = hv_get_drvdata(device);

        if (stor_device && stor_device->destroy)
                stor_device = NULL;

        return stor_device;
}

static inline void storvsc_wait_to_drain(struct storvsc_device *dev)
{
        dev->drain_notify = true;
        wait_event(dev->waiting_to_drain,
                   atomic_read(&dev->num_outstanding_req) == 0);
        dev->drain_notify = false;
}

static inline struct storvsc_device *get_in_stor_device(
                                        struct hv_device *device)
{
        struct storvsc_device *stor_device;

        stor_device = hv_get_drvdata(device);

        if (!stor_device)
                goto get_in_err;

        /*
         * If the device is being destroyed, allow incoming
         * traffic only to clean up outstanding requests.
         */

        if (stor_device->destroy &&
            (atomic_read(&stor_device->num_outstanding_req) == 0))
                stor_device = NULL;

get_in_err:
        return stor_device;
}

static void destroy_bounce_buffer(struct scatterlist *sgl,
                                  unsigned int sg_count)
{
        int i;
        struct page *page_buf;

        for (i = 0; i < sg_count; i++) {
                page_buf = sg_page((&sgl[i]));
                if (page_buf != NULL)
                        __free_page(page_buf);
        }

        kfree(sgl);
}

static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
{
        int i;

        /* No need to check */
        if (sg_count < 2)
                return -1;

        /* We have at least 2 sg entries */
        for (i = 0; i < sg_count; i++) {
                if (i == 0) {
                        /* make sure 1st one does not have hole */
                        if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
                                return i;
                } else if (i == sg_count - 1) {
                        /* make sure last one does not have hole */
                        if (sgl[i].offset != 0)
                                return i;
                } else {
                        /* make sure no hole in the middle */
                        if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
                                return i;
                }
        }
        return -1;
}
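
/*
 * Worked example of the hole check above: for the two-entry list
 *
 *   sgl[0] = { .offset = 0,   .length = PAGE_SIZE }
 *   sgl[1] = { .offset = 512, .length = 512 }
 *
 * entry 0 passes (offset + length == PAGE_SIZE), but entry 1 is the last
 * entry and starts at a non-zero offset, so do_bounce_buffer() returns 1
 * and the caller knows the list must be bounced. A fully page-aligned
 * list returns -1 and can be handed to the host as-is.
 */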

static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
                                                unsigned int sg_count,
                                                unsigned int len,
                                                int write)
{
        int i;
        int num_pages;
        struct scatterlist *bounce_sgl;
        struct page *page_buf;
        unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);

        num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;

        bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
        if (!bounce_sgl)
                return NULL;

        for (i = 0; i < num_pages; i++) {
                page_buf = alloc_page(GFP_ATOMIC);
                if (!page_buf)
                        goto cleanup;
                sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
        }

        return bounce_sgl;

cleanup:
        destroy_bounce_buffer(bounce_sgl, num_pages);
        return NULL;
}
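
/*
 * Note on buf_len above: for WRITE_TYPE requests the bounce pages start
 * with length 0 and are grown by copy_to_bounce_buffer() as data is
 * copied in; for reads they start at PAGE_SIZE so the host can deposit
 * up to a full page into each entry before copy_from_bounce_buffer()
 * drains them.
 */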

/* Disgusting wrapper functions */
static inline unsigned long sg_kmap_atomic(struct scatterlist *sgl, int idx)
{
        void *addr = kmap_atomic(sg_page(sgl + idx));
        return (unsigned long)addr;
}

static inline void sg_kunmap_atomic(unsigned long addr)
{
        kunmap_atomic((void *)addr);
}

/* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
                                            struct scatterlist *bounce_sgl,
                                            unsigned int orig_sgl_count,
                                            unsigned int bounce_sgl_count)
{
        int i;
        int j = 0;
        unsigned long src, dest;
        unsigned int srclen, destlen, copylen;
        unsigned int total_copied = 0;
        unsigned long bounce_addr = 0;
        unsigned long dest_addr = 0;
        unsigned long flags;

        local_irq_save(flags);

        for (i = 0; i < orig_sgl_count; i++) {
                dest_addr = sg_kmap_atomic(orig_sgl, i) + orig_sgl[i].offset;
                dest = dest_addr;
                destlen = orig_sgl[i].length;

                if (bounce_addr == 0)
                        bounce_addr = sg_kmap_atomic(bounce_sgl, j);

                while (destlen) {
                        src = bounce_addr + bounce_sgl[j].offset;
                        srclen = bounce_sgl[j].length - bounce_sgl[j].offset;

                        copylen = min(srclen, destlen);
                        memcpy((void *)dest, (void *)src, copylen);

                        total_copied += copylen;
                        bounce_sgl[j].offset += copylen;
                        destlen -= copylen;
                        dest += copylen;

                        if (bounce_sgl[j].offset == bounce_sgl[j].length) {
                                /* full */
                                sg_kunmap_atomic(bounce_addr);
                                j++;

                                /*
                                 * It is possible that the number of elements
                                 * in the bounce buffer may not be equal to
                                 * the number of elements in the original
                                 * scatter list. Handle this correctly.
                                 */

                                if (j == bounce_sgl_count) {
                                        /*
                                         * We are done; cleanup and return.
                                         */
                                        sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
                                        local_irq_restore(flags);
                                        return total_copied;
                                }

                                /* if we need to use another bounce buffer */
                                if (destlen || i != orig_sgl_count - 1)
                                        bounce_addr = sg_kmap_atomic(bounce_sgl, j);
                        } else if (destlen == 0 && i == orig_sgl_count - 1) {
                                /* unmap the last bounce that is < PAGE_SIZE */
                                sg_kunmap_atomic(bounce_addr);
                        }
                }

                sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
        }

        local_irq_restore(flags);

        return total_copied;
}

/* Assume the bounce_sgl has enough room, i.e. it was sized by create_bounce_buffer() */
static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
                                          struct scatterlist *bounce_sgl,
                                          unsigned int orig_sgl_count)
{
        int i;
        int j = 0;
        unsigned long src, dest;
        unsigned int srclen, destlen, copylen;
        unsigned int total_copied = 0;
        unsigned long bounce_addr = 0;
        unsigned long src_addr = 0;
        unsigned long flags;

        local_irq_save(flags);

        for (i = 0; i < orig_sgl_count; i++) {
                src_addr = sg_kmap_atomic(orig_sgl, i) + orig_sgl[i].offset;
                src = src_addr;
                srclen = orig_sgl[i].length;

                if (bounce_addr == 0)
                        bounce_addr = sg_kmap_atomic(bounce_sgl, j);

                while (srclen) {
                        /* assume bounce offset always == 0 */
                        dest = bounce_addr + bounce_sgl[j].length;
                        destlen = PAGE_SIZE - bounce_sgl[j].length;

                        copylen = min(srclen, destlen);
                        memcpy((void *)dest, (void *)src, copylen);

                        total_copied += copylen;
                        bounce_sgl[j].length += copylen;
                        srclen -= copylen;
                        src += copylen;

                        if (bounce_sgl[j].length == PAGE_SIZE) {
                                /* full..move to next entry */
                                sg_kunmap_atomic(bounce_addr);
                                j++;

                                /* if we need to use another bounce buffer */
                                if (srclen || i != orig_sgl_count - 1)
                                        bounce_addr = sg_kmap_atomic(bounce_sgl, j);

                        } else if (srclen == 0 && i == orig_sgl_count - 1) {
                                /* unmap the last bounce that is < PAGE_SIZE */
                                sg_kunmap_atomic(bounce_addr);
                        }
                }

                sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
        }

        local_irq_restore(flags);

        return total_copied;
}
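
/*
 * The two copy routines use different cursors: copy_from_bounce_buffer()
 * advances bounce_sgl[j].offset as it drains each bounce page, while
 * copy_to_bounce_buffer() grows bounce_sgl[j].length as it fills one
 * (create_bounce_buffer() starts writes at length 0). Both run with
 * interrupts disabled and map pages via sg_kmap_atomic(), which is why
 * every early exit has to unmap before returning.
 */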

static int storvsc_channel_init(struct hv_device *device)
{
        struct storvsc_device *stor_device;
        struct storvsc_cmd_request *request;
        struct vstor_packet *vstor_packet;
        int ret, t;

        stor_device = get_out_stor_device(device);
        if (!stor_device)
                return -ENODEV;

        request = &stor_device->init_request;
        vstor_packet = &request->vstor_packet;

        /*
         * Now, initiate the vsc/vsp initialization protocol on the open
         * channel
         */
        memset(request, 0, sizeof(struct storvsc_cmd_request));
        init_completion(&request->wait_event);
        vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
        vstor_packet->flags = REQUEST_COMPLETION_FLAG;

        ret = vmbus_sendpacket(device->channel, vstor_packet,
                               sizeof(struct vstor_packet),
                               (unsigned long)request,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret != 0)
                goto cleanup;

        t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto cleanup;
        }

        if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
            vstor_packet->status != 0)
                goto cleanup;

        /* reuse the packet for version range supported */
        memset(vstor_packet, 0, sizeof(struct vstor_packet));
        vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
        vstor_packet->flags = REQUEST_COMPLETION_FLAG;

        vstor_packet->version.major_minor =
                storvsc_get_version(VMSTOR_CURRENT_MAJOR, VMSTOR_CURRENT_MINOR);

        /*
         * The revision number is only used in Windows; set it to 0.
         */
        vstor_packet->version.revision = 0;

        ret = vmbus_sendpacket(device->channel, vstor_packet,
                               sizeof(struct vstor_packet),
                               (unsigned long)request,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret != 0)
                goto cleanup;

        t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto cleanup;
        }

        if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
            vstor_packet->status != 0)
                goto cleanup;

        memset(vstor_packet, 0, sizeof(struct vstor_packet));
        vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
        vstor_packet->flags = REQUEST_COMPLETION_FLAG;
        vstor_packet->storage_channel_properties.port_number =
                stor_device->port_number;

        ret = vmbus_sendpacket(device->channel, vstor_packet,
                               sizeof(struct vstor_packet),
                               (unsigned long)request,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

        if (ret != 0)
                goto cleanup;

        t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto cleanup;
        }

        if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
            vstor_packet->status != 0)
                goto cleanup;

        stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
        stor_device->target_id
                = vstor_packet->storage_channel_properties.target_id;

        memset(vstor_packet, 0, sizeof(struct vstor_packet));
        vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
        vstor_packet->flags = REQUEST_COMPLETION_FLAG;

        ret = vmbus_sendpacket(device->channel, vstor_packet,
                               sizeof(struct vstor_packet),
                               (unsigned long)request,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

        if (ret != 0)
                goto cleanup;

        t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto cleanup;
        }

        if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
            vstor_packet->status != 0)
                goto cleanup;

cleanup:
        return ret;
}
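
/*
 * To summarize, the exchange above is four request/response round trips
 * on the freshly opened channel, in order: BEGIN_INITIALIZATION,
 * QUERY_PROTOCOL_VERSION (offering VMSTOR_CURRENT_MAJOR/MINOR),
 * QUERY_PROPERTIES (from which path_id and target_id are taken) and
 * END_INITIALIZATION. Each step is sent with REQUEST_COMPLETION_FLAG and
 * only counts as success if the host answers with
 * VSTOR_OPERATION_COMPLETE_IO and a zero status.
 */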

static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
{
        struct scsi_cmnd *scmnd = cmd_request->cmd;
        struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
        void (*scsi_done_fn)(struct scsi_cmnd *);
        struct scsi_sense_hdr sense_hdr;
        struct vmscsi_request *vm_srb;
        struct storvsc_scan_work *wrk;
        struct stor_mem_pools *memp = scmnd->device->hostdata;

        vm_srb = &cmd_request->vstor_packet.vm_srb;
        if (cmd_request->bounce_sgl_count) {
                if (vm_srb->data_in == READ_TYPE)
                        copy_from_bounce_buffer(scsi_sglist(scmnd),
                                                cmd_request->bounce_sgl,
                                                scsi_sg_count(scmnd),
                                                cmd_request->bounce_sgl_count);
                destroy_bounce_buffer(cmd_request->bounce_sgl,
                                      cmd_request->bounce_sgl_count);
        }

        /*
         * If there is an error, offline the device since all
         * error recovery strategies would have already been
         * deployed on the host side. However, if the command
         * were a pass-through command, deal with it appropriately.
         */
        scmnd->result = vm_srb->scsi_status;

        if (vm_srb->srb_status == SRB_STATUS_ERROR) {
                switch (scmnd->cmnd[0]) {
                case ATA_16:
                case ATA_12:
                        set_host_byte(scmnd, DID_PASSTHROUGH);
                        break;
                default:
                        set_host_byte(scmnd, DID_TARGET_FAILURE);
                }
        }

        /*
         * If the LUN is invalid, remove the device.
         */
        if (vm_srb->srb_status == SRB_STATUS_INVALID_LUN) {
                struct storvsc_device *stor_dev;
                struct hv_device *dev = host_dev->dev;
                struct Scsi_Host *host;

                stor_dev = get_in_stor_device(dev);
                host = stor_dev->host;

                wrk = kmalloc(sizeof(struct storvsc_scan_work),
                              GFP_ATOMIC);
                if (!wrk) {
                        scmnd->result = DID_TARGET_FAILURE << 16;
                } else {
                        wrk->host = host;
                        wrk->lun = vm_srb->lun;
                        INIT_WORK(&wrk->work, storvsc_remove_lun);
                        schedule_work(&wrk->work);
                }
        }

        if (scmnd->result) {
                if (scsi_normalize_sense(scmnd->sense_buffer,
                                SCSI_SENSE_BUFFERSIZE, &sense_hdr))
                        scsi_print_sense_hdr("storvsc", &sense_hdr);
        }

        scsi_set_resid(scmnd,
                       cmd_request->data_buffer.len -
                       vm_srb->data_transfer_length);

        scsi_done_fn = scmnd->scsi_done;

        scmnd->host_scribble = NULL;
        scmnd->scsi_done = NULL;

        scsi_done_fn(scmnd);

        mempool_free(cmd_request, memp->request_mempool);
}

static void storvsc_on_io_completion(struct hv_device *device,
                                     struct vstor_packet *vstor_packet,
                                     struct storvsc_cmd_request *request)
{
        struct storvsc_device *stor_device;
        struct vstor_packet *stor_pkt;

        stor_device = hv_get_drvdata(device);
        stor_pkt = &request->vstor_packet;

        /*
         * The current SCSI handling on the host side does
         * not correctly handle:
         * INQUIRY command with page code parameter set to 0x80
         * MODE_SENSE command with cmd[2] == 0x1c
         *
         * Set up the srb and scsi status so this won't be fatal.
         * We do this so we can distinguish truly fatal failures
         * (srb status == 0x4) and off-line the device in that case.
         */

        if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
            (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) {
                vstor_packet->vm_srb.scsi_status = 0;
                vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS;
        }

        /* Copy over the status...etc */
        stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
        stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
        stor_pkt->vm_srb.sense_info_length =
                vstor_packet->vm_srb.sense_info_length;

        if (vstor_packet->vm_srb.scsi_status != 0 ||
            vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS) {
                dev_warn(&device->device,
                         "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
                         stor_pkt->vm_srb.cdb[0],
                         vstor_packet->vm_srb.scsi_status,
                         vstor_packet->vm_srb.srb_status);
        }

        if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
                /* CHECK_CONDITION */
                if (vstor_packet->vm_srb.srb_status &
                    SRB_STATUS_AUTOSENSE_VALID) {
                        /* autosense data available */
                        dev_warn(&device->device,
                                 "stor pkt %p autosense data valid - len %d\n",
                                 request,
                                 vstor_packet->vm_srb.sense_info_length);

                        memcpy(request->sense_buffer,
                               vstor_packet->vm_srb.sense_data,
                               vstor_packet->vm_srb.sense_info_length);
                }
        }

        stor_pkt->vm_srb.data_transfer_length =
                vstor_packet->vm_srb.data_transfer_length;

        storvsc_command_completion(request);

        if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
            stor_device->drain_notify)
                wake_up(&stor_device->waiting_to_drain);
}

static void storvsc_on_receive(struct hv_device *device,
                               struct vstor_packet *vstor_packet,
                               struct storvsc_cmd_request *request)
{
        struct storvsc_scan_work *work;
        struct storvsc_device *stor_device;

        switch (vstor_packet->operation) {
        case VSTOR_OPERATION_COMPLETE_IO:
                storvsc_on_io_completion(device, vstor_packet, request);
                break;

        case VSTOR_OPERATION_REMOVE_DEVICE:
        case VSTOR_OPERATION_ENUMERATE_BUS:
                stor_device = get_in_stor_device(device);
                work = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
                if (!work)
                        return;

                INIT_WORK(&work->work, storvsc_bus_scan);
                work->host = stor_device->host;
                schedule_work(&work->work);
                break;

        default:
                break;
        }
}
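
/*
 * In other words, an unsolicited REMOVE_DEVICE or ENUMERATE_BUS message
 * from the host (e.g. after a LUN is hot-added or hot-removed) is not
 * handled inline; it merely schedules storvsc_bus_scan() in process
 * context, which rescans every target so the SCSI midlayer picks up the
 * change.
 */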

static void storvsc_on_channel_callback(void *context)
{
        struct hv_device *device = (struct hv_device *)context;
        struct storvsc_device *stor_device;
        u32 bytes_recvd;
        u64 request_id;
        unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
        struct storvsc_cmd_request *request;
        int ret;

        stor_device = get_in_stor_device(device);
        if (!stor_device)
                return;

        do {
                ret = vmbus_recvpacket(device->channel, packet,
                                       ALIGN(sizeof(struct vstor_packet), 8),
                                       &bytes_recvd, &request_id);
                if (ret == 0 && bytes_recvd > 0) {

                        request = (struct storvsc_cmd_request *)
                                        (unsigned long)request_id;

                        if ((request == &stor_device->init_request) ||
                            (request == &stor_device->reset_request)) {

                                memcpy(&request->vstor_packet, packet,
                                       sizeof(struct vstor_packet));
                                complete(&request->wait_event);
                        } else {
                                storvsc_on_receive(device,
                                                   (struct vstor_packet *)packet,
                                                   request);
                        }
                } else {
                        break;
                }
        } while (1);
}

static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size)
{
        struct vmstorage_channel_properties props;
        int ret;

        memset(&props, 0, sizeof(struct vmstorage_channel_properties));

        ret = vmbus_open(device->channel,
                         ring_size,
                         ring_size,
                         (void *)&props,
                         sizeof(struct vmstorage_channel_properties),
                         storvsc_on_channel_callback, device);

        if (ret != 0)
                return ret;

        ret = storvsc_channel_init(device);

        return ret;
}

static int storvsc_dev_remove(struct hv_device *device)
{
        struct storvsc_device *stor_device;
        unsigned long flags;

        stor_device = hv_get_drvdata(device);

        spin_lock_irqsave(&device->channel->inbound_lock, flags);
        stor_device->destroy = true;
        spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

        /*
         * At this point, all outbound traffic should be disabled. We
         * only allow inbound traffic (responses) to proceed so that
         * outstanding requests can be completed.
         */

        storvsc_wait_to_drain(stor_device);

        /*
         * Since we have already drained, we don't need to busy wait
         * as was done in final_release_stor_device().
         * Note that we cannot set the ext pointer to NULL until
         * we have drained - to drain the outgoing packets, we need to
         * allow incoming packets.
         */
        spin_lock_irqsave(&device->channel->inbound_lock, flags);
        hv_set_drvdata(device, NULL);
        spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

        /* Close the channel */
        vmbus_close(device->channel);

        kfree(stor_device);
        return 0;
}

static int storvsc_do_io(struct hv_device *device,
                         struct storvsc_cmd_request *request)
{
        struct storvsc_device *stor_device;
        struct vstor_packet *vstor_packet;
        int ret = 0;

        vstor_packet = &request->vstor_packet;
        stor_device = get_out_stor_device(device);

        if (!stor_device)
                return -ENODEV;

        request->device = device;

        vstor_packet->flags |= REQUEST_COMPLETION_FLAG;

        vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);

        vstor_packet->vm_srb.sense_info_length = STORVSC_SENSE_BUFFER_SIZE;

        vstor_packet->vm_srb.data_transfer_length =
                request->data_buffer.len;

        vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;

        if (request->data_buffer.len) {
                ret = vmbus_sendpacket_multipagebuffer(device->channel,
                                                       &request->data_buffer,
                                                       vstor_packet,
                                                       sizeof(struct vstor_packet),
                                                       (unsigned long)request);
        } else {
                ret = vmbus_sendpacket(device->channel, vstor_packet,
                                       sizeof(struct vstor_packet),
                                       (unsigned long)request,
                                       VM_PKT_DATA_INBAND,
                                       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        }

        if (ret != 0)
                return ret;

        atomic_inc(&stor_device->num_outstanding_req);

        return ret;
}
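
/*
 * Note the two send paths above: requests that carry data go out via
 * vmbus_sendpacket_multipagebuffer() so the PFN list in data_buffer
 * travels with the packet, while dataless requests are sent as plain
 * in-band packets. Either way the request pointer doubles as the
 * transaction id, which is how storvsc_on_channel_callback() finds the
 * request again when the completion arrives.
 */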

static int storvsc_device_alloc(struct scsi_device *sdevice)
{
        struct stor_mem_pools *memp;
        int number = STORVSC_MIN_BUF_NR;

        memp = kzalloc(sizeof(struct stor_mem_pools), GFP_KERNEL);
        if (!memp)
                return -ENOMEM;

        memp->request_pool =
                kmem_cache_create(dev_name(&sdevice->sdev_dev),
                                  sizeof(struct storvsc_cmd_request), 0,
                                  SLAB_HWCACHE_ALIGN, NULL);

        if (!memp->request_pool)
                goto err0;

        memp->request_mempool = mempool_create(number, mempool_alloc_slab,
                                               mempool_free_slab,
                                               memp->request_pool);

        if (!memp->request_mempool)
                goto err1;

        sdevice->hostdata = memp;

        return 0;

err1:
        kmem_cache_destroy(memp->request_pool);

err0:
        kfree(memp);
        return -ENOMEM;
}

static void storvsc_device_destroy(struct scsi_device *sdevice)
{
        struct stor_mem_pools *memp = sdevice->hostdata;

        mempool_destroy(memp->request_mempool);
        kmem_cache_destroy(memp->request_pool);
        kfree(memp);
        sdevice->hostdata = NULL;
}

static int storvsc_device_configure(struct scsi_device *sdevice)
{
        scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG,
                                STORVSC_MAX_IO_REQUESTS);

        blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);

        blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);

        return 0;
}

static int storvsc_get_chs(struct scsi_device *sdev, struct block_device *bdev,
                           sector_t capacity, int *info)
{
        sector_t nsect = capacity;
        sector_t cylinders = nsect;
        int heads, sectors_pt;

        /*
         * We are making up these values; let us keep it simple.
         */
        heads = 0xff;
        sectors_pt = 0x3f;      /* Sectors per track */
        sector_div(cylinders, heads * sectors_pt);
        if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect)
                cylinders = 0xffff;

        info[0] = heads;
        info[1] = sectors_pt;
        info[2] = (int)cylinders;

        return 0;
}
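
/*
 * Worked example of the made-up geometry: a 1 GiB disk has 2097152
 * 512-byte sectors; with 255 heads and 63 sectors per track this reports
 * 2097152 / (255 * 63) = 130 cylinders (integer division). The 0xffff
 * clamp only kicks in for disks too large to describe through the
 * three-int CHS interface.
 */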

static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
        struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
        struct hv_device *device = host_dev->dev;

        struct storvsc_device *stor_device;
        struct storvsc_cmd_request *request;
        struct vstor_packet *vstor_packet;
        int ret, t;

        stor_device = get_out_stor_device(device);
        if (!stor_device)
                return FAILED;

        request = &stor_device->reset_request;
        vstor_packet = &request->vstor_packet;

        init_completion(&request->wait_event);

        vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
        vstor_packet->flags = REQUEST_COMPLETION_FLAG;
        vstor_packet->vm_srb.path_id = stor_device->path_id;

        ret = vmbus_sendpacket(device->channel, vstor_packet,
                               sizeof(struct vstor_packet),
                               (unsigned long)&stor_device->reset_request,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret != 0)
                return FAILED;

        t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
        if (t == 0)
                return TIMEOUT_ERROR;

        /*
         * At this point, all outstanding requests in the adapter
         * should have been flushed out and returned to us.
         * There is a potential race here where the host may be in
         * the process of responding when we return from here.
         * Just wait for all in-transit packets to be accounted for
         * before we return from here.
         */
        storvsc_wait_to_drain(stor_device);

        return SUCCESS;
}

static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
{
        bool allowed = true;
        u8 scsi_op = scmnd->cmnd[0];

        switch (scsi_op) {
        /*
         * smartd sends this command and the host does not handle
         * it. So, don't send it.
         */
        case SET_WINDOW:
                scmnd->result = ILLEGAL_REQUEST << 16;
                allowed = false;
                break;
        default:
                break;
        }
        return allowed;
}

static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
{
        int ret;
        struct hv_host_device *host_dev = shost_priv(host);
        struct hv_device *dev = host_dev->dev;
        struct storvsc_cmd_request *cmd_request;
        unsigned int request_size = 0;
        int i;
        struct scatterlist *sgl;
        unsigned int sg_count = 0;
        struct vmscsi_request *vm_srb;
        struct stor_mem_pools *memp = scmnd->device->hostdata;

        if (!storvsc_scsi_cmd_ok(scmnd)) {
                scmnd->scsi_done(scmnd);
                return 0;
        }

        request_size = sizeof(struct storvsc_cmd_request);

        cmd_request = mempool_alloc(memp->request_mempool,
                                    GFP_ATOMIC);

        /*
         * We might be invoked in an interrupt context; hence
         * mempool_alloc() can fail.
         */
        if (!cmd_request)
                return SCSI_MLQUEUE_DEVICE_BUSY;

        memset(cmd_request, 0, sizeof(struct storvsc_cmd_request));

        /* Setup the cmd request */
        cmd_request->cmd = scmnd;

        scmnd->host_scribble = (unsigned char *)cmd_request;

        vm_srb = &cmd_request->vstor_packet.vm_srb;

        /* Build the SRB */
        switch (scmnd->sc_data_direction) {
        case DMA_TO_DEVICE:
                vm_srb->data_in = WRITE_TYPE;
                break;
        case DMA_FROM_DEVICE:
                vm_srb->data_in = READ_TYPE;
                break;
        default:
                vm_srb->data_in = UNKNOWN_TYPE;
                break;
        }

        vm_srb->port_number = host_dev->port;
        vm_srb->path_id = scmnd->device->channel;
        vm_srb->target_id = scmnd->device->id;
        vm_srb->lun = scmnd->device->lun;

        vm_srb->cdb_length = scmnd->cmd_len;

        memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);

        cmd_request->sense_buffer = scmnd->sense_buffer;

        cmd_request->data_buffer.len = scsi_bufflen(scmnd);
        if (scsi_sg_count(scmnd)) {
                sgl = (struct scatterlist *)scsi_sglist(scmnd);
                sg_count = scsi_sg_count(scmnd);

                /* check if we need to bounce the sgl */
                if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
                        cmd_request->bounce_sgl =
                                create_bounce_buffer(sgl, scsi_sg_count(scmnd),
                                                     scsi_bufflen(scmnd),
                                                     vm_srb->data_in);
                        if (!cmd_request->bounce_sgl) {
                                ret = SCSI_MLQUEUE_HOST_BUSY;
                                goto queue_error;
                        }

                        cmd_request->bounce_sgl_count =
                                ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
                                        PAGE_SHIFT;

                        if (vm_srb->data_in == WRITE_TYPE)
                                copy_to_bounce_buffer(sgl,
                                                      cmd_request->bounce_sgl,
                                                      scsi_sg_count(scmnd));

                        sgl = cmd_request->bounce_sgl;
                        sg_count = cmd_request->bounce_sgl_count;
                }

                cmd_request->data_buffer.offset = sgl[0].offset;

                for (i = 0; i < sg_count; i++)
                        cmd_request->data_buffer.pfn_array[i] =
                                page_to_pfn(sg_page((&sgl[i])));

        } else if (scsi_sglist(scmnd)) {
                cmd_request->data_buffer.offset =
                        virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
                cmd_request->data_buffer.pfn_array[0] =
                        virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
        }

        /* Invokes the vsc to start an IO */
        ret = storvsc_do_io(dev, cmd_request);

        if (ret == -EAGAIN) {
                /* no more space */

                if (cmd_request->bounce_sgl_count) {
                        destroy_bounce_buffer(cmd_request->bounce_sgl,
                                              cmd_request->bounce_sgl_count);

                        ret = SCSI_MLQUEUE_DEVICE_BUSY;
                        goto queue_error;
                }
        }

        return 0;

queue_error:
        mempool_free(cmd_request, memp->request_mempool);
        scmnd->host_scribble = NULL;
        return ret;
}

static struct scsi_host_template scsi_driver = {
        .module                 = THIS_MODULE,
        .name                   = "storvsc_host_t",
        .bios_param             = storvsc_get_chs,
        .queuecommand           = storvsc_queuecommand,
        .eh_host_reset_handler  = storvsc_host_reset_handler,
        .slave_alloc            = storvsc_device_alloc,
        .slave_destroy          = storvsc_device_destroy,
        .slave_configure        = storvsc_device_configure,
        .cmd_per_lun            = 1,
        /* 64 max_queue * 1 target */
        .can_queue              = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
        .this_id                = -1,
        /* no use setting to 0 since ll_blk_rw reset it to 1 */
        /* currently 32 */
        .sg_tablesize           = MAX_MULTIPAGE_BUFFER_COUNT,
        .use_clustering         = DISABLE_CLUSTERING,
        /* Make sure we don't get a sg segment that crosses a page boundary */
        .dma_boundary           = PAGE_SIZE-1,
};

enum {
        SCSI_GUID,
        IDE_GUID,
};

static const struct hv_vmbus_device_id id_table[] = {
        /* SCSI guid */
        { VMBUS_DEVICE(0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
                       0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
          .driver_data = SCSI_GUID },
        /* IDE guid */
        { VMBUS_DEVICE(0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
                       0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
          .driver_data = IDE_GUID },
        { },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

static int storvsc_probe(struct hv_device *device,
                         const struct hv_vmbus_device_id *dev_id)
{
        int ret;
        struct Scsi_Host *host;
        struct hv_host_device *host_dev;
        bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
        int target = 0;
        struct storvsc_device *stor_device;

        host = scsi_host_alloc(&scsi_driver,
                               sizeof(struct hv_host_device));
        if (!host)
                return -ENOMEM;

        host_dev = shost_priv(host);
        memset(host_dev, 0, sizeof(struct hv_host_device));

        host_dev->port = host->host_no;
        host_dev->dev = device;

        stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
        if (!stor_device) {
                ret = -ENOMEM;
                goto err_out0;
        }

        stor_device->destroy = false;
        init_waitqueue_head(&stor_device->waiting_to_drain);
        stor_device->device = device;
        stor_device->host = host;
        hv_set_drvdata(device, stor_device);

        stor_device->port_number = host->host_no;
        ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size);
        if (ret)
                goto err_out1;

        host_dev->path = stor_device->path_id;
        host_dev->target = stor_device->target_id;

        /* max # of devices per target */
        host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
        /* max # of targets per channel */
        host->max_id = STORVSC_MAX_TARGETS;
        /* max # of channels */
        host->max_channel = STORVSC_MAX_CHANNELS - 1;
        /* max cmd length */
        host->max_cmd_len = STORVSC_MAX_CMD_LEN;

        /* Register the HBA and start the scsi bus scan */
        ret = scsi_add_host(host, &device->device);
        if (ret != 0)
                goto err_out2;

        if (!dev_is_ide) {
                scsi_scan_host(host);
        } else {
                target = (device->dev_instance.b[5] << 8 |
                          device->dev_instance.b[4]);
                ret = scsi_add_device(host, 0, target, 0);
                if (ret) {
                        scsi_remove_host(host);
                        goto err_out2;
                }
        }
        return 0;

err_out2:
        /*
         * Once we have connected with the host, we would need to
         * invoke storvsc_dev_remove() to rollback this state and
         * this call also frees up the stor_device; hence the jump around
         * the err_out1 label.
         */
        storvsc_dev_remove(device);
        goto err_out0;

err_out1:
        kfree(stor_device);

err_out0:
        scsi_host_put(host);
        return ret;
}

static int storvsc_remove(struct hv_device *dev)
{
        struct storvsc_device *stor_device = hv_get_drvdata(dev);
        struct Scsi_Host *host = stor_device->host;

        scsi_remove_host(host);
        storvsc_dev_remove(dev);
        scsi_host_put(host);

        return 0;
}

static struct hv_driver storvsc_drv = {
        .name = KBUILD_MODNAME,
        .id_table = id_table,
        .probe = storvsc_probe,
        .remove = storvsc_remove,
};

static int __init storvsc_drv_init(void)
{
        u32 max_outstanding_req_per_channel;

        /*
         * Divide the ring buffer data size (which is 1 page less
         * than the ring buffer size since that page is reserved for
         * the ring buffer indices) by the max request size (which is
         * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
         */
        max_outstanding_req_per_channel =
                ((storvsc_ringbuffer_size - PAGE_SIZE) /
                  ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
                        sizeof(struct vstor_packet) + sizeof(u64),
                        sizeof(u64)));

        if (max_outstanding_req_per_channel <
            STORVSC_MAX_IO_REQUESTS)
                return -EINVAL;

        return vmbus_driver_register(&storvsc_drv);
}
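
/*
 * Rough worked example (exact sizes depend on the architecture and on
 * struct vstor_packet): with the default 20-page ring buffer and 4 KiB
 * pages, 19 pages (77824 bytes) of data area divided by the aligned
 * per-request footprint must come out to at least
 * STORVSC_MAX_IO_REQUESTS (128), i.e. the footprint must stay under
 * roughly 600 bytes, or module init fails with -EINVAL.
 */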

static void __exit storvsc_drv_exit(void)
{
        vmbus_driver_unregister(&storvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
module_init(storvsc_drv_init);
module_exit(storvsc_drv_exit);