Linux Kernel 3.7.1
target_core_sbc.c
/*
 * SCSI Block Commands (SBC) parsing and emulation.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_ua.h"


static int sbc_emulate_readcapacity(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        unsigned long long blocks_long = dev->transport->get_blocks(dev);
        unsigned char *rbuf;
        unsigned char buf[8];
        u32 blocks;

        if (blocks_long >= 0x00000000ffffffff)
                blocks = 0xffffffff;
        else
                blocks = (u32)blocks_long;

        buf[0] = (blocks >> 24) & 0xff;
        buf[1] = (blocks >> 16) & 0xff;
        buf[2] = (blocks >> 8) & 0xff;
        buf[3] = blocks & 0xff;
        buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
        buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
        buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
        buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;

        rbuf = transport_kmap_data_sg(cmd);
        if (rbuf) {
                memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
                transport_kunmap_data_sg(cmd);
        }

        target_complete_cmd(cmd, GOOD);
        return 0;
}

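/*
 * Example: an initiator decoding the 8-byte READ CAPACITY (10) payload
 * built above. This is an illustrative sketch, not code from this file;
 * it assumes only the SBC wire format (big-endian RETURNED LOGICAL BLOCK
 * ADDRESS in bytes 0-3, BLOCK LENGTH IN BYTES in bytes 4-7):
 *
 *        u32 last_lba  = get_unaligned_be32(&resp[0]);
 *        u32 block_len = get_unaligned_be32(&resp[4]);
 *        u64 capacity  = ((u64)last_lba + 1) * block_len;
 */
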
static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        unsigned char *rbuf;
        unsigned char buf[32];
        unsigned long long blocks = dev->transport->get_blocks(dev);

        memset(buf, 0, sizeof(buf));
        buf[0] = (blocks >> 56) & 0xff;
        buf[1] = (blocks >> 48) & 0xff;
        buf[2] = (blocks >> 40) & 0xff;
        buf[3] = (blocks >> 32) & 0xff;
        buf[4] = (blocks >> 24) & 0xff;
        buf[5] = (blocks >> 16) & 0xff;
        buf[6] = (blocks >> 8) & 0xff;
        buf[7] = blocks & 0xff;
        buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
        buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
        buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
        buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
        /*
         * Set Thin Provisioning Enable bit following sbc3r22 in section
         * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
         */
        if (dev->se_sub_dev->se_dev_attrib.emulate_tpu ||
            dev->se_sub_dev->se_dev_attrib.emulate_tpws)
                buf[14] = 0x80;

        rbuf = transport_kmap_data_sg(cmd);
        if (rbuf) {
                memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
                transport_kunmap_data_sg(cmd);
        }

        target_complete_cmd(cmd, GOOD);
        return 0;
}

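/*
 * Example: the TPE bit set above lands in bit 7 of byte 14 of the
 * READ CAPACITY (16) parameter data, so an initiator that wants to know
 * whether UNMAP or WRITE SAME w/ UNMAP are worth issuing can check
 * (resp being the returned buffer; illustrative only):
 *
 *        bool lun_is_thin = (resp[14] & 0x80) != 0;
 */
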
sector_t sbc_get_write_same_sectors(struct se_cmd *cmd)
{
        u32 num_blocks;

        if (cmd->t_task_cdb[0] == WRITE_SAME)
                num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
        else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
                num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
        else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
                num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);

        /*
         * Use the explicit range when non-zero is supplied, otherwise calculate
         * the remaining range based on ->get_blocks() - starting LBA.
         */
        if (num_blocks)
                return num_blocks;

        return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
                cmd->t_task_lba + 1;
}
EXPORT_SYMBOL(sbc_get_write_same_sectors);

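/*
 * Worked example of the fallback above: on a 16-block device
 * (->get_blocks() returns the last LBA, 15), a WRITE SAME at
 * t_task_lba 10 with a zero NUMBER OF LOGICAL BLOCKS field covers
 * 15 - 10 + 1 = 6 blocks, i.e. LBA 10 through the end of the device.
 */
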
static int sbc_emulate_verify(struct se_cmd *cmd)
{
        target_complete_cmd(cmd, GOOD);
        return 0;
}

static int sbc_emulate_noop(struct se_cmd *cmd)
{
        target_complete_cmd(cmd, GOOD);
        return 0;
}

static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
        return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}

static int sbc_check_valid_sectors(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        unsigned long long end_lba;
        u32 sectors;

        sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size;
        end_lba = dev->transport->get_blocks(dev) + 1;

        if (cmd->t_task_lba + sectors > end_lba) {
                pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
                        cmd->t_task_lba, sectors, end_lba);
                return -EINVAL;
        }

        return 0;
}

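/*
 * Example: with ->get_blocks() returning 999 (a 1000-block device),
 * end_lba is 1000. A 10-sector range starting at LBA 990 touches
 * LBAs 990..999, and 990 + 10 = 1000 is not > 1000, so it passes;
 * the same 10 sectors starting at LBA 991 would be rejected with
 * -EINVAL.
 */
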
static inline u32 transport_get_sectors_6(unsigned char *cdb)
{
        /*
         * Use 8-bit sector value.  SBC-3 says:
         *
         *   A TRANSFER LENGTH field set to zero specifies that 256
         *   logical blocks shall be written.  Any other value
         *   specifies the number of logical blocks that shall be
         *   written.
         */
        return cdb[4] ? : 256;
}

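/*
 * Note: "cdb[4] ? : 256" uses the GCC extension that omits the middle
 * operand of ?:, so the return statement above is equivalent to:
 *
 *        return cdb[4] ? cdb[4] : 256;
 *
 * giving 256 blocks for a zero TRANSFER LENGTH, as the SBC-3 quote says.
 */
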
static inline u32 transport_get_sectors_10(unsigned char *cdb)
{
        return (u32)(cdb[7] << 8) + cdb[8];
}

static inline u32 transport_get_sectors_12(unsigned char *cdb)
{
        return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}

static inline u32 transport_get_sectors_16(unsigned char *cdb)
{
        return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
               (cdb[12] << 8) + cdb[13];
}

/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(unsigned char *cdb)
{
        return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
               (cdb[30] << 8) + cdb[31];
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
        return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

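/*
 * Example: READ (6)/WRITE (6) only carry a 21-bit LBA (the low five
 * bits of byte 1 plus bytes 2 and 3), so for cdb[1..3] = 0x01 0x02 0x03
 * the helper above yields (0x01 << 16) | (0x02 << 8) | 0x03 = 0x010203.
 */
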
static inline u32 transport_lba_32(unsigned char *cdb)
{
        return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
        unsigned int __v1, __v2;

        __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
        __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

        return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

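/*
 * The two 32-bit halves above assemble the big-endian 64-bit LBA from
 * CDB bytes 2..9; with the <asm/unaligned.h> helpers already included
 * in this file, an equivalent formulation (sketch only) would be:
 *
 *        return get_unaligned_be64(&cdb[2]);
 */
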
/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
        unsigned int __v1, __v2;

        __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
        __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

        return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

static int sbc_write_same_supported(struct se_device *dev,
        unsigned char *flags)
{
        if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
                pr_err("WRITE_SAME PBDATA and LBDATA"
                        " bits not supported for Block Discard"
                        " Emulation\n");
                return -ENOSYS;
        }

        /*
         * Currently for the emulated case we only accept
         * tpws with the UNMAP=1 bit set.
         */
        if (!(flags[0] & 0x08)) {
                pr_err("WRITE_SAME w/o UNMAP bit not"
                        " supported for Block Discard Emulation\n");
                return -ENOSYS;
        }

        return 0;
}

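/*
 * For reference, the bits tested above sit in the byte following the
 * WRITE SAME (10)/(16) opcode (byte 10 for WRITE SAME (32)): per SBC-3,
 * UNMAP is 0x08, PBDATA is 0x04 and LBDATA is 0x02. So the only CDB
 * flavor this emulation accepts looks like (illustrative):
 *
 *        cdb[1] = 0x08;        UNMAP=1, PBDATA=0, LBDATA=0
 */
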
static void xdreadwrite_callback(struct se_cmd *cmd)
{
        unsigned char *buf, *addr;
        struct scatterlist *sg;
        unsigned int offset;
        int i;
        int count;
        /*
         * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
         *
         * 1) read the specified logical block(s);
         * 2) transfer logical blocks from the data-out buffer;
         * 3) XOR the logical blocks transferred from the data-out buffer with
         *    the logical blocks read, storing the resulting XOR data in a buffer;
         * 4) if the DISABLE WRITE bit is set to zero, then write the logical
         *    blocks transferred from the data-out buffer; and
         * 5) transfer the resulting XOR data to the data-in buffer.
         */
        buf = kmalloc(cmd->data_length, GFP_KERNEL);
        if (!buf) {
                pr_err("Unable to allocate xor_callback buf\n");
                return;
        }
        /*
         * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
         * into the locally allocated *buf
         */
        sg_copy_to_buffer(cmd->t_data_sg,
                          cmd->t_data_nents,
                          buf,
                          cmd->data_length);

        /*
         * Now perform the XOR against the BIDI read memory located at
         * cmd->t_mem_bidi_list
         */

        offset = 0;
        for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
                addr = kmap_atomic(sg_page(sg));
                if (!addr)
                        goto out;

                for (i = 0; i < sg->length; i++)
                        *(addr + sg->offset + i) ^= *(buf + offset + i);

                offset += sg->length;
                kunmap_atomic(addr);
        }

out:
        kfree(buf);
}

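/*
 * The loop above implements step 3 of the list: each byte of the BIDI
 * read buffer is XORed in place with the corresponding byte of the
 * data-out buffer. On flat buffers (no scatterlists, illustrative only)
 * the same operation is just:
 *
 *        for (i = 0; i < len; i++)
 *                read_buf[i] ^= write_buf[i];
 *
 * leaving the XOR of the two data streams for the data-in transfer.
 */
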
int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
{
        struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
        struct se_device *dev = cmd->se_dev;
        unsigned char *cdb = cmd->t_task_cdb;
        unsigned int size;
        u32 sectors = 0;
        int ret;

        switch (cdb[0]) {
        case READ_6:
                sectors = transport_get_sectors_6(cdb);
                cmd->t_task_lba = transport_lba_21(cdb);
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_cmd = ops->execute_rw;
                break;
        case READ_10:
                sectors = transport_get_sectors_10(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_cmd = ops->execute_rw;
                break;
        case READ_12:
                sectors = transport_get_sectors_12(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_cmd = ops->execute_rw;
                break;
        case READ_16:
                sectors = transport_get_sectors_16(cdb);
                cmd->t_task_lba = transport_lba_64(cdb);
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_cmd = ops->execute_rw;
                break;
        case WRITE_6:
                sectors = transport_get_sectors_6(cdb);
                cmd->t_task_lba = transport_lba_21(cdb);
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_cmd = ops->execute_rw;
                break;
        case WRITE_10:
        case WRITE_VERIFY:
                sectors = transport_get_sectors_10(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
                if (cdb[1] & 0x8)
                        cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_cmd = ops->execute_rw;
                break;
        case WRITE_12:
                sectors = transport_get_sectors_12(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
                if (cdb[1] & 0x8)
                        cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_cmd = ops->execute_rw;
                break;
        case WRITE_16:
                sectors = transport_get_sectors_16(cdb);
                cmd->t_task_lba = transport_lba_64(cdb);
                if (cdb[1] & 0x8)
                        cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_cmd = ops->execute_rw;
                break;
        case XDWRITEREAD_10:
                if ((cmd->data_direction != DMA_TO_DEVICE) ||
                    !(cmd->se_cmd_flags & SCF_BIDI))
                        goto out_invalid_cdb_field;
                sectors = transport_get_sectors_10(cdb);

                cmd->t_task_lba = transport_lba_32(cdb);
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

                /*
                 * Setup BIDI XOR callback to be run after I/O completion.
                 */
                cmd->execute_cmd = ops->execute_rw;
                cmd->transport_complete_callback = &xdreadwrite_callback;
                if (cdb[1] & 0x8)
                        cmd->se_cmd_flags |= SCF_FUA;
                break;
        case VARIABLE_LENGTH_CMD:
        {
                u16 service_action = get_unaligned_be16(&cdb[8]);
                switch (service_action) {
                case XDWRITEREAD_32:
                        sectors = transport_get_sectors_32(cdb);

                        /*
                         * Use WRITE_32 and READ_32 opcodes for the emulated
                         * XDWRITE_READ_32 logic.
                         */
                        cmd->t_task_lba = transport_lba_64_ext(cdb);
                        cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;

                        /*
                         * Setup BIDI XOR callback to be run after I/O
                         * completion.
                         */
                        cmd->execute_cmd = ops->execute_rw;
                        cmd->transport_complete_callback = &xdreadwrite_callback;
                        if (cdb[1] & 0x8)
                                cmd->se_cmd_flags |= SCF_FUA;
                        break;
                case WRITE_SAME_32:
                        if (!ops->execute_write_same)
                                goto out_unsupported_cdb;

                        sectors = transport_get_sectors_32(cdb);
                        if (!sectors) {
                                pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
                                       " supported\n");
                                goto out_invalid_cdb_field;
                        }

                        size = sbc_get_size(cmd, 1);
                        cmd->t_task_lba = get_unaligned_be64(&cdb[12]);

                        if (sbc_write_same_supported(dev, &cdb[10]) < 0)
                                goto out_unsupported_cdb;
                        cmd->execute_cmd = ops->execute_write_same;
                        break;
                default:
                        pr_err("VARIABLE_LENGTH_CMD service action"
                               " 0x%04x not supported\n", service_action);
                        goto out_unsupported_cdb;
                }
                break;
        }
        case READ_CAPACITY:
                size = READ_CAP_LEN;
                cmd->execute_cmd = sbc_emulate_readcapacity;
                break;
        case SERVICE_ACTION_IN:
                switch (cmd->t_task_cdb[1] & 0x1f) {
                case SAI_READ_CAPACITY_16:
                        cmd->execute_cmd = sbc_emulate_readcapacity_16;
                        break;
                default:
                        pr_err("Unsupported SA: 0x%02x\n",
                               cmd->t_task_cdb[1] & 0x1f);
                        goto out_invalid_cdb_field;
                }
                size = (cdb[10] << 24) | (cdb[11] << 16) |
                       (cdb[12] << 8) | cdb[13];
                break;
        case SYNCHRONIZE_CACHE:
        case SYNCHRONIZE_CACHE_16:
                if (!ops->execute_sync_cache)
                        goto out_unsupported_cdb;

                /*
                 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
                 */
                if (cdb[0] == SYNCHRONIZE_CACHE) {
                        sectors = transport_get_sectors_10(cdb);
                        cmd->t_task_lba = transport_lba_32(cdb);
                } else {
                        sectors = transport_get_sectors_16(cdb);
                        cmd->t_task_lba = transport_lba_64(cdb);
                }

                size = sbc_get_size(cmd, sectors);

                /*
                 * Check to ensure that LBA + Range does not exceed past end of
                 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
                 */
                if (cmd->t_task_lba || sectors) {
                        if (sbc_check_valid_sectors(cmd) < 0)
                                goto out_invalid_cdb_field;
                }
                cmd->execute_cmd = ops->execute_sync_cache;
                break;
        case UNMAP:
                if (!ops->execute_unmap)
                        goto out_unsupported_cdb;

                size = get_unaligned_be16(&cdb[7]);
                cmd->execute_cmd = ops->execute_unmap;
                break;
        case WRITE_SAME_16:
                if (!ops->execute_write_same)
                        goto out_unsupported_cdb;

                sectors = transport_get_sectors_16(cdb);
                if (!sectors) {
                        pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
                        goto out_invalid_cdb_field;
                }

                size = sbc_get_size(cmd, 1);
                cmd->t_task_lba = get_unaligned_be64(&cdb[2]);

                if (sbc_write_same_supported(dev, &cdb[1]) < 0)
                        goto out_unsupported_cdb;
                cmd->execute_cmd = ops->execute_write_same;
                break;
        case WRITE_SAME:
                if (!ops->execute_write_same)
                        goto out_unsupported_cdb;

                sectors = transport_get_sectors_10(cdb);
                if (!sectors) {
                        pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
                        goto out_invalid_cdb_field;
                }

                size = sbc_get_size(cmd, 1);
                cmd->t_task_lba = get_unaligned_be32(&cdb[2]);

                /*
                 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
                 * of byte 1 bit 3 UNMAP instead of original reserved field
                 */
                if (sbc_write_same_supported(dev, &cdb[1]) < 0)
                        goto out_unsupported_cdb;
                cmd->execute_cmd = ops->execute_write_same;
                break;
        case VERIFY:
                size = 0;
                cmd->execute_cmd = sbc_emulate_verify;
                break;
        case REZERO_UNIT:
        case SEEK_6:
        case SEEK_10:
                /*
                 * There are still clients out there which use these old SCSI-2
                 * commands. This mainly happens when running VMs with legacy
                 * guest systems, connected via SCSI command pass-through to
                 * iSCSI targets. Make them happy and return status GOOD.
                 */
                size = 0;
                cmd->execute_cmd = sbc_emulate_noop;
                break;
        default:
                ret = spc_parse_cdb(cmd, &size);
                if (ret)
                        return ret;
        }

        /* reject any command that we don't have a handler for */
        if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
                goto out_unsupported_cdb;

        if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
                unsigned long long end_lba;

                if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
                        printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
                                " big sectors %u exceeds fabric_max_sectors:"
                                " %u\n", cdb[0], sectors,
                                su_dev->se_dev_attrib.fabric_max_sectors);
                        goto out_invalid_cdb_field;
                }
                if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
                        printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
                                " big sectors %u exceeds backend hw_max_sectors:"
                                " %u\n", cdb[0], sectors,
                                su_dev->se_dev_attrib.hw_max_sectors);
                        goto out_invalid_cdb_field;
                }

                end_lba = dev->transport->get_blocks(dev) + 1;
                if (cmd->t_task_lba + sectors > end_lba) {
                        pr_err("cmd exceeds last lba %llu "
                               "(lba %llu, sectors %u)\n",
                               end_lba, cmd->t_task_lba, sectors);
                        goto out_invalid_cdb_field;
                }

                size = sbc_get_size(cmd, sectors);
        }

        ret = target_cmd_size_check(cmd, size);
        if (ret < 0)
                return ret;

        return 0;

out_unsupported_cdb:
        cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
        cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
        return -EINVAL;
out_invalid_cdb_field:
        cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
        cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
        return -EINVAL;
}
EXPORT_SYMBOL(sbc_parse_cdb);
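
/*
 * A storage backend consumes this parser by handing its struct spc_ops
 * to sbc_parse_cdb() from its own parse_cdb() method. A minimal sketch
 * along the lines of target_core_iblock.c (the iblock_* names here are
 * illustrative, not quoted from that file):
 *
 *        static struct spc_ops iblock_spc_ops = {
 *                .execute_rw             = iblock_execute_rw,
 *                .execute_sync_cache     = iblock_execute_sync_cache,
 *                .execute_write_same     = iblock_execute_write_same,
 *                .execute_unmap          = iblock_execute_unmap,
 *        };
 *
 *        static int iblock_parse_cdb(struct se_cmd *cmd)
 *        {
 *                return sbc_parse_cdb(cmd, &iblock_spc_ops);
 *        }
 */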