drivers/scsi/scsi_debug.c
1 /*
2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * This version is more generic, simulating a variable number of disks
10 * (or disk-like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
12 * SAS disks.
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
26 */
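/* The two option styles mentioned above are equivalent; for example
 * "scsi_debug.num_tgts=2 scsi_debug.max_luns=4" on the kernel command line
 * and "modprobe scsi_debug num_tgts=2 max_luns=4" at load time request the
 * same configuration, and several parameters (for example every_nth) can
 * also be changed at run time via sysfs.
 */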
28 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
58 #include "sd.h"
59 #include "scsi_logging.h"
61 #define SCSI_DEBUG_VERSION "1.82"
62 static const char * scsi_debug_version_date = "20100324";
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define UNRECOVERED_READ_ERR 0x11
68 #define PARAMETER_LIST_LENGTH_ERR 0x1a
69 #define INVALID_OPCODE 0x20
70 #define ADDR_OUT_OF_RANGE 0x21
71 #define INVALID_COMMAND_OPCODE 0x20
72 #define INVALID_FIELD_IN_CDB 0x24
73 #define INVALID_FIELD_IN_PARAM_LIST 0x26
74 #define POWERON_RESET 0x29
75 #define SAVING_PARAMS_UNSUP 0x39
76 #define TRANSPORT_PROBLEM 0x4b
77 #define THRESHOLD_EXCEEDED 0x5d
78 #define LOW_POWER_COND_ON 0x5e
80 /* Additional Sense Code Qualifier (ASCQ) */
81 #define ACK_NAK_TO 0x3
83 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
85 /* Default values for driver parameters */
86 #define DEF_NUM_HOST 1
87 #define DEF_NUM_TGTS 1
88 #define DEF_MAX_LUNS 1
89 /* With these defaults, this driver will make 1 host with 1 target
90 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
91 */
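/* The simulated topology scales as roughly add_host * num_tgts * max_luns;
 * e.g. "modprobe scsi_debug add_host=2 num_tgts=2 max_luns=2" should
 * present 8 logical units, all backed by the same shared ramdisk image.
 */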
92 #define DEF_DELAY 1
93 #define DEF_DEV_SIZE_MB 8
94 #define DEF_EVERY_NTH 0
95 #define DEF_NUM_PARTS 0
96 #define DEF_OPTS 0
97 #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
98 #define DEF_PTYPE 0
99 #define DEF_D_SENSE 0
100 #define DEF_NO_LUN_0 0
101 #define DEF_VIRTUAL_GB 0
102 #define DEF_FAKE_RW 0
103 #define DEF_VPD_USE_HOSTNO 1
104 #define DEF_SECTOR_SIZE 512
105 #define DEF_DIX 0
106 #define DEF_DIF 0
107 #define DEF_GUARD 0
108 #define DEF_ATO 1
109 #define DEF_PHYSBLK_EXP 0
110 #define DEF_LOWEST_ALIGNED 0
111 #define DEF_OPT_BLKS 64
112 #define DEF_UNMAP_MAX_BLOCKS 0
113 #define DEF_UNMAP_MAX_DESC 0
114 #define DEF_UNMAP_GRANULARITY 0
115 #define DEF_UNMAP_ALIGNMENT 0
117 /* bit mask values for scsi_debug_opts */
118 #define SCSI_DEBUG_OPT_NOISE 1
119 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
120 #define SCSI_DEBUG_OPT_TIMEOUT 4
121 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
122 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
123 #define SCSI_DEBUG_OPT_DIF_ERR 32
124 #define SCSI_DEBUG_OPT_DIX_ERR 64
125 /* When "every_nth" > 0 then modulo "every_nth" commands:
126 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
127 * - a RECOVERED_ERROR is simulated on successful read and write
128 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
129 * - a TRANSPORT_ERROR is simulated on successful read and write
130 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
132 * When "every_nth" < 0 then after "- every_nth" commands:
133 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
134 * - a RECOVERED_ERROR is simulated on successful read and write
135 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
136 * - a TRANSPORT_ERROR is simulated on successful read and write
137 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
138 * This will continue until some other action occurs (e.g. the user
139 * writing a new value (other than -1 or 1) to every_nth via sysfs).
140 */
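/* For example, loading the module with "opts=4 every_nth=100"
 * (SCSI_DEBUG_OPT_TIMEOUT) should cause every 100th command to be dropped
 * without a response, which is a simple way to exercise the mid-layer's
 * command timeout and error handling paths.
 */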
142 /* When SCSI_DEBUG_OPT_MEDIUM_ERR is set in scsi_debug_opts, a medium
143 * error is simulated at this sector on read commands: */
144 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
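/* So, for example, "modprobe scsi_debug opts=2" should make any READ whose
 * range covers LBA 0x1234 fail with MEDIUM ERROR / UNRECOVERED READ ERROR
 * (see resp_read() below), while all other reads succeed.
 */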
146 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
147 * or "peripheral device" addressing (value 0) */
148 #define SAM2_LUN_ADDRESS_METHOD 0
149 #define SAM2_WLUN_REPORT_LUNS 0xc101
151 /* Can queue up to this number of commands. Typically commands
152 * that have a non-zero delay are queued. */
153 #define SCSI_DEBUG_CANQUEUE 255
155 static int scsi_debug_add_host = DEF_NUM_HOST;
156 static int scsi_debug_delay = DEF_DELAY;
157 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
158 static int scsi_debug_every_nth = DEF_EVERY_NTH;
159 static int scsi_debug_max_luns = DEF_MAX_LUNS;
160 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
161 static int scsi_debug_num_parts = DEF_NUM_PARTS;
162 static int scsi_debug_no_uld = 0;
163 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
164 static int scsi_debug_opts = DEF_OPTS;
165 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
166 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
167 static int scsi_debug_dsense = DEF_D_SENSE;
168 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
169 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
170 static int scsi_debug_fake_rw = DEF_FAKE_RW;
171 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
172 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
173 static int scsi_debug_dix = DEF_DIX;
174 static int scsi_debug_dif = DEF_DIF;
175 static int scsi_debug_guard = DEF_GUARD;
176 static int scsi_debug_ato = DEF_ATO;
177 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
178 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
179 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
180 static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
181 static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
182 static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
183 static int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
185 static int scsi_debug_cmnd_count = 0;
187 #define DEV_READONLY(TGT) (0)
188 #define DEV_REMOVEABLE(TGT) (0)
190 static unsigned int sdebug_store_sectors;
191 static sector_t sdebug_capacity; /* in sectors */
193 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
194 may still need them */
195 static int sdebug_heads; /* heads per disk */
196 static int sdebug_cylinders_per; /* cylinders per surface */
197 static int sdebug_sectors_per; /* sectors per cylinder */
199 #define SDEBUG_MAX_PARTS 4
201 #define SDEBUG_SENSE_LEN 32
203 #define SCSI_DEBUG_MAX_CMD_LEN 32
205 struct sdebug_dev_info {
206 struct list_head dev_list;
207 unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */
208 unsigned int channel;
209 unsigned int target;
210 unsigned int lun;
211 struct sdebug_host_info *sdbg_host;
212 unsigned int wlun;
213 char reset;
214 char stopped;
215 char used;
218 struct sdebug_host_info {
219 struct list_head host_list;
220 struct Scsi_Host *shost;
221 struct device dev;
222 struct list_head dev_info_list;
225 #define to_sdebug_host(d) \
226 container_of(d, struct sdebug_host_info, dev)
228 static LIST_HEAD(sdebug_host_list);
229 static DEFINE_SPINLOCK(sdebug_host_list_lock);
231 typedef void (* done_funct_t) (struct scsi_cmnd *);
233 struct sdebug_queued_cmd {
234 int in_use;
235 struct timer_list cmnd_timer;
236 done_funct_t done_funct;
237 struct scsi_cmnd * a_cmnd;
238 int scsi_result;
240 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
242 static unsigned char * fake_storep; /* ramdisk storage */
243 static unsigned char *dif_storep; /* protection info */
244 static void *map_storep; /* provisioning map */
246 static unsigned long map_size;
247 static int num_aborts = 0;
248 static int num_dev_resets = 0;
249 static int num_bus_resets = 0;
250 static int num_host_resets = 0;
251 static int dix_writes;
252 static int dix_reads;
253 static int dif_errors;
255 static DEFINE_SPINLOCK(queued_arr_lock);
256 static DEFINE_RWLOCK(atomic_rw);
258 static char sdebug_proc_name[] = "scsi_debug";
260 static struct bus_type pseudo_lld_bus;
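/* Each logical block has an 8-byte protection tuple (2-byte guard tag,
 * 2-byte application tag, 4-byte reference tag), so dif_offset() below
 * converts a sector number into a byte offset within dif_storep simply by
 * multiplying by 8 (sector << 3).
 */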
262 static inline sector_t dif_offset(sector_t sector)
264 return sector << 3;
267 static struct device_driver sdebug_driverfs_driver = {
268 .name = sdebug_proc_name,
269 .bus = &pseudo_lld_bus,
272 static const int check_condition_result =
273 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
275 static const int illegal_condition_result =
276 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
278 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
279 0, 0, 0x2, 0x4b};
280 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
281 0, 0, 0x0, 0x0};
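/* ctrl_m_pg[] and iec_m_pg[] hold the current contents of the Control mode
 * page (0x0a) and the Informational Exceptions Control mode page (0x1c).
 * resp_ctrl_m_pg() and resp_iec_m_pg() report them for MODE SENSE and
 * resp_mode_select() updates them, so these are the only mode pages this
 * driver keeps changeable state for.
 */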
283 static int sdebug_add_adapter(void);
284 static void sdebug_remove_adapter(void);
286 static void sdebug_max_tgts_luns(void)
288 struct sdebug_host_info *sdbg_host;
289 struct Scsi_Host *hpnt;
291 spin_lock(&sdebug_host_list_lock);
292 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
293 hpnt = sdbg_host->shost;
294 if ((hpnt->this_id >= 0) &&
295 (scsi_debug_num_tgts > hpnt->this_id))
296 hpnt->max_id = scsi_debug_num_tgts + 1;
297 else
298 hpnt->max_id = scsi_debug_num_tgts;
299 /* scsi_debug_max_luns; */
300 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
302 spin_unlock(&sdebug_host_list_lock);
305 static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
306 int asc, int asq)
308 unsigned char *sbuff;
310 sbuff = devip->sense_buff;
311 memset(sbuff, 0, SDEBUG_SENSE_LEN);
313 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
315 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
316 printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: "
317 "[0x%x,0x%x,0x%x]\n", key, asc, asq);
320 static void get_data_transfer_info(unsigned char *cmd,
321 unsigned long long *lba, unsigned int *num,
322 u32 *ei_lba)
324 *ei_lba = 0;
326 switch (*cmd) {
327 case VARIABLE_LENGTH_CMD:
328 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
329 (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
330 (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
331 (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
333 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
334 (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
336 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
337 (u32)cmd[28] << 24;
338 break;
340 case WRITE_SAME_16:
341 case WRITE_16:
342 case READ_16:
343 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
344 (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
345 (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
346 (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
348 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
349 (u32)cmd[10] << 24;
350 break;
351 case WRITE_12:
352 case READ_12:
353 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
354 (u32)cmd[2] << 24;
356 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
357 (u32)cmd[6] << 24;
358 break;
359 case WRITE_SAME:
360 case WRITE_10:
361 case READ_10:
362 case XDWRITEREAD_10:
363 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
364 (u32)cmd[2] << 24;
366 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
367 break;
368 case WRITE_6:
369 case READ_6:
370 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
371 (u32)(cmd[1] & 0x1f) << 16;
372 *num = (0 == cmd[4]) ? 256 : cmd[4];
373 break;
374 default:
375 break;
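/* As a concrete example of the decoding above: a READ(10) CDB (opcode 0x28)
 * carries a 4-byte big-endian LBA in bytes 2..5 and a 2-byte transfer
 * length in bytes 7..8, so cmd[] = {0x28, 0, 0x00, 0x00, 0x12, 0x34, 0,
 * 0x00, 0x08, 0} decodes to lba == 0x1234 and num == 8 blocks.
 */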
379 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
381 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
382 printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
384 return -EINVAL;
385 /* return -ENOTTY; // correct return but upsets fdisk */
388 static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
389 struct sdebug_dev_info * devip)
391 if (devip->reset) {
392 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
393 printk(KERN_INFO "scsi_debug: Reporting Unit "
394 "attention: power on reset\n");
395 devip->reset = 0;
396 mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
397 return check_condition_result;
399 if ((0 == reset_only) && devip->stopped) {
400 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
401 printk(KERN_INFO "scsi_debug: Reporting Not "
402 "ready: initializing command required\n");
403 mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
404 0x2);
405 return check_condition_result;
407 return 0;
410 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
411 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
412 int arr_len)
414 int act_len;
415 struct scsi_data_buffer *sdb = scsi_in(scp);
417 if (!sdb->length)
418 return 0;
419 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
420 return (DID_ERROR << 16);
422 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
423 arr, arr_len);
424 if (sdb->resid)
425 sdb->resid -= act_len;
426 else
427 sdb->resid = scsi_bufflen(scp) - act_len;
429 return 0;
432 /* Returns number of bytes fetched into 'arr' or -1 if error. */
433 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
434 int arr_len)
436 if (!scsi_bufflen(scp))
437 return 0;
438 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
439 return -1;
441 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
445 static const char * inq_vendor_id = "Linux ";
446 static const char * inq_product_id = "scsi_debug ";
447 static const char * inq_product_rev = "0004";
449 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
450 int target_dev_id, int dev_id_num,
451 const char * dev_id_str,
452 int dev_id_str_len)
454 int num, port_a;
455 char b[32];
457 port_a = target_dev_id + 1;
458 /* T10 vendor identifier field format (faked) */
459 arr[0] = 0x2; /* ASCII */
460 arr[1] = 0x1;
461 arr[2] = 0x0;
462 memcpy(&arr[4], inq_vendor_id, 8);
463 memcpy(&arr[12], inq_product_id, 16);
464 memcpy(&arr[28], dev_id_str, dev_id_str_len);
465 num = 8 + 16 + dev_id_str_len;
466 arr[3] = num;
467 num += 4;
468 if (dev_id_num >= 0) {
469 /* NAA-5, Logical unit identifier (binary) */
470 arr[num++] = 0x1; /* binary (not necessarily sas) */
471 arr[num++] = 0x3; /* PIV=0, lu, naa */
472 arr[num++] = 0x0;
473 arr[num++] = 0x8;
474 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
475 arr[num++] = 0x33;
476 arr[num++] = 0x33;
477 arr[num++] = 0x30;
478 arr[num++] = (dev_id_num >> 24);
479 arr[num++] = (dev_id_num >> 16) & 0xff;
480 arr[num++] = (dev_id_num >> 8) & 0xff;
481 arr[num++] = dev_id_num & 0xff;
482 /* Target relative port number */
483 arr[num++] = 0x61; /* proto=sas, binary */
484 arr[num++] = 0x94; /* PIV=1, target port, rel port */
485 arr[num++] = 0x0; /* reserved */
486 arr[num++] = 0x4; /* length */
487 arr[num++] = 0x0; /* reserved */
488 arr[num++] = 0x0; /* reserved */
489 arr[num++] = 0x0;
490 arr[num++] = 0x1; /* relative port A */
492 /* NAA-5, Target port identifier */
493 arr[num++] = 0x61; /* proto=sas, binary */
494 arr[num++] = 0x93; /* piv=1, target port, naa */
495 arr[num++] = 0x0;
496 arr[num++] = 0x8;
497 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
498 arr[num++] = 0x22;
499 arr[num++] = 0x22;
500 arr[num++] = 0x20;
501 arr[num++] = (port_a >> 24);
502 arr[num++] = (port_a >> 16) & 0xff;
503 arr[num++] = (port_a >> 8) & 0xff;
504 arr[num++] = port_a & 0xff;
505 /* NAA-5, Target port group identifier */
506 arr[num++] = 0x61; /* proto=sas, binary */
507 arr[num++] = 0x95; /* piv=1, target port group id */
508 arr[num++] = 0x0;
509 arr[num++] = 0x4;
510 arr[num++] = 0;
511 arr[num++] = 0;
512 arr[num++] = (port_group_id >> 8) & 0xff;
513 arr[num++] = port_group_id & 0xff;
514 /* NAA-5, Target device identifier */
515 arr[num++] = 0x61; /* proto=sas, binary */
516 arr[num++] = 0xa3; /* piv=1, target device, naa */
517 arr[num++] = 0x0;
518 arr[num++] = 0x8;
519 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
520 arr[num++] = 0x22;
521 arr[num++] = 0x22;
522 arr[num++] = 0x20;
523 arr[num++] = (target_dev_id >> 24);
524 arr[num++] = (target_dev_id >> 16) & 0xff;
525 arr[num++] = (target_dev_id >> 8) & 0xff;
526 arr[num++] = target_dev_id & 0xff;
527 /* SCSI name string: Target device identifier */
528 arr[num++] = 0x63; /* proto=sas, UTF-8 */
529 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
530 arr[num++] = 0x0;
531 arr[num++] = 24;
532 memcpy(arr + num, "naa.52222220", 12);
533 num += 12;
534 snprintf(b, sizeof(b), "%08X", target_dev_id);
535 memcpy(arr + num, b, 8);
536 num += 8;
537 memset(arr + num, 0, 4);
538 num += 4;
539 return num;
543 static unsigned char vpd84_data[] = {
544 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
545 0x22,0x22,0x22,0x0,0xbb,0x1,
546 0x22,0x22,0x22,0x0,0xbb,0x2,
549 static int inquiry_evpd_84(unsigned char * arr)
551 memcpy(arr, vpd84_data, sizeof(vpd84_data));
552 return sizeof(vpd84_data);
555 static int inquiry_evpd_85(unsigned char * arr)
557 int num = 0;
558 const char * na1 = "https://www.kernel.org/config";
559 const char * na2 = "http://www.kernel.org/log";
560 int plen, olen;
562 arr[num++] = 0x1; /* lu, storage config */
563 arr[num++] = 0x0; /* reserved */
564 arr[num++] = 0x0;
565 olen = strlen(na1);
566 plen = olen + 1;
567 if (plen % 4)
568 plen = ((plen / 4) + 1) * 4;
569 arr[num++] = plen; /* length, null terminated, padded */
570 memcpy(arr + num, na1, olen);
571 memset(arr + num + olen, 0, plen - olen);
572 num += plen;
574 arr[num++] = 0x4; /* lu, logging */
575 arr[num++] = 0x0; /* reserved */
576 arr[num++] = 0x0;
577 olen = strlen(na2);
578 plen = olen + 1;
579 if (plen % 4)
580 plen = ((plen / 4) + 1) * 4;
581 arr[num++] = plen; /* length, null terminated, padded */
582 memcpy(arr + num, na2, olen);
583 memset(arr + num + olen, 0, plen - olen);
584 num += plen;
586 return num;
589 /* SCSI ports VPD page */
590 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
592 int num = 0;
593 int port_a, port_b;
595 port_a = target_dev_id + 1;
596 port_b = port_a + 1;
597 arr[num++] = 0x0; /* reserved */
598 arr[num++] = 0x0; /* reserved */
599 arr[num++] = 0x0;
600 arr[num++] = 0x1; /* relative port 1 (primary) */
601 memset(arr + num, 0, 6);
602 num += 6;
603 arr[num++] = 0x0;
604 arr[num++] = 12; /* length tp descriptor */
605 /* naa-5 target port identifier (A) */
606 arr[num++] = 0x61; /* proto=sas, binary */
607 arr[num++] = 0x93; /* PIV=1, target port, NAA */
608 arr[num++] = 0x0; /* reserved */
609 arr[num++] = 0x8; /* length */
610 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
611 arr[num++] = 0x22;
612 arr[num++] = 0x22;
613 arr[num++] = 0x20;
614 arr[num++] = (port_a >> 24);
615 arr[num++] = (port_a >> 16) & 0xff;
616 arr[num++] = (port_a >> 8) & 0xff;
617 arr[num++] = port_a & 0xff;
619 arr[num++] = 0x0; /* reserved */
620 arr[num++] = 0x0; /* reserved */
621 arr[num++] = 0x0;
622 arr[num++] = 0x2; /* relative port 2 (secondary) */
623 memset(arr + num, 0, 6);
624 num += 6;
625 arr[num++] = 0x0;
626 arr[num++] = 12; /* length tp descriptor */
627 /* naa-5 target port identifier (B) */
628 arr[num++] = 0x61; /* proto=sas, binary */
629 arr[num++] = 0x93; /* PIV=1, target port, NAA */
630 arr[num++] = 0x0; /* reserved */
631 arr[num++] = 0x8; /* length */
632 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
633 arr[num++] = 0x22;
634 arr[num++] = 0x22;
635 arr[num++] = 0x20;
636 arr[num++] = (port_b >> 24);
637 arr[num++] = (port_b >> 16) & 0xff;
638 arr[num++] = (port_b >> 8) & 0xff;
639 arr[num++] = port_b & 0xff;
641 return num;
645 static unsigned char vpd89_data[] = {
646 /* from 4th byte */ 0,0,0,0,
647 'l','i','n','u','x',' ',' ',' ',
648 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
649 '1','2','3','4',
650 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
651 0xec,0,0,0,
652 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
653 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
654 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
655 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
656 0x53,0x41,
657 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
658 0x20,0x20,
659 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
660 0x10,0x80,
661 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
662 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
663 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
664 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
665 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
666 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
667 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
668 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
669 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
670 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
671 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
672 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
673 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
674 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
675 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
676 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
677 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
678 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
679 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
680 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
681 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
682 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
683 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
684 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
685 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
686 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
689 static int inquiry_evpd_89(unsigned char * arr)
691 memcpy(arr, vpd89_data, sizeof(vpd89_data));
692 return sizeof(vpd89_data);
696 /* Block limits VPD page (SBC-3) */
697 static unsigned char vpdb0_data[] = {
698 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
699 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
700 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
701 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
704 static int inquiry_evpd_b0(unsigned char * arr)
706 unsigned int gran;
708 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
710 /* Optimal transfer length granularity */
711 gran = 1 << scsi_debug_physblk_exp;
712 arr[2] = (gran >> 8) & 0xff;
713 arr[3] = gran & 0xff;
715 /* Maximum Transfer Length */
716 if (sdebug_store_sectors > 0x400) {
717 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
718 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
719 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
720 arr[7] = sdebug_store_sectors & 0xff;
723 /* Optimal Transfer Length */
724 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
726 if (scsi_debug_unmap_max_desc) {
727 unsigned int blocks;
729 if (scsi_debug_unmap_max_blocks)
730 blocks = scsi_debug_unmap_max_blocks;
731 else
732 blocks = 0xffffffff;
734 /* Maximum Unmap LBA Count */
735 put_unaligned_be32(blocks, &arr[16]);
737 /* Maximum Unmap Block Descriptor Count */
738 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
741 /* Unmap Granularity Alignment */
742 if (scsi_debug_unmap_alignment) {
743 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
744 arr[28] |= 0x80; /* UGAVALID */
747 /* Optimal Unmap Granularity */
748 if (scsi_debug_unmap_granularity) {
749 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
750 return 0x3c; /* Mandatory page length for thin provisioning */
753 return sizeof(vpdb0_data);
756 /* Block device characteristics VPD page (SBC-3) */
757 static int inquiry_evpd_b1(unsigned char *arr)
759 memset(arr, 0, 0x3c);
760 arr[0] = 0;
761 arr[1] = 1; /* non rotating medium (e.g. solid state) */
762 arr[2] = 0;
763 arr[3] = 5; /* less than 1.8" */
765 return 0x3c;
768 #define SDEBUG_LONG_INQ_SZ 96
769 #define SDEBUG_MAX_INQ_ARR_SZ 584
771 static int resp_inquiry(struct scsi_cmnd * scp, int target,
772 struct sdebug_dev_info * devip)
774 unsigned char pq_pdt;
775 unsigned char * arr;
776 unsigned char *cmd = (unsigned char *)scp->cmnd;
777 int alloc_len, n, ret;
779 alloc_len = (cmd[3] << 8) + cmd[4];
780 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
781 if (! arr)
782 return DID_REQUEUE << 16;
783 if (devip->wlun)
784 pq_pdt = 0x1e; /* present, wlun */
785 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
786 pq_pdt = 0x7f; /* not present, no device type */
787 else
788 pq_pdt = (scsi_debug_ptype & 0x1f);
789 arr[0] = pq_pdt;
790 if (0x2 & cmd[1]) { /* CMDDT bit set */
791 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
793 kfree(arr);
794 return check_condition_result;
795 } else if (0x1 & cmd[1]) { /* EVPD bit set */
796 int lu_id_num, port_group_id, target_dev_id, len;
797 char lu_id_str[6];
798 int host_no = devip->sdbg_host->shost->host_no;
800 port_group_id = (((host_no + 1) & 0x7f) << 8) +
801 (devip->channel & 0x7f);
802 if (0 == scsi_debug_vpd_use_hostno)
803 host_no = 0;
804 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
805 (devip->target * 1000) + devip->lun);
806 target_dev_id = ((host_no + 1) * 2000) +
807 (devip->target * 1000) - 3;
808 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
809 if (0 == cmd[2]) { /* supported vital product data pages */
810 arr[1] = cmd[2]; /*sanity */
811 n = 4;
812 arr[n++] = 0x0; /* this page */
813 arr[n++] = 0x80; /* unit serial number */
814 arr[n++] = 0x83; /* device identification */
815 arr[n++] = 0x84; /* software interface ident. */
816 arr[n++] = 0x85; /* management network addresses */
817 arr[n++] = 0x86; /* extended inquiry */
818 arr[n++] = 0x87; /* mode page policy */
819 arr[n++] = 0x88; /* SCSI ports */
820 arr[n++] = 0x89; /* ATA information */
821 arr[n++] = 0xb0; /* Block limits (SBC) */
822 arr[n++] = 0xb1; /* Block characteristics (SBC) */
823 arr[3] = n - 4; /* number of supported VPD pages */
824 } else if (0x80 == cmd[2]) { /* unit serial number */
825 arr[1] = cmd[2]; /*sanity */
826 arr[3] = len;
827 memcpy(&arr[4], lu_id_str, len);
828 } else if (0x83 == cmd[2]) { /* device identification */
829 arr[1] = cmd[2]; /*sanity */
830 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
831 target_dev_id, lu_id_num,
832 lu_id_str, len);
833 } else if (0x84 == cmd[2]) { /* Software interface ident. */
834 arr[1] = cmd[2]; /*sanity */
835 arr[3] = inquiry_evpd_84(&arr[4]);
836 } else if (0x85 == cmd[2]) { /* Management network addresses */
837 arr[1] = cmd[2]; /*sanity */
838 arr[3] = inquiry_evpd_85(&arr[4]);
839 } else if (0x86 == cmd[2]) { /* extended inquiry */
840 arr[1] = cmd[2]; /*sanity */
841 arr[3] = 0x3c; /* number of following entries */
842 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
843 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
844 else if (scsi_debug_dif)
845 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
846 else
847 arr[4] = 0x0; /* no protection stuff */
848 arr[5] = 0x7; /* head of q, ordered + simple q's */
849 } else if (0x87 == cmd[2]) { /* mode page policy */
850 arr[1] = cmd[2]; /*sanity */
851 arr[3] = 0x8; /* number of following entries */
852 arr[4] = 0x2; /* disconnect-reconnect mp */
853 arr[6] = 0x80; /* mlus, shared */
854 arr[8] = 0x18; /* protocol specific lu */
855 arr[10] = 0x82; /* mlus, per initiator port */
856 } else if (0x88 == cmd[2]) { /* SCSI Ports */
857 arr[1] = cmd[2]; /*sanity */
858 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
859 } else if (0x89 == cmd[2]) { /* ATA information */
860 arr[1] = cmd[2]; /*sanity */
861 n = inquiry_evpd_89(&arr[4]);
862 arr[2] = (n >> 8);
863 arr[3] = (n & 0xff);
864 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
865 arr[1] = cmd[2]; /*sanity */
866 arr[3] = inquiry_evpd_b0(&arr[4]);
867 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
868 arr[1] = cmd[2]; /*sanity */
869 arr[3] = inquiry_evpd_b1(&arr[4]);
870 } else {
871 /* Illegal request, invalid field in cdb */
872 mk_sense_buffer(devip, ILLEGAL_REQUEST,
873 INVALID_FIELD_IN_CDB, 0);
874 kfree(arr);
875 return check_condition_result;
877 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
878 ret = fill_from_dev_buffer(scp, arr,
879 min(len, SDEBUG_MAX_INQ_ARR_SZ));
880 kfree(arr);
881 return ret;
883 /* drops through here for a standard inquiry */
884 arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0; /* Removable disk */
885 arr[2] = scsi_debug_scsi_level;
886 arr[3] = 2; /* response_data_format==2 */
887 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
888 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
889 if (0 == scsi_debug_vpd_use_hostno)
890 arr[5] = 0x10; /* claim: implicit TGPS */
891 arr[6] = 0x10; /* claim: MultiP */
892 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
893 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
894 memcpy(&arr[8], inq_vendor_id, 8);
895 memcpy(&arr[16], inq_product_id, 16);
896 memcpy(&arr[32], inq_product_rev, 4);
897 /* version descriptors (2 bytes each) follow */
898 arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
899 arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */
900 n = 62;
901 if (scsi_debug_ptype == 0) {
902 arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
903 } else if (scsi_debug_ptype == 1) {
904 arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
906 arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */
907 ret = fill_from_dev_buffer(scp, arr,
908 min(alloc_len, SDEBUG_LONG_INQ_SZ));
909 kfree(arr);
910 return ret;
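/* The INQUIRY handling above is easy to exercise from user space with the
 * sg3_utils tools documented at the URL in the header comment, e.g.
 * "sg_inq /dev/sdX" for the standard response, or its VPD page options for
 * the pages 0x80-0xb1 listed in the "supported pages" branch.
 */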
913 static int resp_requests(struct scsi_cmnd * scp,
914 struct sdebug_dev_info * devip)
916 unsigned char * sbuff;
917 unsigned char *cmd = (unsigned char *)scp->cmnd;
918 unsigned char arr[SDEBUG_SENSE_LEN];
919 int want_dsense;
920 int len = 18;
922 memset(arr, 0, sizeof(arr));
923 if (devip->reset == 1)
924 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
925 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
926 sbuff = devip->sense_buff;
927 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
928 if (want_dsense) {
929 arr[0] = 0x72;
930 arr[1] = 0x0; /* NO_SENSE in sense_key */
931 arr[2] = THRESHOLD_EXCEEDED;
932 arr[3] = 0xff; /* TEST set and MRIE==6 */
933 } else {
934 arr[0] = 0x70;
935 arr[2] = 0x0; /* NO_SENSE in sense_key */
936 arr[7] = 0xa; /* 18 byte sense buffer */
937 arr[12] = THRESHOLD_EXCEEDED;
938 arr[13] = 0xff; /* TEST set and MRIE==6 */
940 } else {
941 memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
942 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
943 /* DESC bit set and sense_buff in fixed format */
944 memset(arr, 0, sizeof(arr));
945 arr[0] = 0x72;
946 arr[1] = sbuff[2]; /* sense key */
947 arr[2] = sbuff[12]; /* asc */
948 arr[3] = sbuff[13]; /* ascq */
949 len = 8;
952 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
953 return fill_from_dev_buffer(scp, arr, len);
956 static int resp_start_stop(struct scsi_cmnd * scp,
957 struct sdebug_dev_info * devip)
959 unsigned char *cmd = (unsigned char *)scp->cmnd;
960 int power_cond, errsts, start;
962 if ((errsts = check_readiness(scp, 1, devip)))
963 return errsts;
964 power_cond = (cmd[4] & 0xf0) >> 4;
965 if (power_cond) {
966 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
968 return check_condition_result;
970 start = cmd[4] & 1;
971 if (start == devip->stopped)
972 devip->stopped = !start;
973 return 0;
976 static sector_t get_sdebug_capacity(void)
978 if (scsi_debug_virtual_gb > 0)
979 return (sector_t)scsi_debug_virtual_gb *
980 (1073741824 / scsi_debug_sector_size);
981 else
982 return sdebug_store_sectors;
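/* When virtual_gb is non-zero the reported capacity may be much larger than
 * the backing ramdisk; data accesses then wrap modulo sdebug_store_sectors
 * (see do_device_access()), so the store acts as a small circular buffer
 * behind a bigger virtual device.
 */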
985 #define SDEBUG_READCAP_ARR_SZ 8
986 static int resp_readcap(struct scsi_cmnd * scp,
987 struct sdebug_dev_info * devip)
989 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
990 unsigned int capac;
991 int errsts;
993 if ((errsts = check_readiness(scp, 1, devip)))
994 return errsts;
995 /* following just in case virtual_gb changed */
996 sdebug_capacity = get_sdebug_capacity();
997 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
998 if (sdebug_capacity < 0xffffffff) {
999 capac = (unsigned int)sdebug_capacity - 1;
1000 arr[0] = (capac >> 24);
1001 arr[1] = (capac >> 16) & 0xff;
1002 arr[2] = (capac >> 8) & 0xff;
1003 arr[3] = capac & 0xff;
1004 } else {
1005 arr[0] = 0xff;
1006 arr[1] = 0xff;
1007 arr[2] = 0xff;
1008 arr[3] = 0xff;
1010 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1011 arr[7] = scsi_debug_sector_size & 0xff;
1012 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
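/* Per SBC, a returned capacity of 0xffffffff tells the initiator that the
 * last LBA does not fit in 32 bits and that READ CAPACITY(16), handled by
 * resp_readcap16() below, must be used instead.
 */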
1015 #define SDEBUG_READCAP16_ARR_SZ 32
1016 static int resp_readcap16(struct scsi_cmnd * scp,
1017 struct sdebug_dev_info * devip)
1019 unsigned char *cmd = (unsigned char *)scp->cmnd;
1020 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1021 unsigned long long capac;
1022 int errsts, k, alloc_len;
1024 if ((errsts = check_readiness(scp, 1, devip)))
1025 return errsts;
1026 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1027 + cmd[13]);
1028 /* following just in case virtual_gb changed */
1029 sdebug_capacity = get_sdebug_capacity();
1030 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1031 capac = sdebug_capacity - 1;
1032 for (k = 0; k < 8; ++k, capac >>= 8)
1033 arr[7 - k] = capac & 0xff;
1034 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1035 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1036 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1037 arr[11] = scsi_debug_sector_size & 0xff;
1038 arr[13] = scsi_debug_physblk_exp & 0xf;
1039 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1041 if (scsi_debug_unmap_granularity)
1042 arr[14] |= 0x80; /* TPE */
1044 arr[15] = scsi_debug_lowest_aligned & 0xff;
1046 if (scsi_debug_dif) {
1047 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1048 arr[12] |= 1; /* PROT_EN */
1051 return fill_from_dev_buffer(scp, arr,
1052 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1055 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1057 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1058 struct sdebug_dev_info * devip)
1060 unsigned char *cmd = (unsigned char *)scp->cmnd;
1061 unsigned char * arr;
1062 int host_no = devip->sdbg_host->shost->host_no;
1063 int n, ret, alen, rlen;
1064 int port_group_a, port_group_b, port_a, port_b;
1066 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1067 + cmd[9]);
1069 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1070 if (! arr)
1071 return DID_REQUEUE << 16;
1072 /*
1073 * EVPD page 0x88 states we have two ports, one
1074 * real and a fake port with no device connected.
1075 * So we create two port groups with one port each
1076 * and set the group with port B to unavailable.
1077 */
1078 port_a = 0x1; /* relative port A */
1079 port_b = 0x2; /* relative port B */
1080 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1081 (devip->channel & 0x7f);
1082 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1083 (devip->channel & 0x7f) + 0x80;
1085 /*
1086 * The asymmetric access state is cycled according to the host_id.
1087 */
1088 n = 4;
1089 if (0 == scsi_debug_vpd_use_hostno) {
1090 arr[n++] = host_no % 3; /* Asymm access state */
1091 arr[n++] = 0x0F; /* claim: all states are supported */
1092 } else {
1093 arr[n++] = 0x0; /* Active/Optimized path */
1094 arr[n++] = 0x01; /* claim: only support active/optimized paths */
1096 arr[n++] = (port_group_a >> 8) & 0xff;
1097 arr[n++] = port_group_a & 0xff;
1098 arr[n++] = 0; /* Reserved */
1099 arr[n++] = 0; /* Status code */
1100 arr[n++] = 0; /* Vendor unique */
1101 arr[n++] = 0x1; /* One port per group */
1102 arr[n++] = 0; /* Reserved */
1103 arr[n++] = 0; /* Reserved */
1104 arr[n++] = (port_a >> 8) & 0xff;
1105 arr[n++] = port_a & 0xff;
1106 arr[n++] = 3; /* Port unavailable */
1107 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1108 arr[n++] = (port_group_b >> 8) & 0xff;
1109 arr[n++] = port_group_b & 0xff;
1110 arr[n++] = 0; /* Reserved */
1111 arr[n++] = 0; /* Status code */
1112 arr[n++] = 0; /* Vendor unique */
1113 arr[n++] = 0x1; /* One port per group */
1114 arr[n++] = 0; /* Reserved */
1115 arr[n++] = 0; /* Reserved */
1116 arr[n++] = (port_b >> 8) & 0xff;
1117 arr[n++] = port_b & 0xff;
1119 rlen = n - 4;
1120 arr[0] = (rlen >> 24) & 0xff;
1121 arr[1] = (rlen >> 16) & 0xff;
1122 arr[2] = (rlen >> 8) & 0xff;
1123 arr[3] = rlen & 0xff;
1125 /*
1126 * Return the smallest value of either
1127 * - The allocated length
1128 * - The constructed command length
1129 * - The maximum array size
1130 */
1131 rlen = min(alen,n);
1132 ret = fill_from_dev_buffer(scp, arr,
1133 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1134 kfree(arr);
1135 return ret;
1138 /* <<Following mode page info copied from ST318451LW>> */
1140 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1141 { /* Read-Write Error Recovery page for mode_sense */
1142 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1143 5, 0, 0xff, 0xff};
1145 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1146 if (1 == pcontrol)
1147 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1148 return sizeof(err_recov_pg);
1151 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1152 { /* Disconnect-Reconnect page for mode_sense */
1153 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1154 0, 0, 0, 0, 0, 0, 0, 0};
1156 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1157 if (1 == pcontrol)
1158 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1159 return sizeof(disconnect_pg);
1162 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1163 { /* Format device page for mode_sense */
1164 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1165 0, 0, 0, 0, 0, 0, 0, 0,
1166 0, 0, 0, 0, 0x40, 0, 0, 0};
1168 memcpy(p, format_pg, sizeof(format_pg));
1169 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1170 p[11] = sdebug_sectors_per & 0xff;
1171 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1172 p[13] = scsi_debug_sector_size & 0xff;
1173 if (DEV_REMOVEABLE(target))
1174 p[20] |= 0x20; /* should agree with INQUIRY */
1175 if (1 == pcontrol)
1176 memset(p + 2, 0, sizeof(format_pg) - 2);
1177 return sizeof(format_pg);
1180 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1181 { /* Caching page for mode_sense */
1182 unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1183 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1185 memcpy(p, caching_pg, sizeof(caching_pg));
1186 if (1 == pcontrol)
1187 memset(p + 2, 0, sizeof(caching_pg) - 2);
1188 return sizeof(caching_pg);
1191 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1192 { /* Control mode page for mode_sense */
1193 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1194 0, 0, 0, 0};
1195 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1196 0, 0, 0x2, 0x4b};
1198 if (scsi_debug_dsense)
1199 ctrl_m_pg[2] |= 0x4;
1200 else
1201 ctrl_m_pg[2] &= ~0x4;
1203 if (scsi_debug_ato)
1204 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1206 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1207 if (1 == pcontrol)
1208 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1209 else if (2 == pcontrol)
1210 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1211 return sizeof(ctrl_m_pg);
1215 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1216 { /* Informational Exceptions control mode page for mode_sense */
1217 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1218 0, 0, 0x0, 0x0};
1219 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1220 0, 0, 0x0, 0x0};
1222 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1223 if (1 == pcontrol)
1224 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1225 else if (2 == pcontrol)
1226 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1227 return sizeof(iec_m_pg);
1230 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1231 { /* SAS SSP mode page - short format for mode_sense */
1232 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1233 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1235 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1236 if (1 == pcontrol)
1237 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1238 return sizeof(sas_sf_m_pg);
1242 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1243 int target_dev_id)
1244 { /* SAS phy control and discover mode page for mode_sense */
1245 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1246 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1247 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1248 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1249 0x2, 0, 0, 0, 0, 0, 0, 0,
1250 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1251 0, 0, 0, 0, 0, 0, 0, 0,
1252 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1253 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1254 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1255 0x3, 0, 0, 0, 0, 0, 0, 0,
1256 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1257 0, 0, 0, 0, 0, 0, 0, 0,
1259 int port_a, port_b;
1261 port_a = target_dev_id + 1;
1262 port_b = port_a + 1;
1263 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1264 p[20] = (port_a >> 24);
1265 p[21] = (port_a >> 16) & 0xff;
1266 p[22] = (port_a >> 8) & 0xff;
1267 p[23] = port_a & 0xff;
1268 p[48 + 20] = (port_b >> 24);
1269 p[48 + 21] = (port_b >> 16) & 0xff;
1270 p[48 + 22] = (port_b >> 8) & 0xff;
1271 p[48 + 23] = port_b & 0xff;
1272 if (1 == pcontrol)
1273 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1274 return sizeof(sas_pcd_m_pg);
1277 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1278 { /* SAS SSP shared protocol specific port mode subpage */
1279 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1280 0, 0, 0, 0, 0, 0, 0, 0,
1283 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1284 if (1 == pcontrol)
1285 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1286 return sizeof(sas_sha_m_pg);
1289 #define SDEBUG_MAX_MSENSE_SZ 256
1291 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1292 struct sdebug_dev_info * devip)
1294 unsigned char dbd, llbaa;
1295 int pcontrol, pcode, subpcode, bd_len;
1296 unsigned char dev_spec;
1297 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1298 unsigned char * ap;
1299 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1300 unsigned char *cmd = (unsigned char *)scp->cmnd;
1302 if ((errsts = check_readiness(scp, 1, devip)))
1303 return errsts;
1304 dbd = !!(cmd[1] & 0x8);
1305 pcontrol = (cmd[2] & 0xc0) >> 6;
1306 pcode = cmd[2] & 0x3f;
1307 subpcode = cmd[3];
1308 msense_6 = (MODE_SENSE == cmd[0]);
1309 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1310 if ((0 == scsi_debug_ptype) && (0 == dbd))
1311 bd_len = llbaa ? 16 : 8;
1312 else
1313 bd_len = 0;
1314 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1315 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1316 if (0x3 == pcontrol) { /* Saving values not supported */
1317 mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1319 return check_condition_result;
1321 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1322 (devip->target * 1000) - 3;
1323 /* set DPOFUA bit for disks */
1324 if (0 == scsi_debug_ptype)
1325 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1326 else
1327 dev_spec = 0x0;
1328 if (msense_6) {
1329 arr[2] = dev_spec;
1330 arr[3] = bd_len;
1331 offset = 4;
1332 } else {
1333 arr[3] = dev_spec;
1334 if (16 == bd_len)
1335 arr[4] = 0x1; /* set LONGLBA bit */
1336 arr[7] = bd_len; /* assume 255 or less */
1337 offset = 8;
1339 ap = arr + offset;
1340 if ((bd_len > 0) && (!sdebug_capacity))
1341 sdebug_capacity = get_sdebug_capacity();
1343 if (8 == bd_len) {
1344 if (sdebug_capacity > 0xfffffffe) {
1345 ap[0] = 0xff;
1346 ap[1] = 0xff;
1347 ap[2] = 0xff;
1348 ap[3] = 0xff;
1349 } else {
1350 ap[0] = (sdebug_capacity >> 24) & 0xff;
1351 ap[1] = (sdebug_capacity >> 16) & 0xff;
1352 ap[2] = (sdebug_capacity >> 8) & 0xff;
1353 ap[3] = sdebug_capacity & 0xff;
1355 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1356 ap[7] = scsi_debug_sector_size & 0xff;
1357 offset += bd_len;
1358 ap = arr + offset;
1359 } else if (16 == bd_len) {
1360 unsigned long long capac = sdebug_capacity;
1362 for (k = 0; k < 8; ++k, capac >>= 8)
1363 ap[7 - k] = capac & 0xff;
1364 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1365 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1366 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1367 ap[15] = scsi_debug_sector_size & 0xff;
1368 offset += bd_len;
1369 ap = arr + offset;
1372 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1373 /* TODO: Control Extension page */
1374 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1376 return check_condition_result;
1378 switch (pcode) {
1379 case 0x1: /* Read-Write error recovery page, direct access */
1380 len = resp_err_recov_pg(ap, pcontrol, target);
1381 offset += len;
1382 break;
1383 case 0x2: /* Disconnect-Reconnect page, all devices */
1384 len = resp_disconnect_pg(ap, pcontrol, target);
1385 offset += len;
1386 break;
1387 case 0x3: /* Format device page, direct access */
1388 len = resp_format_pg(ap, pcontrol, target);
1389 offset += len;
1390 break;
1391 case 0x8: /* Caching page, direct access */
1392 len = resp_caching_pg(ap, pcontrol, target);
1393 offset += len;
1394 break;
1395 case 0xa: /* Control Mode page, all devices */
1396 len = resp_ctrl_m_pg(ap, pcontrol, target);
1397 offset += len;
1398 break;
1399 case 0x19: /* if spc==1 then sas phy, control+discover */
1400 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1401 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1402 INVALID_FIELD_IN_CDB, 0);
1403 return check_condition_result;
1405 len = 0;
1406 if ((0x0 == subpcode) || (0xff == subpcode))
1407 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1408 if ((0x1 == subpcode) || (0xff == subpcode))
1409 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1410 target_dev_id);
1411 if ((0x2 == subpcode) || (0xff == subpcode))
1412 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1413 offset += len;
1414 break;
1415 case 0x1c: /* Informational Exceptions Mode page, all devices */
1416 len = resp_iec_m_pg(ap, pcontrol, target);
1417 offset += len;
1418 break;
1419 case 0x3f: /* Read all Mode pages */
1420 if ((0 == subpcode) || (0xff == subpcode)) {
1421 len = resp_err_recov_pg(ap, pcontrol, target);
1422 len += resp_disconnect_pg(ap + len, pcontrol, target);
1423 len += resp_format_pg(ap + len, pcontrol, target);
1424 len += resp_caching_pg(ap + len, pcontrol, target);
1425 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1426 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1427 if (0xff == subpcode) {
1428 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1429 target, target_dev_id);
1430 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1432 len += resp_iec_m_pg(ap + len, pcontrol, target);
1433 } else {
1434 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1435 INVALID_FIELD_IN_CDB, 0);
1436 return check_condition_result;
1438 offset += len;
1439 break;
1440 default:
1441 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1443 return check_condition_result;
1445 if (msense_6)
1446 arr[0] = offset - 1;
1447 else {
1448 arr[0] = ((offset - 2) >> 8) & 0xff;
1449 arr[1] = (offset - 2) & 0xff;
1451 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1454 #define SDEBUG_MAX_MSELECT_SZ 512
1456 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1457 struct sdebug_dev_info * devip)
1459 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1460 int param_len, res, errsts, mpage;
1461 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1462 unsigned char *cmd = (unsigned char *)scp->cmnd;
1464 if ((errsts = check_readiness(scp, 1, devip)))
1465 return errsts;
1466 memset(arr, 0, sizeof(arr));
1467 pf = cmd[1] & 0x10;
1468 sp = cmd[1] & 0x1;
1469 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1470 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1471 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1472 INVALID_FIELD_IN_CDB, 0);
1473 return check_condition_result;
1475 res = fetch_to_dev_buffer(scp, arr, param_len);
1476 if (-1 == res)
1477 return (DID_ERROR << 16);
1478 else if ((res < param_len) &&
1479 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1480 printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1481 " IO sent=%d bytes\n", param_len, res);
1482 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1483 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1484 if (md_len > 2) {
1485 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1486 INVALID_FIELD_IN_PARAM_LIST, 0);
1487 return check_condition_result;
1489 off = bd_len + (mselect6 ? 4 : 8);
1490 mpage = arr[off] & 0x3f;
1491 ps = !!(arr[off] & 0x80);
1492 if (ps) {
1493 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1494 INVALID_FIELD_IN_PARAM_LIST, 0);
1495 return check_condition_result;
1497 spf = !!(arr[off] & 0x40);
1498 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1499 (arr[off + 1] + 2);
1500 if ((pg_len + off) > param_len) {
1501 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1502 PARAMETER_LIST_LENGTH_ERR, 0);
1503 return check_condition_result;
1505 switch (mpage) {
1506 case 0xa: /* Control Mode page */
1507 if (ctrl_m_pg[1] == arr[off + 1]) {
1508 memcpy(ctrl_m_pg + 2, arr + off + 2,
1509 sizeof(ctrl_m_pg) - 2);
1510 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1511 return 0;
1513 break;
1514 case 0x1c: /* Informational Exceptions Mode page */
1515 if (iec_m_pg[1] == arr[off + 1]) {
1516 memcpy(iec_m_pg + 2, arr + off + 2,
1517 sizeof(iec_m_pg) - 2);
1518 return 0;
1520 break;
1521 default:
1522 break;
1524 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1525 INVALID_FIELD_IN_PARAM_LIST, 0);
1526 return check_condition_result;
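/* Only the Control (0xa) and Informational Exceptions (0x1c) mode pages are
 * accepted by MODE SELECT above; in particular, toggling the D_SENSE bit of
 * the Control page switches the driver between fixed and descriptor format
 * sense data, mirroring the dsense module parameter.
 */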
1529 static int resp_temp_l_pg(unsigned char * arr)
1531 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1532 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1535 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1536 return sizeof(temp_l_pg);
1539 static int resp_ie_l_pg(unsigned char * arr)
1541 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1544 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1545 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
1546 arr[4] = THRESHOLD_EXCEEDED;
1547 arr[5] = 0xff;
1549 return sizeof(ie_l_pg);
1552 #define SDEBUG_MAX_LSENSE_SZ 512
1554 static int resp_log_sense(struct scsi_cmnd * scp,
1555 struct sdebug_dev_info * devip)
1557 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1558 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1559 unsigned char *cmd = (unsigned char *)scp->cmnd;
1561 if ((errsts = check_readiness(scp, 1, devip)))
1562 return errsts;
1563 memset(arr, 0, sizeof(arr));
1564 ppc = cmd[1] & 0x2;
1565 sp = cmd[1] & 0x1;
1566 if (ppc || sp) {
1567 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1568 INVALID_FIELD_IN_CDB, 0);
1569 return check_condition_result;
1571 pcontrol = (cmd[2] & 0xc0) >> 6;
1572 pcode = cmd[2] & 0x3f;
1573 subpcode = cmd[3] & 0xff;
1574 alloc_len = (cmd[7] << 8) + cmd[8];
1575 arr[0] = pcode;
1576 if (0 == subpcode) {
1577 switch (pcode) {
1578 case 0x0: /* Supported log pages log page */
1579 n = 4;
1580 arr[n++] = 0x0; /* this page */
1581 arr[n++] = 0xd; /* Temperature */
1582 arr[n++] = 0x2f; /* Informational exceptions */
1583 arr[3] = n - 4;
1584 break;
1585 case 0xd: /* Temperature log page */
1586 arr[3] = resp_temp_l_pg(arr + 4);
1587 break;
1588 case 0x2f: /* Informational exceptions log page */
1589 arr[3] = resp_ie_l_pg(arr + 4);
1590 break;
1591 default:
1592 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1593 INVALID_FIELD_IN_CDB, 0);
1594 return check_condition_result;
1596 } else if (0xff == subpcode) {
1597 arr[0] |= 0x40;
1598 arr[1] = subpcode;
1599 switch (pcode) {
1600 case 0x0: /* Supported log pages and subpages log page */
1601 n = 4;
1602 arr[n++] = 0x0;
1603 arr[n++] = 0x0; /* 0,0 page */
1604 arr[n++] = 0x0;
1605 arr[n++] = 0xff; /* this page */
1606 arr[n++] = 0xd;
1607 arr[n++] = 0x0; /* Temperature */
1608 arr[n++] = 0x2f;
1609 arr[n++] = 0x0; /* Informational exceptions */
1610 arr[3] = n - 4;
1611 break;
1612 case 0xd: /* Temperature subpages */
1613 n = 4;
1614 arr[n++] = 0xd;
1615 arr[n++] = 0x0; /* Temperature */
1616 arr[3] = n - 4;
1617 break;
1618 case 0x2f: /* Informational exceptions subpages */
1619 n = 4;
1620 arr[n++] = 0x2f;
1621 arr[n++] = 0x0; /* Informational exceptions */
1622 arr[3] = n - 4;
1623 break;
1624 default:
1625 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1626 INVALID_FIELD_IN_CDB, 0);
1627 return check_condition_result;
1629 } else {
1630 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1631 INVALID_FIELD_IN_CDB, 0);
1632 return check_condition_result;
1634 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1635 return fill_from_dev_buffer(scp, arr,
1636 min(len, SDEBUG_MAX_INQ_ARR_SZ));
1639 static int check_device_access_params(struct sdebug_dev_info *devi,
1640 unsigned long long lba, unsigned int num)
1642 if (lba + num > sdebug_capacity) {
1643 mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1644 return check_condition_result;
1646 /* transfer length excessive (tie in to block limits VPD page) */
1647 if (num > sdebug_store_sectors) {
1648 mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1649 return check_condition_result;
1651 return 0;
1654 static int do_device_access(struct scsi_cmnd *scmd,
1655 struct sdebug_dev_info *devi,
1656 unsigned long long lba, unsigned int num, int write)
1658 int ret;
1659 unsigned int block, rest = 0;
1660 int (*func)(struct scsi_cmnd *, unsigned char *, int);
1662 func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
1664 block = do_div(lba, sdebug_store_sectors);
1665 if (block + num > sdebug_store_sectors)
1666 rest = block + num - sdebug_store_sectors;
1668 ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
1669 (num - rest) * scsi_debug_sector_size);
1670 if (!ret && rest)
1671 ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);
1673 return ret;
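/* do_device_access() maps the (possibly virtual) LBA onto the ramdisk with
 * do_div(lba, sdebug_store_sectors); a transfer that would run past the end
 * of the store is split and the remaining "rest" sectors wrap around to the
 * start of fake_storep.
 */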
1676 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1677 unsigned int sectors, u32 ei_lba)
1679 unsigned int i, resid;
1680 struct scatterlist *psgl;
1681 struct sd_dif_tuple *sdt;
1682 sector_t sector;
1683 sector_t tmp_sec = start_sec;
1684 void *paddr;
1686 start_sec = do_div(tmp_sec, sdebug_store_sectors);
1688 sdt = (struct sd_dif_tuple *)(dif_storep + dif_offset(start_sec));
1690 for (i = 0 ; i < sectors ; i++) {
1691 u16 csum;
1693 if (sdt[i].app_tag == 0xffff)
1694 continue;
1696 sector = start_sec + i;
1698 switch (scsi_debug_guard) {
1699 case 1:
1700 csum = ip_compute_csum(fake_storep +
1701 sector * scsi_debug_sector_size,
1702 scsi_debug_sector_size);
1703 break;
1704 case 0:
1705 csum = crc_t10dif(fake_storep +
1706 sector * scsi_debug_sector_size,
1707 scsi_debug_sector_size);
1708 csum = cpu_to_be16(csum);
1709 break;
1710 default:
1711 BUG();
1714 if (sdt[i].guard_tag != csum) {
1715 printk(KERN_ERR "%s: GUARD check failed on sector %lu" \
1716 " rcvd 0x%04x, data 0x%04x\n", __func__,
1717 (unsigned long)sector,
1718 be16_to_cpu(sdt[i].guard_tag),
1719 be16_to_cpu(csum));
1720 dif_errors++;
1721 return 0x01;
1724 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1725 be32_to_cpu(sdt[i].ref_tag) != (sector & 0xffffffff)) {
1726 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1727 __func__, (unsigned long)sector);
1728 dif_errors++;
1729 return 0x03;
1732 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1733 be32_to_cpu(sdt[i].ref_tag) != ei_lba) {
1734 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1735 __func__, (unsigned long)sector);
1736 dif_errors++;
1737 return 0x03;
1740 ei_lba++;
1743 resid = sectors * 8; /* Bytes of protection data to copy into sgl */
1744 sector = start_sec;
1746 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1747 int len = min(psgl->length, resid);
1749 paddr = kmap_atomic(sg_page(psgl), KM_IRQ0) + psgl->offset;
1750 memcpy(paddr, dif_storep + dif_offset(sector), len);
1752 sector += len >> 3;
1753 if (sector >= sdebug_store_sectors) {
1754 /* Force wrap */
1755 tmp_sec = sector;
1756 sector = do_div(tmp_sec, sdebug_store_sectors);
1758 resid -= len;
1759 kunmap_atomic(paddr, KM_IRQ0);
1762 dix_reads++;
1764 return 0;
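/* prot_verify_read() re-computes the guard tag of each protected sector
 * (ip_compute_csum() when scsi_debug_guard is 1, crc_t10dif() when it is 0),
 * checks the reference tag against the LBA (Type 1) or the expected ei_lba
 * (Type 2), and then copies the stored protection intervals into the
 * command's protection scatter-gather list for the DIX-aware initiator.
 */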
1767 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1768 unsigned int num, struct sdebug_dev_info *devip,
1769 u32 ei_lba)
1771 unsigned long iflags;
1772 int ret;
1774 ret = check_device_access_params(devip, lba, num);
1775 if (ret)
1776 return ret;
1778 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1779 (lba <= OPT_MEDIUM_ERR_ADDR) &&
1780 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1781 /* claim unrecoverable read error */
1782 mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
1784 /* set info field and valid bit for fixed descriptor */
1785 if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1786 devip->sense_buff[0] |= 0x80; /* Valid bit */
1787 ret = OPT_MEDIUM_ERR_ADDR;
1788 devip->sense_buff[3] = (ret >> 24) & 0xff;
1789 devip->sense_buff[4] = (ret >> 16) & 0xff;
1790 devip->sense_buff[5] = (ret >> 8) & 0xff;
1791 devip->sense_buff[6] = ret & 0xff;
1793 return check_condition_result;
1796 /* DIX + T10 DIF */
1797 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1798 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1800 if (prot_ret) {
1801 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1802 return illegal_condition_result;
1806 read_lock_irqsave(&atomic_rw, iflags);
1807 ret = do_device_access(SCpnt, devip, lba, num, 0);
1808 read_unlock_irqrestore(&atomic_rw, iflags);
1809 return ret;
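/* Hex/ASCII dump of one sector's contents; used when a protection check on a
 * write fails. */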
1812 void dump_sector(unsigned char *buf, int len)
1814 int i, j;
1816 printk(KERN_ERR ">>> Sector Dump <<<\n");
1818 for (i = 0 ; i < len ; i += 16) {
1819 printk(KERN_ERR "%04d: ", i);
1821 for (j = 0 ; j < 16 ; j++) {
1822 unsigned char c = buf[i+j];
1823 if (c >= 0x20 && c < 0x7e)
1824 printk(" %c ", buf[i+j]);
1825 else
1826 printk("%02x ", buf[i+j]);
1829 printk("\n");
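/* For a protected WRITE: walk the data and protection scatter-gather lists in
 * lockstep, verify the guard and reference tags of every incoming sector, and
 * only then commit each 8-byte tuple to dif_storep. Returns 0 on success,
 * otherwise an ASCQ value for the failed check. */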
1833 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1834 unsigned int sectors, u32 ei_lba)
1836 int i, j, ret;
1837 struct sd_dif_tuple *sdt;
1838 struct scatterlist *dsgl = scsi_sglist(SCpnt);
1839 struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1840 void *daddr, *paddr;
1841 sector_t tmp_sec = start_sec;
1842 sector_t sector;
1843 int ppage_offset;
1844 unsigned short csum;
1846 sector = do_div(tmp_sec, sdebug_store_sectors);
1848 BUG_ON(scsi_sg_count(SCpnt) == 0);
1849 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1851 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) + psgl->offset;
1852 ppage_offset = 0;
1854 /* For each data page */
1855 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1856 daddr = kmap_atomic(sg_page(dsgl), KM_IRQ0) + dsgl->offset;
1858 /* For each sector-sized chunk in data page */
1859 for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
1861 /* If we're at the end of the current
1862 * protection page, advance to the next one */
1864 if (ppage_offset >= psgl->length) {
1865 kunmap_atomic(paddr, KM_IRQ1);
1866 psgl = sg_next(psgl);
1867 BUG_ON(psgl == NULL);
1868 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1)
1869 + psgl->offset;
1870 ppage_offset = 0;
1873 sdt = paddr + ppage_offset;
1875 switch (scsi_debug_guard) {
1876 case 1:
1877 csum = ip_compute_csum(daddr,
1878 scsi_debug_sector_size);
1879 break;
1880 case 0:
1881 csum = cpu_to_be16(crc_t10dif(daddr,
1882 scsi_debug_sector_size));
1883 break;
1884 default:
1885 BUG();
1886 ret = 0;
1887 goto out;
1890 if (sdt->guard_tag != csum) {
1891 printk(KERN_ERR
1892 "%s: GUARD check failed on sector %lu " \
1893 "rcvd 0x%04x, calculated 0x%04x\n",
1894 __func__, (unsigned long)sector,
1895 be16_to_cpu(sdt->guard_tag),
1896 be16_to_cpu(csum));
1897 ret = 0x01;
1898 dump_sector(daddr, scsi_debug_sector_size);
1899 goto out;
1902 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1903 be32_to_cpu(sdt->ref_tag)
1904 != (start_sec & 0xffffffff)) {
1905 printk(KERN_ERR
1906 "%s: REF check failed on sector %lu\n",
1907 __func__, (unsigned long)sector);
1908 ret = 0x03;
1909 dump_sector(daddr, scsi_debug_sector_size);
1910 goto out;
1913 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1914 be32_to_cpu(sdt->ref_tag) != ei_lba) {
1915 printk(KERN_ERR
1916 "%s: REF check failed on sector %lu\n",
1917 __func__, (unsigned long)sector);
1918 ret = 0x03;
1919 dump_sector(daddr, scsi_debug_sector_size);
1920 goto out;
1923 /* Would be great to copy this in bigger
1924 * chunks. However, for the sake of
1925 * correctness we need to verify each sector
1926 * before writing it to "stable" storage */
1928 memcpy(dif_storep + dif_offset(sector), sdt, 8);
1930 sector++;
1932 if (sector == sdebug_store_sectors)
1933 sector = 0; /* Force wrap */
1935 start_sec++;
1936 ei_lba++;
1937 daddr += scsi_debug_sector_size;
1938 ppage_offset += sizeof(struct sd_dif_tuple);
1941 kunmap_atomic(daddr, KM_IRQ0);
1944 kunmap_atomic(paddr, KM_IRQ1);
1946 dix_writes++;
1948 return 0;
1950 out:
1951 dif_errors++;
1952 kunmap_atomic(daddr, KM_IRQ0);
1953 kunmap_atomic(paddr, KM_IRQ1);
1954 return ret;
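/* Thin provisioning bitmap helpers. map_storep holds one bit per
 * unmap_granularity-sized block. map_state() reports whether the block
 * containing 'lba' is mapped and, via 'num', how many following sectors
 * share that state. */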
1957 static unsigned int map_state(sector_t lba, unsigned int *num)
1959 unsigned int granularity, alignment, mapped;
1960 sector_t block, next, end;
1962 granularity = scsi_debug_unmap_granularity;
1963 alignment = granularity - scsi_debug_unmap_alignment;
1964 block = lba + alignment;
1965 do_div(block, granularity);
1967 mapped = test_bit(block, map_storep);
1969 if (mapped)
1970 next = find_next_zero_bit(map_storep, map_size, block);
1971 else
1972 next = find_next_bit(map_storep, map_size, block);
1974 end = next * granularity - scsi_debug_unmap_alignment;
1975 *num = end - lba;
1977 return mapped;
1980 static void map_region(sector_t lba, unsigned int len)
1982 unsigned int granularity, alignment;
1983 sector_t end = lba + len;
1985 granularity = scsi_debug_unmap_granularity;
1986 alignment = granularity - scsi_debug_unmap_alignment;
1988 while (lba < end) {
1989 sector_t block, rem;
1991 block = lba + alignment;
1992 rem = do_div(block, granularity);
1994 set_bit(block, map_storep);
1996 lba += granularity - rem;
2000 static void unmap_region(sector_t lba, unsigned int len)
2002 unsigned int granularity, alignment;
2003 sector_t end = lba + len;
2005 granularity = scsi_debug_unmap_granularity;
2006 alignment = granularity - scsi_debug_unmap_alignment;
2008 while (lba < end) {
2009 sector_t block, rem;
2011 block = lba + alignment;
2012 rem = do_div(block, granularity);
2014 if (rem == 0 && lba + granularity <= end)
2015 clear_bit(block, map_storep);
2017 lba += granularity - rem;
2021 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2022 unsigned int num, struct sdebug_dev_info *devip,
2023 u32 ei_lba)
2025 unsigned long iflags;
2026 int ret;
2028 ret = check_device_access_params(devip, lba, num);
2029 if (ret)
2030 return ret;
2032 /* DIX + T10 DIF */
2033 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2034 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2036 if (prot_ret) {
2037 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2038 return illegal_condition_result;
2042 write_lock_irqsave(&atomic_rw, iflags);
2043 ret = do_device_access(SCpnt, devip, lba, num, 1);
2044 if (scsi_debug_unmap_granularity)
2045 map_region(lba, num);
2046 write_unlock_irqrestore(&atomic_rw, iflags);
2047 if (-1 == ret)
2048 return (DID_ERROR << 16);
2049 else if ((ret < (num * scsi_debug_sector_size)) &&
2050 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2051 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2052 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2054 return 0;
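/* WRITE SAME: with the UNMAP bit set, simply deallocate the range; otherwise
 * fetch one logical block from the initiator and replicate it across the
 * remaining blocks of the range. */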
2057 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2058 unsigned int num, struct sdebug_dev_info *devip,
2059 u32 ei_lba, unsigned int unmap)
2061 unsigned long iflags;
2062 unsigned long long i;
2063 int ret;
2065 ret = check_device_access_params(devip, lba, num);
2066 if (ret)
2067 return ret;
2069 write_lock_irqsave(&atomic_rw, iflags);
2071 if (unmap && scsi_debug_unmap_granularity) {
2072 unmap_region(lba, num);
2073 goto out;
2076 /* Else fetch one logical block */
2077 ret = fetch_to_dev_buffer(scmd,
2078 fake_storep + (lba * scsi_debug_sector_size),
2079 scsi_debug_sector_size);
2081 if (-1 == ret) {
2082 write_unlock_irqrestore(&atomic_rw, iflags);
2083 return (DID_ERROR << 16);
2084 } else if ((ret < (num * scsi_debug_sector_size)) &&
2085 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2086 printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2087 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2089 /* Copy first sector to remaining blocks */
2090 for (i = 1 ; i < num ; i++)
2091 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2092 fake_storep + (lba * scsi_debug_sector_size),
2093 scsi_debug_sector_size);
2095 if (scsi_debug_unmap_granularity)
2096 map_region(lba, num);
2097 out:
2098 write_unlock_irqrestore(&atomic_rw, iflags);
2100 return 0;
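/* UNMAP: parse the parameter list's block descriptors (layout below) and
 * deallocate each LBA range in the provisioning bitmap. */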
2103 struct unmap_block_desc {
2104 __be64 lba;
2105 __be32 blocks;
2106 __be32 __reserved;
2109 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2111 unsigned char *buf;
2112 struct unmap_block_desc *desc;
2113 unsigned int i, payload_len, descriptors;
2114 int ret;
2116 ret = check_readiness(scmd, 1, devip);
2117 if (ret)
2118 return ret;
2120 payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2121 BUG_ON(scsi_bufflen(scmd) != payload_len);
2123 descriptors = (payload_len - 8) / 16;
2125 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2126 if (!buf)
2127 return check_condition_result;
2129 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2131 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2132 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2134 desc = (void *)&buf[8];
2136 for (i = 0 ; i < descriptors ; i++) {
2137 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2138 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2140 ret = check_device_access_params(devip, lba, num);
2141 if (ret)
2142 goto out;
2144 unmap_region(lba, num);
2147 ret = 0;
2149 out:
2150 kfree(buf);
2152 return ret;
2155 #define SDEBUG_GET_LBA_STATUS_LEN 32
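/* GET LBA STATUS: return a single descriptor telling the initiator whether
 * the block containing the requested LBA is mapped or deallocated, and how
 * many following blocks share that state. */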
2157 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2158 struct sdebug_dev_info * devip)
2160 unsigned long long lba;
2161 unsigned int alloc_len, mapped, num;
2162 unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2163 int ret;
2165 ret = check_readiness(scmd, 1, devip);
2166 if (ret)
2167 return ret;
2169 lba = get_unaligned_be64(&scmd->cmnd[2]);
2170 alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2172 if (alloc_len < 24)
2173 return 0;
2175 ret = check_device_access_params(devip, lba, 1);
2176 if (ret)
2177 return ret;
2179 mapped = map_state(lba, &num);
2181 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2182 put_unaligned_be32(16, &arr[0]); /* Parameter Data Length */
2183 put_unaligned_be64(lba, &arr[8]); /* LBA */
2184 put_unaligned_be32(num, &arr[16]); /* Number of blocks */
2185 arr[20] = !mapped; /* mapped = 0, unmapped = 1 */
2187 return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2190 #define SDEBUG_RLUN_ARR_SZ 256
2192 static int resp_report_luns(struct scsi_cmnd * scp,
2193 struct sdebug_dev_info * devip)
2195 unsigned int alloc_len;
2196 int lun_cnt, i, upper, num, n, wlun, lun;
2197 unsigned char *cmd = (unsigned char *)scp->cmnd;
2198 int select_report = (int)cmd[2];
2199 struct scsi_lun *one_lun;
2200 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2201 unsigned char * max_addr;
2203 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2204 if ((alloc_len < 4) || (select_report > 2)) {
2205 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2207 return check_condition_result;
2209 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2210 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2211 lun_cnt = scsi_debug_max_luns;
2212 if (1 == select_report)
2213 lun_cnt = 0;
2214 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2215 --lun_cnt;
2216 wlun = (select_report > 0) ? 1 : 0;
2217 num = lun_cnt + wlun;
2218 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2219 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2220 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2221 sizeof(struct scsi_lun)), num);
2222 if (n < num) {
2223 wlun = 0;
2224 lun_cnt = n;
2226 one_lun = (struct scsi_lun *) &arr[8];
2227 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2228 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2229 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2230 i++, lun++) {
2231 upper = (lun >> 8) & 0x3f;
2232 if (upper)
2233 one_lun[i].scsi_lun[0] =
2234 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2235 one_lun[i].scsi_lun[1] = lun & 0xff;
2237 if (wlun) {
2238 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2239 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2240 i++;
2242 alloc_len = (unsigned char *)(one_lun + i) - arr;
2243 return fill_from_dev_buffer(scp, arr,
2244 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
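/* XDWRITEREAD: XOR the data received from the initiator into the buffer that
 * will be returned on the bidirectional (data-in) side of the command. */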
2247 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2248 unsigned int num, struct sdebug_dev_info *devip)
2250 int i, j, ret = -1;
2251 unsigned char *kaddr, *buf;
2252 unsigned int offset;
2253 struct scatterlist *sg;
2254 struct scsi_data_buffer *sdb = scsi_in(scp);
2256 /* it would be better not to use a temporary buffer here */
2257 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2258 if (!buf)
2259 return ret;
2261 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2263 offset = 0;
2264 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2265 kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
2266 if (!kaddr)
2267 goto out;
2269 for (j = 0; j < sg->length; j++)
2270 *(kaddr + sg->offset + j) ^= *(buf + offset + j);
2272 offset += sg->length;
2273 kunmap_atomic(kaddr, KM_USER0);
2275 ret = 0;
2276 out:
2277 kfree(buf);
2279 return ret;
2282 /* When timer goes off this function is called. */
2283 static void timer_intr_handler(unsigned long indx)
2285 struct sdebug_queued_cmd * sqcp;
2286 unsigned long iflags;
2288 if (indx >= scsi_debug_max_queue) {
2289 printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2290 "large\n");
2291 return;
2293 spin_lock_irqsave(&queued_arr_lock, iflags);
2294 sqcp = &queued_arr[(int)indx];
2295 if (! sqcp->in_use) {
2296 printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2297 "interrupt\n");
2298 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2299 return;
2301 sqcp->in_use = 0;
2302 if (sqcp->done_funct) {
2303 sqcp->a_cmnd->result = sqcp->scsi_result;
2304 sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2306 sqcp->done_funct = NULL;
2307 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2311 static struct sdebug_dev_info *
2312 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2314 struct sdebug_dev_info *devip;
2316 devip = kzalloc(sizeof(*devip), flags);
2317 if (devip) {
2318 devip->sdbg_host = sdbg_host;
2319 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2321 return devip;
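/* Find the sdebug_dev_info matching this scsi_device, reusing an unused slot
 * or allocating a new one if none matches. Also primes the per-device sense
 * buffer in fixed or descriptor format. */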
2324 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2326 struct sdebug_host_info * sdbg_host;
2327 struct sdebug_dev_info * open_devip = NULL;
2328 struct sdebug_dev_info * devip =
2329 (struct sdebug_dev_info *)sdev->hostdata;
2331 if (devip)
2332 return devip;
2333 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2334 if (!sdbg_host) {
2335 printk(KERN_ERR "Host info NULL\n");
2336 return NULL;
2338 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2339 if ((devip->used) && (devip->channel == sdev->channel) &&
2340 (devip->target == sdev->id) &&
2341 (devip->lun == sdev->lun))
2342 return devip;
2343 else {
2344 if ((!devip->used) && (!open_devip))
2345 open_devip = devip;
2348 if (!open_devip) { /* try and make a new one */
2349 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2350 if (!open_devip) {
2351 printk(KERN_ERR "%s: out of memory at line %d\n",
2352 __func__, __LINE__);
2353 return NULL;
2357 open_devip->channel = sdev->channel;
2358 open_devip->target = sdev->id;
2359 open_devip->lun = sdev->lun;
2360 open_devip->sdbg_host = sdbg_host;
2361 open_devip->reset = 1;
2362 open_devip->used = 1;
2363 memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
2364 if (scsi_debug_dsense)
2365 open_devip->sense_buff[0] = 0x72;
2366 else {
2367 open_devip->sense_buff[0] = 0x70;
2368 open_devip->sense_buff[7] = 0xa;
2370 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2371 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2373 return open_devip;
2376 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2378 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2379 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2380 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2381 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2382 return 0;
2385 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2387 struct sdebug_dev_info *devip;
2389 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2390 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2391 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2392 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2393 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2394 devip = devInfoReg(sdp);
2395 if (NULL == devip)
2396 return 1; /* no resources, will be marked offline */
2397 sdp->hostdata = devip;
2398 if (sdp->host->cmd_per_lun)
2399 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2400 sdp->host->cmd_per_lun);
2401 blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2402 if (scsi_debug_no_uld)
2403 sdp->no_uld_attach = 1;
2404 return 0;
2407 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2409 struct sdebug_dev_info *devip =
2410 (struct sdebug_dev_info *)sdp->hostdata;
2412 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2413 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2414 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2415 if (devip) {
2416 /* make this slot available for re-use */
2417 devip->used = 0;
2418 sdp->hostdata = NULL;
2422 /* Returns 1 if 'cmnd' was found and its timer deleted, else returns 0 */
2423 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2425 unsigned long iflags;
2426 int k;
2427 struct sdebug_queued_cmd *sqcp;
2429 spin_lock_irqsave(&queued_arr_lock, iflags);
2430 for (k = 0; k < scsi_debug_max_queue; ++k) {
2431 sqcp = &queued_arr[k];
2432 if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2433 del_timer_sync(&sqcp->cmnd_timer);
2434 sqcp->in_use = 0;
2435 sqcp->a_cmnd = NULL;
2436 break;
2439 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2440 return (k < scsi_debug_max_queue) ? 1 : 0;
2443 /* Deletes (stops) timers of all queued commands */
2444 static void stop_all_queued(void)
2446 unsigned long iflags;
2447 int k;
2448 struct sdebug_queued_cmd *sqcp;
2450 spin_lock_irqsave(&queued_arr_lock, iflags);
2451 for (k = 0; k < scsi_debug_max_queue; ++k) {
2452 sqcp = &queued_arr[k];
2453 if (sqcp->in_use && sqcp->a_cmnd) {
2454 del_timer_sync(&sqcp->cmnd_timer);
2455 sqcp->in_use = 0;
2456 sqcp->a_cmnd = NULL;
2459 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2462 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2464 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2465 printk(KERN_INFO "scsi_debug: abort\n");
2466 ++num_aborts;
2467 stop_queued_cmnd(SCpnt);
2468 return SUCCESS;
2471 static int scsi_debug_biosparam(struct scsi_device *sdev,
2472 struct block_device * bdev, sector_t capacity, int *info)
2474 int res;
2475 unsigned char *buf;
2477 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2478 printk(KERN_INFO "scsi_debug: biosparam\n");
2479 buf = scsi_bios_ptable(bdev);
2480 if (buf) {
2481 res = scsi_partsize(buf, capacity,
2482 &info[2], &info[0], &info[1]);
2483 kfree(buf);
2484 if (! res)
2485 return res;
2487 info[0] = sdebug_heads;
2488 info[1] = sdebug_sectors_per;
2489 info[2] = sdebug_cylinders_per;
2490 return 0;
2493 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2495 struct sdebug_dev_info * devip;
2497 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2498 printk(KERN_INFO "scsi_debug: device_reset\n");
2499 ++num_dev_resets;
2500 if (SCpnt) {
2501 devip = devInfoReg(SCpnt->device);
2502 if (devip)
2503 devip->reset = 1;
2505 return SUCCESS;
2508 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2510 struct sdebug_host_info *sdbg_host;
2511 struct sdebug_dev_info * dev_info;
2512 struct scsi_device * sdp;
2513 struct Scsi_Host * hp;
2515 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2516 printk(KERN_INFO "scsi_debug: bus_reset\n");
2517 ++num_bus_resets;
2518 if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2519 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2520 if (sdbg_host) {
2521 list_for_each_entry(dev_info,
2522 &sdbg_host->dev_info_list,
2523 dev_list)
2524 dev_info->reset = 1;
2527 return SUCCESS;
2530 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2532 struct sdebug_host_info * sdbg_host;
2533 struct sdebug_dev_info * dev_info;
2535 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2536 printk(KERN_INFO "scsi_debug: host_reset\n");
2537 ++num_host_resets;
2538 spin_lock(&sdebug_host_list_lock);
2539 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2540 list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2541 dev_list)
2542 dev_info->reset = 1;
2544 spin_unlock(&sdebug_host_list_lock);
2545 stop_all_queued();
2546 return SUCCESS;
2549 /* Initializes timers in queued array */
2550 static void __init init_all_queued(void)
2552 unsigned long iflags;
2553 int k;
2554 struct sdebug_queued_cmd * sqcp;
2556 spin_lock_irqsave(&queued_arr_lock, iflags);
2557 for (k = 0; k < scsi_debug_max_queue; ++k) {
2558 sqcp = &queued_arr[k];
2559 init_timer(&sqcp->cmnd_timer);
2560 sqcp->in_use = 0;
2561 sqcp->a_cmnd = NULL;
2563 spin_unlock_irqrestore(&queued_arr_lock, iflags);
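/* Write a DOS-style partition table into the first sector of the ram disk,
 * dividing the remaining space into scsi_debug_num_parts roughly equal,
 * cylinder-aligned partitions. */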
2566 static void __init sdebug_build_parts(unsigned char *ramp,
2567 unsigned long store_size)
2569 struct partition * pp;
2570 int starts[SDEBUG_MAX_PARTS + 2];
2571 int sectors_per_part, num_sectors, k;
2572 int heads_by_sects, start_sec, end_sec;
2574 /* assume partition table already zeroed */
2575 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2576 return;
2577 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2578 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2579 printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2580 "partitions to %d\n", SDEBUG_MAX_PARTS);
2582 num_sectors = (int)sdebug_store_sectors;
2583 sectors_per_part = (num_sectors - sdebug_sectors_per)
2584 / scsi_debug_num_parts;
2585 heads_by_sects = sdebug_heads * sdebug_sectors_per;
2586 starts[0] = sdebug_sectors_per;
2587 for (k = 1; k < scsi_debug_num_parts; ++k)
2588 starts[k] = ((k * sectors_per_part) / heads_by_sects)
2589 * heads_by_sects;
2590 starts[scsi_debug_num_parts] = num_sectors;
2591 starts[scsi_debug_num_parts + 1] = 0;
2593 ramp[510] = 0x55; /* magic partition markings */
2594 ramp[511] = 0xAA;
2595 pp = (struct partition *)(ramp + 0x1be);
2596 for (k = 0; starts[k + 1]; ++k, ++pp) {
2597 start_sec = starts[k];
2598 end_sec = starts[k + 1] - 1;
2599 pp->boot_ind = 0;
2601 pp->cyl = start_sec / heads_by_sects;
2602 pp->head = (start_sec - (pp->cyl * heads_by_sects))
2603 / sdebug_sectors_per;
2604 pp->sector = (start_sec % sdebug_sectors_per) + 1;
2606 pp->end_cyl = end_sec / heads_by_sects;
2607 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2608 / sdebug_sectors_per;
2609 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2611 pp->start_sect = start_sec;
2612 pp->nr_sects = end_sec - start_sec + 1;
2613 pp->sys_ind = 0x83; /* plain Linux partition */
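/* Complete the command immediately when delta_jiff <= 0, otherwise park it in
 * queued_arr[] and arm a timer so the response arrives after the configured
 * delay. Returns non-zero to report a busy (queue full) condition to the mid
 * level. */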
2617 static int schedule_resp(struct scsi_cmnd * cmnd,
2618 struct sdebug_dev_info * devip,
2619 done_funct_t done, int scsi_result, int delta_jiff)
2621 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2622 if (scsi_result) {
2623 struct scsi_device * sdp = cmnd->device;
2625 printk(KERN_INFO "scsi_debug: <%u %u %u %u> "
2626 "non-zero result=0x%x\n", sdp->host->host_no,
2627 sdp->channel, sdp->id, sdp->lun, scsi_result);
2630 if (cmnd && devip) {
2631 /* simulate autosense by this driver */
2632 if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2633 memcpy(cmnd->sense_buffer, devip->sense_buff,
2634 (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2635 SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
2637 if (delta_jiff <= 0) {
2638 if (cmnd)
2639 cmnd->result = scsi_result;
2640 if (done)
2641 done(cmnd);
2642 return 0;
2643 } else {
2644 unsigned long iflags;
2645 int k;
2646 struct sdebug_queued_cmd * sqcp = NULL;
2648 spin_lock_irqsave(&queued_arr_lock, iflags);
2649 for (k = 0; k < scsi_debug_max_queue; ++k) {
2650 sqcp = &queued_arr[k];
2651 if (! sqcp->in_use)
2652 break;
2654 if (k >= scsi_debug_max_queue) {
2655 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2656 printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2657 return 1; /* report busy to mid level */
2659 sqcp->in_use = 1;
2660 sqcp->a_cmnd = cmnd;
2661 sqcp->scsi_result = scsi_result;
2662 sqcp->done_funct = done;
2663 sqcp->cmnd_timer.function = timer_intr_handler;
2664 sqcp->cmnd_timer.data = k;
2665 sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2666 add_timer(&sqcp->cmnd_timer);
2667 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2668 if (cmnd)
2669 cmnd->result = 0;
2670 return 0;
2673 /* Note: The following macros create attribute files in the
2674 /sys/module/scsi_debug/parameters directory. Unfortunately this
2675 driver is not notified of a change and cannot trigger auxiliary actions
2676 as it can when the corresponding attribute in the
2677 /sys/bus/pseudo/drivers/scsi_debug directory is changed. */
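/* For example, "echo 1 > /sys/bus/pseudo/drivers/scsi_debug/opts" goes through
 * sdebug_opts_store() below, which also resets the command counter, whereas
 * writing the same value via /sys/module/scsi_debug/parameters/opts would
 * only change the variable. */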
2679 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2680 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2681 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2682 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2683 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2684 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2685 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2686 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2687 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2688 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2689 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2690 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2691 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2692 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2693 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2694 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2695 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2696 S_IRUGO | S_IWUSR);
2697 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2698 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2699 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2700 module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2701 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2702 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2703 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2704 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2705 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2706 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2707 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2708 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2710 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2711 MODULE_DESCRIPTION("SCSI debug adapter driver");
2712 MODULE_LICENSE("GPL");
2713 MODULE_VERSION(SCSI_DEBUG_VERSION);
2715 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2716 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2717 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2718 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2719 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2720 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2721 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2722 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2723 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2724 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
2725 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2726 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2727 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2728 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2729 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2730 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2731 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2732 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2733 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2734 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
2735 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2736 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2737 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2738 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2739 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2740 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0)");
2741 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=0)");
2742 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=0)");
2743 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2745 static char sdebug_info[256];
2747 static const char * scsi_debug_info(struct Scsi_Host * shp)
2749 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2750 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2751 scsi_debug_version_date, scsi_debug_dev_size_mb,
2752 scsi_debug_opts);
2753 return sdebug_info;
2756 /* scsi_debug_proc_info
2757 * Used when the driver has no support of its own for /proc/scsi */
2759 static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
2760 int length, int inout)
2762 int len, pos, begin;
2763 int orig_length;
2765 orig_length = length;
2767 if (inout == 1) {
2768 char arr[16];
2769 int minLen = length > 15 ? 15 : length;
2771 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2772 return -EACCES;
2773 memcpy(arr, buffer, minLen);
2774 arr[minLen] = '\0';
2775 if (1 != sscanf(arr, "%d", &pos))
2776 return -EINVAL;
2777 scsi_debug_opts = pos;
2778 if (scsi_debug_every_nth != 0)
2779 scsi_debug_cmnd_count = 0;
2780 return length;
2782 begin = 0;
2783 pos = len = sprintf(buffer, "scsi_debug adapter driver, version "
2784 "%s [%s]\n"
2785 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2786 "every_nth=%d(curr:%d)\n"
2787 "delay=%d, max_luns=%d, scsi_level=%d\n"
2788 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2789 "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2790 "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2791 SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2792 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2793 scsi_debug_cmnd_count, scsi_debug_delay,
2794 scsi_debug_max_luns, scsi_debug_scsi_level,
2795 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2796 sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2797 num_host_resets, dix_reads, dix_writes, dif_errors);
2798 if (pos < offset) {
2799 len = 0;
2800 begin = pos;
2802 *start = buffer + (offset - begin); /* Start of wanted data */
2803 len -= (offset - begin);
2804 if (len > length)
2805 len = length;
2806 return len;
2809 static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
2811 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2814 static ssize_t sdebug_delay_store(struct device_driver * ddp,
2815 const char * buf, size_t count)
2817 int delay;
2818 char work[20];
2820 if (1 == sscanf(buf, "%10s", work)) {
2821 if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2822 scsi_debug_delay = delay;
2823 return count;
2826 return -EINVAL;
2828 DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show,
2829 sdebug_delay_store);
2831 static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf)
2833 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2836 static ssize_t sdebug_opts_store(struct device_driver * ddp,
2837 const char * buf, size_t count)
2839 int opts;
2840 char work[20];
2842 if (1 == sscanf(buf, "%10s", work)) {
2843 if (0 == strnicmp(work,"0x", 2)) {
2844 if (1 == sscanf(&work[2], "%x", &opts))
2845 goto opts_done;
2846 } else {
2847 if (1 == sscanf(work, "%d", &opts))
2848 goto opts_done;
2851 return -EINVAL;
2852 opts_done:
2853 scsi_debug_opts = opts;
2854 scsi_debug_cmnd_count = 0;
2855 return count;
2857 DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show,
2858 sdebug_opts_store);
2860 static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf)
2862 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2864 static ssize_t sdebug_ptype_store(struct device_driver * ddp,
2865 const char * buf, size_t count)
2867 int n;
2869 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2870 scsi_debug_ptype = n;
2871 return count;
2873 return -EINVAL;
2875 DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store);
2877 static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf)
2879 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2881 static ssize_t sdebug_dsense_store(struct device_driver * ddp,
2882 const char * buf, size_t count)
2884 int n;
2886 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2887 scsi_debug_dsense = n;
2888 return count;
2890 return -EINVAL;
2892 DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
2893 sdebug_dsense_store);
2895 static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
2897 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2899 static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
2900 const char * buf, size_t count)
2902 int n;
2904 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2905 scsi_debug_fake_rw = n;
2906 return count;
2908 return -EINVAL;
2910 DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
2911 sdebug_fake_rw_store);
2913 static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
2915 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2917 static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
2918 const char * buf, size_t count)
2920 int n;
2922 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2923 scsi_debug_no_lun_0 = n;
2924 return count;
2926 return -EINVAL;
2928 DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
2929 sdebug_no_lun_0_store);
2931 static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
2933 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
2935 static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
2936 const char * buf, size_t count)
2938 int n;
2940 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2941 scsi_debug_num_tgts = n;
2942 sdebug_max_tgts_luns();
2943 return count;
2945 return -EINVAL;
2947 DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show,
2948 sdebug_num_tgts_store);
2950 static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf)
2952 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
2954 DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL);
2956 static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf)
2958 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
2960 DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL);
2962 static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf)
2964 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
2966 static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
2967 const char * buf, size_t count)
2969 int nth;
2971 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
2972 scsi_debug_every_nth = nth;
2973 scsi_debug_cmnd_count = 0;
2974 return count;
2976 return -EINVAL;
2978 DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show,
2979 sdebug_every_nth_store);
2981 static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf)
2983 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
2985 static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
2986 const char * buf, size_t count)
2988 int n;
2990 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2991 scsi_debug_max_luns = n;
2992 sdebug_max_tgts_luns();
2993 return count;
2995 return -EINVAL;
2997 DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
2998 sdebug_max_luns_store);
3000 static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
3002 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3004 static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
3005 const char * buf, size_t count)
3007 int n;
3009 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3010 (n <= SCSI_DEBUG_CANQUEUE)) {
3011 scsi_debug_max_queue = n;
3012 return count;
3014 return -EINVAL;
3016 DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
3017 sdebug_max_queue_store);
3019 static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
3021 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3023 DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);
3025 static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
3027 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3029 DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);
3031 static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
3033 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3035 static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
3036 const char * buf, size_t count)
3038 int n;
3040 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3041 scsi_debug_virtual_gb = n;
3043 sdebug_capacity = get_sdebug_capacity();
3045 return count;
3047 return -EINVAL;
3049 DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
3050 sdebug_virtual_gb_store);
3052 static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
3054 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3057 static ssize_t sdebug_add_host_store(struct device_driver * ddp,
3058 const char * buf, size_t count)
3060 int delta_hosts;
3062 if (sscanf(buf, "%d", &delta_hosts) != 1)
3063 return -EINVAL;
3064 if (delta_hosts > 0) {
3065 do {
3066 sdebug_add_adapter();
3067 } while (--delta_hosts);
3068 } else if (delta_hosts < 0) {
3069 do {
3070 sdebug_remove_adapter();
3071 } while (++delta_hosts);
3073 return count;
3075 DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
3076 sdebug_add_host_store);
3078 static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
3079 char * buf)
3081 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3083 static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
3084 const char * buf, size_t count)
3086 int n;
3088 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3089 scsi_debug_vpd_use_hostno = n;
3090 return count;
3092 return -EINVAL;
3094 DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
3095 sdebug_vpd_use_hostno_store);
3097 static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
3099 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3101 DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
3103 static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf)
3105 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3107 DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL);
3109 static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf)
3111 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3113 DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
3115 static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
3117 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
3119 DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
3121 static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
3123 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3125 DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);
3127 static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
3129 ssize_t count;
3131 if (scsi_debug_unmap_granularity == 0)
3132 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3133 sdebug_store_sectors);
3135 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3137 buf[count++] = '\n';
3138 buf[count++] = 0;
3140 return count;
3142 DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
3145 /* Note: The following function creates attribute files in the
3146 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3147 files (over those found in the /sys/module/scsi_debug/parameters
3148 directory) is that auxiliary actions can be triggered when an attribute
3149 is changed. For example see: sdebug_add_host_store() above. */
3151 static int do_create_driverfs_files(void)
3153 int ret;
3155 ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3156 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
3157 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3158 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3159 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3160 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3161 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3162 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3163 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3164 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3165 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3166 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3167 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3168 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
3169 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3170 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3171 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3172 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3173 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix);
3174 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
3175 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
3176 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
3177 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
3178 return ret;
3181 static void do_remove_driverfs_files(void)
3183 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
3184 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
3185 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
3186 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
3187 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix);
3188 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3189 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3190 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3191 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3192 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
3193 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3194 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3195 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3196 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3197 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3198 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3199 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3200 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3201 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3202 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3203 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3204 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay);
3205 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3208 static void pseudo_0_release(struct device *dev)
3210 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3211 printk(KERN_INFO "scsi_debug: pseudo_0_release() called\n");
3214 static struct device pseudo_primary = {
3215 .init_name = "pseudo_0",
3216 .release = pseudo_0_release,
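/* Module init: validate parameters, allocate the ram disk (plus optional DIF
 * and provisioning stores), register the pseudo bus, driver and sysfs files,
 * then add the initial adapter(s). */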
3219 static int __init scsi_debug_init(void)
3221 unsigned long sz;
3222 int host_to_add;
3223 int k;
3224 int ret;
3226 switch (scsi_debug_sector_size) {
3227 case 512:
3228 case 1024:
3229 case 2048:
3230 case 4096:
3231 break;
3232 default:
3233 printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3234 scsi_debug_sector_size);
3235 return -EINVAL;
3238 switch (scsi_debug_dif) {
3240 case SD_DIF_TYPE0_PROTECTION:
3241 case SD_DIF_TYPE1_PROTECTION:
3242 case SD_DIF_TYPE2_PROTECTION:
3243 case SD_DIF_TYPE3_PROTECTION:
3244 break;
3246 default:
3247 printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3248 return -EINVAL;
3251 if (scsi_debug_guard > 1) {
3252 printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3253 return -EINVAL;
3256 if (scsi_debug_ato > 1) {
3257 printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
3258 return -EINVAL;
3261 if (scsi_debug_physblk_exp > 15) {
3262 printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3263 scsi_debug_physblk_exp);
3264 return -EINVAL;
3267 if (scsi_debug_lowest_aligned > 0x3fff) {
3268 printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3269 scsi_debug_lowest_aligned);
3270 return -EINVAL;
3273 if (scsi_debug_dev_size_mb < 1)
3274 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
3275 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3276 sdebug_store_sectors = sz / scsi_debug_sector_size;
3277 sdebug_capacity = get_sdebug_capacity();
3279 /* play around with geometry, don't waste too much on track 0 */
3280 sdebug_heads = 8;
3281 sdebug_sectors_per = 32;
3282 if (scsi_debug_dev_size_mb >= 256)
3283 sdebug_heads = 64;
3284 else if (scsi_debug_dev_size_mb >= 16)
3285 sdebug_heads = 32;
3286 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3287 (sdebug_sectors_per * sdebug_heads);
3288 if (sdebug_cylinders_per >= 1024) {
3289 /* other LLDs do this; implies >= 1GB ram disk ... */
3290 sdebug_heads = 255;
3291 sdebug_sectors_per = 63;
3292 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3293 (sdebug_sectors_per * sdebug_heads);
3296 fake_storep = vmalloc(sz);
3297 if (NULL == fake_storep) {
3298 printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3299 return -ENOMEM;
3301 memset(fake_storep, 0, sz);
3302 if (scsi_debug_num_parts > 0)
3303 sdebug_build_parts(fake_storep, sz);
3305 if (scsi_debug_dif) {
3306 int dif_size;
3308 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3309 dif_storep = vmalloc(dif_size);
3311 printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
3312 dif_size, dif_storep);
3314 if (dif_storep == NULL) {
3315 printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
3316 ret = -ENOMEM;
3317 goto free_vm;
3320 memset(dif_storep, 0xff, dif_size);
3323 if (scsi_debug_unmap_granularity) {
3324 unsigned int map_bytes;
3326 if (scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
3327 printk(KERN_ERR
3328 "%s: ERR: unmap_granularity < unmap_alignment\n",
3329 __func__);
3330 return -EINVAL;
3333 map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
3334 map_bytes = map_size >> 3;
3335 map_storep = vmalloc(map_bytes);
3337 printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3338 map_size);
3340 if (map_storep == NULL) {
3341 printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3342 ret = -ENOMEM;
3343 goto free_vm;
3346 memset(map_storep, 0x0, map_bytes);
3348 /* Map first 1KB for partition table */
3349 if (scsi_debug_num_parts)
3350 map_region(0, 2);
3353 ret = device_register(&pseudo_primary);
3354 if (ret < 0) {
3355 printk(KERN_WARNING "scsi_debug: device_register error: %d\n",
3356 ret);
3357 goto free_vm;
3359 ret = bus_register(&pseudo_lld_bus);
3360 if (ret < 0) {
3361 printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3362 ret);
3363 goto dev_unreg;
3365 ret = driver_register(&sdebug_driverfs_driver);
3366 if (ret < 0) {
3367 printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
3368 ret);
3369 goto bus_unreg;
3371 ret = do_create_driverfs_files();
3372 if (ret < 0) {
3373 printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
3374 ret);
3375 goto del_files;
3378 init_all_queued();
3380 host_to_add = scsi_debug_add_host;
3381 scsi_debug_add_host = 0;
3383 for (k = 0; k < host_to_add; k++) {
3384 if (sdebug_add_adapter()) {
3385 printk(KERN_ERR "scsi_debug_init: "
3386 "sdebug_add_adapter failed k=%d\n", k);
3387 break;
3391 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3392 printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3393 scsi_debug_add_host);
3395 return 0;
3397 del_files:
3398 do_remove_driverfs_files();
3399 driver_unregister(&sdebug_driverfs_driver);
3400 bus_unreg:
3401 bus_unregister(&pseudo_lld_bus);
3402 dev_unreg:
3403 device_unregister(&pseudo_primary);
3404 free_vm:
3405 if (map_storep)
3406 vfree(map_storep);
3407 if (dif_storep)
3408 vfree(dif_storep);
3409 vfree(fake_storep);
3411 return ret;
3414 static void __exit scsi_debug_exit(void)
3416 int k = scsi_debug_add_host;
3418 stop_all_queued();
3419 for (; k; k--)
3420 sdebug_remove_adapter();
3421 do_remove_driverfs_files();
3422 driver_unregister(&sdebug_driverfs_driver);
3423 bus_unregister(&pseudo_lld_bus);
3424 device_unregister(&pseudo_primary);
3426 if (dif_storep)
3427 vfree(dif_storep);
3429 vfree(fake_storep);
3432 device_initcall(scsi_debug_init);
3433 module_exit(scsi_debug_exit);
3435 static void sdebug_release_adapter(struct device * dev)
3437 struct sdebug_host_info *sdbg_host;
3439 sdbg_host = to_sdebug_host(dev);
3440 kfree(sdbg_host);
3443 static int sdebug_add_adapter(void)
3445 int k, devs_per_host;
3446 int error = 0;
3447 struct sdebug_host_info *sdbg_host;
3448 struct sdebug_dev_info *sdbg_devinfo, *tmp;
3450 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
3451 if (NULL == sdbg_host) {
3452 printk(KERN_ERR "%s: out of memory at line %d\n",
3453 __func__, __LINE__);
3454 return -ENOMEM;
3457 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
3459 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
3460 for (k = 0; k < devs_per_host; k++) {
3461 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
3462 if (!sdbg_devinfo) {
3463 printk(KERN_ERR "%s: out of memory at line %d\n",
3464 __func__, __LINE__);
3465 error = -ENOMEM;
3466 goto clean;
3470 spin_lock(&sdebug_host_list_lock);
3471 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
3472 spin_unlock(&sdebug_host_list_lock);
3474 sdbg_host->dev.bus = &pseudo_lld_bus;
3475 sdbg_host->dev.parent = &pseudo_primary;
3476 sdbg_host->dev.release = &sdebug_release_adapter;
3477 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3479 error = device_register(&sdbg_host->dev);
3481 if (error)
3482 goto clean;
3484 ++scsi_debug_add_host;
3485 return error;
3487 clean:
3488 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3489 dev_list) {
3490 list_del(&sdbg_devinfo->dev_list);
3491 kfree(sdbg_devinfo);
3494 kfree(sdbg_host);
3495 return error;
3498 static void sdebug_remove_adapter(void)
3500 struct sdebug_host_info * sdbg_host = NULL;
3502 spin_lock(&sdebug_host_list_lock);
3503 if (!list_empty(&sdebug_host_list)) {
3504 sdbg_host = list_entry(sdebug_host_list.prev,
3505 struct sdebug_host_info, host_list);
3506 list_del(&sdbg_host->host_list);
3508 spin_unlock(&sdebug_host_list_lock);
3510 if (!sdbg_host)
3511 return;
3513 device_unregister(&sdbg_host->dev);
3514 --scsi_debug_add_host;
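/* queuecommand entry point: decode the CDB, dispatch to the matching resp_*
 * handler, inject errors according to every_nth/opts, and hand the result to
 * schedule_resp() for immediate or delayed completion. */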
3517 static
3518 int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
3520 unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
3521 int len, k;
3522 unsigned int num;
3523 unsigned long long lba;
3524 u32 ei_lba;
3525 int errsts = 0;
3526 int target = SCpnt->device->id;
3527 struct sdebug_dev_info *devip = NULL;
3528 int inj_recovered = 0;
3529 int inj_transport = 0;
3530 int inj_dif = 0;
3531 int inj_dix = 0;
3532 int delay_override = 0;
3533 int unmap = 0;
3535 scsi_set_resid(SCpnt, 0);
3536 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
3537 printk(KERN_INFO "scsi_debug: cmd ");
3538 for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
3539 printk("%02x ", (int)cmd[k]);
3540 printk("\n");
3543 if (target == SCpnt->device->host->hostt->this_id) {
3544 printk(KERN_INFO "scsi_debug: initiator's id used as "
3545 "target!\n");
3546 return schedule_resp(SCpnt, NULL, done,
3547 DID_NO_CONNECT << 16, 0);
3550 if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
3551 (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
3552 return schedule_resp(SCpnt, NULL, done,
3553 DID_NO_CONNECT << 16, 0);
3554 devip = devInfoReg(SCpnt->device);
3555 if (NULL == devip)
3556 return schedule_resp(SCpnt, NULL, done,
3557 DID_NO_CONNECT << 16, 0);
3559 if ((scsi_debug_every_nth != 0) &&
3560 (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
3561 scsi_debug_cmnd_count = 0;
3562 if (scsi_debug_every_nth < -1)
3563 scsi_debug_every_nth = -1;
3564 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
3565 return 0; /* ignore command causing timeout */
3566 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
3567 inj_recovered = 1; /* to reads and writes below */
3568 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
3569 inj_transport = 1; /* to reads and writes below */
3570 else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
3571 inj_dif = 1; /* to reads and writes below */
3572 else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
3573 inj_dix = 1; /* to reads and writes below */
3576 if (devip->wlun) {
3577 switch (*cmd) {
3578 case INQUIRY:
3579 case REQUEST_SENSE:
3580 case TEST_UNIT_READY:
3581 case REPORT_LUNS:
3582 break; /* only allowable wlun commands */
3583 default:
3584 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3585 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
3586 "not supported for wlun\n", *cmd);
3587 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3588 INVALID_OPCODE, 0);
3589 errsts = check_condition_result;
3590 return schedule_resp(SCpnt, devip, done, errsts, 0);
3595 switch (*cmd) {
3596 case INQUIRY: /* mandatory, ignore unit attention */
3597 delay_override = 1;
3598 errsts = resp_inquiry(SCpnt, target, devip);
3599 break;
3600 case REQUEST_SENSE: /* mandatory, ignore unit attention */
3601 delay_override = 1;
3602 errsts = resp_requests(SCpnt, devip);
3603 break;
3604 case REZERO_UNIT: /* actually this is REWIND for SSC */
3605 case START_STOP:
3606 errsts = resp_start_stop(SCpnt, devip);
3607 break;
3608 case ALLOW_MEDIUM_REMOVAL:
3609 errsts = check_readiness(SCpnt, 1, devip);
3610 if (errsts)
3611 break;
3612 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3613 printk(KERN_INFO "scsi_debug: Medium removal %s\n",
3614 cmd[4] ? "inhibited" : "enabled");
3615 break;
3616 case SEND_DIAGNOSTIC: /* mandatory */
3617 errsts = check_readiness(SCpnt, 1, devip);
3618 break;
3619 case TEST_UNIT_READY: /* mandatory */
3620 delay_override = 1;
3621 errsts = check_readiness(SCpnt, 0, devip);
3622 break;
3623 case RESERVE:
3624 errsts = check_readiness(SCpnt, 1, devip);
3625 break;
3626 case RESERVE_10:
3627 errsts = check_readiness(SCpnt, 1, devip);
3628 break;
3629 case RELEASE:
3630 errsts = check_readiness(SCpnt, 1, devip);
3631 break;
3632 case RELEASE_10:
3633 errsts = check_readiness(SCpnt, 1, devip);
3634 break;
3635 case READ_CAPACITY:
3636 errsts = resp_readcap(SCpnt, devip);
3637 break;
3638 case SERVICE_ACTION_IN:
3639 if (cmd[1] == SAI_READ_CAPACITY_16)
3640 errsts = resp_readcap16(SCpnt, devip);
3641 else if (cmd[1] == SAI_GET_LBA_STATUS) {
3643 if (scsi_debug_unmap_max_desc == 0) {
3644 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3645 INVALID_COMMAND_OPCODE, 0);
3646 errsts = check_condition_result;
3647 } else
3648 errsts = resp_get_lba_status(SCpnt, devip);
3649 } else {
3650 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3651 INVALID_OPCODE, 0);
3652 errsts = check_condition_result;
3653 }
3654 break;
3655 case MAINTENANCE_IN:
3656 if (MI_REPORT_TARGET_PGS != cmd[1]) {
3657 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3658 INVALID_OPCODE, 0);
3659 errsts = check_condition_result;
3660 break;
3661 }
3662 errsts = resp_report_tgtpgs(SCpnt, devip);
3663 break;
3664 case READ_16:
3665 case READ_12:
3666 case READ_10:
3667 /* READ{10,12,16} and DIF Type 2 are natural enemies */
3668 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3669 cmd[1] & 0xe0) {
3670 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3671 INVALID_COMMAND_OPCODE, 0);
3672 errsts = check_condition_result;
3673 break;
3674 }
3676 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3677 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3678 (cmd[1] & 0xe0) == 0)
3679 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3681 /* fall through */
3682 case READ_6:
3683 read:
3684 errsts = check_readiness(SCpnt, 0, devip);
3685 if (errsts)
3686 break;
3687 if (scsi_debug_fake_rw)
3688 break;
3689 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3690 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3691 if (inj_recovered && (0 == errsts)) {
3692 mk_sense_buffer(devip, RECOVERED_ERROR,
3693 THRESHOLD_EXCEEDED, 0);
3694 errsts = check_condition_result;
3695 } else if (inj_transport && (0 == errsts)) {
3696 mk_sense_buffer(devip, ABORTED_COMMAND,
3697 TRANSPORT_PROBLEM, ACK_NAK_TO);
3698 errsts = check_condition_result;
3699 } else if (inj_dif && (0 == errsts)) {
3700 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3701 errsts = illegal_condition_result;
3702 } else if (inj_dix && (0 == errsts)) {
3703 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3704 errsts = illegal_condition_result;
3705 }
3706 break;
3707 case REPORT_LUNS: /* mandatory, ignore unit attention */
3708 delay_override = 1;
3709 errsts = resp_report_luns(SCpnt, devip);
3710 break;
3711 case VERIFY: /* 10 byte SBC-2 command */
3712 errsts = check_readiness(SCpnt, 0, devip);
3713 break;
3714 case WRITE_16:
3715 case WRITE_12:
3716 case WRITE_10:
3717 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3718 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3719 cmd[1] & 0xe0) {
3720 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3721 INVALID_COMMAND_OPCODE, 0);
3722 errsts = check_condition_result;
3723 break;
3724 }
3726 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3727 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3728 (cmd[1] & 0xe0) == 0)
3729 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3731 /* fall through */
3732 case WRITE_6:
3733 write:
3734 errsts = check_readiness(SCpnt, 0, devip);
3735 if (errsts)
3736 break;
3737 if (scsi_debug_fake_rw)
3738 break;
3739 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3740 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3741 if (inj_recovered && (0 == errsts)) {
3742 mk_sense_buffer(devip, RECOVERED_ERROR,
3743 THRESHOLD_EXCEEDED, 0);
3744 errsts = check_condition_result;
3745 } else if (inj_dif && (0 == errsts)) {
3746 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3747 errsts = illegal_condition_result;
3748 } else if (inj_dix && (0 == errsts)) {
3749 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3750 errsts = illegal_condition_result;
3751 }
3752 break;
3753 case WRITE_SAME_16:
3754 if (cmd[1] & 0x8)
3755 unmap = 1;
3756 /* fall through */
3757 case WRITE_SAME:
3758 errsts = check_readiness(SCpnt, 0, devip);
3759 if (errsts)
3760 break;
3761 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3762 errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
3763 break;
3764 case UNMAP:
3765 errsts = check_readiness(SCpnt, 0, devip);
3766 if (errsts)
3767 break;
3769 if (scsi_debug_unmap_max_desc == 0) {
3770 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3771 INVALID_COMMAND_OPCODE, 0);
3772 errsts = check_condition_result;
3773 } else
3774 errsts = resp_unmap(SCpnt, devip);
3775 break;
3776 case MODE_SENSE:
3777 case MODE_SENSE_10:
3778 errsts = resp_mode_sense(SCpnt, target, devip);
3779 break;
3780 case MODE_SELECT:
3781 errsts = resp_mode_select(SCpnt, 1, devip);
3782 break;
3783 case MODE_SELECT_10:
3784 errsts = resp_mode_select(SCpnt, 0, devip);
3785 break;
3786 case LOG_SENSE:
3787 errsts = resp_log_sense(SCpnt, devip);
3788 break;
3789 case SYNCHRONIZE_CACHE:
3790 delay_override = 1;
3791 errsts = check_readiness(SCpnt, 0, devip);
3792 break;
3793 case WRITE_BUFFER:
3794 errsts = check_readiness(SCpnt, 1, devip);
3795 break;
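/* XDWRITEREAD(10) is only valid as a bidirectional request. */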
3796 case XDWRITEREAD_10:
3797 if (!scsi_bidi_cmnd(SCpnt)) {
3798 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3799 INVALID_FIELD_IN_CDB, 0);
3800 errsts = check_condition_result;
3801 break;
3802 }
3804 errsts = check_readiness(SCpnt, 0, devip);
3805 if (errsts)
3806 break;
3807 if (scsi_debug_fake_rw)
3808 break;
3809 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3810 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3811 if (errsts)
3812 break;
3813 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3814 if (errsts)
3815 break;
3816 errsts = resp_xdwriteread(SCpnt, lba, num, devip);
3817 break;
3818 case VARIABLE_LENGTH_CMD:
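/* Only READ(32)/WRITE(32) with DIF Type 2 are honoured here; anything
 * else in a variable-length CDB draws INVALID FIELD IN CDB. */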
3819 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
3821 if ((cmd[10] & 0xe0) == 0)
3822 printk(KERN_ERR
3823 "Unprotected RD/WR to DIF device\n");
3825 if (cmd[9] == READ_32) {
3826 BUG_ON(SCpnt->cmd_len < 32);
3827 goto read;
3828 }
3830 if (cmd[9] == WRITE_32) {
3831 BUG_ON(SCpnt->cmd_len < 32);
3832 goto write;
3833 }
3834 }
3836 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3837 INVALID_FIELD_IN_CDB, 0);
3838 errsts = check_condition_result;
3839 break;
3841 default:
3842 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3843 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
3844 "supported\n", *cmd);
3845 errsts = check_readiness(SCpnt, 1, devip);
3846 if (errsts)
3847 break; /* Unit attention takes precedence */
3848 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
3849 errsts = check_condition_result;
3850 break;
3851 }
3852 return schedule_resp(SCpnt, devip, done, errsts,
3853 (delay_override ? 0 : scsi_debug_delay));
3854 }
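/*
 * Host template describing the simulated adapter; can_queue is overridden
 * from scsi_debug_max_queue before the host is allocated in
 * sdebug_driver_probe() below.
 */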
3856 static struct scsi_host_template sdebug_driver_template = {
3857 .proc_info = scsi_debug_proc_info,
3858 .proc_name = sdebug_proc_name,
3859 .name = "SCSI DEBUG",
3860 .info = scsi_debug_info,
3861 .slave_alloc = scsi_debug_slave_alloc,
3862 .slave_configure = scsi_debug_slave_configure,
3863 .slave_destroy = scsi_debug_slave_destroy,
3864 .ioctl = scsi_debug_ioctl,
3865 .queuecommand = scsi_debug_queuecommand,
3866 .eh_abort_handler = scsi_debug_abort,
3867 .eh_bus_reset_handler = scsi_debug_bus_reset,
3868 .eh_device_reset_handler = scsi_debug_device_reset,
3869 .eh_host_reset_handler = scsi_debug_host_reset,
3870 .bios_param = scsi_debug_biosparam,
3871 .can_queue = SCSI_DEBUG_CANQUEUE,
3872 .this_id = 7,
3873 .sg_tablesize = 256,
3874 .cmd_per_lun = 16,
3875 .max_sectors = 0xffff,
3876 .use_clustering = DISABLE_CLUSTERING,
3877 .module = THIS_MODULE,
3878 };
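/*
 * Bus probe: allocate and register one simulated SCSI host per scsi_debug
 * adapter, configuring DIF/DIX protection from the scsi_debug_dif and
 * scsi_debug_dix module parameters.
 */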
3880 static int sdebug_driver_probe(struct device * dev)
3881 {
3882 int error = 0;
3883 struct sdebug_host_info *sdbg_host;
3884 struct Scsi_Host *hpnt;
3885 int host_prot;
3887 sdbg_host = to_sdebug_host(dev);
3889 sdebug_driver_template.can_queue = scsi_debug_max_queue;
3890 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
3891 if (NULL == hpnt) {
3892 printk(KERN_ERR "%s: scsi_register failed\n", __func__);
3893 error = -ENODEV;
3894 return error;
3895 }
3897 sdbg_host->shost = hpnt;
3898 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
3899 if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
3900 hpnt->max_id = scsi_debug_num_tgts + 1;
3901 else
3902 hpnt->max_id = scsi_debug_num_tgts;
3903 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
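/* Translate the dif/dix module parameters into SHOST_* protection flags. */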
3905 host_prot = 0;
3907 switch (scsi_debug_dif) {
3909 case SD_DIF_TYPE1_PROTECTION:
3910 host_prot = SHOST_DIF_TYPE1_PROTECTION;
3911 if (scsi_debug_dix)
3912 host_prot |= SHOST_DIX_TYPE1_PROTECTION;
3913 break;
3915 case SD_DIF_TYPE2_PROTECTION:
3916 host_prot = SHOST_DIF_TYPE2_PROTECTION;
3917 if (scsi_debug_dix)
3918 host_prot |= SHOST_DIX_TYPE2_PROTECTION;
3919 break;
3921 case SD_DIF_TYPE3_PROTECTION:
3922 host_prot = SHOST_DIF_TYPE3_PROTECTION;
3923 if (scsi_debug_dix)
3924 host_prot |= SHOST_DIX_TYPE3_PROTECTION;
3925 break;
3927 default:
3928 if (scsi_debug_dix)
3929 host_prot |= SHOST_DIX_TYPE0_PROTECTION;
3930 break;
3931 }
3933 scsi_host_set_prot(hpnt, host_prot);
3935 printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
3936 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
3937 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
3938 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
3939 (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
3940 (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
3941 (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
3942 (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
3944 if (scsi_debug_guard == 1)
3945 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
3946 else
3947 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
3949 error = scsi_add_host(hpnt, &sdbg_host->dev);
3950 if (error) {
3951 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
3952 error = -ENODEV;
3953 scsi_host_put(hpnt);
3954 } else
3955 scsi_scan_host(hpnt);
3958 return error;
3959 }
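/*
 * Bus remove: tear down the simulated host and free the per-device state
 * built up while it was scanned.
 */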
3961 static int sdebug_driver_remove(struct device * dev)
3962 {
3963 struct sdebug_host_info *sdbg_host;
3964 struct sdebug_dev_info *sdbg_devinfo, *tmp;
3966 sdbg_host = to_sdebug_host(dev);
3968 if (!sdbg_host) {
3969 printk(KERN_ERR "%s: Unable to locate host info\n",
3970 __func__);
3971 return -ENODEV;
3972 }
3974 scsi_remove_host(sdbg_host->shost);
3976 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3977 dev_list) {
3978 list_del(&sdbg_devinfo->dev_list);
3979 kfree(sdbg_devinfo);
3980 }
3982 scsi_host_put(sdbg_host->shost);
3983 return 0;
3984 }
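/*
 * The "pseudo" bus: match always succeeds, so every scsi_debug adapter
 * device binds to this driver and the probe/remove routines above run for
 * each simulated host.
 */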
3986 static int pseudo_lld_bus_match(struct device *dev,
3987 struct device_driver *dev_driver)
3988 {
3989 return 1;
3990 }
3992 static struct bus_type pseudo_lld_bus = {
3993 .name = "pseudo",
3994 .match = pseudo_lld_bus_match,
3995 .probe = sdebug_driver_probe,
3996 .remove = sdebug_driver_remove,