drivers/scsi/scsi_debug.c
1 /*
2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 9 * This version is more generic, simulating a variable number of disks
 10 * (or disk-like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
12 * SAS disks.
15 * For documentation see http://www.torque.net/sg/sdebug26.html
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
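/* Illustrative usage, following the option style described above (the
 * parameter names correspond to the scsi_debug_* variables declared below):
 *   modprobe scsi_debug dev_size_mb=16 num_tgts=2 max_luns=2
 */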
28 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
58 #include "sd.h"
59 #include "scsi_logging.h"
61 #define SCSI_DEBUG_VERSION "1.81"
62 static const char * scsi_debug_version_date = "20070104";
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define UNRECOVERED_READ_ERR 0x11
68 #define PARAMETER_LIST_LENGTH_ERR 0x1a
69 #define INVALID_OPCODE 0x20
70 #define ADDR_OUT_OF_RANGE 0x21
71 #define INVALID_COMMAND_OPCODE 0x20
72 #define INVALID_FIELD_IN_CDB 0x24
73 #define INVALID_FIELD_IN_PARAM_LIST 0x26
74 #define POWERON_RESET 0x29
75 #define SAVING_PARAMS_UNSUP 0x39
76 #define TRANSPORT_PROBLEM 0x4b
77 #define THRESHOLD_EXCEEDED 0x5d
78 #define LOW_POWER_COND_ON 0x5e
80 /* Additional Sense Code Qualifier (ASCQ) */
81 #define ACK_NAK_TO 0x3
83 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
85 /* Default values for driver parameters */
86 #define DEF_NUM_HOST 1
87 #define DEF_NUM_TGTS 1
88 #define DEF_MAX_LUNS 1
89 /* With these defaults, this driver will make 1 host with 1 target
90 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
92 #define DEF_DELAY 1
93 #define DEF_DEV_SIZE_MB 8
94 #define DEF_EVERY_NTH 0
95 #define DEF_NUM_PARTS 0
96 #define DEF_OPTS 0
97 #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
98 #define DEF_PTYPE 0
99 #define DEF_D_SENSE 0
100 #define DEF_NO_LUN_0 0
101 #define DEF_VIRTUAL_GB 0
102 #define DEF_FAKE_RW 0
103 #define DEF_VPD_USE_HOSTNO 1
104 #define DEF_SECTOR_SIZE 512
105 #define DEF_DIX 0
106 #define DEF_DIF 0
107 #define DEF_GUARD 0
108 #define DEF_ATO 1
109 #define DEF_PHYSBLK_EXP 0
110 #define DEF_LOWEST_ALIGNED 0
111 #define DEF_UNMAP_MAX_BLOCKS 0
112 #define DEF_UNMAP_MAX_DESC 0
113 #define DEF_UNMAP_GRANULARITY 0
114 #define DEF_UNMAP_ALIGNMENT 0
116 /* bit mask values for scsi_debug_opts */
117 #define SCSI_DEBUG_OPT_NOISE 1
118 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
119 #define SCSI_DEBUG_OPT_TIMEOUT 4
120 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
121 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
122 #define SCSI_DEBUG_OPT_DIF_ERR 32
123 #define SCSI_DEBUG_OPT_DIX_ERR 64
124 /* When "every_nth" > 0 then modulo "every_nth" commands:
 125 * - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
126 * - a RECOVERED_ERROR is simulated on successful read and write
127 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
128 * - a TRANSPORT_ERROR is simulated on successful read and write
129 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
131 * When "every_nth" < 0 then after "- every_nth" commands:
 132 * - no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
133 * - a RECOVERED_ERROR is simulated on successful read and write
134 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
135 * - a TRANSPORT_ERROR is simulated on successful read and write
136 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
137 * This will continue until some other action occurs (e.g. the user
138 * writing a new value (other than -1 or 1) to every_nth via sysfs).
 141 /* when the SCSI_DEBUG_OPT_MEDIUM_ERR bit is set in scsi_debug_opts, a medium
 142 * error is simulated at this sector on read commands: */
143 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
145 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
146 * or "peripheral device" addressing (value 0) */
147 #define SAM2_LUN_ADDRESS_METHOD 0
148 #define SAM2_WLUN_REPORT_LUNS 0xc101
150 static int scsi_debug_add_host = DEF_NUM_HOST;
151 static int scsi_debug_delay = DEF_DELAY;
152 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
153 static int scsi_debug_every_nth = DEF_EVERY_NTH;
154 static int scsi_debug_max_luns = DEF_MAX_LUNS;
155 static int scsi_debug_num_parts = DEF_NUM_PARTS;
156 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
157 static int scsi_debug_opts = DEF_OPTS;
158 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
159 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
160 static int scsi_debug_dsense = DEF_D_SENSE;
161 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
162 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
163 static int scsi_debug_fake_rw = DEF_FAKE_RW;
164 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
165 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
166 static int scsi_debug_dix = DEF_DIX;
167 static int scsi_debug_dif = DEF_DIF;
168 static int scsi_debug_guard = DEF_GUARD;
169 static int scsi_debug_ato = DEF_ATO;
170 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
171 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
172 static int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
173 static int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
174 static int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
175 static int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
177 static int scsi_debug_cmnd_count = 0;
179 #define DEV_READONLY(TGT) (0)
180 #define DEV_REMOVEABLE(TGT) (0)
182 static unsigned int sdebug_store_sectors;
183 static sector_t sdebug_capacity; /* in sectors */
185 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
186 may still need them */
187 static int sdebug_heads; /* heads per disk */
188 static int sdebug_cylinders_per; /* cylinders per surface */
189 static int sdebug_sectors_per; /* sectors per cylinder */
191 #define SDEBUG_MAX_PARTS 4
193 #define SDEBUG_SENSE_LEN 32
195 #define SCSI_DEBUG_CANQUEUE 255
196 #define SCSI_DEBUG_MAX_CMD_LEN 32
198 struct sdebug_dev_info {
199 struct list_head dev_list;
200 unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */
201 unsigned int channel;
202 unsigned int target;
203 unsigned int lun;
204 struct sdebug_host_info *sdbg_host;
205 unsigned int wlun;
206 char reset;
207 char stopped;
208 char used;
211 struct sdebug_host_info {
212 struct list_head host_list;
213 struct Scsi_Host *shost;
214 struct device dev;
215 struct list_head dev_info_list;
218 #define to_sdebug_host(d) \
219 container_of(d, struct sdebug_host_info, dev)
221 static LIST_HEAD(sdebug_host_list);
222 static DEFINE_SPINLOCK(sdebug_host_list_lock);
224 typedef void (* done_funct_t) (struct scsi_cmnd *);
226 struct sdebug_queued_cmd {
227 int in_use;
228 struct timer_list cmnd_timer;
229 done_funct_t done_funct;
230 struct scsi_cmnd * a_cmnd;
231 int scsi_result;
233 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
235 static unsigned char * fake_storep; /* ramdisk storage */
236 static unsigned char *dif_storep; /* protection info */
237 static void *map_storep; /* provisioning map */
239 static unsigned long map_size;
240 static int num_aborts = 0;
241 static int num_dev_resets = 0;
242 static int num_bus_resets = 0;
243 static int num_host_resets = 0;
244 static int dix_writes;
245 static int dix_reads;
246 static int dif_errors;
248 static DEFINE_SPINLOCK(queued_arr_lock);
249 static DEFINE_RWLOCK(atomic_rw);
251 static char sdebug_proc_name[] = "scsi_debug";
253 static struct bus_type pseudo_lld_bus;
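/* One 8-byte DIF tuple protects each sector, so the byte offset of a
 * sector's protection information within dif_storep is sector * 8. */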
255 static inline sector_t dif_offset(sector_t sector)
257 return sector << 3;
260 static struct device_driver sdebug_driverfs_driver = {
261 .name = sdebug_proc_name,
262 .bus = &pseudo_lld_bus,
265 static const int check_condition_result =
266 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
268 static const int illegal_condition_result =
269 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
271 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
272 0, 0, 0x2, 0x4b};
273 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
274 0, 0, 0x0, 0x0};
276 static int sdebug_add_adapter(void);
277 static void sdebug_remove_adapter(void);
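/* Walk all simulated hosts and refresh max_id from scsi_debug_num_tgts;
 * max_lun is set high enough to include the REPORT LUNS well-known LUN. */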
279 static void sdebug_max_tgts_luns(void)
281 struct sdebug_host_info *sdbg_host;
282 struct Scsi_Host *hpnt;
284 spin_lock(&sdebug_host_list_lock);
285 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
286 hpnt = sdbg_host->shost;
287 if ((hpnt->this_id >= 0) &&
288 (scsi_debug_num_tgts > hpnt->this_id))
289 hpnt->max_id = scsi_debug_num_tgts + 1;
290 else
291 hpnt->max_id = scsi_debug_num_tgts;
292 /* scsi_debug_max_luns; */
293 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
295 spin_unlock(&sdebug_host_list_lock);
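/* Build sense data in devip->sense_buff: descriptor format when
 * scsi_debug_dsense is set, fixed format otherwise. */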
298 static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
299 int asc, int asq)
301 unsigned char *sbuff;
303 sbuff = devip->sense_buff;
304 memset(sbuff, 0, SDEBUG_SENSE_LEN);
306 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
308 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
309 printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: "
310 "[0x%x,0x%x,0x%x]\n", key, asc, asq);
313 static void get_data_transfer_info(unsigned char *cmd,
314 unsigned long long *lba, unsigned int *num,
315 u32 *ei_lba)
317 *ei_lba = 0;
319 switch (*cmd) {
320 case VARIABLE_LENGTH_CMD:
321 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
322 (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
323 (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
324 (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
326 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
327 (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
329 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
330 (u32)cmd[28] << 24;
331 break;
333 case WRITE_SAME_16:
334 case WRITE_16:
335 case READ_16:
336 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
337 (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
338 (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
339 (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
341 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
342 (u32)cmd[10] << 24;
343 break;
344 case WRITE_12:
345 case READ_12:
346 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
347 (u32)cmd[2] << 24;
349 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
350 (u32)cmd[6] << 24;
351 break;
352 case WRITE_SAME:
353 case WRITE_10:
354 case READ_10:
355 case XDWRITEREAD_10:
356 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
357 (u32)cmd[2] << 24;
359 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
360 break;
361 case WRITE_6:
362 case READ_6:
363 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
364 (u32)(cmd[1] & 0x1f) << 16;
365 *num = (0 == cmd[4]) ? 256 : cmd[4];
366 break;
367 default:
368 break;
372 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
374 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
375 printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
377 return -EINVAL;
378 /* return -ENOTTY; // correct return but upsets fdisk */
381 static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
382 struct sdebug_dev_info * devip)
384 if (devip->reset) {
385 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
386 printk(KERN_INFO "scsi_debug: Reporting Unit "
387 "attention: power on reset\n");
388 devip->reset = 0;
389 mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
390 return check_condition_result;
392 if ((0 == reset_only) && devip->stopped) {
393 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
394 printk(KERN_INFO "scsi_debug: Reporting Not "
395 "ready: initializing command required\n");
396 mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
397 0x2);
398 return check_condition_result;
400 return 0;
403 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
404 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
405 int arr_len)
407 int act_len;
408 struct scsi_data_buffer *sdb = scsi_in(scp);
410 if (!sdb->length)
411 return 0;
412 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
413 return (DID_ERROR << 16);
415 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
416 arr, arr_len);
417 if (sdb->resid)
418 sdb->resid -= act_len;
419 else
420 sdb->resid = scsi_bufflen(scp) - act_len;
422 return 0;
425 /* Returns number of bytes fetched into 'arr' or -1 if error. */
426 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
427 int arr_len)
429 if (!scsi_bufflen(scp))
430 return 0;
431 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
432 return -1;
434 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
438 static const char * inq_vendor_id = "Linux ";
439 static const char * inq_product_id = "scsi_debug ";
440 static const char * inq_product_rev = "0004";
442 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
443 int target_dev_id, int dev_id_num,
444 const char * dev_id_str,
445 int dev_id_str_len)
447 int num, port_a;
448 char b[32];
450 port_a = target_dev_id + 1;
451 /* T10 vendor identifier field format (faked) */
452 arr[0] = 0x2; /* ASCII */
453 arr[1] = 0x1;
454 arr[2] = 0x0;
455 memcpy(&arr[4], inq_vendor_id, 8);
456 memcpy(&arr[12], inq_product_id, 16);
457 memcpy(&arr[28], dev_id_str, dev_id_str_len);
458 num = 8 + 16 + dev_id_str_len;
459 arr[3] = num;
460 num += 4;
461 if (dev_id_num >= 0) {
462 /* NAA-5, Logical unit identifier (binary) */
463 arr[num++] = 0x1; /* binary (not necessarily sas) */
464 arr[num++] = 0x3; /* PIV=0, lu, naa */
465 arr[num++] = 0x0;
466 arr[num++] = 0x8;
467 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
468 arr[num++] = 0x33;
469 arr[num++] = 0x33;
470 arr[num++] = 0x30;
471 arr[num++] = (dev_id_num >> 24);
472 arr[num++] = (dev_id_num >> 16) & 0xff;
473 arr[num++] = (dev_id_num >> 8) & 0xff;
474 arr[num++] = dev_id_num & 0xff;
475 /* Target relative port number */
476 arr[num++] = 0x61; /* proto=sas, binary */
477 arr[num++] = 0x94; /* PIV=1, target port, rel port */
478 arr[num++] = 0x0; /* reserved */
479 arr[num++] = 0x4; /* length */
480 arr[num++] = 0x0; /* reserved */
481 arr[num++] = 0x0; /* reserved */
482 arr[num++] = 0x0;
483 arr[num++] = 0x1; /* relative port A */
485 /* NAA-5, Target port identifier */
486 arr[num++] = 0x61; /* proto=sas, binary */
487 arr[num++] = 0x93; /* piv=1, target port, naa */
488 arr[num++] = 0x0;
489 arr[num++] = 0x8;
490 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
491 arr[num++] = 0x22;
492 arr[num++] = 0x22;
493 arr[num++] = 0x20;
494 arr[num++] = (port_a >> 24);
495 arr[num++] = (port_a >> 16) & 0xff;
496 arr[num++] = (port_a >> 8) & 0xff;
497 arr[num++] = port_a & 0xff;
498 /* NAA-5, Target port group identifier */
499 arr[num++] = 0x61; /* proto=sas, binary */
500 arr[num++] = 0x95; /* piv=1, target port group id */
501 arr[num++] = 0x0;
502 arr[num++] = 0x4;
503 arr[num++] = 0;
504 arr[num++] = 0;
505 arr[num++] = (port_group_id >> 8) & 0xff;
506 arr[num++] = port_group_id & 0xff;
507 /* NAA-5, Target device identifier */
508 arr[num++] = 0x61; /* proto=sas, binary */
509 arr[num++] = 0xa3; /* piv=1, target device, naa */
510 arr[num++] = 0x0;
511 arr[num++] = 0x8;
512 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
513 arr[num++] = 0x22;
514 arr[num++] = 0x22;
515 arr[num++] = 0x20;
516 arr[num++] = (target_dev_id >> 24);
517 arr[num++] = (target_dev_id >> 16) & 0xff;
518 arr[num++] = (target_dev_id >> 8) & 0xff;
519 arr[num++] = target_dev_id & 0xff;
520 /* SCSI name string: Target device identifier */
521 arr[num++] = 0x63; /* proto=sas, UTF-8 */
522 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
523 arr[num++] = 0x0;
524 arr[num++] = 24;
525 memcpy(arr + num, "naa.52222220", 12);
526 num += 12;
527 snprintf(b, sizeof(b), "%08X", target_dev_id);
528 memcpy(arr + num, b, 8);
529 num += 8;
530 memset(arr + num, 0, 4);
531 num += 4;
532 return num;
536 static unsigned char vpd84_data[] = {
537 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
538 0x22,0x22,0x22,0x0,0xbb,0x1,
539 0x22,0x22,0x22,0x0,0xbb,0x2,
542 static int inquiry_evpd_84(unsigned char * arr)
544 memcpy(arr, vpd84_data, sizeof(vpd84_data));
545 return sizeof(vpd84_data);
548 static int inquiry_evpd_85(unsigned char * arr)
550 int num = 0;
551 const char * na1 = "https://www.kernel.org/config";
552 const char * na2 = "http://www.kernel.org/log";
553 int plen, olen;
555 arr[num++] = 0x1; /* lu, storage config */
556 arr[num++] = 0x0; /* reserved */
557 arr[num++] = 0x0;
558 olen = strlen(na1);
559 plen = olen + 1;
560 if (plen % 4)
561 plen = ((plen / 4) + 1) * 4;
 562 arr[num++] = plen; /* length, null terminated, padded */
563 memcpy(arr + num, na1, olen);
564 memset(arr + num + olen, 0, plen - olen);
565 num += plen;
567 arr[num++] = 0x4; /* lu, logging */
568 arr[num++] = 0x0; /* reserved */
569 arr[num++] = 0x0;
570 olen = strlen(na2);
571 plen = olen + 1;
572 if (plen % 4)
573 plen = ((plen / 4) + 1) * 4;
574 arr[num++] = plen; /* length, null terminated, padded */
575 memcpy(arr + num, na2, olen);
576 memset(arr + num + olen, 0, plen - olen);
577 num += plen;
579 return num;
582 /* SCSI ports VPD page */
583 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
585 int num = 0;
586 int port_a, port_b;
588 port_a = target_dev_id + 1;
589 port_b = port_a + 1;
590 arr[num++] = 0x0; /* reserved */
591 arr[num++] = 0x0; /* reserved */
592 arr[num++] = 0x0;
593 arr[num++] = 0x1; /* relative port 1 (primary) */
594 memset(arr + num, 0, 6);
595 num += 6;
596 arr[num++] = 0x0;
597 arr[num++] = 12; /* length tp descriptor */
598 /* naa-5 target port identifier (A) */
599 arr[num++] = 0x61; /* proto=sas, binary */
600 arr[num++] = 0x93; /* PIV=1, target port, NAA */
601 arr[num++] = 0x0; /* reserved */
602 arr[num++] = 0x8; /* length */
603 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
604 arr[num++] = 0x22;
605 arr[num++] = 0x22;
606 arr[num++] = 0x20;
607 arr[num++] = (port_a >> 24);
608 arr[num++] = (port_a >> 16) & 0xff;
609 arr[num++] = (port_a >> 8) & 0xff;
610 arr[num++] = port_a & 0xff;
612 arr[num++] = 0x0; /* reserved */
613 arr[num++] = 0x0; /* reserved */
614 arr[num++] = 0x0;
615 arr[num++] = 0x2; /* relative port 2 (secondary) */
616 memset(arr + num, 0, 6);
617 num += 6;
618 arr[num++] = 0x0;
619 arr[num++] = 12; /* length tp descriptor */
620 /* naa-5 target port identifier (B) */
621 arr[num++] = 0x61; /* proto=sas, binary */
622 arr[num++] = 0x93; /* PIV=1, target port, NAA */
623 arr[num++] = 0x0; /* reserved */
624 arr[num++] = 0x8; /* length */
625 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
626 arr[num++] = 0x22;
627 arr[num++] = 0x22;
628 arr[num++] = 0x20;
629 arr[num++] = (port_b >> 24);
630 arr[num++] = (port_b >> 16) & 0xff;
631 arr[num++] = (port_b >> 8) & 0xff;
632 arr[num++] = port_b & 0xff;
634 return num;
638 static unsigned char vpd89_data[] = {
639 /* from 4th byte */ 0,0,0,0,
640 'l','i','n','u','x',' ',' ',' ',
641 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
642 '1','2','3','4',
643 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
644 0xec,0,0,0,
645 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
646 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
647 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
648 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
649 0x53,0x41,
650 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
651 0x20,0x20,
652 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
653 0x10,0x80,
654 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
655 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
656 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
657 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
658 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
659 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
660 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
661 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
662 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
663 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
664 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
665 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
666 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
667 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
668 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
669 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
670 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
671 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
672 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
673 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
674 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
675 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
676 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
677 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
678 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
679 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
682 static int inquiry_evpd_89(unsigned char * arr)
684 memcpy(arr, vpd89_data, sizeof(vpd89_data));
685 return sizeof(vpd89_data);
689 /* Block limits VPD page (SBC-3) */
690 static unsigned char vpdb0_data[] = {
691 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
692 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
693 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
694 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
697 static int inquiry_evpd_b0(unsigned char * arr)
699 unsigned int gran;
701 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
702 gran = 1 << scsi_debug_physblk_exp;
703 arr[2] = (gran >> 8) & 0xff;
704 arr[3] = gran & 0xff;
705 if (sdebug_store_sectors > 0x400) {
706 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
707 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
708 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
709 arr[7] = sdebug_store_sectors & 0xff;
712 if (scsi_debug_unmap_max_desc) {
713 unsigned int blocks;
715 if (scsi_debug_unmap_max_blocks)
716 blocks = scsi_debug_unmap_max_blocks;
717 else
718 blocks = 0xffffffff;
720 put_unaligned_be32(blocks, &arr[16]);
721 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
724 if (scsi_debug_unmap_alignment) {
725 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
726 arr[28] |= 0x80; /* UGAVALID */
729 if (scsi_debug_unmap_granularity) {
730 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
731 return 0x3c; /* Mandatory page length for thin provisioning */
734 return sizeof(vpdb0_data);
737 /* Block device characteristics VPD page (SBC-3) */
738 static int inquiry_evpd_b1(unsigned char *arr)
740 memset(arr, 0, 0x3c);
741 arr[0] = 0;
742 arr[1] = 1; /* non rotating medium (e.g. solid state) */
743 arr[2] = 0;
744 arr[3] = 5; /* less than 1.8" */
746 return 0x3c;
749 #define SDEBUG_LONG_INQ_SZ 96
750 #define SDEBUG_MAX_INQ_ARR_SZ 584
752 static int resp_inquiry(struct scsi_cmnd * scp, int target,
753 struct sdebug_dev_info * devip)
755 unsigned char pq_pdt;
756 unsigned char * arr;
757 unsigned char *cmd = (unsigned char *)scp->cmnd;
758 int alloc_len, n, ret;
760 alloc_len = (cmd[3] << 8) + cmd[4];
761 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
762 if (! arr)
763 return DID_REQUEUE << 16;
764 if (devip->wlun)
765 pq_pdt = 0x1e; /* present, wlun */
766 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
767 pq_pdt = 0x7f; /* not present, no device type */
768 else
769 pq_pdt = (scsi_debug_ptype & 0x1f);
770 arr[0] = pq_pdt;
771 if (0x2 & cmd[1]) { /* CMDDT bit set */
772 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
774 kfree(arr);
775 return check_condition_result;
776 } else if (0x1 & cmd[1]) { /* EVPD bit set */
777 int lu_id_num, port_group_id, target_dev_id, len;
778 char lu_id_str[6];
779 int host_no = devip->sdbg_host->shost->host_no;
781 port_group_id = (((host_no + 1) & 0x7f) << 8) +
782 (devip->channel & 0x7f);
783 if (0 == scsi_debug_vpd_use_hostno)
784 host_no = 0;
785 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
786 (devip->target * 1000) + devip->lun);
787 target_dev_id = ((host_no + 1) * 2000) +
788 (devip->target * 1000) - 3;
789 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
790 if (0 == cmd[2]) { /* supported vital product data pages */
791 arr[1] = cmd[2]; /*sanity */
792 n = 4;
793 arr[n++] = 0x0; /* this page */
794 arr[n++] = 0x80; /* unit serial number */
795 arr[n++] = 0x83; /* device identification */
796 arr[n++] = 0x84; /* software interface ident. */
797 arr[n++] = 0x85; /* management network addresses */
798 arr[n++] = 0x86; /* extended inquiry */
799 arr[n++] = 0x87; /* mode page policy */
800 arr[n++] = 0x88; /* SCSI ports */
801 arr[n++] = 0x89; /* ATA information */
802 arr[n++] = 0xb0; /* Block limits (SBC) */
803 arr[n++] = 0xb1; /* Block characteristics (SBC) */
804 arr[3] = n - 4; /* number of supported VPD pages */
805 } else if (0x80 == cmd[2]) { /* unit serial number */
806 arr[1] = cmd[2]; /*sanity */
807 arr[3] = len;
808 memcpy(&arr[4], lu_id_str, len);
809 } else if (0x83 == cmd[2]) { /* device identification */
810 arr[1] = cmd[2]; /*sanity */
811 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
812 target_dev_id, lu_id_num,
813 lu_id_str, len);
814 } else if (0x84 == cmd[2]) { /* Software interface ident. */
815 arr[1] = cmd[2]; /*sanity */
816 arr[3] = inquiry_evpd_84(&arr[4]);
817 } else if (0x85 == cmd[2]) { /* Management network addresses */
818 arr[1] = cmd[2]; /*sanity */
819 arr[3] = inquiry_evpd_85(&arr[4]);
820 } else if (0x86 == cmd[2]) { /* extended inquiry */
821 arr[1] = cmd[2]; /*sanity */
822 arr[3] = 0x3c; /* number of following entries */
823 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
824 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
825 else if (scsi_debug_dif)
826 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
827 else
828 arr[4] = 0x0; /* no protection stuff */
829 arr[5] = 0x7; /* head of q, ordered + simple q's */
830 } else if (0x87 == cmd[2]) { /* mode page policy */
831 arr[1] = cmd[2]; /*sanity */
832 arr[3] = 0x8; /* number of following entries */
833 arr[4] = 0x2; /* disconnect-reconnect mp */
834 arr[6] = 0x80; /* mlus, shared */
835 arr[8] = 0x18; /* protocol specific lu */
836 arr[10] = 0x82; /* mlus, per initiator port */
837 } else if (0x88 == cmd[2]) { /* SCSI Ports */
838 arr[1] = cmd[2]; /*sanity */
839 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
840 } else if (0x89 == cmd[2]) { /* ATA information */
841 arr[1] = cmd[2]; /*sanity */
842 n = inquiry_evpd_89(&arr[4]);
843 arr[2] = (n >> 8);
844 arr[3] = (n & 0xff);
845 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
846 arr[1] = cmd[2]; /*sanity */
847 arr[3] = inquiry_evpd_b0(&arr[4]);
848 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
849 arr[1] = cmd[2]; /*sanity */
850 arr[3] = inquiry_evpd_b1(&arr[4]);
851 } else {
852 /* Illegal request, invalid field in cdb */
853 mk_sense_buffer(devip, ILLEGAL_REQUEST,
854 INVALID_FIELD_IN_CDB, 0);
855 kfree(arr);
856 return check_condition_result;
858 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
859 ret = fill_from_dev_buffer(scp, arr,
860 min(len, SDEBUG_MAX_INQ_ARR_SZ));
861 kfree(arr);
862 return ret;
864 /* drops through here for a standard inquiry */
865 arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0; /* Removable disk */
866 arr[2] = scsi_debug_scsi_level;
867 arr[3] = 2; /* response_data_format==2 */
868 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
869 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
870 if (0 == scsi_debug_vpd_use_hostno)
871 arr[5] = 0x10; /* claim: implicit TGPS */
872 arr[6] = 0x10; /* claim: MultiP */
873 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
874 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
875 memcpy(&arr[8], inq_vendor_id, 8);
876 memcpy(&arr[16], inq_product_id, 16);
877 memcpy(&arr[32], inq_product_rev, 4);
878 /* version descriptors (2 bytes each) follow */
879 arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
880 arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */
881 n = 62;
882 if (scsi_debug_ptype == 0) {
883 arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
884 } else if (scsi_debug_ptype == 1) {
885 arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
887 arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */
888 ret = fill_from_dev_buffer(scp, arr,
889 min(alloc_len, SDEBUG_LONG_INQ_SZ));
890 kfree(arr);
891 return ret;
894 static int resp_requests(struct scsi_cmnd * scp,
895 struct sdebug_dev_info * devip)
897 unsigned char * sbuff;
898 unsigned char *cmd = (unsigned char *)scp->cmnd;
899 unsigned char arr[SDEBUG_SENSE_LEN];
900 int want_dsense;
901 int len = 18;
903 memset(arr, 0, sizeof(arr));
904 if (devip->reset == 1)
905 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
906 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
907 sbuff = devip->sense_buff;
908 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
909 if (want_dsense) {
910 arr[0] = 0x72;
911 arr[1] = 0x0; /* NO_SENSE in sense_key */
912 arr[2] = THRESHOLD_EXCEEDED;
913 arr[3] = 0xff; /* TEST set and MRIE==6 */
914 } else {
915 arr[0] = 0x70;
916 arr[2] = 0x0; /* NO_SENSE in sense_key */
917 arr[7] = 0xa; /* 18 byte sense buffer */
918 arr[12] = THRESHOLD_EXCEEDED;
919 arr[13] = 0xff; /* TEST set and MRIE==6 */
921 } else {
922 memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
923 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
924 /* DESC bit set and sense_buff in fixed format */
925 memset(arr, 0, sizeof(arr));
926 arr[0] = 0x72;
927 arr[1] = sbuff[2]; /* sense key */
928 arr[2] = sbuff[12]; /* asc */
929 arr[3] = sbuff[13]; /* ascq */
930 len = 8;
933 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
934 return fill_from_dev_buffer(scp, arr, len);
937 static int resp_start_stop(struct scsi_cmnd * scp,
938 struct sdebug_dev_info * devip)
940 unsigned char *cmd = (unsigned char *)scp->cmnd;
941 int power_cond, errsts, start;
943 if ((errsts = check_readiness(scp, 1, devip)))
944 return errsts;
945 power_cond = (cmd[4] & 0xf0) >> 4;
946 if (power_cond) {
947 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
949 return check_condition_result;
951 start = cmd[4] & 1;
952 if (start == devip->stopped)
953 devip->stopped = !start;
954 return 0;
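/* Reported capacity in sectors; a non-zero virtual_gb overrides the size
 * of the backing ram store. */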
957 static sector_t get_sdebug_capacity(void)
959 if (scsi_debug_virtual_gb > 0)
960 return (sector_t)scsi_debug_virtual_gb *
961 (1073741824 / scsi_debug_sector_size);
962 else
963 return sdebug_store_sectors;
966 #define SDEBUG_READCAP_ARR_SZ 8
967 static int resp_readcap(struct scsi_cmnd * scp,
968 struct sdebug_dev_info * devip)
970 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
971 unsigned int capac;
972 int errsts;
974 if ((errsts = check_readiness(scp, 1, devip)))
975 return errsts;
976 /* following just in case virtual_gb changed */
977 sdebug_capacity = get_sdebug_capacity();
978 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
979 if (sdebug_capacity < 0xffffffff) {
980 capac = (unsigned int)sdebug_capacity - 1;
981 arr[0] = (capac >> 24);
982 arr[1] = (capac >> 16) & 0xff;
983 arr[2] = (capac >> 8) & 0xff;
984 arr[3] = capac & 0xff;
985 } else {
986 arr[0] = 0xff;
987 arr[1] = 0xff;
988 arr[2] = 0xff;
989 arr[3] = 0xff;
991 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
992 arr[7] = scsi_debug_sector_size & 0xff;
993 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
996 #define SDEBUG_READCAP16_ARR_SZ 32
997 static int resp_readcap16(struct scsi_cmnd * scp,
998 struct sdebug_dev_info * devip)
1000 unsigned char *cmd = (unsigned char *)scp->cmnd;
1001 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1002 unsigned long long capac;
1003 int errsts, k, alloc_len;
1005 if ((errsts = check_readiness(scp, 1, devip)))
1006 return errsts;
1007 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1008 + cmd[13]);
1009 /* following just in case virtual_gb changed */
1010 sdebug_capacity = get_sdebug_capacity();
1011 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1012 capac = sdebug_capacity - 1;
1013 for (k = 0; k < 8; ++k, capac >>= 8)
1014 arr[7 - k] = capac & 0xff;
1015 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1016 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1017 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1018 arr[11] = scsi_debug_sector_size & 0xff;
1019 arr[13] = scsi_debug_physblk_exp & 0xf;
1020 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1022 if (scsi_debug_unmap_granularity)
1023 arr[14] |= 0x80; /* TPE */
1025 arr[15] = scsi_debug_lowest_aligned & 0xff;
1027 if (scsi_debug_dif) {
1028 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1029 arr[12] |= 1; /* PROT_EN */
1032 return fill_from_dev_buffer(scp, arr,
1033 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1036 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1038 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1039 struct sdebug_dev_info * devip)
1041 unsigned char *cmd = (unsigned char *)scp->cmnd;
1042 unsigned char * arr;
1043 int host_no = devip->sdbg_host->shost->host_no;
1044 int n, ret, alen, rlen;
1045 int port_group_a, port_group_b, port_a, port_b;
1047 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1048 + cmd[9]);
1050 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1051 if (! arr)
1052 return DID_REQUEUE << 16;
1054 * EVPD page 0x88 states we have two ports, one
1055 * real and a fake port with no device connected.
1056 * So we create two port groups with one port each
1057 * and set the group with port B to unavailable.
1059 port_a = 0x1; /* relative port A */
1060 port_b = 0x2; /* relative port B */
1061 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1062 (devip->channel & 0x7f);
1063 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1064 (devip->channel & 0x7f) + 0x80;
1067 * The asymmetric access state is cycled according to the host_id.
1069 n = 4;
1070 if (0 == scsi_debug_vpd_use_hostno) {
1071 arr[n++] = host_no % 3; /* Asymm access state */
1072 arr[n++] = 0x0F; /* claim: all states are supported */
1073 } else {
1074 arr[n++] = 0x0; /* Active/Optimized path */
1075 arr[n++] = 0x01; /* claim: only support active/optimized paths */
1077 arr[n++] = (port_group_a >> 8) & 0xff;
1078 arr[n++] = port_group_a & 0xff;
1079 arr[n++] = 0; /* Reserved */
1080 arr[n++] = 0; /* Status code */
1081 arr[n++] = 0; /* Vendor unique */
1082 arr[n++] = 0x1; /* One port per group */
1083 arr[n++] = 0; /* Reserved */
1084 arr[n++] = 0; /* Reserved */
1085 arr[n++] = (port_a >> 8) & 0xff;
1086 arr[n++] = port_a & 0xff;
1087 arr[n++] = 3; /* Port unavailable */
 1088 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1089 arr[n++] = (port_group_b >> 8) & 0xff;
1090 arr[n++] = port_group_b & 0xff;
1091 arr[n++] = 0; /* Reserved */
1092 arr[n++] = 0; /* Status code */
1093 arr[n++] = 0; /* Vendor unique */
1094 arr[n++] = 0x1; /* One port per group */
1095 arr[n++] = 0; /* Reserved */
1096 arr[n++] = 0; /* Reserved */
1097 arr[n++] = (port_b >> 8) & 0xff;
1098 arr[n++] = port_b & 0xff;
1100 rlen = n - 4;
1101 arr[0] = (rlen >> 24) & 0xff;
1102 arr[1] = (rlen >> 16) & 0xff;
1103 arr[2] = (rlen >> 8) & 0xff;
1104 arr[3] = rlen & 0xff;
 1107 * Return the smallest of:
1108 * - The allocated length
1109 * - The constructed command length
1110 * - The maximum array size
1112 rlen = min(alen,n);
1113 ret = fill_from_dev_buffer(scp, arr,
1114 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1115 kfree(arr);
1116 return ret;
1119 /* <<Following mode page info copied from ST318451LW>> */
1121 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1122 { /* Read-Write Error Recovery page for mode_sense */
1123 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1124 5, 0, 0xff, 0xff};
1126 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1127 if (1 == pcontrol)
1128 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1129 return sizeof(err_recov_pg);
1132 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1133 { /* Disconnect-Reconnect page for mode_sense */
1134 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1135 0, 0, 0, 0, 0, 0, 0, 0};
1137 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1138 if (1 == pcontrol)
1139 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1140 return sizeof(disconnect_pg);
1143 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1144 { /* Format device page for mode_sense */
1145 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1146 0, 0, 0, 0, 0, 0, 0, 0,
1147 0, 0, 0, 0, 0x40, 0, 0, 0};
1149 memcpy(p, format_pg, sizeof(format_pg));
1150 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1151 p[11] = sdebug_sectors_per & 0xff;
1152 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1153 p[13] = scsi_debug_sector_size & 0xff;
1154 if (DEV_REMOVEABLE(target))
1155 p[20] |= 0x20; /* should agree with INQUIRY */
1156 if (1 == pcontrol)
1157 memset(p + 2, 0, sizeof(format_pg) - 2);
1158 return sizeof(format_pg);
1161 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1162 { /* Caching page for mode_sense */
1163 unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1164 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1166 memcpy(p, caching_pg, sizeof(caching_pg));
1167 if (1 == pcontrol)
1168 memset(p + 2, 0, sizeof(caching_pg) - 2);
1169 return sizeof(caching_pg);
1172 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1173 { /* Control mode page for mode_sense */
1174 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1175 0, 0, 0, 0};
1176 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1177 0, 0, 0x2, 0x4b};
1179 if (scsi_debug_dsense)
1180 ctrl_m_pg[2] |= 0x4;
1181 else
1182 ctrl_m_pg[2] &= ~0x4;
1184 if (scsi_debug_ato)
1185 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1187 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1188 if (1 == pcontrol)
1189 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1190 else if (2 == pcontrol)
1191 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1192 return sizeof(ctrl_m_pg);
1196 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1197 { /* Informational Exceptions control mode page for mode_sense */
1198 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1199 0, 0, 0x0, 0x0};
1200 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1201 0, 0, 0x0, 0x0};
1203 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1204 if (1 == pcontrol)
1205 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1206 else if (2 == pcontrol)
1207 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1208 return sizeof(iec_m_pg);
1211 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1212 { /* SAS SSP mode page - short format for mode_sense */
1213 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1214 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1216 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1217 if (1 == pcontrol)
1218 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1219 return sizeof(sas_sf_m_pg);
1223 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1224 int target_dev_id)
1225 { /* SAS phy control and discover mode page for mode_sense */
1226 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1227 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1228 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1229 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1230 0x2, 0, 0, 0, 0, 0, 0, 0,
1231 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1232 0, 0, 0, 0, 0, 0, 0, 0,
1233 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1234 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1235 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1236 0x3, 0, 0, 0, 0, 0, 0, 0,
1237 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1238 0, 0, 0, 0, 0, 0, 0, 0,
1240 int port_a, port_b;
1242 port_a = target_dev_id + 1;
1243 port_b = port_a + 1;
1244 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1245 p[20] = (port_a >> 24);
1246 p[21] = (port_a >> 16) & 0xff;
1247 p[22] = (port_a >> 8) & 0xff;
1248 p[23] = port_a & 0xff;
1249 p[48 + 20] = (port_b >> 24);
1250 p[48 + 21] = (port_b >> 16) & 0xff;
1251 p[48 + 22] = (port_b >> 8) & 0xff;
1252 p[48 + 23] = port_b & 0xff;
1253 if (1 == pcontrol)
1254 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1255 return sizeof(sas_pcd_m_pg);
1258 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1259 { /* SAS SSP shared protocol specific port mode subpage */
1260 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1261 0, 0, 0, 0, 0, 0, 0, 0,
1264 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1265 if (1 == pcontrol)
1266 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1267 return sizeof(sas_sha_m_pg);
1270 #define SDEBUG_MAX_MSENSE_SZ 256
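/* MODE SENSE(6) and MODE SENSE(10): build the mode parameter header, an
 * optional block descriptor and the requested mode page(s). */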
1272 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1273 struct sdebug_dev_info * devip)
1275 unsigned char dbd, llbaa;
1276 int pcontrol, pcode, subpcode, bd_len;
1277 unsigned char dev_spec;
1278 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1279 unsigned char * ap;
1280 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1281 unsigned char *cmd = (unsigned char *)scp->cmnd;
1283 if ((errsts = check_readiness(scp, 1, devip)))
1284 return errsts;
1285 dbd = !!(cmd[1] & 0x8);
1286 pcontrol = (cmd[2] & 0xc0) >> 6;
1287 pcode = cmd[2] & 0x3f;
1288 subpcode = cmd[3];
1289 msense_6 = (MODE_SENSE == cmd[0]);
1290 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1291 if ((0 == scsi_debug_ptype) && (0 == dbd))
1292 bd_len = llbaa ? 16 : 8;
1293 else
1294 bd_len = 0;
1295 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1296 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1297 if (0x3 == pcontrol) { /* Saving values not supported */
1298 mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1300 return check_condition_result;
1302 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1303 (devip->target * 1000) - 3;
1304 /* set DPOFUA bit for disks */
1305 if (0 == scsi_debug_ptype)
1306 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1307 else
1308 dev_spec = 0x0;
1309 if (msense_6) {
1310 arr[2] = dev_spec;
1311 arr[3] = bd_len;
1312 offset = 4;
1313 } else {
1314 arr[3] = dev_spec;
1315 if (16 == bd_len)
1316 arr[4] = 0x1; /* set LONGLBA bit */
1317 arr[7] = bd_len; /* assume 255 or less */
1318 offset = 8;
1320 ap = arr + offset;
1321 if ((bd_len > 0) && (!sdebug_capacity))
1322 sdebug_capacity = get_sdebug_capacity();
1324 if (8 == bd_len) {
1325 if (sdebug_capacity > 0xfffffffe) {
1326 ap[0] = 0xff;
1327 ap[1] = 0xff;
1328 ap[2] = 0xff;
1329 ap[3] = 0xff;
1330 } else {
1331 ap[0] = (sdebug_capacity >> 24) & 0xff;
1332 ap[1] = (sdebug_capacity >> 16) & 0xff;
1333 ap[2] = (sdebug_capacity >> 8) & 0xff;
1334 ap[3] = sdebug_capacity & 0xff;
1336 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1337 ap[7] = scsi_debug_sector_size & 0xff;
1338 offset += bd_len;
1339 ap = arr + offset;
1340 } else if (16 == bd_len) {
1341 unsigned long long capac = sdebug_capacity;
1343 for (k = 0; k < 8; ++k, capac >>= 8)
1344 ap[7 - k] = capac & 0xff;
1345 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1346 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1347 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1348 ap[15] = scsi_debug_sector_size & 0xff;
1349 offset += bd_len;
1350 ap = arr + offset;
1353 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1354 /* TODO: Control Extension page */
1355 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1357 return check_condition_result;
1359 switch (pcode) {
1360 case 0x1: /* Read-Write error recovery page, direct access */
1361 len = resp_err_recov_pg(ap, pcontrol, target);
1362 offset += len;
1363 break;
1364 case 0x2: /* Disconnect-Reconnect page, all devices */
1365 len = resp_disconnect_pg(ap, pcontrol, target);
1366 offset += len;
1367 break;
1368 case 0x3: /* Format device page, direct access */
1369 len = resp_format_pg(ap, pcontrol, target);
1370 offset += len;
1371 break;
1372 case 0x8: /* Caching page, direct access */
1373 len = resp_caching_pg(ap, pcontrol, target);
1374 offset += len;
1375 break;
1376 case 0xa: /* Control Mode page, all devices */
1377 len = resp_ctrl_m_pg(ap, pcontrol, target);
1378 offset += len;
1379 break;
1380 case 0x19: /* if spc==1 then sas phy, control+discover */
1381 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1382 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1383 INVALID_FIELD_IN_CDB, 0);
1384 return check_condition_result;
1386 len = 0;
1387 if ((0x0 == subpcode) || (0xff == subpcode))
1388 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1389 if ((0x1 == subpcode) || (0xff == subpcode))
1390 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1391 target_dev_id);
1392 if ((0x2 == subpcode) || (0xff == subpcode))
1393 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1394 offset += len;
1395 break;
1396 case 0x1c: /* Informational Exceptions Mode page, all devices */
1397 len = resp_iec_m_pg(ap, pcontrol, target);
1398 offset += len;
1399 break;
1400 case 0x3f: /* Read all Mode pages */
1401 if ((0 == subpcode) || (0xff == subpcode)) {
1402 len = resp_err_recov_pg(ap, pcontrol, target);
1403 len += resp_disconnect_pg(ap + len, pcontrol, target);
1404 len += resp_format_pg(ap + len, pcontrol, target);
1405 len += resp_caching_pg(ap + len, pcontrol, target);
1406 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1407 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1408 if (0xff == subpcode) {
1409 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1410 target, target_dev_id);
1411 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1413 len += resp_iec_m_pg(ap + len, pcontrol, target);
1414 } else {
1415 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1416 INVALID_FIELD_IN_CDB, 0);
1417 return check_condition_result;
1419 offset += len;
1420 break;
1421 default:
1422 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1424 return check_condition_result;
1426 if (msense_6)
1427 arr[0] = offset - 1;
1428 else {
1429 arr[0] = ((offset - 2) >> 8) & 0xff;
1430 arr[1] = (offset - 2) & 0xff;
1432 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1435 #define SDEBUG_MAX_MSELECT_SZ 512
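/* MODE SELECT(6) and MODE SELECT(10): only changes to the Control and
 * Informational Exceptions mode pages are accepted; anything else gets an
 * illegal-request check condition. */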
1437 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1438 struct sdebug_dev_info * devip)
1440 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1441 int param_len, res, errsts, mpage;
1442 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1443 unsigned char *cmd = (unsigned char *)scp->cmnd;
1445 if ((errsts = check_readiness(scp, 1, devip)))
1446 return errsts;
1447 memset(arr, 0, sizeof(arr));
1448 pf = cmd[1] & 0x10;
1449 sp = cmd[1] & 0x1;
1450 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1451 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1452 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1453 INVALID_FIELD_IN_CDB, 0);
1454 return check_condition_result;
1456 res = fetch_to_dev_buffer(scp, arr, param_len);
1457 if (-1 == res)
1458 return (DID_ERROR << 16);
1459 else if ((res < param_len) &&
1460 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1461 printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1462 " IO sent=%d bytes\n", param_len, res);
1463 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1464 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1465 if (md_len > 2) {
1466 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1467 INVALID_FIELD_IN_PARAM_LIST, 0);
1468 return check_condition_result;
1470 off = bd_len + (mselect6 ? 4 : 8);
1471 mpage = arr[off] & 0x3f;
1472 ps = !!(arr[off] & 0x80);
1473 if (ps) {
1474 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1475 INVALID_FIELD_IN_PARAM_LIST, 0);
1476 return check_condition_result;
1478 spf = !!(arr[off] & 0x40);
1479 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1480 (arr[off + 1] + 2);
1481 if ((pg_len + off) > param_len) {
1482 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1483 PARAMETER_LIST_LENGTH_ERR, 0);
1484 return check_condition_result;
1486 switch (mpage) {
1487 case 0xa: /* Control Mode page */
1488 if (ctrl_m_pg[1] == arr[off + 1]) {
1489 memcpy(ctrl_m_pg + 2, arr + off + 2,
1490 sizeof(ctrl_m_pg) - 2);
1491 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1492 return 0;
1494 break;
1495 case 0x1c: /* Informational Exceptions Mode page */
1496 if (iec_m_pg[1] == arr[off + 1]) {
1497 memcpy(iec_m_pg + 2, arr + off + 2,
1498 sizeof(iec_m_pg) - 2);
1499 return 0;
1501 break;
1502 default:
1503 break;
1505 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1506 INVALID_FIELD_IN_PARAM_LIST, 0);
1507 return check_condition_result;
1510 static int resp_temp_l_pg(unsigned char * arr)
1512 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1513 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1516 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1517 return sizeof(temp_l_pg);
1520 static int resp_ie_l_pg(unsigned char * arr)
1522 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1525 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1526 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
1527 arr[4] = THRESHOLD_EXCEEDED;
1528 arr[5] = 0xff;
1530 return sizeof(ie_l_pg);
1533 #define SDEBUG_MAX_LSENSE_SZ 512
1535 static int resp_log_sense(struct scsi_cmnd * scp,
1536 struct sdebug_dev_info * devip)
1538 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1539 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1540 unsigned char *cmd = (unsigned char *)scp->cmnd;
1542 if ((errsts = check_readiness(scp, 1, devip)))
1543 return errsts;
1544 memset(arr, 0, sizeof(arr));
1545 ppc = cmd[1] & 0x2;
1546 sp = cmd[1] & 0x1;
1547 if (ppc || sp) {
1548 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1549 INVALID_FIELD_IN_CDB, 0);
1550 return check_condition_result;
1552 pcontrol = (cmd[2] & 0xc0) >> 6;
1553 pcode = cmd[2] & 0x3f;
1554 subpcode = cmd[3] & 0xff;
1555 alloc_len = (cmd[7] << 8) + cmd[8];
1556 arr[0] = pcode;
1557 if (0 == subpcode) {
1558 switch (pcode) {
1559 case 0x0: /* Supported log pages log page */
1560 n = 4;
1561 arr[n++] = 0x0; /* this page */
1562 arr[n++] = 0xd; /* Temperature */
1563 arr[n++] = 0x2f; /* Informational exceptions */
1564 arr[3] = n - 4;
1565 break;
1566 case 0xd: /* Temperature log page */
1567 arr[3] = resp_temp_l_pg(arr + 4);
1568 break;
1569 case 0x2f: /* Informational exceptions log page */
1570 arr[3] = resp_ie_l_pg(arr + 4);
1571 break;
1572 default:
1573 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1574 INVALID_FIELD_IN_CDB, 0);
1575 return check_condition_result;
1577 } else if (0xff == subpcode) {
1578 arr[0] |= 0x40;
1579 arr[1] = subpcode;
1580 switch (pcode) {
1581 case 0x0: /* Supported log pages and subpages log page */
1582 n = 4;
1583 arr[n++] = 0x0;
1584 arr[n++] = 0x0; /* 0,0 page */
1585 arr[n++] = 0x0;
1586 arr[n++] = 0xff; /* this page */
1587 arr[n++] = 0xd;
1588 arr[n++] = 0x0; /* Temperature */
1589 arr[n++] = 0x2f;
1590 arr[n++] = 0x0; /* Informational exceptions */
1591 arr[3] = n - 4;
1592 break;
1593 case 0xd: /* Temperature subpages */
1594 n = 4;
1595 arr[n++] = 0xd;
1596 arr[n++] = 0x0; /* Temperature */
1597 arr[3] = n - 4;
1598 break;
1599 case 0x2f: /* Informational exceptions subpages */
1600 n = 4;
1601 arr[n++] = 0x2f;
1602 arr[n++] = 0x0; /* Informational exceptions */
1603 arr[3] = n - 4;
1604 break;
1605 default:
1606 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1607 INVALID_FIELD_IN_CDB, 0);
1608 return check_condition_result;
1610 } else {
1611 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1612 INVALID_FIELD_IN_CDB, 0);
1613 return check_condition_result;
1615 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1616 return fill_from_dev_buffer(scp, arr,
1617 min(len, SDEBUG_MAX_INQ_ARR_SZ));
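/* Range-check an LBA/length pair against the simulated capacity and the
 * size of the backing store. */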
1620 static int check_device_access_params(struct sdebug_dev_info *devi,
1621 unsigned long long lba, unsigned int num)
1623 if (lba + num > sdebug_capacity) {
1624 mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1625 return check_condition_result;
1627 /* transfer length excessive (tie in to block limits VPD page) */
1628 if (num > sdebug_store_sectors) {
1629 mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1630 return check_condition_result;
1632 return 0;
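/* Move data between the command's scatter-gather list and the shared
 * ramdisk (fake_storep), wrapping around when the access runs past the end
 * of the store. */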
1635 static int do_device_access(struct scsi_cmnd *scmd,
1636 struct sdebug_dev_info *devi,
1637 unsigned long long lba, unsigned int num, int write)
1639 int ret;
1640 unsigned int block, rest = 0;
1641 int (*func)(struct scsi_cmnd *, unsigned char *, int);
1643 func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
1645 block = do_div(lba, sdebug_store_sectors);
1646 if (block + num > sdebug_store_sectors)
1647 rest = block + num - sdebug_store_sectors;
1649 ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
1650 (num - rest) * scsi_debug_sector_size);
1651 if (!ret && rest)
1652 ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);
1654 return ret;
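/* Protected READ: recompute each sector's guard (IP checksum or CRC-T10DIF,
 * per scsi_debug_guard), check guard and reference tags against the stored
 * tuples, then copy those tuples into the protection scatterlist. */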
1657 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1658 unsigned int sectors, u32 ei_lba)
1660 unsigned int i, resid;
1661 struct scatterlist *psgl;
1662 struct sd_dif_tuple *sdt;
1663 sector_t sector;
1664 sector_t tmp_sec = start_sec;
1665 void *paddr;
1667 start_sec = do_div(tmp_sec, sdebug_store_sectors);
1669 sdt = (struct sd_dif_tuple *)(dif_storep + dif_offset(start_sec));
1671 for (i = 0 ; i < sectors ; i++) {
1672 u16 csum;
1674 if (sdt[i].app_tag == 0xffff)
1675 continue;
1677 sector = start_sec + i;
1679 switch (scsi_debug_guard) {
1680 case 1:
1681 csum = ip_compute_csum(fake_storep +
1682 sector * scsi_debug_sector_size,
1683 scsi_debug_sector_size);
1684 break;
1685 case 0:
1686 csum = crc_t10dif(fake_storep +
1687 sector * scsi_debug_sector_size,
1688 scsi_debug_sector_size);
1689 csum = cpu_to_be16(csum);
1690 break;
1691 default:
1692 BUG();
1695 if (sdt[i].guard_tag != csum) {
1696 printk(KERN_ERR "%s: GUARD check failed on sector %lu" \
1697 " rcvd 0x%04x, data 0x%04x\n", __func__,
1698 (unsigned long)sector,
1699 be16_to_cpu(sdt[i].guard_tag),
1700 be16_to_cpu(csum));
1701 dif_errors++;
1702 return 0x01;
1705 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1706 be32_to_cpu(sdt[i].ref_tag) != (sector & 0xffffffff)) {
1707 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1708 __func__, (unsigned long)sector);
1709 dif_errors++;
1710 return 0x03;
1713 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1714 be32_to_cpu(sdt[i].ref_tag) != ei_lba) {
1715 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1716 __func__, (unsigned long)sector);
1717 dif_errors++;
1718 return 0x03;
1721 ei_lba++;
1724 resid = sectors * 8; /* Bytes of protection data to copy into sgl */
1725 sector = start_sec;
1727 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1728 int len = min(psgl->length, resid);
1730 paddr = kmap_atomic(sg_page(psgl), KM_IRQ0) + psgl->offset;
1731 memcpy(paddr, dif_storep + dif_offset(sector), len);
1733 sector += len >> 3;
1734 if (sector >= sdebug_store_sectors) {
1735 /* Force wrap */
1736 tmp_sec = sector;
1737 sector = do_div(tmp_sec, sdebug_store_sectors);
1739 resid -= len;
1740 kunmap_atomic(paddr, KM_IRQ0);
1743 dix_reads++;
1745 return 0;
1748 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1749 unsigned int num, struct sdebug_dev_info *devip,
1750 u32 ei_lba)
1752 unsigned long iflags;
1753 int ret;
1755 ret = check_device_access_params(devip, lba, num);
1756 if (ret)
1757 return ret;
1759 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1760 (lba <= OPT_MEDIUM_ERR_ADDR) &&
1761 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1762 /* claim unrecoverable read error */
1763 mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR,
1765 /* set info field and valid bit for fixed descriptor */
1766 if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1767 devip->sense_buff[0] |= 0x80; /* Valid bit */
1768 ret = OPT_MEDIUM_ERR_ADDR;
1769 devip->sense_buff[3] = (ret >> 24) & 0xff;
1770 devip->sense_buff[4] = (ret >> 16) & 0xff;
1771 devip->sense_buff[5] = (ret >> 8) & 0xff;
1772 devip->sense_buff[6] = ret & 0xff;
1774 return check_condition_result;
1777 /* DIX + T10 DIF */
1778 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1779 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1781 if (prot_ret) {
1782 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1783 return illegal_condition_result;
1787 read_lock_irqsave(&atomic_rw, iflags);
1788 ret = do_device_access(SCpnt, devip, lba, num, 0);
1789 read_unlock_irqrestore(&atomic_rw, iflags);
1790 return ret;
1793 void dump_sector(unsigned char *buf, int len)
1795 int i, j;
1797 printk(KERN_ERR ">>> Sector Dump <<<\n");
1799 for (i = 0 ; i < len ; i += 16) {
1800 printk(KERN_ERR "%04d: ", i);
1802 for (j = 0 ; j < 16 ; j++) {
1803 unsigned char c = buf[i+j];
1804 if (c >= 0x20 && c < 0x7e)
1805 printk(" %c ", buf[i+j]);
1806 else
1807 printk("%02x ", buf[i+j]);
1810 printk("\n");
1814 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1815 unsigned int sectors, u32 ei_lba)
1817 int i, j, ret;
1818 struct sd_dif_tuple *sdt;
1819 struct scatterlist *dsgl = scsi_sglist(SCpnt);
1820 struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1821 void *daddr, *paddr;
1822 sector_t tmp_sec = start_sec;
1823 sector_t sector;
1824 int ppage_offset;
1825 unsigned short csum;
1827 sector = do_div(tmp_sec, sdebug_store_sectors);
1829 BUG_ON(scsi_sg_count(SCpnt) == 0);
1830 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1832 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) + psgl->offset;
1833 ppage_offset = 0;
1835 /* For each data page */
1836 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1837 daddr = kmap_atomic(sg_page(dsgl), KM_IRQ0) + dsgl->offset;
1839 /* For each sector-sized chunk in data page */
1840 for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
1842 /* If we're at the end of the current
1843 * protection page advance to the next one
1844 */
1845 if (ppage_offset >= psgl->length) {
1846 kunmap_atomic(paddr, KM_IRQ1);
1847 psgl = sg_next(psgl);
1848 BUG_ON(psgl == NULL);
1849 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1)
1850 + psgl->offset;
1851 ppage_offset = 0;
1854 sdt = paddr + ppage_offset;
1856 switch (scsi_debug_guard) {
1857 case 1:
1858 csum = ip_compute_csum(daddr,
1859 scsi_debug_sector_size);
1860 break;
1861 case 0:
1862 csum = cpu_to_be16(crc_t10dif(daddr,
1863 scsi_debug_sector_size));
1864 break;
1865 default:
1866 BUG();
1867 ret = 0;
1868 goto out;
1871 if (sdt->guard_tag != csum) {
1872 printk(KERN_ERR
1873 "%s: GUARD check failed on sector %lu " \
1874 "rcvd 0x%04x, calculated 0x%04x\n",
1875 __func__, (unsigned long)sector,
1876 be16_to_cpu(sdt->guard_tag),
1877 be16_to_cpu(csum));
1878 ret = 0x01;
1879 dump_sector(daddr, scsi_debug_sector_size);
1880 goto out;
1883 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1884 be32_to_cpu(sdt->ref_tag)
1885 != (start_sec & 0xffffffff)) {
1886 printk(KERN_ERR
1887 "%s: REF check failed on sector %lu\n",
1888 __func__, (unsigned long)sector);
1889 ret = 0x03;
1890 dump_sector(daddr, scsi_debug_sector_size);
1891 goto out;
1894 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1895 be32_to_cpu(sdt->ref_tag) != ei_lba) {
1896 printk(KERN_ERR
1897 "%s: REF check failed on sector %lu\n",
1898 __func__, (unsigned long)sector);
1899 ret = 0x03;
1900 dump_sector(daddr, scsi_debug_sector_size);
1901 goto out;
1904 /* It would be great to copy this in bigger
1905 * chunks. However, for the sake of
1906 * correctness we need to verify each sector
1907 * before writing it to "stable" storage.
1908 */
1909 memcpy(dif_storep + dif_offset(sector), sdt, 8);
1911 sector++;
1913 if (sector == sdebug_store_sectors)
1914 sector = 0; /* Force wrap */
1916 start_sec++;
1917 ei_lba++;
1918 daddr += scsi_debug_sector_size;
1919 ppage_offset += sizeof(struct sd_dif_tuple);
1922 kunmap_atomic(daddr, KM_IRQ0);
1925 kunmap_atomic(paddr, KM_IRQ1);
1927 dix_writes++;
1929 return 0;
1931 out:
1932 dif_errors++;
1933 kunmap_atomic(daddr, KM_IRQ0);
1934 kunmap_atomic(paddr, KM_IRQ1);
1935 return ret;
1938 static unsigned int map_state(sector_t lba, unsigned int *num)
1940 unsigned int granularity, alignment, mapped;
1941 sector_t block, next, end;
1943 granularity = scsi_debug_unmap_granularity;
1944 alignment = granularity - scsi_debug_unmap_alignment;
1945 block = lba + alignment;
1946 do_div(block, granularity);
1948 mapped = test_bit(block, map_storep);
1950 if (mapped)
1951 next = find_next_zero_bit(map_storep, map_size, block);
1952 else
1953 next = find_next_bit(map_storep, map_size, block);
1955 end = next * granularity - scsi_debug_unmap_alignment;
1956 *num = end - lba;
1958 return mapped;
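/*
 * The map_storep bitmap tracks provisioning state with one bit per
 * "provisioning block" of unmap_granularity LBAs, offset by
 * unmap_alignment.  For example, with unmap_granularity=4 and
 * unmap_alignment=1, LBAs 1..4 share bit 1, LBAs 5..8 share bit 2,
 * and LBA 0 falls in the short leading block 0.  map_state() reports
 * whether the block containing lba is currently mapped.
 */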
1961 static void map_region(sector_t lba, unsigned int len)
1963 unsigned int granularity, alignment;
1964 sector_t end = lba + len;
1966 granularity = scsi_debug_unmap_granularity;
1967 alignment = granularity - scsi_debug_unmap_alignment;
1969 while (lba < end) {
1970 sector_t block, rem;
1972 block = lba + alignment;
1973 rem = do_div(block, granularity);
1975 set_bit(block, map_storep);
1977 lba += granularity - rem;
1981 static void unmap_region(sector_t lba, unsigned int len)
1983 unsigned int granularity, alignment;
1984 sector_t end = lba + len;
1986 granularity = scsi_debug_unmap_granularity;
1987 alignment = granularity - scsi_debug_unmap_alignment;
1989 while (lba < end) {
1990 sector_t block, rem;
1992 block = lba + alignment;
1993 rem = do_div(block, granularity);
1995 if (rem == 0 && lba + granularity <= end)
1996 clear_bit(block, map_storep);
1998 lba += granularity - rem;
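/*
 * Note the asymmetry with map_region(): a write into any part of a
 * provisioning block marks the whole block mapped, whereas
 * unmap_region() only clears a block's bit when the unmap range covers
 * it completely (rem == 0 and lba + granularity <= end), so partially
 * unmapped blocks stay mapped.
 */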
2002 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2003 unsigned int num, struct sdebug_dev_info *devip,
2004 u32 ei_lba)
2006 unsigned long iflags;
2007 int ret;
2009 ret = check_device_access_params(devip, lba, num);
2010 if (ret)
2011 return ret;
2013 /* DIX + T10 DIF */
2014 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2015 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2017 if (prot_ret) {
2018 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2019 return illegal_condition_result;
2023 write_lock_irqsave(&atomic_rw, iflags);
2024 ret = do_device_access(SCpnt, devip, lba, num, 1);
2025 if (scsi_debug_unmap_granularity)
2026 map_region(lba, num);
2027 write_unlock_irqrestore(&atomic_rw, iflags);
2028 if (-1 == ret)
2029 return (DID_ERROR << 16);
2030 else if ((ret < (num * scsi_debug_sector_size)) &&
2031 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2032 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2033 "IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2035 return 0;
2038 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2039 unsigned int num, struct sdebug_dev_info *devip,
2040 u32 ei_lba, unsigned int unmap)
2042 unsigned long iflags;
2043 unsigned long long i;
2044 int ret;
2046 ret = check_device_access_params(devip, lba, num);
2047 if (ret)
2048 return ret;
2050 write_lock_irqsave(&atomic_rw, iflags);
2052 if (unmap && scsi_debug_unmap_granularity) {
2053 unmap_region(lba, num);
2054 goto out;
2057 /* Else fetch one logical block */
2058 ret = fetch_to_dev_buffer(scmd,
2059 fake_storep + (lba * scsi_debug_sector_size),
2060 scsi_debug_sector_size);
2062 if (-1 == ret) {
2063 write_unlock_irqrestore(&atomic_rw, iflags);
2064 return (DID_ERROR << 16);
2065 } else if ((ret < (num * scsi_debug_sector_size)) &&
2066 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2067 printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2068 "IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2070 /* Copy first sector to remaining blocks */
2071 for (i = 1 ; i < num ; i++)
2072 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2073 fake_storep + (lba * scsi_debug_sector_size),
2074 scsi_debug_sector_size);
2076 if (scsi_debug_unmap_granularity)
2077 map_region(lba, num);
2078 out:
2079 write_unlock_irqrestore(&atomic_rw, iflags);
2081 return 0;
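/*
 * WRITE SAME handling: if the UNMAP bit was set and thin provisioning
 * is enabled (unmap_granularity != 0), the range is simply unmapped.
 * Otherwise a single logical block is fetched from the data-out buffer
 * and replicated across the remaining num - 1 blocks of the range.
 */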
2084 struct unmap_block_desc {
2085 __be64 lba;
2086 __be32 blocks;
2087 __be32 __reserved;
2090 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2092 unsigned char *buf;
2093 struct unmap_block_desc *desc;
2094 unsigned int i, payload_len, descriptors;
2095 int ret;
2097 ret = check_readiness(scmd, 1, devip);
2098 if (ret)
2099 return ret;
2101 payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2102 BUG_ON(scsi_bufflen(scmd) != payload_len);
2104 descriptors = (payload_len - 8) / 16;
2106 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2107 if (!buf)
2108 return check_condition_result;
2110 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2112 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2113 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2115 desc = (void *)&buf[8];
2117 for (i = 0 ; i < descriptors ; i++) {
2118 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2119 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2121 ret = check_device_access_params(devip, lba, num);
2122 if (ret)
2123 goto out;
2125 unmap_region(lba, num);
2128 ret = 0;
2130 out:
2131 kfree(buf);
2133 return ret;
2136 #define SDEBUG_GET_LBA_STATUS_LEN 32
2138 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2139 struct sdebug_dev_info * devip)
2141 unsigned long long lba;
2142 unsigned int alloc_len, mapped, num;
2143 unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2144 int ret;
2146 ret = check_readiness(scmd, 1, devip);
2147 if (ret)
2148 return ret;
2150 lba = get_unaligned_be64(&scmd->cmnd[2]);
2151 alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2153 if (alloc_len < 24)
2154 return 0;
2156 ret = check_device_access_params(devip, lba, 1);
2157 if (ret)
2158 return ret;
2160 mapped = map_state(lba, &num);
2162 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2163 put_unaligned_be32(16, &arr[0]); /* Parameter Data Length */
2164 put_unaligned_be64(lba, &arr[8]); /* LBA */
2165 put_unaligned_be32(num, &arr[16]); /* Number of blocks */
2166 arr[20] = !mapped; /* mapped = 0, unmapped = 1 */
2168 return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2171 #define SDEBUG_RLUN_ARR_SZ 256
2173 static int resp_report_luns(struct scsi_cmnd * scp,
2174 struct sdebug_dev_info * devip)
2176 unsigned int alloc_len;
2177 int lun_cnt, i, upper, num, n, wlun, lun;
2178 unsigned char *cmd = (unsigned char *)scp->cmnd;
2179 int select_report = (int)cmd[2];
2180 struct scsi_lun *one_lun;
2181 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2182 unsigned char * max_addr;
2184 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2185 if ((alloc_len < 4) || (select_report > 2)) {
2186 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2187 0);
2188 return check_condition_result;
2190 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2191 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2192 lun_cnt = scsi_debug_max_luns;
2193 if (1 == select_report)
2194 lun_cnt = 0;
2195 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2196 --lun_cnt;
2197 wlun = (select_report > 0) ? 1 : 0;
2198 num = lun_cnt + wlun;
2199 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2200 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2201 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2202 sizeof(struct scsi_lun)), num);
2203 if (n < num) {
2204 wlun = 0;
2205 lun_cnt = n;
2207 one_lun = (struct scsi_lun *) &arr[8];
2208 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2209 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2210 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2211 i++, lun++) {
2212 upper = (lun >> 8) & 0x3f;
2213 if (upper)
2214 one_lun[i].scsi_lun[0] =
2215 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2216 one_lun[i].scsi_lun[1] = lun & 0xff;
2218 if (wlun) {
2219 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2220 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2221 i++;
2223 alloc_len = (unsigned char *)(one_lun + i) - arr;
2224 return fill_from_dev_buffer(scp, arr,
2225 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
2228 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2229 unsigned int num, struct sdebug_dev_info *devip)
2231 int i, j, ret = -1;
2232 unsigned char *kaddr, *buf;
2233 unsigned int offset;
2234 struct scatterlist *sg;
2235 struct scsi_data_buffer *sdb = scsi_in(scp);
2237 /* It would be better not to use a temporary buffer here. */
2238 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2239 if (!buf)
2240 return ret;
2242 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2244 offset = 0;
2245 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2246 kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
2247 if (!kaddr)
2248 goto out;
2250 for (j = 0; j < sg->length; j++)
2251 *(kaddr + sg->offset + j) ^= *(buf + offset + j);
2253 offset += sg->length;
2254 kunmap_atomic(kaddr, KM_USER0);
2256 ret = 0;
2257 out:
2258 kfree(buf);
2260 return ret;
2263 /* Called when a queued command's delay timer expires. */
2264 static void timer_intr_handler(unsigned long indx)
2266 struct sdebug_queued_cmd * sqcp;
2267 unsigned long iflags;
2269 if (indx >= SCSI_DEBUG_CANQUEUE) {
2270 printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2271 "large\n");
2272 return;
2274 spin_lock_irqsave(&queued_arr_lock, iflags);
2275 sqcp = &queued_arr[(int)indx];
2276 if (! sqcp->in_use) {
2277 printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2278 "interrupt\n");
2279 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2280 return;
2282 sqcp->in_use = 0;
2283 if (sqcp->done_funct) {
2284 sqcp->a_cmnd->result = sqcp->scsi_result;
2285 sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2287 sqcp->done_funct = NULL;
2288 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2292 static struct sdebug_dev_info *
2293 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2295 struct sdebug_dev_info *devip;
2297 devip = kzalloc(sizeof(*devip), flags);
2298 if (devip) {
2299 devip->sdbg_host = sdbg_host;
2300 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2302 return devip;
2305 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2307 struct sdebug_host_info * sdbg_host;
2308 struct sdebug_dev_info * open_devip = NULL;
2309 struct sdebug_dev_info * devip =
2310 (struct sdebug_dev_info *)sdev->hostdata;
2312 if (devip)
2313 return devip;
2314 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2315 if (!sdbg_host) {
2316 printk(KERN_ERR "Host info NULL\n");
2317 return NULL;
2319 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2320 if ((devip->used) && (devip->channel == sdev->channel) &&
2321 (devip->target == sdev->id) &&
2322 (devip->lun == sdev->lun))
2323 return devip;
2324 else {
2325 if ((!devip->used) && (!open_devip))
2326 open_devip = devip;
2329 if (!open_devip) { /* try and make a new one */
2330 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2331 if (!open_devip) {
2332 printk(KERN_ERR "%s: out of memory at line %d\n",
2333 __func__, __LINE__);
2334 return NULL;
2338 open_devip->channel = sdev->channel;
2339 open_devip->target = sdev->id;
2340 open_devip->lun = sdev->lun;
2341 open_devip->sdbg_host = sdbg_host;
2342 open_devip->reset = 1;
2343 open_devip->used = 1;
2344 memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
2345 if (scsi_debug_dsense)
2346 open_devip->sense_buff[0] = 0x72;
2347 else {
2348 open_devip->sense_buff[0] = 0x70;
2349 open_devip->sense_buff[7] = 0xa;
2351 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2352 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2354 return open_devip;
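/*
 * New device slots start with an empty sense buffer pre-formatted for
 * the configured sense style: response code 0x72 for descriptor format
 * sense data (dsense=1), or 0x70 with an additional sense length of
 * 0xa bytes for fixed format (the default).
 */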
2357 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2359 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2360 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2361 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2362 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2363 return 0;
2366 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2368 struct sdebug_dev_info *devip;
2370 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2371 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2372 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2373 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2374 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2375 devip = devInfoReg(sdp);
2376 if (NULL == devip)
2377 return 1; /* no resources, will be marked offline */
2378 sdp->hostdata = devip;
2379 if (sdp->host->cmd_per_lun)
2380 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2381 sdp->host->cmd_per_lun);
2382 blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2383 return 0;
2386 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2388 struct sdebug_dev_info *devip =
2389 (struct sdebug_dev_info *)sdp->hostdata;
2391 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2392 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2393 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2394 if (devip) {
2395 /* make this slot available for re-use */
2396 devip->used = 0;
2397 sdp->hostdata = NULL;
2401 /* Returns 1 if 'cmnd' was found and its timer deleted, else returns 0 */
2402 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2404 unsigned long iflags;
2405 int k;
2406 struct sdebug_queued_cmd *sqcp;
2408 spin_lock_irqsave(&queued_arr_lock, iflags);
2409 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
2410 sqcp = &queued_arr[k];
2411 if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2412 del_timer_sync(&sqcp->cmnd_timer);
2413 sqcp->in_use = 0;
2414 sqcp->a_cmnd = NULL;
2415 break;
2418 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2419 return (k < SCSI_DEBUG_CANQUEUE) ? 1 : 0;
2422 /* Deletes (stops) timers of all queued commands */
2423 static void stop_all_queued(void)
2425 unsigned long iflags;
2426 int k;
2427 struct sdebug_queued_cmd *sqcp;
2429 spin_lock_irqsave(&queued_arr_lock, iflags);
2430 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
2431 sqcp = &queued_arr[k];
2432 if (sqcp->in_use && sqcp->a_cmnd) {
2433 del_timer_sync(&sqcp->cmnd_timer);
2434 sqcp->in_use = 0;
2435 sqcp->a_cmnd = NULL;
2438 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2441 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2443 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2444 printk(KERN_INFO "scsi_debug: abort\n");
2445 ++num_aborts;
2446 stop_queued_cmnd(SCpnt);
2447 return SUCCESS;
2450 static int scsi_debug_biosparam(struct scsi_device *sdev,
2451 struct block_device * bdev, sector_t capacity, int *info)
2453 int res;
2454 unsigned char *buf;
2456 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2457 printk(KERN_INFO "scsi_debug: biosparam\n");
2458 buf = scsi_bios_ptable(bdev);
2459 if (buf) {
2460 res = scsi_partsize(buf, capacity,
2461 &info[2], &info[0], &info[1]);
2462 kfree(buf);
2463 if (! res)
2464 return res;
2466 info[0] = sdebug_heads;
2467 info[1] = sdebug_sectors_per;
2468 info[2] = sdebug_cylinders_per;
2469 return 0;
2472 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2474 struct sdebug_dev_info * devip;
2476 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2477 printk(KERN_INFO "scsi_debug: device_reset\n");
2478 ++num_dev_resets;
2479 if (SCpnt) {
2480 devip = devInfoReg(SCpnt->device);
2481 if (devip)
2482 devip->reset = 1;
2484 return SUCCESS;
2487 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2489 struct sdebug_host_info *sdbg_host;
2490 struct sdebug_dev_info * dev_info;
2491 struct scsi_device * sdp;
2492 struct Scsi_Host * hp;
2494 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2495 printk(KERN_INFO "scsi_debug: bus_reset\n");
2496 ++num_bus_resets;
2497 if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2498 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2499 if (sdbg_host) {
2500 list_for_each_entry(dev_info,
2501 &sdbg_host->dev_info_list,
2502 dev_list)
2503 dev_info->reset = 1;
2506 return SUCCESS;
2509 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2511 struct sdebug_host_info * sdbg_host;
2512 struct sdebug_dev_info * dev_info;
2514 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2515 printk(KERN_INFO "scsi_debug: host_reset\n");
2516 ++num_host_resets;
2517 spin_lock(&sdebug_host_list_lock);
2518 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2519 list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2520 dev_list)
2521 dev_info->reset = 1;
2523 spin_unlock(&sdebug_host_list_lock);
2524 stop_all_queued();
2525 return SUCCESS;
2528 /* Initializes timers in queued array */
2529 static void __init init_all_queued(void)
2531 unsigned long iflags;
2532 int k;
2533 struct sdebug_queued_cmd * sqcp;
2535 spin_lock_irqsave(&queued_arr_lock, iflags);
2536 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
2537 sqcp = &queued_arr[k];
2538 init_timer(&sqcp->cmnd_timer);
2539 sqcp->in_use = 0;
2540 sqcp->a_cmnd = NULL;
2542 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2545 static void __init sdebug_build_parts(unsigned char *ramp,
2546 unsigned long store_size)
2548 struct partition * pp;
2549 int starts[SDEBUG_MAX_PARTS + 2];
2550 int sectors_per_part, num_sectors, k;
2551 int heads_by_sects, start_sec, end_sec;
2553 /* assume partition table already zeroed */
2554 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2555 return;
2556 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2557 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2558 printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2559 "partitions to %d\n", SDEBUG_MAX_PARTS);
2561 num_sectors = (int)sdebug_store_sectors;
2562 sectors_per_part = (num_sectors - sdebug_sectors_per)
2563 / scsi_debug_num_parts;
2564 heads_by_sects = sdebug_heads * sdebug_sectors_per;
2565 starts[0] = sdebug_sectors_per;
2566 for (k = 1; k < scsi_debug_num_parts; ++k)
2567 starts[k] = ((k * sectors_per_part) / heads_by_sects)
2568 * heads_by_sects;
2569 starts[scsi_debug_num_parts] = num_sectors;
2570 starts[scsi_debug_num_parts + 1] = 0;
2572 ramp[510] = 0x55; /* magic partition markings */
2573 ramp[511] = 0xAA;
2574 pp = (struct partition *)(ramp + 0x1be);
2575 for (k = 0; starts[k + 1]; ++k, ++pp) {
2576 start_sec = starts[k];
2577 end_sec = starts[k + 1] - 1;
2578 pp->boot_ind = 0;
2580 pp->cyl = start_sec / heads_by_sects;
2581 pp->head = (start_sec - (pp->cyl * heads_by_sects))
2582 / sdebug_sectors_per;
2583 pp->sector = (start_sec % sdebug_sectors_per) + 1;
2585 pp->end_cyl = end_sec / heads_by_sects;
2586 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2587 / sdebug_sectors_per;
2588 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2590 pp->start_sect = start_sec;
2591 pp->nr_sects = end_sec - start_sec + 1;
2592 pp->sys_ind = 0x83; /* plain Linux partition */
2596 static int schedule_resp(struct scsi_cmnd * cmnd,
2597 struct sdebug_dev_info * devip,
2598 done_funct_t done, int scsi_result, int delta_jiff)
2600 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2601 if (scsi_result) {
2602 struct scsi_device * sdp = cmnd->device;
2604 printk(KERN_INFO "scsi_debug: <%u %u %u %u> "
2605 "non-zero result=0x%x\n", sdp->host->host_no,
2606 sdp->channel, sdp->id, sdp->lun, scsi_result);
2609 if (cmnd && devip) {
2610 /* simulate autosense by this driver */
2611 if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2612 memcpy(cmnd->sense_buffer, devip->sense_buff,
2613 (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2614 SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
2616 if (delta_jiff <= 0) {
2617 if (cmnd)
2618 cmnd->result = scsi_result;
2619 if (done)
2620 done(cmnd);
2621 return 0;
2622 } else {
2623 unsigned long iflags;
2624 int k;
2625 struct sdebug_queued_cmd * sqcp = NULL;
2627 spin_lock_irqsave(&queued_arr_lock, iflags);
2628 for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
2629 sqcp = &queued_arr[k];
2630 if (! sqcp->in_use)
2631 break;
2633 if (k >= SCSI_DEBUG_CANQUEUE) {
2634 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2635 printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2636 return 1; /* report busy to mid level */
2638 sqcp->in_use = 1;
2639 sqcp->a_cmnd = cmnd;
2640 sqcp->scsi_result = scsi_result;
2641 sqcp->done_funct = done;
2642 sqcp->cmnd_timer.function = timer_intr_handler;
2643 sqcp->cmnd_timer.data = k;
2644 sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2645 add_timer(&sqcp->cmnd_timer);
2646 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2647 if (cmnd)
2648 cmnd->result = 0;
2649 return 0;
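/*
 * schedule_resp() either completes the command immediately in the
 * caller's context (delta_jiff <= 0, i.e. the delay parameter is zero
 * or negative) or parks it in queued_arr[] with a timer that fires
 * timer_intr_handler() after delta_jiff jiffies.  Returning 1 when all
 * SCSI_DEBUG_CANQUEUE slots are in use reports a busy host to the
 * midlayer.
 */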
2652 /* Note: The following macros create attribute files in the
2653 /sys/module/scsi_debug/parameters directory. Unfortunately this
2654 driver is unaware of changes made via those files and cannot trigger
2655 auxiliary actions, as it can when the corresponding attribute in the
2656 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
2657 */
2658 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2659 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2660 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2661 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2662 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2663 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2664 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2665 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2666 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2667 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2668 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2669 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2670 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2671 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2672 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2673 S_IRUGO | S_IWUSR);
2674 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2675 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2676 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2677 module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2678 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2679 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2680 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2681 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2682 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2683 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2684 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2686 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2687 MODULE_DESCRIPTION("SCSI debug adapter driver");
2688 MODULE_LICENSE("GPL");
2689 MODULE_VERSION(SCSI_DEBUG_VERSION);
2691 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2692 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2693 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2694 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2695 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2696 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2697 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2698 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2699 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2700 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2701 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2702 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2703 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2704 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2705 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2706 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2707 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2708 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2709 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2710 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2711 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2712 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2713 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0)");
2714 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=0)");
2715 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=0)");
2716 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
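/*
 * Example usage (one possible invocation; adjust values as needed).
 * The parameters described above map directly to module options, e.g.
 *
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 \
 *            sector_size=4096 dif=1 dix=1 guard=0
 *
 * which builds one pseudo host with 2 targets of 4 LUNs each, backed
 * by a shared 256 MB ramdisk of 4096-byte logical blocks with DIF
 * Type 1 protection and DIX enabled (CRC guard).
 */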
2718 static char sdebug_info[256];
2720 static const char * scsi_debug_info(struct Scsi_Host * shp)
2722 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2723 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2724 scsi_debug_version_date, scsi_debug_dev_size_mb,
2725 scsi_debug_opts);
2726 return sdebug_info;
2729 /* scsi_debug_proc_info
2730 * Used because the driver has no support of its own for /proc/scsi
2731 */
2732 static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
2733 int length, int inout)
2735 int len, pos, begin;
2736 int orig_length;
2738 orig_length = length;
2740 if (inout == 1) {
2741 char arr[16];
2742 int minLen = length > 15 ? 15 : length;
2744 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2745 return -EACCES;
2746 memcpy(arr, buffer, minLen);
2747 arr[minLen] = '\0';
2748 if (1 != sscanf(arr, "%d", &pos))
2749 return -EINVAL;
2750 scsi_debug_opts = pos;
2751 if (scsi_debug_every_nth != 0)
2752 scsi_debug_cmnd_count = 0;
2753 return length;
2755 begin = 0;
2756 pos = len = sprintf(buffer, "scsi_debug adapter driver, version "
2757 "%s [%s]\n"
2758 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2759 "every_nth=%d(curr:%d)\n"
2760 "delay=%d, max_luns=%d, scsi_level=%d\n"
2761 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2762 "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2763 "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2764 SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2765 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2766 scsi_debug_cmnd_count, scsi_debug_delay,
2767 scsi_debug_max_luns, scsi_debug_scsi_level,
2768 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2769 sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2770 num_host_resets, dix_reads, dix_writes, dif_errors);
2771 if (pos < offset) {
2772 len = 0;
2773 begin = pos;
2775 *start = buffer + (offset - begin); /* Start of wanted data */
2776 len -= (offset - begin);
2777 if (len > length)
2778 len = length;
2779 return len;
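/*
 * This proc_info hook serves the legacy /proc/scsi interface (typically
 * exposed as /proc/scsi/scsi_debug/<host_no>): reads return the
 * statistics formatted above, while a write of a decimal number updates
 * scsi_debug_opts and requires both CAP_SYS_ADMIN and CAP_SYS_RAWIO.
 */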
2782 static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
2784 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2787 static ssize_t sdebug_delay_store(struct device_driver * ddp,
2788 const char * buf, size_t count)
2790 int delay;
2791 char work[20];
2793 if (1 == sscanf(buf, "%10s", work)) {
2794 if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2795 scsi_debug_delay = delay;
2796 return count;
2799 return -EINVAL;
2801 DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show,
2802 sdebug_delay_store);
2804 static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf)
2806 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2809 static ssize_t sdebug_opts_store(struct device_driver * ddp,
2810 const char * buf, size_t count)
2812 int opts;
2813 char work[20];
2815 if (1 == sscanf(buf, "%10s", work)) {
2816 if (0 == strnicmp(work,"0x", 2)) {
2817 if (1 == sscanf(&work[2], "%x", &opts))
2818 goto opts_done;
2819 } else {
2820 if (1 == sscanf(work, "%d", &opts))
2821 goto opts_done;
2824 return -EINVAL;
2825 opts_done:
2826 scsi_debug_opts = opts;
2827 scsi_debug_cmnd_count = 0;
2828 return count;
2830 DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show,
2831 sdebug_opts_store);
2833 static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf)
2835 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2837 static ssize_t sdebug_ptype_store(struct device_driver * ddp,
2838 const char * buf, size_t count)
2840 int n;
2842 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2843 scsi_debug_ptype = n;
2844 return count;
2846 return -EINVAL;
2848 DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store);
2850 static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf)
2852 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2854 static ssize_t sdebug_dsense_store(struct device_driver * ddp,
2855 const char * buf, size_t count)
2857 int n;
2859 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2860 scsi_debug_dsense = n;
2861 return count;
2863 return -EINVAL;
2865 DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
2866 sdebug_dsense_store);
2868 static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
2870 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2872 static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
2873 const char * buf, size_t count)
2875 int n;
2877 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2878 scsi_debug_fake_rw = n;
2879 return count;
2881 return -EINVAL;
2883 DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
2884 sdebug_fake_rw_store);
2886 static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
2888 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2890 static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
2891 const char * buf, size_t count)
2893 int n;
2895 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2896 scsi_debug_no_lun_0 = n;
2897 return count;
2899 return -EINVAL;
2901 DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
2902 sdebug_no_lun_0_store);
2904 static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
2906 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
2908 static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
2909 const char * buf, size_t count)
2911 int n;
2913 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2914 scsi_debug_num_tgts = n;
2915 sdebug_max_tgts_luns();
2916 return count;
2918 return -EINVAL;
2920 DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show,
2921 sdebug_num_tgts_store);
2923 static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf)
2925 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
2927 DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL);
2929 static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf)
2931 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
2933 DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL);
2935 static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf)
2937 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
2939 static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
2940 const char * buf, size_t count)
2942 int nth;
2944 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
2945 scsi_debug_every_nth = nth;
2946 scsi_debug_cmnd_count = 0;
2947 return count;
2949 return -EINVAL;
2951 DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show,
2952 sdebug_every_nth_store);
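/*
 * every_nth combines with the opts bits to inject faults periodically.
 * For example (sysfs path as described in the surrounding comments):
 *
 *   echo 100 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
 *   echo 8   > /sys/bus/pseudo/drivers/scsi_debug/opts
 *
 * arranges for roughly every 100th command to be a candidate for
 * injection; READs and WRITEs hit at that point complete with a
 * recovered-error sense (opts bit 8 -> recovered_err, per the
 * parameter descriptions above).
 */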
2954 static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf)
2956 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
2958 static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
2959 const char * buf, size_t count)
2961 int n;
2963 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2964 scsi_debug_max_luns = n;
2965 sdebug_max_tgts_luns();
2966 return count;
2968 return -EINVAL;
2970 DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
2971 sdebug_max_luns_store);
2973 static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
2975 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
2977 DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);
2979 static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
2981 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
2983 static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
2984 const char * buf, size_t count)
2986 int n;
2988 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2989 scsi_debug_virtual_gb = n;
2991 sdebug_capacity = get_sdebug_capacity();
2993 return count;
2995 return -EINVAL;
2997 DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
2998 sdebug_virtual_gb_store);
3000 static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
3002 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3005 static ssize_t sdebug_add_host_store(struct device_driver * ddp,
3006 const char * buf, size_t count)
3008 int delta_hosts;
3010 if (sscanf(buf, "%d", &delta_hosts) != 1)
3011 return -EINVAL;
3012 if (delta_hosts > 0) {
3013 do {
3014 sdebug_add_adapter();
3015 } while (--delta_hosts);
3016 } else if (delta_hosts < 0) {
3017 do {
3018 sdebug_remove_adapter();
3019 } while (++delta_hosts);
3021 return count;
3023 DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
3024 sdebug_add_host_store);
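/*
 * For example (path per the default sysfs layout described in the
 * surrounding comments):
 *
 *   echo 2 > /sys/bus/pseudo/drivers/scsi_debug/add_host
 *
 * adds two more pseudo adapters, and writing a negative count, e.g.
 * "echo -2 > .../add_host", removes that many again.
 */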
3026 static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
3027 char * buf)
3029 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3031 static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
3032 const char * buf, size_t count)
3034 int n;
3036 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3037 scsi_debug_vpd_use_hostno = n;
3038 return count;
3040 return -EINVAL;
3042 DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
3043 sdebug_vpd_use_hostno_store);
3045 static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
3047 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3049 DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
3051 static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf)
3053 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3055 DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL);
3057 static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf)
3059 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3061 DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
3063 static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
3065 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
3067 DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
3069 static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
3071 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3073 DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);
3075 static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
3077 ssize_t count;
3079 if (scsi_debug_unmap_granularity == 0)
3080 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3081 sdebug_store_sectors);
3083 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3085 buf[count++] = '\n';
3086 buf[count++] = 0;
3088 return count;
3090 DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
3093 /* Note: The following function creates attribute files in the
3094 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3095 files (over those found in the /sys/module/scsi_debug/parameters
3096 directory) is that auxiliary actions can be triggered when an attribute
3097 is changed. For example see: sdebug_add_host_store() above.
3098 */
3099 static int do_create_driverfs_files(void)
3101 int ret;
3103 ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3104 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
3105 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3106 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3107 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3108 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3109 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3110 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3111 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3112 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3113 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3114 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
3115 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3116 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3117 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3118 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3119 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix);
3120 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
3121 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
3122 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
3123 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
3124 return ret;
3127 static void do_remove_driverfs_files(void)
3129 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
3130 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
3131 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
3132 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
3133 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix);
3134 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3135 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3136 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3137 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3138 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
3139 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3140 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3141 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3142 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3143 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3144 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3145 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3146 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3147 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3148 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay);
3149 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3152 static void pseudo_0_release(struct device *dev)
3154 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3155 printk(KERN_INFO "scsi_debug: pseudo_0_release() called\n");
3158 static struct device pseudo_primary = {
3159 .init_name = "pseudo_0",
3160 .release = pseudo_0_release,
3163 static int __init scsi_debug_init(void)
3165 unsigned long sz;
3166 int host_to_add;
3167 int k;
3168 int ret;
3170 switch (scsi_debug_sector_size) {
3171 case 512:
3172 case 1024:
3173 case 2048:
3174 case 4096:
3175 break;
3176 default:
3177 printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3178 scsi_debug_sector_size);
3179 return -EINVAL;
3182 switch (scsi_debug_dif) {
3184 case SD_DIF_TYPE0_PROTECTION:
3185 case SD_DIF_TYPE1_PROTECTION:
3186 case SD_DIF_TYPE2_PROTECTION:
3187 case SD_DIF_TYPE3_PROTECTION:
3188 break;
3190 default:
3191 printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3192 return -EINVAL;
3195 if (scsi_debug_guard > 1) {
3196 printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3197 return -EINVAL;
3200 if (scsi_debug_ato > 1) {
3201 printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
3202 return -EINVAL;
3205 if (scsi_debug_physblk_exp > 15) {
3206 printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3207 scsi_debug_physblk_exp);
3208 return -EINVAL;
3211 if (scsi_debug_lowest_aligned > 0x3fff) {
3212 printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3213 scsi_debug_lowest_aligned);
3214 return -EINVAL;
3217 if (scsi_debug_dev_size_mb < 1)
3218 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
3219 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3220 sdebug_store_sectors = sz / scsi_debug_sector_size;
3221 sdebug_capacity = get_sdebug_capacity();
3223 /* play around with geometry, don't waste too much on track 0 */
3224 sdebug_heads = 8;
3225 sdebug_sectors_per = 32;
3226 if (scsi_debug_dev_size_mb >= 256)
3227 sdebug_heads = 64;
3228 else if (scsi_debug_dev_size_mb >= 16)
3229 sdebug_heads = 32;
3230 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3231 (sdebug_sectors_per * sdebug_heads);
3232 if (sdebug_cylinders_per >= 1024) {
3233 /* other LLDs do this; implies >= 1GB ram disk ... */
3234 sdebug_heads = 255;
3235 sdebug_sectors_per = 63;
3236 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3237 (sdebug_sectors_per * sdebug_heads);
3240 fake_storep = vmalloc(sz);
3241 if (NULL == fake_storep) {
3242 printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3243 return -ENOMEM;
3245 memset(fake_storep, 0, sz);
3246 if (scsi_debug_num_parts > 0)
3247 sdebug_build_parts(fake_storep, sz);
3249 if (scsi_debug_dif) {
3250 int dif_size;
3252 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3253 dif_storep = vmalloc(dif_size);
3255 printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
3256 dif_size, dif_storep);
3258 if (dif_storep == NULL) {
3259 printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
3260 ret = -ENOMEM;
3261 goto free_vm;
3264 memset(dif_storep, 0xff, dif_size);
3267 if (scsi_debug_unmap_granularity) {
3268 unsigned int map_bytes;
3270 if (scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
3271 printk(KERN_ERR
3272 "%s: ERR: unmap_granularity < unmap_alignment\n",
3273 __func__);
3274 return -EINVAL;
3277 map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
3278 map_bytes = map_size >> 3;
3279 map_storep = vmalloc(map_bytes);
3281 printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3282 map_size);
3284 if (map_storep == NULL) {
3285 printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3286 ret = -ENOMEM;
3287 goto free_vm;
3290 memset(map_storep, 0x0, map_bytes);
3292 /* Map first 1KB for partition table */
3293 if (scsi_debug_num_parts)
3294 map_region(0, 2);
3297 ret = device_register(&pseudo_primary);
3298 if (ret < 0) {
3299 printk(KERN_WARNING "scsi_debug: device_register error: %d\n",
3300 ret);
3301 goto free_vm;
3303 ret = bus_register(&pseudo_lld_bus);
3304 if (ret < 0) {
3305 printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3306 ret);
3307 goto dev_unreg;
3309 ret = driver_register(&sdebug_driverfs_driver);
3310 if (ret < 0) {
3311 printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
3312 ret);
3313 goto bus_unreg;
3315 ret = do_create_driverfs_files();
3316 if (ret < 0) {
3317 printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
3318 ret);
3319 goto del_files;
3322 init_all_queued();
3324 host_to_add = scsi_debug_add_host;
3325 scsi_debug_add_host = 0;
3327 for (k = 0; k < host_to_add; k++) {
3328 if (sdebug_add_adapter()) {
3329 printk(KERN_ERR "scsi_debug_init: "
3330 "sdebug_add_adapter failed k=%d\n", k);
3331 break;
3335 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3336 printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3337 scsi_debug_add_host);
3339 return 0;
3341 del_files:
3342 do_remove_driverfs_files();
3343 driver_unregister(&sdebug_driverfs_driver);
3344 bus_unreg:
3345 bus_unregister(&pseudo_lld_bus);
3346 dev_unreg:
3347 device_unregister(&pseudo_primary);
3348 free_vm:
3349 if (map_storep)
3350 vfree(map_storep);
3351 if (dif_storep)
3352 vfree(dif_storep);
3353 vfree(fake_storep);
3355 return ret;
3358 static void __exit scsi_debug_exit(void)
3360 int k = scsi_debug_add_host;
3362 stop_all_queued();
3363 for (; k; k--)
3364 sdebug_remove_adapter();
3365 do_remove_driverfs_files();
3366 driver_unregister(&sdebug_driverfs_driver);
3367 bus_unregister(&pseudo_lld_bus);
3368 device_unregister(&pseudo_primary);
3370 if (dif_storep)
3371 vfree(dif_storep);
3373 vfree(fake_storep);
3376 device_initcall(scsi_debug_init);
3377 module_exit(scsi_debug_exit);
3379 static void sdebug_release_adapter(struct device * dev)
3381 struct sdebug_host_info *sdbg_host;
3383 sdbg_host = to_sdebug_host(dev);
3384 kfree(sdbg_host);
3387 static int sdebug_add_adapter(void)
3389 int k, devs_per_host;
3390 int error = 0;
3391 struct sdebug_host_info *sdbg_host;
3392 struct sdebug_dev_info *sdbg_devinfo, *tmp;
3394 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
3395 if (NULL == sdbg_host) {
3396 printk(KERN_ERR "%s: out of memory at line %d\n",
3397 __func__, __LINE__);
3398 return -ENOMEM;
3401 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
3403 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
3404 for (k = 0; k < devs_per_host; k++) {
3405 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
3406 if (!sdbg_devinfo) {
3407 printk(KERN_ERR "%s: out of memory at line %d\n",
3408 __func__, __LINE__);
3409 error = -ENOMEM;
3410 goto clean;
3414 spin_lock(&sdebug_host_list_lock);
3415 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
3416 spin_unlock(&sdebug_host_list_lock);
3418 sdbg_host->dev.bus = &pseudo_lld_bus;
3419 sdbg_host->dev.parent = &pseudo_primary;
3420 sdbg_host->dev.release = &sdebug_release_adapter;
3421 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3423 error = device_register(&sdbg_host->dev);
3425 if (error)
3426 goto clean;
3428 ++scsi_debug_add_host;
3429 return error;
3431 clean:
3432 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3433 dev_list) {
3434 list_del(&sdbg_devinfo->dev_list);
3435 kfree(sdbg_devinfo);
3438 kfree(sdbg_host);
3439 return error;
3442 static void sdebug_remove_adapter(void)
3444 struct sdebug_host_info * sdbg_host = NULL;
3446 spin_lock(&sdebug_host_list_lock);
3447 if (!list_empty(&sdebug_host_list)) {
3448 sdbg_host = list_entry(sdebug_host_list.prev,
3449 struct sdebug_host_info, host_list);
3450 list_del(&sdbg_host->host_list);
3452 spin_unlock(&sdebug_host_list_lock);
3454 if (!sdbg_host)
3455 return;
3457 device_unregister(&sdbg_host->dev);
3458 --scsi_debug_add_host;
3461 static
3462 int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done)
3464 unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
3465 int len, k;
3466 unsigned int num;
3467 unsigned long long lba;
3468 u32 ei_lba;
3469 int errsts = 0;
3470 int target = SCpnt->device->id;
3471 struct sdebug_dev_info *devip = NULL;
3472 int inj_recovered = 0;
3473 int inj_transport = 0;
3474 int inj_dif = 0;
3475 int inj_dix = 0;
3476 int delay_override = 0;
3477 int unmap = 0;
3479 scsi_set_resid(SCpnt, 0);
3480 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
3481 printk(KERN_INFO "scsi_debug: cmd ");
3482 for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
3483 printk("%02x ", (int)cmd[k]);
3484 printk("\n");
3487 if (target == SCpnt->device->host->hostt->this_id) {
3488 printk(KERN_INFO "scsi_debug: initiator's id used as "
3489 "target!\n");
3490 return schedule_resp(SCpnt, NULL, done,
3491 DID_NO_CONNECT << 16, 0);
3494 if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
3495 (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
3496 return schedule_resp(SCpnt, NULL, done,
3497 DID_NO_CONNECT << 16, 0);
3498 devip = devInfoReg(SCpnt->device);
3499 if (NULL == devip)
3500 return schedule_resp(SCpnt, NULL, done,
3501 DID_NO_CONNECT << 16, 0);
3503 if ((scsi_debug_every_nth != 0) &&
3504 (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
3505 scsi_debug_cmnd_count = 0;
3506 if (scsi_debug_every_nth < -1)
3507 scsi_debug_every_nth = -1;
3508 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
3509 return 0; /* ignore command causing timeout */
3510 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
3511 inj_recovered = 1; /* to reads and writes below */
3512 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
3513 inj_transport = 1; /* to reads and writes below */
3514 else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
3515 inj_dif = 1; /* to reads and writes below */
3516 else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
3517 inj_dix = 1; /* to reads and writes below */
3520 if (devip->wlun) {
3521 switch (*cmd) {
3522 case INQUIRY:
3523 case REQUEST_SENSE:
3524 case TEST_UNIT_READY:
3525 case REPORT_LUNS:
3526 break; /* only allowable wlun commands */
3527 default:
3528 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3529 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
3530 "not supported for wlun\n", *cmd);
3531 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3532 INVALID_OPCODE, 0);
3533 errsts = check_condition_result;
3534 return schedule_resp(SCpnt, devip, done, errsts,
3535 0);
3539 switch (*cmd) {
3540 case INQUIRY: /* mandatory, ignore unit attention */
3541 delay_override = 1;
3542 errsts = resp_inquiry(SCpnt, target, devip);
3543 break;
3544 case REQUEST_SENSE: /* mandatory, ignore unit attention */
3545 delay_override = 1;
3546 errsts = resp_requests(SCpnt, devip);
3547 break;
3548 case REZERO_UNIT: /* actually this is REWIND for SSC */
3549 case START_STOP:
3550 errsts = resp_start_stop(SCpnt, devip);
3551 break;
3552 case ALLOW_MEDIUM_REMOVAL:
3553 errsts = check_readiness(SCpnt, 1, devip);
3554 if (errsts)
3555 break;
3556 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3557 printk(KERN_INFO "scsi_debug: Medium removal %s\n",
3558 cmd[4] ? "inhibited" : "enabled");
3559 break;
3560 case SEND_DIAGNOSTIC: /* mandatory */
3561 errsts = check_readiness(SCpnt, 1, devip);
3562 break;
3563 case TEST_UNIT_READY: /* mandatory */
3564 delay_override = 1;
3565 errsts = check_readiness(SCpnt, 0, devip);
3566 break;
3567 case RESERVE:
3568 errsts = check_readiness(SCpnt, 1, devip);
3569 break;
3570 case RESERVE_10:
3571 errsts = check_readiness(SCpnt, 1, devip);
3572 break;
3573 case RELEASE:
3574 errsts = check_readiness(SCpnt, 1, devip);
3575 break;
3576 case RELEASE_10:
3577 errsts = check_readiness(SCpnt, 1, devip);
3578 break;
3579 case READ_CAPACITY:
3580 errsts = resp_readcap(SCpnt, devip);
3581 break;
3582 case SERVICE_ACTION_IN:
3583 if (cmd[1] == SAI_READ_CAPACITY_16)
3584 errsts = resp_readcap16(SCpnt, devip);
3585 else if (cmd[1] == SAI_GET_LBA_STATUS) {
3587 if (scsi_debug_unmap_max_desc == 0) {
3588 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3589 INVALID_COMMAND_OPCODE, 0);
3590 errsts = check_condition_result;
3591 } else
3592 errsts = resp_get_lba_status(SCpnt, devip);
3593 } else {
3594 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3595 INVALID_OPCODE, 0);
3596 errsts = check_condition_result;
3598 break;
3599 case MAINTENANCE_IN:
3600 if (MI_REPORT_TARGET_PGS != cmd[1]) {
3601 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3602 INVALID_OPCODE, 0);
3603 errsts = check_condition_result;
3604 break;
3606 errsts = resp_report_tgtpgs(SCpnt, devip);
3607 break;
3608 case READ_16:
3609 case READ_12:
3610 case READ_10:
3611 /* READ{10,12,16} and DIF Type 2 are natural enemies */
3612 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3613 cmd[1] & 0xe0) {
3614 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3615 INVALID_COMMAND_OPCODE, 0);
3616 errsts = check_condition_result;
3617 break;
3620 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3621 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3622 (cmd[1] & 0xe0) == 0)
3623 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3625 /* fall through */
3626 case READ_6:
3627 read:
3628 errsts = check_readiness(SCpnt, 0, devip);
3629 if (errsts)
3630 break;
3631 if (scsi_debug_fake_rw)
3632 break;
3633 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3634 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3635 if (inj_recovered && (0 == errsts)) {
3636 mk_sense_buffer(devip, RECOVERED_ERROR,
3637 THRESHOLD_EXCEEDED, 0);
3638 errsts = check_condition_result;
3639 } else if (inj_transport && (0 == errsts)) {
3640 mk_sense_buffer(devip, ABORTED_COMMAND,
3641 TRANSPORT_PROBLEM, ACK_NAK_TO);
3642 errsts = check_condition_result;
3643 } else if (inj_dif && (0 == errsts)) {
3644 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3645 errsts = illegal_condition_result;
3646 } else if (inj_dix && (0 == errsts)) {
3647 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3648 errsts = illegal_condition_result;
3649 }
3650 break;
3651 case REPORT_LUNS: /* mandatory, ignore unit attention */
3652 delay_override = 1;
3653 errsts = resp_report_luns(SCpnt, devip);
3654 break;
3655 case VERIFY: /* 10 byte SBC-2 command */
3656 errsts = check_readiness(SCpnt, 0, devip);
3657 break;
3658 case WRITE_16:
3659 case WRITE_12:
3660 case WRITE_10:
3661 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3662 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3663 cmd[1] & 0xe0) {
3664 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3665 INVALID_COMMAND_OPCODE, 0);
3666 errsts = check_condition_result;
3667 break;
3668 }
3670 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3671 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3672 (cmd[1] & 0xe0) == 0)
3673 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3675 /* fall through */
3676 case WRITE_6:
3677 write:
3678 errsts = check_readiness(SCpnt, 0, devip);
3679 if (errsts)
3680 break;
3681 if (scsi_debug_fake_rw)
3682 break;
3683 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3684 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
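/* Same error-injection hooks as the READ path above, minus the transport case. */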
3685 if (inj_recovered && (0 == errsts)) {
3686 mk_sense_buffer(devip, RECOVERED_ERROR,
3687 THRESHOLD_EXCEEDED, 0);
3688 errsts = check_condition_result;
3689 } else if (inj_dif && (0 == errsts)) {
3690 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3691 errsts = illegal_condition_result;
3692 } else if (inj_dix && (0 == errsts)) {
3693 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3694 errsts = illegal_condition_result;
3695 }
3696 break;
3697 case WRITE_SAME_16:
3698 if (cmd[1] & 0x8)
3699 unmap = 1;
3700 /* fall through */
3701 case WRITE_SAME:
3702 errsts = check_readiness(SCpnt, 0, devip);
3703 if (errsts)
3704 break;
3705 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3706 errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
3707 break;
3708 case UNMAP:
3709 errsts = check_readiness(SCpnt, 0, devip);
3710 if (errsts)
3711 break;
3713 if (scsi_debug_unmap_max_desc == 0) {
3714 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3715 INVALID_COMMAND_OPCODE, 0);
3716 errsts = check_condition_result;
3717 } else
3718 errsts = resp_unmap(SCpnt, devip);
3719 break;
3720 case MODE_SENSE:
3721 case MODE_SENSE_10:
3722 errsts = resp_mode_sense(SCpnt, target, devip);
3723 break;
3724 case MODE_SELECT:
3725 errsts = resp_mode_select(SCpnt, 1, devip);
3726 break;
3727 case MODE_SELECT_10:
3728 errsts = resp_mode_select(SCpnt, 0, devip);
3729 break;
3730 case LOG_SENSE:
3731 errsts = resp_log_sense(SCpnt, devip);
3732 break;
3733 case SYNCHRONIZE_CACHE:
3734 delay_override = 1;
3735 errsts = check_readiness(SCpnt, 0, devip);
3736 break;
3737 case WRITE_BUFFER:
3738 errsts = check_readiness(SCpnt, 1, devip);
3739 break;
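/*
 * XDWRITEREAD(10) is bidirectional (write out, return the XOR with the old
 * data), so it is rejected unless the midlayer submitted a bidi command.
 */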
3740 case XDWRITEREAD_10:
3741 if (!scsi_bidi_cmnd(SCpnt)) {
3742 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3743 INVALID_FIELD_IN_CDB, 0);
3744 errsts = check_condition_result;
3745 break;
3746 }
3748 errsts = check_readiness(SCpnt, 0, devip);
3749 if (errsts)
3750 break;
3751 if (scsi_debug_fake_rw)
3752 break;
3753 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3754 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3755 if (errsts)
3756 break;
3757 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3758 if (errsts)
3759 break;
3760 errsts = resp_xdwriteread(SCpnt, lba, num, devip);
3761 break;
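/*
 * 32-byte READ/WRITE service actions of the variable-length CDB are the
 * only protected forms allowed with DIF Type 2; they are routed back into
 * the ordinary read/write paths above.
 */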
3762 case VARIABLE_LENGTH_CMD:
3763 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
3765 if ((cmd[10] & 0xe0) == 0)
3766 printk(KERN_ERR
3767 "Unprotected RD/WR to DIF device\n");
3769 if (cmd[9] == READ_32) {
3770 BUG_ON(SCpnt->cmd_len < 32);
3771 goto read;
3772 }
3774 if (cmd[9] == WRITE_32) {
3775 BUG_ON(SCpnt->cmd_len < 32);
3776 goto write;
3777 }
3778 }
3780 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3781 INVALID_FIELD_IN_CDB, 0);
3782 errsts = check_condition_result;
3783 break;
3785 default:
3786 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3787 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
3788 "supported\n", *cmd);
3789 errsts = check_readiness(SCpnt, 1, devip);
3790 if (errsts)
3791 break; /* Unit attention takes precedence */
3792 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
3793 errsts = check_condition_result;
3794 break;
3795 }
3796 return schedule_resp(SCpnt, devip, done, errsts,
3797 (delay_override ? 0 : scsi_debug_delay));
3798 }
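/*
 * Host template: plugs the simulated queuecommand, error-handling and ioctl
 * entry points above into the SCSI midlayer, with fixed compile-time limits
 * (can_queue, sg_tablesize, max_sectors).
 */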
3800 static struct scsi_host_template sdebug_driver_template = {
3801 .proc_info = scsi_debug_proc_info,
3802 .proc_name = sdebug_proc_name,
3803 .name = "SCSI DEBUG",
3804 .info = scsi_debug_info,
3805 .slave_alloc = scsi_debug_slave_alloc,
3806 .slave_configure = scsi_debug_slave_configure,
3807 .slave_destroy = scsi_debug_slave_destroy,
3808 .ioctl = scsi_debug_ioctl,
3809 .queuecommand = scsi_debug_queuecommand,
3810 .eh_abort_handler = scsi_debug_abort,
3811 .eh_bus_reset_handler = scsi_debug_bus_reset,
3812 .eh_device_reset_handler = scsi_debug_device_reset,
3813 .eh_host_reset_handler = scsi_debug_host_reset,
3814 .bios_param = scsi_debug_biosparam,
3815 .can_queue = SCSI_DEBUG_CANQUEUE,
3816 .this_id = 7,
3817 .sg_tablesize = 256,
3818 .cmd_per_lun = 16,
3819 .max_sectors = 0xffff,
3820 .use_clustering = DISABLE_CLUSTERING,
3821 .module = THIS_MODULE,
3822 };
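/*
 * Called when a fake adapter is added to the pseudo bus: allocate a
 * Scsi_Host, advertise the configured DIF/DIX protection and guard
 * capabilities, then register and scan the host.
 */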
3824 static int sdebug_driver_probe(struct device * dev)
3825 {
3826 int error = 0;
3827 struct sdebug_host_info *sdbg_host;
3828 struct Scsi_Host *hpnt;
3829 int host_prot;
3831 sdbg_host = to_sdebug_host(dev);
3833 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
3834 if (NULL == hpnt) {
3835 printk(KERN_ERR "%s: scsi_register failed\n", __func__);
3836 error = -ENODEV;
3837 return error;
3838 }
3840 sdbg_host->shost = hpnt;
3841 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
3842 if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
3843 hpnt->max_id = scsi_debug_num_tgts + 1;
3844 else
3845 hpnt->max_id = scsi_debug_num_tgts;
3846 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
3848 host_prot = 0;
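/*
 * Translate scsi_debug_dif into the midlayer's DIF capability bits;
 * scsi_debug_dix additionally advertises the matching DIX (host integrity
 * metadata) capability, or DIX Type 0 when no DIF type is configured.
 */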
3850 switch (scsi_debug_dif) {
3852 case SD_DIF_TYPE1_PROTECTION:
3853 host_prot = SHOST_DIF_TYPE1_PROTECTION;
3854 if (scsi_debug_dix)
3855 host_prot |= SHOST_DIX_TYPE1_PROTECTION;
3856 break;
3858 case SD_DIF_TYPE2_PROTECTION:
3859 host_prot = SHOST_DIF_TYPE2_PROTECTION;
3860 if (scsi_debug_dix)
3861 host_prot |= SHOST_DIX_TYPE2_PROTECTION;
3862 break;
3864 case SD_DIF_TYPE3_PROTECTION:
3865 host_prot = SHOST_DIF_TYPE3_PROTECTION;
3866 if (scsi_debug_dix)
3867 host_prot |= SHOST_DIX_TYPE3_PROTECTION;
3868 break;
3870 default:
3871 if (scsi_debug_dix)
3872 host_prot |= SHOST_DIX_TYPE0_PROTECTION;
3873 break;
3874 }
3876 scsi_host_set_prot(hpnt, host_prot);
3878 printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
3879 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
3880 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
3881 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
3882 (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
3883 (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
3884 (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
3885 (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
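/*
 * guard == 1 selects IP-checksum protection guards for DIX, anything else
 * keeps the default T10 CRC.  Illustrative load-time example (assuming the
 * dif/dix/guard module parameters declared earlier in this file):
 *     modprobe scsi_debug dif=1 dix=1 guard=1
 */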
3887 if (scsi_debug_guard == 1)
3888 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
3889 else
3890 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
3892 error = scsi_add_host(hpnt, &sdbg_host->dev);
3893 if (error) {
3894 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
3895 error = -ENODEV;
3896 scsi_host_put(hpnt);
3897 } else
3898 scsi_scan_host(hpnt);
3901 return error;
3902 }
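/*
 * Tear-down counterpart of sdebug_driver_probe(): unregister the Scsi_Host
 * and free the per-device bookkeeping attached to this fake host.
 */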
3904 static int sdebug_driver_remove(struct device * dev)
3905 {
3906 struct sdebug_host_info *sdbg_host;
3907 struct sdebug_dev_info *sdbg_devinfo, *tmp;
3909 sdbg_host = to_sdebug_host(dev);
3911 if (!sdbg_host) {
3912 printk(KERN_ERR "%s: Unable to locate host info\n",
3913 __func__);
3914 return -ENODEV;
3915 }
3917 scsi_remove_host(sdbg_host->shost);
3919 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3920 dev_list) {
3921 list_del(&sdbg_devinfo->dev_list);
3922 kfree(sdbg_devinfo);
3923 }
3925 scsi_host_put(sdbg_host->shost);
3926 return 0;
3927 }
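/*
 * The "pseudo" bus below matches every device, so each simulated adapter
 * registered by this driver is bound to sdebug_driver_probe()/_remove().
 */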
3929 static int pseudo_lld_bus_match(struct device *dev,
3930 struct device_driver *dev_driver)
3931 {
3932 return 1;
3933 }
3935 static struct bus_type pseudo_lld_bus = {
3936 .name = "pseudo",
3937 .match = pseudo_lld_bus_match,
3938 .probe = sdebug_driver_probe,
3939 .remove = sdebug_driver_remove,