[SCSI] scsi_debug: Fix 32-bit overflow in do_device_access causing memory corruption
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers/scsi/scsi_debug.c
blob a6b2d72022fc7a4aec861f7610eecd3176094a7a
1 /*
2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * This version is more generic, simulating a variable number of disks
10 * (or disk-like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
12 * SAS disks.
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
28 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
58 #include "sd.h"
59 #include "scsi_logging.h"
61 #define SCSI_DEBUG_VERSION "1.82"
62 static const char * scsi_debug_version_date = "20100324";
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define UNRECOVERED_READ_ERR 0x11
68 #define PARAMETER_LIST_LENGTH_ERR 0x1a
69 #define INVALID_OPCODE 0x20
70 #define ADDR_OUT_OF_RANGE 0x21
71 #define INVALID_COMMAND_OPCODE 0x20
72 #define INVALID_FIELD_IN_CDB 0x24
73 #define INVALID_FIELD_IN_PARAM_LIST 0x26
74 #define POWERON_RESET 0x29
75 #define SAVING_PARAMS_UNSUP 0x39
76 #define TRANSPORT_PROBLEM 0x4b
77 #define THRESHOLD_EXCEEDED 0x5d
78 #define LOW_POWER_COND_ON 0x5e
80 /* Additional Sense Code Qualifier (ASCQ) */
81 #define ACK_NAK_TO 0x3
83 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
85 /* Default values for driver parameters */
86 #define DEF_NUM_HOST 1
87 #define DEF_NUM_TGTS 1
88 #define DEF_MAX_LUNS 1
89 /* With these defaults, this driver will make 1 host with 1 target
90 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
92 #define DEF_DELAY 1
93 #define DEF_DEV_SIZE_MB 8
94 #define DEF_EVERY_NTH 0
95 #define DEF_NUM_PARTS 0
96 #define DEF_OPTS 0
97 #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
98 #define DEF_PTYPE 0
99 #define DEF_D_SENSE 0
100 #define DEF_NO_LUN_0 0
101 #define DEF_VIRTUAL_GB 0
102 #define DEF_FAKE_RW 0
103 #define DEF_VPD_USE_HOSTNO 1
104 #define DEF_SECTOR_SIZE 512
105 #define DEF_DIX 0
106 #define DEF_DIF 0
107 #define DEF_GUARD 0
108 #define DEF_ATO 1
109 #define DEF_PHYSBLK_EXP 0
110 #define DEF_LOWEST_ALIGNED 0
111 #define DEF_OPT_BLKS 64
112 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
113 #define DEF_UNMAP_MAX_DESC 256
114 #define DEF_UNMAP_GRANULARITY 1
115 #define DEF_UNMAP_ALIGNMENT 0
116 #define DEF_TPWS 0
117 #define DEF_TPU 0
119 /* bit mask values for scsi_debug_opts */
120 #define SCSI_DEBUG_OPT_NOISE 1
121 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
122 #define SCSI_DEBUG_OPT_TIMEOUT 4
123 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
124 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
125 #define SCSI_DEBUG_OPT_DIF_ERR 32
126 #define SCSI_DEBUG_OPT_DIX_ERR 64
127 /* When "every_nth" > 0 then modulo "every_nth" commands:
128 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
129 * - a RECOVERED_ERROR is simulated on successful read and write
130 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
131 * - a TRANSPORT_ERROR is simulated on successful read and write
132 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
134 * When "every_nth" < 0 then after "- every_nth" commands:
135 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
136 * - a RECOVERED_ERROR is simulated on successful read and write
137 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
138 * - a TRANSPORT_ERROR is simulated on successful read and write
139 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
140 * This will continue until some other action occurs (e.g. the user
141 * writing a new value (other than -1 or 1) to every_nth via sysfs).
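 * For example, "modprobe scsi_debug every_nth=100 opts=4" (opts bit
 * SCSI_DEBUG_OPT_TIMEOUT) makes every 100th command give no response
 * (a simulated timeout).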
144 /* when SCSI_DEBUG_OPT_MEDIUM_ERR is set in scsi_debug_opts, a medium error
145  * is simulated at this sector on read commands: */
146 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
148 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
149 * or "peripheral device" addressing (value 0) */
150 #define SAM2_LUN_ADDRESS_METHOD 0
151 #define SAM2_WLUN_REPORT_LUNS 0xc101
153 /* Can queue up to this number of commands. Typically commands that
154  * have a non-zero delay are queued. */
155 #define SCSI_DEBUG_CANQUEUE 255
157 static int scsi_debug_add_host = DEF_NUM_HOST;
158 static int scsi_debug_delay = DEF_DELAY;
159 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
160 static int scsi_debug_every_nth = DEF_EVERY_NTH;
161 static int scsi_debug_max_luns = DEF_MAX_LUNS;
162 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
163 static int scsi_debug_num_parts = DEF_NUM_PARTS;
164 static int scsi_debug_no_uld = 0;
165 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
166 static int scsi_debug_opts = DEF_OPTS;
167 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
168 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
169 static int scsi_debug_dsense = DEF_D_SENSE;
170 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
171 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
172 static int scsi_debug_fake_rw = DEF_FAKE_RW;
173 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
174 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
175 static int scsi_debug_dix = DEF_DIX;
176 static int scsi_debug_dif = DEF_DIF;
177 static int scsi_debug_guard = DEF_GUARD;
178 static int scsi_debug_ato = DEF_ATO;
179 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
180 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
181 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
182 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
183 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
184 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
185 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
186 static unsigned int scsi_debug_tpws = DEF_TPWS;
187 static unsigned int scsi_debug_tpu = DEF_TPU;
189 static int scsi_debug_cmnd_count = 0;
191 #define DEV_READONLY(TGT) (0)
192 #define DEV_REMOVEABLE(TGT) (0)
194 static unsigned int sdebug_store_sectors;
195 static sector_t sdebug_capacity; /* in sectors */
197 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
198 may still need them */
199 static int sdebug_heads; /* heads per disk */
200 static int sdebug_cylinders_per; /* cylinders per surface */
201 static int sdebug_sectors_per; /* sectors per cylinder */
203 #define SDEBUG_MAX_PARTS 4
205 #define SDEBUG_SENSE_LEN 32
207 #define SCSI_DEBUG_MAX_CMD_LEN 32
209 struct sdebug_dev_info {
210 struct list_head dev_list;
211 unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */
212 unsigned int channel;
213 unsigned int target;
214 unsigned int lun;
215 struct sdebug_host_info *sdbg_host;
216 unsigned int wlun;
217 char reset;
218 char stopped;
219 char used;
222 struct sdebug_host_info {
223 struct list_head host_list;
224 struct Scsi_Host *shost;
225 struct device dev;
226 struct list_head dev_info_list;
229 #define to_sdebug_host(d) \
230 container_of(d, struct sdebug_host_info, dev)
232 static LIST_HEAD(sdebug_host_list);
233 static DEFINE_SPINLOCK(sdebug_host_list_lock);
235 typedef void (* done_funct_t) (struct scsi_cmnd *);
237 struct sdebug_queued_cmd {
238 int in_use;
239 struct timer_list cmnd_timer;
240 done_funct_t done_funct;
241 struct scsi_cmnd * a_cmnd;
242 int scsi_result;
244 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
246 static unsigned char * fake_storep; /* ramdisk storage */
247 static unsigned char *dif_storep; /* protection info */
248 static void *map_storep; /* provisioning map */
250 static unsigned long map_size;
251 static int num_aborts = 0;
252 static int num_dev_resets = 0;
253 static int num_bus_resets = 0;
254 static int num_host_resets = 0;
255 static int dix_writes;
256 static int dix_reads;
257 static int dif_errors;
259 static DEFINE_SPINLOCK(queued_arr_lock);
260 static DEFINE_RWLOCK(atomic_rw);
262 static char sdebug_proc_name[] = "scsi_debug";
264 static struct bus_type pseudo_lld_bus;
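/* Protection information is stored as one 8 byte sd_dif_tuple per sector,
 * so a sector's tuple lives at byte offset sector * 8 in dif_storep
 * (e.g. dif_offset(3) == 24).
 */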
266 static inline sector_t dif_offset(sector_t sector)
268 return sector << 3;
271 static struct device_driver sdebug_driverfs_driver = {
272 .name = sdebug_proc_name,
273 .bus = &pseudo_lld_bus,
276 static const int check_condition_result =
277 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
279 static const int illegal_condition_result =
280 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
282 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
283 0, 0, 0x2, 0x4b};
284 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
285 0, 0, 0x0, 0x0};
287 static int sdebug_add_adapter(void);
288 static void sdebug_remove_adapter(void);
290 static void sdebug_max_tgts_luns(void)
292 struct sdebug_host_info *sdbg_host;
293 struct Scsi_Host *hpnt;
295 spin_lock(&sdebug_host_list_lock);
296 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
297 hpnt = sdbg_host->shost;
298 if ((hpnt->this_id >= 0) &&
299 (scsi_debug_num_tgts > hpnt->this_id))
300 hpnt->max_id = scsi_debug_num_tgts + 1;
301 else
302 hpnt->max_id = scsi_debug_num_tgts;
303 /* scsi_debug_max_luns; */
304 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
306 spin_unlock(&sdebug_host_list_lock);
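/* Build sense data for this (pseudo) device in devip->sense_buff:
 * descriptor format when scsi_debug_dsense is set, fixed format otherwise.
 */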
309 static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
310 int asc, int asq)
312 unsigned char *sbuff;
314 sbuff = devip->sense_buff;
315 memset(sbuff, 0, SDEBUG_SENSE_LEN);
317 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
319 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
320 printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: "
321 "[0x%x,0x%x,0x%x]\n", key, asc, asq);
324 static void get_data_transfer_info(unsigned char *cmd,
325 unsigned long long *lba, unsigned int *num,
326 u32 *ei_lba)
328 *ei_lba = 0;
330 switch (*cmd) {
331 case VARIABLE_LENGTH_CMD:
332 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
333 (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
334 (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
335 (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
337 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
338 (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
340 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
341 (u32)cmd[28] << 24;
342 break;
344 case WRITE_SAME_16:
345 case WRITE_16:
346 case READ_16:
347 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
348 (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
349 (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
350 (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
352 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
353 (u32)cmd[10] << 24;
354 break;
355 case WRITE_12:
356 case READ_12:
357 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
358 (u32)cmd[2] << 24;
360 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
361 (u32)cmd[6] << 24;
362 break;
363 case WRITE_SAME:
364 case WRITE_10:
365 case READ_10:
366 case XDWRITEREAD_10:
367 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
368 (u32)cmd[2] << 24;
370 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
371 break;
372 case WRITE_6:
373 case READ_6:
374 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
375 (u32)(cmd[1] & 0x1f) << 16;
376 *num = (0 == cmd[4]) ? 256 : cmd[4];
377 break;
378 default:
379 break;
383 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
385 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
386 printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
388 return -EINVAL;
389 /* return -ENOTTY; // correct return but upsets fdisk */
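/* Report a UNIT ATTENTION (power on reset) once after a reset and, unless
 * reset_only is set, NOT READY (initializing command required) while the
 * unit is stopped; otherwise return 0 (ready).
 */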
392 static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
393 struct sdebug_dev_info * devip)
395 if (devip->reset) {
396 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
397 printk(KERN_INFO "scsi_debug: Reporting Unit "
398 "attention: power on reset\n");
399 devip->reset = 0;
400 mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
401 return check_condition_result;
403 if ((0 == reset_only) && devip->stopped) {
404 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
405 printk(KERN_INFO "scsi_debug: Reporting Not "
406 "ready: initializing command required\n");
407 mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
408 0x2);
409 return check_condition_result;
411 return 0;
414 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
415 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
416 int arr_len)
418 int act_len;
419 struct scsi_data_buffer *sdb = scsi_in(scp);
421 if (!sdb->length)
422 return 0;
423 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
424 return (DID_ERROR << 16);
426 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
427 arr, arr_len);
428 if (sdb->resid)
429 sdb->resid -= act_len;
430 else
431 sdb->resid = scsi_bufflen(scp) - act_len;
433 return 0;
436 /* Returns number of bytes fetched into 'arr' or -1 if error. */
437 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
438 int arr_len)
440 if (!scsi_bufflen(scp))
441 return 0;
442 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
443 return -1;
445 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
449 static const char * inq_vendor_id = "Linux ";
450 static const char * inq_product_id = "scsi_debug ";
451 static const char * inq_product_rev = "0004";
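/* Build the Device Identification VPD page (0x83): a T10 vendor ID
 * descriptor followed by faked NAA-5 designators for the logical unit,
 * target port, target port group and target device, plus a SCSI name
 * string. Returns the combined length of the descriptors, which the caller
 * stores in the page length field.
 */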
453 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
454 int target_dev_id, int dev_id_num,
455 const char * dev_id_str,
456 int dev_id_str_len)
458 int num, port_a;
459 char b[32];
461 port_a = target_dev_id + 1;
462 /* T10 vendor identifier field format (faked) */
463 arr[0] = 0x2; /* ASCII */
464 arr[1] = 0x1;
465 arr[2] = 0x0;
466 memcpy(&arr[4], inq_vendor_id, 8);
467 memcpy(&arr[12], inq_product_id, 16);
468 memcpy(&arr[28], dev_id_str, dev_id_str_len);
469 num = 8 + 16 + dev_id_str_len;
470 arr[3] = num;
471 num += 4;
472 if (dev_id_num >= 0) {
473 /* NAA-5, Logical unit identifier (binary) */
474 arr[num++] = 0x1; /* binary (not necessarily sas) */
475 arr[num++] = 0x3; /* PIV=0, lu, naa */
476 arr[num++] = 0x0;
477 arr[num++] = 0x8;
478 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
479 arr[num++] = 0x33;
480 arr[num++] = 0x33;
481 arr[num++] = 0x30;
482 arr[num++] = (dev_id_num >> 24);
483 arr[num++] = (dev_id_num >> 16) & 0xff;
484 arr[num++] = (dev_id_num >> 8) & 0xff;
485 arr[num++] = dev_id_num & 0xff;
486 /* Target relative port number */
487 arr[num++] = 0x61; /* proto=sas, binary */
488 arr[num++] = 0x94; /* PIV=1, target port, rel port */
489 arr[num++] = 0x0; /* reserved */
490 arr[num++] = 0x4; /* length */
491 arr[num++] = 0x0; /* reserved */
492 arr[num++] = 0x0; /* reserved */
493 arr[num++] = 0x0;
494 arr[num++] = 0x1; /* relative port A */
496 /* NAA-5, Target port identifier */
497 arr[num++] = 0x61; /* proto=sas, binary */
498 arr[num++] = 0x93; /* piv=1, target port, naa */
499 arr[num++] = 0x0;
500 arr[num++] = 0x8;
501 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
502 arr[num++] = 0x22;
503 arr[num++] = 0x22;
504 arr[num++] = 0x20;
505 arr[num++] = (port_a >> 24);
506 arr[num++] = (port_a >> 16) & 0xff;
507 arr[num++] = (port_a >> 8) & 0xff;
508 arr[num++] = port_a & 0xff;
509 /* NAA-5, Target port group identifier */
510 arr[num++] = 0x61; /* proto=sas, binary */
511 arr[num++] = 0x95; /* piv=1, target port group id */
512 arr[num++] = 0x0;
513 arr[num++] = 0x4;
514 arr[num++] = 0;
515 arr[num++] = 0;
516 arr[num++] = (port_group_id >> 8) & 0xff;
517 arr[num++] = port_group_id & 0xff;
518 /* NAA-5, Target device identifier */
519 arr[num++] = 0x61; /* proto=sas, binary */
520 arr[num++] = 0xa3; /* piv=1, target device, naa */
521 arr[num++] = 0x0;
522 arr[num++] = 0x8;
523 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
524 arr[num++] = 0x22;
525 arr[num++] = 0x22;
526 arr[num++] = 0x20;
527 arr[num++] = (target_dev_id >> 24);
528 arr[num++] = (target_dev_id >> 16) & 0xff;
529 arr[num++] = (target_dev_id >> 8) & 0xff;
530 arr[num++] = target_dev_id & 0xff;
531 /* SCSI name string: Target device identifier */
532 arr[num++] = 0x63; /* proto=sas, UTF-8 */
533 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
534 arr[num++] = 0x0;
535 arr[num++] = 24;
536 memcpy(arr + num, "naa.52222220", 12);
537 num += 12;
538 snprintf(b, sizeof(b), "%08X", target_dev_id);
539 memcpy(arr + num, b, 8);
540 num += 8;
541 memset(arr + num, 0, 4);
542 num += 4;
543 return num;
547 static unsigned char vpd84_data[] = {
548 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
549 0x22,0x22,0x22,0x0,0xbb,0x1,
550 0x22,0x22,0x22,0x0,0xbb,0x2,
553 static int inquiry_evpd_84(unsigned char * arr)
555 memcpy(arr, vpd84_data, sizeof(vpd84_data));
556 return sizeof(vpd84_data);
559 static int inquiry_evpd_85(unsigned char * arr)
561 int num = 0;
562 const char * na1 = "https://www.kernel.org/config";
563 const char * na2 = "http://www.kernel.org/log";
564 int plen, olen;
566 arr[num++] = 0x1; /* lu, storage config */
567 arr[num++] = 0x0; /* reserved */
568 arr[num++] = 0x0;
569 olen = strlen(na1);
570 plen = olen + 1;
571 if (plen % 4)
572 plen = ((plen / 4) + 1) * 4;
573 arr[num++] = plen; /* length, null terminated, padded */
574 memcpy(arr + num, na1, olen);
575 memset(arr + num + olen, 0, plen - olen);
576 num += plen;
578 arr[num++] = 0x4; /* lu, logging */
579 arr[num++] = 0x0; /* reserved */
580 arr[num++] = 0x0;
581 olen = strlen(na2);
582 plen = olen + 1;
583 if (plen % 4)
584 plen = ((plen / 4) + 1) * 4;
585 arr[num++] = plen; /* length, null terminated, padded */
586 memcpy(arr + num, na2, olen);
587 memset(arr + num + olen, 0, plen - olen);
588 num += plen;
590 return num;
593 /* SCSI ports VPD page */
594 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
596 int num = 0;
597 int port_a, port_b;
599 port_a = target_dev_id + 1;
600 port_b = port_a + 1;
601 arr[num++] = 0x0; /* reserved */
602 arr[num++] = 0x0; /* reserved */
603 arr[num++] = 0x0;
604 arr[num++] = 0x1; /* relative port 1 (primary) */
605 memset(arr + num, 0, 6);
606 num += 6;
607 arr[num++] = 0x0;
608 arr[num++] = 12; /* length tp descriptor */
609 /* naa-5 target port identifier (A) */
610 arr[num++] = 0x61; /* proto=sas, binary */
611 arr[num++] = 0x93; /* PIV=1, target port, NAA */
612 arr[num++] = 0x0; /* reserved */
613 arr[num++] = 0x8; /* length */
614 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
615 arr[num++] = 0x22;
616 arr[num++] = 0x22;
617 arr[num++] = 0x20;
618 arr[num++] = (port_a >> 24);
619 arr[num++] = (port_a >> 16) & 0xff;
620 arr[num++] = (port_a >> 8) & 0xff;
621 arr[num++] = port_a & 0xff;
623 arr[num++] = 0x0; /* reserved */
624 arr[num++] = 0x0; /* reserved */
625 arr[num++] = 0x0;
626 arr[num++] = 0x2; /* relative port 2 (secondary) */
627 memset(arr + num, 0, 6);
628 num += 6;
629 arr[num++] = 0x0;
630 arr[num++] = 12; /* length tp descriptor */
631 /* naa-5 target port identifier (B) */
632 arr[num++] = 0x61; /* proto=sas, binary */
633 arr[num++] = 0x93; /* PIV=1, target port, NAA */
634 arr[num++] = 0x0; /* reserved */
635 arr[num++] = 0x8; /* length */
636 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
637 arr[num++] = 0x22;
638 arr[num++] = 0x22;
639 arr[num++] = 0x20;
640 arr[num++] = (port_b >> 24);
641 arr[num++] = (port_b >> 16) & 0xff;
642 arr[num++] = (port_b >> 8) & 0xff;
643 arr[num++] = port_b & 0xff;
645 return num;
649 static unsigned char vpd89_data[] = {
650 /* from 4th byte */ 0,0,0,0,
651 'l','i','n','u','x',' ',' ',' ',
652 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
653 '1','2','3','4',
654 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
655 0xec,0,0,0,
656 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
657 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
658 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
659 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
660 0x53,0x41,
661 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
662 0x20,0x20,
663 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
664 0x10,0x80,
665 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
666 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
667 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
668 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
669 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
670 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
671 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
672 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
673 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
674 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
675 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
676 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
677 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
678 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
679 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
680 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
681 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
682 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
683 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
684 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
685 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
686 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
687 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
688 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
689 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
690 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
693 static int inquiry_evpd_89(unsigned char * arr)
695 memcpy(arr, vpd89_data, sizeof(vpd89_data));
696 return sizeof(vpd89_data);
700 /* Block limits VPD page (SBC-3) */
701 static unsigned char vpdb0_data[] = {
702 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
703 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
704 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
705 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
708 static int inquiry_evpd_b0(unsigned char * arr)
710 unsigned int gran;
712 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
714 /* Optimal transfer length granularity */
715 gran = 1 << scsi_debug_physblk_exp;
716 arr[2] = (gran >> 8) & 0xff;
717 arr[3] = gran & 0xff;
719 /* Maximum Transfer Length */
720 if (sdebug_store_sectors > 0x400) {
721 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
722 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
723 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
724 arr[7] = sdebug_store_sectors & 0xff;
727 /* Optimal Transfer Length */
728 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
730 if (scsi_debug_tpu) {
731 /* Maximum Unmap LBA Count */
732 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
734 /* Maximum Unmap Block Descriptor Count */
735 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
738 /* Unmap Granularity Alignment */
739 if (scsi_debug_unmap_alignment) {
740 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
741 arr[28] |= 0x80; /* UGAVALID */
744 /* Optimal Unmap Granularity */
745 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
747 return 0x3c; /* Mandatory page length for thin provisioning */
749 return sizeof(vpdb0_data);
752 /* Block device characteristics VPD page (SBC-3) */
753 static int inquiry_evpd_b1(unsigned char *arr)
755 memset(arr, 0, 0x3c);
756 arr[0] = 0;
757 arr[1] = 1; /* non rotating medium (e.g. solid state) */
758 arr[2] = 0;
759 arr[3] = 5; /* less than 1.8" */
761 return 0x3c;
764 /* Thin provisioning VPD page (SBC-3) */
765 static int inquiry_evpd_b2(unsigned char *arr)
767 memset(arr, 0, 0x8);
768 arr[0] = 0; /* threshold exponent */
770 if (scsi_debug_tpu)
771 arr[1] = 1 << 7;
773 if (scsi_debug_tpws)
774 arr[1] |= 1 << 6;
776 return 0x8;
779 #define SDEBUG_LONG_INQ_SZ 96
780 #define SDEBUG_MAX_INQ_ARR_SZ 584
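/* Respond to INQUIRY. With EVPD set, serves pages 0x00 (supported pages),
 * 0x80 (unit serial number), 0x83 (device id), 0x84, 0x85, 0x86, 0x87,
 * 0x88 (SCSI ports), 0x89 (ATA information), 0xb0 (block limits),
 * 0xb1 (block device characteristics) and 0xb2 (thin provisioning);
 * otherwise returns the standard inquiry data. A set CMDDT bit yields
 * ILLEGAL REQUEST, invalid field in CDB.
 */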
782 static int resp_inquiry(struct scsi_cmnd * scp, int target,
783 struct sdebug_dev_info * devip)
785 unsigned char pq_pdt;
786 unsigned char * arr;
787 unsigned char *cmd = (unsigned char *)scp->cmnd;
788 int alloc_len, n, ret;
790 alloc_len = (cmd[3] << 8) + cmd[4];
791 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
792 if (! arr)
793 return DID_REQUEUE << 16;
794 if (devip->wlun)
795 pq_pdt = 0x1e; /* present, wlun */
796 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
797 pq_pdt = 0x7f; /* not present, no device type */
798 else
799 pq_pdt = (scsi_debug_ptype & 0x1f);
800 arr[0] = pq_pdt;
801 if (0x2 & cmd[1]) { /* CMDDT bit set */
802 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
804 kfree(arr);
805 return check_condition_result;
806 } else if (0x1 & cmd[1]) { /* EVPD bit set */
807 int lu_id_num, port_group_id, target_dev_id, len;
808 char lu_id_str[6];
809 int host_no = devip->sdbg_host->shost->host_no;
811 port_group_id = (((host_no + 1) & 0x7f) << 8) +
812 (devip->channel & 0x7f);
813 if (0 == scsi_debug_vpd_use_hostno)
814 host_no = 0;
815 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
816 (devip->target * 1000) + devip->lun);
817 target_dev_id = ((host_no + 1) * 2000) +
818 (devip->target * 1000) - 3;
819 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
820 if (0 == cmd[2]) { /* supported vital product data pages */
821 arr[1] = cmd[2]; /*sanity */
822 n = 4;
823 arr[n++] = 0x0; /* this page */
824 arr[n++] = 0x80; /* unit serial number */
825 arr[n++] = 0x83; /* device identification */
826 arr[n++] = 0x84; /* software interface ident. */
827 arr[n++] = 0x85; /* management network addresses */
828 arr[n++] = 0x86; /* extended inquiry */
829 arr[n++] = 0x87; /* mode page policy */
830 arr[n++] = 0x88; /* SCSI ports */
831 arr[n++] = 0x89; /* ATA information */
832 arr[n++] = 0xb0; /* Block limits (SBC) */
833 arr[n++] = 0xb1; /* Block characteristics (SBC) */
834 arr[n++] = 0xb2; /* Thin provisioning (SBC) */
835 arr[3] = n - 4; /* number of supported VPD pages */
836 } else if (0x80 == cmd[2]) { /* unit serial number */
837 arr[1] = cmd[2]; /*sanity */
838 arr[3] = len;
839 memcpy(&arr[4], lu_id_str, len);
840 } else if (0x83 == cmd[2]) { /* device identification */
841 arr[1] = cmd[2]; /*sanity */
842 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
843 target_dev_id, lu_id_num,
844 lu_id_str, len);
845 } else if (0x84 == cmd[2]) { /* Software interface ident. */
846 arr[1] = cmd[2]; /*sanity */
847 arr[3] = inquiry_evpd_84(&arr[4]);
848 } else if (0x85 == cmd[2]) { /* Management network addresses */
849 arr[1] = cmd[2]; /*sanity */
850 arr[3] = inquiry_evpd_85(&arr[4]);
851 } else if (0x86 == cmd[2]) { /* extended inquiry */
852 arr[1] = cmd[2]; /*sanity */
853 arr[3] = 0x3c; /* number of following entries */
854 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
855 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
856 else if (scsi_debug_dif)
857 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
858 else
859 arr[4] = 0x0; /* no protection stuff */
860 arr[5] = 0x7; /* head of q, ordered + simple q's */
861 } else if (0x87 == cmd[2]) { /* mode page policy */
862 arr[1] = cmd[2]; /*sanity */
863 arr[3] = 0x8; /* number of following entries */
864 arr[4] = 0x2; /* disconnect-reconnect mp */
865 arr[6] = 0x80; /* mlus, shared */
866 arr[8] = 0x18; /* protocol specific lu */
867 arr[10] = 0x82; /* mlus, per initiator port */
868 } else if (0x88 == cmd[2]) { /* SCSI Ports */
869 arr[1] = cmd[2]; /*sanity */
870 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
871 } else if (0x89 == cmd[2]) { /* ATA information */
872 arr[1] = cmd[2]; /*sanity */
873 n = inquiry_evpd_89(&arr[4]);
874 arr[2] = (n >> 8);
875 arr[3] = (n & 0xff);
876 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
877 arr[1] = cmd[2]; /*sanity */
878 arr[3] = inquiry_evpd_b0(&arr[4]);
879 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
880 arr[1] = cmd[2]; /*sanity */
881 arr[3] = inquiry_evpd_b1(&arr[4]);
882 } else if (0xb2 == cmd[2]) { /* Thin provisioning (SBC) */
883 arr[1] = cmd[2]; /*sanity */
884 arr[3] = inquiry_evpd_b2(&arr[4]);
885 } else {
886 /* Illegal request, invalid field in cdb */
887 mk_sense_buffer(devip, ILLEGAL_REQUEST,
888 INVALID_FIELD_IN_CDB, 0);
889 kfree(arr);
890 return check_condition_result;
892 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
893 ret = fill_from_dev_buffer(scp, arr,
894 min(len, SDEBUG_MAX_INQ_ARR_SZ));
895 kfree(arr);
896 return ret;
898 /* drops through here for a standard inquiry */
899 arr[1] = DEV_REMOVEABLE(target) ? 0x80 : 0; /* Removable disk */
900 arr[2] = scsi_debug_scsi_level;
901 arr[3] = 2; /* response_data_format==2 */
902 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
903 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
904 if (0 == scsi_debug_vpd_use_hostno)
905 arr[5] = 0x10; /* claim: implicit TGPS */
906 arr[6] = 0x10; /* claim: MultiP */
907 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
908 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
909 memcpy(&arr[8], inq_vendor_id, 8);
910 memcpy(&arr[16], inq_product_id, 16);
911 memcpy(&arr[32], inq_product_rev, 4);
912 /* version descriptors (2 bytes each) follow */
913 arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
914 arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */
915 n = 62;
916 if (scsi_debug_ptype == 0) {
917 arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
918 } else if (scsi_debug_ptype == 1) {
919 arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
921 arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */
922 ret = fill_from_dev_buffer(scp, arr,
923 min(alloc_len, SDEBUG_LONG_INQ_SZ));
924 kfree(arr);
925 return ret;
928 static int resp_requests(struct scsi_cmnd * scp,
929 struct sdebug_dev_info * devip)
931 unsigned char * sbuff;
932 unsigned char *cmd = (unsigned char *)scp->cmnd;
933 unsigned char arr[SDEBUG_SENSE_LEN];
934 int want_dsense;
935 int len = 18;
937 memset(arr, 0, sizeof(arr));
938 if (devip->reset == 1)
939 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
940 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
941 sbuff = devip->sense_buff;
942 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
943 if (want_dsense) {
944 arr[0] = 0x72;
945 arr[1] = 0x0; /* NO_SENSE in sense_key */
946 arr[2] = THRESHOLD_EXCEEDED;
947 arr[3] = 0xff; /* TEST set and MRIE==6 */
948 } else {
949 arr[0] = 0x70;
950 arr[2] = 0x0; /* NO_SENSE in sense_key */
951 arr[7] = 0xa; /* 18 byte sense buffer */
952 arr[12] = THRESHOLD_EXCEEDED;
953 arr[13] = 0xff; /* TEST set and MRIE==6 */
955 } else {
956 memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
957 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
958 /* DESC bit set and sense_buff in fixed format */
959 memset(arr, 0, sizeof(arr));
960 arr[0] = 0x72;
961 arr[1] = sbuff[2]; /* sense key */
962 arr[2] = sbuff[12]; /* asc */
963 arr[3] = sbuff[13]; /* ascq */
964 len = 8;
967 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
968 return fill_from_dev_buffer(scp, arr, len);
971 static int resp_start_stop(struct scsi_cmnd * scp,
972 struct sdebug_dev_info * devip)
974 unsigned char *cmd = (unsigned char *)scp->cmnd;
975 int power_cond, errsts, start;
977 if ((errsts = check_readiness(scp, 1, devip)))
978 return errsts;
979 power_cond = (cmd[4] & 0xf0) >> 4;
980 if (power_cond) {
981 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
983 return check_condition_result;
985 start = cmd[4] & 1;
986 if (start == devip->stopped)
987 devip->stopped = !start;
988 return 0;
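/* Capacity reported to the initiator, in sectors. When virtual_gb > 0 the
 * reported capacity may exceed the ram store (sdebug_store_sectors); reads
 * and writes then wrap around the store in do_device_access(). For example
 * virtual_gb=4 with 512 byte sectors reports 4 * 2097152 = 8388608 sectors.
 */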
991 static sector_t get_sdebug_capacity(void)
993 if (scsi_debug_virtual_gb > 0)
994 return (sector_t)scsi_debug_virtual_gb *
995 (1073741824 / scsi_debug_sector_size);
996 else
997 return sdebug_store_sectors;
1000 #define SDEBUG_READCAP_ARR_SZ 8
1001 static int resp_readcap(struct scsi_cmnd * scp,
1002 struct sdebug_dev_info * devip)
1004 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1005 unsigned int capac;
1006 int errsts;
1008 if ((errsts = check_readiness(scp, 1, devip)))
1009 return errsts;
1010 /* following just in case virtual_gb changed */
1011 sdebug_capacity = get_sdebug_capacity();
1012 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1013 if (sdebug_capacity < 0xffffffff) {
1014 capac = (unsigned int)sdebug_capacity - 1;
1015 arr[0] = (capac >> 24);
1016 arr[1] = (capac >> 16) & 0xff;
1017 arr[2] = (capac >> 8) & 0xff;
1018 arr[3] = capac & 0xff;
1019 } else {
1020 arr[0] = 0xff;
1021 arr[1] = 0xff;
1022 arr[2] = 0xff;
1023 arr[3] = 0xff;
1025 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1026 arr[7] = scsi_debug_sector_size & 0xff;
1027 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1030 #define SDEBUG_READCAP16_ARR_SZ 32
1031 static int resp_readcap16(struct scsi_cmnd * scp,
1032 struct sdebug_dev_info * devip)
1034 unsigned char *cmd = (unsigned char *)scp->cmnd;
1035 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1036 unsigned long long capac;
1037 int errsts, k, alloc_len;
1039 if ((errsts = check_readiness(scp, 1, devip)))
1040 return errsts;
1041 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1042 + cmd[13]);
1043 /* following just in case virtual_gb changed */
1044 sdebug_capacity = get_sdebug_capacity();
1045 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1046 capac = sdebug_capacity - 1;
1047 for (k = 0; k < 8; ++k, capac >>= 8)
1048 arr[7 - k] = capac & 0xff;
1049 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1050 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1051 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1052 arr[11] = scsi_debug_sector_size & 0xff;
1053 arr[13] = scsi_debug_physblk_exp & 0xf;
1054 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1056 if (scsi_debug_tpu || scsi_debug_tpws)
1057 arr[14] |= 0x80; /* TPE */
1059 arr[15] = scsi_debug_lowest_aligned & 0xff;
1061 if (scsi_debug_dif) {
1062 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1063 arr[12] |= 1; /* PROT_EN */
1066 return fill_from_dev_buffer(scp, arr,
1067 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1070 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
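/* REPORT TARGET PORT GROUPS: two groups with one relative port each; the
 * group holding port B is always reported as unavailable. When
 * vpd_use_hostno=0 the asymmetric access state of group A cycles with the
 * host number.
 */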
1072 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1073 struct sdebug_dev_info * devip)
1075 unsigned char *cmd = (unsigned char *)scp->cmnd;
1076 unsigned char * arr;
1077 int host_no = devip->sdbg_host->shost->host_no;
1078 int n, ret, alen, rlen;
1079 int port_group_a, port_group_b, port_a, port_b;
1081 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1082 + cmd[9]);
1084 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1085 if (! arr)
1086 return DID_REQUEUE << 16;
1088 * EVPD page 0x88 states we have two ports, one
1089 * real and a fake port with no device connected.
1090 * So we create two port groups with one port each
1091 * and set the group with port B to unavailable.
1093 port_a = 0x1; /* relative port A */
1094 port_b = 0x2; /* relative port B */
1095 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1096 (devip->channel & 0x7f);
1097 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1098 (devip->channel & 0x7f) + 0x80;
1101 * The asymmetric access state is cycled according to the host_id.
1103 n = 4;
1104 if (0 == scsi_debug_vpd_use_hostno) {
1105 arr[n++] = host_no % 3; /* Asymm access state */
1106 arr[n++] = 0x0F; /* claim: all states are supported */
1107 } else {
1108 arr[n++] = 0x0; /* Active/Optimized path */
1109 arr[n++] = 0x01; /* claim: only support active/optimized paths */
1111 arr[n++] = (port_group_a >> 8) & 0xff;
1112 arr[n++] = port_group_a & 0xff;
1113 arr[n++] = 0; /* Reserved */
1114 arr[n++] = 0; /* Status code */
1115 arr[n++] = 0; /* Vendor unique */
1116 arr[n++] = 0x1; /* One port per group */
1117 arr[n++] = 0; /* Reserved */
1118 arr[n++] = 0; /* Reserved */
1119 arr[n++] = (port_a >> 8) & 0xff;
1120 arr[n++] = port_a & 0xff;
1121 arr[n++] = 3; /* Port unavailable */
1122 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1123 arr[n++] = (port_group_b >> 8) & 0xff;
1124 arr[n++] = port_group_b & 0xff;
1125 arr[n++] = 0; /* Reserved */
1126 arr[n++] = 0; /* Status code */
1127 arr[n++] = 0; /* Vendor unique */
1128 arr[n++] = 0x1; /* One port per group */
1129 arr[n++] = 0; /* Reserved */
1130 arr[n++] = 0; /* Reserved */
1131 arr[n++] = (port_b >> 8) & 0xff;
1132 arr[n++] = port_b & 0xff;
1134 rlen = n - 4;
1135 arr[0] = (rlen >> 24) & 0xff;
1136 arr[1] = (rlen >> 16) & 0xff;
1137 arr[2] = (rlen >> 8) & 0xff;
1138 arr[3] = rlen & 0xff;
1141 * Return the smallest value of either
1142 * - The allocated length
1143 * - The constructed command length
1144 * - The maximum array size
1146 rlen = min(alen,n);
1147 ret = fill_from_dev_buffer(scp, arr,
1148 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1149 kfree(arr);
1150 return ret;
1153 /* <<Following mode page info copied from ST318451LW>> */
1155 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1156 { /* Read-Write Error Recovery page for mode_sense */
1157 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1158 5, 0, 0xff, 0xff};
1160 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1161 if (1 == pcontrol)
1162 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1163 return sizeof(err_recov_pg);
1166 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1167 { /* Disconnect-Reconnect page for mode_sense */
1168 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1169 0, 0, 0, 0, 0, 0, 0, 0};
1171 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1172 if (1 == pcontrol)
1173 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1174 return sizeof(disconnect_pg);
1177 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1178 { /* Format device page for mode_sense */
1179 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1180 0, 0, 0, 0, 0, 0, 0, 0,
1181 0, 0, 0, 0, 0x40, 0, 0, 0};
1183 memcpy(p, format_pg, sizeof(format_pg));
1184 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1185 p[11] = sdebug_sectors_per & 0xff;
1186 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1187 p[13] = scsi_debug_sector_size & 0xff;
1188 if (DEV_REMOVEABLE(target))
1189 p[20] |= 0x20; /* should agree with INQUIRY */
1190 if (1 == pcontrol)
1191 memset(p + 2, 0, sizeof(format_pg) - 2);
1192 return sizeof(format_pg);
1195 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1196 { /* Caching page for mode_sense */
1197 unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1198 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1200 memcpy(p, caching_pg, sizeof(caching_pg));
1201 if (1 == pcontrol)
1202 memset(p + 2, 0, sizeof(caching_pg) - 2);
1203 return sizeof(caching_pg);
1206 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1207 { /* Control mode page for mode_sense */
1208 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1209 0, 0, 0, 0};
1210 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1211 0, 0, 0x2, 0x4b};
1213 if (scsi_debug_dsense)
1214 ctrl_m_pg[2] |= 0x4;
1215 else
1216 ctrl_m_pg[2] &= ~0x4;
1218 if (scsi_debug_ato)
1219 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1221 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1222 if (1 == pcontrol)
1223 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1224 else if (2 == pcontrol)
1225 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1226 return sizeof(ctrl_m_pg);
1230 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1231 { /* Informational Exceptions control mode page for mode_sense */
1232 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1233 0, 0, 0x0, 0x0};
1234 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1235 0, 0, 0x0, 0x0};
1237 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1238 if (1 == pcontrol)
1239 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1240 else if (2 == pcontrol)
1241 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1242 return sizeof(iec_m_pg);
1245 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1246 { /* SAS SSP mode page - short format for mode_sense */
1247 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1248 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1250 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1251 if (1 == pcontrol)
1252 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1253 return sizeof(sas_sf_m_pg);
1257 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1258 int target_dev_id)
1259 { /* SAS phy control and discover mode page for mode_sense */
1260 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1261 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1262 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1263 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1264 0x2, 0, 0, 0, 0, 0, 0, 0,
1265 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1266 0, 0, 0, 0, 0, 0, 0, 0,
1267 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1268 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1269 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1270 0x3, 0, 0, 0, 0, 0, 0, 0,
1271 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1272 0, 0, 0, 0, 0, 0, 0, 0,
1274 int port_a, port_b;
1276 port_a = target_dev_id + 1;
1277 port_b = port_a + 1;
1278 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1279 p[20] = (port_a >> 24);
1280 p[21] = (port_a >> 16) & 0xff;
1281 p[22] = (port_a >> 8) & 0xff;
1282 p[23] = port_a & 0xff;
1283 p[48 + 20] = (port_b >> 24);
1284 p[48 + 21] = (port_b >> 16) & 0xff;
1285 p[48 + 22] = (port_b >> 8) & 0xff;
1286 p[48 + 23] = port_b & 0xff;
1287 if (1 == pcontrol)
1288 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1289 return sizeof(sas_pcd_m_pg);
1292 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1293 { /* SAS SSP shared protocol specific port mode subpage */
1294 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1295 0, 0, 0, 0, 0, 0, 0, 0,
1298 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1299 if (1 == pcontrol)
1300 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1301 return sizeof(sas_sha_m_pg);
1304 #define SDEBUG_MAX_MSENSE_SZ 256
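/* MODE SENSE(6) and MODE SENSE(10). Unless DBD is set a short (8 byte) or,
 * with LLBAA on MODE SENSE(10), long (16 byte) block descriptor is built,
 * followed by the requested page(s): 0x1, 0x2, 0x3, 0x8, 0xa, 0x19 (with
 * SAS subpages), 0x1c, or 0x3f for all pages.
 */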
1306 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1307 struct sdebug_dev_info * devip)
1309 unsigned char dbd, llbaa;
1310 int pcontrol, pcode, subpcode, bd_len;
1311 unsigned char dev_spec;
1312 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1313 unsigned char * ap;
1314 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1315 unsigned char *cmd = (unsigned char *)scp->cmnd;
1317 if ((errsts = check_readiness(scp, 1, devip)))
1318 return errsts;
1319 dbd = !!(cmd[1] & 0x8);
1320 pcontrol = (cmd[2] & 0xc0) >> 6;
1321 pcode = cmd[2] & 0x3f;
1322 subpcode = cmd[3];
1323 msense_6 = (MODE_SENSE == cmd[0]);
1324 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1325 if ((0 == scsi_debug_ptype) && (0 == dbd))
1326 bd_len = llbaa ? 16 : 8;
1327 else
1328 bd_len = 0;
1329 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1330 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1331 if (0x3 == pcontrol) { /* Saving values not supported */
1332 mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1334 return check_condition_result;
1336 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1337 (devip->target * 1000) - 3;
1338 /* set DPOFUA bit for disks */
1339 if (0 == scsi_debug_ptype)
1340 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1341 else
1342 dev_spec = 0x0;
1343 if (msense_6) {
1344 arr[2] = dev_spec;
1345 arr[3] = bd_len;
1346 offset = 4;
1347 } else {
1348 arr[3] = dev_spec;
1349 if (16 == bd_len)
1350 arr[4] = 0x1; /* set LONGLBA bit */
1351 arr[7] = bd_len; /* assume 255 or less */
1352 offset = 8;
1354 ap = arr + offset;
1355 if ((bd_len > 0) && (!sdebug_capacity))
1356 sdebug_capacity = get_sdebug_capacity();
1358 if (8 == bd_len) {
1359 if (sdebug_capacity > 0xfffffffe) {
1360 ap[0] = 0xff;
1361 ap[1] = 0xff;
1362 ap[2] = 0xff;
1363 ap[3] = 0xff;
1364 } else {
1365 ap[0] = (sdebug_capacity >> 24) & 0xff;
1366 ap[1] = (sdebug_capacity >> 16) & 0xff;
1367 ap[2] = (sdebug_capacity >> 8) & 0xff;
1368 ap[3] = sdebug_capacity & 0xff;
1370 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1371 ap[7] = scsi_debug_sector_size & 0xff;
1372 offset += bd_len;
1373 ap = arr + offset;
1374 } else if (16 == bd_len) {
1375 unsigned long long capac = sdebug_capacity;
1377 for (k = 0; k < 8; ++k, capac >>= 8)
1378 ap[7 - k] = capac & 0xff;
1379 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1380 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1381 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1382 ap[15] = scsi_debug_sector_size & 0xff;
1383 offset += bd_len;
1384 ap = arr + offset;
1387 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1388 /* TODO: Control Extension page */
1389 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1391 return check_condition_result;
1393 switch (pcode) {
1394 case 0x1: /* Read-Write error recovery page, direct access */
1395 len = resp_err_recov_pg(ap, pcontrol, target);
1396 offset += len;
1397 break;
1398 case 0x2: /* Disconnect-Reconnect page, all devices */
1399 len = resp_disconnect_pg(ap, pcontrol, target);
1400 offset += len;
1401 break;
1402 case 0x3: /* Format device page, direct access */
1403 len = resp_format_pg(ap, pcontrol, target);
1404 offset += len;
1405 break;
1406 case 0x8: /* Caching page, direct access */
1407 len = resp_caching_pg(ap, pcontrol, target);
1408 offset += len;
1409 break;
1410 case 0xa: /* Control Mode page, all devices */
1411 len = resp_ctrl_m_pg(ap, pcontrol, target);
1412 offset += len;
1413 break;
1414 case 0x19: /* if spc==1 then sas phy, control+discover */
1415 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1416 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1417 INVALID_FIELD_IN_CDB, 0);
1418 return check_condition_result;
1420 len = 0;
1421 if ((0x0 == subpcode) || (0xff == subpcode))
1422 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1423 if ((0x1 == subpcode) || (0xff == subpcode))
1424 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1425 target_dev_id);
1426 if ((0x2 == subpcode) || (0xff == subpcode))
1427 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1428 offset += len;
1429 break;
1430 case 0x1c: /* Informational Exceptions Mode page, all devices */
1431 len = resp_iec_m_pg(ap, pcontrol, target);
1432 offset += len;
1433 break;
1434 case 0x3f: /* Read all Mode pages */
1435 if ((0 == subpcode) || (0xff == subpcode)) {
1436 len = resp_err_recov_pg(ap, pcontrol, target);
1437 len += resp_disconnect_pg(ap + len, pcontrol, target);
1438 len += resp_format_pg(ap + len, pcontrol, target);
1439 len += resp_caching_pg(ap + len, pcontrol, target);
1440 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1441 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1442 if (0xff == subpcode) {
1443 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1444 target, target_dev_id);
1445 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1447 len += resp_iec_m_pg(ap + len, pcontrol, target);
1448 } else {
1449 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1450 INVALID_FIELD_IN_CDB, 0);
1451 return check_condition_result;
1453 offset += len;
1454 break;
1455 default:
1456 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1458 return check_condition_result;
1460 if (msense_6)
1461 arr[0] = offset - 1;
1462 else {
1463 arr[0] = ((offset - 2) >> 8) & 0xff;
1464 arr[1] = (offset - 2) & 0xff;
1466 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1469 #define SDEBUG_MAX_MSELECT_SZ 512
1471 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1472 struct sdebug_dev_info * devip)
1474 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1475 int param_len, res, errsts, mpage;
1476 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1477 unsigned char *cmd = (unsigned char *)scp->cmnd;
1479 if ((errsts = check_readiness(scp, 1, devip)))
1480 return errsts;
1481 memset(arr, 0, sizeof(arr));
1482 pf = cmd[1] & 0x10;
1483 sp = cmd[1] & 0x1;
1484 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1485 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1486 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1487 INVALID_FIELD_IN_CDB, 0);
1488 return check_condition_result;
1490 res = fetch_to_dev_buffer(scp, arr, param_len);
1491 if (-1 == res)
1492 return (DID_ERROR << 16);
1493 else if ((res < param_len) &&
1494 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1495 printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1496 " IO sent=%d bytes\n", param_len, res);
1497 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1498 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1499 if (md_len > 2) {
1500 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1501 INVALID_FIELD_IN_PARAM_LIST, 0);
1502 return check_condition_result;
1504 off = bd_len + (mselect6 ? 4 : 8);
1505 mpage = arr[off] & 0x3f;
1506 ps = !!(arr[off] & 0x80);
1507 if (ps) {
1508 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1509 INVALID_FIELD_IN_PARAM_LIST, 0);
1510 return check_condition_result;
1512 spf = !!(arr[off] & 0x40);
1513 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1514 (arr[off + 1] + 2);
1515 if ((pg_len + off) > param_len) {
1516 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1517 PARAMETER_LIST_LENGTH_ERR, 0);
1518 return check_condition_result;
1520 switch (mpage) {
1521 case 0xa: /* Control Mode page */
1522 if (ctrl_m_pg[1] == arr[off + 1]) {
1523 memcpy(ctrl_m_pg + 2, arr + off + 2,
1524 sizeof(ctrl_m_pg) - 2);
1525 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1526 return 0;
1528 break;
1529 case 0x1c: /* Informational Exceptions Mode page */
1530 if (iec_m_pg[1] == arr[off + 1]) {
1531 memcpy(iec_m_pg + 2, arr + off + 2,
1532 sizeof(iec_m_pg) - 2);
1533 return 0;
1535 break;
1536 default:
1537 break;
1539 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1540 INVALID_FIELD_IN_PARAM_LIST, 0);
1541 return check_condition_result;
1544 static int resp_temp_l_pg(unsigned char * arr)
1546 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1547 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1550 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1551 return sizeof(temp_l_pg);
1554 static int resp_ie_l_pg(unsigned char * arr)
1556 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1559 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1560 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
1561 arr[4] = THRESHOLD_EXCEEDED;
1562 arr[5] = 0xff;
1564 return sizeof(ie_l_pg);
1567 #define SDEBUG_MAX_LSENSE_SZ 512
1569 static int resp_log_sense(struct scsi_cmnd * scp,
1570 struct sdebug_dev_info * devip)
1572 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1573 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1574 unsigned char *cmd = (unsigned char *)scp->cmnd;
1576 if ((errsts = check_readiness(scp, 1, devip)))
1577 return errsts;
1578 memset(arr, 0, sizeof(arr));
1579 ppc = cmd[1] & 0x2;
1580 sp = cmd[1] & 0x1;
1581 if (ppc || sp) {
1582 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1583 INVALID_FIELD_IN_CDB, 0);
1584 return check_condition_result;
1586 pcontrol = (cmd[2] & 0xc0) >> 6;
1587 pcode = cmd[2] & 0x3f;
1588 subpcode = cmd[3] & 0xff;
1589 alloc_len = (cmd[7] << 8) + cmd[8];
1590 arr[0] = pcode;
1591 if (0 == subpcode) {
1592 switch (pcode) {
1593 case 0x0: /* Supported log pages log page */
1594 n = 4;
1595 arr[n++] = 0x0; /* this page */
1596 arr[n++] = 0xd; /* Temperature */
1597 arr[n++] = 0x2f; /* Informational exceptions */
1598 arr[3] = n - 4;
1599 break;
1600 case 0xd: /* Temperature log page */
1601 arr[3] = resp_temp_l_pg(arr + 4);
1602 break;
1603 case 0x2f: /* Informational exceptions log page */
1604 arr[3] = resp_ie_l_pg(arr + 4);
1605 break;
1606 default:
1607 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1608 INVALID_FIELD_IN_CDB, 0);
1609 return check_condition_result;
1611 } else if (0xff == subpcode) {
1612 arr[0] |= 0x40;
1613 arr[1] = subpcode;
1614 switch (pcode) {
1615 case 0x0: /* Supported log pages and subpages log page */
1616 n = 4;
1617 arr[n++] = 0x0;
1618 arr[n++] = 0x0; /* 0,0 page */
1619 arr[n++] = 0x0;
1620 arr[n++] = 0xff; /* this page */
1621 arr[n++] = 0xd;
1622 arr[n++] = 0x0; /* Temperature */
1623 arr[n++] = 0x2f;
1624 arr[n++] = 0x0; /* Informational exceptions */
1625 arr[3] = n - 4;
1626 break;
1627 case 0xd: /* Temperature subpages */
1628 n = 4;
1629 arr[n++] = 0xd;
1630 arr[n++] = 0x0; /* Temperature */
1631 arr[3] = n - 4;
1632 break;
1633 case 0x2f: /* Informational exceptions subpages */
1634 n = 4;
1635 arr[n++] = 0x2f;
1636 arr[n++] = 0x0; /* Informational exceptions */
1637 arr[3] = n - 4;
1638 break;
1639 default:
1640 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1641 INVALID_FIELD_IN_CDB, 0);
1642 return check_condition_result;
1644 } else {
1645 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1646 INVALID_FIELD_IN_CDB, 0);
1647 return check_condition_result;
1649 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1650 return fill_from_dev_buffer(scp, arr,
1651 min(len, SDEBUG_MAX_INQ_ARR_SZ));
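/* Common bounds check used by the data access commands: fail accesses that
 * end beyond the reported capacity or whose transfer length exceeds the
 * size of the ram store.
 */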
1654 static int check_device_access_params(struct sdebug_dev_info *devi,
1655 unsigned long long lba, unsigned int num)
1657 if (lba + num > sdebug_capacity) {
1658 mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1659 return check_condition_result;
1661 /* transfer length excessive (tie in to block limits VPD page) */
1662 if (num > sdebug_store_sectors) {
1663 mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1664 return check_condition_result;
1666 return 0;
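/* Copy 'num' sectors between the command's data buffer and the fake_storep
 * ramdisk. The LBA is reduced modulo sdebug_store_sectors, so a virtual
 * capacity larger than the store wraps around it, and a transfer that
 * crosses the end of the store is split into two copies.
 *
 * Regarding the fix in the subject line: a minimal sketch of the failure
 * mode, assuming the byte offset into fake_storep were computed in 32 bit
 * arithmetic rather than the 64 bit arithmetic used below:
 *
 *	unsigned int off = block * scsi_debug_sector_size;
 *
 * With 512 byte sectors, any block at or beyond 0x800000 puts the product at
 * or past 4 GiB, so the 32 bit multiplication wraps and the copy lands in an
 * unrelated part of fake_storep, corrupting memory.
 */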
1669 static int do_device_access(struct scsi_cmnd *scmd,
1670 struct sdebug_dev_info *devi,
1671 unsigned long long lba, unsigned int num, int write)
1673 int ret;
1674 unsigned long long block, rest = 0;
1675 int (*func)(struct scsi_cmnd *, unsigned char *, int);
1677 func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
1679 block = do_div(lba, sdebug_store_sectors);
1680 if (block + num > sdebug_store_sectors)
1681 rest = block + num - sdebug_store_sectors;
1683 ret = func(scmd, fake_storep + (block * scsi_debug_sector_size),
1684 (num - rest) * scsi_debug_sector_size);
1685 if (!ret && rest)
1686 ret = func(scmd, fake_storep, rest * scsi_debug_sector_size);
1688 return ret;
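/* DIF verification on read: for each sector recompute the guard over the
 * ramdisk data (CRC-T10DIF when scsi_debug_guard==0, IP checksum when 1)
 * and compare it with the tuple stored in dif_storep; check the reference
 * tag against the low 32 bits of the LBA (type 1) or the expected initial
 * LBA (type 2); tuples with app_tag 0xffff are skipped. On success the
 * 8 byte tuples are copied into the protection scatterlist. Returns 0, or
 * 0x1 (guard error) / 0x3 (reference tag error), which the caller uses as
 * the ASCQ with ASC 0x10.
 */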
1691 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1692 unsigned int sectors, u32 ei_lba)
1694 unsigned int i, resid;
1695 struct scatterlist *psgl;
1696 struct sd_dif_tuple *sdt;
1697 sector_t sector;
1698 sector_t tmp_sec = start_sec;
1699 void *paddr;
1701 start_sec = do_div(tmp_sec, sdebug_store_sectors);
1703 sdt = (struct sd_dif_tuple *)(dif_storep + dif_offset(start_sec));
1705 for (i = 0 ; i < sectors ; i++) {
1706 u16 csum;
1708 if (sdt[i].app_tag == 0xffff)
1709 continue;
1711 sector = start_sec + i;
1713 switch (scsi_debug_guard) {
1714 case 1:
1715 csum = ip_compute_csum(fake_storep +
1716 sector * scsi_debug_sector_size,
1717 scsi_debug_sector_size);
1718 break;
1719 case 0:
1720 csum = crc_t10dif(fake_storep +
1721 sector * scsi_debug_sector_size,
1722 scsi_debug_sector_size);
1723 csum = cpu_to_be16(csum);
1724 break;
1725 default:
1726 BUG();
1729 if (sdt[i].guard_tag != csum) {
1730 printk(KERN_ERR "%s: GUARD check failed on sector %lu" \
1731 " rcvd 0x%04x, data 0x%04x\n", __func__,
1732 (unsigned long)sector,
1733 be16_to_cpu(sdt[i].guard_tag),
1734 be16_to_cpu(csum));
1735 dif_errors++;
1736 return 0x01;
1739 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1740 be32_to_cpu(sdt[i].ref_tag) != (sector & 0xffffffff)) {
1741 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1742 __func__, (unsigned long)sector);
1743 dif_errors++;
1744 return 0x03;
1747 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1748 be32_to_cpu(sdt[i].ref_tag) != ei_lba) {
1749 printk(KERN_ERR "%s: REF check failed on sector %lu\n",
1750 __func__, (unsigned long)sector);
1751 dif_errors++;
1752 return 0x03;
1755 ei_lba++;
1758 resid = sectors * 8; /* Bytes of protection data to copy into sgl */
1759 sector = start_sec;
1761 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1762 int len = min(psgl->length, resid);
1764 paddr = kmap_atomic(sg_page(psgl), KM_IRQ0) + psgl->offset;
1765 memcpy(paddr, dif_storep + dif_offset(sector), len);
1767 sector += len >> 3;
1768 if (sector >= sdebug_store_sectors) {
1769 /* Force wrap */
1770 tmp_sec = sector;
1771 sector = do_div(tmp_sec, sdebug_store_sectors);
1773 resid -= len;
1774 kunmap_atomic(paddr, KM_IRQ0);
1777 dix_reads++;
1779 return 0;
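/*
 * READ(6/10/12/16) handler: validate the range, optionally fake an
 * unrecoverable MEDIUM ERROR around OPT_MEDIUM_ERR_ADDR when
 * SCSI_DEBUG_OPT_MEDIUM_ERR is set, verify DIX protection data if the
 * command carries any, then copy from the ramdisk under the atomic_rw
 * read lock.
 */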
1782 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1783 unsigned int num, struct sdebug_dev_info *devip,
1784 u32 ei_lba)
1786 unsigned long iflags;
1787 int ret;
1789 ret = check_device_access_params(devip, lba, num);
1790 if (ret)
1791 return ret;
1793 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1794 (lba <= OPT_MEDIUM_ERR_ADDR) &&
1795 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1796 /* claim unrecoverable read error */
1797 mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
1799 /* set info field and valid bit for fixed format sense data */
1800 if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1801 devip->sense_buff[0] |= 0x80; /* Valid bit */
1802 ret = OPT_MEDIUM_ERR_ADDR;
1803 devip->sense_buff[3] = (ret >> 24) & 0xff;
1804 devip->sense_buff[4] = (ret >> 16) & 0xff;
1805 devip->sense_buff[5] = (ret >> 8) & 0xff;
1806 devip->sense_buff[6] = ret & 0xff;
1808 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
1809 return check_condition_result;
1812 /* DIX + T10 DIF */
1813 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1814 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1816 if (prot_ret) {
1817 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1818 return illegal_condition_result;
1822 read_lock_irqsave(&atomic_rw, iflags);
1823 ret = do_device_access(SCpnt, devip, lba, num, 0);
1824 read_unlock_irqrestore(&atomic_rw, iflags);
1825 return ret;
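/* Dump one sector to the log (printable bytes as ASCII, everything else
 * in hex) when a protection check fails. */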
1828 void dump_sector(unsigned char *buf, int len)
1830 int i, j;
1832 printk(KERN_ERR ">>> Sector Dump <<<\n");
1834 for (i = 0 ; i < len ; i += 16) {
1835 printk(KERN_ERR "%04d: ", i);
1837 for (j = 0 ; j < 16 ; j++) {
1838 unsigned char c = buf[i+j];
1839 if (c >= 0x20 && c < 0x7e)
1840 printk(" %c ", buf[i+j]);
1841 else
1842 printk("%02x ", buf[i+j]);
1845 printk("\n");
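/*
 * Verify the protection information that accompanies a write.  The data
 * and protection scatterlists are walked in lockstep; for every
 * sector-sized chunk the guard tag is recomputed from the data and the
 * guard/reference tags are checked before the 8-byte tuple is committed
 * to dif_storep.  On a mismatch the offending sector is dumped and a
 * non-zero code (0x01 guard, 0x03 reference) is returned.
 */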
1849 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1850 unsigned int sectors, u32 ei_lba)
1852 int i, j, ret;
1853 struct sd_dif_tuple *sdt;
1854 struct scatterlist *dsgl = scsi_sglist(SCpnt);
1855 struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1856 void *daddr, *paddr;
1857 sector_t tmp_sec = start_sec;
1858 sector_t sector;
1859 int ppage_offset;
1860 unsigned short csum;
1862 sector = do_div(tmp_sec, sdebug_store_sectors);
1864 BUG_ON(scsi_sg_count(SCpnt) == 0);
1865 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1867 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) + psgl->offset;
1868 ppage_offset = 0;
1870 /* For each data page */
1871 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1872 daddr = kmap_atomic(sg_page(dsgl), KM_IRQ0) + dsgl->offset;
1874 /* For each sector-sized chunk in data page */
1875 for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
1877 /* If we're at the end of the current
1878 * protection page advance to the next one
1880 if (ppage_offset >= psgl->length) {
1881 kunmap_atomic(paddr, KM_IRQ1);
1882 psgl = sg_next(psgl);
1883 BUG_ON(psgl == NULL);
1884 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1)
1885 + psgl->offset;
1886 ppage_offset = 0;
1889 sdt = paddr + ppage_offset;
1891 switch (scsi_debug_guard) {
1892 case 1:
1893 csum = ip_compute_csum(daddr,
1894 scsi_debug_sector_size);
1895 break;
1896 case 0:
1897 csum = cpu_to_be16(crc_t10dif(daddr,
1898 scsi_debug_sector_size));
1899 break;
1900 default:
1901 BUG();
1902 ret = 0;
1903 goto out;
1906 if (sdt->guard_tag != csum) {
1907 printk(KERN_ERR
1908 "%s: GUARD check failed on sector %lu " \
1909 "rcvd 0x%04x, calculated 0x%04x\n",
1910 __func__, (unsigned long)sector,
1911 be16_to_cpu(sdt->guard_tag),
1912 be16_to_cpu(csum));
1913 ret = 0x01;
1914 dump_sector(daddr, scsi_debug_sector_size);
1915 goto out;
1918 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1919 be32_to_cpu(sdt->ref_tag)
1920 != (start_sec & 0xffffffff)) {
1921 printk(KERN_ERR
1922 "%s: REF check failed on sector %lu\n",
1923 __func__, (unsigned long)sector);
1924 ret = 0x03;
1925 dump_sector(daddr, scsi_debug_sector_size);
1926 goto out;
1929 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1930 be32_to_cpu(sdt->ref_tag) != ei_lba) {
1931 printk(KERN_ERR
1932 "%s: REF check failed on sector %lu\n",
1933 __func__, (unsigned long)sector);
1934 ret = 0x03;
1935 dump_sector(daddr, scsi_debug_sector_size);
1936 goto out;
1939 /* Would be great to copy this in bigger
1940 * chunks. However, for the sake of
1941 * correctness we need to verify each sector
1942 * before writing it to "stable" storage
1944 memcpy(dif_storep + dif_offset(sector), sdt, 8);
1946 sector++;
1948 if (sector == sdebug_store_sectors)
1949 sector = 0; /* Force wrap */
1951 start_sec++;
1952 ei_lba++;
1953 daddr += scsi_debug_sector_size;
1954 ppage_offset += sizeof(struct sd_dif_tuple);
1957 kunmap_atomic(daddr, KM_IRQ0);
1960 kunmap_atomic(paddr, KM_IRQ1);
1962 dix_writes++;
1964 return 0;
1966 out:
1967 dif_errors++;
1968 kunmap_atomic(daddr, KM_IRQ0);
1969 kunmap_atomic(paddr, KM_IRQ1);
1970 return ret;
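/*
 * Thin provisioning support: map_storep is a bitmap with one bit per
 * unmap granularity's worth of blocks.  map_state() reports whether the
 * provisioning block containing lba is mapped and, via *num, how many
 * blocks starting at lba share that state.
 */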
1973 static unsigned int map_state(sector_t lba, unsigned int *num)
1975 unsigned int granularity, alignment, mapped;
1976 sector_t block, next, end;
1978 granularity = scsi_debug_unmap_granularity;
1979 alignment = granularity - scsi_debug_unmap_alignment;
1980 block = lba + alignment;
1981 do_div(block, granularity);
1983 mapped = test_bit(block, map_storep);
1985 if (mapped)
1986 next = find_next_zero_bit(map_storep, map_size, block);
1987 else
1988 next = find_next_bit(map_storep, map_size, block);
1990 end = next * granularity - scsi_debug_unmap_alignment;
1991 *num = end - lba;
1993 return mapped;
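/*
 * map_region()/unmap_region() set or clear the provisioning bits covering
 * [lba, lba + len); unmap_region() only clears a bit when the whole
 * granularity-sized block lies inside the range.
 */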
1996 static void map_region(sector_t lba, unsigned int len)
1998 unsigned int granularity, alignment;
1999 sector_t end = lba + len;
2001 granularity = scsi_debug_unmap_granularity;
2002 alignment = granularity - scsi_debug_unmap_alignment;
2004 while (lba < end) {
2005 sector_t block, rem;
2007 block = lba + alignment;
2008 rem = do_div(block, granularity);
2010 if (block < map_size)
2011 set_bit(block, map_storep);
2013 lba += granularity - rem;
2017 static void unmap_region(sector_t lba, unsigned int len)
2019 unsigned int granularity, alignment;
2020 sector_t end = lba + len;
2022 granularity = scsi_debug_unmap_granularity;
2023 alignment = granularity - scsi_debug_unmap_alignment;
2025 while (lba < end) {
2026 sector_t block, rem;
2028 block = lba + alignment;
2029 rem = do_div(block, granularity);
2031 if (rem == 0 && lba + granularity <= end &&
2032 block < map_size)
2033 clear_bit(block, map_storep);
2035 lba += granularity - rem;
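/*
 * WRITE handler: validate the range, verify DIX protection data if
 * present, copy the data into the ramdisk under the atomic_rw write lock
 * and, when thin provisioning is configured, mark the region mapped.
 */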
2039 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2040 unsigned int num, struct sdebug_dev_info *devip,
2041 u32 ei_lba)
2043 unsigned long iflags;
2044 int ret;
2046 ret = check_device_access_params(devip, lba, num);
2047 if (ret)
2048 return ret;
2050 /* DIX + T10 DIF */
2051 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2052 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2054 if (prot_ret) {
2055 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2056 return illegal_condition_result;
2060 write_lock_irqsave(&atomic_rw, iflags);
2061 ret = do_device_access(SCpnt, devip, lba, num, 1);
2062 if (scsi_debug_unmap_granularity)
2063 map_region(lba, num);
2064 write_unlock_irqrestore(&atomic_rw, iflags);
2065 if (-1 == ret)
2066 return (DID_ERROR << 16);
2067 else if ((ret < (num * scsi_debug_sector_size)) &&
2068 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2069 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2070 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2072 return 0;
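/*
 * WRITE SAME handler: with the UNMAP bit set (and thin provisioning
 * configured) the range is simply unmapped; otherwise one logical block
 * is fetched from the host and replicated across the whole range, which
 * is then marked mapped.
 */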
2075 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2076 unsigned int num, struct sdebug_dev_info *devip,
2077 u32 ei_lba, unsigned int unmap)
2079 unsigned long iflags;
2080 unsigned long long i;
2081 int ret;
2083 ret = check_device_access_params(devip, lba, num);
2084 if (ret)
2085 return ret;
2087 write_lock_irqsave(&atomic_rw, iflags);
2089 if (unmap && scsi_debug_unmap_granularity) {
2090 unmap_region(lba, num);
2091 goto out;
2094 /* Else fetch one logical block */
2095 ret = fetch_to_dev_buffer(scmd,
2096 fake_storep + (lba * scsi_debug_sector_size),
2097 scsi_debug_sector_size);
2099 if (-1 == ret) {
2100 write_unlock_irqrestore(&atomic_rw, iflags);
2101 return (DID_ERROR << 16);
2102 } else if ((ret < (num * scsi_debug_sector_size)) &&
2103 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2104 printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2105 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2107 /* Copy first sector to remaining blocks */
2108 for (i = 1 ; i < num ; i++)
2109 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2110 fake_storep + (lba * scsi_debug_sector_size),
2111 scsi_debug_sector_size);
2113 if (scsi_debug_unmap_granularity)
2114 map_region(lba, num);
2115 out:
2116 write_unlock_irqrestore(&atomic_rw, iflags);
2118 return 0;
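/*
 * UNMAP parameter data: an 8-byte header followed by 16-byte block
 * descriptors (struct unmap_block_desc below).  resp_unmap() copies the
 * parameter list into a temporary buffer, validates each descriptor and
 * unmaps the corresponding region.
 */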
2121 struct unmap_block_desc {
2122 __be64 lba;
2123 __be32 blocks;
2124 __be32 __reserved;
2127 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2129 unsigned char *buf;
2130 struct unmap_block_desc *desc;
2131 unsigned int i, payload_len, descriptors;
2132 int ret;
2134 ret = check_readiness(scmd, 1, devip);
2135 if (ret)
2136 return ret;
2138 payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2139 BUG_ON(scsi_bufflen(scmd) != payload_len);
2141 descriptors = (payload_len - 8) / 16;
2143 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2144 if (!buf)
2145 return check_condition_result;
2147 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2149 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2150 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2152 desc = (void *)&buf[8];
2154 for (i = 0 ; i < descriptors ; i++) {
2155 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2156 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2158 ret = check_device_access_params(devip, lba, num);
2159 if (ret)
2160 goto out;
2162 unmap_region(lba, num);
2165 ret = 0;
2167 out:
2168 kfree(buf);
2170 return ret;
2173 #define SDEBUG_GET_LBA_STATUS_LEN 32
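/*
 * GET LBA STATUS (SERVICE ACTION IN(16)): returns a single LBA status
 * descriptor saying whether the requested LBA is currently mapped and how
 * many following blocks share that state, as computed by map_state().
 */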
2175 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2176 struct sdebug_dev_info * devip)
2178 unsigned long long lba;
2179 unsigned int alloc_len, mapped, num;
2180 unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2181 int ret;
2183 ret = check_readiness(scmd, 1, devip);
2184 if (ret)
2185 return ret;
2187 lba = get_unaligned_be64(&scmd->cmnd[2]);
2188 alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2190 if (alloc_len < 24)
2191 return 0;
2193 ret = check_device_access_params(devip, lba, 1);
2194 if (ret)
2195 return ret;
2197 mapped = map_state(lba, &num);
2199 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2200 put_unaligned_be32(16, &arr[0]); /* Parameter Data Length */
2201 put_unaligned_be64(lba, &arr[8]); /* LBA */
2202 put_unaligned_be32(num, &arr[16]); /* Number of blocks */
2203 arr[20] = !mapped; /* mapped = 0, unmapped = 1 */
2205 return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2208 #define SDEBUG_RLUN_ARR_SZ 256
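/*
 * REPORT LUNS: builds a flat response of up to scsi_debug_max_luns LUNs
 * per target (optionally skipping LUN 0) and, for SELECT REPORT > 0,
 * appends the REPORT LUNS well known logical unit.
 */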
2210 static int resp_report_luns(struct scsi_cmnd * scp,
2211 struct sdebug_dev_info * devip)
2213 unsigned int alloc_len;
2214 int lun_cnt, i, upper, num, n, wlun, lun;
2215 unsigned char *cmd = (unsigned char *)scp->cmnd;
2216 int select_report = (int)cmd[2];
2217 struct scsi_lun *one_lun;
2218 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2219 unsigned char * max_addr;
2221 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2222 if ((alloc_len < 4) || (select_report > 2)) {
2223 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2225 return check_condition_result;
2227 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2228 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2229 lun_cnt = scsi_debug_max_luns;
2230 if (1 == select_report)
2231 lun_cnt = 0;
2232 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2233 --lun_cnt;
2234 wlun = (select_report > 0) ? 1 : 0;
2235 num = lun_cnt + wlun;
2236 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2237 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2238 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2239 sizeof(struct scsi_lun)), num);
2240 if (n < num) {
2241 wlun = 0;
2242 lun_cnt = n;
2244 one_lun = (struct scsi_lun *) &arr[8];
2245 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2246 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2247 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2248 i++, lun++) {
2249 upper = (lun >> 8) & 0x3f;
2250 if (upper)
2251 one_lun[i].scsi_lun[0] =
2252 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2253 one_lun[i].scsi_lun[1] = lun & 0xff;
2255 if (wlun) {
2256 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2257 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2258 i++;
2260 alloc_len = (unsigned char *)(one_lun + i) - arr;
2261 return fill_from_dev_buffer(scp, arr,
2262 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
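/*
 * XDWRITEREAD(10): the outgoing data is copied to a temporary buffer and
 * XORed into the data-in side of this bidirectional command.
 */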
2265 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2266 unsigned int num, struct sdebug_dev_info *devip)
2268 int i, j, ret = -1;
2269 unsigned char *kaddr, *buf;
2270 unsigned int offset;
2271 struct scatterlist *sg;
2272 struct scsi_data_buffer *sdb = scsi_in(scp);
2274 /* it would be better not to use a temporary buffer here. */
2275 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2276 if (!buf)
2277 return ret;
2279 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2281 offset = 0;
2282 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2283 kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
2284 if (!kaddr)
2285 goto out;
2287 for (j = 0; j < sg->length; j++)
2288 *(kaddr + sg->offset + j) ^= *(buf + offset + j);
2290 offset += sg->length;
2291 kunmap_atomic(kaddr, KM_USER0);
2293 ret = 0;
2294 out:
2295 kfree(buf);
2297 return ret;
2300 /* When timer goes off this function is called. */
2301 static void timer_intr_handler(unsigned long indx)
2303 struct sdebug_queued_cmd * sqcp;
2304 unsigned long iflags;
2306 if (indx >= scsi_debug_max_queue) {
2307 printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2308 "large\n");
2309 return;
2311 spin_lock_irqsave(&queued_arr_lock, iflags);
2312 sqcp = &queued_arr[(int)indx];
2313 if (! sqcp->in_use) {
2314 printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2315 "interrupt\n");
2316 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2317 return;
2319 sqcp->in_use = 0;
2320 if (sqcp->done_funct) {
2321 sqcp->a_cmnd->result = sqcp->scsi_result;
2322 sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2324 sqcp->done_funct = NULL;
2325 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2329 static struct sdebug_dev_info *
2330 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2332 struct sdebug_dev_info *devip;
2334 devip = kzalloc(sizeof(*devip), flags);
2335 if (devip) {
2336 devip->sdbg_host = sdbg_host;
2337 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2339 return devip;
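/*
 * Look up the sdebug_dev_info attached to a scsi_device, reusing a free
 * entry or allocating a new one on first use.  The entry's sense buffer
 * header is pre-set for descriptor (0x72) or fixed (0x70) format
 * according to scsi_debug_dsense.
 */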
2342 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2344 struct sdebug_host_info * sdbg_host;
2345 struct sdebug_dev_info * open_devip = NULL;
2346 struct sdebug_dev_info * devip =
2347 (struct sdebug_dev_info *)sdev->hostdata;
2349 if (devip)
2350 return devip;
2351 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2352 if (!sdbg_host) {
2353 printk(KERN_ERR "Host info NULL\n");
2354 return NULL;
2356 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2357 if ((devip->used) && (devip->channel == sdev->channel) &&
2358 (devip->target == sdev->id) &&
2359 (devip->lun == sdev->lun))
2360 return devip;
2361 else {
2362 if ((!devip->used) && (!open_devip))
2363 open_devip = devip;
2366 if (!open_devip) { /* try and make a new one */
2367 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2368 if (!open_devip) {
2369 printk(KERN_ERR "%s: out of memory at line %d\n",
2370 __func__, __LINE__);
2371 return NULL;
2375 open_devip->channel = sdev->channel;
2376 open_devip->target = sdev->id;
2377 open_devip->lun = sdev->lun;
2378 open_devip->sdbg_host = sdbg_host;
2379 open_devip->reset = 1;
2380 open_devip->used = 1;
2381 memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
2382 if (scsi_debug_dsense)
2383 open_devip->sense_buff[0] = 0x72;
2384 else {
2385 open_devip->sense_buff[0] = 0x70;
2386 open_devip->sense_buff[7] = 0xa;
2388 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2389 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2391 return open_devip;
2394 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2396 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2397 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2398 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2399 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2400 return 0;
2403 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2405 struct sdebug_dev_info *devip;
2407 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2408 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2409 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2410 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2411 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2412 devip = devInfoReg(sdp);
2413 if (NULL == devip)
2414 return 1; /* no resources, will be marked offline */
2415 sdp->hostdata = devip;
2416 if (sdp->host->cmd_per_lun)
2417 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2418 sdp->host->cmd_per_lun);
2419 blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2420 if (scsi_debug_no_uld)
2421 sdp->no_uld_attach = 1;
2422 return 0;
2425 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2427 struct sdebug_dev_info *devip =
2428 (struct sdebug_dev_info *)sdp->hostdata;
2430 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2431 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2432 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2433 if (devip) {
2434 /* make this slot available for re-use */
2435 devip->used = 0;
2436 sdp->hostdata = NULL;
2440 /* Returns 1 if found 'cmnd' and deleted its timer. else returns 0 */
2441 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2443 unsigned long iflags;
2444 int k;
2445 struct sdebug_queued_cmd *sqcp;
2447 spin_lock_irqsave(&queued_arr_lock, iflags);
2448 for (k = 0; k < scsi_debug_max_queue; ++k) {
2449 sqcp = &queued_arr[k];
2450 if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2451 del_timer_sync(&sqcp->cmnd_timer);
2452 sqcp->in_use = 0;
2453 sqcp->a_cmnd = NULL;
2454 break;
2457 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2458 return (k < scsi_debug_max_queue) ? 1 : 0;
2461 /* Deletes (stops) timers of all queued commands */
2462 static void stop_all_queued(void)
2464 unsigned long iflags;
2465 int k;
2466 struct sdebug_queued_cmd *sqcp;
2468 spin_lock_irqsave(&queued_arr_lock, iflags);
2469 for (k = 0; k < scsi_debug_max_queue; ++k) {
2470 sqcp = &queued_arr[k];
2471 if (sqcp->in_use && sqcp->a_cmnd) {
2472 del_timer_sync(&sqcp->cmnd_timer);
2473 sqcp->in_use = 0;
2474 sqcp->a_cmnd = NULL;
2477 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2480 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2482 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2483 printk(KERN_INFO "scsi_debug: abort\n");
2484 ++num_aborts;
2485 stop_queued_cmnd(SCpnt);
2486 return SUCCESS;
2489 static int scsi_debug_biosparam(struct scsi_device *sdev,
2490 struct block_device * bdev, sector_t capacity, int *info)
2492 int res;
2493 unsigned char *buf;
2495 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2496 printk(KERN_INFO "scsi_debug: biosparam\n");
2497 buf = scsi_bios_ptable(bdev);
2498 if (buf) {
2499 res = scsi_partsize(buf, capacity,
2500 &info[2], &info[0], &info[1]);
2501 kfree(buf);
2502 if (! res)
2503 return res;
2505 info[0] = sdebug_heads;
2506 info[1] = sdebug_sectors_per;
2507 info[2] = sdebug_cylinders_per;
2508 return 0;
2511 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2513 struct sdebug_dev_info * devip;
2515 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2516 printk(KERN_INFO "scsi_debug: device_reset\n");
2517 ++num_dev_resets;
2518 if (SCpnt) {
2519 devip = devInfoReg(SCpnt->device);
2520 if (devip)
2521 devip->reset = 1;
2523 return SUCCESS;
2526 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2528 struct sdebug_host_info *sdbg_host;
2529 struct sdebug_dev_info * dev_info;
2530 struct scsi_device * sdp;
2531 struct Scsi_Host * hp;
2533 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2534 printk(KERN_INFO "scsi_debug: bus_reset\n");
2535 ++num_bus_resets;
2536 if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2537 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2538 if (sdbg_host) {
2539 list_for_each_entry(dev_info,
2540 &sdbg_host->dev_info_list,
2541 dev_list)
2542 dev_info->reset = 1;
2545 return SUCCESS;
2548 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2550 struct sdebug_host_info * sdbg_host;
2551 struct sdebug_dev_info * dev_info;
2553 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2554 printk(KERN_INFO "scsi_debug: host_reset\n");
2555 ++num_host_resets;
2556 spin_lock(&sdebug_host_list_lock);
2557 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2558 list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2559 dev_list)
2560 dev_info->reset = 1;
2562 spin_unlock(&sdebug_host_list_lock);
2563 stop_all_queued();
2564 return SUCCESS;
2567 /* Initializes timers in queued array */
2568 static void __init init_all_queued(void)
2570 unsigned long iflags;
2571 int k;
2572 struct sdebug_queued_cmd * sqcp;
2574 spin_lock_irqsave(&queued_arr_lock, iflags);
2575 for (k = 0; k < scsi_debug_max_queue; ++k) {
2576 sqcp = &queued_arr[k];
2577 init_timer(&sqcp->cmnd_timer);
2578 sqcp->in_use = 0;
2579 sqcp->a_cmnd = NULL;
2581 spin_unlock_irqrestore(&queued_arr_lock, iflags);
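/*
 * Optionally write a legacy MBR partition table at the start of the
 * ramdisk: the 0x55AA signature plus up to SDEBUG_MAX_PARTS roughly
 * equal, cylinder-aligned Linux (type 0x83) partitions.
 */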
2584 static void __init sdebug_build_parts(unsigned char *ramp,
2585 unsigned long store_size)
2587 struct partition * pp;
2588 int starts[SDEBUG_MAX_PARTS + 2];
2589 int sectors_per_part, num_sectors, k;
2590 int heads_by_sects, start_sec, end_sec;
2592 /* assume partition table already zeroed */
2593 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2594 return;
2595 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2596 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2597 printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2598 "partitions to %d\n", SDEBUG_MAX_PARTS);
2600 num_sectors = (int)sdebug_store_sectors;
2601 sectors_per_part = (num_sectors - sdebug_sectors_per)
2602 / scsi_debug_num_parts;
2603 heads_by_sects = sdebug_heads * sdebug_sectors_per;
2604 starts[0] = sdebug_sectors_per;
2605 for (k = 1; k < scsi_debug_num_parts; ++k)
2606 starts[k] = ((k * sectors_per_part) / heads_by_sects)
2607 * heads_by_sects;
2608 starts[scsi_debug_num_parts] = num_sectors;
2609 starts[scsi_debug_num_parts + 1] = 0;
2611 ramp[510] = 0x55; /* magic partition markings */
2612 ramp[511] = 0xAA;
2613 pp = (struct partition *)(ramp + 0x1be);
2614 for (k = 0; starts[k + 1]; ++k, ++pp) {
2615 start_sec = starts[k];
2616 end_sec = starts[k + 1] - 1;
2617 pp->boot_ind = 0;
2619 pp->cyl = start_sec / heads_by_sects;
2620 pp->head = (start_sec - (pp->cyl * heads_by_sects))
2621 / sdebug_sectors_per;
2622 pp->sector = (start_sec % sdebug_sectors_per) + 1;
2624 pp->end_cyl = end_sec / heads_by_sects;
2625 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2626 / sdebug_sectors_per;
2627 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2629 pp->start_sect = start_sec;
2630 pp->nr_sects = end_sec - start_sec + 1;
2631 pp->sys_ind = 0x83; /* plain Linux partition */
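/*
 * Complete a command: copy simulated autosense into the midlayer's sense
 * buffer when the result is CHECK CONDITION, then either call the done
 * callback immediately (delay <= 0) or park the command in queued_arr and
 * arm a per-slot timer so timer_intr_handler() completes it delta_jiff
 * jiffies later.  Returns 1 to report "busy" when every slot is in use.
 */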
2635 static int schedule_resp(struct scsi_cmnd * cmnd,
2636 struct sdebug_dev_info * devip,
2637 done_funct_t done, int scsi_result, int delta_jiff)
2639 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2640 if (scsi_result) {
2641 struct scsi_device * sdp = cmnd->device;
2643 printk(KERN_INFO "scsi_debug: <%u %u %u %u> "
2644 "non-zero result=0x%x\n", sdp->host->host_no,
2645 sdp->channel, sdp->id, sdp->lun, scsi_result);
2648 if (cmnd && devip) {
2649 /* simulate autosense by this driver */
2650 if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2651 memcpy(cmnd->sense_buffer, devip->sense_buff,
2652 (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2653 SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
2655 if (delta_jiff <= 0) {
2656 if (cmnd)
2657 cmnd->result = scsi_result;
2658 if (done)
2659 done(cmnd);
2660 return 0;
2661 } else {
2662 unsigned long iflags;
2663 int k;
2664 struct sdebug_queued_cmd * sqcp = NULL;
2666 spin_lock_irqsave(&queued_arr_lock, iflags);
2667 for (k = 0; k < scsi_debug_max_queue; ++k) {
2668 sqcp = &queued_arr[k];
2669 if (! sqcp->in_use)
2670 break;
2672 if (k >= scsi_debug_max_queue) {
2673 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2674 printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2675 return 1; /* report busy to mid level */
2677 sqcp->in_use = 1;
2678 sqcp->a_cmnd = cmnd;
2679 sqcp->scsi_result = scsi_result;
2680 sqcp->done_funct = done;
2681 sqcp->cmnd_timer.function = timer_intr_handler;
2682 sqcp->cmnd_timer.data = k;
2683 sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2684 add_timer(&sqcp->cmnd_timer);
2685 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2686 if (cmnd)
2687 cmnd->result = 0;
2688 return 0;
2691 /* Note: The following macros create attribute files in the
2692 /sys/module/scsi_debug/parameters directory. Unfortunately the driver
2693 is not notified when one of these files is changed, so it cannot
2694 trigger auxiliary actions as it can when the corresponding attribute
2695 in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
2697 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2698 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2699 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2700 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2701 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2702 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2703 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2704 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2705 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2706 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2707 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2708 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2709 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2710 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2711 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2712 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2713 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2714 S_IRUGO | S_IWUSR);
2715 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2716 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2717 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2718 module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2719 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2720 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2721 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2722 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2723 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2724 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2725 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2726 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2727 module_param_named(tpu, scsi_debug_tpu, int, S_IRUGO);
2728 module_param_named(tpws, scsi_debug_tpws, int, S_IRUGO);
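/*
 * Illustrative usage (the values below are only examples): parameters can
 * be supplied at load time, e.g.
 *   modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4 sector_size=4096
 * Those declared writable (S_IWUSR) can also be changed at runtime via
 * /sys/module/scsi_debug/parameters/; several have corresponding
 * attributes under /sys/bus/pseudo/drivers/scsi_debug which additionally
 * trigger the side effects noted above.
 */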
2730 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2731 MODULE_DESCRIPTION("SCSI debug adapter driver");
2732 MODULE_LICENSE("GPL");
2733 MODULE_VERSION(SCSI_DEBUG_VERSION);
2735 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2736 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2737 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2738 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2739 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2740 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2741 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2742 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2743 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2744 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
2745 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2746 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2747 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2748 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2749 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2750 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2751 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2752 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2753 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2754 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
2755 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2756 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2757 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2758 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2759 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2760 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
2761 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
2762 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
2763 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2764 MODULE_PARM_DESC(tpu, "enable TP, support UNMAP command (def=0)");
2765 MODULE_PARM_DESC(tpws, "enable TP, support WRITE SAME(16) with UNMAP bit (def=0)");
2767 static char sdebug_info[256];
2769 static const char * scsi_debug_info(struct Scsi_Host * shp)
2771 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2772 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2773 scsi_debug_version_date, scsi_debug_dev_size_mb,
2774 scsi_debug_opts);
2775 return sdebug_info;
2778 /* scsi_debug_proc_info
2779 * Used because the driver currently has no support of its own for /proc/scsi
2781 static int scsi_debug_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
2782 int length, int inout)
2784 int len, pos, begin;
2785 int orig_length;
2787 orig_length = length;
2789 if (inout == 1) {
2790 char arr[16];
2791 int minLen = length > 15 ? 15 : length;
2793 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2794 return -EACCES;
2795 memcpy(arr, buffer, minLen);
2796 arr[minLen] = '\0';
2797 if (1 != sscanf(arr, "%d", &pos))
2798 return -EINVAL;
2799 scsi_debug_opts = pos;
2800 if (scsi_debug_every_nth != 0)
2801 scsi_debug_cmnd_count = 0;
2802 return length;
2804 begin = 0;
2805 pos = len = sprintf(buffer, "scsi_debug adapter driver, version "
2806 "%s [%s]\n"
2807 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2808 "every_nth=%d(curr:%d)\n"
2809 "delay=%d, max_luns=%d, scsi_level=%d\n"
2810 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2811 "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2812 "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2813 SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2814 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2815 scsi_debug_cmnd_count, scsi_debug_delay,
2816 scsi_debug_max_luns, scsi_debug_scsi_level,
2817 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2818 sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2819 num_host_resets, dix_reads, dix_writes, dif_errors);
2820 if (pos < offset) {
2821 len = 0;
2822 begin = pos;
2824 *start = buffer + (offset - begin); /* Start of wanted data */
2825 len -= (offset - begin);
2826 if (len > length)
2827 len = length;
2828 return len;
2831 static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
2833 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2836 static ssize_t sdebug_delay_store(struct device_driver * ddp,
2837 const char * buf, size_t count)
2839 int delay;
2840 char work[20];
2842 if (1 == sscanf(buf, "%10s", work)) {
2843 if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2844 scsi_debug_delay = delay;
2845 return count;
2848 return -EINVAL;
2850 DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show,
2851 sdebug_delay_store);
2853 static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf)
2855 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2858 static ssize_t sdebug_opts_store(struct device_driver * ddp,
2859 const char * buf, size_t count)
2861 int opts;
2862 char work[20];
2864 if (1 == sscanf(buf, "%10s", work)) {
2865 if (0 == strnicmp(work,"0x", 2)) {
2866 if (1 == sscanf(&work[2], "%x", &opts))
2867 goto opts_done;
2868 } else {
2869 if (1 == sscanf(work, "%d", &opts))
2870 goto opts_done;
2873 return -EINVAL;
2874 opts_done:
2875 scsi_debug_opts = opts;
2876 scsi_debug_cmnd_count = 0;
2877 return count;
2879 DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show,
2880 sdebug_opts_store);
2882 static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf)
2884 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2886 static ssize_t sdebug_ptype_store(struct device_driver * ddp,
2887 const char * buf, size_t count)
2889 int n;
2891 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2892 scsi_debug_ptype = n;
2893 return count;
2895 return -EINVAL;
2897 DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store);
2899 static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf)
2901 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2903 static ssize_t sdebug_dsense_store(struct device_driver * ddp,
2904 const char * buf, size_t count)
2906 int n;
2908 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2909 scsi_debug_dsense = n;
2910 return count;
2912 return -EINVAL;
2914 DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
2915 sdebug_dsense_store);
2917 static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
2919 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2921 static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
2922 const char * buf, size_t count)
2924 int n;
2926 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2927 scsi_debug_fake_rw = n;
2928 return count;
2930 return -EINVAL;
2932 DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
2933 sdebug_fake_rw_store);
2935 static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
2937 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2939 static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
2940 const char * buf, size_t count)
2942 int n;
2944 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2945 scsi_debug_no_lun_0 = n;
2946 return count;
2948 return -EINVAL;
2950 DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
2951 sdebug_no_lun_0_store);
2953 static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
2955 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
2957 static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
2958 const char * buf, size_t count)
2960 int n;
2962 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2963 scsi_debug_num_tgts = n;
2964 sdebug_max_tgts_luns();
2965 return count;
2967 return -EINVAL;
2969 DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show,
2970 sdebug_num_tgts_store);
2972 static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf)
2974 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
2976 DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL);
2978 static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf)
2980 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
2982 DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL);
2984 static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf)
2986 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
2988 static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
2989 const char * buf, size_t count)
2991 int nth;
2993 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
2994 scsi_debug_every_nth = nth;
2995 scsi_debug_cmnd_count = 0;
2996 return count;
2998 return -EINVAL;
3000 DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show,
3001 sdebug_every_nth_store);
3003 static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf)
3005 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3007 static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
3008 const char * buf, size_t count)
3010 int n;
3012 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3013 scsi_debug_max_luns = n;
3014 sdebug_max_tgts_luns();
3015 return count;
3017 return -EINVAL;
3019 DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
3020 sdebug_max_luns_store);
3022 static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
3024 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3026 static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
3027 const char * buf, size_t count)
3029 int n;
3031 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3032 (n <= SCSI_DEBUG_CANQUEUE)) {
3033 scsi_debug_max_queue = n;
3034 return count;
3036 return -EINVAL;
3038 DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
3039 sdebug_max_queue_store);
3041 static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
3043 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3045 DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);
3047 static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
3049 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3051 DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);
3053 static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
3055 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3057 static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
3058 const char * buf, size_t count)
3060 int n;
3062 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3063 scsi_debug_virtual_gb = n;
3065 sdebug_capacity = get_sdebug_capacity();
3067 return count;
3069 return -EINVAL;
3071 DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
3072 sdebug_virtual_gb_store);
3074 static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
3076 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3079 static ssize_t sdebug_add_host_store(struct device_driver * ddp,
3080 const char * buf, size_t count)
3082 int delta_hosts;
3084 if (sscanf(buf, "%d", &delta_hosts) != 1)
3085 return -EINVAL;
3086 if (delta_hosts > 0) {
3087 do {
3088 sdebug_add_adapter();
3089 } while (--delta_hosts);
3090 } else if (delta_hosts < 0) {
3091 do {
3092 sdebug_remove_adapter();
3093 } while (++delta_hosts);
3095 return count;
3097 DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
3098 sdebug_add_host_store);
3100 static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
3101 char * buf)
3103 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3105 static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
3106 const char * buf, size_t count)
3108 int n;
3110 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3111 scsi_debug_vpd_use_hostno = n;
3112 return count;
3114 return -EINVAL;
3116 DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
3117 sdebug_vpd_use_hostno_store);
3119 static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
3121 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3123 DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
3125 static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf)
3127 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3129 DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL);
3131 static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf)
3133 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3135 DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
3137 static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
3139 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
3141 DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
3143 static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
3145 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3147 DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);
3149 static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
3151 ssize_t count;
3153 if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0)
3154 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3155 sdebug_store_sectors);
3157 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3159 buf[count++] = '\n';
3160 buf[count++] = 0;
3162 return count;
3164 DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
3167 /* Note: The following function creates attribute files in the
3168 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3169 files (over those found in the /sys/module/scsi_debug/parameters
3170 directory) is that auxiliary actions can be triggered when an attribute
3171 is changed. For example see: sdebug_add_host_store() above.
3173 static int do_create_driverfs_files(void)
3175 int ret;
3177 ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3178 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
3179 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3180 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3181 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3182 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3183 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3184 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3185 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3186 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3187 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3188 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3189 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3190 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
3191 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3192 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3193 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3194 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3195 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix);
3196 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
3197 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
3198 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
3199 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
3200 return ret;
3203 static void do_remove_driverfs_files(void)
3205 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
3206 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
3207 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
3208 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
3209 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix);
3210 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3211 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3212 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3213 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3214 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
3215 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3216 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3217 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3218 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3219 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3220 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3221 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3222 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3223 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3224 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3225 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3226 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay);
3227 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3230 struct device *pseudo_primary;
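/*
 * Module initialisation: validate the parameters, size and allocate the
 * shared ramdisk (plus the DIF store and provisioning bitmap when dif/dix
 * or thin provisioning are enabled), register the pseudo root device, bus
 * and driver with their sysfs attributes, and finally add
 * scsi_debug_add_host adapters.
 */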
3232 static int __init scsi_debug_init(void)
3234 unsigned long sz;
3235 int host_to_add;
3236 int k;
3237 int ret;
3239 switch (scsi_debug_sector_size) {
3240 case 512:
3241 case 1024:
3242 case 2048:
3243 case 4096:
3244 break;
3245 default:
3246 printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3247 scsi_debug_sector_size);
3248 return -EINVAL;
3251 switch (scsi_debug_dif) {
3253 case SD_DIF_TYPE0_PROTECTION:
3254 case SD_DIF_TYPE1_PROTECTION:
3255 case SD_DIF_TYPE2_PROTECTION:
3256 case SD_DIF_TYPE3_PROTECTION:
3257 break;
3259 default:
3260 printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3261 return -EINVAL;
3264 if (scsi_debug_guard > 1) {
3265 printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3266 return -EINVAL;
3269 if (scsi_debug_ato > 1) {
3270 printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
3271 return -EINVAL;
3274 if (scsi_debug_physblk_exp > 15) {
3275 printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3276 scsi_debug_physblk_exp);
3277 return -EINVAL;
3280 if (scsi_debug_lowest_aligned > 0x3fff) {
3281 printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3282 scsi_debug_lowest_aligned);
3283 return -EINVAL;
3286 if (scsi_debug_dev_size_mb < 1)
3287 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
3288 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3289 sdebug_store_sectors = sz / scsi_debug_sector_size;
3290 sdebug_capacity = get_sdebug_capacity();
3292 /* play around with geometry, don't waste too much on track 0 */
3293 sdebug_heads = 8;
3294 sdebug_sectors_per = 32;
3295 if (scsi_debug_dev_size_mb >= 256)
3296 sdebug_heads = 64;
3297 else if (scsi_debug_dev_size_mb >= 16)
3298 sdebug_heads = 32;
3299 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3300 (sdebug_sectors_per * sdebug_heads);
3301 if (sdebug_cylinders_per >= 1024) {
3302 /* other LLDs do this; implies >= 1GB ram disk ... */
3303 sdebug_heads = 255;
3304 sdebug_sectors_per = 63;
3305 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3306 (sdebug_sectors_per * sdebug_heads);
3309 fake_storep = vmalloc(sz);
3310 if (NULL == fake_storep) {
3311 printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3312 return -ENOMEM;
3314 memset(fake_storep, 0, sz);
3315 if (scsi_debug_num_parts > 0)
3316 sdebug_build_parts(fake_storep, sz);
3318 if (scsi_debug_dif) {
3319 int dif_size;
3321 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3322 dif_storep = vmalloc(dif_size);
3324 printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
3325 dif_size, dif_storep);
3327 if (dif_storep == NULL) {
3328 printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
3329 ret = -ENOMEM;
3330 goto free_vm;
3333 memset(dif_storep, 0xff, dif_size);
3336 /* Thin Provisioning */
3337 if (scsi_debug_tpu || scsi_debug_tpws) {
3338 unsigned int map_bytes;
3340 scsi_debug_unmap_max_blocks =
3341 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3343 scsi_debug_unmap_max_desc =
3344 clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3346 scsi_debug_unmap_granularity =
3347 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3349 if (scsi_debug_unmap_alignment &&
3350 scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) {
3351 printk(KERN_ERR
3352 "%s: ERR: unmap_granularity < unmap_alignment\n",
3353 __func__);
3354 return -EINVAL;
3357 map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity);
3358 map_bytes = map_size >> 3;
3359 map_storep = vmalloc(map_bytes);
3361 printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3362 map_size);
3364 if (map_storep == NULL) {
3365 printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3366 ret = -ENOMEM;
3367 goto free_vm;
3370 memset(map_storep, 0x0, map_bytes);
3372 /* Map first 1KB for partition table */
3373 if (scsi_debug_num_parts)
3374 map_region(0, 2);
3377 pseudo_primary = root_device_register("pseudo_0");
3378 if (IS_ERR(pseudo_primary)) {
3379 printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
3380 ret = PTR_ERR(pseudo_primary);
3381 goto free_vm;
3383 ret = bus_register(&pseudo_lld_bus);
3384 if (ret < 0) {
3385 printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3386 ret);
3387 goto dev_unreg;
3389 ret = driver_register(&sdebug_driverfs_driver);
3390 if (ret < 0) {
3391 printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
3392 ret);
3393 goto bus_unreg;
3395 ret = do_create_driverfs_files();
3396 if (ret < 0) {
3397 printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
3398 ret);
3399 goto del_files;
3402 init_all_queued();
3404 host_to_add = scsi_debug_add_host;
3405 scsi_debug_add_host = 0;
3407 for (k = 0; k < host_to_add; k++) {
3408 if (sdebug_add_adapter()) {
3409 printk(KERN_ERR "scsi_debug_init: "
3410 "sdebug_add_adapter failed k=%d\n", k);
3411 break;
3415 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3416 printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3417 scsi_debug_add_host);
3419 return 0;
3421 del_files:
3422 do_remove_driverfs_files();
3423 driver_unregister(&sdebug_driverfs_driver);
3424 bus_unreg:
3425 bus_unregister(&pseudo_lld_bus);
3426 dev_unreg:
3427 root_device_unregister(pseudo_primary);
3428 free_vm:
3429 if (map_storep)
3430 vfree(map_storep);
3431 if (dif_storep)
3432 vfree(dif_storep);
3433 vfree(fake_storep);
3435 return ret;
3438 static void __exit scsi_debug_exit(void)
3440 int k = scsi_debug_add_host;
3442 stop_all_queued();
3443 for (; k; k--)
3444 sdebug_remove_adapter();
3445 do_remove_driverfs_files();
3446 driver_unregister(&sdebug_driverfs_driver);
3447 bus_unregister(&pseudo_lld_bus);
3448 root_device_unregister(pseudo_primary);
3450 if (dif_storep)
3451 vfree(dif_storep);
3453 vfree(fake_storep);
3456 device_initcall(scsi_debug_init);
3457 module_exit(scsi_debug_exit);
3459 static void sdebug_release_adapter(struct device * dev)
3461 struct sdebug_host_info *sdbg_host;
3463 sdbg_host = to_sdebug_host(dev);
3464 kfree(sdbg_host);
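/*
 * Allocate one pseudo adapter with num_tgts * max_luns device-info slots,
 * link it into sdebug_host_list and register its device ("adapter<N>") on
 * the pseudo bus so that the bus probe can create the corresponding SCSI
 * host.  On failure everything allocated so far is freed.
 */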
3467 static int sdebug_add_adapter(void)
3469 int k, devs_per_host;
3470 int error = 0;
3471 struct sdebug_host_info *sdbg_host;
3472 struct sdebug_dev_info *sdbg_devinfo, *tmp;
3474 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
3475 if (NULL == sdbg_host) {
3476 printk(KERN_ERR "%s: out of memory at line %d\n",
3477 __func__, __LINE__);
3478 return -ENOMEM;
3481 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
3483 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
3484 for (k = 0; k < devs_per_host; k++) {
3485 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
3486 if (!sdbg_devinfo) {
3487 printk(KERN_ERR "%s: out of memory at line %d\n",
3488 __func__, __LINE__);
3489 error = -ENOMEM;
3490 goto clean;
3494 spin_lock(&sdebug_host_list_lock);
3495 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
3496 spin_unlock(&sdebug_host_list_lock);
3498 sdbg_host->dev.bus = &pseudo_lld_bus;
3499 sdbg_host->dev.parent = pseudo_primary;
3500 sdbg_host->dev.release = &sdebug_release_adapter;
3501 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3503 error = device_register(&sdbg_host->dev);
3505 if (error)
3506 goto clean;
3508 ++scsi_debug_add_host;
3509 return error;
3511 clean:
3512 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3513 dev_list) {
3514 list_del(&sdbg_devinfo->dev_list);
3515 kfree(sdbg_devinfo);
3518 kfree(sdbg_host);
3519 return error;
3522 static void sdebug_remove_adapter(void)
3524 struct sdebug_host_info * sdbg_host = NULL;
3526 spin_lock(&sdebug_host_list_lock);
3527 if (!list_empty(&sdebug_host_list)) {
3528 sdbg_host = list_entry(sdebug_host_list.prev,
3529 struct sdebug_host_info, host_list);
3530 list_del(&sdbg_host->host_list);
3532 spin_unlock(&sdebug_host_list_lock);
3534 if (!sdbg_host)
3535 return;
3537 device_unregister(&sdbg_host->dev);
3538 --scsi_debug_add_host;
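/*
 * queuecommand entry point: decode the CDB, check LUN validity, apply the
 * every_nth error injection options, dispatch to the resp_*() handlers
 * above and hand the result to schedule_resp() for immediate or delayed
 * completion.
 */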
3541 static
3542 int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
3544 unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
3545 int len, k;
3546 unsigned int num;
3547 unsigned long long lba;
3548 u32 ei_lba;
3549 int errsts = 0;
3550 int target = SCpnt->device->id;
3551 struct sdebug_dev_info *devip = NULL;
3552 int inj_recovered = 0;
3553 int inj_transport = 0;
3554 int inj_dif = 0;
3555 int inj_dix = 0;
3556 int delay_override = 0;
3557 int unmap = 0;
3559 scsi_set_resid(SCpnt, 0);
3560 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
3561 printk(KERN_INFO "scsi_debug: cmd ");
3562 for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
3563 printk("%02x ", (int)cmd[k]);
3564 printk("\n");
3567 if (target == SCpnt->device->host->hostt->this_id) {
3568 printk(KERN_INFO "scsi_debug: initiator's id used as "
3569 "target!\n");
3570 return schedule_resp(SCpnt, NULL, done,
3571 DID_NO_CONNECT << 16, 0);
3574 if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
3575 (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
3576 return schedule_resp(SCpnt, NULL, done,
3577 DID_NO_CONNECT << 16, 0);
3578 devip = devInfoReg(SCpnt->device);
3579 if (NULL == devip)
3580 return schedule_resp(SCpnt, NULL, done,
3581 DID_NO_CONNECT << 16, 0);
3583 if ((scsi_debug_every_nth != 0) &&
3584 (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
3585 scsi_debug_cmnd_count = 0;
3586 if (scsi_debug_every_nth < -1)
3587 scsi_debug_every_nth = -1;
3588 if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
3589 return 0; /* ignore command causing timeout */
3590 else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
3591 inj_recovered = 1; /* to reads and writes below */
3592 else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
3593 inj_transport = 1; /* to reads and writes below */
3594 else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
3595 inj_dif = 1; /* to reads and writes below */
3596 else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
3597 inj_dix = 1; /* to reads and writes below */
3600 if (devip->wlun) {
3601 switch (*cmd) {
3602 case INQUIRY:
3603 case REQUEST_SENSE:
3604 case TEST_UNIT_READY:
3605 case REPORT_LUNS:
3606 break; /* only allowable wlun commands */
3607 default:
3608 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3609 printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
3610 "not supported for wlun\n", *cmd);
3611 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3612 INVALID_OPCODE, 0);
3613 errsts = check_condition_result;
3614 return schedule_resp(SCpnt, devip, done, errsts, 0);
3619 switch (*cmd) {
3620 case INQUIRY: /* mandatory, ignore unit attention */
3621 delay_override = 1;
3622 errsts = resp_inquiry(SCpnt, target, devip);
3623 break;
3624 case REQUEST_SENSE: /* mandatory, ignore unit attention */
3625 delay_override = 1;
3626 errsts = resp_requests(SCpnt, devip);
3627 break;
3628 case REZERO_UNIT: /* actually this is REWIND for SSC */
3629 case START_STOP:
3630 errsts = resp_start_stop(SCpnt, devip);
3631 break;
3632 case ALLOW_MEDIUM_REMOVAL:
3633 errsts = check_readiness(SCpnt, 1, devip);
3634 if (errsts)
3635 break;
3636 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3637 printk(KERN_INFO "scsi_debug: Medium removal %s\n",
3638 cmd[4] ? "inhibited" : "enabled");
3639 break;
3640 case SEND_DIAGNOSTIC: /* mandatory */
3641 errsts = check_readiness(SCpnt, 1, devip);
3642 break;
3643 case TEST_UNIT_READY: /* mandatory */
3644 delay_override = 1;
3645 errsts = check_readiness(SCpnt, 0, devip);
3646 break;
3647 case RESERVE:
3648 errsts = check_readiness(SCpnt, 1, devip);
3649 break;
3650 case RESERVE_10:
3651 errsts = check_readiness(SCpnt, 1, devip);
3652 break;
3653 case RELEASE:
3654 errsts = check_readiness(SCpnt, 1, devip);
3655 break;
3656 case RELEASE_10:
3657 errsts = check_readiness(SCpnt, 1, devip);
3658 break;
3659 case READ_CAPACITY:
3660 errsts = resp_readcap(SCpnt, devip);
3661 break;
3662 case SERVICE_ACTION_IN:
3663 if (cmd[1] == SAI_READ_CAPACITY_16)
3664 errsts = resp_readcap16(SCpnt, devip);
3665 else if (cmd[1] == SAI_GET_LBA_STATUS) {
3667 if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0) {
3668 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3669 INVALID_COMMAND_OPCODE, 0);
3670 errsts = check_condition_result;
3671 } else
3672 errsts = resp_get_lba_status(SCpnt, devip);
3673 } else {
3674 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3675 INVALID_OPCODE, 0);
3676 errsts = check_condition_result;
3677 }
3678 break;
3679 case MAINTENANCE_IN:
3680 if (MI_REPORT_TARGET_PGS != cmd[1]) {
3681 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3682 INVALID_OPCODE, 0);
3683 errsts = check_condition_result;
3684 break;
3685 }
3686 errsts = resp_report_tgtpgs(SCpnt, devip);
3687 break;
3688 case READ_16:
3689 case READ_12:
3690 case READ_10:
3691 /* READ{10,12,16} and DIF Type 2 are natural enemies */
3692 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3693 cmd[1] & 0xe0) {
3694 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3695 INVALID_COMMAND_OPCODE, 0);
3696 errsts = check_condition_result;
3697 break;
3698 }
3700 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3701 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3702 (cmd[1] & 0xe0) == 0)
3703 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3705 /* fall through */
3706 case READ_6:
3707 read:
3708 errsts = check_readiness(SCpnt, 0, devip);
3709 if (errsts)
3710 break;
3711 if (scsi_debug_fake_rw)
3712 break;
3713 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3714 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3715 if (inj_recovered && (0 == errsts)) {
3716 mk_sense_buffer(devip, RECOVERED_ERROR,
3717 THRESHOLD_EXCEEDED, 0);
3718 errsts = check_condition_result;
3719 } else if (inj_transport && (0 == errsts)) {
3720 mk_sense_buffer(devip, ABORTED_COMMAND,
3721 TRANSPORT_PROBLEM, ACK_NAK_TO);
3722 errsts = check_condition_result;
3723 } else if (inj_dif && (0 == errsts)) {
3724 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3725 errsts = illegal_condition_result;
3726 } else if (inj_dix && (0 == errsts)) {
3727 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3728 errsts = illegal_condition_result;
3729 }
3730 break;
3731 case REPORT_LUNS: /* mandatory, ignore unit attention */
3732 delay_override = 1;
3733 errsts = resp_report_luns(SCpnt, devip);
3734 break;
3735 case VERIFY: /* 10 byte SBC-2 command */
3736 errsts = check_readiness(SCpnt, 0, devip);
3737 break;
3738 case WRITE_16:
3739 case WRITE_12:
3740 case WRITE_10:
3741 /* WRITE{10,12,16} and DIF Type 2 are natural enemies */
3742 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
3743 cmd[1] & 0xe0) {
3744 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3745 INVALID_COMMAND_OPCODE, 0);
3746 errsts = check_condition_result;
3747 break;
3748 }
3750 if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
3751 scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
3752 (cmd[1] & 0xe0) == 0)
3753 printk(KERN_ERR "Unprotected RD/WR to DIF device\n");
3755 /* fall through */
3756 case WRITE_6:
3757 write:
3758 errsts = check_readiness(SCpnt, 0, devip);
3759 if (errsts)
3760 break;
3761 if (scsi_debug_fake_rw)
3762 break;
3763 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3764 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3765 if (inj_recovered && (0 == errsts)) {
3766 mk_sense_buffer(devip, RECOVERED_ERROR,
3767 THRESHOLD_EXCEEDED, 0);
3768 errsts = check_condition_result;
3769 } else if (inj_dif && (0 == errsts)) {
3770 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
3771 errsts = illegal_condition_result;
3772 } else if (inj_dix && (0 == errsts)) {
3773 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
3774 errsts = illegal_condition_result;
3775 }
3776 break;
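/*
 * Thin-provisioning commands: WRITE SAME(16) with the UNMAP bit and the
 * UNMAP command below are only honoured when the tpws/tpu provisioning
 * parameters are enabled; otherwise they are rejected with a CHECK
 * CONDITION.
 */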
3777 case WRITE_SAME_16:
3778 if (cmd[1] & 0x8) {
3779 if (scsi_debug_tpws == 0) {
3780 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3781 INVALID_FIELD_IN_CDB, 0);
3782 errsts = check_condition_result;
3783 } else
3784 unmap = 1;
3785 }
3786 if (errsts)
3787 break;
3788 /* fall through */
3789 case WRITE_SAME:
3790 errsts = check_readiness(SCpnt, 0, devip);
3791 if (errsts)
3792 break;
3793 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3794 errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
3795 break;
3796 case UNMAP:
3797 errsts = check_readiness(SCpnt, 0, devip);
3798 if (errsts)
3799 break;
3801 if (scsi_debug_unmap_max_desc == 0 || scsi_debug_tpu == 0) {
3802 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3803 INVALID_COMMAND_OPCODE, 0);
3804 errsts = check_condition_result;
3805 } else
3806 errsts = resp_unmap(SCpnt, devip);
3807 break;
3808 case MODE_SENSE:
3809 case MODE_SENSE_10:
3810 errsts = resp_mode_sense(SCpnt, target, devip);
3811 break;
3812 case MODE_SELECT:
3813 errsts = resp_mode_select(SCpnt, 1, devip);
3814 break;
3815 case MODE_SELECT_10:
3816 errsts = resp_mode_select(SCpnt, 0, devip);
3817 break;
3818 case LOG_SENSE:
3819 errsts = resp_log_sense(SCpnt, devip);
3820 break;
3821 case SYNCHRONIZE_CACHE:
3822 delay_override = 1;
3823 errsts = check_readiness(SCpnt, 0, devip);
3824 break;
3825 case WRITE_BUFFER:
3826 errsts = check_readiness(SCpnt, 1, devip);
3827 break;
3828 case XDWRITEREAD_10:
3829 if (!scsi_bidi_cmnd(SCpnt)) {
3830 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3831 INVALID_FIELD_IN_CDB, 0);
3832 errsts = check_condition_result;
3833 break;
3834 }
3836 errsts = check_readiness(SCpnt, 0, devip);
3837 if (errsts)
3838 break;
3839 if (scsi_debug_fake_rw)
3840 break;
3841 get_data_transfer_info(cmd, &lba, &num, &ei_lba);
3842 errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
3843 if (errsts)
3844 break;
3845 errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
3846 if (errsts)
3847 break;
3848 errsts = resp_xdwriteread(SCpnt, lba, num, devip);
3849 break;
3850 case VARIABLE_LENGTH_CMD:
3851 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
3853 if ((cmd[10] & 0xe0) == 0)
3854 printk(KERN_ERR
3855 "Unprotected RD/WR to DIF device\n");
3857 if (cmd[9] == READ_32) {
3858 BUG_ON(SCpnt->cmd_len < 32);
3859 goto read;
3860 }
3862 if (cmd[9] == WRITE_32) {
3863 BUG_ON(SCpnt->cmd_len < 32);
3864 goto write;
3865 }
3866 }
3868 mk_sense_buffer(devip, ILLEGAL_REQUEST,
3869 INVALID_FIELD_IN_CDB, 0);
3870 errsts = check_condition_result;
3871 break;
3873 default:
3874 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
3875 printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
3876 "supported\n", *cmd);
3877 errsts = check_readiness(SCpnt, 1, devip);
3878 if (errsts)
3879 break; /* Unit attention takes precedence */
3880 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
3881 errsts = check_condition_result;
3882 break;
3883 }
3884 return schedule_resp(SCpnt, devip, done, errsts,
3885 (delay_override ? 0 : scsi_debug_delay));
3886 }
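/*
 * Usage sketch (assumes the module-parameter names num_tgts, opts and
 * every_nth map onto the scsi_debug_* variables above, and that
 * SCSI_DEBUG_OPT_NOISE is bit 0 as in the stock driver; see the
 * sdebug26.html page cited in the file header):
 *
 *   modprobe scsi_debug num_tgts=2 opts=1
 *
 * creates two targets whose CDBs are logged by the OPT_NOISE branch above;
 * adding every_nth=N together with one of the error option bits drives the
 * inj_* branches in the READ/WRITE cases.
 */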
3888 static DEF_SCSI_QCMD(scsi_debug_queuecommand)
3890 static struct scsi_host_template sdebug_driver_template = {
3891 .proc_info = scsi_debug_proc_info,
3892 .proc_name = sdebug_proc_name,
3893 .name = "SCSI DEBUG",
3894 .info = scsi_debug_info,
3895 .slave_alloc = scsi_debug_slave_alloc,
3896 .slave_configure = scsi_debug_slave_configure,
3897 .slave_destroy = scsi_debug_slave_destroy,
3898 .ioctl = scsi_debug_ioctl,
3899 .queuecommand = scsi_debug_queuecommand,
3900 .eh_abort_handler = scsi_debug_abort,
3901 .eh_bus_reset_handler = scsi_debug_bus_reset,
3902 .eh_device_reset_handler = scsi_debug_device_reset,
3903 .eh_host_reset_handler = scsi_debug_host_reset,
3904 .bios_param = scsi_debug_biosparam,
3905 .can_queue = SCSI_DEBUG_CANQUEUE,
3906 .this_id = 7,
3907 .sg_tablesize = 256,
3908 .cmd_per_lun = 16,
3909 .max_sectors = 0xffff,
3910 .use_clustering = DISABLE_CLUSTERING,
3911 .module = THIS_MODULE,
3912 };
3914 static int sdebug_driver_probe(struct device * dev)
3915 {
3916 int error = 0;
3917 struct sdebug_host_info *sdbg_host;
3918 struct Scsi_Host *hpnt;
3919 int host_prot;
3921 sdbg_host = to_sdebug_host(dev);
3923 sdebug_driver_template.can_queue = scsi_debug_max_queue;
3924 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
3925 if (NULL == hpnt) {
3926 printk(KERN_ERR "%s: scsi_register failed\n", __func__);
3927 error = -ENODEV;
3928 return error;
3929 }
3931 sdbg_host->shost = hpnt;
3932 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
3933 if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
3934 hpnt->max_id = scsi_debug_num_tgts + 1;
3935 else
3936 hpnt->max_id = scsi_debug_num_tgts;
3937 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS; /* = scsi_debug_max_luns; */
3939 host_prot = 0;
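/*
 * Map the scsi_debug_dif/scsi_debug_dix settings onto the SHOST_DIF_TYPEn
 * and SHOST_DIX_TYPEn capability bits advertised to the midlayer through
 * scsi_host_set_prot() below.
 */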
3941 switch (scsi_debug_dif) {
3943 case SD_DIF_TYPE1_PROTECTION:
3944 host_prot = SHOST_DIF_TYPE1_PROTECTION;
3945 if (scsi_debug_dix)
3946 host_prot |= SHOST_DIX_TYPE1_PROTECTION;
3947 break;
3949 case SD_DIF_TYPE2_PROTECTION:
3950 host_prot = SHOST_DIF_TYPE2_PROTECTION;
3951 if (scsi_debug_dix)
3952 host_prot |= SHOST_DIX_TYPE2_PROTECTION;
3953 break;
3955 case SD_DIF_TYPE3_PROTECTION:
3956 host_prot = SHOST_DIF_TYPE3_PROTECTION;
3957 if (scsi_debug_dix)
3958 host_prot |= SHOST_DIX_TYPE3_PROTECTION;
3959 break;
3961 default:
3962 if (scsi_debug_dix)
3963 host_prot |= SHOST_DIX_TYPE0_PROTECTION;
3964 break;
3965 }
3967 scsi_host_set_prot(hpnt, host_prot);
3969 printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
3970 (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
3971 (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
3972 (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
3973 (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
3974 (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
3975 (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
3976 (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
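/* scsi_debug_guard selects IP-checksum (1) or T10 CRC guard tags. */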
3978 if (scsi_debug_guard == 1)
3979 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
3980 else
3981 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
3983 error = scsi_add_host(hpnt, &sdbg_host->dev);
3984 if (error) {
3985 printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
3986 error = -ENODEV;
3987 scsi_host_put(hpnt);
3988 } else
3989 scsi_scan_host(hpnt);
3992 return error;
3993 }
3995 static int sdebug_driver_remove(struct device * dev)
3996 {
3997 struct sdebug_host_info *sdbg_host;
3998 struct sdebug_dev_info *sdbg_devinfo, *tmp;
4000 sdbg_host = to_sdebug_host(dev);
4002 if (!sdbg_host) {
4003 printk(KERN_ERR "%s: Unable to locate host info\n",
4004 __func__);
4005 return -ENODEV;
4006 }
4008 scsi_remove_host(sdbg_host->shost);
4010 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
4011 dev_list) {
4012 list_del(&sdbg_devinfo->dev_list);
4013 kfree(sdbg_devinfo);
4014 }
4016 scsi_host_put(sdbg_host->shost);
4017 return 0;
4018 }
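/*
 * The "pseudo" bus below glues the simulated adapters into the driver
 * model: every device on it matches, probe allocates and registers a
 * Scsi_Host, and remove tears it down again.
 */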
4020 static int pseudo_lld_bus_match(struct device *dev,
4021 struct device_driver *dev_driver)
4022 {
4023 return 1;
4024 }
4026 static struct bus_type pseudo_lld_bus = {
4027 .name = "pseudo",
4028 .match = pseudo_lld_bus_match,
4029 .probe = sdebug_driver_probe,
4030 .remove = sdebug_driver_remove,