drivers/scsi/scsi_debug.c
1 /*
2 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
3 * Copyright (C) 1992 Eric Youngdale
4 * Simulate a host adapter with 2 disks attached. Do a lot of checking
5 * to make sure that we are not getting blocks mixed up, and PANIC if
6 * anything out of the ordinary is seen.
7 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9 * This version is more generic, simulating a variable number of disks
10 * (or disk-like devices) sharing a common amount of RAM. To be more
11 * realistic, the simulated devices have the transport attributes of
12 * SAS disks.
15 * For documentation see http://sg.danny.cz/sg/sdebug26.html
17 * D. Gilbert (dpg) work for Magneto-Optical device test [20010421]
18 * dpg: work for devfs large number of disks [20010809]
19 * forked for lk 2.5 series [20011216, 20020101]
20 * use vmalloc() more inquiry+mode_sense [20020302]
21 * add timers for delayed responses [20020721]
22 * Patrick Mansfield <patmans@us.ibm.com> max_luns+scsi_level [20021031]
23 * Mike Anderson <andmike@us.ibm.com> sysfs work [20021118]
24 * dpg: change style of boot options to "scsi_debug.num_tgts=2" and
25 * module options to "modprobe scsi_debug num_tgts=2" [20021221]
26 */
28 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/errno.h>
32 #include <linux/timer.h>
33 #include <linux/slab.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/genhd.h>
37 #include <linux/fs.h>
38 #include <linux/init.h>
39 #include <linux/proc_fs.h>
40 #include <linux/vmalloc.h>
41 #include <linux/moduleparam.h>
42 #include <linux/scatterlist.h>
43 #include <linux/blkdev.h>
44 #include <linux/crc-t10dif.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_dbg.h>
58 #include "sd.h"
59 #include "scsi_logging.h"
61 #define SCSI_DEBUG_VERSION "1.82"
62 static const char * scsi_debug_version_date = "20100324";
64 /* Additional Sense Code (ASC) */
65 #define NO_ADDITIONAL_SENSE 0x0
66 #define LOGICAL_UNIT_NOT_READY 0x4
67 #define UNRECOVERED_READ_ERR 0x11
68 #define PARAMETER_LIST_LENGTH_ERR 0x1a
69 #define INVALID_OPCODE 0x20
70 #define ADDR_OUT_OF_RANGE 0x21
71 #define INVALID_COMMAND_OPCODE 0x20
72 #define INVALID_FIELD_IN_CDB 0x24
73 #define INVALID_FIELD_IN_PARAM_LIST 0x26
74 #define POWERON_RESET 0x29
75 #define SAVING_PARAMS_UNSUP 0x39
76 #define TRANSPORT_PROBLEM 0x4b
77 #define THRESHOLD_EXCEEDED 0x5d
78 #define LOW_POWER_COND_ON 0x5e
80 /* Additional Sense Code Qualifier (ASCQ) */
81 #define ACK_NAK_TO 0x3
83 #define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
85 /* Default values for driver parameters */
86 #define DEF_NUM_HOST 1
87 #define DEF_NUM_TGTS 1
88 #define DEF_MAX_LUNS 1
89 /* With these defaults, this driver will make 1 host with 1 target
90 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
91 */
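/*
 * Example (illustrative; the parameter names mirror the scsi_debug_*
 * variables declared below): the defaults can be overridden at load time,
 * e.g. "modprobe scsi_debug dev_size_mb=256 num_tgts=2 max_luns=4", or on
 * the kernel command line in the "scsi_debug.num_tgts=2" style noted in
 * the header comment above.
 */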
92 #define DEF_ATO 1
93 #define DEF_DELAY 1
94 #define DEF_DEV_SIZE_MB 8
95 #define DEF_DIF 0
96 #define DEF_DIX 0
97 #define DEF_D_SENSE 0
98 #define DEF_EVERY_NTH 0
99 #define DEF_FAKE_RW 0
100 #define DEF_GUARD 0
101 #define DEF_LBPU 0
102 #define DEF_LBPWS 0
103 #define DEF_LBPWS10 0
104 #define DEF_LBPRZ 1
105 #define DEF_LOWEST_ALIGNED 0
106 #define DEF_NO_LUN_0 0
107 #define DEF_NUM_PARTS 0
108 #define DEF_OPTS 0
109 #define DEF_OPT_BLKS 64
110 #define DEF_PHYSBLK_EXP 0
111 #define DEF_PTYPE 0
112 #define DEF_REMOVABLE false
113 #define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
114 #define DEF_SECTOR_SIZE 512
115 #define DEF_UNMAP_ALIGNMENT 0
116 #define DEF_UNMAP_GRANULARITY 1
117 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
118 #define DEF_UNMAP_MAX_DESC 256
119 #define DEF_VIRTUAL_GB 0
120 #define DEF_VPD_USE_HOSTNO 1
121 #define DEF_WRITESAME_LENGTH 0xFFFF
123 /* bit mask values for scsi_debug_opts */
124 #define SCSI_DEBUG_OPT_NOISE 1
125 #define SCSI_DEBUG_OPT_MEDIUM_ERR 2
126 #define SCSI_DEBUG_OPT_TIMEOUT 4
127 #define SCSI_DEBUG_OPT_RECOVERED_ERR 8
128 #define SCSI_DEBUG_OPT_TRANSPORT_ERR 16
129 #define SCSI_DEBUG_OPT_DIF_ERR 32
130 #define SCSI_DEBUG_OPT_DIX_ERR 64
131 #define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
132 /* When "every_nth" > 0 then modulo "every_nth" commands:
133 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
134 * - a RECOVERED_ERROR is simulated on successful read and write
135 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
136 * - a TRANSPORT_ERROR is simulated on successful read and write
137 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
139 * When "every_nth" < 0 then after "- every_nth" commands:
140 * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
141 * - a RECOVERED_ERROR is simulated on successful read and write
142 * commands if SCSI_DEBUG_OPT_RECOVERED_ERR is set.
143 * - a TRANSPORT_ERROR is simulated on successful read and write
144 * commands if SCSI_DEBUG_OPT_TRANSPORT_ERR is set.
145 * This will continue until some other action occurs (e.g. the user
146 * writing a new value (other than -1 or 1) to every_nth via sysfs).
147 */
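/*
 * For instance (assuming module parameters named after the variables
 * below), "modprobe scsi_debug every_nth=100 opts=4" should make every
 * 100th command disappear without a response (SCSI_DEBUG_OPT_TIMEOUT is
 * bit value 4); both values can later be changed through the module's
 * sysfs parameters as noted above.
 */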
149 /* when the SCSI_DEBUG_OPT_MEDIUM_ERR bit is set in scsi_debug_opts, a
150 * medium error is simulated at this sector on read commands: */
151 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
152 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
154 /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
155 * or "peripheral device" addressing (value 0) */
156 #define SAM2_LUN_ADDRESS_METHOD 0
157 #define SAM2_WLUN_REPORT_LUNS 0xc101
159 /* Can queue up to this number of commands. Typically commands that
160 * have a non-zero delay are queued. */
161 #define SCSI_DEBUG_CANQUEUE 255
163 static int scsi_debug_add_host = DEF_NUM_HOST;
164 static int scsi_debug_ato = DEF_ATO;
165 static int scsi_debug_delay = DEF_DELAY;
166 static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
167 static int scsi_debug_dif = DEF_DIF;
168 static int scsi_debug_dix = DEF_DIX;
169 static int scsi_debug_dsense = DEF_D_SENSE;
170 static int scsi_debug_every_nth = DEF_EVERY_NTH;
171 static int scsi_debug_fake_rw = DEF_FAKE_RW;
172 static int scsi_debug_guard = DEF_GUARD;
173 static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
174 static int scsi_debug_max_luns = DEF_MAX_LUNS;
175 static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
176 static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
177 static int scsi_debug_no_uld = 0;
178 static int scsi_debug_num_parts = DEF_NUM_PARTS;
179 static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
180 static int scsi_debug_opt_blks = DEF_OPT_BLKS;
181 static int scsi_debug_opts = DEF_OPTS;
182 static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
183 static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
184 static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
185 static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
186 static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
187 static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
188 static unsigned int scsi_debug_lbpu = DEF_LBPU;
189 static unsigned int scsi_debug_lbpws = DEF_LBPWS;
190 static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
191 static unsigned int scsi_debug_lbprz = DEF_LBPRZ;
192 static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
193 static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
194 static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
195 static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
196 static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
197 static bool scsi_debug_removable = DEF_REMOVABLE;
199 static int scsi_debug_cmnd_count = 0;
201 #define DEV_READONLY(TGT) (0)
203 static unsigned int sdebug_store_sectors;
204 static sector_t sdebug_capacity; /* in sectors */
206 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
207 may still need them */
208 static int sdebug_heads; /* heads per disk */
209 static int sdebug_cylinders_per; /* cylinders per surface */
210 static int sdebug_sectors_per; /* sectors per cylinder */
212 #define SDEBUG_MAX_PARTS 4
214 #define SDEBUG_SENSE_LEN 32
216 #define SCSI_DEBUG_MAX_CMD_LEN 32
218 static unsigned int scsi_debug_lbp(void)
220 return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
223 struct sdebug_dev_info {
224 struct list_head dev_list;
225 unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */
226 unsigned int channel;
227 unsigned int target;
228 unsigned int lun;
229 struct sdebug_host_info *sdbg_host;
230 unsigned int wlun;
231 char reset;
232 char stopped;
233 char used;
236 struct sdebug_host_info {
237 struct list_head host_list;
238 struct Scsi_Host *shost;
239 struct device dev;
240 struct list_head dev_info_list;
243 #define to_sdebug_host(d) \
244 container_of(d, struct sdebug_host_info, dev)
246 static LIST_HEAD(sdebug_host_list);
247 static DEFINE_SPINLOCK(sdebug_host_list_lock);
249 typedef void (* done_funct_t) (struct scsi_cmnd *);
251 struct sdebug_queued_cmd {
252 int in_use;
253 struct timer_list cmnd_timer;
254 done_funct_t done_funct;
255 struct scsi_cmnd * a_cmnd;
256 int scsi_result;
258 static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
260 static unsigned char * fake_storep; /* ramdisk storage */
261 static struct sd_dif_tuple *dif_storep; /* protection info */
262 static void *map_storep; /* provisioning map */
264 static unsigned long map_size;
265 static int num_aborts = 0;
266 static int num_dev_resets = 0;
267 static int num_bus_resets = 0;
268 static int num_host_resets = 0;
269 static int dix_writes;
270 static int dix_reads;
271 static int dif_errors;
273 static DEFINE_SPINLOCK(queued_arr_lock);
274 static DEFINE_RWLOCK(atomic_rw);
276 static char sdebug_proc_name[] = "scsi_debug";
278 static struct bus_type pseudo_lld_bus;
280 static struct device_driver sdebug_driverfs_driver = {
281 .name = sdebug_proc_name,
282 .bus = &pseudo_lld_bus,
285 static const int check_condition_result =
286 (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
288 static const int illegal_condition_result =
289 (DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
291 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
292 0, 0, 0x2, 0x4b};
293 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
294 0, 0, 0x0, 0x0};
296 static int sdebug_add_adapter(void);
297 static void sdebug_remove_adapter(void);
299 static void sdebug_max_tgts_luns(void)
301 struct sdebug_host_info *sdbg_host;
302 struct Scsi_Host *hpnt;
304 spin_lock(&sdebug_host_list_lock);
305 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
306 hpnt = sdbg_host->shost;
307 if ((hpnt->this_id >= 0) &&
308 (scsi_debug_num_tgts > hpnt->this_id))
309 hpnt->max_id = scsi_debug_num_tgts + 1;
310 else
311 hpnt->max_id = scsi_debug_num_tgts;
312 /* scsi_debug_max_luns; */
313 hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;
315 spin_unlock(&sdebug_host_list_lock);
318 static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
319 int asc, int asq)
321 unsigned char *sbuff;
323 sbuff = devip->sense_buff;
324 memset(sbuff, 0, SDEBUG_SENSE_LEN);
326 scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
328 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
329 printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: "
330 "[0x%x,0x%x,0x%x]\n", key, asc, asq);
333 static void get_data_transfer_info(unsigned char *cmd,
334 unsigned long long *lba, unsigned int *num,
335 u32 *ei_lba)
337 *ei_lba = 0;
339 switch (*cmd) {
340 case VARIABLE_LENGTH_CMD:
341 *lba = (u64)cmd[19] | (u64)cmd[18] << 8 |
342 (u64)cmd[17] << 16 | (u64)cmd[16] << 24 |
343 (u64)cmd[15] << 32 | (u64)cmd[14] << 40 |
344 (u64)cmd[13] << 48 | (u64)cmd[12] << 56;
346 *ei_lba = (u32)cmd[23] | (u32)cmd[22] << 8 |
347 (u32)cmd[21] << 16 | (u32)cmd[20] << 24;
349 *num = (u32)cmd[31] | (u32)cmd[30] << 8 | (u32)cmd[29] << 16 |
350 (u32)cmd[28] << 24;
351 break;
353 case WRITE_SAME_16:
354 case WRITE_16:
355 case READ_16:
356 *lba = (u64)cmd[9] | (u64)cmd[8] << 8 |
357 (u64)cmd[7] << 16 | (u64)cmd[6] << 24 |
358 (u64)cmd[5] << 32 | (u64)cmd[4] << 40 |
359 (u64)cmd[3] << 48 | (u64)cmd[2] << 56;
361 *num = (u32)cmd[13] | (u32)cmd[12] << 8 | (u32)cmd[11] << 16 |
362 (u32)cmd[10] << 24;
363 break;
364 case WRITE_12:
365 case READ_12:
366 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
367 (u32)cmd[2] << 24;
369 *num = (u32)cmd[9] | (u32)cmd[8] << 8 | (u32)cmd[7] << 16 |
370 (u32)cmd[6] << 24;
371 break;
372 case WRITE_SAME:
373 case WRITE_10:
374 case READ_10:
375 case XDWRITEREAD_10:
376 *lba = (u32)cmd[5] | (u32)cmd[4] << 8 | (u32)cmd[3] << 16 |
377 (u32)cmd[2] << 24;
379 *num = (u32)cmd[8] | (u32)cmd[7] << 8;
380 break;
381 case WRITE_6:
382 case READ_6:
383 *lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
384 (u32)(cmd[1] & 0x1f) << 16;
385 *num = (0 == cmd[4]) ? 256 : cmd[4];
386 break;
387 default:
388 break;
392 static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
394 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
395 printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
397 return -EINVAL;
398 /* return -ENOTTY; // correct return but upsets fdisk */
401 static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
402 struct sdebug_dev_info * devip)
404 if (devip->reset) {
405 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
406 printk(KERN_INFO "scsi_debug: Reporting Unit "
407 "attention: power on reset\n");
408 devip->reset = 0;
409 mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
410 return check_condition_result;
412 if ((0 == reset_only) && devip->stopped) {
413 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
414 printk(KERN_INFO "scsi_debug: Reporting Not "
415 "ready: initializing command required\n");
416 mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
417 0x2);
418 return check_condition_result;
420 return 0;
423 /* Returns 0 if ok else (DID_ERROR << 16). Sets scp->resid . */
424 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
425 int arr_len)
427 int act_len;
428 struct scsi_data_buffer *sdb = scsi_in(scp);
430 if (!sdb->length)
431 return 0;
432 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_FROM_DEVICE))
433 return (DID_ERROR << 16);
435 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
436 arr, arr_len);
437 sdb->resid = scsi_bufflen(scp) - act_len;
439 return 0;
442 /* Returns number of bytes fetched into 'arr' or -1 if error. */
443 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
444 int arr_len)
446 if (!scsi_bufflen(scp))
447 return 0;
448 if (!(scsi_bidi_cmnd(scp) || scp->sc_data_direction == DMA_TO_DEVICE))
449 return -1;
451 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
455 static const char * inq_vendor_id = "Linux ";
456 static const char * inq_product_id = "scsi_debug ";
457 static const char * inq_product_rev = "0004";
459 static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
460 int target_dev_id, int dev_id_num,
461 const char * dev_id_str,
462 int dev_id_str_len)
464 int num, port_a;
465 char b[32];
467 port_a = target_dev_id + 1;
468 /* T10 vendor identifier field format (faked) */
469 arr[0] = 0x2; /* ASCII */
470 arr[1] = 0x1;
471 arr[2] = 0x0;
472 memcpy(&arr[4], inq_vendor_id, 8);
473 memcpy(&arr[12], inq_product_id, 16);
474 memcpy(&arr[28], dev_id_str, dev_id_str_len);
475 num = 8 + 16 + dev_id_str_len;
476 arr[3] = num;
477 num += 4;
478 if (dev_id_num >= 0) {
479 /* NAA-5, Logical unit identifier (binary) */
480 arr[num++] = 0x1; /* binary (not necessarily sas) */
481 arr[num++] = 0x3; /* PIV=0, lu, naa */
482 arr[num++] = 0x0;
483 arr[num++] = 0x8;
484 arr[num++] = 0x53; /* naa-5 ieee company id=0x333333 (fake) */
485 arr[num++] = 0x33;
486 arr[num++] = 0x33;
487 arr[num++] = 0x30;
488 arr[num++] = (dev_id_num >> 24);
489 arr[num++] = (dev_id_num >> 16) & 0xff;
490 arr[num++] = (dev_id_num >> 8) & 0xff;
491 arr[num++] = dev_id_num & 0xff;
492 /* Target relative port number */
493 arr[num++] = 0x61; /* proto=sas, binary */
494 arr[num++] = 0x94; /* PIV=1, target port, rel port */
495 arr[num++] = 0x0; /* reserved */
496 arr[num++] = 0x4; /* length */
497 arr[num++] = 0x0; /* reserved */
498 arr[num++] = 0x0; /* reserved */
499 arr[num++] = 0x0;
500 arr[num++] = 0x1; /* relative port A */
502 /* NAA-5, Target port identifier */
503 arr[num++] = 0x61; /* proto=sas, binary */
504 arr[num++] = 0x93; /* piv=1, target port, naa */
505 arr[num++] = 0x0;
506 arr[num++] = 0x8;
507 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
508 arr[num++] = 0x22;
509 arr[num++] = 0x22;
510 arr[num++] = 0x20;
511 arr[num++] = (port_a >> 24);
512 arr[num++] = (port_a >> 16) & 0xff;
513 arr[num++] = (port_a >> 8) & 0xff;
514 arr[num++] = port_a & 0xff;
515 /* NAA-5, Target port group identifier */
516 arr[num++] = 0x61; /* proto=sas, binary */
517 arr[num++] = 0x95; /* piv=1, target port group id */
518 arr[num++] = 0x0;
519 arr[num++] = 0x4;
520 arr[num++] = 0;
521 arr[num++] = 0;
522 arr[num++] = (port_group_id >> 8) & 0xff;
523 arr[num++] = port_group_id & 0xff;
524 /* NAA-5, Target device identifier */
525 arr[num++] = 0x61; /* proto=sas, binary */
526 arr[num++] = 0xa3; /* piv=1, target device, naa */
527 arr[num++] = 0x0;
528 arr[num++] = 0x8;
529 arr[num++] = 0x52; /* naa-5, company id=0x222222 (fake) */
530 arr[num++] = 0x22;
531 arr[num++] = 0x22;
532 arr[num++] = 0x20;
533 arr[num++] = (target_dev_id >> 24);
534 arr[num++] = (target_dev_id >> 16) & 0xff;
535 arr[num++] = (target_dev_id >> 8) & 0xff;
536 arr[num++] = target_dev_id & 0xff;
537 /* SCSI name string: Target device identifier */
538 arr[num++] = 0x63; /* proto=sas, UTF-8 */
539 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
540 arr[num++] = 0x0;
541 arr[num++] = 24;
542 memcpy(arr + num, "naa.52222220", 12);
543 num += 12;
544 snprintf(b, sizeof(b), "%08X", target_dev_id);
545 memcpy(arr + num, b, 8);
546 num += 8;
547 memset(arr + num, 0, 4);
548 num += 4;
549 return num;
553 static unsigned char vpd84_data[] = {
554 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
555 0x22,0x22,0x22,0x0,0xbb,0x1,
556 0x22,0x22,0x22,0x0,0xbb,0x2,
559 static int inquiry_evpd_84(unsigned char * arr)
561 memcpy(arr, vpd84_data, sizeof(vpd84_data));
562 return sizeof(vpd84_data);
565 static int inquiry_evpd_85(unsigned char * arr)
567 int num = 0;
568 const char * na1 = "https://www.kernel.org/config";
569 const char * na2 = "http://www.kernel.org/log";
570 int plen, olen;
572 arr[num++] = 0x1; /* lu, storage config */
573 arr[num++] = 0x0; /* reserved */
574 arr[num++] = 0x0;
575 olen = strlen(na1);
576 plen = olen + 1;
577 if (plen % 4)
578 plen = ((plen / 4) + 1) * 4;
579 arr[num++] = plen; /* length, null terminated, padded */
580 memcpy(arr + num, na1, olen);
581 memset(arr + num + olen, 0, plen - olen);
582 num += plen;
584 arr[num++] = 0x4; /* lu, logging */
585 arr[num++] = 0x0; /* reserved */
586 arr[num++] = 0x0;
587 olen = strlen(na2);
588 plen = olen + 1;
589 if (plen % 4)
590 plen = ((plen / 4) + 1) * 4;
591 arr[num++] = plen; /* length, null terminated, padded */
592 memcpy(arr + num, na2, olen);
593 memset(arr + num + olen, 0, plen - olen);
594 num += plen;
596 return num;
599 /* SCSI ports VPD page */
600 static int inquiry_evpd_88(unsigned char * arr, int target_dev_id)
602 int num = 0;
603 int port_a, port_b;
605 port_a = target_dev_id + 1;
606 port_b = port_a + 1;
607 arr[num++] = 0x0; /* reserved */
608 arr[num++] = 0x0; /* reserved */
609 arr[num++] = 0x0;
610 arr[num++] = 0x1; /* relative port 1 (primary) */
611 memset(arr + num, 0, 6);
612 num += 6;
613 arr[num++] = 0x0;
614 arr[num++] = 12; /* length tp descriptor */
615 /* naa-5 target port identifier (A) */
616 arr[num++] = 0x61; /* proto=sas, binary */
617 arr[num++] = 0x93; /* PIV=1, target port, NAA */
618 arr[num++] = 0x0; /* reserved */
619 arr[num++] = 0x8; /* length */
620 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
621 arr[num++] = 0x22;
622 arr[num++] = 0x22;
623 arr[num++] = 0x20;
624 arr[num++] = (port_a >> 24);
625 arr[num++] = (port_a >> 16) & 0xff;
626 arr[num++] = (port_a >> 8) & 0xff;
627 arr[num++] = port_a & 0xff;
629 arr[num++] = 0x0; /* reserved */
630 arr[num++] = 0x0; /* reserved */
631 arr[num++] = 0x0;
632 arr[num++] = 0x2; /* relative port 2 (secondary) */
633 memset(arr + num, 0, 6);
634 num += 6;
635 arr[num++] = 0x0;
636 arr[num++] = 12; /* length tp descriptor */
637 /* naa-5 target port identifier (B) */
638 arr[num++] = 0x61; /* proto=sas, binary */
639 arr[num++] = 0x93; /* PIV=1, target port, NAA */
640 arr[num++] = 0x0; /* reserved */
641 arr[num++] = 0x8; /* length */
642 arr[num++] = 0x52; /* NAA-5, company_id=0x222222 (fake) */
643 arr[num++] = 0x22;
644 arr[num++] = 0x22;
645 arr[num++] = 0x20;
646 arr[num++] = (port_b >> 24);
647 arr[num++] = (port_b >> 16) & 0xff;
648 arr[num++] = (port_b >> 8) & 0xff;
649 arr[num++] = port_b & 0xff;
651 return num;
655 static unsigned char vpd89_data[] = {
656 /* from 4th byte */ 0,0,0,0,
657 'l','i','n','u','x',' ',' ',' ',
658 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
659 '1','2','3','4',
660 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
661 0xec,0,0,0,
662 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
663 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
664 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
665 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
666 0x53,0x41,
667 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
668 0x20,0x20,
669 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
670 0x10,0x80,
671 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
672 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
673 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
674 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
675 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
676 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
677 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
678 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
679 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
680 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
681 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
682 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
683 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
684 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
685 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
686 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
687 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
688 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
689 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
690 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
691 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
692 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
693 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
694 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
695 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
696 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
699 static int inquiry_evpd_89(unsigned char * arr)
701 memcpy(arr, vpd89_data, sizeof(vpd89_data));
702 return sizeof(vpd89_data);
706 /* Block limits VPD page (SBC-3) */
707 static unsigned char vpdb0_data[] = {
708 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
709 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
710 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
711 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
714 static int inquiry_evpd_b0(unsigned char * arr)
716 unsigned int gran;
718 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
720 /* Optimal transfer length granularity */
721 gran = 1 << scsi_debug_physblk_exp;
722 arr[2] = (gran >> 8) & 0xff;
723 arr[3] = gran & 0xff;
725 /* Maximum Transfer Length */
726 if (sdebug_store_sectors > 0x400) {
727 arr[4] = (sdebug_store_sectors >> 24) & 0xff;
728 arr[5] = (sdebug_store_sectors >> 16) & 0xff;
729 arr[6] = (sdebug_store_sectors >> 8) & 0xff;
730 arr[7] = sdebug_store_sectors & 0xff;
733 /* Optimal Transfer Length */
734 put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
736 if (scsi_debug_lbpu) {
737 /* Maximum Unmap LBA Count */
738 put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
740 /* Maximum Unmap Block Descriptor Count */
741 put_unaligned_be32(scsi_debug_unmap_max_desc, &arr[20]);
744 /* Unmap Granularity Alignment */
745 if (scsi_debug_unmap_alignment) {
746 put_unaligned_be32(scsi_debug_unmap_alignment, &arr[28]);
747 arr[28] |= 0x80; /* UGAVALID */
750 /* Optimal Unmap Granularity */
751 put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
753 /* Maximum WRITE SAME Length */
754 put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
756 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
758 return sizeof(vpdb0_data);
761 /* Block device characteristics VPD page (SBC-3) */
762 static int inquiry_evpd_b1(unsigned char *arr)
764 memset(arr, 0, 0x3c);
765 arr[0] = 0;
766 arr[1] = 1; /* non rotating medium (e.g. solid state) */
767 arr[2] = 0;
768 arr[3] = 5; /* less than 1.8" */
770 return 0x3c;
773 /* Logical block provisioning VPD page (SBC-3) */
774 static int inquiry_evpd_b2(unsigned char *arr)
776 memset(arr, 0, 0x4);
777 arr[0] = 0; /* threshold exponent */
779 if (scsi_debug_lbpu)
780 arr[1] = 1 << 7;
782 if (scsi_debug_lbpws)
783 arr[1] |= 1 << 6;
785 if (scsi_debug_lbpws10)
786 arr[1] |= 1 << 5;
788 if (scsi_debug_lbprz)
789 arr[1] |= 1 << 2;
791 return 0x4;
794 #define SDEBUG_LONG_INQ_SZ 96
795 #define SDEBUG_MAX_INQ_ARR_SZ 584
797 static int resp_inquiry(struct scsi_cmnd * scp, int target,
798 struct sdebug_dev_info * devip)
800 unsigned char pq_pdt;
801 unsigned char * arr;
802 unsigned char *cmd = (unsigned char *)scp->cmnd;
803 int alloc_len, n, ret;
805 alloc_len = (cmd[3] << 8) + cmd[4];
806 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
807 if (! arr)
808 return DID_REQUEUE << 16;
809 if (devip->wlun)
810 pq_pdt = 0x1e; /* present, wlun */
811 else if (scsi_debug_no_lun_0 && (0 == devip->lun))
812 pq_pdt = 0x7f; /* not present, no device type */
813 else
814 pq_pdt = (scsi_debug_ptype & 0x1f);
815 arr[0] = pq_pdt;
816 if (0x2 & cmd[1]) { /* CMDDT bit set */
817 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
819 kfree(arr);
820 return check_condition_result;
821 } else if (0x1 & cmd[1]) { /* EVPD bit set */
822 int lu_id_num, port_group_id, target_dev_id, len;
823 char lu_id_str[6];
824 int host_no = devip->sdbg_host->shost->host_no;
826 port_group_id = (((host_no + 1) & 0x7f) << 8) +
827 (devip->channel & 0x7f);
828 if (0 == scsi_debug_vpd_use_hostno)
829 host_no = 0;
830 lu_id_num = devip->wlun ? -1 : (((host_no + 1) * 2000) +
831 (devip->target * 1000) + devip->lun);
832 target_dev_id = ((host_no + 1) * 2000) +
833 (devip->target * 1000) - 3;
834 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
835 if (0 == cmd[2]) { /* supported vital product data pages */
836 arr[1] = cmd[2]; /*sanity */
837 n = 4;
838 arr[n++] = 0x0; /* this page */
839 arr[n++] = 0x80; /* unit serial number */
840 arr[n++] = 0x83; /* device identification */
841 arr[n++] = 0x84; /* software interface ident. */
842 arr[n++] = 0x85; /* management network addresses */
843 arr[n++] = 0x86; /* extended inquiry */
844 arr[n++] = 0x87; /* mode page policy */
845 arr[n++] = 0x88; /* SCSI ports */
846 arr[n++] = 0x89; /* ATA information */
847 arr[n++] = 0xb0; /* Block limits (SBC) */
848 arr[n++] = 0xb1; /* Block characteristics (SBC) */
849 if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
850 arr[n++] = 0xb2;
851 arr[3] = n - 4; /* number of supported VPD pages */
852 } else if (0x80 == cmd[2]) { /* unit serial number */
853 arr[1] = cmd[2]; /*sanity */
854 arr[3] = len;
855 memcpy(&arr[4], lu_id_str, len);
856 } else if (0x83 == cmd[2]) { /* device identification */
857 arr[1] = cmd[2]; /*sanity */
858 arr[3] = inquiry_evpd_83(&arr[4], port_group_id,
859 target_dev_id, lu_id_num,
860 lu_id_str, len);
861 } else if (0x84 == cmd[2]) { /* Software interface ident. */
862 arr[1] = cmd[2]; /*sanity */
863 arr[3] = inquiry_evpd_84(&arr[4]);
864 } else if (0x85 == cmd[2]) { /* Management network addresses */
865 arr[1] = cmd[2]; /*sanity */
866 arr[3] = inquiry_evpd_85(&arr[4]);
867 } else if (0x86 == cmd[2]) { /* extended inquiry */
868 arr[1] = cmd[2]; /*sanity */
869 arr[3] = 0x3c; /* number of following entries */
870 if (scsi_debug_dif == SD_DIF_TYPE3_PROTECTION)
871 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
872 else if (scsi_debug_dif)
873 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
874 else
875 arr[4] = 0x0; /* no protection stuff */
876 arr[5] = 0x7; /* head of q, ordered + simple q's */
877 } else if (0x87 == cmd[2]) { /* mode page policy */
878 arr[1] = cmd[2]; /*sanity */
879 arr[3] = 0x8; /* number of following entries */
880 arr[4] = 0x2; /* disconnect-reconnect mp */
881 arr[6] = 0x80; /* mlus, shared */
882 arr[8] = 0x18; /* protocol specific lu */
883 arr[10] = 0x82; /* mlus, per initiator port */
884 } else if (0x88 == cmd[2]) { /* SCSI Ports */
885 arr[1] = cmd[2]; /*sanity */
886 arr[3] = inquiry_evpd_88(&arr[4], target_dev_id);
887 } else if (0x89 == cmd[2]) { /* ATA information */
888 arr[1] = cmd[2]; /*sanity */
889 n = inquiry_evpd_89(&arr[4]);
890 arr[2] = (n >> 8);
891 arr[3] = (n & 0xff);
892 } else if (0xb0 == cmd[2]) { /* Block limits (SBC) */
893 arr[1] = cmd[2]; /*sanity */
894 arr[3] = inquiry_evpd_b0(&arr[4]);
895 } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
896 arr[1] = cmd[2]; /*sanity */
897 arr[3] = inquiry_evpd_b1(&arr[4]);
898 } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
899 arr[1] = cmd[2]; /*sanity */
900 arr[3] = inquiry_evpd_b2(&arr[4]);
901 } else {
902 /* Illegal request, invalid field in cdb */
903 mk_sense_buffer(devip, ILLEGAL_REQUEST,
904 INVALID_FIELD_IN_CDB, 0);
905 kfree(arr);
906 return check_condition_result;
908 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
909 ret = fill_from_dev_buffer(scp, arr,
910 min(len, SDEBUG_MAX_INQ_ARR_SZ));
911 kfree(arr);
912 return ret;
914 /* drops through here for a standard inquiry */
915 arr[1] = scsi_debug_removable ? 0x80 : 0; /* Removable disk */
916 arr[2] = scsi_debug_scsi_level;
917 arr[3] = 2; /* response_data_format==2 */
918 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
919 arr[5] = scsi_debug_dif ? 1 : 0; /* PROTECT bit */
920 if (0 == scsi_debug_vpd_use_hostno)
921 arr[5] = 0x10; /* claim: implicit TGPS */
922 arr[6] = 0x10; /* claim: MultiP */
923 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
924 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
925 memcpy(&arr[8], inq_vendor_id, 8);
926 memcpy(&arr[16], inq_product_id, 16);
927 memcpy(&arr[32], inq_product_rev, 4);
928 /* version descriptors (2 bytes each) follow */
929 arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
930 arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */
931 n = 62;
932 if (scsi_debug_ptype == 0) {
933 arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
934 } else if (scsi_debug_ptype == 1) {
935 arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
937 arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */
938 ret = fill_from_dev_buffer(scp, arr,
939 min(alloc_len, SDEBUG_LONG_INQ_SZ));
940 kfree(arr);
941 return ret;
944 static int resp_requests(struct scsi_cmnd * scp,
945 struct sdebug_dev_info * devip)
947 unsigned char * sbuff;
948 unsigned char *cmd = (unsigned char *)scp->cmnd;
949 unsigned char arr[SDEBUG_SENSE_LEN];
950 int want_dsense;
951 int len = 18;
953 memset(arr, 0, sizeof(arr));
954 if (devip->reset == 1)
955 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
956 want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
957 sbuff = devip->sense_buff;
958 if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
959 if (want_dsense) {
960 arr[0] = 0x72;
961 arr[1] = 0x0; /* NO_SENSE in sense_key */
962 arr[2] = THRESHOLD_EXCEEDED;
963 arr[3] = 0xff; /* TEST set and MRIE==6 */
964 } else {
965 arr[0] = 0x70;
966 arr[2] = 0x0; /* NO_SENSE in sense_key */
967 arr[7] = 0xa; /* 18 byte sense buffer */
968 arr[12] = THRESHOLD_EXCEEDED;
969 arr[13] = 0xff; /* TEST set and MRIE==6 */
971 } else {
972 memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
973 if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
974 /* DESC bit set and sense_buff in fixed format */
975 memset(arr, 0, sizeof(arr));
976 arr[0] = 0x72;
977 arr[1] = sbuff[2]; /* sense key */
978 arr[2] = sbuff[12]; /* asc */
979 arr[3] = sbuff[13]; /* ascq */
980 len = 8;
983 mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
984 return fill_from_dev_buffer(scp, arr, len);
987 static int resp_start_stop(struct scsi_cmnd * scp,
988 struct sdebug_dev_info * devip)
990 unsigned char *cmd = (unsigned char *)scp->cmnd;
991 int power_cond, errsts, start;
993 if ((errsts = check_readiness(scp, 1, devip)))
994 return errsts;
995 power_cond = (cmd[4] & 0xf0) >> 4;
996 if (power_cond) {
997 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
999 return check_condition_result;
1001 start = cmd[4] & 1;
1002 if (start == devip->stopped)
1003 devip->stopped = !start;
1004 return 0;
1007 static sector_t get_sdebug_capacity(void)
1009 if (scsi_debug_virtual_gb > 0)
1010 return (sector_t)scsi_debug_virtual_gb *
1011 (1073741824 / scsi_debug_sector_size);
1012 else
1013 return sdebug_store_sectors;
1016 #define SDEBUG_READCAP_ARR_SZ 8
1017 static int resp_readcap(struct scsi_cmnd * scp,
1018 struct sdebug_dev_info * devip)
1020 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1021 unsigned int capac;
1022 int errsts;
1024 if ((errsts = check_readiness(scp, 1, devip)))
1025 return errsts;
1026 /* following just in case virtual_gb changed */
1027 sdebug_capacity = get_sdebug_capacity();
1028 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1029 if (sdebug_capacity < 0xffffffff) {
1030 capac = (unsigned int)sdebug_capacity - 1;
1031 arr[0] = (capac >> 24);
1032 arr[1] = (capac >> 16) & 0xff;
1033 arr[2] = (capac >> 8) & 0xff;
1034 arr[3] = capac & 0xff;
1035 } else {
1036 arr[0] = 0xff;
1037 arr[1] = 0xff;
1038 arr[2] = 0xff;
1039 arr[3] = 0xff;
1041 arr[6] = (scsi_debug_sector_size >> 8) & 0xff;
1042 arr[7] = scsi_debug_sector_size & 0xff;
1043 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1046 #define SDEBUG_READCAP16_ARR_SZ 32
1047 static int resp_readcap16(struct scsi_cmnd * scp,
1048 struct sdebug_dev_info * devip)
1050 unsigned char *cmd = (unsigned char *)scp->cmnd;
1051 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1052 unsigned long long capac;
1053 int errsts, k, alloc_len;
1055 if ((errsts = check_readiness(scp, 1, devip)))
1056 return errsts;
1057 alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
1058 + cmd[13]);
1059 /* following just in case virtual_gb changed */
1060 sdebug_capacity = get_sdebug_capacity();
1061 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1062 capac = sdebug_capacity - 1;
1063 for (k = 0; k < 8; ++k, capac >>= 8)
1064 arr[7 - k] = capac & 0xff;
1065 arr[8] = (scsi_debug_sector_size >> 24) & 0xff;
1066 arr[9] = (scsi_debug_sector_size >> 16) & 0xff;
1067 arr[10] = (scsi_debug_sector_size >> 8) & 0xff;
1068 arr[11] = scsi_debug_sector_size & 0xff;
1069 arr[13] = scsi_debug_physblk_exp & 0xf;
1070 arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
1072 if (scsi_debug_lbp()) {
1073 arr[14] |= 0x80; /* LBPME */
1074 if (scsi_debug_lbprz)
1075 arr[14] |= 0x40; /* LBPRZ */
1078 arr[15] = scsi_debug_lowest_aligned & 0xff;
1080 if (scsi_debug_dif) {
1081 arr[12] = (scsi_debug_dif - 1) << 1; /* P_TYPE */
1082 arr[12] |= 1; /* PROT_EN */
1085 return fill_from_dev_buffer(scp, arr,
1086 min(alloc_len, SDEBUG_READCAP16_ARR_SZ));
1089 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1091 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1092 struct sdebug_dev_info * devip)
1094 unsigned char *cmd = (unsigned char *)scp->cmnd;
1095 unsigned char * arr;
1096 int host_no = devip->sdbg_host->shost->host_no;
1097 int n, ret, alen, rlen;
1098 int port_group_a, port_group_b, port_a, port_b;
1100 alen = ((cmd[6] << 24) + (cmd[7] << 16) + (cmd[8] << 8)
1101 + cmd[9]);
1103 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1104 if (! arr)
1105 return DID_REQUEUE << 16;
1106 /*
1107 * EVPD page 0x88 states we have two ports, one
1108 * real and a fake port with no device connected.
1109 * So we create two port groups with one port each
1110 * and set the group with port B to unavailable.
1111 */
1112 port_a = 0x1; /* relative port A */
1113 port_b = 0x2; /* relative port B */
1114 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1115 (devip->channel & 0x7f);
1116 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1117 (devip->channel & 0x7f) + 0x80;
1119 /*
1120 * The asymmetric access state is cycled according to the host_id.
1121 */
1122 n = 4;
1123 if (0 == scsi_debug_vpd_use_hostno) {
1124 arr[n++] = host_no % 3; /* Asymm access state */
1125 arr[n++] = 0x0F; /* claim: all states are supported */
1126 } else {
1127 arr[n++] = 0x0; /* Active/Optimized path */
1128 arr[n++] = 0x01; /* claim: only support active/optimized paths */
1130 arr[n++] = (port_group_a >> 8) & 0xff;
1131 arr[n++] = port_group_a & 0xff;
1132 arr[n++] = 0; /* Reserved */
1133 arr[n++] = 0; /* Status code */
1134 arr[n++] = 0; /* Vendor unique */
1135 arr[n++] = 0x1; /* One port per group */
1136 arr[n++] = 0; /* Reserved */
1137 arr[n++] = 0; /* Reserved */
1138 arr[n++] = (port_a >> 8) & 0xff;
1139 arr[n++] = port_a & 0xff;
1140 arr[n++] = 3; /* Port unavailable */
1141 arr[n++] = 0x08; /* claim: only unavailable paths are supported */
1142 arr[n++] = (port_group_b >> 8) & 0xff;
1143 arr[n++] = port_group_b & 0xff;
1144 arr[n++] = 0; /* Reserved */
1145 arr[n++] = 0; /* Status code */
1146 arr[n++] = 0; /* Vendor unique */
1147 arr[n++] = 0x1; /* One port per group */
1148 arr[n++] = 0; /* Reserved */
1149 arr[n++] = 0; /* Reserved */
1150 arr[n++] = (port_b >> 8) & 0xff;
1151 arr[n++] = port_b & 0xff;
1153 rlen = n - 4;
1154 arr[0] = (rlen >> 24) & 0xff;
1155 arr[1] = (rlen >> 16) & 0xff;
1156 arr[2] = (rlen >> 8) & 0xff;
1157 arr[3] = rlen & 0xff;
1159 /*
1160 * Return the smallest value of either
1161 * - The allocated length
1162 * - The constructed command length
1163 * - The maximum array size
1164 */
1165 rlen = min(alen,n);
1166 ret = fill_from_dev_buffer(scp, arr,
1167 min(rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1168 kfree(arr);
1169 return ret;
1172 /* <<Following mode page info copied from ST318451LW>> */
1174 static int resp_err_recov_pg(unsigned char * p, int pcontrol, int target)
1175 { /* Read-Write Error Recovery page for mode_sense */
1176 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
1177 5, 0, 0xff, 0xff};
1179 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
1180 if (1 == pcontrol)
1181 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
1182 return sizeof(err_recov_pg);
1185 static int resp_disconnect_pg(unsigned char * p, int pcontrol, int target)
1186 { /* Disconnect-Reconnect page for mode_sense */
1187 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
1188 0, 0, 0, 0, 0, 0, 0, 0};
1190 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
1191 if (1 == pcontrol)
1192 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
1193 return sizeof(disconnect_pg);
1196 static int resp_format_pg(unsigned char * p, int pcontrol, int target)
1197 { /* Format device page for mode_sense */
1198 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
1199 0, 0, 0, 0, 0, 0, 0, 0,
1200 0, 0, 0, 0, 0x40, 0, 0, 0};
1202 memcpy(p, format_pg, sizeof(format_pg));
1203 p[10] = (sdebug_sectors_per >> 8) & 0xff;
1204 p[11] = sdebug_sectors_per & 0xff;
1205 p[12] = (scsi_debug_sector_size >> 8) & 0xff;
1206 p[13] = scsi_debug_sector_size & 0xff;
1207 if (scsi_debug_removable)
1208 p[20] |= 0x20; /* should agree with INQUIRY */
1209 if (1 == pcontrol)
1210 memset(p + 2, 0, sizeof(format_pg) - 2);
1211 return sizeof(format_pg);
1214 static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
1215 { /* Caching page for mode_sense */
1216 unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
1217 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
1219 memcpy(p, caching_pg, sizeof(caching_pg));
1220 if (1 == pcontrol)
1221 memset(p + 2, 0, sizeof(caching_pg) - 2);
1222 return sizeof(caching_pg);
1225 static int resp_ctrl_m_pg(unsigned char * p, int pcontrol, int target)
1226 { /* Control mode page for mode_sense */
1227 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
1228 0, 0, 0, 0};
1229 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
1230 0, 0, 0x2, 0x4b};
1232 if (scsi_debug_dsense)
1233 ctrl_m_pg[2] |= 0x4;
1234 else
1235 ctrl_m_pg[2] &= ~0x4;
1237 if (scsi_debug_ato)
1238 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
1240 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
1241 if (1 == pcontrol)
1242 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
1243 else if (2 == pcontrol)
1244 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
1245 return sizeof(ctrl_m_pg);
1249 static int resp_iec_m_pg(unsigned char * p, int pcontrol, int target)
1250 { /* Informational Exceptions control mode page for mode_sense */
1251 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
1252 0, 0, 0x0, 0x0};
1253 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1254 0, 0, 0x0, 0x0};
1256 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
1257 if (1 == pcontrol)
1258 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
1259 else if (2 == pcontrol)
1260 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
1261 return sizeof(iec_m_pg);
1264 static int resp_sas_sf_m_pg(unsigned char * p, int pcontrol, int target)
1265 { /* SAS SSP mode page - short format for mode_sense */
1266 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
1267 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
1269 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
1270 if (1 == pcontrol)
1271 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
1272 return sizeof(sas_sf_m_pg);
1276 static int resp_sas_pcd_m_spg(unsigned char * p, int pcontrol, int target,
1277 int target_dev_id)
1278 { /* SAS phy control and discover mode page for mode_sense */
1279 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
1280 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
1281 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1282 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1283 0x2, 0, 0, 0, 0, 0, 0, 0,
1284 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1285 0, 0, 0, 0, 0, 0, 0, 0,
1286 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
1287 0x52, 0x22, 0x22, 0x20, 0x0, 0x0, 0x0, 0x0,
1288 0x51, 0x11, 0x11, 0x10, 0x0, 0x0, 0x0, 0x1,
1289 0x3, 0, 0, 0, 0, 0, 0, 0,
1290 0x88, 0x99, 0, 0, 0, 0, 0, 0,
1291 0, 0, 0, 0, 0, 0, 0, 0,
1293 int port_a, port_b;
1295 port_a = target_dev_id + 1;
1296 port_b = port_a + 1;
1297 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
1298 p[20] = (port_a >> 24);
1299 p[21] = (port_a >> 16) & 0xff;
1300 p[22] = (port_a >> 8) & 0xff;
1301 p[23] = port_a & 0xff;
1302 p[48 + 20] = (port_b >> 24);
1303 p[48 + 21] = (port_b >> 16) & 0xff;
1304 p[48 + 22] = (port_b >> 8) & 0xff;
1305 p[48 + 23] = port_b & 0xff;
1306 if (1 == pcontrol)
1307 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
1308 return sizeof(sas_pcd_m_pg);
1311 static int resp_sas_sha_m_spg(unsigned char * p, int pcontrol)
1312 { /* SAS SSP shared protocol specific port mode subpage */
1313 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
1314 0, 0, 0, 0, 0, 0, 0, 0,
1317 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
1318 if (1 == pcontrol)
1319 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
1320 return sizeof(sas_sha_m_pg);
1323 #define SDEBUG_MAX_MSENSE_SZ 256
1325 static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1326 struct sdebug_dev_info * devip)
1328 unsigned char dbd, llbaa;
1329 int pcontrol, pcode, subpcode, bd_len;
1330 unsigned char dev_spec;
1331 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1332 unsigned char * ap;
1333 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1334 unsigned char *cmd = (unsigned char *)scp->cmnd;
1336 if ((errsts = check_readiness(scp, 1, devip)))
1337 return errsts;
1338 dbd = !!(cmd[1] & 0x8);
1339 pcontrol = (cmd[2] & 0xc0) >> 6;
1340 pcode = cmd[2] & 0x3f;
1341 subpcode = cmd[3];
1342 msense_6 = (MODE_SENSE == cmd[0]);
1343 llbaa = msense_6 ? 0 : !!(cmd[1] & 0x10);
1344 if ((0 == scsi_debug_ptype) && (0 == dbd))
1345 bd_len = llbaa ? 16 : 8;
1346 else
1347 bd_len = 0;
1348 alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
1349 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
1350 if (0x3 == pcontrol) { /* Saving values not supported */
1351 mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
1353 return check_condition_result;
1355 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
1356 (devip->target * 1000) - 3;
1357 /* set DPOFUA bit for disks */
1358 if (0 == scsi_debug_ptype)
1359 dev_spec = (DEV_READONLY(target) ? 0x80 : 0x0) | 0x10;
1360 else
1361 dev_spec = 0x0;
1362 if (msense_6) {
1363 arr[2] = dev_spec;
1364 arr[3] = bd_len;
1365 offset = 4;
1366 } else {
1367 arr[3] = dev_spec;
1368 if (16 == bd_len)
1369 arr[4] = 0x1; /* set LONGLBA bit */
1370 arr[7] = bd_len; /* assume 255 or less */
1371 offset = 8;
1373 ap = arr + offset;
1374 if ((bd_len > 0) && (!sdebug_capacity))
1375 sdebug_capacity = get_sdebug_capacity();
1377 if (8 == bd_len) {
1378 if (sdebug_capacity > 0xfffffffe) {
1379 ap[0] = 0xff;
1380 ap[1] = 0xff;
1381 ap[2] = 0xff;
1382 ap[3] = 0xff;
1383 } else {
1384 ap[0] = (sdebug_capacity >> 24) & 0xff;
1385 ap[1] = (sdebug_capacity >> 16) & 0xff;
1386 ap[2] = (sdebug_capacity >> 8) & 0xff;
1387 ap[3] = sdebug_capacity & 0xff;
1389 ap[6] = (scsi_debug_sector_size >> 8) & 0xff;
1390 ap[7] = scsi_debug_sector_size & 0xff;
1391 offset += bd_len;
1392 ap = arr + offset;
1393 } else if (16 == bd_len) {
1394 unsigned long long capac = sdebug_capacity;
1396 for (k = 0; k < 8; ++k, capac >>= 8)
1397 ap[7 - k] = capac & 0xff;
1398 ap[12] = (scsi_debug_sector_size >> 24) & 0xff;
1399 ap[13] = (scsi_debug_sector_size >> 16) & 0xff;
1400 ap[14] = (scsi_debug_sector_size >> 8) & 0xff;
1401 ap[15] = scsi_debug_sector_size & 0xff;
1402 offset += bd_len;
1403 ap = arr + offset;
1406 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
1407 /* TODO: Control Extension page */
1408 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1410 return check_condition_result;
1412 switch (pcode) {
1413 case 0x1: /* Read-Write error recovery page, direct access */
1414 len = resp_err_recov_pg(ap, pcontrol, target);
1415 offset += len;
1416 break;
1417 case 0x2: /* Disconnect-Reconnect page, all devices */
1418 len = resp_disconnect_pg(ap, pcontrol, target);
1419 offset += len;
1420 break;
1421 case 0x3: /* Format device page, direct access */
1422 len = resp_format_pg(ap, pcontrol, target);
1423 offset += len;
1424 break;
1425 case 0x8: /* Caching page, direct access */
1426 len = resp_caching_pg(ap, pcontrol, target);
1427 offset += len;
1428 break;
1429 case 0xa: /* Control Mode page, all devices */
1430 len = resp_ctrl_m_pg(ap, pcontrol, target);
1431 offset += len;
1432 break;
1433 case 0x19: /* if spc==1 then sas phy, control+discover */
1434 if ((subpcode > 0x2) && (subpcode < 0xff)) {
1435 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1436 INVALID_FIELD_IN_CDB, 0);
1437 return check_condition_result;
1439 len = 0;
1440 if ((0x0 == subpcode) || (0xff == subpcode))
1441 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1442 if ((0x1 == subpcode) || (0xff == subpcode))
1443 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
1444 target_dev_id);
1445 if ((0x2 == subpcode) || (0xff == subpcode))
1446 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1447 offset += len;
1448 break;
1449 case 0x1c: /* Informational Exceptions Mode page, all devices */
1450 len = resp_iec_m_pg(ap, pcontrol, target);
1451 offset += len;
1452 break;
1453 case 0x3f: /* Read all Mode pages */
1454 if ((0 == subpcode) || (0xff == subpcode)) {
1455 len = resp_err_recov_pg(ap, pcontrol, target);
1456 len += resp_disconnect_pg(ap + len, pcontrol, target);
1457 len += resp_format_pg(ap + len, pcontrol, target);
1458 len += resp_caching_pg(ap + len, pcontrol, target);
1459 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
1460 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
1461 if (0xff == subpcode) {
1462 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
1463 target, target_dev_id);
1464 len += resp_sas_sha_m_spg(ap + len, pcontrol);
1466 len += resp_iec_m_pg(ap + len, pcontrol, target);
1467 } else {
1468 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1469 INVALID_FIELD_IN_CDB, 0);
1470 return check_condition_result;
1472 offset += len;
1473 break;
1474 default:
1475 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
1477 return check_condition_result;
1479 if (msense_6)
1480 arr[0] = offset - 1;
1481 else {
1482 arr[0] = ((offset - 2) >> 8) & 0xff;
1483 arr[1] = (offset - 2) & 0xff;
1485 return fill_from_dev_buffer(scp, arr, min(alloc_len, offset));
1488 #define SDEBUG_MAX_MSELECT_SZ 512
1490 static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1491 struct sdebug_dev_info * devip)
1493 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1494 int param_len, res, errsts, mpage;
1495 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1496 unsigned char *cmd = (unsigned char *)scp->cmnd;
1498 if ((errsts = check_readiness(scp, 1, devip)))
1499 return errsts;
1500 memset(arr, 0, sizeof(arr));
1501 pf = cmd[1] & 0x10;
1502 sp = cmd[1] & 0x1;
1503 param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
1504 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
1505 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1506 INVALID_FIELD_IN_CDB, 0);
1507 return check_condition_result;
1509 res = fetch_to_dev_buffer(scp, arr, param_len);
1510 if (-1 == res)
1511 return (DID_ERROR << 16);
1512 else if ((res < param_len) &&
1513 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
1514 printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
1515 " IO sent=%d bytes\n", param_len, res);
1516 md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
1517 bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
1518 if (md_len > 2) {
1519 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1520 INVALID_FIELD_IN_PARAM_LIST, 0);
1521 return check_condition_result;
1523 off = bd_len + (mselect6 ? 4 : 8);
1524 mpage = arr[off] & 0x3f;
1525 ps = !!(arr[off] & 0x80);
1526 if (ps) {
1527 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1528 INVALID_FIELD_IN_PARAM_LIST, 0);
1529 return check_condition_result;
1531 spf = !!(arr[off] & 0x40);
1532 pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
1533 (arr[off + 1] + 2);
1534 if ((pg_len + off) > param_len) {
1535 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1536 PARAMETER_LIST_LENGTH_ERR, 0);
1537 return check_condition_result;
1539 switch (mpage) {
1540 case 0xa: /* Control Mode page */
1541 if (ctrl_m_pg[1] == arr[off + 1]) {
1542 memcpy(ctrl_m_pg + 2, arr + off + 2,
1543 sizeof(ctrl_m_pg) - 2);
1544 scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
1545 return 0;
1547 break;
1548 case 0x1c: /* Informational Exceptions Mode page */
1549 if (iec_m_pg[1] == arr[off + 1]) {
1550 memcpy(iec_m_pg + 2, arr + off + 2,
1551 sizeof(iec_m_pg) - 2);
1552 return 0;
1554 break;
1555 default:
1556 break;
1558 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1559 INVALID_FIELD_IN_PARAM_LIST, 0);
1560 return check_condition_result;
1563 static int resp_temp_l_pg(unsigned char * arr)
1565 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
1566 0x0, 0x1, 0x3, 0x2, 0x0, 65,
1569 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
1570 return sizeof(temp_l_pg);
1573 static int resp_ie_l_pg(unsigned char * arr)
1575 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
1578 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
1579 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
1580 arr[4] = THRESHOLD_EXCEEDED;
1581 arr[5] = 0xff;
1583 return sizeof(ie_l_pg);
1586 #define SDEBUG_MAX_LSENSE_SZ 512
1588 static int resp_log_sense(struct scsi_cmnd * scp,
1589 struct sdebug_dev_info * devip)
1591 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1592 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1593 unsigned char *cmd = (unsigned char *)scp->cmnd;
1595 if ((errsts = check_readiness(scp, 1, devip)))
1596 return errsts;
1597 memset(arr, 0, sizeof(arr));
1598 ppc = cmd[1] & 0x2;
1599 sp = cmd[1] & 0x1;
1600 if (ppc || sp) {
1601 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1602 INVALID_FIELD_IN_CDB, 0);
1603 return check_condition_result;
1605 pcontrol = (cmd[2] & 0xc0) >> 6;
1606 pcode = cmd[2] & 0x3f;
1607 subpcode = cmd[3] & 0xff;
1608 alloc_len = (cmd[7] << 8) + cmd[8];
1609 arr[0] = pcode;
1610 if (0 == subpcode) {
1611 switch (pcode) {
1612 case 0x0: /* Supported log pages log page */
1613 n = 4;
1614 arr[n++] = 0x0; /* this page */
1615 arr[n++] = 0xd; /* Temperature */
1616 arr[n++] = 0x2f; /* Informational exceptions */
1617 arr[3] = n - 4;
1618 break;
1619 case 0xd: /* Temperature log page */
1620 arr[3] = resp_temp_l_pg(arr + 4);
1621 break;
1622 case 0x2f: /* Informational exceptions log page */
1623 arr[3] = resp_ie_l_pg(arr + 4);
1624 break;
1625 default:
1626 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1627 INVALID_FIELD_IN_CDB, 0);
1628 return check_condition_result;
1630 } else if (0xff == subpcode) {
1631 arr[0] |= 0x40;
1632 arr[1] = subpcode;
1633 switch (pcode) {
1634 case 0x0: /* Supported log pages and subpages log page */
1635 n = 4;
1636 arr[n++] = 0x0;
1637 arr[n++] = 0x0; /* 0,0 page */
1638 arr[n++] = 0x0;
1639 arr[n++] = 0xff; /* this page */
1640 arr[n++] = 0xd;
1641 arr[n++] = 0x0; /* Temperature */
1642 arr[n++] = 0x2f;
1643 arr[n++] = 0x0; /* Informational exceptions */
1644 arr[3] = n - 4;
1645 break;
1646 case 0xd: /* Temperature subpages */
1647 n = 4;
1648 arr[n++] = 0xd;
1649 arr[n++] = 0x0; /* Temperature */
1650 arr[3] = n - 4;
1651 break;
1652 case 0x2f: /* Informational exceptions subpages */
1653 n = 4;
1654 arr[n++] = 0x2f;
1655 arr[n++] = 0x0; /* Informational exceptions */
1656 arr[3] = n - 4;
1657 break;
1658 default:
1659 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1660 INVALID_FIELD_IN_CDB, 0);
1661 return check_condition_result;
1663 } else {
1664 mk_sense_buffer(devip, ILLEGAL_REQUEST,
1665 INVALID_FIELD_IN_CDB, 0);
1666 return check_condition_result;
1668 len = min(((arr[2] << 8) + arr[3]) + 4, alloc_len);
1669 return fill_from_dev_buffer(scp, arr,
1670 min(len, SDEBUG_MAX_INQ_ARR_SZ));
1673 static int check_device_access_params(struct sdebug_dev_info *devi,
1674 unsigned long long lba, unsigned int num)
1676 if (lba + num > sdebug_capacity) {
1677 mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
1678 return check_condition_result;
1680 /* transfer length excessive (tie in to block limits VPD page) */
1681 if (num > sdebug_store_sectors) {
1682 mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
1683 return check_condition_result;
1685 return 0;
1688 /* Returns number of bytes copied or -1 if error. */
1689 static int do_device_access(struct scsi_cmnd *scmd,
1690 struct sdebug_dev_info *devi,
1691 unsigned long long lba, unsigned int num, int write)
1693 int ret;
1694 unsigned long long block, rest = 0;
1695 struct scsi_data_buffer *sdb;
1696 enum dma_data_direction dir;
1697 size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
1698 off_t);
1700 if (write) {
1701 sdb = scsi_out(scmd);
1702 dir = DMA_TO_DEVICE;
1703 func = sg_pcopy_to_buffer;
1704 } else {
1705 sdb = scsi_in(scmd);
1706 dir = DMA_FROM_DEVICE;
1707 func = sg_pcopy_from_buffer;
1710 if (!sdb->length)
1711 return 0;
1712 if (!(scsi_bidi_cmnd(scmd) || scmd->sc_data_direction == dir))
1713 return -1;
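/*
 * do_div() divides 'lba' in place and returns the remainder, so 'block'
 * is lba modulo sdebug_store_sectors: accesses beyond the ram-disk
 * backing store wrap around to its start (see 'rest' below).
 */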
1715 block = do_div(lba, sdebug_store_sectors);
1716 if (block + num > sdebug_store_sectors)
1717 rest = block + num - sdebug_store_sectors;
1719 ret = func(sdb->table.sgl, sdb->table.nents,
1720 fake_storep + (block * scsi_debug_sector_size),
1721 (num - rest) * scsi_debug_sector_size, 0);
1722 if (ret != (num - rest) * scsi_debug_sector_size)
1723 return ret;
1725 if (rest) {
1726 ret += func(sdb->table.sgl, sdb->table.nents,
1727 fake_storep, rest * scsi_debug_sector_size,
1728 (num - rest) * scsi_debug_sector_size);
1731 return ret;
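/*
 * Compute the DIF guard tag: scsi_debug_guard selects the checksum
 * flavour, 1 for an IP checksum and 0 (the default, DEF_GUARD) for the
 * T10-DIF CRC.
 */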
1734 static u16 dif_compute_csum(const void *buf, int len)
1736 u16 csum;
1738 switch (scsi_debug_guard) {
1739 case 1:
1740 csum = ip_compute_csum(buf, len);
1741 break;
1742 case 0:
1743 csum = cpu_to_be16(crc_t10dif(buf, len));
1744 break;
1746 return csum;
1749 static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
1750 sector_t sector, u32 ei_lba)
1752 u16 csum = dif_compute_csum(data, scsi_debug_sector_size);
1754 if (sdt->guard_tag != csum) {
1755 pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
1756 __func__,
1757 (unsigned long)sector,
1758 be16_to_cpu(sdt->guard_tag),
1759 be16_to_cpu(csum));
1760 return 0x01;
1762 if (scsi_debug_dif == SD_DIF_TYPE1_PROTECTION &&
1763 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
1764 pr_err("%s: REF check failed on sector %lu\n",
1765 __func__, (unsigned long)sector);
1766 return 0x03;
1768 if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
1769 be32_to_cpu(sdt->ref_tag) != ei_lba) {
1770 pr_err("%s: REF check failed on sector %lu\n",
1771 __func__, (unsigned long)sector);
1772 dif_errors++;
1773 return 0x03;
1775 return 0;
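/* Verify the stored protection tuples covering a READ and copy them into the
 * command's protection scatter-gather list. Returns 0 or a dif_verify()
 * error code. */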
1778 static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1779 unsigned int sectors, u32 ei_lba)
1781 unsigned int i, resid;
1782 struct scatterlist *psgl;
1783 struct sd_dif_tuple *sdt;
1784 sector_t sector;
1785 sector_t tmp_sec = start_sec;
1786 void *paddr;
1788 start_sec = do_div(tmp_sec, sdebug_store_sectors);
1790 sdt = dif_storep + start_sec;
1792 for (i = 0 ; i < sectors ; i++) {
1793 int ret;
1795 if (sdt[i].app_tag == 0xffff)
1796 continue;
1798 sector = start_sec + i;
1800 ret = dif_verify(&sdt[i],
1801 fake_storep + sector * scsi_debug_sector_size,
1802 sector, ei_lba);
1803 if (ret) {
1804 dif_errors++;
1805 return ret;
1808 ei_lba++;
1811 /* Bytes of protection data to copy into sgl */
1812 resid = sectors * sizeof(*dif_storep);
1813 sector = start_sec;
1815 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1816 int len = min(psgl->length, resid);
1818 paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1819 memcpy(paddr, dif_storep + sector, len);
1821 sector += len / sizeof(*dif_storep);
1822 if (sector >= sdebug_store_sectors) {
1823 /* Force wrap */
1824 tmp_sec = sector;
1825 sector = do_div(tmp_sec, sdebug_store_sectors);
1827 resid -= len;
1828 kunmap_atomic(paddr);
1831 dix_reads++;
1833 return 0;
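/* Service a READ: range check, optional medium error injection, optional
 * DIF/DIX verification, then copy from the ramdisk under the atomic_rw
 * read lock. */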
1836 static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
1837 unsigned int num, struct sdebug_dev_info *devip,
1838 u32 ei_lba)
1840 unsigned long iflags;
1841 int ret;
1843 ret = check_device_access_params(devip, lba, num);
1844 if (ret)
1845 return ret;
1847 if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
1848 (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
1849 ((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
1850 /* claim unrecoverable read error */
1851 mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
1852 /* set info field and valid bit for fixed format sense data */
1853 if (0x70 == (devip->sense_buff[0] & 0x7f)) {
1854 devip->sense_buff[0] |= 0x80; /* Valid bit */
1855 ret = (lba < OPT_MEDIUM_ERR_ADDR)
1856 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
1857 devip->sense_buff[3] = (ret >> 24) & 0xff;
1858 devip->sense_buff[4] = (ret >> 16) & 0xff;
1859 devip->sense_buff[5] = (ret >> 8) & 0xff;
1860 devip->sense_buff[6] = ret & 0xff;
1862 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
1863 return check_condition_result;
1866 /* DIX + T10 DIF */
1867 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
1868 int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);
1870 if (prot_ret) {
1871 mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
1872 return illegal_condition_result;
1876 read_lock_irqsave(&atomic_rw, iflags);
1877 ret = do_device_access(SCpnt, devip, lba, num, 0);
1878 read_unlock_irqrestore(&atomic_rw, iflags);
1879 if (ret == -1)
1880 return DID_ERROR << 16;
1882 scsi_in(SCpnt)->resid = scsi_bufflen(SCpnt) - ret;
1884 return 0;
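/* Hex/ASCII dump of one block, used when a protection check fails on a write. */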
1887 void dump_sector(unsigned char *buf, int len)
1889 int i, j;
1891 printk(KERN_ERR ">>> Sector Dump <<<\n");
1893 for (i = 0 ; i < len ; i += 16) {
1894 printk(KERN_ERR "%04d: ", i);
1896 for (j = 0 ; j < 16 ; j++) {
1897 unsigned char c = buf[i+j];
1898 if (c >= 0x20 && c < 0x7e)
1899 printk(" %c ", buf[i+j]);
1900 else
1901 printk("%02x ", buf[i+j]);
1904 printk("\n");
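/* Verify the protection tuples supplied with a WRITE against the data being
 * written, then store them in dif_storep. Returns 0 or a dif_verify()
 * error code. */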
1908 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1909 unsigned int sectors, u32 ei_lba)
1911 int i, j, ret;
1912 struct sd_dif_tuple *sdt;
1913 struct scatterlist *dsgl = scsi_sglist(SCpnt);
1914 struct scatterlist *psgl = scsi_prot_sglist(SCpnt);
1915 void *daddr, *paddr;
1916 sector_t tmp_sec = start_sec;
1917 sector_t sector;
1918 int ppage_offset;
1920 sector = do_div(tmp_sec, sdebug_store_sectors);
1922 BUG_ON(scsi_sg_count(SCpnt) == 0);
1923 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1925 ppage_offset = 0;
1927 /* For each data page */
1928 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1929 daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;
1930 paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1932 /* For each sector-sized chunk in data page */
1933 for (j = 0; j < dsgl->length; j += scsi_debug_sector_size) {
1935 /* If we're at the end of the current
1936 * protection page advance to the next one
1938 if (ppage_offset >= psgl->length) {
1939 kunmap_atomic(paddr);
1940 psgl = sg_next(psgl);
1941 BUG_ON(psgl == NULL);
1942 paddr = kmap_atomic(sg_page(psgl))
1943 + psgl->offset;
1944 ppage_offset = 0;
1947 sdt = paddr + ppage_offset;
1949 ret = dif_verify(sdt, daddr + j, start_sec, ei_lba);
1950 if (ret) {
1951 dump_sector(daddr + j, scsi_debug_sector_size);
1952 goto out;
1955 /* Would be great to copy this in bigger
1956 * chunks. However, for the sake of
1957 * correctness we need to verify each sector
1958 * before writing it to "stable" storage
1960 memcpy(dif_storep + sector, sdt, sizeof(*sdt));
1962 sector++;
1964 if (sector == sdebug_store_sectors)
1965 sector = 0; /* Force wrap */
1967 start_sec++;
1968 ei_lba++;
1969 ppage_offset += sizeof(struct sd_dif_tuple);
1972 kunmap_atomic(paddr);
1973 kunmap_atomic(daddr);
1976 dix_writes++;
1978 return 0;
1980 out:
1981 dif_errors++;
1982 kunmap_atomic(paddr);
1983 kunmap_atomic(daddr);
1984 return ret;
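/* Logical block provisioning state: map_storep holds one bit per group of
 * scsi_debug_unmap_granularity blocks (shifted by scsi_debug_unmap_alignment);
 * a set bit means that group is mapped (provisioned). */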
1987 static unsigned long lba_to_map_index(sector_t lba)
1989 if (scsi_debug_unmap_alignment) {
1990 lba += scsi_debug_unmap_granularity -
1991 scsi_debug_unmap_alignment;
1993 do_div(lba, scsi_debug_unmap_granularity);
1995 return lba;
1998 static sector_t map_index_to_lba(unsigned long index)
2000 return index * scsi_debug_unmap_granularity -
2001 scsi_debug_unmap_alignment;
2004 static unsigned int map_state(sector_t lba, unsigned int *num)
2006 sector_t end;
2007 unsigned int mapped;
2008 unsigned long index;
2009 unsigned long next;
2011 index = lba_to_map_index(lba);
2012 mapped = test_bit(index, map_storep);
2014 if (mapped)
2015 next = find_next_zero_bit(map_storep, map_size, index);
2016 else
2017 next = find_next_bit(map_storep, map_size, index);
2019 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
2020 *num = end - lba;
2022 return mapped;
2025 static void map_region(sector_t lba, unsigned int len)
2027 sector_t end = lba + len;
2029 while (lba < end) {
2030 unsigned long index = lba_to_map_index(lba);
2032 if (index < map_size)
2033 set_bit(index, map_storep);
2035 lba = map_index_to_lba(index + 1);
2039 static void unmap_region(sector_t lba, unsigned int len)
2041 sector_t end = lba + len;
2043 while (lba < end) {
2044 unsigned long index = lba_to_map_index(lba);
2046 if (lba == map_index_to_lba(index) &&
2047 lba + scsi_debug_unmap_granularity <= end &&
2048 index < map_size) {
2049 clear_bit(index, map_storep);
2050 if (scsi_debug_lbprz) {
2051 memset(fake_storep +
2052 lba * scsi_debug_sector_size, 0,
2053 scsi_debug_sector_size *
2054 scsi_debug_unmap_granularity);
2056 if (dif_storep) {
2057 memset(dif_storep + lba, 0xff,
2058 sizeof(*dif_storep) *
2059 scsi_debug_unmap_granularity);
2062 lba = map_index_to_lba(index + 1);
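/* Service a WRITE: range check, optional DIF/DIX verification, then copy into
 * the ramdisk under the atomic_rw write lock, marking the range mapped when
 * logical block provisioning is enabled. */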
2066 static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
2067 unsigned int num, struct sdebug_dev_info *devip,
2068 u32 ei_lba)
2070 unsigned long iflags;
2071 int ret;
2073 ret = check_device_access_params(devip, lba, num);
2074 if (ret)
2075 return ret;
2077 /* DIX + T10 DIF */
2078 if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {
2079 int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);
2081 if (prot_ret) {
2082 mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
2083 return illegal_condition_result;
2087 write_lock_irqsave(&atomic_rw, iflags);
2088 ret = do_device_access(SCpnt, devip, lba, num, 1);
2089 if (scsi_debug_lbp())
2090 map_region(lba, num);
2091 write_unlock_irqrestore(&atomic_rw, iflags);
2092 if (-1 == ret)
2093 return (DID_ERROR << 16);
2094 else if ((ret < (num * scsi_debug_sector_size)) &&
2095 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2096 printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
2097 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2099 return 0;
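/* Service WRITE SAME(10/16): with the UNMAP bit set the range is simply
 * unmapped; otherwise one block is fetched from the host and replicated
 * across the range. */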
2102 static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
2103 unsigned int num, struct sdebug_dev_info *devip,
2104 u32 ei_lba, unsigned int unmap)
2106 unsigned long iflags;
2107 unsigned long long i;
2108 int ret;
2110 ret = check_device_access_params(devip, lba, num);
2111 if (ret)
2112 return ret;
2114 if (num > scsi_debug_write_same_length) {
2115 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2117 return check_condition_result;
2120 write_lock_irqsave(&atomic_rw, iflags);
2122 if (unmap && scsi_debug_lbp()) {
2123 unmap_region(lba, num);
2124 goto out;
2127 /* Else fetch one logical block */
2128 ret = fetch_to_dev_buffer(scmd,
2129 fake_storep + (lba * scsi_debug_sector_size),
2130 scsi_debug_sector_size);
2132 if (-1 == ret) {
2133 write_unlock_irqrestore(&atomic_rw, iflags);
2134 return (DID_ERROR << 16);
2135 } else if ((ret < (num * scsi_debug_sector_size)) &&
2136 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
2137 printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
2138 " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
2140 /* Copy first sector to remaining blocks */
2141 for (i = 1 ; i < num ; i++)
2142 memcpy(fake_storep + ((lba + i) * scsi_debug_sector_size),
2143 fake_storep + (lba * scsi_debug_sector_size),
2144 scsi_debug_sector_size);
2146 if (scsi_debug_lbp())
2147 map_region(lba, num);
2148 out:
2149 write_unlock_irqrestore(&atomic_rw, iflags);
2151 return 0;
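/* UNMAP parameter list block descriptor (SBC-3): 8 byte LBA, 4 byte block
 * count, 4 bytes reserved. */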
2154 struct unmap_block_desc {
2155 __be64 lba;
2156 __be32 blocks;
2157 __be32 __reserved;
2160 static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
2162 unsigned char *buf;
2163 struct unmap_block_desc *desc;
2164 unsigned int i, payload_len, descriptors;
2165 int ret;
2167 ret = check_readiness(scmd, 1, devip);
2168 if (ret)
2169 return ret;
2171 payload_len = get_unaligned_be16(&scmd->cmnd[7]);
2172 BUG_ON(scsi_bufflen(scmd) != payload_len);
2174 descriptors = (payload_len - 8) / 16;
2176 buf = kmalloc(scsi_bufflen(scmd), GFP_ATOMIC);
2177 if (!buf)
2178 return check_condition_result;
2180 scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
2182 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
2183 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
2185 desc = (void *)&buf[8];
2187 for (i = 0 ; i < descriptors ; i++) {
2188 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
2189 unsigned int num = get_unaligned_be32(&desc[i].blocks);
2191 ret = check_device_access_params(devip, lba, num);
2192 if (ret)
2193 goto out;
2195 unmap_region(lba, num);
2198 ret = 0;
2200 out:
2201 kfree(buf);
2203 return ret;
2206 #define SDEBUG_GET_LBA_STATUS_LEN 32
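/* Service GET LBA STATUS: report whether the given LBA is mapped and how many
 * following blocks share that state. */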
2208 static int resp_get_lba_status(struct scsi_cmnd * scmd,
2209 struct sdebug_dev_info * devip)
2211 unsigned long long lba;
2212 unsigned int alloc_len, mapped, num;
2213 unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
2214 int ret;
2216 ret = check_readiness(scmd, 1, devip);
2217 if (ret)
2218 return ret;
2220 lba = get_unaligned_be64(&scmd->cmnd[2]);
2221 alloc_len = get_unaligned_be32(&scmd->cmnd[10]);
2223 if (alloc_len < 24)
2224 return 0;
2226 ret = check_device_access_params(devip, lba, 1);
2227 if (ret)
2228 return ret;
2230 mapped = map_state(lba, &num);
2232 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
2233 put_unaligned_be32(20, &arr[0]); /* Parameter Data Length */
2234 put_unaligned_be64(lba, &arr[8]); /* LBA */
2235 put_unaligned_be32(num, &arr[16]); /* Number of blocks */
2236 arr[20] = !mapped; /* mapped = 0, unmapped = 1 */
2238 return fill_from_dev_buffer(scmd, arr, SDEBUG_GET_LBA_STATUS_LEN);
2241 #define SDEBUG_RLUN_ARR_SZ 256
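/* Service REPORT LUNS, optionally including the REPORT LUNS well known LU. */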
2243 static int resp_report_luns(struct scsi_cmnd * scp,
2244 struct sdebug_dev_info * devip)
2246 unsigned int alloc_len;
2247 int lun_cnt, i, upper, num, n, wlun, lun;
2248 unsigned char *cmd = (unsigned char *)scp->cmnd;
2249 int select_report = (int)cmd[2];
2250 struct scsi_lun *one_lun;
2251 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
2252 unsigned char * max_addr;
2254 alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
2255 if ((alloc_len < 4) || (select_report > 2)) {
2256 mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
2258 return check_condition_result;
2260 /* can produce response with up to 16k luns (lun 0 to lun 16383) */
2261 memset(arr, 0, SDEBUG_RLUN_ARR_SZ);
2262 lun_cnt = scsi_debug_max_luns;
2263 if (1 == select_report)
2264 lun_cnt = 0;
2265 else if (scsi_debug_no_lun_0 && (lun_cnt > 0))
2266 --lun_cnt;
2267 wlun = (select_report > 0) ? 1 : 0;
2268 num = lun_cnt + wlun;
2269 arr[2] = ((sizeof(struct scsi_lun) * num) >> 8) & 0xff;
2270 arr[3] = (sizeof(struct scsi_lun) * num) & 0xff;
2271 n = min((int)((SDEBUG_RLUN_ARR_SZ - 8) /
2272 sizeof(struct scsi_lun)), num);
2273 if (n < num) {
2274 wlun = 0;
2275 lun_cnt = n;
2277 one_lun = (struct scsi_lun *) &arr[8];
2278 max_addr = arr + SDEBUG_RLUN_ARR_SZ;
2279 for (i = 0, lun = (scsi_debug_no_lun_0 ? 1 : 0);
2280 ((i < lun_cnt) && ((unsigned char *)(one_lun + i) < max_addr));
2281 i++, lun++) {
2282 upper = (lun >> 8) & 0x3f;
2283 if (upper)
2284 one_lun[i].scsi_lun[0] =
2285 (upper | (SAM2_LUN_ADDRESS_METHOD << 6));
2286 one_lun[i].scsi_lun[1] = lun & 0xff;
2288 if (wlun) {
2289 one_lun[i].scsi_lun[0] = (SAM2_WLUN_REPORT_LUNS >> 8) & 0xff;
2290 one_lun[i].scsi_lun[1] = SAM2_WLUN_REPORT_LUNS & 0xff;
2291 i++;
2293 alloc_len = (unsigned char *)(one_lun + i) - arr;
2294 return fill_from_dev_buffer(scp, arr,
2295 min((int)alloc_len, SDEBUG_RLUN_ARR_SZ));
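/* XDWRITEREAD(10) support: XOR the received data-out into the data-in buffer
 * of this bidirectional command. */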
2298 static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2299 unsigned int num, struct sdebug_dev_info *devip)
2301 int i, j, ret = -1;
2302 unsigned char *kaddr, *buf;
2303 unsigned int offset;
2304 struct scatterlist *sg;
2305 struct scsi_data_buffer *sdb = scsi_in(scp);
2307 /* it would be better to avoid this temporary buffer */
2308 buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
2309 if (!buf)
2310 return ret;
2312 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
2314 offset = 0;
2315 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2316 kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
2317 if (!kaddr)
2318 goto out;
2320 for (j = 0; j < sg->length; j++)
2321 *(kaddr + sg->offset + j) ^= *(buf + offset + j);
2323 offset += sg->length;
2324 kunmap_atomic(kaddr);
2326 ret = 0;
2327 out:
2328 kfree(buf);
2330 return ret;
2333 /* Called when a queued command's delay timer expires; completes the command back to the mid level. */
2334 static void timer_intr_handler(unsigned long indx)
2336 struct sdebug_queued_cmd * sqcp;
2337 unsigned long iflags;
2339 if (indx >= scsi_debug_max_queue) {
2340 printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
2341 "large\n");
2342 return;
2344 spin_lock_irqsave(&queued_arr_lock, iflags);
2345 sqcp = &queued_arr[(int)indx];
2346 if (! sqcp->in_use) {
2347 printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
2348 "interrupt\n");
2349 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2350 return;
2352 sqcp->in_use = 0;
2353 if (sqcp->done_funct) {
2354 sqcp->a_cmnd->result = sqcp->scsi_result;
2355 sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
2357 sqcp->done_funct = NULL;
2358 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2362 static struct sdebug_dev_info *
2363 sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
2365 struct sdebug_dev_info *devip;
2367 devip = kzalloc(sizeof(*devip), flags);
2368 if (devip) {
2369 devip->sdbg_host = sdbg_host;
2370 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
2372 return devip;
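/* Find the sdebug_dev_info for an scsi_device, reusing a free slot or
 * allocating a new entry if the device has not been seen before. */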
2375 static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
2377 struct sdebug_host_info * sdbg_host;
2378 struct sdebug_dev_info * open_devip = NULL;
2379 struct sdebug_dev_info * devip =
2380 (struct sdebug_dev_info *)sdev->hostdata;
2382 if (devip)
2383 return devip;
2384 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
2385 if (!sdbg_host) {
2386 printk(KERN_ERR "Host info NULL\n");
2387 return NULL;
2389 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
2390 if ((devip->used) && (devip->channel == sdev->channel) &&
2391 (devip->target == sdev->id) &&
2392 (devip->lun == sdev->lun))
2393 return devip;
2394 else {
2395 if ((!devip->used) && (!open_devip))
2396 open_devip = devip;
2399 if (!open_devip) { /* try and make a new one */
2400 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
2401 if (!open_devip) {
2402 printk(KERN_ERR "%s: out of memory at line %d\n",
2403 __func__, __LINE__);
2404 return NULL;
2408 open_devip->channel = sdev->channel;
2409 open_devip->target = sdev->id;
2410 open_devip->lun = sdev->lun;
2411 open_devip->sdbg_host = sdbg_host;
2412 open_devip->reset = 1;
2413 open_devip->used = 1;
2414 memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
2415 if (scsi_debug_dsense)
2416 open_devip->sense_buff[0] = 0x72;
2417 else {
2418 open_devip->sense_buff[0] = 0x70;
2419 open_devip->sense_buff[7] = 0xa;
2421 if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
2422 open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
2424 return open_devip;
2427 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
2429 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2430 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
2431 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2432 queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
2433 return 0;
2436 static int scsi_debug_slave_configure(struct scsi_device *sdp)
2438 struct sdebug_dev_info *devip;
2440 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2441 printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
2442 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2443 if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
2444 sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
2445 devip = devInfoReg(sdp);
2446 if (NULL == devip)
2447 return 1; /* no resources, will be marked offline */
2448 sdp->hostdata = devip;
2449 if (sdp->host->cmd_per_lun)
2450 scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
2451 sdp->host->cmd_per_lun);
2452 blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
2453 if (scsi_debug_no_uld)
2454 sdp->no_uld_attach = 1;
2455 return 0;
2458 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
2460 struct sdebug_dev_info *devip =
2461 (struct sdebug_dev_info *)sdp->hostdata;
2463 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2464 printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
2465 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
2466 if (devip) {
2467 /* make this slot available for re-use */
2468 devip->used = 0;
2469 sdp->hostdata = NULL;
2473 /* Returns 1 if 'cmnd' was found and its timer deleted, else returns 0 */
2474 static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2476 unsigned long iflags;
2477 int k;
2478 struct sdebug_queued_cmd *sqcp;
2480 spin_lock_irqsave(&queued_arr_lock, iflags);
2481 for (k = 0; k < scsi_debug_max_queue; ++k) {
2482 sqcp = &queued_arr[k];
2483 if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
2484 del_timer_sync(&sqcp->cmnd_timer);
2485 sqcp->in_use = 0;
2486 sqcp->a_cmnd = NULL;
2487 break;
2490 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2491 return (k < scsi_debug_max_queue) ? 1 : 0;
2494 /* Deletes (stops) timers of all queued commands */
2495 static void stop_all_queued(void)
2497 unsigned long iflags;
2498 int k;
2499 struct sdebug_queued_cmd *sqcp;
2501 spin_lock_irqsave(&queued_arr_lock, iflags);
2502 for (k = 0; k < scsi_debug_max_queue; ++k) {
2503 sqcp = &queued_arr[k];
2504 if (sqcp->in_use && sqcp->a_cmnd) {
2505 del_timer_sync(&sqcp->cmnd_timer);
2506 sqcp->in_use = 0;
2507 sqcp->a_cmnd = NULL;
2510 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2513 static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
2515 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2516 printk(KERN_INFO "scsi_debug: abort\n");
2517 ++num_aborts;
2518 stop_queued_cmnd(SCpnt);
2519 return SUCCESS;
2522 static int scsi_debug_biosparam(struct scsi_device *sdev,
2523 struct block_device * bdev, sector_t capacity, int *info)
2525 int res;
2526 unsigned char *buf;
2528 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2529 printk(KERN_INFO "scsi_debug: biosparam\n");
2530 buf = scsi_bios_ptable(bdev);
2531 if (buf) {
2532 res = scsi_partsize(buf, capacity,
2533 &info[2], &info[0], &info[1]);
2534 kfree(buf);
2535 if (! res)
2536 return res;
2538 info[0] = sdebug_heads;
2539 info[1] = sdebug_sectors_per;
2540 info[2] = sdebug_cylinders_per;
2541 return 0;
2544 static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
2546 struct sdebug_dev_info * devip;
2548 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2549 printk(KERN_INFO "scsi_debug: device_reset\n");
2550 ++num_dev_resets;
2551 if (SCpnt) {
2552 devip = devInfoReg(SCpnt->device);
2553 if (devip)
2554 devip->reset = 1;
2556 return SUCCESS;
2559 static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
2561 struct sdebug_host_info *sdbg_host;
2562 struct sdebug_dev_info * dev_info;
2563 struct scsi_device * sdp;
2564 struct Scsi_Host * hp;
2566 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2567 printk(KERN_INFO "scsi_debug: bus_reset\n");
2568 ++num_bus_resets;
2569 if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
2570 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
2571 if (sdbg_host) {
2572 list_for_each_entry(dev_info,
2573 &sdbg_host->dev_info_list,
2574 dev_list)
2575 dev_info->reset = 1;
2578 return SUCCESS;
2581 static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
2583 struct sdebug_host_info * sdbg_host;
2584 struct sdebug_dev_info * dev_info;
2586 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
2587 printk(KERN_INFO "scsi_debug: host_reset\n");
2588 ++num_host_resets;
2589 spin_lock(&sdebug_host_list_lock);
2590 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
2591 list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
2592 dev_list)
2593 dev_info->reset = 1;
2595 spin_unlock(&sdebug_host_list_lock);
2596 stop_all_queued();
2597 return SUCCESS;
2600 /* Initializes timers in queued array */
2601 static void __init init_all_queued(void)
2603 unsigned long iflags;
2604 int k;
2605 struct sdebug_queued_cmd * sqcp;
2607 spin_lock_irqsave(&queued_arr_lock, iflags);
2608 for (k = 0; k < scsi_debug_max_queue; ++k) {
2609 sqcp = &queued_arr[k];
2610 init_timer(&sqcp->cmnd_timer);
2611 sqcp->in_use = 0;
2612 sqcp->a_cmnd = NULL;
2614 spin_unlock_irqrestore(&queued_arr_lock, iflags);
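/* Write a DOS partition table at the start of the ramdisk, dividing it into
 * scsi_debug_num_parts (at most SDEBUG_MAX_PARTS) roughly equal partitions. */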
2617 static void __init sdebug_build_parts(unsigned char *ramp,
2618 unsigned long store_size)
2620 struct partition * pp;
2621 int starts[SDEBUG_MAX_PARTS + 2];
2622 int sectors_per_part, num_sectors, k;
2623 int heads_by_sects, start_sec, end_sec;
2625 /* assume partition table already zeroed */
2626 if ((scsi_debug_num_parts < 1) || (store_size < 1048576))
2627 return;
2628 if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
2629 scsi_debug_num_parts = SDEBUG_MAX_PARTS;
2630 printk(KERN_WARNING "scsi_debug:build_parts: reducing "
2631 "partitions to %d\n", SDEBUG_MAX_PARTS);
2633 num_sectors = (int)sdebug_store_sectors;
2634 sectors_per_part = (num_sectors - sdebug_sectors_per)
2635 / scsi_debug_num_parts;
2636 heads_by_sects = sdebug_heads * sdebug_sectors_per;
2637 starts[0] = sdebug_sectors_per;
2638 for (k = 1; k < scsi_debug_num_parts; ++k)
2639 starts[k] = ((k * sectors_per_part) / heads_by_sects)
2640 * heads_by_sects;
2641 starts[scsi_debug_num_parts] = num_sectors;
2642 starts[scsi_debug_num_parts + 1] = 0;
2644 ramp[510] = 0x55; /* magic partition markings */
2645 ramp[511] = 0xAA;
2646 pp = (struct partition *)(ramp + 0x1be);
2647 for (k = 0; starts[k + 1]; ++k, ++pp) {
2648 start_sec = starts[k];
2649 end_sec = starts[k + 1] - 1;
2650 pp->boot_ind = 0;
2652 pp->cyl = start_sec / heads_by_sects;
2653 pp->head = (start_sec - (pp->cyl * heads_by_sects))
2654 / sdebug_sectors_per;
2655 pp->sector = (start_sec % sdebug_sectors_per) + 1;
2657 pp->end_cyl = end_sec / heads_by_sects;
2658 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
2659 / sdebug_sectors_per;
2660 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
2662 pp->start_sect = start_sec;
2663 pp->nr_sects = end_sec - start_sec + 1;
2664 pp->sys_ind = 0x83; /* plain Linux partition */
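/* Complete a command: immediately when delta_jiff <= 0, otherwise via a timer
 * entry in queued_arr. Returns 1 (busy to the mid level) if the queue is full. */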
2668 static int schedule_resp(struct scsi_cmnd * cmnd,
2669 struct sdebug_dev_info * devip,
2670 done_funct_t done, int scsi_result, int delta_jiff)
2672 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
2673 if (scsi_result) {
2674 struct scsi_device * sdp = cmnd->device;
2676 printk(KERN_INFO "scsi_debug: <%u %u %u %u> "
2677 "non-zero result=0x%x\n", sdp->host->host_no,
2678 sdp->channel, sdp->id, sdp->lun, scsi_result);
2681 if (cmnd && devip) {
2682 /* simulate autosense by this driver */
2683 if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
2684 memcpy(cmnd->sense_buffer, devip->sense_buff,
2685 (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
2686 SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
2688 if (delta_jiff <= 0) {
2689 if (cmnd)
2690 cmnd->result = scsi_result;
2691 if (done)
2692 done(cmnd);
2693 return 0;
2694 } else {
2695 unsigned long iflags;
2696 int k;
2697 struct sdebug_queued_cmd * sqcp = NULL;
2699 spin_lock_irqsave(&queued_arr_lock, iflags);
2700 for (k = 0; k < scsi_debug_max_queue; ++k) {
2701 sqcp = &queued_arr[k];
2702 if (! sqcp->in_use)
2703 break;
2705 if (k >= scsi_debug_max_queue) {
2706 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2707 printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
2708 return 1; /* report busy to mid level */
2710 sqcp->in_use = 1;
2711 sqcp->a_cmnd = cmnd;
2712 sqcp->scsi_result = scsi_result;
2713 sqcp->done_funct = done;
2714 sqcp->cmnd_timer.function = timer_intr_handler;
2715 sqcp->cmnd_timer.data = k;
2716 sqcp->cmnd_timer.expires = jiffies + delta_jiff;
2717 add_timer(&sqcp->cmnd_timer);
2718 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2719 if (cmnd)
2720 cmnd->result = 0;
2721 return 0;
2724 /* Note: The following macros create attribute files in the
2725    /sys/module/scsi_debug/parameters directory. Unfortunately this
2726    driver is not notified when one of those attributes is changed, so
2727    it cannot trigger auxiliary actions as it can when the corresponding
2728    attribute in the /sys/bus/pseudo/drivers/scsi_debug directory is changed.
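/* A possible alternative (sketch only, not used by this driver): defining a
 * parameter with module_param_cb() and a custom kernel_param_ops .set method
 * would let writes to /sys/module/scsi_debug/parameters/<name> trigger such
 * auxiliary actions, e.g.:
 *
 *	static int sdebug_delay_param_set(const char *val,
 *					  const struct kernel_param *kp)
 *	{
 *		int ret = param_set_int(val, kp);	// store the new value
 *		// react to the updated scsi_debug_delay here
 *		return ret;
 *	}
 *	static const struct kernel_param_ops sdebug_delay_param_ops = {
 *		.set = sdebug_delay_param_set,
 *		.get = param_get_int,
 *	};
 *	module_param_cb(delay, &sdebug_delay_param_ops, &scsi_debug_delay,
 *			S_IRUGO | S_IWUSR);
 */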
2730 module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
2731 module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
2732 module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
2733 module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
2734 module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
2735 module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
2736 module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
2737 module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
2738 module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
2739 module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
2740 module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
2741 module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
2742 module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
2743 module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
2744 module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
2745 module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
2746 module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
2747 module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
2748 module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
2749 module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
2750 module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
2751 module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
2752 module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
2753 module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
2754 module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
2755 module_param_named(removable, scsi_debug_removable, bool, S_IRUGO | S_IWUSR);
2756 module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
2757 module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
2758 module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
2759 module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
2760 module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
2761 module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
2762 module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
2763 module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
2764 S_IRUGO | S_IWUSR);
2765 module_param_named(write_same_length, scsi_debug_write_same_length, int,
2766 S_IRUGO | S_IWUSR);
2768 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
2769 MODULE_DESCRIPTION("SCSI debug adapter driver");
2770 MODULE_LICENSE("GPL");
2771 MODULE_VERSION(SCSI_DEBUG_VERSION);
2773 MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
2774 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
2775 MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
2776 MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
2777 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
2778 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
2779 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
2780 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
2781 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
2782 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
2783 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
2784 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
2785 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
2786 MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
2787 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
2788 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
2789 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
2790 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
2791 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0)");
2792 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
2793 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
2794 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
2795 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
2796 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
2797 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
2798 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
2799 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
2800 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
2801 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
2802 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
2803 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks that can be unmapped in one cmd (def=0xffffffff)");
2804 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
2805 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
2806 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
2807 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
2809 static char sdebug_info[256];
2811 static const char * scsi_debug_info(struct Scsi_Host * shp)
2813 sprintf(sdebug_info, "scsi_debug, version %s [%s], "
2814 "dev_size_mb=%d, opts=0x%x", SCSI_DEBUG_VERSION,
2815 scsi_debug_version_date, scsi_debug_dev_size_mb,
2816 scsi_debug_opts);
2817 return sdebug_info;
2820 /* scsi_debug_write_info
2821  * Handles writes to this driver's /proc/scsi entry: accepts a new value for scsi_debug_opts
2823 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
2825 char arr[16];
2826 int opts;
2827 int minLen = length > 15 ? 15 : length;
2829 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
2830 return -EACCES;
2831 memcpy(arr, buffer, minLen);
2832 arr[minLen] = '\0';
2833 if (1 != sscanf(arr, "%d", &opts))
2834 return -EINVAL;
2835 scsi_debug_opts = opts;
2836 if (scsi_debug_every_nth != 0)
2837 scsi_debug_cmnd_count = 0;
2838 return length;
2841 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
2843 seq_printf(m, "scsi_debug adapter driver, version "
2844 "%s [%s]\n"
2845 "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
2846 "every_nth=%d(curr:%d)\n"
2847 "delay=%d, max_luns=%d, scsi_level=%d\n"
2848 "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
2849 "number of aborts=%d, device_reset=%d, bus_resets=%d, "
2850 "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
2851 SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
2852 scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
2853 scsi_debug_cmnd_count, scsi_debug_delay,
2854 scsi_debug_max_luns, scsi_debug_scsi_level,
2855 scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
2856 sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
2857 num_host_resets, dix_reads, dix_writes, dif_errors);
2858 return 0;
2861 static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf)
2863 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
2866 static ssize_t sdebug_delay_store(struct device_driver * ddp,
2867 const char * buf, size_t count)
2869 int delay;
2870 char work[20];
2872 if (1 == sscanf(buf, "%10s", work)) {
2873 if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
2874 scsi_debug_delay = delay;
2875 return count;
2878 return -EINVAL;
2880 DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show,
2881 sdebug_delay_store);
2883 static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf)
2885 return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
2888 static ssize_t sdebug_opts_store(struct device_driver * ddp,
2889 const char * buf, size_t count)
2891 int opts;
2892 char work[20];
2894 if (1 == sscanf(buf, "%10s", work)) {
2895 if (0 == strnicmp(work,"0x", 2)) {
2896 if (1 == sscanf(&work[2], "%x", &opts))
2897 goto opts_done;
2898 } else {
2899 if (1 == sscanf(work, "%d", &opts))
2900 goto opts_done;
2903 return -EINVAL;
2904 opts_done:
2905 scsi_debug_opts = opts;
2906 scsi_debug_cmnd_count = 0;
2907 return count;
2909 DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show,
2910 sdebug_opts_store);
2912 static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf)
2914 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);
2916 static ssize_t sdebug_ptype_store(struct device_driver * ddp,
2917 const char * buf, size_t count)
2919 int n;
2921 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2922 scsi_debug_ptype = n;
2923 return count;
2925 return -EINVAL;
2927 DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store);
2929 static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf)
2931 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);
2933 static ssize_t sdebug_dsense_store(struct device_driver * ddp,
2934 const char * buf, size_t count)
2936 int n;
2938 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2939 scsi_debug_dsense = n;
2940 return count;
2942 return -EINVAL;
2944 DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show,
2945 sdebug_dsense_store);
2947 static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf)
2949 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);
2951 static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,
2952 const char * buf, size_t count)
2954 int n;
2956 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2957 scsi_debug_fake_rw = n;
2958 return count;
2960 return -EINVAL;
2962 DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show,
2963 sdebug_fake_rw_store);
2965 static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf)
2967 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);
2969 static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,
2970 const char * buf, size_t count)
2972 int n;
2974 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2975 scsi_debug_no_lun_0 = n;
2976 return count;
2978 return -EINVAL;
2980 DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show,
2981 sdebug_no_lun_0_store);
2983 static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf)
2985 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);
2987 static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,
2988 const char * buf, size_t count)
2990 int n;
2992 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
2993 scsi_debug_num_tgts = n;
2994 sdebug_max_tgts_luns();
2995 return count;
2997 return -EINVAL;
2999 DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show,
3000 sdebug_num_tgts_store);
3002 static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf)
3004 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);
3006 DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL);
3008 static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf)
3010 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);
3012 DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL);
3014 static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf)
3016 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);
3018 static ssize_t sdebug_every_nth_store(struct device_driver * ddp,
3019 const char * buf, size_t count)
3021 int nth;
3023 if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
3024 scsi_debug_every_nth = nth;
3025 scsi_debug_cmnd_count = 0;
3026 return count;
3028 return -EINVAL;
3030 DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show,
3031 sdebug_every_nth_store);
3033 static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf)
3035 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);
3037 static ssize_t sdebug_max_luns_store(struct device_driver * ddp,
3038 const char * buf, size_t count)
3040 int n;
3042 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3043 scsi_debug_max_luns = n;
3044 sdebug_max_tgts_luns();
3045 return count;
3047 return -EINVAL;
3049 DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show,
3050 sdebug_max_luns_store);
3052 static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf)
3054 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
3056 static ssize_t sdebug_max_queue_store(struct device_driver * ddp,
3057 const char * buf, size_t count)
3059 int n;
3061 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
3062 (n <= SCSI_DEBUG_CANQUEUE)) {
3063 scsi_debug_max_queue = n;
3064 return count;
3066 return -EINVAL;
3068 DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show,
3069 sdebug_max_queue_store);
3071 static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf)
3073 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);
3075 DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL);
3077 static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf)
3079 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);
3081 DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL);
3083 static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf)
3085 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);
3087 static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,
3088 const char * buf, size_t count)
3090 int n;
3092 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3093 scsi_debug_virtual_gb = n;
3095 sdebug_capacity = get_sdebug_capacity();
3097 return count;
3099 return -EINVAL;
3101 DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show,
3102 sdebug_virtual_gb_store);
3104 static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf)
3106 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);
3109 static ssize_t sdebug_add_host_store(struct device_driver * ddp,
3110 const char * buf, size_t count)
3112 int delta_hosts;
3114 if (sscanf(buf, "%d", &delta_hosts) != 1)
3115 return -EINVAL;
3116 if (delta_hosts > 0) {
3117 do {
3118 sdebug_add_adapter();
3119 } while (--delta_hosts);
3120 } else if (delta_hosts < 0) {
3121 do {
3122 sdebug_remove_adapter();
3123 } while (++delta_hosts);
3125 return count;
3127 DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show,
3128 sdebug_add_host_store);
3130 static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp,
3131 char * buf)
3133 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);
3135 static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,
3136 const char * buf, size_t count)
3138 int n;
3140 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3141 scsi_debug_vpd_use_hostno = n;
3142 return count;
3144 return -EINVAL;
3146 DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show,
3147 sdebug_vpd_use_hostno_store);
3149 static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf)
3151 return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);
3153 DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL);
3155 static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf)
3157 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);
3159 DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL);
3161 static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf)
3163 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);
3165 DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL);
3167 static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf)
3169 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard);
3171 DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL);
3173 static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf)
3175 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);
3177 DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL);
3179 static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
3181 ssize_t count;
3183 if (!scsi_debug_lbp())
3184 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
3185 sdebug_store_sectors);
3187 count = bitmap_scnlistprintf(buf, PAGE_SIZE, map_storep, map_size);
3189 buf[count++] = '\n';
3190 buf[count++] = 0;
3192 return count;
3194 DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL);
3196 static ssize_t sdebug_removable_show(struct device_driver *ddp,
3197 char *buf)
3199 return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);
3201 static ssize_t sdebug_removable_store(struct device_driver *ddp,
3202 const char *buf, size_t count)
3204 int n;
3206 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
3207 scsi_debug_removable = (n > 0);
3208 return count;
3210 return -EINVAL;
3212 DRIVER_ATTR(removable, S_IRUGO | S_IWUSR, sdebug_removable_show,
3213 sdebug_removable_store);
3216 /* Note: The following function creates attribute files in the
3217 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
3218 files (over those found in the /sys/module/scsi_debug/parameters
3219 directory) is that auxiliary actions can be triggered when an attribute
3220 is changed. For example see: sdebug_add_host_store() above.
3222 static int do_create_driverfs_files(void)
3224 int ret;
3226 ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3227 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay);
3228 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3229 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3230 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3231 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3232 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3233 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3234 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3235 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3236 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3237 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3238 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3239 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts);
3240 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_removable);
3241 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3242 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3243 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3244 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3245 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix);
3246 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif);
3247 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard);
3248 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato);
3249 ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map);
3250 return ret;
3253 static void do_remove_driverfs_files(void)
3255 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map);
3256 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato);
3257 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard);
3258 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif);
3259 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix);
3260 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size);
3261 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno);
3262 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb);
3263 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level);
3264 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts);
3265 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype);
3266 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_removable);
3267 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts);
3268 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts);
3269 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld);
3270 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0);
3271 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue);
3272 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns);
3273 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw);
3274 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth);
3275 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense);
3276 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb);
3277 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay);
3278 driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host);
3281 struct device *pseudo_primary;
3283 static int __init scsi_debug_init(void)
3285 unsigned long sz;
3286 int host_to_add;
3287 int k;
3288 int ret;
3290 switch (scsi_debug_sector_size) {
3291 case 512:
3292 case 1024:
3293 case 2048:
3294 case 4096:
3295 break;
3296 default:
3297 printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
3298 scsi_debug_sector_size);
3299 return -EINVAL;
3302 switch (scsi_debug_dif) {
3304 case SD_DIF_TYPE0_PROTECTION:
3305 case SD_DIF_TYPE1_PROTECTION:
3306 case SD_DIF_TYPE2_PROTECTION:
3307 case SD_DIF_TYPE3_PROTECTION:
3308 break;
3310 default:
3311 printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
3312 return -EINVAL;
3315 if (scsi_debug_guard > 1) {
3316 printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
3317 return -EINVAL;
3320 if (scsi_debug_ato > 1) {
3321 printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
3322 return -EINVAL;
3325 if (scsi_debug_physblk_exp > 15) {
3326 printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
3327 scsi_debug_physblk_exp);
3328 return -EINVAL;
3331 if (scsi_debug_lowest_aligned > 0x3fff) {
3332 printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
3333 scsi_debug_lowest_aligned);
3334 return -EINVAL;
3337 if (scsi_debug_dev_size_mb < 1)
3338 scsi_debug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
3339 sz = (unsigned long)scsi_debug_dev_size_mb * 1048576;
3340 sdebug_store_sectors = sz / scsi_debug_sector_size;
3341 sdebug_capacity = get_sdebug_capacity();
3343 /* play around with geometry, don't waste too much on track 0 */
3344 sdebug_heads = 8;
3345 sdebug_sectors_per = 32;
3346 if (scsi_debug_dev_size_mb >= 256)
3347 sdebug_heads = 64;
3348 else if (scsi_debug_dev_size_mb >= 16)
3349 sdebug_heads = 32;
3350 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3351 (sdebug_sectors_per * sdebug_heads);
3352 if (sdebug_cylinders_per >= 1024) {
3353 /* other LLDs do this; implies >= 1GB ram disk ... */
3354 sdebug_heads = 255;
3355 sdebug_sectors_per = 63;
3356 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
3357 (sdebug_sectors_per * sdebug_heads);
3360 fake_storep = vmalloc(sz);
3361 if (NULL == fake_storep) {
3362 printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
3363 return -ENOMEM;
3365 memset(fake_storep, 0, sz);
3366 if (scsi_debug_num_parts > 0)
3367 sdebug_build_parts(fake_storep, sz);
3369 if (scsi_debug_dix) {
3370 int dif_size;
3372 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
3373 dif_storep = vmalloc(dif_size);
3375 printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
3376 dif_size, dif_storep);
3378 if (dif_storep == NULL) {
3379 printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
3380 ret = -ENOMEM;
3381 goto free_vm;
3384 memset(dif_storep, 0xff, dif_size);
3387 /* Logical Block Provisioning */
3388 if (scsi_debug_lbp()) {
3389 scsi_debug_unmap_max_blocks =
3390 clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU);
3392 scsi_debug_unmap_max_desc =
3393 clamp(scsi_debug_unmap_max_desc, 0U, 256U);
3395 scsi_debug_unmap_granularity =
3396 clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU);
3398 if (scsi_debug_unmap_alignment &&
3399 scsi_debug_unmap_granularity <=
3400 scsi_debug_unmap_alignment) {
3401 printk(KERN_ERR
3402 "%s: ERR: unmap_granularity <= unmap_alignment\n",
3403 __func__);
3404 return -EINVAL;
3407 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
3408 map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
3410 printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
3411 map_size);
3413 if (map_storep == NULL) {
3414 printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
3415 ret = -ENOMEM;
3416 goto free_vm;
3419 bitmap_zero(map_storep, map_size);
3421 /* Map first 1KB for partition table */
3422 if (scsi_debug_num_parts)
3423 map_region(0, 2);
3426 pseudo_primary = root_device_register("pseudo_0");
3427 if (IS_ERR(pseudo_primary)) {
3428 printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
3429 ret = PTR_ERR(pseudo_primary);
3430 goto free_vm;
3432 ret = bus_register(&pseudo_lld_bus);
3433 if (ret < 0) {
3434 printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
3435 ret);
3436 goto dev_unreg;
3438 ret = driver_register(&sdebug_driverfs_driver);
3439 if (ret < 0) {
3440 printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
3441 ret);
3442 goto bus_unreg;
3444 ret = do_create_driverfs_files();
3445 if (ret < 0) {
3446 printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n",
3447 ret);
3448 goto del_files;
3451 init_all_queued();
3453 host_to_add = scsi_debug_add_host;
3454 scsi_debug_add_host = 0;
3456 for (k = 0; k < host_to_add; k++) {
3457 if (sdebug_add_adapter()) {
3458 printk(KERN_ERR "scsi_debug_init: "
3459 "sdebug_add_adapter failed k=%d\n", k);
3460 break;
3464 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
3465 printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
3466 scsi_debug_add_host);
3468 return 0;
3470 del_files:
3471 do_remove_driverfs_files();
3472 driver_unregister(&sdebug_driverfs_driver);
3473 bus_unreg:
3474 bus_unregister(&pseudo_lld_bus);
3475 dev_unreg:
3476 root_device_unregister(pseudo_primary);
3477 free_vm:
3478 if (map_storep)
3479 vfree(map_storep);
3480 if (dif_storep)
3481 vfree(dif_storep);
3482 vfree(fake_storep);
3484 return ret;
3487 static void __exit scsi_debug_exit(void)
3489 int k = scsi_debug_add_host;
3491 stop_all_queued();
3492 for (; k; k--)
3493 sdebug_remove_adapter();
3494 do_remove_driverfs_files();
3495 driver_unregister(&sdebug_driverfs_driver);
3496 bus_unregister(&pseudo_lld_bus);
3497 root_device_unregister(pseudo_primary);
3499 if (dif_storep)
3500 vfree(dif_storep);
3502 vfree(fake_storep);
3505 device_initcall(scsi_debug_init);
3506 module_exit(scsi_debug_exit);
3508 static void sdebug_release_adapter(struct device * dev)
3510 struct sdebug_host_info *sdbg_host;
3512 sdbg_host = to_sdebug_host(dev);
3513 kfree(sdbg_host);
3516 static int sdebug_add_adapter(void)
3518 int k, devs_per_host;
3519 int error = 0;
3520 struct sdebug_host_info *sdbg_host;
3521 struct sdebug_dev_info *sdbg_devinfo, *tmp;
3523 sdbg_host = kzalloc(sizeof(*sdbg_host),GFP_KERNEL);
3524 if (NULL == sdbg_host) {
3525 printk(KERN_ERR "%s: out of memory at line %d\n",
3526 __func__, __LINE__);
3527 return -ENOMEM;
3530 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
3532 devs_per_host = scsi_debug_num_tgts * scsi_debug_max_luns;
3533 for (k = 0; k < devs_per_host; k++) {
3534 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
3535 if (!sdbg_devinfo) {
3536 printk(KERN_ERR "%s: out of memory at line %d\n",
3537 __func__, __LINE__);
3538 error = -ENOMEM;
3539 goto clean;
3543 spin_lock(&sdebug_host_list_lock);
3544 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
3545 spin_unlock(&sdebug_host_list_lock);
3547 sdbg_host->dev.bus = &pseudo_lld_bus;
3548 sdbg_host->dev.parent = pseudo_primary;
3549 sdbg_host->dev.release = &sdebug_release_adapter;
3550 dev_set_name(&sdbg_host->dev, "adapter%d", scsi_debug_add_host);
3552 error = device_register(&sdbg_host->dev);
3554 if (error)
3555 goto clean;
3557 ++scsi_debug_add_host;
3558 return error;
3560 clean:
3561 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
3562 dev_list) {
3563 list_del(&sdbg_devinfo->dev_list);
3564 kfree(sdbg_devinfo);
3567 kfree(sdbg_host);
3568 return error;
3571 static void sdebug_remove_adapter(void)
3573 struct sdebug_host_info * sdbg_host = NULL;
3575 spin_lock(&sdebug_host_list_lock);
3576 if (!list_empty(&sdebug_host_list)) {
3577 sdbg_host = list_entry(sdebug_host_list.prev,
3578 struct sdebug_host_info, host_list);
3579 list_del(&sdbg_host->host_list);
3581 spin_unlock(&sdebug_host_list_lock);
3583 if (!sdbg_host)
3584 return;
3586 device_unregister(&sdbg_host->dev);
3587 --scsi_debug_add_host;
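/* Command entry point: decode the CDB, apply any error injection selected by
 * scsi_debug_opts/every_nth, dispatch to the resp_* handlers above, and
 * schedule the response (optionally delayed by the 'delay' parameter). */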
3590 static
3591 int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
3593 unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
3594 int len, k;
3595 unsigned int num;
3596 unsigned long long lba;
3597 u32 ei_lba;
3598 int errsts = 0;
3599 int target = SCpnt->device->id;
3600 struct sdebug_dev_info *devip = NULL;
3601 int inj_recovered = 0;
3602 int inj_transport = 0;
3603 int inj_dif = 0;
3604 int inj_dix = 0;
3605 int delay_override = 0;
3606 int unmap = 0;

	scsi_set_resid(SCpnt, 0);
	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
		printk(KERN_INFO "scsi_debug: cmd ");
		for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
			printk("%02x ", (int)cmd[k]);
		printk("\n");
	}

	if (target == SCpnt->device->host->hostt->this_id) {
		printk(KERN_INFO "scsi_debug: initiator's id used as "
		       "target!\n");
		return schedule_resp(SCpnt, NULL, done,
				     DID_NO_CONNECT << 16, 0);
	}

	if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
	    (SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
		return schedule_resp(SCpnt, NULL, done,
				     DID_NO_CONNECT << 16, 0);
	devip = devInfoReg(SCpnt->device);
	if (NULL == devip)
		return schedule_resp(SCpnt, NULL, done,
				     DID_NO_CONNECT << 16, 0);
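
	/* Fault injection, driven by the every_nth and opts parameters:
	 * either swallow the command (to force a timeout) or flag a
	 * recovered/transport/DIF/DIX error to be applied to this
	 * command's read or write handling below. */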
	if ((scsi_debug_every_nth != 0) &&
	    (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
		scsi_debug_cmnd_count = 0;
		if (scsi_debug_every_nth < -1)
			scsi_debug_every_nth = -1;
		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
			return 0; /* ignore command causing timeout */
		else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
			 scsi_medium_access_command(SCpnt))
			return 0; /* time out reads and writes */
		else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
			inj_recovered = 1; /* to reads and writes below */
		else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
			inj_transport = 1; /* to reads and writes below */
		else if (SCSI_DEBUG_OPT_DIF_ERR & scsi_debug_opts)
			inj_dif = 1; /* to reads and writes below */
		else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
			inj_dix = 1; /* to reads and writes below */
	}
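
	/* The REPORT LUNS well known LU accepts only a small opcode subset. */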
	if (devip->wlun) {
		switch (*cmd) {
		case INQUIRY:
		case REQUEST_SENSE:
		case TEST_UNIT_READY:
		case REPORT_LUNS:
			break;  /* only allowable wlun commands */
		default:
			if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
				printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
				       "not supported for wlun\n", *cmd);
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_OPCODE, 0);
			errsts = check_condition_result;
			return schedule_resp(SCpnt, devip, done, errsts, 0);
		}
	}
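
	/* Per-opcode emulation; anything not handled below falls through to
	 * the default case as ILLEGAL REQUEST / INVALID OPCODE. */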
	switch (*cmd) {
	case INQUIRY:     /* mandatory, ignore unit attention */
		delay_override = 1;
		errsts = resp_inquiry(SCpnt, target, devip);
		break;
	case REQUEST_SENSE:	/* mandatory, ignore unit attention */
		delay_override = 1;
		errsts = resp_requests(SCpnt, devip);
		break;
	case REZERO_UNIT:	/* actually this is REWIND for SSC */
	case START_STOP:
		errsts = resp_start_stop(SCpnt, devip);
		break;
	case ALLOW_MEDIUM_REMOVAL:
		errsts = check_readiness(SCpnt, 1, devip);
		if (errsts)
			break;
		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
			printk(KERN_INFO "scsi_debug: Medium removal %s\n",
			       cmd[4] ? "inhibited" : "enabled");
		break;
	case SEND_DIAGNOSTIC:     /* mandatory */
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case TEST_UNIT_READY:     /* mandatory */
		delay_override = 1;
		errsts = check_readiness(SCpnt, 0, devip);
		break;
	case RESERVE:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case RESERVE_10:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case RELEASE:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case RELEASE_10:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case READ_CAPACITY:
		errsts = resp_readcap(SCpnt, devip);
		break;
	case SERVICE_ACTION_IN:
		if (cmd[1] == SAI_READ_CAPACITY_16)
			errsts = resp_readcap16(SCpnt, devip);
		else if (cmd[1] == SAI_GET_LBA_STATUS) {
			if (scsi_debug_lbp() == 0) {
				mk_sense_buffer(devip, ILLEGAL_REQUEST,
						INVALID_COMMAND_OPCODE, 0);
				errsts = check_condition_result;
			} else
				errsts = resp_get_lba_status(SCpnt, devip);
		} else {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_OPCODE, 0);
			errsts = check_condition_result;
		}
		break;
	case MAINTENANCE_IN:
		if (MI_REPORT_TARGET_PGS != cmd[1]) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_OPCODE, 0);
			errsts = check_condition_result;
			break;
		}
		errsts = resp_report_tgtpgs(SCpnt, devip);
		break;
	case READ_16:
	case READ_12:
	case READ_10:
		/* READ{10,12,16} and DIF Type 2 are natural enemies */
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
		    cmd[1] & 0xe0) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_COMMAND_OPCODE, 0);
			errsts = check_condition_result;
			break;
		}

		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");

		/* fall through */
	case READ_6:
read:
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
		if (inj_recovered && (0 == errsts)) {
			mk_sense_buffer(devip, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			errsts = check_condition_result;
		} else if (inj_transport && (0 == errsts)) {
			mk_sense_buffer(devip, ABORTED_COMMAND,
					TRANSPORT_PROBLEM, ACK_NAK_TO);
			errsts = check_condition_result;
		} else if (inj_dif && (0 == errsts)) {
			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
			errsts = illegal_condition_result;
		} else if (inj_dix && (0 == errsts)) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
			errsts = illegal_condition_result;
		}
		break;
	case REPORT_LUNS:	/* mandatory, ignore unit attention */
		delay_override = 1;
		errsts = resp_report_luns(SCpnt, devip);
		break;
	case VERIFY:		/* 10 byte SBC-2 command */
		errsts = check_readiness(SCpnt, 0, devip);
		break;
	case WRITE_16:
	case WRITE_12:
	case WRITE_10:
		/* WRITE{10,12,16} and DIF Type 2 are natural enemies */
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
		    cmd[1] & 0xe0) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_COMMAND_OPCODE, 0);
			errsts = check_condition_result;
			break;
		}

		if ((scsi_debug_dif == SD_DIF_TYPE1_PROTECTION ||
		     scsi_debug_dif == SD_DIF_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			printk(KERN_ERR "Unprotected RD/WR to DIF device\n");

		/* fall through */
	case WRITE_6:
write:
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
		if (inj_recovered && (0 == errsts)) {
			mk_sense_buffer(devip, RECOVERED_ERROR,
					THRESHOLD_EXCEEDED, 0);
			errsts = check_condition_result;
		} else if (inj_dif && (0 == errsts)) {
			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
			errsts = illegal_condition_result;
		} else if (inj_dix && (0 == errsts)) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
			errsts = illegal_condition_result;
		}
		break;
	case WRITE_SAME_16:
	case WRITE_SAME:
		if (cmd[1] & 0x8) {
			if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
			    (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
				mk_sense_buffer(devip, ILLEGAL_REQUEST,
						INVALID_FIELD_IN_CDB, 0);
				errsts = check_condition_result;
			} else
				unmap = 1;
		}
		if (errsts)
			break;
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
		break;
	case UNMAP:
		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;

		if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_COMMAND_OPCODE, 0);
			errsts = check_condition_result;
		} else
			errsts = resp_unmap(SCpnt, devip);
		break;
	case MODE_SENSE:
	case MODE_SENSE_10:
		errsts = resp_mode_sense(SCpnt, target, devip);
		break;
	case MODE_SELECT:
		errsts = resp_mode_select(SCpnt, 1, devip);
		break;
	case MODE_SELECT_10:
		errsts = resp_mode_select(SCpnt, 0, devip);
		break;
	case LOG_SENSE:
		errsts = resp_log_sense(SCpnt, devip);
		break;
	case SYNCHRONIZE_CACHE:
		delay_override = 1;
		errsts = check_readiness(SCpnt, 0, devip);
		break;
	case WRITE_BUFFER:
		errsts = check_readiness(SCpnt, 1, devip);
		break;
	case XDWRITEREAD_10:
		if (!scsi_bidi_cmnd(SCpnt)) {
			mk_sense_buffer(devip, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			errsts = check_condition_result;
			break;
		}

		errsts = check_readiness(SCpnt, 0, devip);
		if (errsts)
			break;
		if (scsi_debug_fake_rw)
			break;
		get_data_transfer_info(cmd, &lba, &num, &ei_lba);
		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
		if (errsts)
			break;
		errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
		if (errsts)
			break;
		errsts = resp_xdwriteread(SCpnt, lba, num, devip);
		break;
	case VARIABLE_LENGTH_CMD:
		if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION) {
			if ((cmd[10] & 0xe0) == 0)
				printk(KERN_ERR
				       "Unprotected RD/WR to DIF device\n");

			if (cmd[9] == READ_32) {
				BUG_ON(SCpnt->cmd_len < 32);
				goto read;
			}

			if (cmd[9] == WRITE_32) {
				BUG_ON(SCpnt->cmd_len < 32);
				goto write;
			}
		}

		mk_sense_buffer(devip, ILLEGAL_REQUEST,
				INVALID_FIELD_IN_CDB, 0);
		errsts = check_condition_result;
		break;
	default:
		if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
			printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
			       "supported\n", *cmd);
		errsts = check_readiness(SCpnt, 1, devip);
		if (errsts)
			break;	/* Unit attention takes precedence */
		mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
		errsts = check_condition_result;
		break;
	}
	return schedule_resp(SCpnt, devip, done, errsts,
			     (delay_override ? 0 : scsi_debug_delay));
}
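
/* DEF_SCSI_QCMD() emits scsi_debug_queuecommand(), a wrapper that takes the
 * host lock before calling the _lck routine above. */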
static DEF_SCSI_QCMD(scsi_debug_queuecommand)
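
/* Host template handed to the midlayer; can_queue is overridden from the
 * max_queue parameter in sdebug_driver_probe(). */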
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_bus_reset_handler =	scsi_debug_bus_reset,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.bios_param =		scsi_debug_biosparam,
	.can_queue =		SCSI_DEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		256,
	.cmd_per_lun =		16,
	.max_sectors =		0xffff,
	.use_clustering =	DISABLE_CLUSTERING,
	.module =		THIS_MODULE,
};
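
/* Bus probe: allocate a Scsi_Host, configure DIF/DIX protection and guard
 * type from the dif, dix and guard parameters, then add and scan the host. */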
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int host_prot;

	sdbg_host = to_sdebug_host(dev);

	sdebug_driver_template.can_queue = scsi_debug_max_queue;
	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
	if (NULL == hpnt) {
		printk(KERN_ERR "%s: scsi_host_alloc failed\n", __func__);
		error = -ENODEV;
		return error;
	}

	sdbg_host->shost = hpnt;
	*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
	if ((hpnt->this_id >= 0) && (scsi_debug_num_tgts > hpnt->this_id))
		hpnt->max_id = scsi_debug_num_tgts + 1;
	else
		hpnt->max_id = scsi_debug_num_tgts;
	hpnt->max_lun = SAM2_WLUN_REPORT_LUNS;	/* = scsi_debug_max_luns; */

	host_prot = 0;

	switch (scsi_debug_dif) {
	case SD_DIF_TYPE1_PROTECTION:
		host_prot = SHOST_DIF_TYPE1_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case SD_DIF_TYPE2_PROTECTION:
		host_prot = SHOST_DIF_TYPE2_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case SD_DIF_TYPE3_PROTECTION:
		host_prot = SHOST_DIF_TYPE3_PROTECTION;
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (scsi_debug_dix)
			host_prot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, host_prot);

	printk(KERN_INFO "scsi_debug: host protection%s%s%s%s%s%s%s\n",
	       (host_prot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
	       (host_prot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
	       (host_prot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
	       (host_prot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
	       (host_prot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
	       (host_prot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
	       (host_prot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
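
	/* guard=1 selects the IP checksum flavour of DIX; otherwise the
	 * T10 CRC guard is used. */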
	if (scsi_debug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
		error = -ENODEV;
		scsi_host_put(hpnt);
	} else
		scsi_scan_host(hpnt);

	return error;
}
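
/* Bus remove: unregister the Scsi_Host and free the per-device entries. */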
static int sdebug_driver_remove(struct device *dev)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = to_sdebug_host(dev);

	if (!sdbg_host) {
		printk(KERN_ERR "%s: Unable to locate host info\n",
		       __func__);
		return -ENODEV;
	}

	scsi_remove_host(sdbg_host->shost);

	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo);
	}

	scsi_host_put(sdbg_host->shost);
	return 0;
}
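
/* Every device on the pseudo bus matches the single pseudo driver. */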
static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}
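
/* The pseudo bus that the emulated adapters are registered on; its probe
 * and remove callbacks create and tear down the scsi_debug hosts. */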
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.match = pseudo_lld_bus_match,
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,